[mlpack-svn] r13116 - mlpack/trunk/src/mlpack/methods/local_coordinate_coding

fastlab-svn at coffeetalk-1.cc.gatech.edu
Wed Jun 27 14:29:07 EDT 2012


Author: rcurtin
Date: 2012-06-27 14:29:07 -0400 (Wed, 27 Jun 2012)
New Revision: 13116

Modified:
   mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.cpp
   mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.hpp
Log:
Clean up style violations.


Modified: mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.cpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.cpp	2012-06-27 17:07:04 UTC (rev 13115)
+++ mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.cpp	2012-06-27 18:29:07 UTC (rev 13116)
@@ -17,7 +17,9 @@
 namespace mlpack {
 namespace lcc {
 
-LocalCoordinateCoding::LocalCoordinateCoding(const mat& matX, uword nAtoms, double lambda) :
+LocalCoordinateCoding::LocalCoordinateCoding(const mat& matX,
+                                             uword nAtoms,
+                                             double lambda) :
   nDims(matX.n_rows),
   nAtoms(nAtoms),
   nPoints(matX.n_cols),
@@ -27,64 +29,68 @@
 { /* nothing left to do */ }
 
 
-void LocalCoordinateCoding::SetDictionary(const mat& matD) {
+void LocalCoordinateCoding::SetDictionary(const mat& matD)
+{
   this->matD = matD;
 }
 
 
-void LocalCoordinateCoding::InitDictionary() {
+void LocalCoordinateCoding::InitDictionary()
+{
   RandomInitDictionary();
 }
 
 
-void LocalCoordinateCoding::LoadDictionary(const char* dictionaryFilename) {
+void LocalCoordinateCoding::LoadDictionary(const char* dictionaryFilename)
+{
   matD.load(dictionaryFilename);
 }
 
 
-void LocalCoordinateCoding::RandomInitDictionary() {
+void LocalCoordinateCoding::RandomInitDictionary()
+{
   matD = randn(nDims, nAtoms);
-  for(uword j = 0; j < nAtoms; j++) {
+  for (uword j = 0; j < nAtoms; j++)
     matD.col(j) /= norm(matD.col(j), 2);
-  }
 }
 
 
-void LocalCoordinateCoding::DataDependentRandomInitDictionary() {
+void LocalCoordinateCoding::DataDependentRandomInitDictionary()
+{
   matD = mat(nDims, nAtoms);
-  for(uword j = 0; j < nAtoms; j++) {
+  for (uword j = 0; j < nAtoms; j++)
+  {
     vec vecD_j = matD.unsafe_col(j);
     RandomAtom(vecD_j);
   }
 }
 
 
-void LocalCoordinateCoding::RandomAtom(vec& atom) {
+void LocalCoordinateCoding::RandomAtom(vec& atom)
+{
   atom.zeros();
   const uword nSeedAtoms = 3;
-  for(uword i = 0; i < nSeedAtoms; i++) {
-    atom +=  matX.col(rand() % nPoints);
-  }
+  for (uword i = 0; i < nSeedAtoms; i++)
+    atom += matX.col(rand() % nPoints);
+
   atom /= ((double) nSeedAtoms);
   atom /= norm(atom, 2);
 }
 
-
-void LocalCoordinateCoding::DoLCC(uword nIterations) {
-
+void LocalCoordinateCoding::DoLCC(uword nIterations)
+{
   bool converged = false;
   double lastObjVal = 1e99;
 
   Log::Info << "Initial Coding Step" << endl;
   OptimizeCode();
   uvec adjacencies = find(matZ);
-  Log::Info << "\tSparsity level: "
-	    << 100.0 * ((double)(adjacencies.n_elem))
-                     / ((double)(nAtoms * nPoints))
-	    << "%\n";
+  Log::Info << "\tSparsity level: " << 100.0 * ((double)(adjacencies.n_elem)) /
+      ((double)(nAtoms * nPoints)) << "%\n";
   Log::Info << "\tObjective value: " << Objective(adjacencies) << endl;
 
-  for(uword t = 1; t <= nIterations && !converged; t++) {
+  for (uword t = 1; t <= nIterations && !converged; t++)
+  {
     Log::Info << "Iteration " << t << " of " << nIterations << endl;
 
     Log::Info << "Dictionary Step\n";
@@ -95,21 +101,22 @@
     Log::Info << "Coding Step" << endl;
     OptimizeCode();
     adjacencies = find(matZ);
-    Log::Info << "\tSparsity level: "
-	      << 100.0 * ((double)(adjacencies.n_elem))
-                       / ((double)(nAtoms * nPoints))
-	      << "%\n";
+    Log::Info << "\tSparsity level: " << 100.0 * ((double)(adjacencies.n_elem))
+        / ((double)(nAtoms * nPoints)) << "%\n";
     double curObjVal = Objective(adjacencies);
     Log::Info << "\tObjective value: " << curObjVal << endl;
 
-    if(curObjVal > dsObjVal) {
+    if (curObjVal > dsObjVal)
+    {
       Log::Fatal << "Objective increased in sparse coding step!" << endl;
     }
 
     double objValImprov = lastObjVal - curObjVal;
-    Log::Info << "\t\t\t\t\tImprovement: " << std::scientific
-	      <<  objValImprov << endl;
-    if(objValImprov < OBJ_TOL) {
+    Log::Info << "\t\t\t\t\tImprovement: " << std::scientific << objValImprov
+        << endl;
+
+    if (objValImprov < OBJ_TOL)
+    {
       converged = true;
       Log::Info << "Converged within tolerance\n";
     }
@@ -118,21 +125,21 @@
   }
 }
 
+void LocalCoordinateCoding::OptimizeCode()
+{
+  mat matSqDists = repmat(trans(sum(square(matD))), 1, nPoints) +
+      repmat(sum(square(matX)), nAtoms, 1) - 2 * trans(matD) * matX;
 
-void LocalCoordinateCoding::OptimizeCode() {
-  mat matSqDists =
-    repmat(trans(sum(square(matD))), 1, nPoints)
-    + repmat(sum(square(matX)), nAtoms, 1)
-    - 2 * trans(matD) * matX;
-
   mat matInvSqDists = 1.0 / matSqDists;
 
   mat matDTD = trans(matD) * matD;
   mat matDPrimeTDPrime(matDTD.n_rows, matDTD.n_cols);
 
-  for(uword i = 0; i < nPoints; i++) {
+  for (uword i = 0; i < nPoints; i++)
+  {
     // report progress
-    if((i % 100) == 0) {
+    if ((i % 100) == 0)
+    {
       Log::Debug << "\t" << i << endl;
     }
 
@@ -144,11 +151,13 @@
 
     //LARS lars;
     // do we still need 0.5 * lambda? yes, yes we do
-    //lars.Init(matDPrime.memptr(), matX.colptr(i), nDims, nAtoms, true, 0.5 * lambda); // apparently not as fast as using the below duo
-                                                                                       // this may change, depending on the dimensionality and sparsity
+    //lars.Init(matDPrime.memptr(), matX.colptr(i), nDims, nAtoms, true, 0.5 *
+    //lambda); // apparently not as fast as using the below duo
+    // this may change, depending on the dimensionality and sparsity
 
     // the duo
-    /* lars.Init(matDPrime.memptr(), matX.colptr(i), nDims, nAtoms, false, 0.5 * lambda); */
+    /* lars.Init(matDPrime.memptr(), matX.colptr(i), nDims, nAtoms, false, 0.5 *
+     * lambda); */
     /* lars.SetGram(matDPrimeTDPrime.memptr(), nAtoms); */
 
     bool useCholesky = false;
@@ -160,22 +169,26 @@
   }
 }
 
-
-void LocalCoordinateCoding::OptimizeDictionary(uvec adjacencies) {
+void LocalCoordinateCoding::OptimizeDictionary(uvec adjacencies)
+{
   // count number of atomic neighbors for each point x^i
   uvec neighborCounts = zeros<uvec>(nPoints, 1);
-  if(adjacencies.n_elem > 0) {
+  if (adjacencies.n_elem > 0)
+  {
     // this gets the column index
     uword curPointInd = (uword)(adjacencies(0) / nAtoms);
     uword curCount = 1;
-    for(uword l = 1; l < adjacencies.n_elem; l++) {
-      if((uword)(adjacencies(l) / nAtoms) == curPointInd) {
-	curCount++;
+    for (uword l = 1; l < adjacencies.n_elem; l++)
+    {
+      if ((uword) (adjacencies(l) / nAtoms) == curPointInd)
+      {
+        curCount++;
       }
-      else {
-	neighborCounts(curPointInd) = curCount;
-	curPointInd = (uword)(adjacencies(l) / nAtoms);
-	curCount = 1;
+      else
+      {
+        neighborCounts(curPointInd) = curCount;
+        curPointInd = (uword)(adjacencies(l) / nAtoms);
+        curCount = 1;
       }
     }
     neighborCounts(curPointInd) = curCount;
@@ -186,10 +199,12 @@
   mat matXPrime = zeros(nDims, nPoints + adjacencies.n_elem);
   matXPrime(span::all, span(0, nPoints - 1)) = matX;
   uword curCol = nPoints;
-  for(uword i = 0; i < nPoints; i++) {
-    if(neighborCounts(i) > 0) {
+  for (uword i = 0; i < nPoints; i++)
+  {
+    if (neighborCounts(i) > 0)
+    {
       matXPrime(span::all, span(curCol, curCol + neighborCounts(i) - 1)) =
-	repmat(matX.col(i), 1, neighborCounts(i));
+          repmat(matX.col(i), 1, neighborCounts(i));
     }
     curCol += neighborCounts(i);
   }
@@ -198,11 +213,14 @@
   std::vector<uword> inactiveAtoms;
   std::vector<uword> activeAtoms;
   activeAtoms.reserve(nAtoms);
-  for(uword j = 0; j < nAtoms; j++) {
-    if(accu(matZ.row(j) != 0) == 0) {
+  for (uword j = 0; j < nAtoms; j++)
+  {
+    if (accu(matZ.row(j) != 0) == 0)
+    {
       inactiveAtoms.push_back(j);
     }
-    else {
+    else
+    {
       activeAtoms.push_back(j);
     }
   }
@@ -211,22 +229,27 @@
 
   // efficient construction of Z restricted to active atoms
   mat matActiveZ;
-  if(inactiveAtoms.empty()) {
+  if (inactiveAtoms.empty())
+  {
     matActiveZ = matZ;
   }
-  else {
-    uvec inactiveAtomsVec = conv_to< uvec >::from(inactiveAtoms);
+  else
+  {
+    uvec inactiveAtomsVec = conv_to<uvec>::from(inactiveAtoms);
     RemoveRows(matZ, inactiveAtomsVec, matActiveZ);
   }
 
   uvec atomReverseLookup = uvec(nAtoms);
-  for(uword i = 0; i < nActiveAtoms; i++) {
+  for (uword i = 0; i < nActiveAtoms; i++)
+  {
     atomReverseLookup(activeAtoms[i]) = i;
   }
 
 
-  if(nInactiveAtoms > 0) {
-    Log::Info << "There are " << nInactiveAtoms << " inactive atoms. They will be re-initialized randomly.\n";
+  if (nInactiveAtoms > 0)
+  {
+    Log::Info << "There are " << nInactiveAtoms << " inactive atoms. They will"
+        << " be re-initialized randomly.\n";
   }
 
   mat matZPrime = zeros(nActiveAtoms, nPoints + adjacencies.n_elem);
@@ -235,19 +258,21 @@
 
   vec wSquared = ones(nPoints + adjacencies.n_elem, 1);
   //Log::Debug << "building up matZPrime\n";
-  for(uword l = 0; l < adjacencies.n_elem; l++) {
+  for (uword l = 0; l < adjacencies.n_elem; l++)
+  {
     uword atomInd = adjacencies(l) % nAtoms;
     uword pointInd = (uword) (adjacencies(l) / nAtoms);
     matZPrime(atomReverseLookup(atomInd), nPoints + l) = 1.0;
     wSquared(nPoints + l) = matZ(atomInd, pointInd);
   }
 
-  wSquared.subvec(nPoints, wSquared.n_elem - 1) =
-    lambda * abs(wSquared.subvec(nPoints, wSquared.n_elem - 1));
+  wSquared.subvec(nPoints, wSquared.n_elem - 1) = lambda *
+      abs(wSquared.subvec(nPoints, wSquared.n_elem - 1));
 
   //Log::Debug << "about to solve\n";
   mat matDEstimate;
-  if(inactiveAtoms.empty()) {
+  if (inactiveAtoms.empty())
+  {
     mat A = matZPrime * diagmat(wSquared) * trans(matZPrime);
     mat B = matZPrime * diagmat(wSquared) * trans(matXPrime);
 
@@ -257,105 +282,110 @@
     /*
     matDEstimate =
       trans(solve(matZPrime * diagmat(wSquared) * trans(matZPrime),
-		  matZPrime * diagmat(wSquared) * trans(matXPrime)));
+                  matZPrime * diagmat(wSquared) * trans(matXPrime)));
     */
   }
-  else {
+  else
+  {
     matDEstimate = zeros(nDims, nAtoms);
     //Log::Debug << "solving...\n";
     mat matDActiveEstimate =
       trans(solve(matZPrime * diagmat(wSquared) * trans(matZPrime),
-		  matZPrime * diagmat(wSquared) * trans(matXPrime)));
-    for(uword j = 0; j < nActiveAtoms; j++) {
+                  matZPrime * diagmat(wSquared) * trans(matXPrime)));
+    for (uword j = 0; j < nActiveAtoms; j++)
+    {
       matDEstimate.col(activeAtoms[j]) = matDActiveEstimate.col(j);
     }
-    for(uword j = 0; j < nInactiveAtoms; j++) {
+
+    for (uword j = 0; j < nInactiveAtoms; j++)
+    {
       vec vecD_j = matDEstimate.unsafe_col(inactiveAtoms[j]);
       RandomAtom(vecD_j);
       /*
       vec new_atom = randn(nDims, 1);
-      matDEstimate.col(inactiveAtoms[i]) =
-	new_atom / norm(new_atom, 2);
+      matDEstimate.col(inactiveAtoms[i]) = new_atom / norm(new_atom, 2);
       */
     }
   }
+
   matD = matDEstimate;
 }
-// need to test above function, sleepy now, will resume soon!
 
-
-double LocalCoordinateCoding::Objective(uvec adjacencies) {
+double LocalCoordinateCoding::Objective(uvec adjacencies)
+{
   double weightedL1NormZ = 0;
   uword nAdjacencies = adjacencies.n_elem;
-  for(uword l = 0; l < nAdjacencies; l++) {
+  for (uword l = 0; l < nAdjacencies; l++)
+  {
     uword atomInd = adjacencies(l) % nAtoms;
     uword pointInd = (uword) (adjacencies(l) / nAtoms);
-    weightedL1NormZ += fabs(matZ(atomInd, pointInd)) * as_scalar(sum(square(matD.col(atomInd) - matX.col(pointInd))));
+    weightedL1NormZ += fabs(matZ(atomInd, pointInd)) *
+        as_scalar(sum(square(matD.col(atomInd) - matX.col(pointInd))));
   }
+
   double froNormResidual = norm(matX - matD * matZ, "fro");
   return froNormResidual * froNormResidual + lambda * weightedL1NormZ;
 }
 
-
-void LocalCoordinateCoding::PrintDictionary() {
+void LocalCoordinateCoding::PrintDictionary()
+{
   matD.print("Dictionary");
 }
 
-
-void LocalCoordinateCoding::PrintCoding() {
+void LocalCoordinateCoding::PrintCoding()
+{
   matZ.print("Coding matrix");
 }
 
-
-void RemoveRows(const mat& X, uvec rows_to_remove, mat& X_mod) {
-
+void RemoveRows(const mat& X, uvec rows_to_remove, mat& X_mod)
+{
   uword n_cols = X.n_cols;
   uword n_rows = X.n_rows;
   uword n_to_remove = rows_to_remove.n_elem;
   uword n_to_keep = n_rows - n_to_remove;
 
-  if(n_to_remove == 0) {
+  if (n_to_remove == 0)
+  {
     X_mod = X;
   }
-  else {
+  else
+  {
     X_mod.set_size(n_to_keep, n_cols);
 
     uword cur_row = 0;
     uword remove_ind = 0;
     // first, check 0 to first row to remove
-    if(rows_to_remove(0) > 0) {
+    if (rows_to_remove(0) > 0)
+    {
       // note that this implies that n_rows > 1
       uword height = rows_to_remove(0);
       X_mod(span(cur_row, cur_row + height - 1), span::all) =
-	X(span(0, rows_to_remove(0) - 1), span::all);
+          X(span(0, rows_to_remove(0) - 1), span::all);
       cur_row += height;
     }
-    // now, check i'th row to remove to (i + 1)'th row to remove, until i = penultimate row
-    while(remove_ind < n_to_remove - 1) {
-      uword height =
-	rows_to_remove[remove_ind + 1]
-	- rows_to_remove[remove_ind]
-	- 1;
-      if(height > 0) {
-	X_mod(span(cur_row, cur_row + height - 1),
-	      span::all) =
-	  X(span(rows_to_remove[remove_ind] + 1,
-		 rows_to_remove[remove_ind + 1] - 1),
-	    span::all);
-	cur_row += height;
+    // now, check i'th row to remove to (i + 1)'th row to remove, until i =
+    // penultimate row
+    while (remove_ind < n_to_remove - 1)
+    {
+      uword height = rows_to_remove[remove_ind + 1] - rows_to_remove[remove_ind]
+          - 1;
+      if (height > 0)
+      {
+        X_mod(span(cur_row, cur_row + height - 1), span::all) =
+            X(span(rows_to_remove[remove_ind] + 1,
+            rows_to_remove[remove_ind + 1] - 1), span::all);
+        cur_row += height;
       }
       remove_ind++;
     }
     // now that i is last row to remove, check last row to remove to last row
-    if(rows_to_remove[remove_ind] < n_rows - 1) {
-      X_mod(span(cur_row, n_to_keep - 1),
-	    span::all) =
-	X(span(rows_to_remove[remove_ind] + 1, n_rows - 1),
-	  span::all);
+    if (rows_to_remove[remove_ind] < n_rows - 1)
+    {
+      X_mod(span(cur_row, n_to_keep - 1), span::all) =
+          X(span(rows_to_remove[remove_ind] + 1, n_rows - 1), span::all);
     }
   }
 }
 
-
 }; // namespace lcc
 }; // namespace mlpack
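
As an aside on the OptimizeCode() hunk above: the repmat() expression builds the matrix of squared distances ||D_j - X_i||^2 for every atom/point pair at once, using the expansion ||d - x||^2 = ||d||^2 + ||x||^2 - 2 d'x. A minimal standalone sketch (not part of this commit, and assuming only Armadillo) that checks the vectorized form against the naive double loop:

#include <armadillo>
#include <iostream>

int main()
{
  const arma::uword nDims = 4, nAtoms = 3, nPoints = 5;
  arma::mat matD = arma::randn(nDims, nAtoms);
  arma::mat matX = arma::randn(nDims, nPoints);

  // Vectorized form, as in OptimizeCode(): entry (j, i) is ||D_j - X_i||^2.
  arma::mat matSqDists =
      arma::repmat(arma::trans(arma::sum(arma::square(matD))), 1, nPoints) +
      arma::repmat(arma::sum(arma::square(matX)), nAtoms, 1) -
      2 * arma::trans(matD) * matX;

  // Naive form, for comparison.
  arma::mat check(nAtoms, nPoints);
  for (arma::uword j = 0; j < nAtoms; j++)
    for (arma::uword i = 0; i < nPoints; i++)
      check(j, i) = arma::accu(arma::square(matD.col(j) - matX.col(i)));

  arma::mat diff = arma::abs(matSqDists - check);
  std::cout << "max abs difference: " << diff.max() << std::endl;
  return 0;
}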

Modified: mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.hpp
===================================================================
--- mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.hpp	2012-06-27 17:07:04 UTC (rev 13115)
+++ mlpack/trunk/src/mlpack/methods/local_coordinate_coding/lcc.hpp	2012-06-27 18:29:07 UTC (rev 13116)
@@ -1,52 +1,54 @@
-/** 
+/**
  * @file lcc.hpp
  * @author Nishant Mehta
  *
- * Definition of the LocalCoordinateCoding class, which performs the Local 
- * Coordinate Coding algorithm
+ * Definition of the LocalCoordinateCoding class, which performs the Local
+ * Coordinate Coding algorithm.
  */
-
 #ifndef __MLPACK_METHODS_LCC_LCC_HPP
 #define __MLPACK_METHODS_LCC_LCC_HPP
 
-//#include <armadillo>
 #include <mlpack/core.hpp>
 #include <mlpack/methods/lars/lars.hpp>
 
+// Include three simple dictionary initializers from
+
 namespace mlpack {
 namespace lcc {
 
 /**
- * An implementation of Local Coordinate Coding (LCC) that codes data which 
- * approximately lives on a manifold using a variation of l1-norm regularized 
- * sparse coding; in LCC, the penalty on the absolute value of each point's 
- * coefficient for each atom is weighted by the squared distance of that point 
- * to that atom. The paper is below. 
- * Let d be the number of dimensions in the original space, m the number of 
- * training points, and k the number of atoms in the dictionary (the dimension 
- * of the learned feature space). The training data X is a d-by-m matrix where 
- * each column is a point and each row is a dimension. The dictionary D is a 
+ * An implementation of Local Coordinate Coding (LCC) that codes data which
+ * approximately lives on a manifold using a variation of l1-norm regularized
+ * sparse coding; in LCC, the penalty on the absolute value of each point's
+ * coefficient for each atom is weighted by the squared distance of that point
+ * to that atom.
+ *
+ * Let d be the number of dimensions in the original space, m the number of
+ * training points, and k the number of atoms in the dictionary (the dimension
+ * of the learned feature space). The training data X is a d-by-m matrix where
+ * each column is a point and each row is a dimension. The dictionary D is a
  * d-by-k matrix, and the sparse codes matrix Z is a k-by-m matrix.
  * This program seeks to minimize the objective:
- * min_{D,Z} ||X - D Z||_{Fro}^2 
-             + lambda sum_{i=1}^m sum_{j=1}^k dist(X_i,D_j)^2 Z_i^j
+ * min_{D,Z} ||X - D Z||_{Fro}^2
+ *            + lambda sum_{i=1}^m sum_{j=1}^k dist(X_i,D_j)^2 Z_i^j
  * where lambda > 0.
  *
  * This problem is solved by an algorithm that alternates between a dictionary
  * learning step and a sparse coding step. The dictionary learning step updates
  * the dictionary D by solving a linear system (note that the objective is a
- * positive definite quadratic program). The sparse coding step involves 
- * solving a large number of weighted l1-norm regularized linear regression 
- * problems problems; this can be done efficiently using LARS, an algorithm 
+ * positive definite quadratic program). The sparse coding step involves
+ * solving a large number of weighted l1-norm regularized linear regression
+ * problems; this can be done efficiently using LARS, an algorithm
  * that can solve the LASSO (paper below).
  *
- * The papers:
+ * The papers are listed below.
  *
  * @incollection{NIPS2009_0719,
  *   title = {Nonlinear Learning using Local Coordinate Coding},
  *   author = {Kai Yu and Tong Zhang and Yihong Gong},
  *   booktitle = {Advances in Neural Information Processing Systems 22},
- *   editor = {Y. Bengio and D. Schuurmans and J. Lafferty and C. K. I. Williams and A. Culotta},
+ *   editor = {Y. Bengio and D. Schuurmans and J. Lafferty and C. K. I. Williams
+ *       and A. Culotta},
  *   pages = {2223--2231},
  *   year = {2009}
  * }
@@ -65,10 +67,9 @@
  * }
  * @endcode
  */
-class LocalCoordinateCoding {
-
+class LocalCoordinateCoding
+{
  public:
-  
   /**
    * Set the parameters to LocalCoordinateCoding.
    *
@@ -76,24 +77,25 @@
    * @param nAtoms Number of atoms in dictionary
    * @param lambda Regularization parameter for weighted l1-norm penalty
    */
-  LocalCoordinateCoding(const arma::mat& matX, arma::uword nAtoms, double lambda);
-  
+  LocalCoordinateCoding(const arma::mat& matX, arma::uword nAtoms,
+      double lambda);
+
   /**
-   * Initialize dictionary somehow
+   * Initialize dictionary somehow.
    */
   void InitDictionary();
-  
-  /** 
+
+  /**
    * Load dictionary from a file
-   * 
+   *
    * @param dictionaryFilename Filename containing dictionary
    */
   void LoadDictionary(const char* dictionaryFilename);
 
   /**
-   * Initialize dictionary by drawing k vectors uniformly at random from the 
+   * Initialize dictionary by drawing k vectors uniformly at random from the
    * unit sphere
-   */  
+   */
   void RandomInitDictionary();
 
   /**
@@ -112,7 +114,7 @@
 
 
   // core algorithm functions
-  
+
   /**
    * Run LCC
    *
@@ -124,40 +126,35 @@
    * Sparse code each point via distance-weighted LARS
    */
   void OptimizeCode();
-  
-  /** 
+
+  /**
   * Learn dictionary by solving linear system
    *
-   * @param adjacencies Indices of entries (unrolled column by column) of 
-   *    the coding matrix Z that are non-zero (the adjacency matrix for the 
+   * @param adjacencies Indices of entries (unrolled column by column) of
+   *    the coding matrix Z that are non-zero (the adjacency matrix for the
    *    bipartite graph of points and atoms)
    */
   void OptimizeDictionary(arma::uvec adjacencies);
 
   /**
    * Compute objective function
-   */  
+   */
   double Objective(arma::uvec adjacencies);
 
-
   // accessors, modifiers, printers
-  
+
   //! Modifier for matD
   void SetDictionary(const arma::mat& matD);
 
   //! Accessor for matD
-  const arma::mat& MatD() {
-    return matD;
-  }
+  const arma::mat& MatD() { return matD; }
 
   //! Accessor for matZ
-  const arma::mat& MatZ() {
-    return matZ;
-  }
+  const arma::mat& MatZ() { return matZ; }
 
   // Print the dictionary matD
   void PrintDictionary();
-    
+
   // Print the sparse codes matZ
   void PrintCoding();
 
@@ -174,14 +171,15 @@
   arma::mat matD;
 
   // sparse codes (columns are points)
-  arma::mat matZ; 
-  
+  arma::mat matZ;
+
   // l1 regularization term
   double lambda;
-  
 };
 
-void RemoveRows(const arma::mat& X, arma::uvec rows_to_remove, arma::mat& X_mod);
+void RemoveRows(const arma::mat& X,
+                arma::uvec rows_to_remove,
+                arma::mat& X_mod);
 
 
 }; // namespace lcc
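
Putting the interface above together, a typical call sequence would look roughly like the sketch below. This is not part of the commit: the data file name, the number of atoms (50), the lambda value (0.1), and the iteration count (10) are placeholder values, and the sketch assumes mlpack is installed so the headers shown above are on the include path.

#include <mlpack/core.hpp>
#include <mlpack/methods/local_coordinate_coding/lcc.hpp>

#include <iostream>

int main()
{
  // Each column of matX is one point (a d-by-m matrix), per the class
  // documentation above.  "data.csv" is a placeholder file name.
  arma::mat matX;
  matX.load("data.csv");

  mlpack::lcc::LocalCoordinateCoding coder(matX, 50, 0.1);  // placeholder parameters
  coder.InitDictionary();  // defaults to random unit-norm atoms
  coder.DoLCC(10);         // alternate coding and dictionary steps

  const arma::mat& matD = coder.MatD();  // learned dictionary (d-by-k)
  const arma::mat& matZ = coder.MatZ();  // sparse codes (k-by-m)

  // Residual of the reconstruction X ~ D Z.
  std::cout << "||X - D Z||_F = " << arma::norm(matX - matD * matZ, "fro")
      << std::endl;
  return 0;
}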



