[mlpack-git] master,mlpack-1.0.x: Syntax cleanup; just some spacing. No functionality change. (13bbeed)

gitdub at big.cc.gt.atl.ga.us
Thu Mar 5 21:46:39 EST 2015


Repository : https://github.com/mlpack/mlpack

On branches: master,mlpack-1.0.x
Link       : https://github.com/mlpack/mlpack/compare/904762495c039e345beba14c1142fd719b3bd50e...f94823c800ad6f7266995c700b1b630d5ffdcf40

>---------------------------------------------------------------

commit 13bbeedf18291cc16584617cc4cf1675ede93270
Author: Ryan Curtin <ryan at ratml.org>
Date:   Wed Apr 16 18:53:24 2014 +0000

    Syntax cleanup; just some spacing.  No functionality change.


>---------------------------------------------------------------

13bbeedf18291cc16584617cc4cf1675ede93270
 .../sparse_autoencoder_function.cpp                | 54 ++++++++++++----------
 .../sparse_autoencoder/sparse_autoencoder_impl.hpp |  4 +-
 2 files changed, 31 insertions(+), 27 deletions(-)

diff --git a/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_function.cpp b/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_function.cpp
index 1094d7e..b7ad71f 100644
--- a/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_function.cpp
+++ b/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_function.cpp
@@ -49,18 +49,18 @@ const arma::mat SparseAutoencoderFunction::InitializeWeights()
   // operations without making the code too ugly.
 
   arma::mat parameters;
-  parameters.zeros(2*hiddenSize + 1, visibleSize + 1);
+  parameters.zeros(2 * hiddenSize + 1, visibleSize + 1);
 
   // Initialize w1 and w2 to random values in the range [0, 1].
-  parameters.submat(0, 0, 2*hiddenSize - 1, visibleSize - 1).randu();
+  parameters.submat(0, 0, 2 * hiddenSize - 1, visibleSize - 1).randu();
 
   // Decide the parameter 'r' depending on the size of the visible and hidden
   // layers. The formula used is r = sqrt(6) / sqrt(vSize + hSize + 1).
   const double range = sqrt(6) / sqrt(visibleSize + hiddenSize + 1);
 
   // Shift range of w1 and w2 values from [0, 1] to [-r, r].
-  parameters.submat(0, 0, 2*hiddenSize - 1, visibleSize - 1) = 2 * range *
-      (parameters.submat(0, 0, 2*hiddenSize - 1, visibleSize - 1) - 0.5);
+  parameters.submat(0, 0, 2 * hiddenSize - 1, visibleSize - 1) = 2 * range *
+      (parameters.submat(0, 0, 2 * hiddenSize - 1, visibleSize - 1) - 0.5);
 
   return parameters;
 }
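
A note for readers skimming the archive: the hunk above only reformats the
initialization; the rescaling range is r = sqrt(6) / sqrt(visibleSize +
hiddenSize + 1), as the comment notes. A minimal standalone sketch of the
same scheme, with hypothetical example sizes that are not part of the commit:

    #include <armadillo>
    #include <cmath>

    int main()
    {
      const arma::uword visibleSize = 64;  // hypothetical example size
      const arma::uword hiddenSize = 25;   // hypothetical example size

      // w1, w2, b1 and b2 are packed into one (2h + 1) x (v + 1) matrix.
      arma::mat parameters;
      parameters.zeros(2 * hiddenSize + 1, visibleSize + 1);

      // Draw w1 and w2 uniformly in [0, 1]...
      parameters.submat(0, 0, 2 * hiddenSize - 1, visibleSize - 1).randu();

      // ...then shift them into [-r, r].
      const double range =
          std::sqrt(6.0) / std::sqrt(visibleSize + hiddenSize + 1);
      parameters.submat(0, 0, 2 * hiddenSize - 1, visibleSize - 1) =
          2 * range *
          (parameters.submat(0, 0, 2 * hiddenSize - 1, visibleSize - 1) - 0.5);

      parameters.print("initialized parameters");
      return 0;
    }
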
@@ -80,7 +80,7 @@ double SparseAutoencoderFunction::Evaluate(const arma::mat& parameters) const
   // Compute the limits for the parameters w1, w2, b1 and b2.
   const size_t l1 = hiddenSize;
   const size_t l2 = visibleSize;
-  const size_t l3 = 2*hiddenSize;
+  const size_t l3 = 2 * hiddenSize;
 
   // w1, w2, b1 and b2 are not extracted separately, 'parameters' is directly
   // used in their place to avoid copying data. The following representations
@@ -93,11 +93,12 @@ double SparseAutoencoderFunction::Evaluate(const arma::mat& parameters) const
   arma::mat hiddenLayer, outputLayer;
 
   // Compute activations of the hidden and output layers.
-  hiddenLayer = Sigmoid(parameters.submat(0, 0, l1-1, l2-1) * data +
-      arma::repmat(parameters.submat(0, l2, l1-1, l2), 1, data.n_cols));
+  hiddenLayer = Sigmoid(parameters.submat(0, 0, l1 - 1, l2 - 1) * data +
+      arma::repmat(parameters.submat(0, l2, l1 - 1, l2), 1, data.n_cols));
 
-  outputLayer = Sigmoid(parameters.submat(l1, 0, l3-1, l2-1).t() * hiddenLayer +
-      arma::repmat(parameters.submat(l3, 0, l3, l2-1).t(), 1, data.n_cols));
+  outputLayer = Sigmoid(
+      parameters.submat(l1, 0, l3 - 1, l2 - 1).t() * hiddenLayer +
+      arma::repmat(parameters.submat(l3, 0, l3, l2 - 1).t(), 1, data.n_cols));
 
   arma::mat rhoCap, diff;
 
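(Aside, since the comment above refers to representations defined just
off-screen: the packing that these submat() calls decode, recovered from the
index arithmetic in this diff, is

    w1 = parameters.submat(0,  0,  l1 - 1, l2 - 1)  // hiddenSize x visibleSize
    w2 = parameters.submat(l1, 0,  l3 - 1, l2 - 1)  // hiddenSize x visibleSize, used transposed
    b1 = parameters.submat(0,  l2, l1 - 1, l2)      // hiddenSize x 1
    b2 = parameters.submat(l3, 0,  l3,     l2 - 1)  // 1 x visibleSize, used transposed

so the two Sigmoid() lines above are the usual feed-forward pass,
hiddenLayer = sigmoid(w1 * data + b1) and
outputLayer = sigmoid(w2.t() * hiddenLayer + b2.t()), with the biases
broadcast over columns via repmat().)
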
@@ -109,8 +110,8 @@ double SparseAutoencoderFunction::Evaluate(const arma::mat& parameters) const
   double wL2SquaredNorm;
 
   // Calculate squared L2-norms of w1 and w2.
-  wL2SquaredNorm = arma::accu(parameters.submat(0, 0, l3-1, l2-1) %
-      parameters.submat(0, 0, l3-1, l2-1));
+  wL2SquaredNorm = arma::accu(parameters.submat(0, 0, l3 - 1, l2 - 1) %
+      parameters.submat(0, 0, l3 - 1, l2 - 1));
 
   double sumOfSquaresError, weightDecay, klDivergence, cost;
 
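The final objective assembled from these terms lies outside the hunk; for
context, a hedged sketch of how the standard sparse autoencoder cost combines
them, using the variable names above (an expectation of what the surrounding
code does, not a quote of it):

    // Mean reconstruction error over the data.n_cols points.
    sumOfSquaresError = 0.5 * arma::accu(diff % diff) / data.n_cols;

    // L2 weight decay on w1 and w2.
    weightDecay = 0.5 * lambda * wL2SquaredNorm;

    // Sparsity penalty: KL(rho || rhoCap) summed over the hidden units.
    klDivergence = beta * arma::accu(rho * arma::log(rho / rhoCap) +
        (1 - rho) * arma::log((1 - rho) / (1 - rhoCap)));

    cost = sumOfSquaresError + weightDecay + klDivergence;
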
@@ -145,7 +146,7 @@ void SparseAutoencoderFunction::Gradient(const arma::mat& parameters,
   // Compute the limits for the parameters w1, w2, b1 and b2.
   const size_t l1 = hiddenSize;
   const size_t l2 = visibleSize;
-  const size_t l3 = 2*hiddenSize;
+  const size_t l3 = 2 * hiddenSize;
 
   // w1, w2, b1 and b2 are not extracted separately, 'parameters' is directly
   // used in their place to avoid copying data. The following representations
@@ -158,11 +159,12 @@ void SparseAutoencoderFunction::Gradient(const arma::mat& parameters,
   arma::mat hiddenLayer, outputLayer;
 
   // Compute activations of the hidden and output layers.
-  hiddenLayer = Sigmoid(parameters.submat(0, 0, l1-1, l2-1) * data +
-      arma::repmat(parameters.submat(0, l2, l1-1, l2), 1, data.n_cols));
+  hiddenLayer = Sigmoid(parameters.submat(0, 0, l1 - 1, l2 - 1) * data +
+      arma::repmat(parameters.submat(0, l2, l1 - 1, l2), 1, data.n_cols));
 
-  outputLayer = Sigmoid(parameters.submat(l1, 0, l3-1, l2-1).t() * hiddenLayer +
-      arma::repmat(parameters.submat(l3, 0, l3, l2-1).t(), 1, data.n_cols));
+  outputLayer = Sigmoid(
+      parameters.submat(l1, 0, l3 - 1, l2 - 1).t() * hiddenLayer +
+      arma::repmat(parameters.submat(l3, 0, l3, l2 - 1).t(), 1, data.n_cols));
 
   arma::mat rhoCap, diff;
 
@@ -181,18 +183,20 @@ void SparseAutoencoderFunction::Gradient(const arma::mat& parameters,
   // includes the KL divergence term, we adjust for that in the formula below.
   klDivGrad = beta * (-(rho / rhoCap) + (1 - rho) / (1 - rhoCap));
   delOut = diff % outputLayer % (1 - outputLayer);
-  delHid = (parameters.submat(l1, 0, l3-1, l2-1) * delOut +
-      arma::repmat(klDivGrad, 1, data.n_cols)) % hiddenLayer % (1-hiddenLayer);
+  delHid = (parameters.submat(l1, 0, l3 - 1, l2 - 1) * delOut +
+      arma::repmat(klDivGrad, 1, data.n_cols)) % hiddenLayer %
+      (1 - hiddenLayer);
 
-  gradient.zeros(2*hiddenSize + 1, visibleSize + 1);
+  gradient.zeros(2 * hiddenSize + 1, visibleSize + 1);
 
   // Compute the gradient values using the activations and the delta values.
   // The formula also accounts for the regularization terms in the objective
   // function.
-  gradient.submat(0, 0, l1-1, l2-1) = delHid * data.t() / data.n_cols + lambda *
-      parameters.submat(0, 0, l1-1, l2-1);
-  gradient.submat(l1, 0, l3-1, l2-1) = (delOut * hiddenLayer.t() / data.n_cols +
-      lambda * parameters.submat(l1, 0, l3-1, l2-1).t()).t();
-  gradient.submat(0, l2, l1-1, l2) = arma::sum(delHid, 1) / data.n_cols;
-  gradient.submat(l3, 0, l3, l2-1) = (arma::sum(delOut, 1) / data.n_cols).t();
+  gradient.submat(0, 0, l1 - 1, l2 - 1) = delHid * data.t() / data.n_cols +
+      lambda * parameters.submat(0, 0, l1 - 1, l2 - 1);
+  gradient.submat(l1, 0, l3 - 1, l2 - 1) =
+      (delOut * hiddenLayer.t() / data.n_cols +
+      lambda * parameters.submat(l1, 0, l3 - 1, l2 - 1).t()).t();
+  gradient.submat(0, l2, l1 - 1, l2) = arma::sum(delHid, 1) / data.n_cols;
+  gradient.submat(l3, 0, l3, l2 - 1) = (arma::sum(delOut, 1) / data.n_cols).t();
 }
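
Since this hunk only reflows the backpropagation lines, a reviewer wanting to
confirm the "No functionality change" claim can compare Gradient() against a
central-difference estimate built from Evaluate(). A hedged sketch; the
GradientCheck helper is hypothetical, and only the Evaluate() and Gradient()
signatures visible above are assumed:

    // Header path assumed from the .cpp path in this diff.
    #include <mlpack/methods/sparse_autoencoder/sparse_autoencoder_function.hpp>

    // Hypothetical helper: largest absolute difference between the analytic
    // gradient and a central-difference estimate of it.
    double GradientCheck(mlpack::nn::SparseAutoencoderFunction& f,
                         arma::mat parameters)
    {
      arma::mat analytic;
      f.Gradient(parameters, analytic);

      const double eps = 1e-5;
      arma::mat numeric(parameters.n_rows, parameters.n_cols);
      for (arma::uword i = 0; i < parameters.n_elem; ++i)
      {
        const double orig = parameters(i);
        parameters(i) = orig + eps;
        const double up = f.Evaluate(parameters);
        parameters(i) = orig - eps;
        const double down = f.Evaluate(parameters);
        parameters(i) = orig;
        numeric(i) = (up - down) / (2 * eps);
      }

      const arma::mat err = arma::abs(analytic - numeric);
      return err.max();  // should be tiny (e.g. < 1e-6) if the two agree
    }
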
diff --git a/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_impl.hpp b/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_impl.hpp
index 6de0d97..16115da 100644
--- a/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_impl.hpp
+++ b/src/mlpack/methods/sparse_autoencoder/sparse_autoencoder_impl.hpp
@@ -66,8 +66,8 @@ void SparseAutoencoder<OptimizerType>::GetNewFeatures(arma::mat& data,
   const size_t l1 = hiddenSize;
   const size_t l2 = visibleSize;
 
-  features = Sigmoid(parameters.submat(0, 0, l1-1, l2-1) * data +
-      arma::repmat(parameters.submat(0, l2, l1-1, l2), 1, data.n_cols));
+  features = Sigmoid(parameters.submat(0, 0, l1 - 1, l2 - 1) * data +
+      arma::repmat(parameters.submat(0, l2, l1 - 1, l2), 1, data.n_cols));
 }
 
 }; // namespace nn
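
GetNewFeatures() above is the piece most users call after training; for
context, a hedged usage sketch (the constructor arguments, defaults, and
header path are assumptions, not taken from this diff):

    #include <mlpack/methods/sparse_autoencoder/sparse_autoencoder.hpp>

    using namespace mlpack::nn;

    // Hypothetical data: 100-dimensional points, one per column.
    arma::mat data;
    data.randu(100, 500);

    // Assumed interface: training runs in the constructor, with
    // visibleSize = 100, hiddenSize = 25, and the default optimizer.
    SparseAutoencoder<> encoder(data, 100, 25);

    // Map the data into the learned 25 x 500 hidden representation.
    arma::mat features;
    encoder.GetNewFeatures(data, features);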


