[mlpack-svn] r17388 - mlpack/trunk/src/mlpack/methods/amf/update_rules
fastlab-svn at coffeetalk-1.cc.gatech.edu
Wed Nov 19 12:09:01 EST 2014
Author: rcurtin
Date: Wed Nov 19 12:09:01 2014
New Revision: 17388
Log:
Minor code cleanups.
Modified:
mlpack/trunk/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
Modified: mlpack/trunk/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp
==============================================================================
--- mlpack/trunk/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp (original)
+++ mlpack/trunk/src/mlpack/methods/amf/update_rules/svd_batch_learning.hpp Wed Nov 19 12:09:01 2014
@@ -9,25 +9,23 @@
#include <mlpack/core.hpp>
-namespace mlpack
-{
-namespace amf
-{
+namespace mlpack {
+namespace amf {
/**
- * This class implements SVD batch learning with momentum. This procedure is
- * described in the paper 'A Guide to singular Value Decomposition'
- * by Chih-Chao Ma. Class implements 'Algorithm 4' given in the paper.
+ * This class implements SVD batch learning with momentum. This procedure is
+ * described in the paper 'A Guide to singular Value Decomposition'
+ * by Chih-Chao Ma. Class implements 'Algorithm 4' given in the paper.
* This factorizer decomposes the matrix V into two matrices W and H such that
* sum of sum of squared error between V and W*H is minimum. This optimization is
- * performed with gradient descent. To make gradient descent faster momentum is
- * added.
+ * performed with gradient descent. To make gradient descent faster momentum is
+ * added.
*/
class SVDBatchLearning
{
public:
/**
- * SVD Batch learning constructor.
+ * SVD Batch learning constructor.
*
* @param u step value used in batch learning
* @param kw regularization constant for W matrix
@@ -91,7 +89,7 @@
{
double val;
if((val = V(i, j)) != 0)
- deltaW.row(i) += (val - arma::dot(W.row(i), H.col(j))) *
+ deltaW.row(i) += (val - arma::dot(W.row(i), H.col(j))) *
arma::trans(H.col(j));
}
// add regularization
@@ -135,7 +133,7 @@
{
double val;
if((val = V(i, j)) != 0)
- deltaH.col(j) += (val - arma::dot(W.row(i), H.col(j))) *
+ deltaH.col(j) += (val - arma::dot(W.row(i), H.col(j))) *
arma::trans(W.row(i));
}
// add regularization
@@ -147,7 +145,7 @@
// add momentum to H
H += mH;
}
-
+
private:
//! step size of the algorithm
double u;
@@ -164,13 +162,13 @@
arma::mat mH;
}; // class SBDBatchLearning
-//! TODO : Merge this template specialized function for sparse matrix using
+//! TODO : Merge this template specialized function for sparse matrix using
//! common row_col_iterator
/**
* WUpdate function specialization for sparse matrix
*/
-template<>
+template<>
inline void SVDBatchLearning::WUpdate<arma::sp_mat>(const arma::sp_mat& V,
arma::mat& W,
const arma::mat& H)
@@ -188,7 +186,7 @@
{
size_t row = it.row();
size_t col = it.col();
- deltaW.row(it.row()) += (*it - arma::dot(W.row(row), H.col(col))) *
+ deltaW.row(it.row()) += (*it - arma::dot(W.row(row), H.col(col))) *
arma::trans(H.col(col));
}
@@ -219,7 +217,7 @@
{
size_t row = it.row();
size_t col = it.col();
- deltaH.col(col) += (*it - arma::dot(W.row(row), H.col(col))) *
+ deltaH.col(col) += (*it - arma::dot(W.row(row), H.col(col))) *
arma::trans(W.row(row));
}
@@ -235,7 +233,6 @@
} // namespace amf
} // namespace mlpack
-
#endif // __MLPACK_METHODS_AMF_UPDATE_RULES_SVD_BATCHLEARNING_HPP
More information about the mlpack-svn mailing list