[mlpack-git] master: Add implementation of the RPROP- method to update the weights. (3dfc05c)
gitdub at big.cc.gt.atl.ga.us
Thu Mar 5 22:09:18 EST 2015
Repository : https://github.com/mlpack/mlpack
On branch : master
Link : https://github.com/mlpack/mlpack/compare/904762495c039e345beba14c1142fd719b3bd50e...f94823c800ad6f7266995c700b1b630d5ffdcf40
>---------------------------------------------------------------
commit 3dfc05c46410ae9dee200070059d75c0ff24988d
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date: Thu Jan 1 13:13:57 2015 +0100
Add implementation of the RPROP- method to update the weights.
>---------------------------------------------------------------
3dfc05c46410ae9dee200070059d75c0ff24988d
.../ann/optimizer/{irpropm.hpp => rpropm.hpp} | 38 +++++++++-------------
1 file changed, 16 insertions(+), 22 deletions(-)
diff --git a/src/mlpack/methods/ann/optimizer/irpropm.hpp b/src/mlpack/methods/ann/optimizer/rpropm.hpp
similarity index 70%
copy from src/mlpack/methods/ann/optimizer/irpropm.hpp
copy to src/mlpack/methods/ann/optimizer/rpropm.hpp
index 4bb077b..36c6e35 100644
--- a/src/mlpack/methods/ann/optimizer/irpropm.hpp
+++ b/src/mlpack/methods/ann/optimizer/rpropm.hpp
@@ -1,12 +1,12 @@
/**
- * @file irpropm.hpp
+ * @file rpropm.hpp
* @author Marcus Edel
*
* Intialization rule for the neural networks. This simple initialization is
* performed by assigning a random matrix to the weight matrix.
*/
-#ifndef __MLPACK_METHOS_ANN_OPTIMIZER_IRPROPM_HPP
-#define __MLPACK_METHOS_ANN_OPTIMIZER_IRPROPM_HPP
+#ifndef __MLPACK_METHOS_ANN_OPTIMIZER_RPROPM_HPP
+#define __MLPACK_METHOS_ANN_OPTIMIZER_RPROPM_HPP
#include <mlpack/core.hpp>
#include <boost/math/special_functions/sign.hpp>
@@ -20,7 +20,7 @@ namespace ann /** Artificial Neural Network. */ {
* @tparam MatType Type of matrix (should be arma::mat or arma::spmat).
*/
template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class iRPROPm
+class RPROPm
{
public:
/**
@@ -30,18 +30,16 @@ class iRPROPm
* @param lowerBound The number used as lower bound.
* @param upperBound The number used as upper bound.
*/
- iRPROPm(const size_t cols,
- const size_t rows,
- const double etaMin = 0.5,
- const double etaPlus = 1.2,
- const double minDelta = 1e-9,
- const double maxDelta = 50) :
+ RPROPm(const size_t cols,
+ const size_t rows,
+ const double etaMin = 0.5,
+ const double etaPlus = 1.2,
+ const double minDelta = 1e-9,
+ const double maxDelta = 50) :
etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
{
prevDerivs = arma::zeros<MatType>(rows, cols);
prevDelta = arma::zeros<MatType>(rows, cols);
-
- prevError = arma::datum::inf;
}
void UpdateWeights(MatType& weights,
@@ -54,22 +52,18 @@ class iRPROPm
{
for (size_t j(0); j < derivs.n_rows; j++)
{
- if (derivs(j, i) >= 0)
- {
+ if (derivs(j, i) > 0)
prevDelta(j, i) = std::min(prevDelta(j, i) * etaPlus, maxDelta);
- prevDerivs(j, i) = gradient(j, i);
- }
else
- {
prevDelta(j, i) = std::max(prevDelta(j, i) * etaMin, minDelta);
- prevDerivs(j, i) = 0;
- }
}
}
weights -= arma::sign(gradient) % prevDelta;
+ prevDerivs = gradient;
}
+
private:
//! The number used as learning rate.
const double etaMin;
@@ -80,15 +74,15 @@ class iRPROPm
const double maxDelta;
- double prevError;
-
MatType prevDelta;
//! weight momentum
MatType prevDerivs;
-}; // class iRPROPm
+}; // class RPROPm
}; // namespace ann
}; // namespace mlpack
#endif
+
+
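For context, RPROP- ("resilient backpropagation" without weight-backtracking) adapts a per-weight step size from the sign of successive gradients and ignores the gradient magnitude, which is what the patched UpdateWeights loop above does. Below is a minimal standalone sketch of that update rule using Armadillo; it is not the mlpack class from the patch, and the 3x3 weight matrix, the quadratic objective, and the initial step size of 0.1 are illustrative assumptions.

```cpp
// Minimal RPROP- sketch (illustrative; not the mlpack implementation above).
#include <armadillo>
#include <algorithm>

int main()
{
  const double etaMin = 0.5, etaPlus = 1.2;    // step-size decrease/increase factors
  const double minDelta = 1e-9, maxDelta = 50; // step-size bounds

  arma::mat weights = arma::randu<arma::mat>(3, 3);     // parameters to optimize
  arma::mat delta(3, 3);                                // per-weight step sizes
  delta.fill(0.1);                                      // assumed initial step size
  arma::mat prevGradient = arma::zeros<arma::mat>(3, 3);

  for (size_t iter = 0; iter < 100; ++iter)
  {
    // Illustrative objective: 0.5 * ||weights||^2, so the gradient equals the weights.
    arma::mat gradient = weights;

    // Grow the step size where the gradient kept its sign, shrink it where it flipped.
    for (size_t i = 0; i < gradient.n_elem; ++i)
    {
      if (gradient(i) * prevGradient(i) > 0)
        delta(i) = std::min(delta(i) * etaPlus, maxDelta);
      else if (gradient(i) * prevGradient(i) < 0)
        delta(i) = std::max(delta(i) * etaMin, minDelta);
    }

    // RPROP- uses only the sign of the gradient; no weight-backtracking is performed.
    weights -= arma::sign(gradient) % delta;
    prevGradient = gradient;
  }

  weights.print("weights after RPROP- (should be near zero):");
  return 0;
}
```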