[mlpack-svn] master: Add implementation of the RPROP+ method to update the weights. (d27b633)

gitdub at big.cc.gt.atl.ga.us gitdub at big.cc.gt.atl.ga.us
Thu Jan 1 07:14:46 EST 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/e5279f82fd462f76ed757b66420a68494e7329b9...23b900168ef50d2ad1b247c450645de621e2043e

>---------------------------------------------------------------

commit d27b63314a056a3ad30d8b6676dd9116002d63a4
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date:   Thu Jan 1 13:14:12 2015 +0100

    Add implementation of the RPROP+ method to update the weights.


>---------------------------------------------------------------

d27b63314a056a3ad30d8b6676dd9116002d63a4
 .../ann/optimizer/{irpropp.hpp => rpropp.hpp}      | 35 +++++++++-------------
 1 file changed, 14 insertions(+), 21 deletions(-)

diff --git a/src/mlpack/methods/ann/optimizer/irpropp.hpp b/src/mlpack/methods/ann/optimizer/rpropp.hpp
similarity index 77%
copy from src/mlpack/methods/ann/optimizer/irpropp.hpp
copy to src/mlpack/methods/ann/optimizer/rpropp.hpp
index 71c95e0..b13adb0 100644
--- a/src/mlpack/methods/ann/optimizer/irpropp.hpp
+++ b/src/mlpack/methods/ann/optimizer/rpropp.hpp
@@ -1,12 +1,12 @@
 /**
- * @file irpropp.hpp
+ * @file rpropp.hpp
  * @author Marcus Edel
  *
  * Implementation of the resilient propagation (RPROP) update rule used to
  * adapt the weights of a neural network.
  */
-#ifndef __MLPACK_METHOS_ANN_OPTIMIZER_IRPROPP_HPP
-#define __MLPACK_METHOS_ANN_OPTIMIZER_IRPROPP_HPP
+#ifndef __MLPACK_METHODS_ANN_OPTIMIZER_RPROPP_HPP
+#define __MLPACK_METHODS_ANN_OPTIMIZER_RPROPP_HPP
 
 #include <mlpack/core.hpp>
 #include <boost/math/special_functions/sign.hpp>
@@ -20,7 +20,7 @@ namespace ann /** Artificial Neural Network. */ {
  * @tparam MatType Type of matrix (should be arma::mat or arma::sp_mat).
  */
 template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class iRPROPp
+class RPROPp
 {
  public:
   /**
@@ -30,14 +30,14 @@ class iRPROPp
    * @param lowerBound The number used as lower bound.
    * @param upperBound The number used as upper bound.
    */
-  iRPROPp(const size_t cols,
-          const size_t rows,
-          const double etaMin = 0.5,
-          const double etaPlus = 1.2,
-          const double minDelta = 1e-9,
-          const double maxDelta = 50,
-          const double initialUpdate = 0.1) :
-      etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta), prevError(arma::datum::inf)
+  RPROPp(const size_t cols,
+         const size_t rows,
+         const double etaMin = 0.5,
+         const double etaPlus = 1.2,
+         const double minDelta = 1e-9,
+         const double maxDelta = 50,
+         const double initialUpdate = 0.1) :
+      etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
   {
     prevDerivs = arma::zeros<MatType>(rows, cols);
     prevWeightChange = arma::zeros<MatType>(rows, cols);
@@ -48,7 +48,7 @@ class iRPROPp
 
   void UpdateWeights(MatType& weights,
                      const MatType& gradient,
-                     const double error)
+                     const double /* unused */)
   {
     MatType derivs = gradient % prevDerivs;
 
@@ -66,9 +66,6 @@ class iRPROPp
         {
           updateValues(j, i) = std::max(updateValues(j, i) * etaMin, minDelta);
           prevDerivs(j, i) = 0;
-
-          if (error < prevError)
-            prevWeightChange(j, i) = 0;
         }
         else
         {
@@ -92,19 +89,15 @@ class iRPROPp
 
   const double maxDelta;
 
-  double prevError;
-
   MatType updateValues;
 
   MatType prevWeightChange;
 
   //! weight momentum
   MatType prevDerivs;
-}; // class iRPROPp
+}; // class RPROPp
 
 }; // namespace ann
 }; // namespace mlpack
 
 #endif
-
-
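For readers skimming the patch: RPROP+ (resilient backpropagation with
weight backtracking) keeps a separate step size for every weight and adapts
it from the sign of the product of the current and previous partial
derivatives, never from the gradient magnitude. While the sign stays the
same, the step size grows by etaPlus (capped at maxDelta) and the weight
moves against the gradient; on a sign flip the step size shrinks by etaMin
(floored at minDelta) and the previous weight change is reverted. Judging
from the removed lines above, the iRPROP+ file this was copied from skipped
that revert when the error had improved, whereas this RPROP+ version drops
the error argument and always backtracks. A minimal, self-contained sketch
of one such update, written with Armadillo like the patch but using
illustrative (non-mlpack) names and the default parameter values from the
constructor above, could look like this:

#include <algorithm>
#include <armadillo>

// Sketch of a single RPROP+ step; all matrices are assumed to have the
// same size as weights.
void RPROPpStep(arma::mat& weights,
                const arma::mat& gradient,
                arma::mat& updateValues,      // per-weight step sizes
                arma::mat& prevDerivs,        // gradient from the previous call
                arma::mat& prevWeightChange,  // last change applied per weight
                const double etaMin = 0.5,
                const double etaPlus = 1.2,
                const double minDelta = 1e-9,
                const double maxDelta = 50)
{
  for (arma::uword i = 0; i < weights.n_elem; ++i)
  {
    // Product of the current and previous partial derivative; only its
    // sign matters.
    const double change = gradient(i) * prevDerivs(i);
    const double gradSign = (gradient(i) > 0) - (gradient(i) < 0);

    if (change > 0)
    {
      // Same sign as last time: grow the step size and move downhill.
      updateValues(i) = std::min(updateValues(i) * etaPlus, maxDelta);
      prevWeightChange(i) = -gradSign * updateValues(i);
      weights(i) += prevWeightChange(i);
      prevDerivs(i) = gradient(i);
    }
    else if (change < 0)
    {
      // Sign flip: shrink the step size and revert the previous change
      // (the weight backtracking that gives RPROP+ its "+").
      updateValues(i) = std::max(updateValues(i) * etaMin, minDelta);
      weights(i) -= prevWeightChange(i);
      // Forget the derivative so the next call takes the neutral branch.
      prevDerivs(i) = 0;
    }
    else
    {
      // One of the two derivatives was zero: keep the step size as it is.
      prevWeightChange(i) = -gradSign * updateValues(i);
      weights(i) += prevWeightChange(i);
      prevDerivs(i) = gradient(i);
    }
  }
}

In the patch itself the corresponding per-element logic lives in
RPROPp::UpdateWeights(), with updateValues, prevDerivs and prevWeightChange
held as member matrices; the constructor shown above zero-initializes
prevDerivs and prevWeightChange, and initialUpdate presumably seeds
updateValues.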




More information about the mlpack-svn mailing list