[mlpack-svn] master: Add implementation of the RPROP- method to update the weights. (9ce7682)

gitdub at big.cc.gt.atl.ga.us gitdub at big.cc.gt.atl.ga.us
Thu Jan 1 07:14:44 EST 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/e5279f82fd462f76ed757b66420a68494e7329b9...23b900168ef50d2ad1b247c450645de621e2043e

>---------------------------------------------------------------

commit 9ce76828da42d1dfa0e75209f7b12dfed5101d68
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date:   Thu Jan 1 13:13:57 2015 +0100

    Add implementation of the RPROP- method to update the weights.


>---------------------------------------------------------------

9ce76828da42d1dfa0e75209f7b12dfed5101d68
 .../ann/optimizer/{irpropm.hpp => rpropm.hpp}      | 38 +++++++++-------------
 1 file changed, 16 insertions(+), 22 deletions(-)

diff --git a/src/mlpack/methods/ann/optimizer/irpropm.hpp b/src/mlpack/methods/ann/optimizer/rpropm.hpp
similarity index 70%
copy from src/mlpack/methods/ann/optimizer/irpropm.hpp
copy to src/mlpack/methods/ann/optimizer/rpropm.hpp
index 4bb077b..36c6e35 100644
--- a/src/mlpack/methods/ann/optimizer/irpropm.hpp
+++ b/src/mlpack/methods/ann/optimizer/rpropm.hpp
@@ -1,12 +1,12 @@
 /**
- * @file irpropm.hpp
+ * @file rpropm.hpp
  * @author Marcus Edel
  *
  * Initialization rule for the neural networks. This simple initialization is
  * performed by assigning a random matrix to the weight matrix. 
  */
-#ifndef __MLPACK_METHOS_ANN_OPTIMIZER_IRPROPM_HPP
-#define __MLPACK_METHOS_ANN_OPTIMIZER_IRPROPM_HPP
+#ifndef __MLPACK_METHOS_ANN_OPTIMIZER_RPROPM_HPP
+#define __MLPACK_METHOS_ANN_OPTIMIZER_RPROPM_HPP
 
 #include <mlpack/core.hpp>
 #include <boost/math/special_functions/sign.hpp>
@@ -20,7 +20,7 @@ namespace ann /** Artificial Neural Network. */ {
  * @tparam MatType Type of matrix (should be arma::mat or arma::spmat).
  */
 template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class iRPROPm
+class RPROPm
 {
  public:
   /**
@@ -30,18 +30,16 @@ class iRPROPm
    * @param lowerBound The number used as lower bound.
    * @param upperBound The number used as upper bound.
    */
-  iRPROPm(const size_t cols,
-          const size_t rows,
-          const double etaMin = 0.5,
-          const double etaPlus = 1.2,
-          const double minDelta = 1e-9,
-          const double maxDelta = 50) :
+  RPROPm(const size_t cols,
+         const size_t rows,
+         const double etaMin = 0.5,
+         const double etaPlus = 1.2,
+         const double minDelta = 1e-9,
+         const double maxDelta = 50) : 
       etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
   {
     prevDerivs = arma::zeros<MatType>(rows, cols);
     prevDelta = arma::zeros<MatType>(rows, cols);
-
-    prevError = arma::datum::inf;
   }
 
   void UpdateWeights(MatType& weights,
@@ -54,22 +52,18 @@ class iRPROPm
     {
       for (size_t j(0); j < derivs.n_rows; j++)
       {
-        if (derivs(j, i) >= 0)
-        {
+        if (derivs(j, i) > 0)
           prevDelta(j, i) = std::min(prevDelta(j, i) * etaPlus, maxDelta);
-          prevDerivs(j, i) = gradient(j, i);
-        }
         else
-        {
           prevDelta(j, i) = std::max(prevDelta(j, i) * etaMin, minDelta);
-          prevDerivs(j, i) = 0;
-        }
       }
     }
 
     weights -= arma::sign(gradient) % prevDelta;
+    prevDerivs = gradient;
   }
 
+
  private:
   //! The number used as learning rate.
   const double etaMin;
@@ -80,15 +74,15 @@ class iRPROPm
 
   const double maxDelta;
 
-  double prevError;
-
   MatType prevDelta;
 
   //! weight momentum
   MatType prevDerivs;
-}; // class iRPROPm
+}; // class RPROPm
 
 }; // namespace ann
 }; // namespace mlpack
 
 #endif
+
+




More information about the mlpack-svn mailing list