[mlpack-svn] master: Add implementation of the iRPROP- method to update the weights. (4222b16)

gitdub at big.cc.gt.atl.ga.us
Thu Jan 1 07:14:40 EST 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/e5279f82fd462f76ed757b66420a68494e7329b9...23b900168ef50d2ad1b247c450645de621e2043e

>---------------------------------------------------------------

commit 4222b1640e53f03522d1e8271f7ac046ebc2ce77
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date:   Thu Jan 1 13:13:08 2015 +0100

    Add implementation of the iRPROP- method to update the weights.


>---------------------------------------------------------------

4222b1640e53f03522d1e8271f7ac046ebc2ce77
 src/mlpack/methods/ann/optimizer/irpropm.hpp | 94 ++++++++++++++++++++++++++++
 1 file changed, 94 insertions(+)

diff --git a/src/mlpack/methods/ann/optimizer/irpropm.hpp b/src/mlpack/methods/ann/optimizer/irpropm.hpp
new file mode 100644
index 0000000..4bb077b
--- /dev/null
+++ b/src/mlpack/methods/ann/optimizer/irpropm.hpp
@@ -0,0 +1,94 @@
+/**
+ * @file irpropm.hpp
+ * @author Marcus Edel
+ *
+ * Implementation of the iRPROP- method to update the weights. iRPROP-
+ * (improved resilient backpropagation) adapts an individual step size for
+ * each weight using only the sign of the gradient, and discards the stored
+ * derivative whenever the sign of the partial derivative changes.
+ */
+#ifndef __MLPACK_METHODS_ANN_OPTIMIZER_IRPROPM_HPP
+#define __MLPACK_METHODS_ANN_OPTIMIZER_IRPROPM_HPP
+
+#include <mlpack/core.hpp>
+#include <boost/math/special_functions/sign.hpp>
+
+namespace mlpack {
+namespace ann /** Artificial Neural Network. */ {
+
+/**
+ * This class implements the iRPROP- update rule for the weight matrix.
+ *
+ * @tparam MatType Type of matrix (should be arma::mat or arma::spmat).
+ * @tparam VecType Type of vector (should be arma::rowvec or arma::colvec).
+ */
+template<typename MatType = arma::mat, typename VecType = arma::rowvec>
+class iRPROPm
+{
+ public:
+  /**
+   * Initialize the iRPROP- optimizer with the given parameters.
+   *
+   * @param cols Number of columns of the weight matrix.
+   * @param rows Number of rows of the weight matrix.
+   * @param etaMin Factor used to decrease the step size (0 < etaMin < 1).
+   * @param etaPlus Factor used to increase the step size (etaPlus > 1).
+   * @param minDelta Lower bound for the step size.
+   * @param maxDelta Upper bound for the step size.
+   */
+  iRPROPm(const size_t cols,
+          const size_t rows,
+          const double etaMin = 0.5,
+          const double etaPlus = 1.2,
+          const double minDelta = 1e-9,
+          const double maxDelta = 50) :
+      etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
+  {
+    prevDerivs = arma::zeros<MatType>(rows, cols);
+    prevDelta = arma::zeros<MatType>(rows, cols);
+
+    prevError = arma::datum::inf;
+  }
+
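+  /**
+   * Update the given weights using the iRPROP- update rule.
+   *
+   * @param weights The weight matrix to be updated.
+   * @param gradient The gradient of the error with respect to the weights.
+   */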
+  void UpdateWeights(MatType& weights,
+                     const MatType& gradient,
+                     const double /* unused */)
+  {
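+    // Element-wise product of the current gradient and the previously stored
+    // derivatives; a negative entry indicates a sign change.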
+    MatType derivs = gradient % prevDerivs;
+
+    for (size_t i(0); i < derivs.n_cols; i++)
+    {
+      for (size_t j(0); j < derivs.n_rows; j++)
+      {
+        if (derivs(j, i) >= 0)
+        {
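+          // The sign did not change (or a derivative is zero): grow the step
+          // size, bounded by maxDelta, and store the current derivative.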
+          prevDelta(j, i) = std::min(prevDelta(j, i) * etaPlus, maxDelta);
+          prevDerivs(j, i) = gradient(j, i);
+        }
+        else
+        {
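+          // The sign changed: shrink the step size, bounded by minDelta, and
+          // discard the stored derivative (the iRPROP- modification).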
+          prevDelta(j, i) = std::max(prevDelta(j, i) * etaMin, minDelta);
+          prevDerivs(j, i) = 0;
+        }
+      }
+    }
+
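+    // Step against the sign of the current gradient, scaled by the
+    // individually adapted step sizes.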
+    weights -= arma::sign(gradient) % prevDelta;
+  }
+
+ private:
+  //! The factor used to decrease the step size after a sign change.
+  const double etaMin;
+
+  //! The factor used to increase the step size when the sign is unchanged.
+  const double etaPlus;
+
+  //! The lower bound for the step size.
+  const double minDelta;
+
+  //! The upper bound for the step size.
+  const double maxDelta;
+
+  //! The error of the previous iteration.
+  double prevError;
+
+  //! The step sizes (deltas) of the previous iteration.
+  MatType prevDelta;
+
+  //! The derivatives of the previous iteration.
+  MatType prevDerivs;
+}; // class iRPROPm
+
+}; // namespace ann
+}; // namespace mlpack
+
+#endif
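
For reference, below is a minimal, self-contained sketch of how the new
optimizer could be exercised on its own, using only the interface shown in
the diff above (the constructor and UpdateWeights) and assuming the header is
available on the include path as mlpack/methods/ann/optimizer/irpropm.hpp.
The matrix shape, the random dummy gradient, and the number of iterations are
made up for illustration; in practice the class is presumably driven by the
ANN code rather than called by hand.

    #include <mlpack/methods/ann/optimizer/irpropm.hpp>

    using namespace mlpack::ann;

    int main()
    {
      // Dummy 4x3 weight matrix and a gradient of the same shape.
      arma::mat weights = arma::randu<arma::mat>(4, 3);
      arma::mat gradient = arma::randn<arma::mat>(4, 3);

      // The constructor takes (cols, rows); the internal buffers are created
      // as (rows, cols) so that they match the weight matrix.
      iRPROPm<> optimizer(weights.n_cols, weights.n_rows);

      // Apply a few iRPROP- steps; the third argument is ignored.
      for (size_t i = 0; i < 5; ++i)
        optimizer.UpdateWeights(weights, gradient, 0.0);

      return 0;
    }

Note that prevDelta starts at zero, so the step size of a weight stays at
zero until its partial derivative changes sign, at which point it is raised
to minDelta and can then grow by etaPlus; this is worth keeping in mind when
choosing minDelta.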




More information about the mlpack-svn mailing list