[mlpack-git] master: Update the default step size (adam optimizer). (386138b)

gitdub at mlpack.org
Mon Mar 14 18:20:33 EDT 2016


Repository : https://github.com/mlpack/mlpack
On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/b864df8cf10592b3874b079302774dbe7a4c1dbc...386138b3172e0485aced365cb31e4f5b13c3bd7d

>---------------------------------------------------------------

commit 386138b3172e0485aced365cb31e4f5b13c3bd7d
Author: marcus <marcus.edel at fu-berlin.de>
Date:   Mon Mar 14 23:20:33 2016 +0100

    Update the default step size (adam optimizer).


>---------------------------------------------------------------

386138b3172e0485aced365cb31e4f5b13c3bd7d
 src/mlpack/core/optimizers/adam/adam.hpp      | 2 +-
 src/mlpack/core/optimizers/adam/adam_impl.hpp | 7 +++++--
 2 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/src/mlpack/core/optimizers/adam/adam.hpp b/src/mlpack/core/optimizers/adam/adam.hpp
index fae4362..965ecfb 100644
--- a/src/mlpack/core/optimizers/adam/adam.hpp
+++ b/src/mlpack/core/optimizers/adam/adam.hpp
@@ -77,7 +77,7 @@ class Adam
    *        function is visited in linear order.
    */
   Adam(DecomposableFunctionType& function,
-      const double stepSize = 0.01,
+      const double stepSize = 0.001,
       const double beta1 = 0.9,
       const double beta2 = 0.999,
       const double eps = 1e-8,
diff --git a/src/mlpack/core/optimizers/adam/adam_impl.hpp b/src/mlpack/core/optimizers/adam/adam_impl.hpp
index 51efa05..bd1b953 100644
--- a/src/mlpack/core/optimizers/adam/adam_impl.hpp
+++ b/src/mlpack/core/optimizers/adam/adam_impl.hpp
@@ -105,8 +105,11 @@ double Adam<DecomposableFunctionType>::Optimize(arma::mat& iterate)
       function.Gradient(iterate, currentFunction, gradient);
 
     // And update the iterate.
-    mean += (1 - beta1) * (gradient - mean);
-    variance += (1 - beta2) * (gradient % gradient - variance);
+    mean *= beta1;
+    mean += (1 - beta1) * gradient;
+
+    variance *= beta2;
+    variance += (1 - beta2) * (gradient % gradient);
 
     double biasCorrection1 = 1.0 - std::pow(beta1, (double) i);
     double biasCorrection2 = 1.0 - std::pow(beta2, (double) i);
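
For reference, 0.001 is the step size (alpha) recommended in the Adam paper (Kingma & Ba, 2015), so the new default now matches the published recommendation; the old default of 0.01 was ten times larger. A minimal usage sketch with the updated default follows. ToyFunction is a hypothetical decomposable objective written for illustration, assuming the usual NumFunctions()/Evaluate()/Gradient() interface that mlpack's decomposable optimizers expect.

    #include <mlpack/core.hpp>
    #include <mlpack/core/optimizers/adam/adam.hpp>

    // A toy decomposable objective: f(x) = sum_i (x - t_i)^2, with one
    // "function" per target point (hypothetical, for illustration only).
    class ToyFunction
    {
     public:
      ToyFunction() : targets("1 2 3") { }

      size_t NumFunctions() const { return targets.n_elem; }

      double Evaluate(const arma::mat& x, const size_t i) const
      {
        return std::pow(x(0) - targets(i), 2.0);
      }

      void Gradient(const arma::mat& x, const size_t i, arma::mat& g) const
      {
        g.set_size(1, 1);
        g(0) = 2.0 * (x(0) - targets(i));
      }

     private:
      arma::vec targets;
    };

    int main()
    {
      ToyFunction f;

      // stepSize now defaults to 0.001; beta1, beta2, and eps keep the
      // paper's defaults of 0.9, 0.999, and 1e-8.
      mlpack::optimization::Adam<ToyFunction> optimizer(f);

      arma::mat iterate(1, 1);
      iterate(0) = 0.0;
      optimizer.Optimize(iterate); // moves iterate toward the targets' mean

      return 0;
    }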

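The rewritten moment updates are algebraically identical to the old in-place form: mean += (1 - beta1) * (gradient - mean) expands to mean = beta1 * mean + (1 - beta1) * gradient, and the variance update expands the same way. The new form mirrors the paper's notation (m_t = beta1 * m_{t-1} + (1 - beta1) * g_t and v_t = beta2 * v_{t-1} + (1 - beta2) * g_t^2) and avoids forming the (gradient - mean) temporary. A standalone sketch of one full step, including the bias correction visible in the surrounding context, might look like the following; it is illustrative, not mlpack's exact code.

    #include <armadillo>
    #include <cmath>

    // One Adam step in isolation. Variable names follow the mlpack code;
    // i is the 1-based iteration count used for bias correction.
    void AdamStep(arma::mat& iterate,
                  const arma::mat& gradient,
                  arma::mat& mean,
                  arma::mat& variance,
                  const size_t i,
                  const double stepSize = 0.001,
                  const double beta1 = 0.9,
                  const double beta2 = 0.999,
                  const double eps = 1e-8)
    {
      // Exponential moving averages of the gradient and its square.
      mean *= beta1;
      mean += (1 - beta1) * gradient;

      variance *= beta2;
      variance += (1 - beta2) * (gradient % gradient);

      // Undo the bias toward zero from initializing both moments at zero.
      const double biasCorrection1 = 1.0 - std::pow(beta1, (double) i);
      const double biasCorrection2 = 1.0 - std::pow(beta2, (double) i);

      iterate -= stepSize * (mean / biasCorrection1)
          / (arma::sqrt(variance / biasCorrection2) + eps);
    }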



More information about the mlpack-git mailing list