[mlpack-git] master: Widen tolerance slightly and disable non-deterministic behavior by setting the shuffle parameter to false. (67e0a13)

gitdub at big.cc.gt.atl.ga.us gitdub at big.cc.gt.atl.ga.us
Tue Oct 20 05:41:43 EDT 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/fecf1194c123ced12d56e7daad761c7b9aaac262...67e0a132c7f62820c734eb508fe1bc83128a3e13

>---------------------------------------------------------------

commit 67e0a132c7f62820c734eb508fe1bc83128a3e13
Author: marcus <marcus.edel at fu-berlin.de>
Date:   Tue Oct 20 11:41:27 2015 +0200

    Widen tolerance slightly and disable non-deterministic behavior by setting the shuffle parameter to false.


>---------------------------------------------------------------

67e0a132c7f62820c734eb508fe1bc83128a3e13
 src/mlpack/tests/ada_delta_test.cpp |  6 +++---
 src/mlpack/tests/adam_test.cpp      |  2 +-
 src/mlpack/tests/rmsprop_test.cpp   | 10 +++++-----
 3 files changed, 9 insertions(+), 9 deletions(-)

diff --git a/src/mlpack/tests/ada_delta_test.cpp b/src/mlpack/tests/ada_delta_test.cpp
index aa78119..961fede 100644
--- a/src/mlpack/tests/ada_delta_test.cpp
+++ b/src/mlpack/tests/ada_delta_test.cpp
@@ -36,7 +36,7 @@ BOOST_AUTO_TEST_SUITE(AdaDeltaTest);
 BOOST_AUTO_TEST_CASE(SimpleAdaDeltaTestFunction)
 {
   const size_t hiddenLayerSize = 10;
-  const size_t maxEpochs = 100;
+  const size_t maxEpochs = 300;
 
   // Load the dataset.
   arma::mat dataset, labels, labelsIdx;
@@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE(SimpleAdaDeltaTestFunction)
     labels(labelsIdx(0, i), i) = 1;
 
   // Construct a feed forward network using the specified parameters.
-  RandomInitialization randInit(0.5, 0.5);
+  RandomInitialization randInit(0.1, 0.1);
 
   LinearLayer<AdaDelta, RandomInitialization> inputLayer(dataset.n_rows,
       hiddenLayerSize, randInit);
@@ -90,7 +90,7 @@ BOOST_AUTO_TEST_CASE(SimpleAdaDeltaTestFunction)
   BOOST_REQUIRE_GE(classificationError, 0.09);
 
   // Train the feed forward network.
-  Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01);
+  Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01, false);
   trainer.Train(dataset, labels, dataset, labels);
 
   // Evaluate the feed forward network.
diff --git a/src/mlpack/tests/adam_test.cpp b/src/mlpack/tests/adam_test.cpp
index 423268c..b212d3b 100644
--- a/src/mlpack/tests/adam_test.cpp
+++ b/src/mlpack/tests/adam_test.cpp
@@ -91,7 +91,7 @@ BOOST_AUTO_TEST_CASE(SimpleAdamTestFunction)
   BOOST_REQUIRE_GE(classificationError, 0.09);
 
   // Train the feed forward network.
-  Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01);
+  Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01, false);
   trainer.Train(dataset, labels, dataset, labels);
 
   // Evaluate the feed forward network.
diff --git a/src/mlpack/tests/rmsprop_test.cpp b/src/mlpack/tests/rmsprop_test.cpp
index 0ae76ba..13c4bdd 100644
--- a/src/mlpack/tests/rmsprop_test.cpp
+++ b/src/mlpack/tests/rmsprop_test.cpp
@@ -36,7 +36,7 @@ BOOST_AUTO_TEST_SUITE(RMSPropTest);
 BOOST_AUTO_TEST_CASE(SimpleRMSPropTestFunction)
 {
   const size_t hiddenLayerSize = 10;
-  const size_t maxEpochs = 100;
+  const size_t maxEpochs = 300;
 
   // Load the dataset.
   arma::mat dataset, labels, labelsIdx;
@@ -49,7 +49,7 @@ BOOST_AUTO_TEST_CASE(SimpleRMSPropTestFunction)
     labels(labelsIdx(0, i), i) = 1;
 
   // Construct a feed forward network using the specified parameters.
-  RandomInitialization randInit(0.5, 0.5);
+  RandomInitialization randInit(0.1, 0.1);
 
   LinearLayer<RMSPROP, RandomInitialization> inputLayer(dataset.n_rows,
       hiddenLayerSize, randInit);
@@ -87,10 +87,10 @@ BOOST_AUTO_TEST_CASE(SimpleRMSPropTestFunction)
 
   // Check if the selected model isn't already optimized.
   double classificationError = 1 - double(error) / dataset.n_cols;
-  BOOST_REQUIRE_GE(classificationError, 0.05);
+  BOOST_REQUIRE_GE(classificationError, 0.09);
 
   // Train the feed forward network.
-  Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01);
+  Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01, false);
   trainer.Train(dataset, labels, dataset, labels);
 
   // Evaluate the feed forward network.
@@ -107,7 +107,7 @@ BOOST_AUTO_TEST_CASE(SimpleRMSPropTestFunction)
 
   classificationError = 1 - double(error) / dataset.n_cols;
 
-  BOOST_REQUIRE_LE(classificationError, 0.05);
+  BOOST_REQUIRE_LE(classificationError, 0.09);
 }
 
 BOOST_AUTO_TEST_SUITE_END();
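
For context, the change in each of the three tests amounts to the same two adjustments: the training budget and error tolerances are relaxed, and the Trainer is constructed with its final argument set to false so that the training data is not reshuffled between epochs and repeated test runs see the points in the same order. A minimal sketch of the adjusted test setup, assuming the Trainer interface as it is used in these tests (net, maxEpochs, batchSize, convergenceThreshold, shuffle) rather than a verbatim mlpack declaration:

    // Sketch of the test setup after this commit (simplified; parameter
    // roles are inferred from how the tests call the Trainer).
    const size_t maxEpochs = 300;            // raised from 100
    RandomInitialization randInit(0.1, 0.1); // narrower init range, was (0.5, 0.5)

    // Passing false as the last argument disables shuffling of the
    // training data, removing the non-deterministic behavior.
    Trainer<decltype(net)> trainer(net, maxEpochs, 1, 0.01, false);
    trainer.Train(dataset, labels, dataset, labels);

    // The accepted classification error is widened from 0.05 to 0.09.
    BOOST_REQUIRE_LE(classificationError, 0.09);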


