[mlpack-git] master: Adjust the activation function test; Use the simplified layer structure. (384556e)

gitdub at mlpack.org
Fri Feb 19 09:53:01 EST 2016


Repository : https://github.com/mlpack/mlpack
On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/491123bfa264636fd4e0d3079e16f0222ed231db...384556ec9b6218fc332d557dd893ea4349dd9414

>---------------------------------------------------------------

commit 384556ec9b6218fc332d557dd893ea4349dd9414
Author: marcus <marcus.edel at fu-berlin.de>
Date:   Fri Feb 19 15:53:01 2016 +0100

    Adjust the activation function test; Use the simplified layer structure.


>---------------------------------------------------------------

384556ec9b6218fc332d557dd893ea4349dd9414
 src/mlpack/tests/activation_functions_test.cpp | 138 -------------------------
 1 file changed, 138 deletions(-)

diff --git a/src/mlpack/tests/activation_functions_test.cpp b/src/mlpack/tests/activation_functions_test.cpp
index f1d8557..5c40e2e 100644
--- a/src/mlpack/tests/activation_functions_test.cpp
+++ b/src/mlpack/tests/activation_functions_test.cpp
@@ -199,142 +199,4 @@ BOOST_AUTO_TEST_CASE(RectifierFunctionTest)
       desiredDerivatives);
 }
 
-/*
- * Implementation of the numerical gradient checking.
- *
- * @param input Input data used for evaluating the network.
- * @param target Target data used to calculate the network error.
- * @param perturbation Constant perturbation value.
- * @param threshold Threshold used as bounding check.
- *
- * @tparam ActivationFunction Activation function used for the gradient check.
- */
-template<class ActivationFunction>
-void CheckGradientNumericallyCorrect(const arma::mat input,
-                                     const arma::mat target,
-                                     const double perturbation,
-                                     const double threshold)
-{
-  // Specify the structure of the feed forward neural network.
-  RandomInitialization randInit(-0.5, 0.5);
-  arma::mat error;
-
-  // Number of hidden layer units.
-  const size_t hiddenLayerSize = 4;
-
-  LinearLayer<mlpack::ann::RMSPROP, RandomInitialization> linearLayer0(
-        input.n_rows, hiddenLayerSize, randInit);
-  BiasLayer<> biasLayer0(hiddenLayerSize);
-  BaseLayer<ActivationFunction> baseLayer0;
-
-  LinearLayer<mlpack::ann::RMSPROP, RandomInitialization> linearLayer1(
-         hiddenLayerSize, hiddenLayerSize, randInit);
-  BiasLayer<> biasLayer1(hiddenLayerSize);
-  BaseLayer<ActivationFunction> baseLayer1;
-
-  LinearLayer<mlpack::ann::RMSPROP, RandomInitialization> linearLayer2(
-         hiddenLayerSize, target.n_rows, randInit);
-  BiasLayer<> biasLayer2(target.n_rows);
-  BaseLayer<ActivationFunction> baseLayer2;
-
-  BinaryClassificationLayer classOutputLayer;
-
-  auto modules = std::tie(linearLayer0, biasLayer0, baseLayer0,
-                          linearLayer1, biasLayer1, baseLayer1,
-                          linearLayer2, biasLayer2, baseLayer2);
-
-  FFN<decltype(modules), decltype(classOutputLayer), MeanSquaredErrorFunction>
-      net(modules, classOutputLayer);
-
-  // Initialize the feed forward neural network.
-  net.FeedForward(input, target, error);
-  net.FeedBackward(input, error);
-
-  std::vector<std::reference_wrapper<decltype(linearLayer0)> > layer {
-         linearLayer0, linearLayer1, linearLayer2 };
-
-  std::vector<arma::mat> gradient {linearLayer0.Gradient(),
-                                   linearLayer1.Gradient(),
-                                   linearLayer2.Gradient()};
-
-  double weight, mLoss, pLoss, dW, e;
-
-  for (size_t l = 0; l < layer.size(); ++l)
-  {
-    for (size_t i = 0; i < layer[l].get().Weights().n_rows; ++i)
-    {
-      for (size_t j = 0; j < layer[l].get().Weights().n_cols; ++j)
-      {
-        // Store original weight.
-        weight = layer[l].get().Weights()(i, j);
-
-        // Add negative perturbation and compute error.
-        layer[l].get().Weights().at(i, j) -= perturbation;
-        net.FeedForward(input, target, error);
-        mLoss = arma::as_scalar(0.5 * arma::sum(arma::pow(error, 2)));
-
-        // Add positive perturbation and compute error.
-        layer[l].get().Weights().at(i, j) += (2 * perturbation);
-        net.FeedForward(input, target, error);
-        pLoss = arma::as_scalar(0.5 * arma::sum(arma::pow(error, 2)));
-
-        // Compute symmetric difference.
-        dW = (pLoss - mLoss) / (2 * perturbation);
-        e = std::abs(dW - gradient[l].at(i, j));
-
-        bool b = e < threshold;
-        BOOST_REQUIRE_EQUAL(b, 1);
-
-        // Restore original weight.
-        layer[l].get().Weights().at(i, j) = weight;
-      }
-    }
-  }
-}
-
-/**
- * The following test implements numerical gradient checking. It computes the
- * numerical gradient, a numerical approximation of the partial derivative of J
- * with respect to the i-th input argument, evaluated at g. The numerical
- * gradient should be approximately the partial derivative of J with respect to
- * g(i).
- *
- * Given a function g(\theta) that is supposedly computing:
- *
- * @f[
- * \frac{\partial}{\partial \theta} J(\theta)
- * @f]
- *
- * we can now numerically verify its correctness by checking:
- *
- * @f[
- * g(\theta) \approx \frac{J(\theta + eps) - J(\theta - eps)}{2 * eps}
- * @f]
- */
-BOOST_AUTO_TEST_CASE(GradientNumericallyCorrect)
-{
-  // Initialize dataset.
-  const arma::colvec input = arma::randu<arma::colvec>(10);
-  const arma::colvec target("0 1;");
-
-  // Perturbation and threshold constant.
-  const double perturbation = 1e-6;
-  const double threshold = 1e-5;
-
-  CheckGradientNumericallyCorrect<LogisticFunction>(input, target,
-      perturbation, threshold);
-
-  CheckGradientNumericallyCorrect<IdentityFunction>(input, target,
-      perturbation, threshold);
-
-  CheckGradientNumericallyCorrect<RectifierFunction>(input, target,
-      perturbation, threshold);
-
-  CheckGradientNumericallyCorrect<SoftsignFunction>(input, target,
-      perturbation, threshold);
-
-  CheckGradientNumericallyCorrect<TanhFunction>(input, target,
-      perturbation, threshold);
-}
-
 BOOST_AUTO_TEST_SUITE_END();
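
For context, the removed helper verified each linear layer's analytic gradient against a central-difference estimate, evaluating the loss at theta + eps and theta - eps around every weight. Below is a minimal standalone sketch of that check; it uses a plain scalar function instead of the mlpack FFN class, and every name in it is illustrative rather than part of the mlpack API:

    #include <cassert>
    #include <cmath>
    #include <functional>

    // Central-difference gradient check: compare an analytic derivative dJ
    // against the numerical estimate (J(x + eps) - J(x - eps)) / (2 * eps).
    bool GradientNumericallyCorrect(const std::function<double(double)>& J,
                                    const std::function<double(double)>& dJ,
                                    const double x,
                                    const double perturbation = 1e-6,
                                    const double threshold = 1e-5)
    {
      const double pLoss = J(x + perturbation);
      const double mLoss = J(x - perturbation);
      const double numericalGradient = (pLoss - mLoss) / (2 * perturbation);
      return std::abs(numericalGradient - dJ(x)) < threshold;
    }

    int main()
    {
      // J(x) = 0.5 * x^2, so dJ/dx = x; the check should pass for any x.
      auto J  = [](double x) { return 0.5 * x * x; };
      auto dJ = [](double x) { return x; };
      assert(GradientNumericallyCorrect(J, dJ, 3.0));
      return 0;
    }

The removed test applied the same idea per weight: perturb one entry of a layer's weight matrix, run a forward pass to get the perturbed loss, form the symmetric difference, and require that it stays within the threshold of the gradient produced by backpropagation.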



