[mlpack-svn] r16037 - mlpack/trunk/src/mlpack/tests

fastlab-svn at coffeetalk-1.cc.gatech.edu
Fri Nov 15 13:07:41 EST 2013


Author: rcurtin
Date: Fri Nov 15 13:07:41 2013
New Revision: 16037

Log:
Add test for regularization in Gradient().


Modified:
   mlpack/trunk/src/mlpack/tests/logistic_regression_test.cpp

Modified: mlpack/trunk/src/mlpack/tests/logistic_regression_test.cpp
==============================================================================
--- mlpack/trunk/src/mlpack/tests/logistic_regression_test.cpp	(original)
+++ mlpack/trunk/src/mlpack/tests/logistic_regression_test.cpp	Fri Nov 15 13:07:41 2013
@@ -338,4 +338,66 @@
   BOOST_REQUIRE_SMALL(gradient[2], 1e-15);
 }
 
+/**
+ * Test Gradient() function when regularization is used.
+ */
+BOOST_AUTO_TEST_CASE(LogisticRegressionFunctionRegularizationGradient)
+{
+  const size_t points = 5000;
+  const size_t dimension = 25;
+  const size_t trials = 10;
+
+  // Create a random dataset.
+  arma::mat data;
+  data.randu(dimension, points);
+  // Create random responses.
+  arma::vec responses(points);
+  for (size_t i = 0; i < points; ++i)
+    responses[i] = math::RandInt(0, 2);
+
+  LogisticRegressionFunction lrfNoReg(data, responses, 0.0);
+  LogisticRegressionFunction lrfSmallReg(data, responses, 0.5);
+  LogisticRegressionFunction lrfBigReg(data, responses, 20.0);
+
+  for (size_t i = 0; i < trials; ++i)
+  {
+    arma::vec parameters(dimension);
+    parameters.randu();
+
+    // The regularization term is 0.5 * lambda * || parameters ||_2^2 (but
+    // note that the first element of parameters is not regularized).  Taking
+    // the gradient of this term gives
+    //   g[i] = lambda * parameters[i]
+    // for i >= 1, and g[0] == 0 because we do not regularize the intercept
+    // term of the model.
+    arma::vec gradient;
+    arma::vec smallRegGradient;
+    arma::vec bigRegGradient;
+
+    lrfNoReg.Gradient(parameters, gradient);
+    lrfSmallReg.Gradient(parameters, smallRegGradient);
+    lrfBigReg.Gradient(parameters, bigRegGradient);
+
+    // Check sizes of gradients.
+    BOOST_REQUIRE_EQUAL(gradient.n_elem, parameters.n_elem);
+    BOOST_REQUIRE_EQUAL(smallRegGradient.n_elem, parameters.n_elem);
+    BOOST_REQUIRE_EQUAL(bigRegGradient.n_elem, parameters.n_elem);
+
+    // Make sure first term has zero regularization.
+    BOOST_REQUIRE_CLOSE(gradient[0], smallRegGradient[0], 1e-5);
+    BOOST_REQUIRE_CLOSE(gradient[0], bigRegGradient[0], 1e-5);
+
+    // Check other terms.
+    for (size_t j = 1; j < parameters.n_elem; ++j)
+    {
+      const double smallRegTerm = 0.5 * parameters[j];
+      const double bigRegTerm = 20.0 * parameters[j];
+
+      BOOST_REQUIRE_CLOSE(gradient[j] - smallRegTerm, smallRegGradient[j],
+          1e-5);
+      BOOST_REQUIRE_CLOSE(gradient[j] - bigRegTerm, bigRegGradient[j], 1e-5);
+    }
+  }
+}
+
 BOOST_AUTO_TEST_SUITE_END();
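
For reference, here is a minimal standalone sketch (not part of the commit) of
the quantity the new test compares against: the gradient contribution of the
0.5 * lambda * || parameters ||_2^2 regularizer, with the intercept element
excluded.  The variable names and values below are illustrative only.

#include <armadillo>

int main()
{
  // Example values; in the test, lambda is 0.5 or 20.0 and the parameter
  // vector has 25 elements.
  const double lambda = 0.5;
  arma::vec parameters = arma::randu<arma::vec>(25);

  // Gradient of 0.5 * lambda * || parameters ||_2^2 with respect to each
  // element: lambda * parameters[i].  The intercept (element 0) is not
  // regularized, so its contribution is zero.
  arma::vec regGradient = lambda * parameters;
  regGradient[0] = 0.0;

  // This is the per-element offset the test expects between the regularized
  // and unregularized gradients returned by Gradient().
  regGradient.print("regularization gradient contribution:");

  return 0;
}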


