[mlpack-svn] r10361 - mlpack/trunk/src/mlpack/core/optimizers/lbfgs

fastlab-svn at coffeetalk-1.cc.gatech.edu
Wed Nov 23 16:58:20 EST 2011


Author: rcurtin
Date: 2011-11-23 16:58:20 -0500 (Wed, 23 Nov 2011)
New Revision: 10361

Removed:
   mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_test.cpp
Modified:
   mlpack/trunk/src/mlpack/core/optimizers/lbfgs/CMakeLists.txt
   mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs.hpp
   mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp
   mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.cpp
   mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.hpp
Log:
Update style as per #153.  Also, we didn't need the test here anymore.


Modified: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/CMakeLists.txt
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/CMakeLists.txt	2011-11-23 20:34:04 UTC (rev 10360)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/CMakeLists.txt	2011-11-23 21:58:20 UTC (rev 10361)
@@ -13,14 +13,3 @@
 endforeach()
 
 set(MLPACK_SRCS ${MLPACK_SRCS} ${DIR_SRCS} PARENT_SCOPE)
-
-# test executable
-add_executable(lbfgs_test
-  EXCLUDE_FROM_ALL
-  lbfgs_test.cpp
-)
-# link dependencies of test executable
-target_link_libraries(lbfgs_test
-  mlpack
-  boost_unit_test_framework
-)

Modified: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs.hpp	2011-11-23 20:34:04 UTC (rev 10360)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs.hpp	2011-11-23 21:58:20 UTC (rev 10361)
@@ -1,10 +1,10 @@
-/** @author Dongryeol Lee
+/**
+ * @file lbfgs.hpp
+ * @author Dongryeol Lee
+ * @author Ryan Curtin
  *
- *  @brief The generic L-BFGS optimizer.
- *
- *  @file lbfgs.h
+ * The generic L-BFGS optimizer.
  */
-
 #ifndef __MLPACK_CORE_OPTIMIZERS_LBFGS_LBFGS_HPP
 #define __MLPACK_CORE_OPTIMIZERS_LBFGS_LBFGS_HPP
 
@@ -30,9 +30,10 @@
     "the optimization.", "lbfgs", 1e-10);
 
 template<typename FunctionType>
-class L_BFGS {
+class L_BFGS
+{
  public:
-  /***
+  /**
    * Initialize the L-BFGS object.  Copy the function we will be optimizing
    * and set the size of the memory for the algorithm.
    *
@@ -41,7 +42,7 @@
    */
   L_BFGS(FunctionType& function_in, int num_basis);
 
-  /***
+  /**
    * Return the point where the lowest function value has been found.
    *
    * @return arma::vec representing the point and a double with the function
@@ -49,7 +50,7 @@
    */
   const std::pair<arma::mat, double>& min_point_iterate() const;
 
-  /***
+  /**
    * Use L-BFGS to optimize the given function, starting at the given iterate
    * point and performing no more than the specified number of maximum
    * iterations.  The given starting point will be modified to store the
@@ -61,17 +62,23 @@
   bool Optimize(int num_iterations, arma::mat& iterate);
 
  private:
+  //! Internal copy of the function we are optimizing.
   FunctionType function_;
 
+  //! Position of the new iterate.
   arma::mat new_iterate_tmp_;
-  arma::cube s_lbfgs_; // stores all the s matrices in memory
-  arma::cube y_lbfgs_; // stores all the y matrices in memory
+  //! Stores all the s matrices in memory.
+  arma::cube s_lbfgs_;
+  //! Stores all the y matrices in memory.
+  arma::cube y_lbfgs_;
 
+  //! Size of memory for this L-BFGS optimizer.
   int num_basis_;
 
+  //! Best point found so far.
   std::pair<arma::mat, double> min_point_iterate_;
 
-  /***
+  /**
    * Evaluate the function at the given iterate point and store the result if it
    * is a new minimum.
    *
@@ -79,7 +86,7 @@
    */
   double Evaluate_(const arma::mat& iterate);
 
-  /***
+  /**
    * Calculate the scaling factor gamma which is used to scale the Hessian
    * approximation matrix.  See method M3 in Section 4 of Liu and Nocedal
    * (1989).
@@ -88,7 +95,7 @@
    */
   double ChooseScalingFactor_(int iteration_num, const arma::mat& gradient);
 
-  /***
+  /**
    * Check to make sure that the norm of the gradient is not smaller than 1e-5.
    * Currently that value is not configurable.
    *
@@ -96,7 +103,7 @@
    */
   bool GradientNormTooSmall_(const arma::mat& gradient);
 
-  /***
+  /**
    * Perform a back-tracking line search along the search direction to
    * calculate a step size satisfying the Wolfe conditions.  The parameter
    * iterate will be modified if the method is successful.
@@ -115,7 +122,7 @@
                    const arma::mat& search_direction,
                    double& step_size);
 
-  /***
+  /**
    * Find the L-BFGS search direction.
    *
    * @param gradient The gradient at the current point
@@ -128,7 +135,7 @@
                         double scaling_factor,
                         arma::mat& search_direction);
 
-  /***
+  /**
    * Update the vectors y_bfgs_ and s_bfgs_, which store the differences
    * between the iterate and old iterate and the differences between the
    * gradient and the old gradient, respectively.

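For reference, a minimal usage sketch of the interface declared above, along the lines of the test removed in this revision (the basis size of 10 and the iteration count of 0 mirror that test; nothing here is new API):

#include <iostream>
#include <mlpack/core.h>
#include "lbfgs.hpp"
#include "test_functions.hpp"

using namespace mlpack::optimization;
using namespace mlpack::optimization::test;

int main()
{
  // Optimize the two-dimensional Rosenbrock function, keeping 10 basis pairs.
  RosenbrockFunction f;
  L_BFGS<RosenbrockFunction> lbfgs(f, 10);

  // Passing 0 iterations appears to mean "run until convergence" (see the
  // optimize_until_convergence flag in Optimize()).
  arma::vec coords = f.GetInitialPoint();
  const bool success = lbfgs.Optimize(0, coords);

  // The best point and function value seen so far are also cached.
  const std::pair<arma::mat, double>& best = lbfgs.min_point_iterate();

  std::cout << "success: " << success << "; minimum objective: "
            << best.second << std::endl;

  return 0;
}
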
Modified: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp	2011-11-23 20:34:04 UTC (rev 10360)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp	2011-11-23 21:58:20 UTC (rev 10361)
@@ -1,30 +1,31 @@
-/** @author Dongryeol Lee (dongryel at cc.gatech.edu)
+/**
+ * @file lbfgs_impl.hpp
+ * @author Dongryeol Lee (dongryel at cc.gatech.edu)
+ * @author Ryan Curtin
  *
- *  @brief The implementation of the L_BFGS optimizer.
- *
- *  @file lbfgs_impl.h
+ * The implementation of the L_BFGS optimizer.
  */
-
 #ifndef __MLPACK_CORE_OPTIMIZERS_LBFGS_LBFGS_IMPL_HPP
 #define __MLPACK_CORE_OPTIMIZERS_LBFGS_LBFGS_IMPL_HPP
 
 namespace mlpack {
 namespace optimization {
 
-/***
+/**
  * Evaluate the function at the given iterate point and store the result if
  * it is a new minimum.
  *
  * @return The value of the function
  */
 template<typename FunctionType>
-double L_BFGS<FunctionType>::Evaluate_(const arma::mat& iterate) {
-
+double L_BFGS<FunctionType>::Evaluate_(const arma::mat& iterate)
+{
   // Evaluate the function and keep track of the minimum function
   // value encountered during the optimization.
   double function_value = function_.Evaluate(iterate);
 
-  if(function_value < min_point_iterate_.second) {
+  if (function_value < min_point_iterate_.second)
+  {
     min_point_iterate_.first = iterate;
     min_point_iterate_.second = function_value;
   }
@@ -32,7 +33,7 @@
   return function_value;
 }
 
-/***
+/**
  * Calculate the scaling factor gamma which is used to scale the Hessian
  * approximation matrix.  See method M3 in Section 4 of Liu and Nocedal (1989).
  *
@@ -40,35 +41,40 @@
  */
 template<typename FunctionType>
 double L_BFGS<FunctionType>::ChooseScalingFactor_(int iteration_num,
-                                                  const arma::mat& gradient) {
+                                                  const arma::mat& gradient)
+{
   double scaling_factor = 1.0;
-  if(iteration_num > 0) {
+  if (iteration_num > 0)
+  {
     int previous_pos = (iteration_num - 1) % num_basis_;
     // Get s and y matrices once instead of multiple times.
     arma::mat& s_col = s_lbfgs_.slice(previous_pos);
     arma::mat& y_col = y_lbfgs_.slice(previous_pos);
     scaling_factor = dot(s_col, y_col) / dot(y_col, y_col);
-  } else {
+  }
+  else
+  {
     scaling_factor = 1.0 / sqrt(dot(gradient, gradient));
   }
 
   return scaling_factor;
 }
 
-/***
+/**
  * Check to make sure that the norm of the gradient is not smaller than 1e-10.
  * Currently that value is not configurable.
  *
  * @return (norm < lbfgs/min_gradient_norm)
  */
 template<typename FunctionType>
-bool L_BFGS<FunctionType>::GradientNormTooSmall_(const arma::mat& gradient) {
+bool L_BFGS<FunctionType>::GradientNormTooSmall_(const arma::mat& gradient)
+{
   double norm = arma::norm(gradient, 2);
 
   return (norm < CLI::GetParam<double>("lbfgs/min_gradient_norm"));
 }
 
-/***
+/**
  * Perform a back-tracking line search along the search direction to calculate a
  * step size satisfying the Wolfe conditions.
  *
@@ -85,16 +91,15 @@
                                        arma::mat& iterate,
                                        arma::mat& gradient,
                                        const arma::mat& search_direction,
-                                       double& step_size) {
-  // Implements the line search with back-tracking.
-
+                                       double& step_size)
+{
   // The initial linear term approximation in the direction of the
   // search direction.
   double initial_search_direction_dot_gradient =
-    arma::dot(gradient, search_direction);
+      arma::dot(gradient, search_direction);
 
   // If it is not a descent direction, just report failure.
-  if(initial_search_direction_dot_gradient > 0.0)
+  if (initial_search_direction_dot_gradient > 0.0)
     return false;
 
   // Save the initial function value.
@@ -102,7 +107,7 @@
 
   // Unit linear approximation to the decrease in function value.
   double linear_approx_function_value_decrease =
-      CLI::GetParam<double>("lbfgs/armijo_constant") * 
+      CLI::GetParam<double>("lbfgs/armijo_constant") *
       initial_search_direction_dot_gradient;
 
   // The number of iteration in the search.
@@ -112,32 +117,43 @@
   const double inc = 2.1;
   const double dec = 0.5;
   double width = 0;
-  for(; ;) {
-    // Perform a step and evaluate the gradient and the function
-    // values at that point.
+
+  while(true)
+  {
+    // Perform a step and evaluate the gradient and the function values at that
+    // point.
     new_iterate_tmp_ = iterate;
     new_iterate_tmp_ += step_size * search_direction;
     function_value = Evaluate_(new_iterate_tmp_);
     function_.Gradient(new_iterate_tmp_, gradient);
     num_iterations++;
 
-    if(function_value > initial_function_value + step_size *
-        linear_approx_function_value_decrease) {
+    if (function_value > initial_function_value + step_size *
+        linear_approx_function_value_decrease)
+    {
       width = dec;
-    } else {
+    }
+    else
+    {
       // Check Wolfe's condition.
       double search_direction_dot_gradient =
           arma::dot(gradient, search_direction);
       double wolfe = CLI::GetParam<double>("lbfgs/wolfe");
 
       if(search_direction_dot_gradient < wolfe *
-          initial_search_direction_dot_gradient) {
+          initial_search_direction_dot_gradient)
+      {
         width = inc;
-      } else {
-        if(search_direction_dot_gradient > -wolfe *
-            initial_search_direction_dot_gradient) {
+      }
+      else
+      {
+        if (search_direction_dot_gradient > -wolfe *
+            initial_search_direction_dot_gradient)
+        {
           width = dec;
-        } else {
+        }
+        else
+        {
           break;
         }
       }
@@ -145,9 +161,10 @@
 
     // Terminate when the step size gets too small or too big or it
     // exceeds the max number of iterations.
-    if((step_size < CLI::GetParam<double>("lbfgs/min_step")) || 
-       (step_size > CLI::GetParam<double>("lbfgs/max_step")) ||
-       (num_iterations >= CLI::GetParam<int>("lbfgs/max_line_search_trials"))) {
+    if ((step_size < CLI::GetParam<double>("lbfgs/min_step")) ||
+        (step_size > CLI::GetParam<double>("lbfgs/max_step")) ||
+        (num_iterations >= CLI::GetParam<int>("lbfgs/max_line_search_trials")))
+    {
       return false;
     }
 
@@ -160,7 +177,7 @@
   return true;
 }
 
-/***
+/**
  * Find the L_BFGS search direction.
  *
  * @param gradient The gradient at the current point
@@ -172,9 +189,10 @@
 void L_BFGS<FunctionType>::SearchDirection_(const arma::mat& gradient,
                                             int iteration_num,
                                             double scaling_factor,
-                                            arma::mat& search_direction) {
+                                            arma::mat& search_direction)
+{
   arma::mat q = gradient;
-  
+
   // See "A Recursive Formula to Compute H * g" in "Updating quasi-Newton
   // matrices with limited storage" (Nocedal, 1980).
 
@@ -183,7 +201,8 @@
   arma::vec alpha(num_basis_);
 
   int limit = std::max(iteration_num - num_basis_, 0);
-  for(int i = iteration_num - 1; i >= limit; i--) {
+  for (int i = iteration_num - 1; i >= limit; i--)
+  {
     int translated_position = i % num_basis_;
     rho[iteration_num - i - 1] = 1.0 / arma::dot(
         y_lbfgs_.slice(translated_position),
@@ -192,8 +211,11 @@
         arma::dot(s_lbfgs_.slice(translated_position), q);
     q -= alpha[iteration_num - i - 1] * y_lbfgs_.slice(translated_position);
   }
+
   search_direction = scaling_factor * q;
-  for(int i = limit; i <= iteration_num - 1; i++) {
+
+  for (int i = limit; i <= iteration_num - 1; i++)
+  {
     int translated_position = i % num_basis_;
     double beta = rho[iteration_num - i - 1] *
         arma::dot(y_lbfgs_.slice(translated_position), search_direction);
@@ -202,11 +224,10 @@
   }
 
   // Negate the search direction so that it is a descent direction.
-  for(unsigned int i = 0; i < search_direction.n_elem; i++)
-    search_direction[i] = -search_direction[i];
+  search_direction *= -1;
 }
 
-/***
+/**
  * Update the vectors y_bfgs_ and s_bfgs_, which store the differences between
  * the iterate and old iterate and the differences between the gradient and the
  * old gradient, respectively.
@@ -222,7 +243,8 @@
                                            const arma::mat& iterate,
                                            const arma::mat& old_iterate,
                                            const arma::mat& gradient,
-                                           const arma::mat& old_gradient) {
+                                           const arma::mat& old_gradient)
+{
   // Overwrite a certain position instead of pushing everything in the vector
   // back one position
   int overwrite_pos = iteration_num % num_basis_;
@@ -239,8 +261,8 @@
  */
 template<typename FunctionType>
 L_BFGS<FunctionType>::L_BFGS(FunctionType& function_in, int num_basis) :
-  function_(function_in) {
-
+  function_(function_in)
+{
   // Get the dimensions of the coordinates of the function; GetInitialPoint()
   // might return an arma::vec, but that's okay because then n_cols will simply
   // be 1.
@@ -257,7 +279,7 @@
   min_point_iterate_.second = std::numeric_limits<double>::max();
 }
 
-/***
+/**
  * Return the point where the lowest function value has been found.
  *
  * @return arma::vec representing the point and a double with the function
@@ -265,11 +287,12 @@
  */
 template<typename FunctionType>
 const std::pair<arma::mat, double>&
-L_BFGS<FunctionType>::min_point_iterate() const {
+L_BFGS<FunctionType>::min_point_iterate() const
+{
   return min_point_iterate_;
 }
 
-/***
+/**
  * Use L_BFGS to optimize the given function, starting at the given iterate
  * point and performing no more than the specified number of maximum iterations.
  * The given starting point will be modified to store the finishing point of the
@@ -279,7 +302,8 @@
  * @param iterate Starting point (will be modified)
  */
 template<typename FunctionType>
-bool L_BFGS<FunctionType>::Optimize(int num_iterations, arma::mat& iterate) {
+bool L_BFGS<FunctionType>::Optimize(int num_iterations, arma::mat& iterate)
+{
   // The old iterate to be saved.
   arma::mat old_iterate;
   old_iterate.zeros(iterate.n_rows, iterate.n_cols);
@@ -307,15 +331,18 @@
   bool success = false;
 
   // The main optimization loop.
-  for(int it_num = 0; optimize_until_convergence || it_num < num_iterations;
-      it_num++) {
+  for (int it_num = 0; optimize_until_convergence || it_num < num_iterations;
+       it_num++)
+  {
     Log::Debug << "L-BFGS iteration " << it_num << "; objective " <<
         function_.Evaluate(iterate) << "." << std::endl;
 
     // Break when the norm of the gradient becomes too small.
-    if(GradientNormTooSmall_(gradient)) {
+    if(GradientNormTooSmall_(gradient))
+    {
       success = true; // We have found the minimum.
-      Log::Debug << "L-BFGS gradient norm too small (terminating)." << std::endl;
+      Log::Debug << "L-BFGS gradient norm too small (terminating)."
+          << std::endl;
       break;
     }
 
@@ -335,7 +362,7 @@
     success = LineSearch_(function_value, iterate, gradient, search_direction,
         step_size);
 
-    if(!success)
+    if (!success)
       break; // The line search failed; nothing else to try.
 
     // Overwrite an old basis set.

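The refactored SearchDirection_ above is the standard L-BFGS two-loop recursion ("A Recursive Formula to Compute H * g", Nocedal, 1980). A standalone sketch of the same computation, with the stored pairs held in a std::vector rather than the class's s_lbfgs_ / y_lbfgs_ cubes; the function and variable names here are purely illustrative:

#include <vector>
#include <armadillo>

// Compute the (negated) L-BFGS direction -H_k * gradient, given the most
// recent s/y pairs ordered oldest-to-newest and the scaling factor gamma.
arma::mat TwoLoopRecursion(const arma::mat& gradient,
                           const std::vector<arma::mat>& s_mem,
                           const std::vector<arma::mat>& y_mem,
                           const double scaling_factor)
{
  arma::mat q = gradient;
  const size_t m = s_mem.size();
  std::vector<double> rho(m), alpha(m);

  // First loop: walk from the newest pair back to the oldest.
  for (size_t i = m; i-- > 0; )
  {
    rho[i] = 1.0 / arma::dot(y_mem[i], s_mem[i]);
    alpha[i] = rho[i] * arma::dot(s_mem[i], q);
    q -= alpha[i] * y_mem[i];
  }

  // Apply the initial Hessian approximation, gamma * I.
  arma::mat r = scaling_factor * q;

  // Second loop: walk from the oldest pair forward to the newest.
  for (size_t i = 0; i < m; ++i)
  {
    const double beta = rho[i] * arma::dot(y_mem[i], r);
    r += (alpha[i] - beta) * s_mem[i];
  }

  // Negate so the result is a descent direction, as SearchDirection_ does.
  return -r;
}
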
Deleted: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_test.cpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_test.cpp	2011-11-23 20:34:04 UTC (rev 10360)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_test.cpp	2011-11-23 21:58:20 UTC (rev 10361)
@@ -1,102 +0,0 @@
-/***
- * @file lbfgs_test.cc
- *
- * Tests the L-BFGS optimizer on a couple test functions.
- *
- * @author Ryan Curtin (gth671b at mail.gatech.edu)
- */
-
-#include <mlpack/core.h>
-#include "lbfgs.hpp"
-#include "test_functions.hpp"
-
-#define BOOST_TEST_MODULE L-BFGS Test
-#include <boost/test/unit_test.hpp>
-
-using namespace mlpack::optimization;
-using namespace mlpack::optimization::test;
-
-/***
- * Tests the L-BFGS optimizer using the Rosenbrock Function.
- */
-BOOST_AUTO_TEST_CASE(rosenbrock_function) {
-  RosenbrockFunction f;
-  L_BFGS<RosenbrockFunction> lbfgs(f, 10);
-
-  arma::vec coords = f.GetInitialPoint();
-  if(!lbfgs.Optimize(0, coords))
-    BOOST_FAIL("L-BFGS optimization reported failure.");
-
-  double final_value = f.Evaluate(coords);
-
-  BOOST_REQUIRE_SMALL(final_value, 1e-5);
-  BOOST_REQUIRE_CLOSE(coords[0], 1, 1e-5);
-  BOOST_REQUIRE_CLOSE(coords[1], 1, 1e-5);
-}
-
-/***
- * Tests the L-BFGS optimizer using the Wood Function.
- */
-BOOST_AUTO_TEST_CASE(wood_function) {
-  WoodFunction f;
-  L_BFGS<WoodFunction> lbfgs(f, 10);
-
-  arma::vec coords = f.GetInitialPoint();
-  if(!lbfgs.Optimize(0, coords))
-    BOOST_FAIL("L-BFGS optimization reported failure.");
-
-  double final_value = f.Evaluate(coords);
-
-  BOOST_REQUIRE_SMALL(final_value, 1e-5);
-  BOOST_REQUIRE_CLOSE(coords[0], 1, 1e-5);
-  BOOST_REQUIRE_CLOSE(coords[1], 1, 1e-5);
-  BOOST_REQUIRE_CLOSE(coords[2], 1, 1e-5);
-  BOOST_REQUIRE_CLOSE(coords[3], 1, 1e-5);
-}
-
-/***
- * Tests the L-BFGS optimizer using the generalized Rosenbrock function.  This
- * is actually multiple tests, increasing the dimension by powers of 2, from 4
- * dimensions to 1024 dimensions.
- */
-BOOST_AUTO_TEST_CASE(generalized_rosenbrock_function) {
-  for (int i = 2; i < 10; i++) {
-    // Dimension: powers of 2
-    int dim = std::pow(2, i);
-
-    GeneralizedRosenbrockFunction f(dim);
-    L_BFGS<GeneralizedRosenbrockFunction> lbfgs(f, 20);
-
-    arma::vec coords = f.GetInitialPoint();
-    if(!lbfgs.Optimize(0, coords))
-      BOOST_FAIL("L-BFGS optimization reported failure.");
-
-    double final_value = f.Evaluate(coords);
-
-    // Test the output to make sure it is correct.
-    BOOST_REQUIRE_SMALL(final_value, 1e-5);
-    for (int j = 0; j < dim; j++)
-      BOOST_REQUIRE_CLOSE(coords[j], 1, 1e-5);
-  }
-};
-
-/***
- * Tests the L-BFGS optimizer using the Rosenbrock-Wood combined function.  This
- * is a test on optimizing a matrix of coordinates.
- */
-BOOST_AUTO_TEST_CASE(rosenbrock_wood_function) {
-  RosenbrockWoodFunction f;
-  L_BFGS<RosenbrockWoodFunction> lbfgs(f, 10);
-
-  arma::mat coords = f.GetInitialPoint();
-  if(!lbfgs.Optimize(0, coords))
-    BOOST_FAIL("L-BFGS optimization reported failure.");
-
-  double final_value = f.Evaluate(coords);
-
-  BOOST_REQUIRE_SMALL(final_value, 1e-5);
-  for (int row = 0; row < 4; row++) {
-    BOOST_REQUIRE_CLOSE((coords(row, 0)), 1, 1e-5);
-    BOOST_REQUIRE_CLOSE((coords(row, 1)), 1, 1e-5);
-  }
-}

Modified: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.cpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.cpp	2011-11-23 20:34:04 UTC (rev 10360)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.cpp	2011-11-23 21:58:20 UTC (rev 10361)
@@ -1,9 +1,8 @@
-/***
- * test_functions.cc
+/**
+ * @file test_functions.cpp
+ * @author Ryan Curtin
  *
- * Implementations of the test functions defined in test_functions.h.
- *
- * @author Ryan Curtin
+ * Implementations of the test functions defined in test_functions.hpp.
  */
 #include "test_functions.hpp"
 
@@ -13,16 +12,18 @@
 // RosenbrockFunction implementation
 //
 
-RosenbrockFunction::RosenbrockFunction() {
+RosenbrockFunction::RosenbrockFunction()
+{
   initial_point.set_size(2, 1);
   initial_point[0] = -1.2;
   initial_point[1] = 1;
 }
 
-/***
+/**
  * Calculate the objective function.
  */
-double RosenbrockFunction::Evaluate(const arma::mat& coordinates) {
+double RosenbrockFunction::Evaluate(const arma::mat& coordinates)
+{
   double x1 = coordinates[0];
   double x2 = coordinates[1];
 
@@ -32,23 +33,25 @@
   return objective;
 }
 
-/***
+/**
  * Calculate the gradient.
  */
 void RosenbrockFunction::Gradient(const arma::mat& coordinates,
-                                  arma::mat& gradient) {
+                                  arma::mat& gradient)
+{
   // f'_{x1}(x) = -2 (1 - x1) + 400 (x1^3 - (x2 x1))
   // f'_{x2}(x) = 200 (x2 - x1^2)
 
   double x1 = coordinates[0];
   double x2 = coordinates[1];
- 
+
   gradient.set_size(2, 1);
   gradient[0] = -2 * (1 - x1) + 400 * (std::pow(x1, 3) - x2 * x1);
   gradient[1] = 200 * (x2 - std::pow(x1, 2));
 }
 
-const arma::mat& RosenbrockFunction::GetInitialPoint() {
+const arma::mat& RosenbrockFunction::GetInitialPoint()
+{
   return initial_point;
 }
 
@@ -56,7 +59,8 @@
 // WoodFunction implementation
 //
 
-WoodFunction::WoodFunction() {
+WoodFunction::WoodFunction()
+{
   initial_point.set_size(4, 1);
   initial_point[0] = -3;
   initial_point[1] = -1;
@@ -64,10 +68,11 @@
   initial_point[3] = -1;
 }
 
-/***
+/**
  * Calculate the objective function.
  */
-double WoodFunction::Evaluate(const arma::mat& coordinates) {
+double WoodFunction::Evaluate(const arma::mat& coordinates)
+{
   // For convenience; we assume these temporaries will be optimized out.
   double x1 = coordinates[0];
   double x2 = coordinates[1];
@@ -84,11 +89,12 @@
   return objective;
 }
 
-/***
+/**
  * Calculate the gradient.
  */
 void WoodFunction::Gradient(const arma::mat& coordinates,
-                            arma::mat& gradient) {
+                            arma::mat& gradient)
+{
   // For convenience; we assume these temporaries will be optimized out.
   double x1 = coordinates[0];
   double x2 = coordinates[1];
@@ -108,7 +114,8 @@
       (1 / 5) * (x2 - x4);
 }
 
-const arma::mat& WoodFunction::GetInitialPoint() {
+const arma::mat& WoodFunction::GetInitialPoint()
+{
   return initial_point;
 }
 
@@ -116,9 +123,11 @@
 // GeneralizedRosenbrockFunction implementation
 //
 
-GeneralizedRosenbrockFunction::GeneralizedRosenbrockFunction(int n) : n(n) {
+GeneralizedRosenbrockFunction::GeneralizedRosenbrockFunction(int n) : n(n)
+{
   initial_point.set_size(n, 1);
-  for (int i = 0; i < n; i++) { // Set to [-1.2 1 -1.2 1 ...].
+  for (int i = 0; i < n; i++) // Set to [-1.2 1 -1.2 1 ...].
+  {
     if (i % 2 == 1)
       initial_point[i] = -1.2;
     else
@@ -126,12 +135,14 @@
   }
 }
 
-/***
+/**
  * Calculate the objective function.
  */
-double GeneralizedRosenbrockFunction::Evaluate(const arma::mat& coordinates) {
+double GeneralizedRosenbrockFunction::Evaluate(const arma::mat& coordinates)
+{
   double fval = 0;
-  for (int i = 0; i < (n - 1); i++) {
+  for (int i = 0; i < (n - 1); i++)
+  {
     fval += 100 * std::pow(std::pow(coordinates[i], 2) -
         coordinates[i + 1], 2) + std::pow(1 - coordinates[i], 2);
   }
@@ -139,24 +150,28 @@
   return fval;
 }
 
-/***
+/**
  * Calculate the gradient.
  */
 void GeneralizedRosenbrockFunction::Gradient(const arma::mat& coordinates,
-                                             arma::mat& gradient) {
+                                             arma::mat& gradient)
+{
   gradient.set_size(n);
-  for(int i = 0; i < (n - 1); i++) {
-    gradient[i] = 400 * (std::pow(coordinates[i], 3) - coordinates[i] * 
+  for (int i = 0; i < (n - 1); i++)
+  {
+    gradient[i] = 400 * (std::pow(coordinates[i], 3) - coordinates[i] *
         coordinates[i + 1]) + 2 * (coordinates[i] - 1);
-    if(i > 0)
+
+    if (i > 0)
       gradient[i] += 200 * (coordinates[i] - std::pow(coordinates[i - 1], 2));
   }
 
   gradient[n - 1] = 200 * (coordinates[n - 1] -
-      std::pow(coordinates[n - 2], 2)); 
+      std::pow(coordinates[n - 2], 2));
 }
 
-const arma::mat& GeneralizedRosenbrockFunction::GetInitialPoint() {
+const arma::mat& GeneralizedRosenbrockFunction::GetInitialPoint()
+{
   return initial_point;
 }
 
@@ -164,16 +179,18 @@
 // RosenbrockWoodFunction implementation
 //
 
-RosenbrockWoodFunction::RosenbrockWoodFunction() : rf(4), wf() {
+RosenbrockWoodFunction::RosenbrockWoodFunction() : rf(4), wf()
+{
   initial_point.set_size(4, 2);
   initial_point.col(0) = rf.GetInitialPoint();
   initial_point.col(1) = wf.GetInitialPoint();
 }
 
-/***
+/**
  * Calculate the objective function.
  */
-double RosenbrockWoodFunction::Evaluate(const arma::mat& coordinates) {
+double RosenbrockWoodFunction::Evaluate(const arma::mat& coordinates)
+{
   double objective = rf.Evaluate(coordinates.col(0)) +
                      wf.Evaluate(coordinates.col(1));
 
@@ -184,9 +201,10 @@
  * Calculate the gradient.
  */
 void RosenbrockWoodFunction::Gradient(const arma::mat& coordinates,
-                                      arma::mat& gradient) {
+                                      arma::mat& gradient)
+{
   gradient.set_size(4, 2);
-  
+
   arma::vec grf(4);
   arma::vec gwf(4);
 
@@ -197,6 +215,7 @@
   gradient.col(1) = gwf;
 }
 
-const arma::mat& RosenbrockWoodFunction::GetInitialPoint() {
+const arma::mat& RosenbrockWoodFunction::GetInitialPoint()
+{
   return initial_point;
 }

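As a quick check, the gradient comments in RosenbrockFunction::Gradient follow directly from differentiating the objective defined in test_functions.hpp:

\[
f(x_1, x_2) = 100\,(x_2 - x_1^2)^2 + (1 - x_1)^2
\]
\[
\frac{\partial f}{\partial x_1}
  = -400\,x_1\,(x_2 - x_1^2) - 2\,(1 - x_1)
  = 400\,(x_1^3 - x_1 x_2) - 2\,(1 - x_1),
\qquad
\frac{\partial f}{\partial x_2} = 200\,(x_2 - x_1^2),
\]

which is exactly what Gradient() writes into gradient[0] and gradient[1].
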
Modified: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.hpp	2011-11-23 20:34:04 UTC (rev 10360)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/test_functions.hpp	2011-11-23 21:58:20 UTC (rev 10361)
@@ -1,5 +1,6 @@
-/***
- * test_functions.h
+/**
+ * @file test_functions.h
+ * @author Ryan Curtin
  *
  * A collection of functions to test optimizers (in this case, L-BFGS).  These
  * come from the following paper:
@@ -8,13 +9,10 @@
  *  Jorge J. Moré, Burton S. Garbow, and Kenneth E. Hillstrom. 1981.
  *  ACM Trans. Math. Softw. 7, 1 (March 1981), 17-41.
  *  http://portal.acm.org/citation.cfm?id=355934.355936
- *
- * @author Ryan Curtin
  */
+#ifndef __MLPACK_CORE_OPTIMIZERS_LBFGS_TEST_FUNCTIONS_HPP
+#define __MLPACK_CORE_OPTIMIZERS_LBFGS_TEST_FUNCTIONS_HPP
 
-#ifndef __MLPAC_CORE_OPTIMIZERS_LBFGS_TEST_FUNCTIONS_HPP
-#define __MLPAC_CORE_OPTIMIZERS_LBFGS_TEST_FUNCTIONS_HPP
-
 #include <mlpack/core.h>
 
 // To fulfill the template policy class 'FunctionType', we must implement
@@ -31,12 +29,11 @@
 // use either internally but the L-BFGS method requires arma::mat& to be passed
 // (C++ does not allow implicit reference casting to subclasses).
 
-// these names should probably be changed later
 namespace mlpack {
 namespace optimization {
 namespace test {
 
-/***
+/**
  * The Rosenbrock function, defined by
  *  f(x) = f1(x) + f2(x)
  *  f1(x) = 100 (x2 - x1^2)^2
@@ -48,7 +45,8 @@
  * "An automatic method for finding the greatest or least value of a function."
  *   H.H. Rosenbrock.  1960.  Comput. J. 3., 175-184.
  */
-class RosenbrockFunction {
+class RosenbrockFunction
+{
  public:
   RosenbrockFunction(); // initialize initial point
 
@@ -61,7 +59,7 @@
   arma::mat initial_point;
 };
 
-/***
+/**
  * The Wood function, defined by
  *  f(x) = f1(x) + f2(x) + f3(x) + f4(x) + f5(x) + f6(x)
  *  f1(x) = 100 (x2 - x1^2)^2
@@ -90,7 +88,7 @@
   arma::mat initial_point;
 };
 
-/***
+/**
  * The Generalized Rosenbrock function in n dimensions, defined by
  *  f(x) = sum_i^{n - 1} (f(i)(x))
  *  f_i(x) = 100 * (x_i^2 - x_{i + 1})^2 + (1 - x_i)^2
@@ -101,7 +99,8 @@
  * "An analysis of the behavior of a glass of genetic adaptive systems."
  *   K.A. De Jong.  Ph.D. thesis, University of Michigan, 1975.
  */
-class GeneralizedRosenbrockFunction {
+class GeneralizedRosenbrockFunction
+{
  public:
   /***
    * Set the dimensionality of the extended Rosenbrock function.
@@ -120,13 +119,13 @@
   int n; // Dimensionality
 };
 
-
-/***
+/**
  * The Generalized Rosenbrock function in 4 dimensions with the Wood Function in
  * four dimensions.  In this function we are actually optimizing a 2x4 matrix of
  * coordinates, not a vector.
  */
-class RosenbrockWoodFunction {
+class RosenbrockWoodFunction
+{
  public:
   RosenbrockWoodFunction(); // initialize initial point
 

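As the header comment in test_functions.hpp notes, these classes exist to satisfy the 'FunctionType' template policy expected by L_BFGS<>. A skeletal example of a conforming class, with a made-up name and a trivial quadratic objective purely for illustration:

#include <mlpack/core.h>

// Hypothetical FunctionType: f(x) = x' x, minimized at the origin.
class QuadraticFunction
{
 public:
  QuadraticFunction() { initial_point.ones(3, 1); }

  // Objective value at the given coordinates.
  double Evaluate(const arma::mat& coordinates)
  {
    return arma::dot(coordinates, coordinates);
  }

  // Gradient f'(x) = 2 x, written into the output matrix.
  void Gradient(const arma::mat& coordinates, arma::mat& gradient)
  {
    gradient = 2 * coordinates;
  }

  // Starting point for the optimization.
  const arma::mat& GetInitialPoint() { return initial_point; }

 private:
  arma::mat initial_point;
};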


