[mlpack-svn] r10843 - in mlpack/trunk/src/mlpack/core/optimizers: aug_lagrangian lbfgs
fastlab-svn at coffeetalk-1.cc.gatech.edu
Fri Dec 16 01:37:32 EST 2011
Author: rcurtin
Date: 2011-12-16 01:37:32 -0500 (Fri, 16 Dec 2011)
New Revision: 10843
Modified:
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp
Log:
Clean up warning, use Info instead of Debug.
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp 2011-12-16 06:37:10 UTC (rev 10842)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp 2011-12-16 06:37:32 UTC (rev 10843)
@@ -20,29 +20,46 @@
* optimization. In this scheme, a penalty term is added to the Lagrangian.
* This method is also called the "method of multipliers".
*
- * The template class LagrangianFunction must implement the following three
+ * The template class LagrangianFunction must implement the following five
* methods:
- * double Evaluate(const arma::mat& coordinates);
- * void Gradient(const arma::mat& coordinates, arma::mat& gradient);
- * int NumConstraints();
- * double EvaluateConstraint(int index, const arma::mat& coordinates);
- * double GradientConstraint(int index, const arma::mat& coordinates,
- * arma::mat& gradient);
*
+ * - double Evaluate(const arma::mat& coordinates);
+ * - void Gradient(const arma::mat& coordinates, arma::mat& gradient);
+ * - size_t NumConstraints();
+ * - double EvaluateConstraint(size_t index, const arma::mat& coordinates);
+ * - double GradientConstraint(size_t index, const arma::mat& coordinates,
+ * arma::mat& gradient);
+ *
* The number of constraints must be greater than or equal to 0, and
* EvaluateConstraint() should evaluate the constraint at the given index for
* the given coordinates. Evaluate() should provide the objective function
* value for the given coordinates.
+ *
+ * @tparam LagrangianFunction Function which can be optimized by this class.
*/
template<typename LagrangianFunction>
class AugLagrangian
{
public:
- AugLagrangian(LagrangianFunction& function, size_t numBasis);
- // not sure what to do here yet
+ /**
+ * Construct the Augmented Lagrangian optimizer with an instance of the given
+ * function.
+ *
+ * @param function Function to be optimized.
+ * @param numBasis Number of points of memory for L-BFGS.
+ */
+ AugLagrangian(LagrangianFunction& function, size_t numBasis = 5);
- bool Optimize(size_t num_iterations,
- arma::mat& coordinates,
+ /**
+ * Optimize the function.
+ *
+ * @param coordinates Output matrix to store the optimized coordinates in.
+ * @param maxIterations Maximum number of iterations of the Augmented
+ * Lagrangian algorithm. 0 indicates no maximum.
+ * @param sigma Initial penalty parameter.
+ */
+ bool Optimize(arma::mat& coordinates,
+ const size_t maxIterations = 1000,
double sigma = 0.5);
//! Get the LagrangianFunction.
@@ -56,7 +73,9 @@
size_t& NumBasis() { return numBasis; }
private:
+ //! Function to be optimized.
LagrangianFunction& function;
+ //! Number of memory points for L-BFGS.
size_t numBasis;
/**
@@ -76,11 +95,11 @@
double Evaluate(const arma::mat& coordinates);
void Gradient(const arma::mat& coordinates, arma::mat& gradient);
- const arma::mat& GetInitialPoint();
+ const arma::mat& GetInitialPoint() const;
- //! Get the Lagrangian multipliers.
+ //! Get the Lagrange multipliers.
const arma::vec& Lambda() const { return lambda; }
- //! Modify the Lagrangian multipliers.
+ //! Modify the Lagrange multipliers.
arma::vec& Lambda() { return lambda; }
//! Get sigma.
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp 2011-12-16 06:37:10 UTC (rev 10842)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp 2011-12-16 06:37:32 UTC (rev 10843)
@@ -24,8 +24,8 @@
}
template<typename LagrangianFunction>
-bool AugLagrangian<LagrangianFunction>::Optimize(size_t numIterations,
- arma::mat& coordinates,
+bool AugLagrangian<LagrangianFunction>::Optimize(arma::mat& coordinates,
+ const size_t maxIterations,
double sigma)
{
// Choose initial lambda parameters (vector of zeros, for simplicity).
@@ -44,21 +44,21 @@
for (size_t i = 0; i < function.NumConstraints(); i++)
penalty += std::pow(function.EvaluateConstraint(i, coordinates), 2);
- Log::Debug << "Penalty is " << penalty << " (threshold " << penalty_threshold
+ Log::Info << "Penalty is " << penalty << " (threshold " << penalty_threshold
<< ")." << std::endl;
- // The odd comparison allows user to pass num_iterations = 0 (i.e. no limit on
+ // The odd comparison allows user to pass maxIterations = 0 (i.e. no limit on
// number of iterations).
- int it;
- for (it = 0; it != (num_iterations - 1); it++)
+ size_t it;
+ for (it = 0; it != (maxIterations - 1); it++)
{
- Log::Debug << "AugLagrangian on iteration " << it
+ Log::Info << "AugLagrangian on iteration " << it
<< ", starting with objective " << last_objective << "." << std::endl;
// Use L-BFGS to optimize this function for the given lambda and sigma.
L_BFGS<AugLagrangianFunction> lbfgs(f, numBasis);
if (!lbfgs.Optimize(0, coordinates))
- Log::Debug << "L-BFGS reported an error during optimization."
+ Log::Info << "L-BFGS reported an error during optimization."
<< std::endl;
// Check if we are done with the entire optimization (the threshold we are
@@ -75,24 +75,25 @@
// First, calculate the current penalty.
double penalty = 0;
- for (int i = 0; i < function.NumConstraints(); i++)
+ for (size_t i = 0; i < function.NumConstraints(); i++)
penalty += std::pow(function.EvaluateConstraint(i, coordinates), 2);
- Log::Debug << "Penalty is " << penalty << " (threshold "
+ Log::Info << "Penalty is " << penalty << " (threshold "
<< penalty_threshold << ")." << std::endl;
+
if (penalty < penalty_threshold) // We update lambda.
{
// We use the update: lambda{k + 1} = lambdak - sigma * c(coordinates),
// but we have to write a loop to do this for each constraint.
- for (int i = 0; i < function.NumConstraints(); i++)
+ for (size_t i = 0; i < function.NumConstraints(); i++)
lambda[i] -= sigma * function.EvaluateConstraint(i, coordinates);
- f.lambda = lambda;
+ f.Lambda() = lambda;
// We also update the penalty threshold to be a factor of the current
// penalty. TODO: this factor should be a parameter (from CLI). The
// value of 0.25 is taken from Burer and Monteiro (2002).
penalty_threshold = 0.25 * penalty;
- Log::Debug << "Lagrange multiplier estimates updated." << std::endl;
+ Log::Info << "Lagrange multiplier estimates updated." << std::endl;
}
else
{
@@ -100,8 +101,8 @@
// parameter (from CLI). The value of 10 is taken from Burer and Monteiro
// (2002).
sigma *= 10;
- f.sigma = sigma;
- Log::Debug << "Updated sigma to " << sigma << "." << std::endl;
+ f.Sigma() = sigma;
+ Log::Info << "Updated sigma to " << sigma << "." << std::endl;
}
}
@@ -129,7 +130,7 @@
double objective = function.Evaluate(coordinates);
// Now loop over constraints.
- for (int i = 0; i < function.NumConstraints(); i++)
+ for (size_t i = 0; i < function.NumConstraints(); i++)
{
double constraint = function.EvaluateConstraint(i, coordinates);
objective += (-lambda[i] * constraint) +
@@ -154,7 +155,7 @@
// std::cout << gradient << std::endl;
arma::mat constraint_gradient; // Temporary for constraint gradients.
- for (int i = 0; i < function.NumConstraints(); i++)
+ for (size_t i = 0; i < function.NumConstraints(); i++)
{
function.GradientConstraint(i, coordinates, constraint_gradient);
@@ -174,7 +175,7 @@
template<typename LagrangianFunction>
const arma::mat& AugLagrangian<LagrangianFunction>::AugLagrangianFunction::
- GetInitialPoint()
+ GetInitialPoint() const
{
return function.GetInitialPoint();
}
Modified: mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp 2011-12-16 06:37:10 UTC (rev 10842)
+++ mlpack/trunk/src/mlpack/core/optimizers/lbfgs/lbfgs_impl.hpp 2011-12-16 06:37:32 UTC (rev 10843)
@@ -284,8 +284,8 @@
// Get the dimensions of the coordinates of the function; GetInitialPoint()
// might return an arma::vec, but that's okay because then n_cols will simply
// be 1.
- int rows = function.GetInitialPoint().n_rows;
- int cols = function.GetInitialPoint().n_cols;
+ const size_t rows = function.GetInitialPoint().n_rows;
+ const size_t cols = function.GetInitialPoint().n_cols;
newIterateTmp.set_size(rows, cols);
s.set_size(rows, cols, numBasis);
@@ -352,14 +352,14 @@
for (size_t itNum = 0; optimizeUntilConvergence || (itNum != numIterations);
itNum++)
{
- Log::Debug << "L-BFGS iteration " << itNum << "; objective " <<
+ Log::Info << "L-BFGS iteration " << itNum << "; objective " <<
function.Evaluate(iterate) << "." << std::endl;
// Break when the norm of the gradient becomes too small.
if(GradientNormTooSmall(gradient))
{
success = true; // We have found the minimum.
- Log::Debug << "L-BFGS gradient norm too small (terminating)."
+ Log::Info << "L-BFGS gradient norm too small (terminating)."
<< std::endl;
break;
}
More information about the mlpack-svn
mailing list