[mlpack-git] master: This isn't warning output. Make it "info" instead. (2b76e6d)

gitdub at big.cc.gt.atl.ga.us
Thu Mar 5 22:17:30 EST 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/904762495c039e345beba14c1142fd719b3bd50e...f94823c800ad6f7266995c700b1b630d5ffdcf40

>---------------------------------------------------------------

commit 2b76e6d0ac86f310e4e1e47e768e43af558c4547
Author: Ryan Curtin <ryan at ratml.org>
Date:   Mon Mar 2 11:23:55 2015 -0500

    This isn't warning output.  Make it "info" instead.


>---------------------------------------------------------------

2b76e6d0ac86f310e4e1e47e768e43af558c4547
 .../optimizers/aug_lagrangian/aug_lagrangian_impl.hpp | 14 +++++---------
 .../aug_lagrangian/aug_lagrangian_test_functions.cpp  | 19 -------------------
 2 files changed, 5 insertions(+), 28 deletions(-)
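
For readers unfamiliar with mlpack's logging streams: Log::Debug, Log::Info, Log::Warn, and Log::Fatal are the four output levels, and Log::Info is suppressed unless verbose output is requested, which is why routine progress messages like the ones below belong there rather than in Log::Warn. A minimal sketch, assuming the Log interface of this era of mlpack (where the ignoreInput flag on each stream controls suppression and mlpack executables flip it when --verbose is passed):

    #include <mlpack/core.hpp>
    #include <iostream>

    using namespace mlpack;

    int main()
    {
      // Info output is ignored by default; mlpack executables enable it when
      // --verbose is given.  Library users can flip the flag directly.
      Log::Info.ignoreInput = false;

      Log::Info << "Routine progress output (hidden unless verbose)." << std::endl;
      Log::Warn << "Something the user should actually be warned about." << std::endl;

      return 0;
    }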

diff --git a/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp b/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
index 967638f..9ed30e7 100644
--- a/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
+++ b/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
@@ -85,15 +85,11 @@ bool AugLagrangian<LagrangianFunction>::Optimize(arma::mat& coordinates,
   size_t it;
   for (it = 0; it != (maxIterations - 1); it++)
   {
-    Log::Warn << "AugLagrangian on iteration " << it
+    Log::Info << "AugLagrangian on iteration " << it
         << ", starting with objective "  << lastObjective << "." << std::endl;
 
- //   Log::Warn << coordinates << std::endl;
-
-//    Log::Warn << trans(coordinates) * coordinates << std::endl;
-
     if (!lbfgs.Optimize(coordinates))
-      Log::Warn << "L-BFGS reported an error during optimization."
+      Log::Info << "L-BFGS reported an error during optimization."
           << std::endl;
 
     // Check if we are done with the entire optimization (the threshold we are
@@ -117,7 +113,7 @@ bool AugLagrangian<LagrangianFunction>::Optimize(arma::mat& coordinates,
 //          function.EvaluateConstraint(i, coordinates) << std::endl;
     }
 
-    Log::Warn << "Penalty is " << penalty << " (threshold "
+    Log::Info << "Penalty is " << penalty << " (threshold "
         << penaltyThreshold << ")." << std::endl;
 
     for (size_t i = 0; i < function.NumConstraints(); ++i)
@@ -140,7 +136,7 @@ bool AugLagrangian<LagrangianFunction>::Optimize(arma::mat& coordinates,
       // penalty.  TODO: this factor should be a parameter (from CLI).  The
       // value of 0.25 is taken from Burer and Monteiro (2002).
       penaltyThreshold = 0.25 * penalty;
-      Log::Warn << "Lagrange multiplier estimates updated." << std::endl;
+      Log::Info << "Lagrange multiplier estimates updated." << std::endl;
     }
     else
     {
@@ -148,7 +144,7 @@ bool AugLagrangian<LagrangianFunction>::Optimize(arma::mat& coordinates,
       // parameter (from CLI).  The value of 10 is taken from Burer and Monteiro
       // (2002).
       augfunc.Sigma() *= 10;
-      Log::Warn << "Updated sigma to " << augfunc.Sigma() << "." << std::endl;
+      Log::Info << "Updated sigma to " << augfunc.Sigma() << "." << std::endl;
     }
   }
 
diff --git a/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp b/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp
index 8e71e49..e2d0dbb 100644
--- a/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp
+++ b/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp
@@ -202,8 +202,6 @@ void LovaszThetaSDP::Gradient(const arma::mat& coordinates,
   // The gradient is equal to (2 S' R^T)^T, with R being coordinates.
   // S' = C - sum_{i = 1}^{m} [ y_i - sigma (Tr(A_i * (R^T R)) - b_i)] * A_i
   // We will calculate it in a not very smart way, but it should work.
- // Log::Warn << "Using stupid specialization for gradient calculation!"
- //    << std::endl;
 
   // Initialize S' piece by piece.  It is of size n x n.
   const size_t n = coordinates.n_cols;
@@ -252,15 +250,8 @@ void LovaszThetaSDP::Gradient(const arma::mat& coordinates,
     }
   }
 
-//  Log::Warn << "Calculated S is: " << std::endl << s << std::endl;
-
   gradient = trans(2 * s * trans(coordinates));
 
-//  Log::Warn << "Calculated gradient is: " << std::endl << gradient << std::endl;
-
-
-//  Log::Debug << "Evaluating gradient. " << std::endl;
-
   // The gradient of -Tr(ones * X) is equal to -2 * ones * R
 //  arma::mat ones;
 //  ones.ones(coordinates.n_rows, coordinates.n_rows);
@@ -358,9 +349,6 @@ const arma::mat& LovaszThetaSDP::GetInitialPoint()
   if (ceil(r) > vertices)
     r = vertices; // An upper bound on the dimension.
 
-  Log::Debug << "Dimension will be " << ceil(r) << " x " << vertices << "."
-      << std::endl;
-
   initialPoint.set_size(ceil(r), vertices);
 
   // Now we set the entries of the initial matrix according to the formula given
@@ -376,13 +364,6 @@ const arma::mat& LovaszThetaSDP::GetInitialPoint()
     }
   }
 
-  Log::Debug << "Initial matrix " << std::endl << initialPoint << std::endl;
-
-  Log::Warn << "X " << std::endl << trans(initialPoint) * initialPoint
-      << std::endl;
-
-  Log::Warn << "accu " << accu(trans(initialPoint) * initialPoint) << std::endl;
-
   return initialPoint;
 }
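
For context, here is a standalone sketch of the penalty schedule that surrounds the relogged lines in aug_lagrangian_impl.hpp. The 0.25 and 10 factors follow the Burer and Monteiro (2002) comments in the hunks above; the variable names and values here are illustrative only and do not reproduce the full implementation:

    #include <iostream>

    int main()
    {
      double sigma = 10.0;            // Penalty parameter (augfunc.Sigma() above).
      double penaltyThreshold = 5.0;  // Current threshold on the penalty term.
      double penalty = 3.2;           // Measured constraint violation penalty.

      if (penalty < penaltyThreshold)
      {
        // Constraints are satisfied well enough; the real code also updates
        // the Lagrange multiplier estimates at this point.
        penaltyThreshold = 0.25 * penalty;  // Factor from Burer & Monteiro (2002).
        std::cout << "Lagrange multiplier estimates updated." << std::endl;
      }
      else
      {
        // Constraints are violated too much; penalize them more heavily.
        sigma *= 10;  // Factor from Burer & Monteiro (2002).
        std::cout << "Updated sigma to " << sigma << "." << std::endl;
      }

      return 0;
    }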
 


