[mlpack-svn] r10360 - mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian
fastlab-svn at coffeetalk-1.cc.gatech.edu
Wed Nov 23 15:34:05 EST 2011
Author: rcurtin
Date: 2011-11-23 15:34:04 -0500 (Wed, 23 Nov 2011)
New Revision: 10360
Removed:
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test.cpp
Modified:
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/CMakeLists.txt
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp
mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.hpp
Log:
Remove test file, and fix formatting as per #153.
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/CMakeLists.txt
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/CMakeLists.txt 2011-11-23 17:56:15 UTC (rev 10359)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/CMakeLists.txt 2011-11-23 20:34:04 UTC (rev 10360)
@@ -13,13 +13,3 @@
endforeach()
set(MLPACK_SRCS ${MLPACK_SRCS} ${DIR_SRCS} PARENT_SCOPE)
-
-# test executable
-add_executable(aug_lagrangian_test
- EXCLUDE_FROM_ALL
- aug_lagrangian_test.cpp
-)
-target_link_libraries(aug_lagrangian_test
- mlpack
- boost_unit_test_framework
-)
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp 2011-11-23 17:56:15 UTC (rev 10359)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian.hpp 2011-11-23 20:34:04 UTC (rev 10360)
@@ -1,4 +1,4 @@
-/***
+/**
* @file aug_lagrangian.h
* @author Ryan Curtin
*
@@ -15,7 +15,7 @@
namespace mlpack {
namespace optimization {
-/***
+/**
* The AugLagrangian class implements the Augmented Lagrangian method of
* optimization. In this scheme, a penalty term is added to the Lagrangian.
* This method is also called the "method of multipliers".
@@ -35,7 +35,8 @@
* value for the given coordinates.
*/
template<typename LagrangianFunction>
-class AugLagrangian {
+class AugLagrangian
+{
public:
AugLagrangian(LagrangianFunction& function_in, int num_basis);
// not sure what to do here yet
@@ -48,14 +49,15 @@
LagrangianFunction& function_;
int num_basis_;
- /***
+ /**
* This is a utility class, which we will pass to L-BFGS during the
* optimization. We use a utility class so that we do not have to expose
* Evaluate() and Gradient() to the AugLagrangian public interface; instead,
* with a private class, these methods are correctly protected (since they
* should not be being used anywhere else).
*/
- class AugLagrangianFunction {
+ class AugLagrangianFunction
+ {
public:
AugLagrangianFunction(LagrangianFunction& function_in,
arma::vec& lambda_in,
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp 2011-11-23 17:56:15 UTC (rev 10359)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_impl.hpp 2011-11-23 20:34:04 UTC (rev 10360)
@@ -18,14 +18,16 @@
AugLagrangian<LagrangianFunction>::AugLagrangian(
LagrangianFunction& function_in, int num_basis) :
function_(function_in),
- num_basis_(num_basis) {
+ num_basis_(num_basis)
+{
// Not sure what to do here (if anything).
}
template<typename LagrangianFunction>
bool AugLagrangian<LagrangianFunction>::Optimize(int num_iterations,
arma::mat& coordinates,
- double sigma) {
+ double sigma)
+{
// Choose initial lambda parameters (vector of zeros, for simplicity).
arma::vec lambda(function_.NumConstraints());
lambda.ones();
@@ -48,21 +50,23 @@
// The odd comparison allows user to pass num_iterations = 0 (i.e. no limit on
// number of iterations).
int it;
- for (it = 0; it != (num_iterations - 1); it++) {
+ for (it = 0; it != (num_iterations - 1); it++)
+ {
Log::Debug << "AugLagrangian on iteration " << it
<< ", starting with objective " << last_objective << "." << std::endl;
// Use L-BFGS to optimize this function for the given lambda and sigma.
L_BFGS<AugLagrangianFunction> lbfgs(f, num_basis_);
- if(!lbfgs.Optimize(0, coordinates)) {
- Log::Debug << "L-BFGS reported an error during optimization." << std::endl;
- }
+ if (!lbfgs.Optimize(0, coordinates))
+ Log::Debug << "L-BFGS reported an error during optimization."
+ << std::endl;
// Check if we are done with the entire optimization (the threshold we are
// comparing with is arbitrary).
if (std::abs(last_objective - function_.Evaluate(coordinates)) < 1e-10 &&
sigma > 500000)
return true;
+
last_objective = function_.Evaluate(coordinates);
// Assuming that the optimization has converged to a new set of coordinates,
@@ -74,9 +78,10 @@
for (int i = 0; i < function_.NumConstraints(); i++)
penalty += std::pow(function_.EvaluateConstraint(i, coordinates), 2);
- Log::Debug << "Penalty is " << penalty << " (threshold " << penalty_threshold
- << ")." << std::endl;
- if (penalty < penalty_threshold) { // We update lambda.
+ Log::Debug << "Penalty is " << penalty << " (threshold "
+ << penalty_threshold << ")." << std::endl;
+ if (penalty < penalty_threshold) // We update lambda.
+ {
// We use the update: lambda_{k + 1} = lambda_k - sigma * c(coordinates),
// but we have to write a loop to do this for each constraint.
for (int i = 0; i < function_.NumConstraints(); i++)
@@ -84,12 +89,13 @@
f.lambda_ = lambda;
// We also update the penalty threshold to be a factor of the current
- // penalty. TODO: this factor should be a parameter (from CLI). The value
- // of 0.25 is taken from Burer and Monteiro (2002).
+ // penalty. TODO: this factor should be a parameter (from CLI). The
+ // value of 0.25 is taken from Burer and Monteiro (2002).
penalty_threshold = 0.25 * penalty;
Log::Debug << "Lagrange multiplier estimates updated." << std::endl;
-
- } else {
+ }
+ else
+ {
// We multiply sigma by a constant value. TODO: this factor should be a
// parameter (from CLI). The value of 10 is taken from Burer and Monteiro
// (2002).
@@ -108,20 +114,23 @@
LagrangianFunction& function_in, arma::vec& lambda_in, double sigma) :
lambda_(lambda_in),
sigma_(sigma),
- function_(function_in) {
+ function_(function_in)
+{
// Nothing to do.
}
template<typename LagrangianFunction>
double AugLagrangian<LagrangianFunction>::AugLagrangianFunction::Evaluate(
- const arma::mat& coordinates) {
+ const arma::mat& coordinates)
+{
// The augmented Lagrangian is evaluated as
// f(x) + {-lambda_i * c_i(x) + (sigma / 2) c_i(x)^2} for all constraints
// Log::Debug << "Evaluating augmented Lagrangian." << std::endl;
double objective = function_.Evaluate(coordinates);
// Now loop over constraints.
- for (int i = 0; i < function_.NumConstraints(); i++) {
+ for (int i = 0; i < function_.NumConstraints(); i++)
+ {
double constraint = function_.EvaluateConstraint(i, coordinates);
objective += (-lambda_[i] * constraint) +
sigma_ * std::pow(constraint, 2) / 2;
@@ -134,7 +143,8 @@
template<typename LagrangianFunction>
void AugLagrangian<LagrangianFunction>::AugLagrangianFunction::Gradient(
- const arma::mat& coordinates, arma::mat& gradient) {
+ const arma::mat& coordinates, arma::mat& gradient)
+{
// The augmented Lagrangian's gradient is evaluated as
// f'(x) + {(-lambda_i + sigma * c_i(x)) * c'_i(x)} for all constraints
// gradient.zeros();
@@ -144,7 +154,8 @@
// std::cout << gradient << std::endl;
arma::mat constraint_gradient; // Temporary for constraint gradients.
- for (int i = 0; i < function_.NumConstraints(); i++) {
+ for (int i = 0; i < function_.NumConstraints(); i++)
+ {
function_.GradientConstraint(i, coordinates, constraint_gradient);
// Now calculate scaling factor and add to existing gradient.
@@ -163,7 +174,8 @@
template<typename LagrangianFunction>
const arma::mat& AugLagrangian<LagrangianFunction>::AugLagrangianFunction::
- GetInitialPoint() {
+ GetInitialPoint()
+{
return function_.GetInitialPoint();
}
Deleted: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test.cpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test.cpp 2011-11-23 17:56:15 UTC (rev 10359)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test.cpp 2011-11-23 20:34:04 UTC (rev 10360)
@@ -1,107 +0,0 @@
-/***
- * @file aug_lagrangian_test.cc
- * @author Ryan Curtin
- *
- * Test of the AugmentedLagrangian class using the test functions defined in
- * aug_lagrangian_test_functions.h.
- */
-
-#include <mlpack/core.h>
-#include "aug_lagrangian.hpp"
-#include "aug_lagrangian_test_functions.hpp"
-
-#define BOOST_TEST_MODULE Augmented Lagrangian Test
-#include <boost/test/unit_test.hpp>
-
-using namespace mlpack;
-using namespace mlpack::optimization;
-
-/***
- * Tests the Augmented Lagrangian optimizer using the
- * AugmentedLagrangianTestFunction class.
- */
-BOOST_AUTO_TEST_CASE(aug_lagrangian_test_function) {
- // The choice of 10 memory slots is arbitrary.
- AugLagrangianTestFunction f;
- AugLagrangian<AugLagrangianTestFunction> aug(f, 10);
-
- arma::vec coords = f.GetInitialPoint();
-
- if(!aug.Optimize(0, coords))
- BOOST_FAIL("Optimization reported failure.");
-
- double final_value = f.Evaluate(coords);
-
- BOOST_REQUIRE_CLOSE(final_value, 70, 1e-5);
- BOOST_REQUIRE_CLOSE(coords[0], 1, 1e-5);
- BOOST_REQUIRE_CLOSE(coords[1], 4, 1e-5);
-}
-
-/***
- * Tests the Augmented Lagrangian optimizer using the Gockenbach function.
- */
-BOOST_AUTO_TEST_CASE(gockenbach_function) {
- GockenbachFunction f;
- AugLagrangian<GockenbachFunction> aug(f, 10);
-
- arma::vec coords = f.GetInitialPoint();
-
- if(!aug.Optimize(0, coords))
- BOOST_FAIL("Optimization reported failure.");
-
- double final_value = f.Evaluate(coords);
-
- BOOST_REQUIRE_CLOSE(final_value, 29.633926, 1e-5);
- BOOST_REQUIRE_CLOSE(coords[0], 0.12288178, 1e-5);
- BOOST_REQUIRE_CLOSE(coords[1], -1.10778185, 1e-5);
- BOOST_REQUIRE_CLOSE(coords[2], 0.015099932, 1e-5);
-}
-
-/***
- * Extremely simple test case for the Lovasz theta SDP.
- */
-BOOST_AUTO_TEST_CASE(extremely_simple_lovasz_theta_sdp) {
- // Manually input the single edge.
- arma::mat edges = "0; 1";
-
- LovaszThetaSDP ltsdp(edges);
- AugLagrangian<LovaszThetaSDP> aug(ltsdp, 10);
-
- arma::mat coords = ltsdp.GetInitialPoint();
-
- if (!aug.Optimize(0, coords))
- BOOST_FAIL("Optimization reported failure.");
-
- double final_value = ltsdp.Evaluate(coords);
-
- arma::mat X = trans(coords) * coords;
-
- BOOST_CHECK_CLOSE(final_value, -1.0, 1e-5);
-
- BOOST_CHECK_CLOSE(X(0, 0) + X(1, 1), 1.0, 1e-5);
- BOOST_CHECK_SMALL(X(0, 1), 1e-8);
- BOOST_CHECK_SMALL(X(1, 0), 1e-8);
-}
-
-/***
- * Tests the Augmented Lagrangian optimizer on the Lovasz theta SDP, using the
- * hamming_10_2 dataset, just like in the paper by Monteiro and Burer.
- *
-BOOST_AUTO_TEST_CASE(lovasz_theta_johnson8_4_4) {
- arma::mat edges;
- // Hardcoded filename: bad!
- data::Load("MANN-a27.csv", edges);
-
- LovaszThetaSDP ltsdp(edges);
- AugLagrangian<LovaszThetaSDP> aug(ltsdp, 10);
-
- arma::mat coords = ltsdp.GetInitialPoint();
-
- if(!aug.Optimize(0, coords))
- BOOST_FAIL("Optimization reported failure.");
-
- double final_value = ltsdp.Evaluate(coords);
-
- BOOST_REQUIRE_CLOSE(final_value, -14.0, 1e-5);
-}
-*/
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp 2011-11-23 17:56:15 UTC (rev 10359)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.cpp 2011-11-23 20:34:04 UTC (rev 10360)
@@ -1,5 +1,5 @@
/**
- * @file aug_lagrangian_test_functions.cc
+ * @file aug_lagrangian_test_functions.cpp
* @author Ryan Curtin
*
* Implementation of AugLagrangianTestFunction class.
@@ -13,18 +13,21 @@
//
// AugLagrangianTestFunction
//
-AugLagrangianTestFunction::AugLagrangianTestFunction() {
+AugLagrangianTestFunction::AugLagrangianTestFunction()
+{
// Set the initial point to be (0, 0).
initial_point_.zeros(2, 1);
}
AugLagrangianTestFunction::AugLagrangianTestFunction(
const arma::mat& initial_point) :
- initial_point_(initial_point) {
+ initial_point_(initial_point)
+{
// Nothing to do.
}
-double AugLagrangianTestFunction::Evaluate(const arma::mat& coordinates) {
+double AugLagrangianTestFunction::Evaluate(const arma::mat& coordinates)
+{
// f(x) = 6 x_1^2 + 4 x_1 x_2 + 3 x_2^2
return ((6 * std::pow(coordinates[0], 2)) +
(4 * (coordinates[0] * coordinates[1])) +
@@ -32,7 +35,8 @@
}
void AugLagrangianTestFunction::Gradient(const arma::mat& coordinates,
- arma::mat& gradient) {
+ arma::mat& gradient)
+{
// f'_x1(x) = 12 x_1 + 4 x_2
// f'_x2(x) = 4 x_1 + 6 x_2
gradient.set_size(2, 1);
@@ -42,7 +46,8 @@
}
double AugLagrangianTestFunction::EvaluateConstraint(int index,
- const arma::mat& coordinates) {
+ const arma::mat& coordinates)
+{
// We return 0 if the index is wrong (not 0).
if (index != 0)
return 0;
@@ -52,7 +57,8 @@
}
void AugLagrangianTestFunction::GradientConstraint(int index,
- const arma::mat& coordinates, arma::mat& gradient) {
+ const arma::mat& coordinates, arma::mat& gradient)
+{
// If the user passed an invalid index (not 0), we will return a zero
// gradient.
gradient.zeros(2, 1);
@@ -67,18 +73,21 @@
//
// GockenbachFunction
//
-GockenbachFunction::GockenbachFunction() {
+GockenbachFunction::GockenbachFunction()
+{
// Set the initial point to (0, 0, 1).
initial_point_.zeros(3, 1);
initial_point_[2] = 1;
}
GockenbachFunction::GockenbachFunction(const arma::mat& initial_point) :
- initial_point_(initial_point) {
+ initial_point_(initial_point)
+{
// Nothing to do.
}
-double GockenbachFunction::Evaluate(const arma::mat& coordinates) {
+double GockenbachFunction::Evaluate(const arma::mat& coordinates)
+{
// f(x) = (x_1 - 1)^2 + 2 (x_2 + 2)^2 + 3(x_3 + 3)^2
return ((std::pow(coordinates[0] - 1, 2)) +
(2 * std::pow(coordinates[1] + 2, 2)) +
@@ -86,7 +95,8 @@
}
void GockenbachFunction::Gradient(const arma::mat& coordinates,
- arma::mat& gradient) {
+ arma::mat& gradient)
+{
// f'_x1(x) = 2 (x_1 - 1)
// f'_x2(x) = 4 (x_2 + 2)
// f'_x3(x) = 6 (x_3 + 3)
@@ -98,10 +108,12 @@
}
double GockenbachFunction::EvaluateConstraint(int index,
- const arma::mat& coordinates) {
+ const arma::mat& coordinates)
+{
double constraint = 0;
- switch(index) {
+ switch (index)
+ {
case 0: // g(x) = (x_3 - x_2 - x_1 - 1) = 0
constraint = (coordinates[2] - coordinates[1] - coordinates[0] - 1);
break;
@@ -120,10 +132,12 @@
void GockenbachFunction::GradientConstraint(int index,
const arma::mat& coordinates,
- arma::mat& gradient) {
+ arma::mat& gradient)
+{
gradient.zeros(3, 1);
- switch(index) {
+ switch (index)
+ {
case 0:
// g'_x1(x) = -1
// g'_x2(x) = -1
@@ -150,13 +164,15 @@
{ }
LovaszThetaSDP::LovaszThetaSDP(const arma::mat& edges) : edges_(edges),
- initial_point_(0, 0) {
+ initial_point_(0, 0)
+{
// Calculate V by finding the maximum index in the edges matrix.
vertices_ = max(max(edges_)) + 1;
// Log::Debug << vertices_ << " vertices in graph." << std::endl;
}
-double LovaszThetaSDP::Evaluate(const arma::mat& coordinates) {
+double LovaszThetaSDP::Evaluate(const arma::mat& coordinates)
+{
// The objective is equal to -Tr(ones * X) = -Tr(ones * (R^T * R)).
// This can be simplified into the negative sum of (R^T * R).
// Log::Debug << "Evaluting objective function with coordinates:" << std::endl;
@@ -174,7 +190,8 @@
}
void LovaszThetaSDP::Gradient(const arma::mat& coordinates,
- arma::mat& gradient) {
+ arma::mat& gradient)
+{
// Log::Debug << "Evaluating gradient. " << std::endl;
// The gradient of -Tr(ones * X) is equal to -2 * ones * R
@@ -186,14 +203,17 @@
// std::cout << gradient;
}
-int LovaszThetaSDP::NumConstraints() {
+int LovaszThetaSDP::NumConstraints()
+{
// Each edge is a constraint, and we have the constraint Tr(X) = 1.
return edges_.n_cols + 1;
}
double LovaszThetaSDP::EvaluateConstraint(int index,
- const arma::mat& coordinates) {
- if (index == 0) { // This is the constraint Tr(X) = 1.
+ const arma::mat& coordinates)
+{
+ if (index == 0) // This is the constraint Tr(X) = 1.
+ {
double sum = -1; // Tr(X) - 1 = 0, so we prefix the subtraction.
for (size_t i = 0; i < coordinates.n_cols; i++)
sum += dot(coordinates.col(i), coordinates.col(i));
@@ -214,9 +234,11 @@
void LovaszThetaSDP::GradientConstraint(int index,
const arma::mat& coordinates,
- arma::mat& gradient) {
+ arma::mat& gradient)
+{
// Log::Debug << "Gradient of constraint " << index << " is " << std::endl;
- if (index == 0) { // This is the constraint Tr(X) = 1.
+ if (index == 0) // This is the constraint Tr(X) = 1.
+ {
gradient = 2 * coordinates; // d/dX (Tr(R^T R)) = 2 R.
// std::cout << gradient;
return;
@@ -244,7 +266,8 @@
// std::cout << gradient;
}
-const arma::mat& LovaszThetaSDP::GetInitialPoint() {
+const arma::mat& LovaszThetaSDP::GetInitialPoint()
+{
if (initial_point_.n_rows != 0 && initial_point_.n_cols != 0)
return initial_point_; // It has already been calculated.
@@ -275,8 +298,10 @@
// Now we set the entries of the initial matrix according to the formula given
// in Section 4 of Monteiro and Burer.
- for (size_t i = 0; i < r; i++) {
- for (size_t j = 0; j < (size_t) vertices_; j++) {
+ for (size_t i = 0; i < r; i++)
+ {
+ for (size_t j = 0; j < (size_t) vertices_; j++)
+ {
if (i == j)
initial_point_(i, j) = sqrt(1.0 / r) + sqrt(1.0 / (vertices_ * m));
else
Modified: mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.hpp
===================================================================
--- mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.hpp 2011-11-23 17:56:15 UTC (rev 10359)
+++ mlpack/trunk/src/mlpack/core/optimizers/aug_lagrangian/aug_lagrangian_test_functions.hpp 2011-11-23 20:34:04 UTC (rev 10360)
@@ -1,18 +1,19 @@
-/***
- * @file aug_lagrangian_test.h
+/**
+ * @file aug_lagrangian_test_functions.hpp
+ * @author Ryan Curtin
*
- * Define a test function for the augmented Lagrangian method.
+ * Define test functions for the augmented Lagrangian method.
*/
-#ifndef __MLPACK_CORE_OPTIMIZERS_AUG_LAGRANGIAN_AUG_LAGRANGIAN_TEST_FUNCTIONS_HPP
-#define __MLPACK_CORE_OPTIMIZERS_AUG_LAGRANGIAN_AUG_LAGRANGIAN_TEST_FUNCTIONS_HPP
+#ifndef __MLPACK_CORE_OPTIMIZERS_AUG_LAGRANGIAN_TEST_FUNCTIONS_HPP
+#define __MLPACK_CORE_OPTIMIZERS_AUG_LAGRANGIAN_TEST_FUNCTIONS_HPP
#include <mlpack/core.h>
namespace mlpack {
namespace optimization {
-/***
+/**
* This function is taken from "Practical Mathematical Optimization" (Snyman),
* section 5.3.8 ("Application of the Augmented Lagrangian Method"). It has
* only one constraint.
@@ -20,7 +21,8 @@
* The minimum that satisfies the constraint is x = [1, 4], with an objective
* value of 70.
*/
-class AugLagrangianTestFunction {
+class AugLagrangianTestFunction
+{
public:
AugLagrangianTestFunction();
AugLagrangianTestFunction(const arma::mat& initial_point);
@@ -41,7 +43,7 @@
arma::mat initial_point_;
};
-/***
+/**
* This function is taken from M. Gockenbach's lectures on general nonlinear
* programs, found at:
* http://www.math.mtu.edu/~msgocken/ma5630spring2003/lectures/nlp/nlp.pdf
@@ -52,7 +54,8 @@
* The minimum that satisfies the two constraints is given as
* x = [0.12288, -1.1078, 0.015100], with an objective value of about 29.634.
*/
-class GockenbachFunction {
+class GockenbachFunction
+{
public:
GockenbachFunction();
GockenbachFunction(const arma::mat& initial_point);
@@ -73,10 +76,9 @@
arma::mat initial_point_;
};
-}; // namespace optimization
-}; // namespace mlpack
-/***
+
+/**
* This function is the Lovasz-Theta semidefinite program, as implemented in the
* following paper:
*
@@ -96,11 +98,12 @@
* coordinates given to the Evaluate(), Gradient(), EvaluateConstraint(), and
* GradientConstraint() functions.
*/
-class LovaszThetaSDP {
+class LovaszThetaSDP
+{
public:
LovaszThetaSDP();
- /***
+ /**
* Initialize the Lovasz-Theta SDP with the given set of edges. The edge
* matrix should consist of rows of two dimensions, where dimension 0 is the
* first vertex of the edge and dimension 1 is the second edge (or vice versa,
@@ -129,4 +132,7 @@
arma::mat initial_point_;
};
-#endif // __MLPACK_CORE_OPTIMIZERS_AUG_LAGRANGIAN_AUG_LAGRANGIAN_TEST_FUNCTIONS_HPP
+}; // namespace optimization
+}; // namespace mlpack
+
+#endif // __MLPACK_CORE_OPTIMIZERS_AUG_LAGRANGIAN_TEST_FUNCTIONS_HPP
More information about the mlpack-svn
mailing list