[mlpack-git] master: Many syntax details. (e06cfc8)

gitdub at mlpack.org gitdub at mlpack.org
Tue Aug 16 14:32:28 EDT 2016


Repository : https://github.com/mlpack/mlpack
On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/0f4b25acd6aaa14294c044874ba6cc0751712baa...0a19d07bd39e6223991976474bc79671ba8aa0f0

>---------------------------------------------------------------

commit e06cfc8f950430060506bc895a94f69b1f3fb29d
Author: MarcosPividori <marcos.pividori at gmail.com>
Date:   Tue Aug 16 15:32:28 2016 -0300

    Many syntax details.


>---------------------------------------------------------------

e06cfc8f950430060506bc895a94f69b1f3fb29d
 src/mlpack/core/data/load_impl.hpp                 |  4 ++--
 .../tree/rectangle_tree/r_star_tree_split_impl.hpp |  8 +++----
 .../core/tree/rectangle_tree/x_tree_split_impl.hpp |  2 +-
 .../tree/spill_tree/dual_tree_traverser_impl.hpp   |  6 ++---
 .../tree/spill_tree/single_tree_traverser_impl.hpp |  2 +-
 src/mlpack/core/util/backtrace.cpp                 |  4 ++--
 .../simple_tolerance_termination.hpp               |  8 +++----
 .../validation_RMSE_termination.hpp                | 10 ++++----
 .../svd_complete_incremental_learning.hpp          |  8 +++----
 .../svd_incomplete_incremental_learning.hpp        |  6 ++---
 .../ann/activation_functions/logistic_function.hpp |  2 +-
 src/mlpack/methods/ann/layer/dropconnect_layer.hpp | 28 +++++++++++-----------
 src/mlpack/methods/cf/svd_wrapper_impl.hpp         |  4 ++--
 src/mlpack/methods/hmm/hmm_regression_impl.hpp     |  2 +-
 src/mlpack/methods/pca/pca_main.cpp                |  4 ++--
 .../regularized_svd/regularized_svd_function.cpp   |  2 +-
 src/mlpack/tests/adaboost_test.cpp                 |  2 +-
 src/mlpack/tests/rectangle_tree_test.cpp           |  4 ++--
 18 files changed, 53 insertions(+), 53 deletions(-)

diff --git a/src/mlpack/core/data/load_impl.hpp b/src/mlpack/core/data/load_impl.hpp
index 45266b5..65a07d5 100644
--- a/src/mlpack/core/data/load_impl.hpp
+++ b/src/mlpack/core/data/load_impl.hpp
@@ -420,7 +420,7 @@ bool Load(const std::string& filename,
     stream.close();
     stream.open(filename, std::fstream::in);
 
-    if(transpose)
+    if (transpose)
     {
       std::vector<std::vector<std::string>> tokensArray;
       std::vector<std::string> tokens;
@@ -430,7 +430,7 @@ bool Load(const std::string& filename,
         std::getline(stream, buffer, '\n');
         Tokenizer lineTok(buffer, sep);
         tokens = details::ToTokens(lineTok);
-        if(tokens.size() == cols)
+        if (tokens.size() == cols)
         {
           tokensArray.emplace_back(std::move(tokens));
         }
diff --git a/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp b/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp
index 7d6224f..862bec6 100644
--- a/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp
+++ b/src/mlpack/core/tree/rectangle_tree/r_star_tree_split_impl.hpp
@@ -275,7 +275,7 @@ bool RStarTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector<bool>& relevels
 
  /*
   // If we haven't yet reinserted on this level, we try doing so now.
-  if(relevels[tree->TreeDepth()]) {
+  if (relevels[tree->TreeDepth()]) {
     relevels[tree->TreeDepth()] = false;
     // We sort the points by decreasing centroid to centroid distance.
     // We then remove the first p entries and reinsert them at the root.
@@ -283,7 +283,7 @@ bool RStarTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector<bool>& relevels
     while(root->Parent() != NULL)
       root = root->Parent();
     size_t p = tree->MaxNumChildren() * 0.3; // The paper says this works the best.
-    if(p == 0) {
+    if (p == 0) {
       SplitNonLeafNode(tree, relevels);
       return false;
     }
@@ -313,10 +313,10 @@ bool RStarTreeSplit::SplitNonLeafNode(TreeType *tree,std::vector<bool>& relevels
 
     // If we went below min fill, delete this node and reinsert all children.
     //SOMETHING IS WRONG.  SHOULD NOT GO BELOW MIN FILL.
-//    if(!startBelowMinFill && tree->NumChildren() < tree->MinNumChildren())
+//    if (!startBelowMinFill && tree->NumChildren() < tree->MinNumChildren())
 //    std::cout<<"MINFILLERROR "<< p << ", " << tree->NumChildren() << "; " << tree->MaxNumChildren()<<std::endl;
 
-//    if(tree->NumChildren() < tree->MinNumChildren()) {
+//    if (tree->NumChildren() < tree->MinNumChildren()) {
 //      std::vector<RectangleTree<RStarTreeSplit, DescentType, StatisticType, MatType>*> rmNodes(tree->NumChildren());
 //      for(size_t i = 0; i < rmNodes.size(); i++) {
 //        rmNodes[i] = tree->Children()[i];
diff --git a/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp b/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp
index 65038e8..28e74cb 100644
--- a/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp
+++ b/src/mlpack/core/tree/rectangle_tree/x_tree_split_impl.hpp
@@ -94,7 +94,7 @@ void XTreeSplit::SplitLeafNode(TreeType *tree,std::vector<bool>& relevels)
     }
 
 //    // If we went below min fill, delete this node and reinsert all points.
-//    if(tree->Count() < tree->MinLeafSize()) {
+//    if (tree->Count() < tree->MinLeafSize()) {
 //      std::vector<int> pointIndices(tree->Count());
 //      for(size_t i = 0; i < tree->Count(); i++) {
 //        pointIndices[i] = tree->Points()[i];
diff --git a/src/mlpack/core/tree/spill_tree/dual_tree_traverser_impl.hpp b/src/mlpack/core/tree/spill_tree/dual_tree_traverser_impl.hpp
index 3e4e7f1..c85d211 100644
--- a/src/mlpack/core/tree/spill_tree/dual_tree_traverser_impl.hpp
+++ b/src/mlpack/core/tree/spill_tree/dual_tree_traverser_impl.hpp
@@ -155,7 +155,7 @@ DualTreeTraverser<RuleType>::Traverse(
       }
       else
       {
-        if(referenceNode.Overlap())
+        if (referenceNode.Overlap())
         {
           // If referenceNode is a overlapping node and we can't decide which
           // child node to traverse, this means that queryNode is at both sides
@@ -259,7 +259,7 @@ DualTreeTraverser<RuleType>::Traverse(
       }
       else
       {
-        if(referenceNode.Overlap())
+        if (referenceNode.Overlap())
         {
           // If referenceNode is a overlapping node and we can't decide which
           // child node to traverse, this means that queryNode.Left() is at both
@@ -348,7 +348,7 @@ DualTreeTraverser<RuleType>::Traverse(
       }
       else
       {
-        if(referenceNode.Overlap())
+        if (referenceNode.Overlap())
         {
           // If referenceNode is a overlapping node and we can't decide which
           // child node to traverse, this means that queryNode.Right() is at
diff --git a/src/mlpack/core/tree/spill_tree/single_tree_traverser_impl.hpp b/src/mlpack/core/tree/spill_tree/single_tree_traverser_impl.hpp
index 8d50b36..3aca649 100644
--- a/src/mlpack/core/tree/spill_tree/single_tree_traverser_impl.hpp
+++ b/src/mlpack/core/tree/spill_tree/single_tree_traverser_impl.hpp
@@ -50,7 +50,7 @@ SingleTreeTraverser<RuleType>::Traverse(
   }
   else
   {
-    if(referenceNode.Overlap())
+    if (referenceNode.Overlap())
     {
       // If referenceNode is a overlapping node we do defeatist search. In this
       // case, it is enough to calculate the score of only one child node. As we
diff --git a/src/mlpack/core/util/backtrace.cpp b/src/mlpack/core/util/backtrace.cpp
index ba1c1af..582177b 100644
--- a/src/mlpack/core/util/backtrace.cpp
+++ b/src/mlpack/core/util/backtrace.cpp
@@ -91,7 +91,7 @@ void Backtrace::GetAddress(int maxDepth)
     Dl_info addressHandler;
 
     //No backtrace will be printed if no compile flags: -g -rdynamic
-    if(TRACE_CONDITION_1)
+    if (TRACE_CONDITION_1)
     {
       return ;
     }
@@ -173,7 +173,7 @@ std::string Backtrace::ToString()
   std::ostringstream lineOss;
   std::ostringstream it;
 
-  if(stack.size() <= 0)
+  if (stack.size() <= 0)
   {
     stackStr = "Cannot give backtrace because program was compiled";
     stackStr += " without: -g -rdynamic\nFor a backtrace,";
diff --git a/src/mlpack/methods/amf/termination_policies/simple_tolerance_termination.hpp b/src/mlpack/methods/amf/termination_policies/simple_tolerance_termination.hpp
index 836d24a..dbb2c38 100644
--- a/src/mlpack/methods/amf/termination_policies/simple_tolerance_termination.hpp
+++ b/src/mlpack/methods/amf/termination_policies/simple_tolerance_termination.hpp
@@ -78,7 +78,7 @@ class SimpleToleranceTermination
         for(size_t j = 0;j < m;j++)
         {
             double temp = 0;
-            if((temp = (*V)(i,j)) != 0)
+            if ((temp = (*V)(i,j)) != 0)
             {
                 temp = (temp - WH(i, j));
                 temp = temp * temp;
@@ -118,18 +118,18 @@ class SimpleToleranceTermination
       // initialize successive drop count
       reverseStepCount = 0;
       // if residue is droped below minimum scrap stored values
-      if(residue <= c_indexOld && isCopy == true)
+      if (residue <= c_indexOld && isCopy == true)
       {
         isCopy = false;
       }
     }
 
     // check if termination criterion is met
-    if(reverseStepCount == reverseStepTolerance || iteration > maxIterations)
+    if (reverseStepCount == reverseStepTolerance || iteration > maxIterations)
     {
       // if stored values are present replace them with current value as they
       // represent the minimum residue point
-      if(isCopy)
+      if (isCopy)
       {
         W = this->W;
         H = this->H;
diff --git a/src/mlpack/methods/amf/termination_policies/validation_RMSE_termination.hpp b/src/mlpack/methods/amf/termination_policies/validation_RMSE_termination.hpp
index b967552..2f80ce5 100644
--- a/src/mlpack/methods/amf/termination_policies/validation_RMSE_termination.hpp
+++ b/src/mlpack/methods/amf/termination_policies/validation_RMSE_termination.hpp
@@ -134,10 +134,10 @@ class ValidationRMSETermination
     iteration++;
 
     // if RMSE tolerance is not satisfied
-    if((rmseOld - rmse) / rmseOld < tolerance && iteration > 4)
+    if ((rmseOld - rmse) / rmseOld < tolerance && iteration > 4)
     {
       // check if this is a first of successive drops
-      if(reverseStepCount == 0 && isCopy == false)
+      if (reverseStepCount == 0 && isCopy == false)
       {
         // store a copy of W and H matrix
         isCopy = true;
@@ -156,18 +156,18 @@ class ValidationRMSETermination
       // initialize successive drop count
       reverseStepCount = 0;
       // if residue is droped below minimum scrap stored values
-      if(rmse <= c_indexOld && isCopy == true)
+      if (rmse <= c_indexOld && isCopy == true)
       {
         isCopy = false;
       }
     }
 
     // check if termination criterion is met
-    if(reverseStepCount == reverseStepTolerance || iteration > maxIterations)
+    if (reverseStepCount == reverseStepTolerance || iteration > maxIterations)
     {
       // if stored values are present replace them with current value as they
       // represent the minimum residue point
-      if(isCopy)
+      if (isCopy)
       {
         W = this->W;
         H = this->H;
diff --git a/src/mlpack/methods/amf/update_rules/svd_complete_incremental_learning.hpp b/src/mlpack/methods/amf/update_rules/svd_complete_incremental_learning.hpp
index 2ce9b16..56b2522 100644
--- a/src/mlpack/methods/amf/update_rules/svd_complete_incremental_learning.hpp
+++ b/src/mlpack/methods/amf/update_rules/svd_complete_incremental_learning.hpp
@@ -198,10 +198,10 @@ class SVDCompleteIncrementalLearning<arma::sp_mat>
                       arma::mat& W,
                       const arma::mat& H)
   {
-    if(!isStart) (*it)++;
+    if (!isStart) (*it)++;
     else isStart = false;
 
-    if(*it == V.end())
+    if (*it == V.end())
     {
         delete it;
         it = new arma::sp_mat::const_iterator(V.begin());
@@ -215,7 +215,7 @@ class SVDCompleteIncrementalLearning<arma::sp_mat>
 
     deltaW += (**it - arma::dot(W.row(currentItemIndex), H.col(currentUserIndex)))
                                       * arma::trans(H.col(currentUserIndex));
-    if(kw != 0) deltaW -= kw * W.row(currentItemIndex);
+    if (kw != 0) deltaW -= kw * W.row(currentItemIndex);
 
     W.row(currentItemIndex) += u*deltaW;
   }
@@ -243,7 +243,7 @@ class SVDCompleteIncrementalLearning<arma::sp_mat>
 
     deltaH += (**it - arma::dot(W.row(currentItemIndex), H.col(currentUserIndex)))
                                         * arma::trans(W.row(currentItemIndex));
-    if(kh != 0) deltaH -= kh * H.col(currentUserIndex);
+    if (kh != 0) deltaH -= kh * H.col(currentUserIndex);
 
     H.col(currentUserIndex) += u * deltaH;
   }
diff --git a/src/mlpack/methods/amf/update_rules/svd_incomplete_incremental_learning.hpp b/src/mlpack/methods/amf/update_rules/svd_incomplete_incremental_learning.hpp
index 65d07fc..a66ed41 100644
--- a/src/mlpack/methods/amf/update_rules/svd_incomplete_incremental_learning.hpp
+++ b/src/mlpack/methods/amf/update_rules/svd_incomplete_incremental_learning.hpp
@@ -168,7 +168,7 @@ inline void SVDIncompleteIncrementalLearning::
     size_t i = it.row();
     deltaW.row(i) += (val - arma::dot(W.row(i), H.col(currentUserIndex))) *
                                          arma::trans(H.col(currentUserIndex));
-    if(kw != 0) deltaW.row(i) -= kw * W.row(i);
+    if (kw != 0) deltaW.row(i) -= kw * W.row(i);
   }
 
   W += u*deltaW;
@@ -188,11 +188,11 @@ inline void SVDIncompleteIncrementalLearning::
   {
     double val = *it;
     size_t i = it.row();
-    if((val = V(i, currentUserIndex)) != 0)
+    if ((val = V(i, currentUserIndex)) != 0)
       deltaH += (val - arma::dot(W.row(i), H.col(currentUserIndex))) *
                                                     arma::trans(W.row(i));
   }
-  if(kh != 0) deltaH -= kh * H.col(currentUserIndex);
+  if (kh != 0) deltaH -= kh * H.col(currentUserIndex);
 
   H.col(currentUserIndex++) += u * deltaH;
   currentUserIndex = currentUserIndex % V.n_cols;
diff --git a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
index 626d9ea..9105473 100644
--- a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
@@ -33,7 +33,7 @@ class LogisticFunction
   template<typename eT>
   static double fn(const eT x)
   {
-    if(x < arma::Datum<eT>::log_max)
+    if (x < arma::Datum<eT>::log_max)
     {
       if (x > -arma::Datum<eT>::log_max)
         return 1.0 /  (1.0 + std::exp(-x));
diff --git a/src/mlpack/methods/ann/layer/dropconnect_layer.hpp b/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
index 651a8a7..54867cc 100644
--- a/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
+++ b/src/mlpack/methods/ann/layer/dropconnect_layer.hpp
@@ -108,7 +108,7 @@ class DropConnectLayer
     // (during testing).
     if (deterministic)
     {
-      if(uselayer)
+      if (uselayer)
       {
         baseLayer.Forward(input, output);
       }
@@ -119,7 +119,7 @@ class DropConnectLayer
     }
     else
     {
-      if(uselayer)
+      if (uselayer)
       {
         // Scale with input / (1 - ratio) and set values to zero with
         // probability ratio.
@@ -162,7 +162,7 @@ class DropConnectLayer
   template<typename DataType>
   void Backward(const DataType& input, const DataType& gy, DataType& g)
   {
-    if(uselayer)
+    if (uselayer)
     {
       baseLayer.Backward(input, gy, g);
     }
@@ -184,7 +184,7 @@ class DropConnectLayer
                 const arma::Mat<eT>& d,
                 GradientDataType& g)
   {
-    if(uselayer)
+    if (uselayer)
     {
       baseLayer.Gradient(input, d, g);
 
@@ -203,7 +203,7 @@ class DropConnectLayer
   //! Get the weights.
   OutputDataType const& Weights() const
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.Weights();
 
     return weights;
@@ -212,7 +212,7 @@ class DropConnectLayer
   //! Modify the weights.
   OutputDataType& Weights()
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.Weights();
 
     return weights;
@@ -221,7 +221,7 @@ class DropConnectLayer
   //! Get the input parameter.
   InputDataType &InputParameter() const
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.InputParameter();
 
     return inputParameter;
@@ -230,7 +230,7 @@ class DropConnectLayer
   //! Modify the input parameter.
   InputDataType &InputParameter()
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.InputParameter();
 
     return inputParameter;
@@ -239,7 +239,7 @@ class DropConnectLayer
   //! Get the output parameter.
   OutputDataType &OutputParameter() const
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.OutputParameter();
 
     return outputParameter;
@@ -248,7 +248,7 @@ class DropConnectLayer
   //! Modify the output parameter.
   OutputDataType &OutputParameter()
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.OutputParameter();
 
     return outputParameter;
@@ -257,7 +257,7 @@ class DropConnectLayer
   //! Get the delta.
   OutputDataType const& Delta() const
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.Delta();
 
     return delta;
@@ -266,7 +266,7 @@ class DropConnectLayer
   //! Modify the delta.
   OutputDataType& Delta()
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.Delta();
 
     return delta;
@@ -275,7 +275,7 @@ class DropConnectLayer
   //! Get the gradient.
   OutputDataType const& Gradient() const
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.Gradient();
 
     return gradient;
@@ -284,7 +284,7 @@ class DropConnectLayer
   //! Modify the gradient.
   OutputDataType& Gradient()
   {
-    if(uselayer)
+    if (uselayer)
       return baseLayer.Gradient();
 
     return gradient;
diff --git a/src/mlpack/methods/cf/svd_wrapper_impl.hpp b/src/mlpack/methods/cf/svd_wrapper_impl.hpp
index 53880fb..47255c6 100644
--- a/src/mlpack/methods/cf/svd_wrapper_impl.hpp
+++ b/src/mlpack/methods/cf/svd_wrapper_impl.hpp
@@ -55,7 +55,7 @@ double mlpack::cf::SVDWrapper<Factorizer>::Apply(const arma::mat& V,
                          arma::mat& H) const
 {
   // check if the given rank is valid
-  if(r > V.n_rows || r > V.n_cols)
+  if (r > V.n_rows || r > V.n_cols)
   {
     Log::Info << "Rank " << r << ", given for decomposition is invalid." << std::endl;
     r = (V.n_rows > V.n_cols) ? V.n_cols : V.n_rows;
@@ -94,7 +94,7 @@ double mlpack::cf::SVDWrapper<DummyClass>::Apply(const arma::mat& V,
                                      arma::mat& H) const
 {
   // check if the given rank is valid
-  if(r > V.n_rows || r > V.n_cols)
+  if (r > V.n_rows || r > V.n_cols)
   {
     Log::Info << "Rank " << r << ", given for decomposition is invalid." << std::endl;
     r = (V.n_rows > V.n_cols) ? V.n_cols : V.n_rows;
diff --git a/src/mlpack/methods/hmm/hmm_regression_impl.hpp b/src/mlpack/methods/hmm/hmm_regression_impl.hpp
index 90f9d1c..6f4e0e7 100644
--- a/src/mlpack/methods/hmm/hmm_regression_impl.hpp
+++ b/src/mlpack/methods/hmm/hmm_regression_impl.hpp
@@ -101,7 +101,7 @@ void HMMRegression::Filter(const arma::mat& predictors,
   Forward(predictors, responses, scales, forwardProb);
 
   // Propagate state, predictors ahead
-  if(ahead != 0) {
+  if (ahead != 0) {
     forwardProb = pow(transition, ahead)*forwardProb;
     forwardProb = forwardProb.cols(0, forwardProb.n_cols-ahead-1);
   }
diff --git a/src/mlpack/methods/pca/pca_main.cpp b/src/mlpack/methods/pca/pca_main.cpp
index 981eeac..cc77e86 100644
--- a/src/mlpack/methods/pca/pca_main.cpp
+++ b/src/mlpack/methods/pca/pca_main.cpp
@@ -108,11 +108,11 @@ int main(int argc, char** argv)
   {
     RunPCA<ExactSVDPolicy>(dataset, newDimension, scale, varToRetain);
   }
-  else if(decompositionMethod == "randomized")
+  else if (decompositionMethod == "randomized")
   {
     RunPCA<RandomizedSVDPolicy>(dataset, newDimension, scale, varToRetain);
   }
-  else if(decompositionMethod == "quic")
+  else if (decompositionMethod == "quic")
   {
     RunPCA<QUICSVDPolicy>(dataset, newDimension, scale, varToRetain);
   }
diff --git a/src/mlpack/methods/regularized_svd/regularized_svd_function.cpp b/src/mlpack/methods/regularized_svd/regularized_svd_function.cpp
index 32d7c0e..8c263f1 100644
--- a/src/mlpack/methods/regularized_svd/regularized_svd_function.cpp
+++ b/src/mlpack/methods/regularized_svd/regularized_svd_function.cpp
@@ -143,7 +143,7 @@ double SGD<mlpack::svd::RegularizedSVDFunction>::Optimize(arma::mat& parameters)
   for(size_t i = 1; i != maxIterations; i++, currentFunction++)
   {
     // Is this iteration the start of a sequence?
-    if((currentFunction % numFunctions) == 0)
+    if ((currentFunction % numFunctions) == 0)
     {
       // Reset the counter variables.
       overallObjective = 0;
diff --git a/src/mlpack/tests/adaboost_test.cpp b/src/mlpack/tests/adaboost_test.cpp
index e164958..6a04e6e 100644
--- a/src/mlpack/tests/adaboost_test.cpp
+++ b/src/mlpack/tests/adaboost_test.cpp
@@ -186,7 +186,7 @@ BOOST_AUTO_TEST_CASE(WeakLearnerErrorVertebralColumn)
 
   size_t countError = 0;
   for (size_t i = 0; i < labels.n_cols; i++)
-    if(labels(i) != predictedLabels(i))
+    if (labels(i) != predictedLabels(i))
       countError++;
   double error = (double) countError / labels.n_cols;
 
diff --git a/src/mlpack/tests/rectangle_tree_test.cpp b/src/mlpack/tests/rectangle_tree_test.cpp
index 80e6fc7..c90ff37 100644
--- a/src/mlpack/tests/rectangle_tree_test.cpp
+++ b/src/mlpack/tests/rectangle_tree_test.cpp
@@ -185,9 +185,9 @@ void CheckExactContainment(const TreeType& tree)
       double max = -1.0 * DBL_MAX;
       for (size_t j = 0; j < tree.NumChildren(); j++)
       {
-        if(tree.Child(j).Bound()[i].Lo() < min)
+        if (tree.Child(j).Bound()[i].Lo() < min)
           min = tree.Child(j).Bound()[i].Lo();
-        if(tree.Child(j).Bound()[i].Hi() > max)
+        if (tree.Child(j).Bound()[i].Hi() > max)
           max = tree.Child(j).Bound()[i].Hi();
       }
 

More information about the mlpack-git mailing list