[mlpack-git] mlpack-2.0.x: Fix compiler warnings (e348366)

gitdub at mlpack.org gitdub at mlpack.org
Wed Jul 20 15:23:23 EDT 2016


Repository : https://github.com/mlpack/mlpack
On branch  : mlpack-2.0.x
Link       : https://github.com/mlpack/mlpack/compare/e434bc4ac042534529a2a440a44d86935b4d7164...fc4195d27bb9e642356a384d1fa6fe10cbdf89a6

>---------------------------------------------------------------

commit e3483663574a506ba12f9a5e0be57399e59e5ead
Author: Mikhail Lozhnikov <lozhnikovma at gmail.com>
Date:   Tue Jul 12 15:46:39 2016 +0300

    Fix compiler warnings


>---------------------------------------------------------------

e3483663574a506ba12f9a5e0be57399e59e5ead
 .../tree/rectangle_tree/r_plus_tree_split_impl.hpp | 363 +++++++++++++++++++++
 src/mlpack/methods/adaboost/adaboost_impl.hpp      |   2 +-
 src/mlpack/methods/ann/layer/one_hot_layer.hpp     |  91 ++++++
 .../methods/ann/layer/vr_class_reward_layer.hpp    | 166 ++++++++++
 src/mlpack/methods/perceptron/perceptron_impl.hpp  |   2 +-
 src/mlpack/methods/radical/radical.cpp             |   2 +-
 6 files changed, 623 insertions(+), 3 deletions(-)
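
The adaboost, perceptron, and radical changes below all silence the same
GCC -Wmaybe-uninitialized warning: an arma::uword index variable is passed
by reference to Armadillo's max()/min() before it has ever been assigned.
A minimal sketch of the pattern (illustrative only, not part of the
commit):

    #include <armadillo>

    int main()
    {
      arma::vec values("0.3 0.9 0.1");

      // Before: 'arma::uword maxIndex;' is only written inside max(),
      // which the compiler cannot always prove, hence the warning.
      // After: initializing to 0 is warning-clean and behaviorally
      // identical, since max() always overwrites the index.
      arma::uword maxIndex = 0;
      values.max(maxIndex); // maxIndex is now 1.

      return static_cast<int>(maxIndex);
    }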

diff --git a/src/mlpack/core/tree/rectangle_tree/r_plus_tree_split_impl.hpp b/src/mlpack/core/tree/rectangle_tree/r_plus_tree_split_impl.hpp
new file mode 100644
index 0000000..67025d5
--- /dev/null
+++ b/src/mlpack/core/tree/rectangle_tree/r_plus_tree_split_impl.hpp
@@ -0,0 +1,363 @@
+/**
+ * @file r_plus_tree_split_impl.hpp
+ * @author Mikhail Lozhnikov
+ *
+ * Implementation of the RPlusTreeSplit class to split a RectangleTree.
+ */
+#ifndef MLPACK_CORE_TREE_RECTANGLE_TREE_R_PLUS_TREE_SPLIT_IMPL_HPP
+#define MLPACK_CORE_TREE_RECTANGLE_TREE_R_PLUS_TREE_SPLIT_IMPL_HPP
+
+#include "r_plus_tree_split.hpp"
+#include "rectangle_tree.hpp"
+#include "r_plus_plus_tree_auxiliary_information.hpp"
+#include "r_plus_tree_split_policy.hpp"
+#include "r_plus_plus_tree_split_policy.hpp"
+
+namespace mlpack {
+namespace tree {
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+void RPlusTreeSplit<SplitPolicyType, SweepType>::
+SplitLeafNode(TreeType* tree, std::vector<bool>& relevels)
+{
+  typedef typename TreeType::ElemType ElemType;
+
+  if (tree->Count() == 1)
+  {
+    // Check if an intermediate node was added during the insertion process,
+    // i.e. we could not enlarge a node of the R+ tree, so one of the
+    // intermediate nodes may have overflowed.
+    TreeType* node = tree->Parent();
+
+    while (node != NULL)
+    {
+      if (node->NumChildren() == node->MaxNumChildren() + 1)
+      {
+        // Split the overflowed node.
+        RPlusTreeSplit::SplitNonLeafNode(node, relevels);
+        return;
+      }
+      node = node->Parent();
+    }
+    return;
+  }
+  else if (tree->Count() <= tree->MaxLeafSize())
+  {
+    return;
+  }
+
+  // If we are splitting the root node, we will do things differently so that
+  // the constructor and other methods don't confuse the end user by giving
+  // an address of another node.
+  if (tree->Parent() == NULL)
+  {
+    // We actually want to copy this way.  Pointers and everything.
+    TreeType* copy = new TreeType(*tree, false);
+    copy->Parent() = tree;
+    tree->Count() = 0;
+    tree->NullifyData();
+    // Because this was a leaf node, numChildren must be 0.
+    tree->children[(tree->NumChildren())++] = copy;
+    assert(tree->NumChildren() == 1);
+
+    RPlusTreeSplit::SplitLeafNode(copy, relevels);
+    return;
+  }
+
+  size_t cutAxis = tree->Bound().Dim();
+  ElemType cut = std::numeric_limits<ElemType>::lowest();
+
+  // Try to find a partition of the node.
+  if (!PartitionNode(tree, cutAxis, cut))
+    return;
+
+  // If we could not find a suitable partition.
+  if (cutAxis == tree->Bound().Dim())
+  {
+    tree->MaxLeafSize()++;
+    tree->points.resize(tree->MaxLeafSize() + 1);
+    Log::Warn << "Could not find an acceptable partition. "
+        "The size of the node will be increased." << std::endl;
+    return;
+  }
+
+  TreeType* treeOne = new TreeType(tree->Parent());
+  TreeType* treeTwo = new TreeType(tree->Parent());
+  treeOne->MinLeafSize() = 0;
+  treeOne->MinNumChildren() = 0;
+  treeTwo->MinLeafSize() = 0;
+  treeTwo->MinNumChildren() = 0;
+
+  // Split the node into two new nodes.
+  SplitLeafNodeAlongPartition(tree, treeOne, treeTwo, cutAxis, cut);
+
+  TreeType* parent = tree->Parent();
+  size_t i = 0;
+  while (parent->children[i] != tree)
+    i++;
+
+  assert(i < parent->NumChildren());
+
+  // Insert two new nodes to the tree.
+  parent->children[i] = treeOne;
+  parent->children[parent->NumChildren()++] = treeTwo;
+
+  assert(parent->NumChildren() <= parent->MaxNumChildren() + 1);
+
+  // Propagate the split upward if necessary.
+  if (parent->NumChildren() == parent->MaxNumChildren() + 1)
+    RPlusTreeSplit::SplitNonLeafNode(parent, relevels);
+
+  tree->SoftDelete();
+}
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+bool RPlusTreeSplit<SplitPolicyType, SweepType>::
+SplitNonLeafNode(TreeType* tree, std::vector<bool>& relevels)
+{
+  typedef typename TreeType::ElemType ElemType;
+  // If we are splitting the root node, we will do things differently so that
+  // the constructor and other methods don't confuse the end user by giving
+  // an address of another node.
+  if (tree->Parent() == NULL)
+  {
+    // We actually want to copy this way.  Pointers and everything.
+    TreeType* copy = new TreeType(*tree, false);
+
+    copy->Parent() = tree;
+    tree->NumChildren() = 0;
+    tree->NullifyData();
+    tree->children[(tree->NumChildren())++] = copy;
+
+    RPlusTreeSplit::SplitNonLeafNode(copy, relevels);
+    return true;
+  }
+  size_t cutAxis = tree->Bound().Dim();
+  ElemType cut = std::numeric_limits<ElemType>::lowest();
+
+  // Try to find a partition of the node.
+  if (!PartitionNode(tree, cutAxis, cut))
+    return false;
+
+  // If we could not find a suitable partition.
+  if (cutAxis == tree->Bound().Dim())
+  {
+    tree->MaxNumChildren()++;
+    tree->children.resize(tree->MaxNumChildren() + 1);
+    Log::Warn << "Could not find an acceptable partition. "
+        "The size of the node will be increased." << std::endl;
+    return false;
+  }
+
+  TreeType* treeOne = new TreeType(tree->Parent());
+  TreeType* treeTwo = new TreeType(tree->Parent());
+  treeOne->MinLeafSize() = 0;
+  treeOne->MinNumChildren() = 0;
+  treeTwo->MinLeafSize() = 0;
+  treeTwo->MinNumChildren() = 0;
+
+  // Split the node into two new nodes.
+  SplitNonLeafNodeAlongPartition(tree, treeOne, treeTwo, cutAxis, cut);
+
+  TreeType* parent = tree->Parent();
+  size_t i = 0;
+  while (parent->children[i] != tree)
+    i++;
+
+  assert(i < parent->NumChildren());
+
+  // Insert two new nodes to the tree.
+  parent->children[i] = treeOne;
+  parent->children[parent->NumChildren()++] = treeTwo;
+
+  tree->SoftDelete();
+
+  assert(parent->NumChildren() <= parent->MaxNumChildren() + 1);
+
+  // Propagate the split upward if necessary.
+  if (parent->NumChildren() == parent->MaxNumChildren() + 1)
+    RPlusTreeSplit::SplitNonLeafNode(parent, relevels);
+
+  return false;
+}
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+void RPlusTreeSplit<SplitPolicyType, SweepType>::SplitLeafNodeAlongPartition(
+    TreeType* tree,
+    TreeType* treeOne,
+    TreeType* treeTwo,
+    const size_t cutAxis,
+    const typename TreeType::ElemType cut)
+{
+  // Split the auxiliary information.
+  tree->AuxiliaryInfo().SplitAuxiliaryInfo(treeOne, treeTwo, cutAxis, cut);
+
+  // Insert points into the corresponding subtree.
+  for (size_t i = 0; i < tree->NumPoints(); i++)
+  {
+    if (tree->Dataset().col(tree->Point(i))[cutAxis] <= cut)
+    {
+      treeOne->Point(treeOne->Count()++) = tree->Point(i);
+      treeOne->Bound() |= tree->Dataset().col(tree->Point(i));
+    }
+    else
+    {
+      treeTwo->Point(treeTwo->Count()++) = tree->Point(i);
+      treeTwo->Bound() |= tree->Dataset().col(tree->Point(i));
+    }
+  }
+  // Update the number of descendants.
+  treeOne->numDescendants = treeOne->Count();
+  treeTwo->numDescendants = treeTwo->Count();
+
+  assert(treeOne->Count() <= treeOne->MaxLeafSize());
+  assert(treeTwo->Count() <= treeTwo->MaxLeafSize());
+
+  assert(tree->Count() == treeOne->Count() + treeTwo->Count());
+  assert(treeOne->Bound()[cutAxis].Hi() < treeTwo->Bound()[cutAxis].Lo());
+}
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+void RPlusTreeSplit<SplitPolicyType, SweepType>::SplitNonLeafNodeAlongPartition(
+    TreeType* tree,
+    TreeType* treeOne,
+    TreeType* treeTwo,
+    const size_t cutAxis,
+    const typename TreeType::ElemType cut)
+{
+  // Split the auxiliary information.
+  tree->AuxiliaryInfo().SplitAuxiliaryInfo(treeOne, treeTwo, cutAxis, cut);
+
+  // Insert children into the corresponding subtree.
+  for (size_t i = 0; i < tree->NumChildren(); i++)
+  {
+    TreeType* child = tree->children[i];
+    int policy = SplitPolicyType::GetSplitPolicy(*child, cutAxis, cut);
+
+    if (policy == SplitPolicyType::AssignToFirstTree)
+    {
+      InsertNodeIntoTree(treeOne, child);
+      child->Parent() = treeOne;
+    }
+    else if (policy == SplitPolicyType::AssignToSecondTree)
+    {
+      InsertNodeIntoTree(treeTwo, child);
+      child->Parent() = treeTwo;
+    }
+    else
+    {
+      // The child should be split (i.e. the partition divides its bound).
+      TreeType* childOne = new TreeType(treeOne);
+      TreeType* childTwo = new TreeType(treeTwo);
+      treeOne->MinLeafSize() = 0;
+      treeOne->MinNumChildren() = 0;
+      treeTwo->MinLeafSize() = 0;
+      treeTwo->MinNumChildren() = 0;
+
+      // Propagate the split downward.
+      if (child->IsLeaf())
+        SplitLeafNodeAlongPartition(child, childOne, childTwo, cutAxis, cut);
+      else
+        SplitNonLeafNodeAlongPartition(child, childOne, childTwo, cutAxis, cut);
+
+      InsertNodeIntoTree(treeOne, childOne);
+      InsertNodeIntoTree(treeTwo, childTwo);
+
+      child->SoftDelete();
+    }
+  }
+
+  assert(treeOne->NumChildren() + treeTwo->NumChildren() != 0);
+
+  // Add a fake subtree if one of the subtrees is empty.
+  if (treeOne->NumChildren() == 0)
+    AddFakeNodes(treeTwo, treeOne);
+  else if (treeTwo->NumChildren() == 0)
+    AddFakeNodes(treeOne, treeTwo);
+
+  assert(treeOne->NumChildren() <= treeOne->MaxNumChildren());
+  assert(treeTwo->NumChildren() <= treeTwo->MaxNumChildren());
+}
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+void RPlusTreeSplit<SplitPolicyType, SweepType>::
+AddFakeNodes(const TreeType* tree, TreeType* emptyTree)
+{
+  size_t numDescendantNodes = tree->TreeDepth() - 1;
+
+  TreeType* node = emptyTree;
+  for (size_t i = 0; i < numDescendantNodes; i++)
+  {
+    TreeType* child = new TreeType(node);
+    node->children[node->NumChildren()++] = child;
+
+    node = child;
+  }
+}
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+bool RPlusTreeSplit<SplitPolicyType, SweepType>::
+PartitionNode(const TreeType* node, size_t& minCutAxis,
+    typename TreeType::ElemType& minCut)
+{
+  if ((node->NumChildren() <= node->MaxNumChildren() && !node->IsLeaf()) ||
+      (node->Count() <= node->MaxLeafSize() && node->IsLeaf()))
+    return false; // No partition required.
+
+  // Define the type of the sweep cost.
+  typedef typename
+      SweepType<SplitPolicyType>::template SweepCost<TreeType>::type
+      SweepCostType;
+
+  SweepCostType minCost = std::numeric_limits<SweepCostType>::max();
+  minCutAxis = node->Bound().Dim();
+
+  // Find the sweep with a minimal cost.
+  for (size_t k = 0; k < node->Bound().Dim(); k++)
+  {
+    typename TreeType::ElemType cut;
+    SweepCostType cost;
+
+    if (node->IsLeaf())
+      cost = SweepType<SplitPolicyType>::SweepLeafNode(k, node, cut);
+    else
+      cost = SweepType<SplitPolicyType>::SweepNonLeafNode(k, node, cut);
+
+    if (cost < minCost)
+    {
+      minCost = cost;
+      minCutAxis = k;
+      minCut = cut;
+    }
+  }
+
+  return true;
+}
+
+template<typename SplitPolicyType,
+         template<typename> class SweepType>
+template<typename TreeType>
+void RPlusTreeSplit<SplitPolicyType, SweepType>::
+InsertNodeIntoTree(TreeType* destTree, TreeType* srcNode)
+{
+  destTree->Bound() |= srcNode->Bound();
+  destTree->numDescendants += srcNode->numDescendants;
+  destTree->children[destTree->NumChildren()++] = srcNode;
+}
+
+} // namespace tree
+} // namespace mlpack
+
+#endif  //  MLPACK_CORE_TREE_RECTANGLE_TREE_R_PLUS_TREE_SPLIT_IMPL_HPP
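
Both split methods above rely on the same PartitionNode() contract: a
false return means no split is needed at all, while a true return that
leaves the cut axis equal to node->Bound().Dim() means the sweep ran but
found no acceptable cut, so the node is grown instead. A compact sketch
of that sentinel idiom, using hypothetical FindCut()/Split() stand-ins
rather than the real tree types:

    #include <cstddef>
    #include <limits>

    // Same contract as PartitionNode(): returns false if no partition
    // is required; otherwise may leave cutAxis == dims to signal that
    // no acceptable cut exists.
    bool FindCut(std::size_t dims, std::size_t& cutAxis, double& cut)
    {
      cutAxis = dims; // Sentinel: "no axis chosen yet".
      cut = std::numeric_limits<double>::lowest();
      // A real implementation sweeps each axis and updates
      // cutAxis/cut whenever a cheaper cut is found.
      return true;
    }

    void Split(std::size_t dims)
    {
      std::size_t cutAxis = dims;
      double cut = std::numeric_limits<double>::lowest();

      if (!FindCut(dims, cutAxis, cut))
        return; // The node is not overfull; nothing to split.

      if (cutAxis == dims)
        return; // Sweep failed; enlarge the node instead (see above).

      // ... otherwise split along (cutAxis, cut).
    }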
diff --git a/src/mlpack/methods/adaboost/adaboost_impl.hpp b/src/mlpack/methods/adaboost/adaboost_impl.hpp
index e20e3b9..a304dfa 100644
--- a/src/mlpack/methods/adaboost/adaboost_impl.hpp
+++ b/src/mlpack/methods/adaboost/adaboost_impl.hpp
@@ -215,7 +215,7 @@ void AdaBoost<WeakLearnerType, MatType>::Classify(
   }
 
   arma::colvec cMRow;
-  arma::uword maxIndex;
+  arma::uword maxIndex = 0;
 
   for (size_t i = 0; i < predictedLabels.n_cols; i++)
   {
diff --git a/src/mlpack/methods/ann/layer/one_hot_layer.hpp b/src/mlpack/methods/ann/layer/one_hot_layer.hpp
new file mode 100644
index 0000000..671696e
--- /dev/null
+++ b/src/mlpack/methods/ann/layer/one_hot_layer.hpp
@@ -0,0 +1,91 @@
+/**
+ * @file one_hot_layer.hpp
+ * @author Shangtong Zhang
+ *
+ * Definition of the OneHotLayer class, which implements a standard network
+ * layer.
+ */
+#ifndef MLPACK_METHODS_ANN_LAYER_ONE_HOT_LAYER_HPP
+#define MLPACK_METHODS_ANN_LAYER_ONE_HOT_LAYER_HPP
+
+#include <mlpack/core.hpp>
+#include <mlpack/methods/ann/layer/layer_traits.hpp>
+
+namespace mlpack {
+namespace ann /** Artificial Neural Network. */ {
+
+/**
+ * An implementation of a one-hot classification layer that can be used as an
+ * output layer.
+ */
+class OneHotLayer
+{
+ public:
+  /**
+   * Create the OneHotLayer object.
+   */
+  OneHotLayer()
+  {
+    // Nothing to do here.
+  }
+
+  /**
+   * Calculate the error using the specified input activation and the target.
+   * The error is stored into the given error parameter.
+   *
+   * @param inputActivations Input data used for evaluating the network.
+   * @param target Target data used for evaluating the network.
+   * @param error The calculated error with respect to the input activation and
+   * the given target.
+   */
+  template<typename DataType>
+  void CalculateError(const DataType& inputActivations,
+                      const DataType& target,
+                      DataType& error)
+  {
+    error = inputActivations - target;
+  }
+
+  /**
+   * Calculate the output class using the specified input activation.
+   *
+   * @param inputActivations Input data used to calculate the output class.
+   * @param output Output class of the input activation.
+   */
+  template<typename DataType>
+  void OutputClass(const DataType& inputActivations, DataType& output)
+  {
+    output = inputActivations;
+    output.zeros();
+
+    arma::uword maxIndex = 0;
+    inputActivations.max(maxIndex);
+    output(maxIndex) = 1;
+  }
+
+  /**
+   * Serialize the layer.
+   */
+  template<typename Archive>
+  void Serialize(Archive& /* ar */, const unsigned int /* version */)
+  {
+    /* Nothing to do here */
+  }
+}; // class OneHotLayer
+
+//! Layer traits for the one-hot class classification layer.
+template <>
+class LayerTraits<OneHotLayer>
+{
+ public:
+  static const bool IsBinary = true;
+  static const bool IsOutputLayer = true;
+  static const bool IsBiasLayer = false;
+  static const bool IsConnection = false;
+};
+
+} // namespace ann
+} // namespace mlpack
+
+
+#endif
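
Since OneHotLayer is header-only and self-contained, its behavior is easy
to check directly. A small usage sketch (not part of the commit):

    #include <mlpack/methods/ann/layer/one_hot_layer.hpp>

    int main()
    {
      mlpack::ann::OneHotLayer layer;

      // Three activations; the largest is at index 1.
      arma::colvec activations("0.1 2.3 0.5");
      arma::colvec output;

      // OutputClass() zeroes the output and writes a single 1 at the
      // position of the maximum activation: (0, 1, 0).
      layer.OutputClass(activations, output);

      // CalculateError() is the elementwise difference to the target.
      arma::colvec target("0 1 0");
      arma::colvec error;
      layer.CalculateError(activations, target, error);

      return 0;
    }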
diff --git a/src/mlpack/methods/ann/layer/vr_class_reward_layer.hpp b/src/mlpack/methods/ann/layer/vr_class_reward_layer.hpp
new file mode 100644
index 0000000..aec0b85
--- /dev/null
+++ b/src/mlpack/methods/ann/layer/vr_class_reward_layer.hpp
@@ -0,0 +1,166 @@
+/**
+ * @file vr_class_reward_layer.hpp
+ * @author Marcus Edel
+ *
+ * Definition of the VRClassRewardLayer class, which implements the variance
+ * reduced classification reinforcement layer.
+ */
+#ifndef MLPACK_METHODS_ANN_LAYER_VR_CLASS_REWARD_LAYER_HPP
+#define MLPACK_METHODS_ANN_LAYER_VR_CLASS_REWARD_LAYER_HPP
+
+#include <mlpack/core.hpp>
+
+namespace mlpack {
+namespace ann /** Artificial Neural Network. */ {
+
+/**
+ * Implementation of the variance reduced classification reinforcement layer.
+ * This layer is meant to be used in combination with the reinforce normal layer
+ * (ReinforceNormalLayer), which expects a reward
+ * (1 for success, 0 otherwise).
+ *
+ * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
+ *         arma::sp_mat or arma::cube).
+ * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
+ *         arma::sp_mat or arma::cube).
+ */
+template <
+    typename InputDataType = arma::field<arma::mat>,
+    typename OutputDataType = arma::field<arma::mat>
+>
+class VRClassRewardLayer
+{
+ public:
+  /**
+   * Create the VRClassRewardLayer object.
+   *
+   * @param scale Parameter used to scale the reward.
+   * @param sizeAverage Take the average over all batches.
+   */
+  VRClassRewardLayer(const double scale = 1, const bool sizeAverage = true) :
+      scale(scale),
+      sizeAverage(sizeAverage)
+  {
+    // Nothing to do here.
+  }
+
+  /**
+   * Ordinary feed forward pass of a neural network, evaluating the function
+   * f(x) by propagating the activity forward through f.
+   *
+   * @param input Input data that contains the log-probabilities for each class.
+   * @param target The target vector, which contains the class index in the
+   *        range between 1 and the number of classes.
+   */
+  template<typename eT>
+  double Forward(const arma::field<arma::Mat<eT> >& input,
+                 const arma::Mat<eT>& target)
+  {
+    return Forward(input(0, 0), target);
+  }
+
+  /**
+   * Ordinary feed forward pass of a neural network, evaluating the function
+   * f(x) by propagating the activity forward through f.
+   *
+   * @param input Input data that contains the log-probabilities for each class.
+   * @param target The target vector, which contains the class index in the
+   *        range between 1 and the number of classes.
+   */
+  template<typename eT>
+  double Forward(const arma::Mat<eT>& input, const arma::Mat<eT>& target)
+  {
+    reward = 0;
+    arma::uword index = 0;
+
+    for (size_t i = 0; i < input.n_cols; i++)
+    {
+      input.unsafe_col(i).max(index);
+      reward = ((index + 1) == target(i)) * scale;
+    }
+
+    if (sizeAverage)
+    {
+      return -reward / input.n_cols;
+    }
+
+    return -reward;
+  }
+
+  /**
+   * Ordinary feed backward pass of a neural network, calculating the function
+   * f(x) by propagating x backwards through f, using the results from the
+   * feed forward pass.
+   *
+   * @param input The propagated input activation.
+   * @param gy The backpropagated error.
+   * @param g The calculated gradient.
+   */
+  template<typename eT>
+  double Backward(const arma::field<arma::Mat<eT> >& input,
+                  const arma::Mat<eT>& /* gy */,
+                  arma::field<arma::Mat<eT> >& g)
+  {
+    g = arma::field<arma::Mat<eT> >(2, 1);
+    g(0, 0) = arma::zeros(input(0, 0).n_rows, input(0, 0).n_cols);
+
+    double vrReward = reward - arma::as_scalar(input(1, 0));
+    if (sizeAverage)
+    {
+      vrReward /= input(0, 0).n_cols;
+    }
+
+    const double norm = sizeAverage ? 2.0 / input.n_cols : 2.0;
+
+    g(1, 0) = norm * (input(1, 0) - reward);
+
+    return vrReward;
+  }
+
+  //! Get the input parameter.
+  const InputDataType& InputParameter() const { return inputParameter; }
+  //! Modify the input parameter.
+  InputDataType& InputParameter() { return inputParameter; }
+
+  //! Get the output parameter.
+  const OutputDataType& OutputParameter() const { return outputParameter; }
+  //! Modify the output parameter.
+  OutputDataType& OutputParameter() { return outputParameter; }
+
+  //! Get the delta.
+  const OutputDataType& Delta() const { return delta; }
+  //! Modify the delta.
+  OutputDataType& Delta() { return delta; }
+
+  //! Get the value of the deterministic parameter.
+  bool Deterministic() const { return deterministic; }
+  //! Modify the value of the deterministic parameter.
+  bool& Deterministic() { return deterministic; }
+
+ private:
+  //! Locally-stored value to scale the reward.
+  const double scale;
+
+  //! If true, take the average over all batches.
+  const bool sizeAverage;
+
+  //! Locally stored reward parameter.
+  double reward;
+
+  //! Locally-stored delta object.
+  OutputDataType delta;
+
+  //! Locally-stored input parameter object.
+  InputDataType inputParameter;
+
+  //! Locally-stored output parameter object.
+  OutputDataType outputParameter;
+
+  //! If true, dropout and scaling are disabled (deterministic mode).
+  bool deterministic;
+}; // class VRClassRewardLayer
+
+} // namespace ann
+} // namespace mlpack
+
+#endif
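
As a rough illustration of the matrix overload of Forward() above (a
sketch under the layer's 1-based target convention, not part of the
commit): a correct prediction yields reward = scale, and the layer
returns the negated, optionally batch-averaged reward.

    #include <mlpack/methods/ann/layer/vr_class_reward_layer.hpp>

    int main()
    {
      mlpack::ann::VRClassRewardLayer<> layer(/* scale */ 1.0,
                                              /* sizeAverage */ true);

      // One sample with three class scores; the argmax is row 1,
      // i.e. class 2 in the layer's 1-based convention.
      arma::mat input("0.1; 2.3; 0.5");
      arma::mat target("2");

      // The prediction matches the target, so reward = 1 and the
      // returned value is -reward / n_cols = -1.
      const double value = layer.Forward(input, target);

      return (value == -1.0) ? 0 : 1;
    }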
diff --git a/src/mlpack/methods/perceptron/perceptron_impl.hpp b/src/mlpack/methods/perceptron/perceptron_impl.hpp
index 5c044d0..6e65163 100644
--- a/src/mlpack/methods/perceptron/perceptron_impl.hpp
+++ b/src/mlpack/methods/perceptron/perceptron_impl.hpp
@@ -115,7 +115,7 @@ void Perceptron<LearnPolicy, WeightInitializationPolicy, MatType>::Classify(
     arma::Row<size_t>& predictedLabels)
 {
   arma::vec tempLabelMat;
-  arma::uword maxIndex;
+  arma::uword maxIndex = 0;
 
   // Could probably be faster if done in batch.
   for (size_t i = 0; i < test.n_cols; i++)
diff --git a/src/mlpack/methods/radical/radical.cpp b/src/mlpack/methods/radical/radical.cpp
index daaecb5..c149fb7 100644
--- a/src/mlpack/methods/radical/radical.cpp
+++ b/src/mlpack/methods/radical/radical.cpp
@@ -92,7 +92,7 @@ double Radical::DoRadical2D(const mat& matX)
     values(i) = Vasicek(candidateY1) + Vasicek(candidateY2);
   }
 
-  uword indOpt;
+  uword indOpt = 0;
   values.min(indOpt); // we ignore the return value; we don't care about it
   return (indOpt / (double) angles) * M_PI / 2.0;
 }




More information about the mlpack-git mailing list