[mlpack-git] master: Changed scope of some functions to private in HardTanH Layer and its tests; some minor style changes (7de1645)
gitdub at mlpack.org
Mon Mar 7 05:35:49 EST 2016
Repository : https://github.com/mlpack/mlpack
On branch : master
Link : https://github.com/mlpack/mlpack/compare/d5841b1f7204434f39f4887b342335e195a009f7...3e366ec5016cbf3e48607b585cf5c6554121fc84
>---------------------------------------------------------------
commit 7de16454b3b97268ce7402f433f3371376696a40
Author: Dhawal Arora <d.p.arora1 at gmail.com>
Date: Mon Mar 7 16:05:49 2016 +0530
Changed scope of some functions to private in HardTanH Layer and its tests; some minor style changes
>---------------------------------------------------------------
7de16454b3b97268ce7402f433f3371376696a40
src/mlpack/methods/ann/layer/hard_tanh_layer.hpp | 164 +++++++++++++----------
src/mlpack/tests/activation_functions_test.cpp | 10 +-
2 files changed, 102 insertions(+), 72 deletions(-)
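For context, HardTanH clamps each input to [minValue, maxValue]; this commit hides the raw fn()/deriv() helpers behind the public Forward()/Backward() interface. Below is a minimal usage sketch against the new interface, assuming an mlpack 2.x build; the layer type and method signatures come from the diff below, and the input values are illustrative:

    #include <mlpack/core.hpp>
    #include <mlpack/methods/ann/layer/hard_tanh_layer.hpp>

    using namespace mlpack::ann;

    int main()
    {
      HardTanHLayer<> htf;  // assumed default bounds: [-1, 1]

      arma::colvec input("-2.0 -0.5 0.5 2.0");
      arma::colvec activations;
      htf.Forward(input, activations);  // clamps to: -1.0 -0.5 0.5 1.0

      // Backward() scales f'(input) by the backpropagated error, so an
      // all-ones error vector recovers the raw derivatives: 0 1 1 0.
      arma::colvec error = arma::ones<arma::colvec>(input.n_elem);
      arma::colvec derivatives;
      htf.Backward(input, error, derivatives);

      return 0;
    }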
diff --git a/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp b/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
index 7e71675..89114b0 100644
--- a/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
+++ b/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
@@ -54,89 +54,46 @@ class HardTanHLayer
}
/**
- * Computes the HardTanH function.
- *
- * @param x Input data.
- * @return f(x).
- */
- double fn(const double x)
- {
- double val;
- val = x;
- if (x > maxValue)
- val = maxValue;
- else if (x < minValue)
- val = minValue;
- return val;
- }
-
- /**
- * Computes the HardTanH function using a dense matrix as input.
+ * Ordinary feed forward pass of a neural network, evaluating the function
+ * f(x) by propagating the activity forward through f.
*
- * @param x Input data.
- * @param y The resulting output activation.
+ * @param x Input data used for evaluating the specified function; a single input value.
+ * @return f(x) The activation value for the input.
*/
- template<typename eT>
- void fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
+ double Forward(const double x)
{
- arma::Mat<eT> t;
- t = x;
- y = t.transform( [&](eT val) { return std::min( std::max( val, minValue ), maxValue ); } );
+ return fn(x);
}
/**
- * Computes the HardTanH function using a 3rd-order tensor as input.
- *
- * @param x Input data.
- * @param y The resulting output activation.
- */
- template<typename eT>
- void fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
- {
- y = x;
- for (size_t s = 0; s < x.n_slices; s++)
- fn(x.slice(s), y.slice(s));
- }
-
- /**
- * Computes the first derivative of the HardTanH function.
+ * Ordinary feed forward pass of a neural network, evaluating the function
+ * f(x) by propagating the activity forward through f.
*
- * @param x Input data.
- * @return f'(x)
+ * @param input Input data used for evaluating the specified function.
+ * @param output Resulting output activation.
*/
- double deriv(const double x)
- {
- return (x > maxValue || x < minValue) ? 0 : 1;
- }
- /**
- * Computes the first derivative of the HardTanH function.
- *
- * @param y Input activations.
- * @param x The resulting derivatives.
- */
template<typename InputType, typename OutputType>
- void deriv(const InputType& x, OutputType& y)
+ void Forward(const InputType& input, OutputType& output)
{
- y = x;
-
- for (size_t i = 0; i < x.n_elem; i++)
- y(i) = deriv(x(i));
+ fn(input, output);
}
/**
- * Ordinary feed forward pass of a neural network, evaluating the function
- * f(x) by propagating the activity forward through f.
+ * Ordinary feed backward pass of a neural network, calculating the function
+ * f(x) by propagating x backwards through f, using the results from the
+ * feed forward pass.
*
- * @param input Input data used for evaluating the specified function.
- * @param output Resulting output activation.
+ * @param input The propagated input activation; a single input value.
+ * @param gy The backpropagated error.
+ * @return The calculated gradient.
*/
- template<typename InputType, typename OutputType>
- void Forward(const InputType& input, OutputType& output)
+ double Backward(const double input,
+ const double gy)
{
- fn(input, output);
+ return gy * deriv(input);
}
/**
@@ -210,14 +167,14 @@ class HardTanHLayer
OutputDataType& Delta() { return delta; }
//! Get the Maximum value.
- double const& getmaxValue() const { return maxValue; }
+ double const& MaxValue() const { return maxValue; }
//! Modify the Maximum value.
- double& setmaxValue() { return maxValue; }
+ double& MaxValue() { return maxValue; }
//! Get the Minimum value.
- double const& getminValue() const { return minValue; }
+ double const& MinValue() const { return minValue; }
//! Modify the Minimum value.
- double& setminValue() { return minValue; }
+ double& MinValue() { return minValue; }
/**
@@ -230,6 +187,77 @@ class HardTanHLayer
}
private:
+
+ /**
+ * Computes the HardTanH function.
+ *
+ * @param x Input data.
+ * @return f(x).
+ */
+ double fn(const double x)
+ {
+ if (x > maxValue)
+ return maxValue;
+ else if (x < minValue)
+ return minValue;
+ return x;
+ }
+
+ /**
+ * Computes the HardTanH function using a dense matrix as input.
+ *
+ * @param x Input data.
+ * @param y The resulting output activation.
+ */
+ template<typename eT>
+ void fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
+ {
+ y = x;
+ y = y.transform( [&](eT val) { return std::min( std::max( val, minValue ), maxValue ); } );
+ }
+
+ /**
+ * Computes the HardTanH function using a 3rd-order tensor as input.
+ *
+ * @param x Input data.
+ * @param y The resulting output activation.
+ */
+ template<typename eT>
+ void fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
+ {
+ y = x;
+ for (size_t s = 0; s < x.n_slices; s++)
+ fn(x.slice(s), y.slice(s));
+ }
+
+ /**
+ * Computes the first derivative of the HardTanH function.
+ *
+ * @param x Input data.
+ * @return f'(x)
+ */
+ double deriv(const double x)
+ {
+ return (x > maxValue || x < minValue) ? 0 : 1;
+ }
+
+ /**
+ * Computes the first derivative of the HardTanH function.
+ *
+ * @param x Input activations.
+ * @param y The resulting derivatives.
+ */
+ template<typename InputType, typename OutputType>
+ void deriv(const InputType& x, OutputType& y)
+ {
+ y = x;
+
+ for (size_t i = 0; i < x.n_elem; i++)
+ y(i) = deriv(x(i));
+ }
+
//! Locally-stored delta object.
OutputDataType delta;
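The accessor hunk above replaces the getmaxValue()/setmaxValue() pairs with mlpack's usual overloaded-accessor convention: a const getter and a non-const reference setter sharing one name. A short sketch of how callers use the renamed accessors (the bound values are illustrative):

    HardTanHLayer<> layer;
    layer.MaxValue() = 2.0;   // modify through the non-const overload
    layer.MinValue() = -2.0;
    const double range = layer.MaxValue() - layer.MinValue();  // read-only access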
diff --git a/src/mlpack/tests/activation_functions_test.cpp b/src/mlpack/tests/activation_functions_test.cpp
index 26bedb5..986e89a 100644
--- a/src/mlpack/tests/activation_functions_test.cpp
+++ b/src/mlpack/tests/activation_functions_test.cpp
@@ -135,13 +135,13 @@ void CheckHardTanHActivationCorrect(const arma::colvec input, const arma::colvec
// Test the activation function using a single value as input.
for (size_t i = 0; i < target.n_elem; i++)
{
- BOOST_REQUIRE_CLOSE(htf.fn(input.at(i)),
+ BOOST_REQUIRE_CLOSE(htf.Forward(input.at(i)),
target.at(i), 1e-3);
}
// Test the activation function using the entire vector as input.
arma::colvec activations;
- htf.fn(input, activations);
+ htf.Forward(input, activations);
for (size_t i = 0; i < activations.n_elem; i++)
{
BOOST_REQUIRE_CLOSE(activations.at(i), target.at(i), 1e-3);
@@ -162,13 +162,15 @@ void CheckHardTanHDerivativeCorrect(const arma::colvec input, const arma::colvec
// Test the calculation of the derivatives using a single value as input.
for (size_t i = 0; i < target.n_elem; i++)
{
- BOOST_REQUIRE_CLOSE(htf.deriv(input.at(i)),
+ BOOST_REQUIRE_CLOSE(htf.Backward(input.at(i), 1),
target.at(i), 1e-3);
}
// Test the calculation of the derivatives using the entire vector as input.
arma::colvec derivatives;
- htf.deriv(input, derivatives);
+ // The error vector is set to all ones so that Backward() returns the raw derivatives.
+ arma::colvec error(input.n_elem);
+ htf.Backward(input, (arma::colvec)error.ones(), derivatives);
for (size_t i = 0; i < derivatives.n_elem; i++)
{
BOOST_REQUIRE_CLOSE(derivatives.at(i), target.at(i), 1e-3);