[mlpack-git] master: Remove the single value Forward function to be consistent with the rest of the layer interface and minor formatting fixes. (971213a)
gitdub at mlpack.org
Tue Mar 8 15:13:09 EST 2016
Repository : https://github.com/mlpack/mlpack
On branch : master
Link : https://github.com/mlpack/mlpack/compare/3e366ec5016cbf3e48607b585cf5c6554121fc84...971213a377e27ac4153df70eb0cab02b8f7bdc1f
>---------------------------------------------------------------
commit 971213a377e27ac4153df70eb0cab02b8f7bdc1f
Author: marcus <marcus.edel at fu-berlin.de>
Date: Tue Mar 8 21:13:09 2016 +0100
Remove the single value Forward function to be consistent with the rest of the layer interface and minor formatting fixes.
>---------------------------------------------------------------
971213a377e27ac4153df70eb0cab02b8f7bdc1f
src/mlpack/methods/ann/layer/CMakeLists.txt | 1 +
src/mlpack/methods/ann/layer/hard_tanh_layer.hpp | 92 +++++++++---------------
src/mlpack/tests/activation_functions_test.cpp | 39 ++++------
3 files changed, 46 insertions(+), 86 deletions(-)
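In practical terms, callers that previously evaluated one activation at a time through the scalar Forward(double) overload now pass a whole vector and receive the activations through an output argument, matching the rest of the layer interface. A minimal sketch of the resulting call pattern (variable names hypothetical, mirroring the updated test below):

    #include <mlpack/methods/ann/layer/hard_tanh_layer.hpp>

    using namespace mlpack::ann;

    HardTanHLayer<> layer;  // Linear region defaults to [-1, 1].
    arma::colvec input = {-2.0, 0.3, 2.0};

    // Forward pass: f(x) over the entire vector at once.
    arma::colvec activations;
    layer.Forward(input, activations);  // -1.0, 0.3, 1.0

    // Backward pass: g = gy % f'(x), also vectorized.
    arma::colvec error = arma::ones<arma::colvec>(input.n_elem);
    arma::colvec derivatives;
    layer.Backward(input, error, derivatives);  // 0, 1, 0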
diff --git a/src/mlpack/methods/ann/layer/CMakeLists.txt b/src/mlpack/methods/ann/layer/CMakeLists.txt
index b1b99ad..75a6075 100644
--- a/src/mlpack/methods/ann/layer/CMakeLists.txt
+++ b/src/mlpack/methods/ann/layer/CMakeLists.txt
@@ -6,6 +6,7 @@ set(SOURCES
base_layer.hpp
bias_layer.hpp
dropout_layer.hpp
+ hard_tanh_layer.hpp
linear_layer.hpp
conv_layer.hpp
pooling_layer.hpp
diff --git a/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp b/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
index 89114b0..ca59bb0 100644
--- a/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
+++ b/src/mlpack/methods/ann/layer/hard_tanh_layer.hpp
@@ -2,9 +2,8 @@
* @file hard_tanh_layer.hpp
* @author Dhawal Arora
*
- * Implementation of hard_tanh activation function. The function is mentioned below.
+ * Definition and implementation of the HardTanHLayer layer.
*/
-
#ifndef __MLPACK_METHODS_ANN_LAYER_HARD_TANH_LAYER_HPP
#define __MLPACK_METHODS_ANN_LAYER_HARD_TANH_LAYER_HPP
@@ -32,23 +31,29 @@ namespace ann /** Artificial Neural Network. */ {
* \end{array}
* \right.
* @f}
+ *
+ * @tparam InputDataType Type of the input data (arma::colvec, arma::mat,
+ * arma::sp_mat or arma::cube).
+ * @tparam OutputDataType Type of the output data (arma::colvec, arma::mat,
+ * arma::sp_mat or arma::cube).
*/
-
template <
typename InputDataType = arma::mat,
typename OutputDataType = arma::mat
>
-
class HardTanHLayer
{
public:
-
/**
- * Constructor. Default maxValue is set to 1 and default minValue is set to -1.
+ * Create the HardTanHLayer object using the specified parameters. The range
+ * of the linear region can be adjusted by specifying the maxValue and
+ * minValue. Default (maxValue = 1, minValue = -1).
*
+ * @param maxValue Range of the linear region maximum value.
+ * @param minValue Range of the linear region minimum value.
*/
-
- HardTanHLayer(const double maxValue = 1.00, const double minValue = -1.00) : maxValue(maxValue), minValue(minValue)
+ HardTanHLayer(const double maxValue = 1, const double minValue = -1) :
+ maxValue(maxValue), minValue(minValue)
{
// Nothing to do here.
}
@@ -57,43 +62,13 @@ class HardTanHLayer
* Ordinary feed forward pass of a neural network, evaluating the function
* f(x) by propagating the activity forward through f.
*
- * @param x Input data used for evaluating the specified function. This is for just one input value.
- * @return f(x) The activation value for the input.
- */
-
- double Forward(const double x)
- {
- return fn(x);
- }
-
- /**
- * Ordinary feed forward pass of a neural network, evaluating the function
- * f(x) by propagating the activity forward through f.
- *
* @param input Input data used for evaluating the specified function.
* @param output Resulting output activation.
*/
-
template<typename InputType, typename OutputType>
void Forward(const InputType& input, OutputType& output)
{
- fn(input, output);
- }
-
- /**
- * Ordinary feed backward pass of a neural network, calculating the function
- * f(x) by propagating x backwards through f. Using the results from the feed
- * forward pass.
- *
- * @param input The propagated input activation. This function is for just a single input.
- * @param gy The backpropagated error.
- * @return The calculated gradient.
- */
-
- double Backward(const double input,
- const double gy)
- {
- return gy * deriv(input);
+ Fn(input, output);
}
/**
@@ -111,7 +86,7 @@ class HardTanHLayer
DataType& g)
{
DataType derivative;
- deriv(input, derivative);
+ Deriv(input, derivative);
g = gy % derivative;
}
@@ -147,7 +122,7 @@ class HardTanHLayer
}
arma::Cube<eT> derivative;
- deriv(input, derivative);
+ Deriv(input, derivative);
g = mappedError % derivative;
}
@@ -166,36 +141,34 @@ class HardTanHLayer
//! Modify the delta.
OutputDataType& Delta() { return delta; }
- //! Get the Maximum value.
+ //! Get the maximum value.
double const& MaxValue() const { return maxValue; }
- //! Modify the Maximum value.
+ //! Modify the maximum value.
double& MaxValue() { return maxValue; }
- //! Get the Minimum value.
+ //! Get the minimum value.
double const& MinValue() const { return minValue; }
- //! Modify the Minimum value.
+ //! Modify the minimum value.
double& MinValue() { return minValue; }
-
/**
* Serialize the layer.
*/
template<typename Archive>
- void Serialize(Archive& /* ar */, const unsigned int /* version */)
+ void Serialize(Archive& ar, const unsigned int /* version */)
{
- /* Nothing to do here */
+ ar & data::CreateNVP(maxValue, "maxValue");
+ ar & data::CreateNVP(minValue, "minValue");
}
private:
-
-
/**
* Computes the HardTanH function.
*
* @param x Input data.
* @return f(x).
*/
- double fn(const double x)
+ double Fn(const double x)
{
if (x > maxValue)
return maxValue;
@@ -212,10 +185,11 @@ class HardTanHLayer
*/
template<typename eT>
- void fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
+ void Fn(const arma::Mat<eT>& x, arma::Mat<eT>& y)
{
y = x;
- y = y.transform( [&](eT val) { return std::min( std::max( val, minValue ), maxValue ); } );
+ y.transform( [&](eT val) { return std::min(
+ std::max( val, minValue ), maxValue ); } );
}
/**
@@ -225,11 +199,11 @@ class HardTanHLayer
* @param y The resulting output activation.
*/
template<typename eT>
- void fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
+ void Fn(const arma::Cube<eT>& x, arma::Cube<eT>& y)
{
y = x;
for (size_t s = 0; s < x.n_slices; s++)
- fn(x.slice(s), y.slice(s));
+ Fn(x.slice(s), y.slice(s));
}
/**
@@ -238,7 +212,7 @@ class HardTanHLayer
* @param x Input data.
* @return f'(x)
*/
- double deriv(const double x)
+ double Deriv(const double x)
{
return (x > maxValue || x < minValue) ? 0 : 1;
}
@@ -250,12 +224,12 @@ class HardTanHLayer
* @param x The resulting derivatives.
*/
template<typename InputType, typename OutputType>
- void deriv(const InputType& x, OutputType& y)
+ void Deriv(const InputType& x, OutputType& y)
{
y = x;
for (size_t i = 0; i < x.n_elem; i++)
- y(i) = deriv(x(i));
+ y(i) = Deriv(x(i));
}
//! Locally-stored delta object.
@@ -272,8 +246,6 @@ class HardTanHLayer
//! Minimum value for the HardTanH function.
double minValue;
-
-
}; // class HardTanHLayer
} // namespace ann
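With Serialize() no longer a no-op, a configured range now survives archiving. A minimal sketch, assuming mlpack's boost::serialization shim (data::CreateNVP) and a hypothetical output file name:

    #include <mlpack/core.hpp>
    #include <mlpack/methods/ann/layer/hard_tanh_layer.hpp>
    #include <boost/archive/xml_oarchive.hpp>
    #include <fstream>

    int main()
    {
      mlpack::ann::HardTanHLayer<> layer(2.0, -2.0);  // Custom [-2, 2] range.

      std::ofstream ofs("layer.xml");  // Hypothetical file name.
      boost::archive::xml_oarchive ar(ofs);
      // CreateNVP routes the boost archive through the layer's Serialize(),
      // which now stores maxValue and minValue.
      ar << mlpack::data::CreateNVP(layer, "layer");
    }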
diff --git a/src/mlpack/tests/activation_functions_test.cpp b/src/mlpack/tests/activation_functions_test.cpp
index 986e89a..61e28cb 100644
--- a/src/mlpack/tests/activation_functions_test.cpp
+++ b/src/mlpack/tests/activation_functions_test.cpp
@@ -1,6 +1,7 @@
/**
* @file activation_functions_test.cpp
* @author Marcus Edel
+ * @author Dhawal Arora
*
* Tests for the various activation functions.
*/
@@ -121,23 +122,17 @@ void CheckInverseCorrect(const arma::colvec input)
}
}
-
/*
- * Implementation of the HardTanH activation function test. The function is implemented as a HardTanH Layer
- * in file hard_tanh_layer.hpp
+ * Implementation of the HardTanH activation function test. The function is
+ * implemented as a HardTanH Layer in hard_tanh_layer.hpp
+ *
* @param input Input data used for evaluating the HardTanH activation function.
* @param target Target data used to evaluate the HardTanH activation.
- *
*/
-void CheckHardTanHActivationCorrect(const arma::colvec input, const arma::colvec target)
+void CheckHardTanHActivationCorrect(const arma::colvec input,
+ const arma::colvec target)
{
HardTanHLayer<> htf;
- // Test the activation function using a single value as input.
- for (size_t i = 0; i < target.n_elem; i++)
- {
- BOOST_REQUIRE_CLOSE(htf.Forward(input.at(i)),
- target.at(i), 1e-3);
- }
// Test the activation function using the entire vector as input.
arma::colvec activations;
@@ -149,25 +144,20 @@ void CheckHardTanHActivationCorrect(const arma::colvec input, const arma::colvec
}
/*
- * Implementation of the HardTanH activation function derivative test. The derivative is implemented in HardTanH Layer
- * in file hard_tanh_layer.hpp
+ * Implementation of the HardTanH activation function derivative test. The
+ * derivative is implemented as HardTanH Layer in hard_tanh_layer.hpp
+ *
* @param input Input data used for evaluating the HardTanH activation function.
* @param target Target data used to evaluate the HardTanH activation.
- *
*/
-
-void CheckHardTanHDerivativeCorrect(const arma::colvec input, const arma::colvec target)
+void CheckHardTanHDerivativeCorrect(const arma::colvec input,
+ const arma::colvec target)
{
HardTanHLayer<> htf;
- // Test the calculation of the derivatives using a single value as input.
- for (size_t i = 0; i < target.n_elem; i++)
- {
- BOOST_REQUIRE_CLOSE(htf.Backward(input.at(i), 1),
- target.at(i), 1e-3);
- }
// Test the calculation of the derivatives using the entire vector as input.
arma::colvec derivatives;
+
// This error vector will be set to 1 to get the derivatives.
arma::colvec error(input.n_elem);
htf.Backward(input, (arma::colvec)error.ones(), derivatives);
@@ -177,8 +167,6 @@ void CheckHardTanHDerivativeCorrect(const arma::colvec input, const arma::colvec
}
}
-
-
/**
* Basic test of the tanh function.
*/
@@ -273,6 +261,5 @@ BOOST_AUTO_TEST_CASE(HardTanHFunctionTest)
CheckHardTanHDerivativeCorrect(activationData, desiredDerivatives);
}
-
-
BOOST_AUTO_TEST_SUITE_END();
+
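For reference, the clamp that the renamed Fn() applies can be reproduced with Armadillo alone; a standalone sketch (independent of mlpack, input values chosen arbitrarily):

    #include <armadillo>
    #include <algorithm>

    int main()
    {
      arma::colvec x = {-2.0, -0.5, 0.0, 0.5, 2.0};
      const double minValue = -1.0, maxValue = 1.0;

      // Same transform as HardTanHLayer::Fn():
      // f(x) = min(max(x, minValue), maxValue).
      arma::colvec y = x;
      y.transform([&](double v)
          { return std::min(std::max(v, minValue), maxValue); });

      y.print("f(x)");  // -1.0 -0.5 0.0 0.5 1.0
    }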