[mlpack-git] master: Add function to evaluate the network. (3f5f7ba)
gitdub at big.cc.gt.atl.ga.us
gitdub at big.cc.gt.atl.ga.us
Fri Feb 27 15:51:50 EST 2015
Repository : https://github.com/mlpack/mlpack
On branch : master
Link : https://github.com/mlpack/mlpack/compare/594fd9f61d1280152c758559b4fc60bf0c827cca...45f682337b1daa4c82797f950e16a605fe4971bd
>---------------------------------------------------------------
commit 3f5f7bac84021545de0dcdd43a7060b8877eab85
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date: Fri Feb 27 21:09:28 2015 +0100
Add function to evaluate the network.
>---------------------------------------------------------------
3f5f7bac84021545de0dcdd43a7060b8877eab85
src/mlpack/methods/ann/ffnn.hpp | 48 +++++++++++++++++++++++++----------------
1 file changed, 29 insertions(+), 19 deletions(-)
diff --git a/src/mlpack/methods/ann/ffnn.hpp b/src/mlpack/methods/ann/ffnn.hpp
index 65b33cd..8cfb347 100644
--- a/src/mlpack/methods/ann/ffnn.hpp
+++ b/src/mlpack/methods/ann/ffnn.hpp
@@ -44,7 +44,7 @@ class FFNN
* @param outputLayer The output layer used to evaluate the network.
*/
FFNN(const ConnectionTypes& network, OutputLayerType& outputLayer)
- : network(network), outputLayer(outputLayer), err(0), seqNum(0)
+ : network(network), outputLayer(outputLayer), trainError(0), seqNum(0)
{
// Nothing to do here.
}
@@ -64,13 +64,8 @@ class FFNN
const VecType& target,
VecType& error)
{
- ResetActivations(network);
seqNum++;
-
- std::get<0>(std::get<0>(network)).InputLayer().InputActivation() = input;
-
- FeedForward(network);
- OutputError(network, target, error);
+ trainError += Evaluate(input, target, error);
}
/**
@@ -102,7 +97,7 @@ class FFNN
ApplyGradients(network);
// Reset the overall error.
- err = 0;
+ trainError = 0;
seqNum = 0;
}
@@ -125,6 +120,26 @@ class FFNN
OutputPrediction(network, output);
}
+ /**
+ * Evaluate the trained network using the given input and compare the output
+ * with the given target vector.
+ *
+ * @param input Input data used to evaluate the trained network.
+ * @param target Target data used to calculate the network error.
+ * @param error The calculated error of the output layer.
+ * @tparam VecType Type of data (arma::colvec, arma::mat or arma::sp_mat).
+ */
+ template <typename VecType>
+ double Evaluate(const VecType& input, const VecType& target, VecType& error)
+ {
+ ResetActivations(network);
+
+ std::get<0>(std::get<0>(network)).InputLayer().InputActivation() = input;
+
+ FeedForward(network);
+ return OutputError(network, target, error);
+ }
+
//! Get the error of the network.
double Error() const { return trainError; }
@@ -218,9 +233,9 @@ class FFNN
* Calculate the output error and update the overall error.
*/
template<typename VecType, typename... Tp>
- void OutputError(std::tuple<Tp...>& t,
- const VecType& target,
- VecType& error)
+ double OutputError(std::tuple<Tp...>& t,
+ const VecType& target,
+ VecType& error)
{
// Calculate and store the output error.
outputLayer.calculateError(std::get<0>(
@@ -229,12 +244,9 @@ class FFNN
// Measures the network's performance with the specified performance
// function.
- err += PerformanceFunction::error(std::get<0>(
+ return PerformanceFunction::error(std::get<0>(
std::get<sizeof...(Tp) - 1>(t)).OutputLayer().InputActivation(),
target);
-
- // Update the final training error.
- trainError = err;
}
/*
@@ -396,11 +408,12 @@ class FFNN
typename std::enable_if<I < sizeof...(Tp), void>::type
Apply(std::tuple<Tp...>& t)
{
+ // Take a mean gradient step over the number of inputs.
if (seqNum > 1)
gradients[gradientNum] /= seqNum;
std::get<I>(t).Optimzer().UpdateWeights(std::get<I>(t).Weights(),
- gradients[gradientNum], err);
+ gradients[gradientNum], trainError);
// Reset the gradient storage.
gradients[gradientNum++].zeros();
@@ -457,9 +470,6 @@ class FFNN
//! The output layer used to evaluate the network
OutputLayerType& outputLayer;
- //! The current error of the network.
- double err;
-
//! The current training error of the network.
double trainError;
More information about the mlpack-git
mailing list