[mlpack-git] master: Remove unused layer and optimizer classes. (c5dbcc0)
gitdub at big.cc.gt.atl.ga.us
Wed Sep 2 11:18:51 EDT 2015
Repository : https://github.com/mlpack/mlpack
On branch : master
Link : https://github.com/mlpack/mlpack/compare/49d765cc540f0499e14b5d5bfafe5e63bb25c1f5...c5dbcc05fc3eacfe02b6652b901054734c25cdc1
>---------------------------------------------------------------
commit c5dbcc05fc3eacfe02b6652b901054734c25cdc1
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date: Wed Sep 2 17:18:17 2015 +0200
Remove unused layer and optimizer classes.
>---------------------------------------------------------------
c5dbcc05fc3eacfe02b6652b901054734c25cdc1
src/mlpack/methods/ann/ffnn.hpp | 458 --------------------------
src/mlpack/methods/ann/layer/neuron_layer.hpp | 291 ----------------
src/mlpack/methods/ann/optimizer/irpropm.hpp | 94 ------
src/mlpack/methods/ann/optimizer/irpropp.hpp | 110 -------
src/mlpack/methods/ann/optimizer/rpropm.hpp | 88 -----
src/mlpack/methods/ann/optimizer/rpropp.hpp | 103 ------
6 files changed, 1144 deletions(-)
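
For context, the removed FFNN class exposed a compact training API: FeedForward() runs a forward pass and accumulates the error, FeedBackward() backpropagates, ApplyGradients() lets each connection's optimizer update the weights, and Predict()/Evaluate() handle inference. A minimal sketch of one training iteration, assuming `network` (a tuple of connection modules) and `outputLayer` have been built from the removed modules; the type names here are placeholders, so this illustrates the call sequence rather than code buildable against current master:

    arma::colvec input = arma::randu<arma::colvec>(10);
    arma::colvec target = arma::zeros<arma::colvec>(2);
    target(0) = 1;  // one-hot target

    // ConnectionTuple and OutputLayer are placeholder names; the concrete
    // types came from the removed connection and layer modules.
    FFNN<ConnectionTuple, OutputLayer> net(network, outputLayer);

    arma::colvec error;
    net.FeedForward(input, target, error);  // forward pass, accumulate error
    net.FeedBackward(error);                // backpropagate, update gradients
    net.ApplyGradients();                   // optimizer step, reset the error

    arma::colvec prediction;
    net.Predict(input, prediction);         // deterministic forward pass
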
diff --git a/src/mlpack/methods/ann/ffnn.hpp b/src/mlpack/methods/ann/ffnn.hpp
deleted file mode 100644
index ec7084a..0000000
--- a/src/mlpack/methods/ann/ffnn.hpp
+++ /dev/null
@@ -1,458 +0,0 @@
-/**
- * @file ffnn.hpp
- * @author Marcus Edel
- *
- * Definition of the FFNN class, which implements feed forward neural networks.
- */
-#ifndef __MLPACK_METHODS_ANN_FFNN_HPP
-#define __MLPACK_METHODS_ANN_FFNN_HPP
-
-#include <mlpack/core.hpp>
-
-#include <boost/ptr_container/ptr_vector.hpp>
-
-#include <mlpack/methods/ann/network_traits.hpp>
-#include <mlpack/methods/ann/performance_functions/cee_function.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/connections/connection_traits.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a standard feed forward network.
- *
- * @tparam ConnectionTypes Tuple that contains all connection modules which
- *        will be used to construct the network.
- * @tparam OutputLayerType The output layer type used to evaluate the network.
- * @tparam PerformanceFunction Performance strategy used to calculate the error.
- * @tparam MatType Type of the gradients (arma::mat or arma::sp_mat).
- */
-template <
- typename ConnectionTypes,
- typename OutputLayerType,
- class PerformanceFunction = CrossEntropyErrorFunction<>,
- typename MatType = arma::mat
->
-class FFNN
-{
- public:
- /**
- * Construct the FFNN object, which will construct a feed forward neural
- * network with the specified layers.
- *
- * @param network The network modules used to construct the network.
-   * @param outputLayer The output layer used to evaluate the network.
- */
- FFNN(const ConnectionTypes& network, OutputLayerType& outputLayer)
- : network(network), outputLayer(outputLayer), trainError(0), seqNum(0)
- {
- // Nothing to do here.
- }
-
- /**
- * Run a single iteration of the feed forward algorithm, using the given
-   * input and target vector, storing the resulting error in the error
-   * vector.
- *
- * @param input Input data used to evaluate the network.
- * @param target Target data used to calculate the network error.
-   * @param error The calculated error of the output layer.
- * @tparam VecType Type of data (arma::colvec, arma::mat or arma::sp_mat).
- */
- template <typename VecType>
- void FeedForward(const VecType& input,
- const VecType& target,
- VecType& error)
- {
- deterministic = false;
- seqNum++;
- trainError += Evaluate(input, target, error);
- }
-
- /**
- * Run a single iteration of the feed backward algorithm, using the given
- * error of the output layer.
- *
-   * @param error The calculated error of the output layer.
- * @tparam VecType Type of data (arma::colvec, arma::mat or arma::sp_mat).
- */
- template <typename VecType>
- void FeedBackward(const VecType& error)
- {
- LayerBackward(network, error);
- UpdateGradients(network);
- }
-
-  /**
-   * Update the weights using the specified optimizer.
-   */
- void ApplyGradients()
- {
- ApplyGradients(network);
-
- // Reset the overall error.
- trainError = 0;
- seqNum = 0;
- }
-
- /**
-   * Evaluate the network using the given input. The output activation is
-   * stored in the output parameter.
-   *
-   * @param input Input data used to evaluate the network.
-   * @param output Output data used to store the output activation.
- * @tparam VecType Type of data (arma::colvec, arma::mat or arma::sp_mat).
- */
- template <typename VecType>
- void Predict(const VecType& input, VecType& output)
- {
- deterministic = true;
- ResetActivations(network);
-
- std::get<0>(std::get<0>(network)).InputLayer().InputActivation() = input;
-
- LayerForward(network);
- OutputPrediction(network, output);
- }
-
- /**
- * Evaluate the trained network using the given input and compare the output
- * with the given target vector.
- *
- * @param input Input data used to evaluate the trained network.
- * @param target Target data used to calculate the network error.
-   * @param error The calculated error of the output layer.
- * @tparam VecType Type of data (arma::colvec, arma::mat or arma::sp_mat).
- */
- template <typename VecType>
- double Evaluate(const VecType& input, const VecType& target, VecType& error)
- {
- deterministic = false;
- ResetActivations(network);
-
- std::get<0>(std::get<0>(network)).InputLayer().InputActivation() = input;
-
- LayerForward(network);
- return OutputError(network, target, error);
- }
-
- //! Get the error of the network.
- double Error() const { return trainError; }
-
- private:
- /**
- * Helper function to reset the network by zeroing the layer activations.
- *
- * enable_if (SFINAE) is used to iterate through the network connection
- * modules. The general case peels off the first type and recurses, as usual
- * with variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- ResetActivations(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- ResetActivations(std::tuple<Tp...>& t)
- {
- Reset(std::get<I>(t));
- ResetActivations<I + 1, Tp...>(t);
- }
-
- /**
- * Reset the network by zeroing the layer activations.
- *
- * enable_if (SFINAE) is used to iterate through the network connections.
- * The general case peels off the first type and recurses, as usual with
- * variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- Reset(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- Reset(std::tuple<Tp...>& t)
- {
- std::get<I>(t).OutputLayer().Deterministic() = deterministic;
- std::get<I>(t).OutputLayer().InputActivation().zeros();
- Reset<I + 1, Tp...>(t);
- }
-
- /**
- * Run a single iteration of the feed forward algorithm, using the given
-   * input and target vector, storing the resulting error in the error
-   * vector.
-   *
-   * enable_if (SFINAE) is used to select between two template overloads of
-   * the get function - one for when I is equal to the size of the tuple of
- * connections, and one for the general case which peels off the first type
- * and recurses, as usual with variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- LayerForward(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- LayerForward(std::tuple<Tp...>& t)
- {
- ConnectionForward(std::get<I>(t));
-
- // Use the first connection to perform the feed forward algorithm.
- std::get<0>(std::get<I>(t)).OutputLayer().FeedForward(
- std::get<0>(std::get<I>(t)).OutputLayer().InputActivation(),
- std::get<0>(std::get<I>(t)).OutputLayer().InputActivation());
-
- LayerForward<I + 1, Tp...>(t);
- }
-
- /**
- * Sum up all layer activations by evaluating all connections.
- *
- * enable_if (SFINAE) is used to iterate through the network connections.
- * The general case peels off the first type and recurses, as usual with
- * variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- ConnectionForward(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- ConnectionForward(std::tuple<Tp...>& t)
- {
- std::get<I>(t).FeedForward(std::get<I>(t).InputLayer().InputActivation());
- ConnectionForward<I + 1, Tp...>(t);
- }
-
- /*
- * Calculate the output error and update the overall error.
- */
- template<typename VecType, typename... Tp>
- double OutputError(std::tuple<Tp...>& t,
- const VecType& target,
- VecType& error)
- {
- // Calculate and store the output error.
- outputLayer.CalculateError(std::get<0>(
- std::get<sizeof...(Tp) - 1>(t)).OutputLayer().InputActivation(),
- target, error);
-
-    // Measures the network's performance with the specified performance
- // function.
- return PerformanceFunction::Error(std::get<0>(
- std::get<sizeof...(Tp) - 1>(t)).OutputLayer().InputActivation(),
- target);
- }
-
- /*
- * Calculate and store the output activation.
- */
- template<typename VecType, typename... Tp>
- void OutputPrediction(std::tuple<Tp...>& t, VecType& output)
- {
- // Calculate and store the output prediction.
- outputLayer.OutputClass(std::get<0>(
- std::get<sizeof...(Tp) - 1>(t)).OutputLayer().InputActivation(),
- output);
- }
-
- /**
- * Run a single iteration of the feed backward algorithm, using the given
- * error of the output layer. Note that we iterate backward through the
- * connection modules.
- *
- * enable_if (SFINAE) is used to select between two template overloads of
-   * the get function - one for when I is equal to the size of the tuple of
- * connections, and one for the general case which peels off the first type
- * and recurses, as usual with variadic function templates.
- */
- template<size_t I = 0, typename VecType, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- LayerBackward(std::tuple<Tp...>& /* unused */, VecType& /* unused */)
- { }
-
- template<size_t I = 1, typename VecType, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- LayerBackward(std::tuple<Tp...>& t, VecType& error)
- {
-    // Distinguish between the output layer and the other layers. In the case
-    // of the output layer, use the specified error vector to store the error
-    // and to perform the feed backward pass.
- if (I == 1)
- {
- // Use the first connection from the last connection module to
- // calculate the error.
- std::get<0>(std::get<sizeof...(Tp) - I>(t)).OutputLayer().FeedBackward(
- std::get<0>(
- std::get<sizeof...(Tp) - I>(t)).OutputLayer().InputActivation(),
- error, std::get<0>(
- std::get<sizeof...(Tp) - I>(t)).OutputLayer().Delta());
- }
-
- ConnectionBackward(std::get<sizeof...(Tp) - I>(t), std::get<0>(
- std::get<sizeof...(Tp) - I>(t)).OutputLayer().Delta());
-
- LayerBackward<I + 1, VecType, Tp...>(t, error);
- }
-
- /**
- * Back propagate the given error and store the delta in the connection
-   * between the corresponding layers.
- *
- * enable_if (SFINAE) is used to iterate through the network connections.
- * The general case peels off the first type and recurses, as usual with
- * variadic function templates.
- */
- template<size_t I = 0, typename VecType, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- ConnectionBackward(std::tuple<Tp...>& /* unused */, VecType& /* unused */) { }
-
- template<size_t I = 0, typename VecType, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- ConnectionBackward(std::tuple<Tp...>& t, VecType& error)
- {
- std::get<I>(t).FeedBackward(error);
-
-    // We calculate the delta only for non-bias layers.
- if (!LayerTraits<typename std::remove_reference<decltype(
- std::get<I>(t).InputLayer())>::type>::IsBiasLayer)
- {
- std::get<I>(t).InputLayer().FeedBackward(
- std::get<I>(t).InputLayer().InputActivation(),
- std::get<I>(t).Delta(), std::get<I>(t).InputLayer().Delta());
- }
-
- ConnectionBackward<I + 1, VecType, Tp...>(t, error);
- }
-
- /**
- * Helper function to iterate through all connection modules and to update
- * the gradient storage.
- *
- * enable_if (SFINAE) is used to select between two template overloads of
-   * the get function - one for when I is equal to the size of the tuple of
- * connections, and one for the general case which peels off the first type
- * and recurses, as usual with variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- UpdateGradients(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- UpdateGradients(std::tuple<Tp...>& t)
- {
- Gradients(std::get<I>(t));
- UpdateGradients<I + 1, Tp...>(t);
- }
-
- /**
-   * Sum up all gradients and store the results in the gradient storage.
- *
- * enable_if (SFINAE) is used to iterate through the network connections.
- * The general case peels off the first type and recurses, as usual with
- * variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- Gradients(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- Gradients(std::tuple<Tp...>& t)
- {
- if (!ConnectionTraits<typename std::remove_reference<decltype(
- std::get<I>(t))>::type>::IsIdentityConnection)
- {
- std::get<I>(t).Optimizer().Update();
- }
-
- Gradients<I + 1, Tp...>(t);
- }
-
- /**
- * Helper function to update the weights using the specified optimizer and
- * the given input.
- *
- * enable_if (SFINAE) is used to select between two template overloads of
-   * the get function - one for when I is equal to the size of the tuple of
- * connections, and one for the general case which peels off the first type
- * and recurses, as usual with variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- ApplyGradients(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- ApplyGradients(std::tuple<Tp...>& t)
- {
- Apply(std::get<I>(t));
- ApplyGradients<I + 1, Tp...>(t);
- }
-
- /**
- * Update the weights using the gradients from the gradient store.
- *
- * enable_if (SFINAE) is used to iterate through the network connections.
- * The general case peels off the first type and recurses, as usual with
- * variadic function templates.
- */
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I == sizeof...(Tp), void>::type
- Apply(std::tuple<Tp...>& /* unused */) { }
-
- template<size_t I = 0, typename... Tp>
- typename std::enable_if<I < sizeof...(Tp), void>::type
- Apply(std::tuple<Tp...>& t)
- {
- if (!ConnectionTraits<typename std::remove_reference<decltype(
- std::get<I>(t))>::type>::IsIdentityConnection)
- {
- std::get<I>(t).Optimizer().Optimize();
- std::get<I>(t).Optimizer().Reset();
- }
-
- Apply<I + 1, Tp...>(t);
- }
-
- //! The connection modules used to build the network.
- ConnectionTypes network;
-
-  //! The output layer used to evaluate the network.
- OutputLayerType& outputLayer;
-
- //! The current training error of the network.
- double trainError;
-
- //! The number of the current input sequence.
- size_t seqNum;
-
- //! The current evaluation mode (training or testing).
- bool deterministic;
-}; // class FFNN
-
-
-//! Network traits for the FFNN network.
-template <
- typename ConnectionTypes,
- typename OutputLayerType,
- class PerformanceFunction
->
-class NetworkTraits<
- FFNN<ConnectionTypes, OutputLayerType, PerformanceFunction> >
-{
- public:
- static const bool IsFNN = true;
- static const bool IsRNN = false;
- static const bool IsCNN = false;
-};
-
-}; // namespace ann
-}; // namespace mlpack
-
-#endif
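
A note on the idiom used throughout the file above: the network tuple is walked at compile time with enable_if (SFINAE), pairing a general overload that processes element I and recurses with a base-case overload that fires once I reaches the tuple size. A self-contained sketch of the pattern, independent of any mlpack types:

    #include <cstddef>
    #include <iostream>
    #include <tuple>
    #include <type_traits>

    // Base case: I has reached the tuple size, so stop recursing.
    template<std::size_t I = 0, typename... Tp>
    typename std::enable_if<I == sizeof...(Tp), void>::type
    PrintAll(std::tuple<Tp...>& /* unused */) { }

    // General case: handle element I, then recurse with I + 1.
    template<std::size_t I = 0, typename... Tp>
    typename std::enable_if<I < sizeof...(Tp), void>::type
    PrintAll(std::tuple<Tp...>& t)
    {
      std::cout << std::get<I>(t) << std::endl;
      PrintAll<I + 1, Tp...>(t);
    }

    int main()
    {
      std::tuple<int, double, char> t(1, 2.5, 'x');
      PrintAll(t);  // prints 1, 2.5 and x on separate lines
    }
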
diff --git a/src/mlpack/methods/ann/layer/neuron_layer.hpp b/src/mlpack/methods/ann/layer/neuron_layer.hpp
deleted file mode 100644
index 2f0697e..0000000
--- a/src/mlpack/methods/ann/layer/neuron_layer.hpp
+++ /dev/null
@@ -1,291 +0,0 @@
-/**
- * @file neuron_layer.hpp
- * @author Marcus Edel
- * @author Shangtong Zhang
- *
- * Definition of the NeuronLayer class, which implements a standard network
- * layer.
- */
-#ifndef __MLPACK_METHODS_ANN_LAYER_NEURON_LAYER_HPP
-#define __MLPACK_METHODS_ANN_LAYER_NEURON_LAYER_HPP
-
-#include <mlpack/core.hpp>
-#include <mlpack/methods/ann/layer/layer_traits.hpp>
-#include <mlpack/methods/ann/activation_functions/logistic_function.hpp>
-#include <mlpack/methods/ann/activation_functions/identity_function.hpp>
-#include <mlpack/methods/ann/activation_functions/rectifier_function.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * An implementation of a standard network layer.
- *
- * This class allows the specification of the type of the activation function.
- *
- * A few convenience typedefs are given:
- *
- * - InputLayer
- * - HiddenLayer
- * - ReluLayer
- * - ConvLayer
- * - PoolingLayer
- *
- * @tparam ActivationFunction Activation function used for the layer.
- * @tparam DataType Type of data (arma::colvec, arma::mat, arma::sp_mat or
- *         arma::cube).
- */
-template <
- class ActivationFunction = LogisticFunction,
- typename DataType = arma::colvec
->
-class NeuronLayer
-{
- public:
- /**
- * Create the NeuronLayer object using the specified number of neurons.
- *
- * @param layerSize The number of neurons.
- */
- NeuronLayer(const size_t layerSize) :
- inputActivations(arma::zeros<DataType>(layerSize)),
- delta(arma::zeros<DataType>(layerSize)),
- layerRows(layerSize),
- layerCols(1),
- layerSlices(1),
- outputMaps(1)
- {
- // Nothing to do here.
- }
-
- /**
-   * Create a 2-dimensional NeuronLayer object using the specified rows and
- * columns. In this case, DataType must be arma::mat or arma::sp_mat.
- *
- * @param layerRows The number of rows of neurons.
- * @param layerCols The number of columns of neurons.
- */
- NeuronLayer(const size_t layerRows, const size_t layerCols) :
- inputActivations(arma::zeros<DataType>(layerRows, layerCols)),
- delta(arma::zeros<DataType>(layerRows, layerCols)),
- layerRows(layerRows),
- layerCols(layerCols),
- layerSlices(1),
- outputMaps(1)
- {
- // Nothing to do here.
- }
-
- /**
-   * Create an n-dimensional NeuronLayer object using the specified rows,
-   * columns and number of slices. In this case, DataType must be arma::cube.
-   *
-   * @param layerRows The number of rows of neurons.
-   * @param layerCols The number of columns of neurons.
-   * @param layerSlices The number of slices of neurons.
-   * @param outputMaps The number of output maps.
- */
- NeuronLayer(const size_t layerRows,
- const size_t layerCols,
- const size_t layerSlices,
- const size_t outputMaps = 1) :
- inputActivations(arma::zeros<DataType>(layerRows, layerCols,
- layerSlices * outputMaps)),
- delta(arma::zeros<DataType>(layerRows, layerCols,
- layerSlices * outputMaps)),
- layerRows(layerRows),
- layerCols(layerCols),
- layerSlices(layerSlices),
- outputMaps(outputMaps)
- {
- // Nothing to do here.
- }
-
- /**
- * Ordinary feed forward pass of a neural network, evaluating the function
- * f(x) by propagating the activity forward through f.
- *
- * @param inputActivation Input data used for evaluating the specified
- * activity function.
- * @param outputActivation Data to store the resulting output activation.
- */
- void FeedForward(const DataType& inputActivation,
- DataType& outputActivation)
- {
- ActivationFunction::fn(inputActivation, outputActivation);
- }
-
- /**
- * Ordinary feed backward pass of a neural network, calculating the function
-   * f(x) by propagating x backwards through f, using the results from the
-   * feed forward pass.
- *
- * @param inputActivation Input data used for calculating the function f(x).
- * @param error The backpropagated error.
-   * @param delta The calculated delta, using the partial derivative of the
-   * error with respect to a weight.
- */
- void FeedBackward(const DataType& inputActivation,
- const DataType& error,
- DataType& delta)
- {
- DataType derivative;
- ActivationFunction::deriv(inputActivation, derivative);
- delta = error % derivative;
- }
-
-
- /**
-   * Ordinary feed backward pass of a neural network, using 3rd-order tensors
-   * as input, calculating the function f(x) by propagating x backwards
-   * through f, using the results from the feed forward pass.
- *
- * @param inputActivation Input data used for calculating the function f(x).
- * @param error The backpropagated error.
-   * @param delta The calculated delta, using the partial derivative of the
-   * error with respect to a weight.
- */
- template<typename eT>
- void FeedBackward(const arma::Cube<eT>& inputActivation,
- const arma::Mat<eT>& error,
- arma::Cube<eT>& delta)
- {
- // Generate a cube from the error matrix.
- arma::Cube<eT> mappedError = arma::zeros<arma::cube>(inputActivation.n_rows,
- inputActivation.n_cols, inputActivation.n_slices);
-
-    for (size_t s = 0, j = 0; s < mappedError.n_slices; s += error.n_cols, j++)
- {
- for (size_t i = 0; i < error.n_cols; i++)
- {
- arma::Col<eT> temp = error.col(i).subvec(
- j * inputActivation.n_rows * inputActivation.n_cols,
- (j + 1) * inputActivation.n_rows * inputActivation.n_cols - 1);
-
- mappedError.slice(s + i) = arma::Mat<eT>(temp.memptr(),
- inputActivation.n_rows, inputActivation.n_cols);
- }
- }
-
- arma::Cube<eT> derivative;
- ActivationFunction::deriv(inputActivation, derivative);
- delta = mappedError % derivative;
- }
-
- //! Get the input activations.
-  const DataType& InputActivation() const { return inputActivations; }
- //! Modify the input activations.
- DataType& InputActivation() { return inputActivations; }
-
-  //! Get the delta.
-  const DataType& Delta() const { return delta; }
- //! Modify the delta.
- DataType& Delta() { return delta; }
-
- //! Get input size.
- size_t InputSize() const { return layerRows; }
-  //! Modify the input size.
- size_t& InputSize() { return layerRows; }
-
- //! Get output size.
- size_t OutputSize() const { return layerRows; }
- //! Modify the output size.
- size_t& OutputSize() { return layerRows; }
-
- //! Get the number of layer rows.
- size_t LayerRows() const { return layerRows; }
- //! Modify the number of layer rows.
- size_t& LayerRows() { return layerRows; }
-
- //! Get the number of layer columns.
- size_t LayerCols() const { return layerCols; }
- //! Modify the number of layer columns.
- size_t& LayerCols() { return layerCols; }
-
- //! Get the number of layer slices.
- size_t LayerSlices() const { return layerSlices; }
-
- //! Get the number of output maps.
- size_t OutputMaps() const { return outputMaps; }
-
-  //! Get the value of the deterministic parameter.
-  bool Deterministic() const { return deterministic; }
-  //! Modify the value of the deterministic parameter.
-  bool& Deterministic() { return deterministic; }
-
- private:
- //! Locally-stored input activation object.
- DataType inputActivations;
-
- //! Locally-stored delta object.
- DataType delta;
-
- //! Locally-stored number of layer rows.
- size_t layerRows;
-
- //! Locally-stored number of layer cols.
- size_t layerCols;
-
- //! Locally-stored number of layer slices.
- size_t layerSlices;
-
- //! Locally-stored number of output maps.
- size_t outputMaps;
-
- //! Locally-stored deterministic parameter.
- bool deterministic;
-}; // class NeuronLayer
-
-// Convenience typedefs.
-
-/**
- * Standard Input-Layer using the logistic activation function.
- */
-template <
- class ActivationFunction = LogisticFunction,
- typename DataType = arma::colvec
->
-using InputLayer = NeuronLayer<ActivationFunction, DataType>;
-
-/**
- * Standard Hidden-Layer using the logistic activation function.
- */
-template <
- class ActivationFunction = LogisticFunction,
- typename DataType = arma::colvec
->
-using HiddenLayer = NeuronLayer<ActivationFunction, DataType>;
-
-/**
- * Layer of rectified linear units (relu) using the rectifier activation
- * function.
- */
-template <
- class ActivationFunction = RectifierFunction,
- typename DataType = arma::colvec
->
-using ReluLayer = NeuronLayer<ActivationFunction, DataType>;
-
-/**
- * Convolution layer using the logistic activation function.
- */
-template <
- class ActivationFunction = LogisticFunction,
- typename DataType = arma::cube
->
-using ConvLayer = NeuronLayer<ActivationFunction, DataType>;
-
-/**
- * Pooling layer using the identity activation function.
- */
-template <
- class ActivationFunction = IdentityFunction,
- typename DataType = arma::cube
->
-using PoolingLayer = NeuronLayer<ActivationFunction, DataType>;
-
-
-}; // namespace ann
-}; // namespace mlpack
-
-#endif
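
The heart of the removed NeuronLayer is small: the forward pass applies an element-wise activation function, and the backward pass multiplies the incoming error element-wise with the activation derivative. A standalone Armadillo sketch of that contract, hard-coding the logistic function in place of the removed ActivationFunction policy classes:

    #include <armadillo>

    int main()
    {
      arma::colvec input = arma::randn<arma::colvec>(5);

      // Forward pass: element-wise logistic activation f(x) = 1 / (1 + e^-x).
      arma::colvec activation = 1.0 / (1.0 + arma::exp(-input));

      // Backward pass: delta = error % f'(x), where % is the element-wise
      // product and f'(x) = f(x) * (1 - f(x)) for the logistic function.
      arma::colvec error = arma::randn<arma::colvec>(5);
      arma::colvec delta = error % (activation % (1.0 - activation));

      delta.print("delta:");
    }
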
diff --git a/src/mlpack/methods/ann/optimizer/irpropm.hpp b/src/mlpack/methods/ann/optimizer/irpropm.hpp
deleted file mode 100644
index ef848fd..0000000
--- a/src/mlpack/methods/ann/optimizer/irpropm.hpp
+++ /dev/null
@@ -1,94 +0,0 @@
-/**
- * @file irpropm.hpp
- * @author Marcus Edel
- *
- * Implementation of the iRPROP- optimizer, a resilient backpropagation
- * variant that adapts a separate step size for each weight based on the sign
- * of the gradient.
- */
-#ifndef __MLPACK_METHODS_ANN_OPTIMIZER_IRPROPM_HPP
-#define __MLPACK_METHODS_ANN_OPTIMIZER_IRPROPM_HPP
-
-#include <mlpack/core.hpp>
-#include <boost/math/special_functions/sign.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class implements the iRPROP- weight update rule.
- *
- * @tparam MatType Type of matrix (should be arma::mat or arma::sp_mat).
- */
-template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class iRPROPm
-{
- public:
-  /**
-   * Initialize the iRPROP- optimizer with the given parameters.
-   *
-   * @param cols The number of columns of the weight matrix.
-   * @param rows The number of rows of the weight matrix.
-   * @param etaMin Factor by which a step size is decreased.
-   * @param etaPlus Factor by which a step size is increased.
-   * @param minDelta Lower bound on the step sizes.
-   * @param maxDelta Upper bound on the step sizes.
-   */
- iRPROPm(const size_t cols,
- const size_t rows,
- const double etaMin = 0.5,
- const double etaPlus = 1.2,
- const double minDelta = 1e-9,
- const double maxDelta = 50) :
- etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
- {
- prevDerivs = arma::zeros<MatType>(rows, cols);
- prevDelta = arma::zeros<MatType>(rows, cols);
-
- prevError = arma::datum::inf;
- }
-
- void UpdateWeights(MatType& weights,
- const MatType& gradient,
- const double /* unused */)
- {
- MatType derivs = gradient % prevDerivs;
-
- for (size_t i(0); i < derivs.n_cols; i++)
- {
- for (size_t j(0); j < derivs.n_rows; j++)
- {
- if (derivs(j, i) >= 0)
- {
- prevDelta(j, i) = std::min(prevDelta(j, i) * etaPlus, maxDelta);
- prevDerivs(j, i) = gradient(j, i);
- }
- else
- {
- prevDelta(j, i) = std::max(prevDelta(j, i) * etaMin, minDelta);
- prevDerivs(j, i) = 0;
- }
- }
- }
-
- weights -= arma::sign(gradient) % prevDelta;
- }
-
- private:
-  //! Factor by which a step size is decreased.
-  const double etaMin;
-
-  //! Factor by which a step size is increased.
-  const double etaPlus;
-
-  //! Lower bound on the step sizes.
-  const double minDelta;
-
-  //! Upper bound on the step sizes.
-  const double maxDelta;
-
-  //! The overall error of the previous iteration.
-  double prevError;
-
-  //! The per-weight step sizes of the previous iteration.
-  MatType prevDelta;
-
-  //! The gradients of the previous iteration.
-  MatType prevDerivs;
-}; // class iRPROPm
-
-}; // namespace ann
-}; // namespace mlpack
-
-#endif
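
The update rule above is easier to read without the class scaffolding. A condensed standalone sketch of one iRPROP- step that mirrors the logic of the removed class; the function name and the flat element indexing are this sketch's own, not mlpack API:

    #include <algorithm>
    #include <armadillo>

    // Grow the per-weight step size where the gradient kept its sign, shrink
    // it (and suppress the stored derivative) where the sign flipped, then
    // move every weight against the sign of its gradient.
    void IRpropMinusStep(arma::mat& weights,
                         const arma::mat& gradient,
                         arma::mat& prevDerivs,
                         arma::mat& prevDelta,
                         const double etaMin = 0.5,
                         const double etaPlus = 1.2,
                         const double minDelta = 1e-9,
                         const double maxDelta = 50)
    {
      for (arma::uword i = 0; i < gradient.n_elem; ++i)
      {
        if (gradient(i) * prevDerivs(i) >= 0)
        {
          prevDelta(i) = std::min(prevDelta(i) * etaPlus, maxDelta);
          prevDerivs(i) = gradient(i);
        }
        else
        {
          prevDelta(i) = std::max(prevDelta(i) * etaMin, minDelta);
          prevDerivs(i) = 0;
        }
      }

      weights -= arma::sign(gradient) % prevDelta;
    }
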
diff --git a/src/mlpack/methods/ann/optimizer/irpropp.hpp b/src/mlpack/methods/ann/optimizer/irpropp.hpp
deleted file mode 100644
index a0deac5..0000000
--- a/src/mlpack/methods/ann/optimizer/irpropp.hpp
+++ /dev/null
@@ -1,110 +0,0 @@
-/**
- * @file irpropp.hpp
- * @author Marcus Edel
- *
- * Implementation of the iRPROP+ optimizer, a resilient backpropagation
- * variant with weight backtracking that adapts a separate step size for each
- * weight.
- */
-#ifndef __MLPACK_METHODS_ANN_OPTIMIZER_IRPROPP_HPP
-#define __MLPACK_METHODS_ANN_OPTIMIZER_IRPROPP_HPP
-
-#include <mlpack/core.hpp>
-#include <boost/math/special_functions/sign.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class implements the iRPROP+ weight update rule.
- *
- * @tparam MatType Type of matrix (should be arma::mat or arma::sp_mat).
- */
-template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class iRPROPp
-{
- public:
-  /**
-   * Initialize the iRPROP+ optimizer with the given parameters.
-   *
-   * @param cols The number of columns of the weight matrix.
-   * @param rows The number of rows of the weight matrix.
-   * @param etaMin Factor by which a step size is decreased.
-   * @param etaPlus Factor by which a step size is increased.
-   * @param minDelta Lower bound on the step sizes.
-   * @param maxDelta Upper bound on the step sizes.
-   * @param initialUpdate Initial value of the per-weight step sizes.
-   */
- iRPROPp(const size_t cols,
- const size_t rows,
- const double etaMin = 0.5,
- const double etaPlus = 1.2,
- const double minDelta = 1e-9,
- const double maxDelta = 50,
- const double initialUpdate = 0.1) :
-    etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta),
-    prevError(arma::datum::inf)
- {
- prevDerivs = arma::zeros<MatType>(rows, cols);
- prevWeightChange = arma::zeros<MatType>(rows, cols);
-
- updateValues = arma::ones<MatType>(rows, cols);
- updateValues.fill(initialUpdate);
- }
-
- void UpdateWeights(MatType& weights,
- const MatType& gradient,
- const double error)
- {
- MatType derivs = gradient % prevDerivs;
-
- for (size_t i(0); i < derivs.n_cols; i++)
- {
- for (size_t j(0); j < derivs.n_rows; j++)
- {
- if (derivs(j, i) > 0)
- {
- updateValues(j, i) = std::min(updateValues(j, i) * etaPlus, maxDelta);
-          prevWeightChange(j, i) = boost::math::sign(gradient(j, i)) *
-              updateValues(j, i);
- prevDerivs(j, i) = gradient(j, i);
- }
- else if (derivs(j, i) < 0)
- {
- updateValues(j, i) = std::max(updateValues(j, i) * etaMin, minDelta);
- prevDerivs(j, i) = 0;
-
- if (error < prevError)
- prevWeightChange(j, i) = 0;
- }
- else
- {
-          prevWeightChange(j, i) = boost::math::sign(gradient(j, i)) *
-              updateValues(j, i);
- prevDerivs(j, i) = gradient(j, i);
- }
-
- weights(j, i) -= prevWeightChange(j, i);
- }
- }
- }
-
-
- private:
-  //! Factor by which a step size is decreased.
-  const double etaMin;
-
-  //! Factor by which a step size is increased.
-  const double etaPlus;
-
-  //! Lower bound on the step sizes.
-  const double minDelta;
-
-  //! Upper bound on the step sizes.
-  const double maxDelta;
-
-  //! The overall error of the previous iteration.
-  double prevError;
-
-  //! The current per-weight step sizes.
-  MatType updateValues;
-
-  //! The weight changes of the previous iteration.
-  MatType prevWeightChange;
-
-  //! The gradients of the previous iteration.
-  MatType prevDerivs;
-}; // class iRPROPp
-
-}; // namespace ann
-}; // namespace mlpack
-
-#endif
-
-
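
The '+' variants add weight backtracking. In the standard iRPROP+ formulation (Igel & Hüsken, 2000), a gradient sign flip shrinks the step size, zeroes the stored derivative, and reverts the previous change to that weight only when the overall error increased; the removed class expresses the same idea with slightly different bookkeeping. A standalone sketch of the published rule, with the same caveat that the names are this sketch's own:

    #include <algorithm>
    #include <armadillo>

    void IRpropPlusStep(arma::mat& weights,
                        const arma::mat& gradient,
                        arma::mat& prevDerivs,
                        arma::mat& updateValues,
                        arma::mat& prevWeightChange,
                        const double error,
                        double& prevError,
                        const double etaMin = 0.5,
                        const double etaPlus = 1.2,
                        const double minDelta = 1e-9,
                        const double maxDelta = 50)
    {
      for (arma::uword i = 0; i < gradient.n_elem; ++i)
      {
        if (gradient(i) * prevDerivs(i) < 0)
        {
          // Sign flip: shrink the step size and suppress the derivative.
          updateValues(i) = std::max(updateValues(i) * etaMin, minDelta);
          prevDerivs(i) = 0;

          // Backtracking: undo the previous change to this weight, but
          // only if the overall error went up.
          if (error > prevError)
            weights(i) -= prevWeightChange(i);
          prevWeightChange(i) = 0;
        }
        else
        {
          // Sign kept (or first step): possibly grow the step size, then
          // step against the gradient direction.
          if (gradient(i) * prevDerivs(i) > 0)
            updateValues(i) = std::min(updateValues(i) * etaPlus, maxDelta);

          const double sign = (gradient(i) > 0) - (gradient(i) < 0);
          prevWeightChange(i) = -sign * updateValues(i);
          weights(i) += prevWeightChange(i);
          prevDerivs(i) = gradient(i);
        }
      }

      prevError = error;
    }
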
diff --git a/src/mlpack/methods/ann/optimizer/rpropm.hpp b/src/mlpack/methods/ann/optimizer/rpropm.hpp
deleted file mode 100644
index 468a1d3..0000000
--- a/src/mlpack/methods/ann/optimizer/rpropm.hpp
+++ /dev/null
@@ -1,88 +0,0 @@
-/**
- * @file rpropm.hpp
- * @author Marcus Edel
- *
- * Implementation of the RPROP- optimizer, the basic resilient
- * backpropagation variant without weight backtracking.
- */
-#ifndef __MLPACK_METHODS_ANN_OPTIMIZER_RPROPM_HPP
-#define __MLPACK_METHODS_ANN_OPTIMIZER_RPROPM_HPP
-
-#include <mlpack/core.hpp>
-#include <boost/math/special_functions/sign.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class implements the RPROP- weight update rule.
- *
- * @tparam MatType Type of matrix (should be arma::mat or arma::sp_mat).
- */
-template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class RPROPm
-{
- public:
-  /**
-   * Initialize the RPROP- optimizer with the given parameters.
-   *
-   * @param cols The number of columns of the weight matrix.
-   * @param rows The number of rows of the weight matrix.
-   * @param etaMin Factor by which a step size is decreased.
-   * @param etaPlus Factor by which a step size is increased.
-   * @param minDelta Lower bound on the step sizes.
-   * @param maxDelta Upper bound on the step sizes.
-   */
- RPROPm(const size_t cols,
- const size_t rows,
- const double etaMin = 0.5,
- const double etaPlus = 1.2,
- const double minDelta = 1e-9,
- const double maxDelta = 50) :
- etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
- {
- prevDerivs = arma::zeros<MatType>(rows, cols);
- prevDelta = arma::zeros<MatType>(rows, cols);
- }
-
- void UpdateWeights(MatType& weights,
- const MatType& gradient,
- const double /* unused */)
- {
- MatType derivs = gradient % prevDerivs;
-
- for (size_t i(0); i < derivs.n_cols; i++)
- {
- for (size_t j(0); j < derivs.n_rows; j++)
- {
- if (derivs(j, i) > 0)
- prevDelta(j, i) = std::min(prevDelta(j, i) * etaPlus, maxDelta);
- else
- prevDelta(j, i) = std::max(prevDelta(j, i) * etaMin, minDelta);
- }
- }
-
- weights -= arma::sign(gradient) % prevDelta;
- prevDerivs = gradient;
- }
-
-
- private:
-  //! Factor by which a step size is decreased.
-  const double etaMin;
-
-  //! Factor by which a step size is increased.
-  const double etaPlus;
-
-  //! Lower bound on the step sizes.
-  const double minDelta;
-
-  //! Upper bound on the step sizes.
-  const double maxDelta;
-
-  //! The per-weight step sizes of the previous iteration.
-  MatType prevDelta;
-
-  //! The gradients of the previous iteration.
-  MatType prevDerivs;
-}; // class RPROPm
-
-}; // namespace ann
-}; // namespace mlpack
-
-#endif
-
-
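
To see the family in action, here is a toy driver for the IRpropMinusStep sketch above, minimizing a simple quadratic. Note that the rule needs no learning rate, only gradient signs; the RPROP+ variant removed in the next file adds the backtracking shown earlier:

    #include <armadillo>
    #include <iostream>

    // Assumes the IRpropMinusStep() sketch from above is in scope.
    int main()
    {
      arma::mat weights = arma::randu<arma::mat>(3, 3) - 0.5;
      arma::mat prevDerivs = arma::zeros<arma::mat>(3, 3);
      arma::mat prevDelta(3, 3);
      prevDelta.fill(0.1);  // initial per-weight step size

      // Minimize f(W) = 0.5 * ||W||^2; the gradient of f is simply W.
      for (int iter = 0; iter < 100; ++iter)
      {
        arma::mat gradient = weights;
        IRpropMinusStep(weights, gradient, prevDerivs, prevDelta);
      }

      std::cout << "final norm: " << arma::norm(weights, "fro") << std::endl;
    }
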
diff --git a/src/mlpack/methods/ann/optimizer/rpropp.hpp b/src/mlpack/methods/ann/optimizer/rpropp.hpp
deleted file mode 100644
index bf4eb29..0000000
--- a/src/mlpack/methods/ann/optimizer/rpropp.hpp
+++ /dev/null
@@ -1,103 +0,0 @@
-/**
- * @file rpropp.hpp
- * @author Marcus Edel
- *
- * Implementation of the RPROP+ optimizer, the resilient backpropagation
- * variant with weight backtracking.
- */
-#ifndef __MLPACK_METHODS_ANN_OPTIMIZER_RPROPP_HPP
-#define __MLPACK_METHODS_ANN_OPTIMIZER_RPROPP_HPP
-
-#include <mlpack/core.hpp>
-#include <boost/math/special_functions/sign.hpp>
-
-namespace mlpack {
-namespace ann /** Artificial Neural Network. */ {
-
-/**
- * This class implements the RPROP+ weight update rule.
- *
- * @tparam MatType Type of matrix (should be arma::mat or arma::sp_mat).
- */
-template<typename MatType = arma::mat, typename VecType = arma::rowvec>
-class RPROPp
-{
- public:
-  /**
-   * Initialize the RPROP+ optimizer with the given parameters.
-   *
-   * @param cols The number of columns of the weight matrix.
-   * @param rows The number of rows of the weight matrix.
-   * @param etaMin Factor by which a step size is decreased.
-   * @param etaPlus Factor by which a step size is increased.
-   * @param minDelta Lower bound on the step sizes.
-   * @param maxDelta Upper bound on the step sizes.
-   * @param initialUpdate Initial value of the per-weight step sizes.
-   */
- RPROPp(const size_t cols,
- const size_t rows,
- const double etaMin = 0.5,
- const double etaPlus = 1.2,
- const double minDelta = 1e-9,
- const double maxDelta = 50,
- const double initialUpdate = 0.1) :
- etaMin(etaMin), etaPlus(etaPlus), minDelta(minDelta), maxDelta(maxDelta)
- {
- prevDerivs = arma::zeros<MatType>(rows, cols);
- prevWeightChange = arma::zeros<MatType>(rows, cols);
-
- updateValues = arma::ones<MatType>(rows, cols);
- updateValues.fill(initialUpdate);
- }
-
- void UpdateWeights(MatType& weights,
- const MatType& gradient,
- const double /* unused */)
- {
- MatType derivs = gradient % prevDerivs;
-
- for (size_t i(0); i < derivs.n_cols; i++)
- {
- for (size_t j(0); j < derivs.n_rows; j++)
- {
- if (derivs(j, i) > 0)
- {
- updateValues(j, i) = std::min(updateValues(j, i) * etaPlus, maxDelta);
-          prevWeightChange(j, i) = boost::math::sign(gradient(j, i)) *
-              updateValues(j, i);
- prevDerivs(j, i) = gradient(j, i);
- }
- else if (derivs(j, i) < 0)
- {
- updateValues(j, i) = std::max(updateValues(j, i) * etaMin, minDelta);
- prevDerivs(j, i) = 0;
- }
- else
- {
-          prevWeightChange(j, i) = boost::math::sign(gradient(j, i)) *
-              updateValues(j, i);
- prevDerivs(j, i) = gradient(j, i);
- }
-
- weights(j, i) -= prevWeightChange(j, i);
- }
- }
- }
-
-
- private:
-  //! Factor by which a step size is decreased.
-  const double etaMin;
-
-  //! Factor by which a step size is increased.
-  const double etaPlus;
-
-  //! Lower bound on the step sizes.
-  const double minDelta;
-
-  //! Upper bound on the step sizes.
-  const double maxDelta;
-
-  //! The current per-weight step sizes.
-  MatType updateValues;
-
-  //! The weight changes of the previous iteration.
-  MatType prevWeightChange;
-
-  //! The gradients of the previous iteration.
-  MatType prevDerivs;
-}; // class RPROPp
-
-}; // namespace ann
-}; // namespace mlpack
-
-#endif