[mlpack-git] master: Use the correct number of feature maps in the network description (thanks Ngap wei Tham for pointing it out). (1190791)

gitdub at big.cc.gt.atl.ga.us
Sat Oct 17 09:19:14 EDT 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/54f77d383ddb8546c6615d0c4aca29f18758ded2...1190791f07f3e1f2080f26a5c894f90df46f5316

>---------------------------------------------------------------

commit 1190791f07f3e1f2080f26a5c894f90df46f5316
Author: Marcus Edel <marcus.edel at fu-berlin.de>
Date:   Sat Oct 17 15:19:07 2015 +0200

    Use the correct number of feature maps in the network description (thanks Ngap wei Tham for pointing it out).


>---------------------------------------------------------------

1190791f07f3e1f2080f26a5c894f90df46f5316
 src/mlpack/tests/convolutional_network_test.cpp | 12 ++++++++----
 1 file changed, 8 insertions(+), 4 deletions(-)

diff --git a/src/mlpack/tests/convolutional_network_test.cpp b/src/mlpack/tests/convolutional_network_test.cpp
index 5a2840c..bd20116 100644
--- a/src/mlpack/tests/convolutional_network_test.cpp
+++ b/src/mlpack/tests/convolutional_network_test.cpp
@@ -71,7 +71,7 @@ void BuildVanillaNetwork()
 
   /*
    * Construct a convolutional neural network with a 28x28x1 input layer,
-   * 24x24x6 convolution layer, 12x12x6 pooling layer, 8x8x12 convolution layer
+   * 24x24x8 convolution layer, 12x12x8 pooling layer, 8x8x12 convolution layer
    * and a 4x4x12 pooling layer which is fully connected with the output layer.
    * The network structure looks like:
    *
@@ -92,6 +92,9 @@ void BuildVanillaNetwork()
   BaseLayer2D<PerformanceFunction> baseLayer0;
   PoolingLayer<> poolingLayer0(2);
 
+
+
+
   ConvLayer<RMSPROP> convLayer1(8, 12, 5, 5);
   BiasLayer2D<RMSPROP, ZeroInitialization> biasLayer1(12);
   BaseLayer2D<PerformanceFunction> baseLayer1;
@@ -162,9 +165,10 @@ void BuildVanillaDropoutNetwork()
 
   /*
    * Construct a convolutional neural network with a 28x28x1 input layer,
-   * 24x24x6 convolution layer, 12x12x6 pooling layer, 8x8x12 convolution layer,
-   * 8x8x12 Dropout Layer and a 4x4x12 pooling layer which is fully connected
-   * with the output layer. The network structure looks like:
+   * 24x24x4 convolution layer, 24x24x4 dropout layer, 12x12x4 pooling layer,
+   * 8x8x8 convolution layer, 8x8x8 dropout layer and a 4x4x8 pooling layer
+   * which is fully connected with the output layer. The network structure
+   * looks like:
    *
    * Input    Convolution  Dropout      Pooling     Convolution,     Output
    * Layer    Layer        Layer        Layer       Dropout,         Layer


