[mlpack-git] master: Fix some non-compiling Doxygen Latex documentation. (71ea42f)

gitdub at big.cc.gt.atl.ga.us
Mon Aug 17 10:24:47 EDT 2015


Repository : https://github.com/mlpack/mlpack

On branch  : master
Link       : https://github.com/mlpack/mlpack/compare/3e452e0697b862783efae7adf055b7aa48dc09fc...71ea42ffea6f3816c642a8052e7443f526bc5f14

>---------------------------------------------------------------

commit 71ea42ffea6f3816c642a8052e7443f526bc5f14
Author: Ryan Curtin <ryan at ratml.org>
Date:   Mon Aug 17 10:24:32 2015 -0400

    Fix some non-compiling Doxygen Latex documentation.


>---------------------------------------------------------------

71ea42ffea6f3816c642a8052e7443f526bc5f14
 .../ann/activation_functions/identity_function.hpp        |  4 ++--
 .../ann/activation_functions/logistic_function.hpp        |  4 ++--
 .../ann/activation_functions/rectifier_function.hpp       |  6 +++---
 .../ann/activation_functions/softsign_function.hpp        | 10 +++++-----
 .../methods/ann/activation_functions/tanh_function.hpp    | 10 +++++-----
 .../ann/init_rules/kathirvalavakumar_subavathi_init.hpp   | 15 ++++++++-------
 src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp  |  6 +++---
 src/mlpack/methods/ann/init_rules/oivs_init.hpp           | 12 ++++++------
 src/mlpack/methods/ann/optimizer/ada_delta.hpp            | 10 +++++-----
 src/mlpack/methods/ann/optimizer/rmsprop.hpp              |  6 +++---
 10 files changed, 42 insertions(+), 41 deletions(-)
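
Since the same pattern repeats across all ten files, here is a minimal standalone sketch of the corrected markup. The ExampleFunction class and its members are hypothetical and not part of this commit; only the Doxygen/LaTeX conventions mirror the change: multi-line aligned formulas need the @f{eqnarray*}{ ... @f} block form (the plain @f[ ... @f] form wraps a single displayed equation, where the &=& alignment markers do not compile), and every \left\{ must be balanced by \right. including the trailing dot.

#include <algorithm>

/**
 * A rectifier-style function, used here only to illustrate the fixed
 * Doxygen LaTeX syntax, defined by
 *
 * @f{eqnarray*}{
 * f(x) &=& \max(0, x) \\
 * f'(x) &=& \left\{
 *   \begin{array}{lr}
 *     1 & : x > 0 \\
 *     0 & : x \le 0
 *   \end{array}
 * \right.
 * @f}
 */
class ExampleFunction
{
 public:
  //! Evaluate the function at x.
  static double Fn(const double x) { return std::max(0.0, x); }

  //! Evaluate the derivative at x.
  static double Deriv(const double x) { return (x > 0.0) ? 1.0 : 0.0; }
};
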

diff --git a/src/mlpack/methods/ann/activation_functions/identity_function.hpp b/src/mlpack/methods/ann/activation_functions/identity_function.hpp
index 62f218a..7160ee0 100644
--- a/src/mlpack/methods/ann/activation_functions/identity_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/identity_function.hpp
@@ -15,10 +15,10 @@ namespace ann /** Artificial Neural Network. */ {
 /**
  * The identity function, defined by
  *
- * @f[
+ * @f{eqnarray*}{
  * f(x) &=& x \\
  * f'(x) &=& 1
- * @f]
+ * @f}
  */
 class IdentityFunction
 {
diff --git a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
index aa37375..e8b5d60 100644
--- a/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/logistic_function.hpp
@@ -15,11 +15,11 @@ namespace ann /** Artificial Neural Network. */ {
 /**
  * The logistic function, defined by
  *
- * @f[
+ * @f{eqnarray*}{
  * f(x) &=& \frac{1}{1 + e^{-x}} \\
  * f'(x) &=& f(x) * (1 - f(x)) \\
  * f^{-1}(y) &=& ln(\frac{y}{1-y})
- * @f]
+ * @f}
  */
 class LogisticFunction
 {
diff --git a/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp b/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp
index ba14a14..593b4ec 100644
--- a/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/rectifier_function.hpp
@@ -27,15 +27,15 @@ namespace ann /** Artificial Neural Network. */ {
 /**
  * The rectifier function, defined by
  *
- * @f[
+ * @f{eqnarray*}{
  * f(x) &=& \max(0, x) \\
  * f'(x) &=& \left\{
  *   \begin{array}{lr}
  *     1 & : x > 0 \\
  *     0 & : x \le 0
  *   \end{array}
- * \right
- * @f]
+ * \right.
+ * @f}
  */
 class RectifierFunction
 {
diff --git a/src/mlpack/methods/ann/activation_functions/softsign_function.hpp b/src/mlpack/methods/ann/activation_functions/softsign_function.hpp
index 48699eb..75f25b5 100644
--- a/src/mlpack/methods/ann/activation_functions/softsign_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/softsign_function.hpp
@@ -28,16 +28,16 @@ namespace ann /** Artificial Neural Network. */ {
 /**
  * The softsign function, defined by
  *
- * @f[
- * f(x) &=& \frac{x}{1 + \abs{x}} \\
- * f'(x) &=& (1 - \abs{x})^2
+ * @f{eqnarray*}{
+ * f(x) &=& \frac{x}{1 + |x|} \\
+ * f'(x) &=& (1 - |x|)^2 \\
  * f(x) &=& \left\{
  *   \begin{array}{lr}
  *     -\frac{y}{y-1} & : x > 0 \\
  *     \frac{x}{1 + x} & : x \le 0
  *   \end{array}
- * \right
- * @f]
+ * \right.
+ * @f}
  */
 class SoftsignFunction
 {
diff --git a/src/mlpack/methods/ann/activation_functions/tanh_function.hpp b/src/mlpack/methods/ann/activation_functions/tanh_function.hpp
index 92ff455..55a3c12 100644
--- a/src/mlpack/methods/ann/activation_functions/tanh_function.hpp
+++ b/src/mlpack/methods/ann/activation_functions/tanh_function.hpp
@@ -15,11 +15,11 @@ namespace ann /** Artificial Neural Network. */ {
 /**
  * The tanh function, defined by
  *
- * @f[
- * f(x) &=& \frac{e^x - e^{-x}{e^x + e^{-x}}} \\
- * f'(x) &=& 1 - tanh^2(x) \\
- * f^{-1}(x) &=& atan(x)
- * @f]
+ * @f{eqnarray*}{
+ * f(x) &=& \frac{e^x - e^{-x}}{e^x + e^{-x}} \\
+ * f'(x) &=& 1 - \tanh^2(x) \\
+ * f^{-1}(x) &=& \arctan(x)
+ * @f}
  */
 class TanhFunction
 {
diff --git a/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp b/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp
index c86f6af..c806fae 100644
--- a/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp
+++ b/src/mlpack/methods/ann/init_rules/kathirvalavakumar_subavathi_init.hpp
@@ -35,14 +35,15 @@ namespace ann /** Artificial Neural Network. */ {
  * by T. Kathirvalavakumar and S. Subavathi. The method is based on sensitivity
  * analysis using using cauchy’s inequality. The method is defined by
  *
- * @f[
- * \={s} &=& f^{-1}(\={t})
- * \Theta^{1}_{p} \le \={s} \sqrt{\frac{3}{I\sum_{i=1}^{I}(x_{ip}^2)}}
- * \Theta^1 = min(\Theta_{p}^{1}); p=1,2,..,P
- * -\Theta^{1} \le w_{i}^{1} \le \Theta^{1} \\
- * @f]
+ * @f{eqnarray*}{
+ * \overline{s} &=& f^{-1}(\overline{t}) \\
+ * \Theta^{1}_{p} &\le& \overline{s}
+ *     \sqrt{\frac{3}{I \sum_{i = 1}^{I} (x_{ip}^2)}} \\
+ * \Theta^1 &=& min(\Theta_{p}^{1}); p=1,2,..,P \\
+ * -\Theta^{1} \le w_{i}^{1} &\le& \Theta^{1}
+ * @f}
  *
- * Where I is the number of inputs including the bias, p refers the pattern
+ * where I is the number of inputs including the bias, p refers the pattern
  * considered in training, f is the transfer function and \={s} is the active
  * region in which the derivative of the activation function is greater than 4%
  * of the maximum derivatives.
diff --git a/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp b/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp
index eade5ed..39e9ebb 100644
--- a/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp
+++ b/src/mlpack/methods/ann/init_rules/nguyen_widrow_init.hpp
@@ -32,12 +32,12 @@ namespace ann /** Artificial Neural Network. */ {
  * This class is used to initialize the weight matrix with the Nguyen-Widrow
  * method. The method is defined by
  *
- * @f[
- * \gamma \le w_i \le \gamma \\
+ * @f{eqnarray*}{
+ * \gamma &\le& w_i \le \gamma \\
  * \beta &=& 0.7H^{\frac{1}{I}} \\
  * n &=& \sqrt{\sum_{i=0}{I}w_{i}^{2}} \\
  * w_i &=& \frac{\beta w_i}{n}
- * @f]
+ * @f}
  *
  * Where H is the number of neurons in the outgoing layer, I represents the
  * number of neurons in the ingoing layer and gamma defines the random interval
diff --git a/src/mlpack/methods/ann/init_rules/oivs_init.hpp b/src/mlpack/methods/ann/init_rules/oivs_init.hpp
index 4805c7d..f70e5ff 100644
--- a/src/mlpack/methods/ann/init_rules/oivs_init.hpp
+++ b/src/mlpack/methods/ann/init_rules/oivs_init.hpp
@@ -35,12 +35,12 @@ namespace ann /** Artificial Neural Network. */ {
  * method is based on the equations representing the characteristics of the
  * information transformation mechanism of a node. The method is defined by
  *
- * @f[
- * b = |f^{-1}(1 - \epsilon) - f^{-1}(\epsilon)| \\
- * \^w = \frac{b}{k \cdot n} \\
- * \gamma \le a_i \le \gamma \\
- * w_i = \^w \cdot \sqrt{a_i + 1}
- * @f]
+ * @f{eqnarray*}{
+ * b &=& |F^{-1}(1 - \epsilon) - f^{-1}(\epsilon)| \\
+ * \hat{w} &=& \frac{b}{k \cdot n} \\
+ * \gamma &\le& a_i \le \gamma \\
+ * w_i &=& \hat{w} \cdot \sqrt{a_i + 1}
+ * @f}
  *
  * Where f is the transfer function epsilon, k custom parameters, n the number of
  * neurons in the outgoing layer and gamma a parameter that defines the random
diff --git a/src/mlpack/methods/ann/optimizer/ada_delta.hpp b/src/mlpack/methods/ann/optimizer/ada_delta.hpp
index 791253d..9e8b8e5 100644
--- a/src/mlpack/methods/ann/optimizer/ada_delta.hpp
+++ b/src/mlpack/methods/ann/optimizer/ada_delta.hpp
@@ -19,13 +19,13 @@ namespace ann /** Artificial Neural Network. */ {
  * \f$ \gamma \f$ and a decay term \f$ \alpha \f$ we perform the following
  * updates:
  *
- * \f[
- *  g_t &=& (1 - \gamma)f'(\Delta_t)^2 + \gammag_{t - 1} \\
- *  \vec{\Delta} \Delta_t = \alpha \frac{\sqrt(s_{t-1} +
+ * \f{eqnarray*}{
+ *  g_t &=& (1 - \gamma)f'(\Delta_t)^2 + \gamma g_{t - 1} \\
+ *  \vec{\Delta} \Delta_t &=& \alpha \frac{\sqrt(s_{t-1} +
  *  \epsilon)}{\sqrt{g_t + \epsilon}} f'(\Delta_t) \\
  *  \Delta_{t + 1} &=& \Delta_t - \vec{\Delta} \Delta_t \\
- *  s_t &=& (1 - \gamma) \vec{\Delta} \Delta_t^2 + \gammas_{t - 1}
- * \f]
+ *  s_t &=& (1 - \gamma) \vec{\Delta} \Delta_t^2 + \gamma s_{t - 1}
+ * \f}
  *
  * For more information, see the following.
  *
diff --git a/src/mlpack/methods/ann/optimizer/rmsprop.hpp b/src/mlpack/methods/ann/optimizer/rmsprop.hpp
index d1b2a8c..a29be5a 100644
--- a/src/mlpack/methods/ann/optimizer/rmsprop.hpp
+++ b/src/mlpack/methods/ann/optimizer/rmsprop.hpp
@@ -18,11 +18,11 @@ namespace ann /** Artificial Neural Network. */ {
  * normalize the gradients. In its basic form, given a step rate \f$ \gamma \f$
  * and a decay term \f$ \alpha \f$ we perform the following updates:
  *
- * \f[
- * r_t &=& (1 - \gamma) f'(\Delta_t)^2 + \gammar_{t - 1} \\
+ * \f{eqnarray*}{
+ * r_t &=& (1 - \gamma) f'(\Delta_t)^2 + \gamma r_{t - 1} \\
  * v_{t + 1} &=& \frac{\alpha}{\sqrt{r_t}}f'(\Delta_t) \\
  * \Delta_{t + 1} &=& \Delta_t - v_{t + 1}
- * \f]
+ * \f}
  *
  * For more information, see the following.
  *


