#ifndef MLPACK_CORE_OPTIMIZERS_ADAM_ADAM_HPP
#define MLPACK_CORE_OPTIMIZERS_ADAM_ADAM_HPP

namespace optimization {
template<typename UpdateRule = AdamUpdate>
110 AdamType(
const double stepSize = 0.001,
111 const size_t batchSize = 32,
112 const double beta1 = 0.9,
113 const double beta2 = 0.999,
114 const double eps = 1e-8,
115 const size_t maxIterations = 100000,
116 const double tolerance = 1e-5,
117 const bool shuffle =
true);
129 template<
typename DecomposableFunctionType>
130 double Optimize(DecomposableFunctionType&
function, arma::mat& iterate)
132 return optimizer.
Optimize(
function, iterate);
#include "adam_impl.hpp"
bool Shuffle() const
Get whether or not the individual functions are shuffled.
double & Tolerance()
Modify the tolerance for termination.
bool & Shuffle()
Modify whether or not the individual functions are shuffled.
double Tolerance() const
Get the tolerance for termination.
double Beta1() const
Get the smoothing parameter.
const UpdatePolicyType & UpdatePolicy() const
Get the update policy.
double & Beta2()
Modify the second moment coefficient.
The core includes that mlpack expects: standard C++ includes and Armadillo.
size_t & BatchSize()
Modify the batch size.
size_t & MaxIterations()
Modify the maximum number of iterations (0 indicates no limit).
double Optimize(DecomposableFunctionType &function, arma::mat &iterate)
Optimize the given function using Adam.
double Epsilon() const
Get the value used to initialise the mean squared gradient parameter.
double Beta2() const
Get the second moment coefficient.
Adam is an optimizer that computes individual adaptive learning rates for different parameters from estimates of first and second moments of the gradients.
AdamType(const double stepSize=0.001, const size_t batchSize=32, const double beta1=0.9, const double beta2=0.999, const double eps=1e-8, const size_t maxIterations=100000, const double tolerance=1e-5, const bool shuffle=true)
Construct the Adam optimizer with the given function and parameters.
bool Shuffle() const
Get whether or not the individual functions are shuffled.
double & StepSize()
Modify the step size.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
double & Beta1()
Modify the smoothing parameter.
double & Epsilon()
Modify the value used to initialise the mean squared gradient parameter.
double StepSize() const
Get the step size.
double Optimize(DecomposableFunctionType &function, arma::mat &iterate)
Optimize the given function using stochastic gradient descent.
size_t BatchSize() const
Get the batch size.
double Tolerance() const
Get the tolerance for termination.
double StepSize() const
Get the step size.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
size_t BatchSize() const
Get the batch size.