12 #ifndef MLPACK_CORE_OPTIMIZERS_SPALERA_SGD_SPALERA_SGD_HPP
13 #define MLPACK_CORE_OPTIMIZERS_SPALERA_SGD_SPALERA_SGD_HPP
20 namespace optimization {
87 template<
typename DecayPolicyType = NoDecay>
115 const size_t batchSize = 32,
116 const size_t maxIterations = 100000,
117 const double tolerance = 1e-5,
118 const double lambda = 0.01,
119 const double alpha = 0.001,
120 const double epsilon = 1e-6,
121 const double adaptRate = 3.10e-8,
122 const bool shuffle =
true,
123 const DecayPolicyType& decayPolicy = DecayPolicyType(),
124 const bool resetPolicy =
true);
136 template<
typename DecomposableFunctionType>
137 double Optimize(DecomposableFunctionType&
function, arma::mat& iterate);
199 size_t maxIterations;
215 DecayPolicyType decayPolicy;
226 #include "spalera_sgd_impl.hpp"
bool ResetPolicy() const
Get whether or not the update policy parameters are reset before Optimize call.
size_t MaxIterations() const
Get the maximum number of iterations (0 indicates no limit).
double StepSize() const
Get the step size.
bool & Shuffle()
Modify whether or not the individual functions are shuffled.
SPALeRA Stochastic Gradient Descent is a technique for minimizing a function which can be expressed as a sum of other functions.
SPALeRAStepsize & UpdatePolicy()
Modify the update policy.
bool & ResetPolicy()
Modify whether or not the update policy parameters are reset before Optimize call.
The core includes that mlpack expects; standard C++ includes and Armadillo.
SPALeRAStepsize UpdatePolicy() const
Get the update policy.
size_t BatchSize() const
Get the batch size.
SPALeRASGD(const double stepSize=0.01, const size_t batchSize=32, const size_t maxIterations=100000, const double tolerance=1e-5, const double lambda=0.01, const double alpha=0.001, const double epsilon=1e-6, const double adaptRate=3.10e-8, const bool shuffle=true, const DecayPolicyType &decayPolicy=DecayPolicyType(), const bool resetPolicy=true)
Construct the SPALeRASGD optimizer with the given function and parameters.
Definition of the SPALeRA stepsize technique, which implements a change detection mechanism with an agnostic learning rate adaptation scheme.
double AdaptRate() const
Get the agnostic learning rate update rate.
bool Shuffle() const
Get whether or not the individual functions are shuffled.
double Optimize(DecomposableFunctionType &function, arma::mat &iterate)
Optimize the given function using SPALeRA SGD.
DecayPolicyType & DecayPolicy()
Modify the decay policy.
double & Alpha()
Modify the agnostic learning rate adaptation parameter.
double & Tolerance()
Modify the tolerance for termination.
size_t & BatchSize()
Modify the batch size.
DecayPolicyType DecayPolicy() const
Get the decay policy.
See subsection cli_alt_reg_tut (Alternate DET regularization). The usual regularized error \(R_\alpha(t)\) of a node \(t\) is given by
size_t & MaxIterations()
Modify the maximum number of iterations (0 indicates no limit).
double & AdaptRate()
Modify the agnostic learning rate update rate.
double & StepSize()
Modify the step size.
double Alpha() const
Get the agnostic learning rate adaptation parameter.
double Tolerance() const
Get the tolerance for termination.
double Alpha() const
Get the agnostic learning rate adaptation parameter.
double AdaptRate() const
Get the agnostic learning rate update rate.