#ifndef GRADIENTDESCENT_H
#define GRADIENTDESCENT_H

#include "loss_index.h"
#include "optimization_algorithm.h"
#include "learning_rate_algorithm.h"
struct GradientDescentData;
string write_optimization_algorithm_type() const;
const type first_learning_rate = static_cast<type>(0.01);
set(new_gradient_descent_pointer);

gradient_descent_pointer = new_gradient_descent_pointer;

potential_parameters.resize(parameters_number);

parameters_increment.resize(parameters_number);

training_direction.resize(parameters_number);
cout << "Training direction:" << endl;
cout << training_direction << endl;

cout << "Learning rate:" << endl;
cout << learning_rate << endl;
GradientDescent* gradient_descent_pointer = nullptr;

Tensor<type, 1> parameters_increment;

Tensor<type, 0> training_slope;

type learning_rate = type(0);
type old_learning_rate = type(0);
TrainingResults perform_training()
void set_maximum_selection_failures(const Index &)
void set_loss_index_pointer(LossIndex *)
const type & get_maximum_time() const
Returns the maximum training time.
string get_hardware_use() const
Returns the hardware used. Default: Multi-core.
const type & get_loss_goal() const
void from_XML(const tinyxml2::XMLDocument &)
void set_default()
Sets the members of the optimization algorithm object to their default values.
const Index & get_maximum_epochs_number() const
Returns the maximum number of iterations for training.
Tensor< string, 2 > to_string_matrix() const
Writes as matrix of strings the most representative attributes.
type minimum_loss_decrease
Minimum loss improvement between two successive iterations. It is used as a stopping criterion.
LearningRateAlgorithm * get_learning_rate_algorithm_pointer()
Returns a pointer to the learning rate algorithm object inside the gradient descent object.
const LearningRateAlgorithm & get_learning_rate_algorithm() const
Returns a constant reference to the learning rate algorithm object inside the gradient descent object.
void set_maximum_time(const type &)
LearningRateAlgorithm learning_rate_algorithm
Learning rate algorithm object for one-dimensional minimization.
void set_loss_goal(const type &)
type maximum_time
Maximum training time. It is used as a stopping criterion.
void set_maximum_epochs_number(const Index &)
void calculate_training_direction(const Tensor< type, 1 > &, Tensor< type, 1 > &) const
void set_minimum_loss_decrease(const type &)
type training_loss_goal
Goal value for the loss. It is used as a stopping criterion.
Index maximum_epochs_number
Maximum epochs number.
virtual ~GradientDescent()
Destructor.
void write_XML(tinyxml2::XMLPrinter &) const
void update_parameters(const DataSetBatch &batch, NeuralNetworkForwardPropagation &forward_propagation, LossIndexBackPropagation &back_propagation, GradientDescentData &optimization_data)
GradientDescent::update_parameters.
const Index & get_maximum_selection_failures() const
Returns the maximum number of selection error increases during the training process.
Index maximum_selection_failures
const type & get_minimum_loss_decrease() const
Returns the minimum loss improvement during training.
A learning rate that is adjusted according to an algorithm during training to minimize training time.
This abstract class represents the concept of a loss index composed of an error term and a regularization term.
NeuralNetwork * get_neural_network_pointer() const
Returns a pointer to the neural network object associated to the error term.
Index get_parameters_number() const
LossIndex * get_loss_index_pointer() const
virtual ~GradientDescentData()
Destructor.
GradientDescentData()
Default constructor.
This structure contains the optimization algorithm results.