26#include "neural_network.h"
31struct LossIndexBackPropagation;
32struct LossIndexBackPropagationLM;
78 buffer <<
"OpenNN Exception: LossIndex class.\n"
79 <<
"NeuralNetwork* get_neural_network_pointer() const method.\n"
80 <<
"Neural network pointer is nullptr.\n";
82 throw logic_error(buffer.str());
100 buffer <<
"OpenNN Exception: LossIndex class.\n"
101 <<
"DataSet* get_data_set_pointer() const method.\n"
102 <<
"DataSet pointer is nullptr.\n";
104 throw logic_error(buffer.str());
void set_threads_number(const int&);

/// Sets the normalization coefficient of the error term.
/// No-op by default; presumably overridden by normalized error terms — confirm.
virtual void set_normalization_coefficient() {}

// NOTE(review): presumably the step size for the numerical-differentiation
// methods below — confirm against the implementation.
type calculate_eta() const;

/// Gradient of the loss approximated by numerical differentiation.
Tensor<type, 1> calculate_gradient_numerical_differentiation();

/// Jacobian approximated by numerical differentiation.
Tensor<type, 2> calculate_jacobian_numerical_differentiation();
// Back-propagation

void calculate_errors(const DataSetBatch&,
                      const NeuralNetworkForwardPropagation&,
                      LossIndexBackPropagation&) const;

/// Scalar error for a batch; implemented by each concrete error term.
virtual void calculate_error(const DataSetBatch&,
                             const NeuralNetworkForwardPropagation&,
                             LossIndexBackPropagation&) const = 0;

/// Delta of the output layer; implemented by each concrete error term.
virtual void calculate_output_delta(const DataSetBatch&,
                                    NeuralNetworkForwardPropagation&,
                                    LossIndexBackPropagation&) const = 0;

void calculate_layers_delta(const DataSetBatch&,
                            NeuralNetworkForwardPropagation&,
                            LossIndexBackPropagation&) const;

void calculate_error_gradient(const DataSetBatch&,
                              const NeuralNetworkForwardPropagation&,
                              LossIndexBackPropagation&) const;

/// Full back-propagation pass for a batch.
void back_propagate(const DataSetBatch&,
                    NeuralNetworkForwardPropagation&,
                    LossIndexBackPropagation&) const;
// Back-propagation for the Levenberg-Marquardt algorithm

void calculate_errors_lm(const DataSetBatch&,
                         const NeuralNetworkForwardPropagation&,
                         LossIndexBackPropagationLM&) const;

virtual void calculate_squared_errors_lm(const DataSetBatch&,
                                         const NeuralNetworkForwardPropagation&,
                                         LossIndexBackPropagationLM&) const;

/// Scalar error for LM; no-op unless a concrete error term overrides it.
virtual void calculate_error_lm(const DataSetBatch&,
                                const NeuralNetworkForwardPropagation&,
                                LossIndexBackPropagationLM&) const {}

/// Output-layer delta for LM; no-op unless a concrete error term overrides it.
virtual void calculate_output_delta_lm(const DataSetBatch&,
                                       NeuralNetworkForwardPropagation&,
                                       LossIndexBackPropagationLM&) const {}

void calculate_layers_delta_lm(const DataSetBatch&,
                               NeuralNetworkForwardPropagation&,
                               LossIndexBackPropagationLM&) const;
208 virtual void calculate_error_gradient_lm(
const DataSetBatch&,
209 LossIndexBackPropagationLM&)
const;
212 NeuralNetworkForwardPropagation&,
213 LossIndexBackPropagationLM&)
const;
215 virtual void calculate_error_hessian_lm(
const DataSetBatch&,
216 LossIndexBackPropagationLM&)
const {}
219 NeuralNetworkForwardPropagation&,
220 LossIndexBackPropagationLM&)
const;
NonBlockingThreadPool* non_blocking_thread_pool = nullptr;

ThreadPoolDevice* thread_pool_device = nullptr;

/// Eigen contraction indices pairing dimension 0 with dimension 0 (A^T * B).
const Eigen::array<IndexPair<Index>, 1> AT_B = {IndexPair<Index>(0, 0)};

/// Eigen contraction indices pairing dimension 1 with dimension 0 (A * B).
const Eigen::array<IndexPair<Index>, 1> A_B = {IndexPair<Index>(1, 0)};

/// Double contraction over both dimensions — presumably a sum of
/// element-wise products (sum of squared errors when A == B).
const Eigen::array<IndexPair<Index>, 2> SSE = {IndexPair<Index>(0, 0), IndexPair<Index>(1, 1)};

/// Reduction index {1}: reduces along dimension 1 (one value per row).
const Eigen::array<int, 1> rows_sum = {Eigen::array<int, 1>({1})};
280 #include "../../opennn-cuda/opennn-cuda/loss_index_cuda.h"
297 if(new_batch_samples_number == 0)
return;
299 set(new_batch_samples_number, new_loss_index_pointer);
304 void set(
const Index& new_batch_samples_number, LossIndex* new_loss_index_pointer)
306 batch_samples_number = new_batch_samples_number;
308 loss_index_pointer = new_loss_index_pointer;
316 const Index outputs_number = neural_network_pointer->get_outputs_number();
320 neural_network.set(batch_samples_number, neural_network_pointer);
326 errors.resize(batch_samples_number, outputs_number);
328 parameters = neural_network_pointer->get_parameters();
330 gradient.resize(parameters_number);
332 regularization_gradient.resize(parameters_number);
333 regularization_gradient.setConstant(type(0));
338 cout <<
"Loss index back-propagation" << endl;
340 cout <<
"Errors:" << endl;
341 cout << errors << endl;
343 cout <<
"Error:" << endl;
344 cout << error << endl;
346 cout <<
"Loss:" << endl;
347 cout << loss << endl;
349 cout <<
"Gradient:" << endl;
350 cout << gradient << endl;
352 neural_network.print();
355 LossIndex* loss_index_pointer =
nullptr;
357 Index batch_samples_number = 0;
359 NeuralNetworkBackPropagation neural_network;
361 type error = type(0);
365 Tensor<type, 2> errors;
367 Tensor<type, 1> parameters;
369 Tensor<type, 1> gradient;
371 Tensor<type, 1> regularization_gradient;
390 if(new_batch_samples_number == 0)
return;
392 set(new_batch_samples_number, new_loss_index_pointer);
395 void set(
const Index& new_batch_samples_number, LossIndex* new_loss_index_pointer)
397 batch_samples_number = new_batch_samples_number;
399 loss_index_pointer = new_loss_index_pointer;
405 const Index outputs_number = neural_network_pointer->get_outputs_number();
407 neural_network.set(batch_samples_number, neural_network_pointer);
409 parameters = neural_network_pointer->get_parameters();
415 gradient.resize(parameters_number);
417 regularization_gradient.resize(parameters_number);
418 regularization_gradient.setZero();
420 squared_errors_jacobian.resize(batch_samples_number, parameters_number);
422 hessian.resize(parameters_number, parameters_number);
424 regularization_hessian.resize(parameters_number, parameters_number);
425 regularization_hessian.setZero();
427 errors.resize(batch_samples_number, outputs_number);
429 squared_errors.resize(batch_samples_number);
434 cout <<
"Loss index back-propagation LM" << endl;
436 cout <<
"Errors:" << endl;
437 cout << errors << endl;
439 cout <<
"Squared errors:" << endl;
440 cout << squared_errors << endl;
442 cout <<
"Squared errors Jacobian:" << endl;
443 cout << squared_errors_jacobian << endl;
445 cout <<
"Error:" << endl;
446 cout << error << endl;
448 cout <<
"Loss:" << endl;
449 cout << loss << endl;
451 cout <<
"Gradient:" << endl;
452 cout << gradient << endl;
454 cout <<
"Hessian:" << endl;
455 cout << hessian << endl;
458 LossIndex* loss_index_pointer =
nullptr;
460 Index batch_samples_number = 0;
462 type error = type(0);
465 Tensor<type, 1> parameters;
467 NeuralNetworkBackPropagationLM neural_network;
469 Tensor<type, 2> errors;
470 Tensor<type, 1> squared_errors;
471 Tensor<type, 2> squared_errors_jacobian;
473 Tensor<type, 1> gradient;
474 Tensor<type, 2> hessian;
476 Tensor<type, 1> regularization_gradient;
477 Tensor<type, 2> regularization_hessian;
This class represents the concept of a data set for data modelling problems, such as approximation, classification, forecasting and association.
This abstract class represents the concept of a loss index composed of an error term and a regularization term.
type calculate_h(const type &) const
void calculate_regularization_hessian(const Tensor< type, 1 > &, Tensor< type, 2 > &) const
DataSet * data_set_pointer
Pointer to a data set object.
void set_regularization_method(const RegularizationMethod &)
NeuralNetwork * neural_network_pointer
Pointer to a neural network object.
virtual void set_data_set_pointer(DataSet *)
Sets a new data set on which the error term is to be measured.
const bool & get_display() const
bool has_selection() const
Returns true if there are selection samples and false otherwise.
void from_XML(const tinyxml2::XMLDocument &)
void set_default()
Sets the members of the error term to their default values:
void calculate_squared_errors_jacobian_lm(const DataSetBatch &, NeuralNetworkForwardPropagation &, LossIndexBackPropagationLM &) const
bool display
Display messages to screen.
bool has_data_set() const
void back_propagate_lm(const DataSetBatch &, NeuralNetworkForwardPropagation &, LossIndexBackPropagationLM &) const
type regularization_weight
Regularization weight value.
bool has_neural_network() const
virtual string get_error_type() const
Returns a string with the default type of error term, "USER_PERFORMANCE_TERM".
NeuralNetwork * get_neural_network_pointer() const
Returns a pointer to the neural network object associated to the error term.
RegularizationMethod get_regularization_method() const
Returns the regularization method.
virtual ~LossIndex()
Destructor.
type calculate_regularization(const Tensor< type, 1 > &) const
const type & get_regularization_weight() const
Returns regularization weight.
void set_neural_network_pointer(NeuralNetwork *)
string write_regularization_method() const
RegularizationMethod regularization_method
Pointer to a regularization method object.
void set_display(const bool &)
RegularizationMethod
Enumeration of available regularization methods.
virtual void write_XML(tinyxml2::XMLPrinter &) const
void calculate_regularization_gradient(const Tensor< type, 1 > &, Tensor< type, 1 > &) const
virtual string get_error_type_text() const
Returns a string with the default type of error term in text format, "USER_PERFORMANCE_TERM".
DataSet * get_data_set_pointer() const
Returns a pointer to the data set object associated to the error term.
void set_regularization_weight(const type &)
Index get_parameters_number() const
virtual ~LossIndexBackPropagation()
Destructor.
LossIndexBackPropagation()
Default constructor.
A loss index composed of several terms, this structure represent the First Order for this function.
LossIndexBackPropagationLM()
Default constructor.