//   OpenNN: Open Neural Networks Library
//   www.opennn.net
//
//   A D A P T I V E   M O M E N T   E S T I M A T I O N
//
//   Artificial Intelligence Techniques SL
//   artelnics@artelnics.com

#include "adaptive_moment_estimation.h"

namespace OpenNN
{
13
17
20{
22}
23
24
29
31 : OptimizationAlgorithm(new_loss_index_pointer)
32{
34}
35
36
38
40{
41}
42
43
45
47{
49}
50
51
53
55{
56 return beta_1;
57}
58
59
61
63{
64 return beta_2;
65}
66
67
69
71{
72 return epsilon;
73}
74
75
78
80{
81 return training_loss_goal;
82}
83
84
86
88{
89 return maximum_time;
90}
91
92
96
98{
99 loss_index_pointer = new_loss_index_pointer;
100}
101
102
105
107{
108 initial_learning_rate= new_learning_rate;
109}
110
111
114
115void AdaptiveMomentEstimation::set_beta_1(const type& new_beta_1)
116{
117 beta_1= new_beta_1;
118}
119
120
123
124void AdaptiveMomentEstimation::set_beta_2(const type& new_beta_2)
125{
126 beta_2= new_beta_2;
127}
128
129
132
133void AdaptiveMomentEstimation::set_epsilon(const type& new_epsilon)
134{
135 epsilon= new_epsilon;
136}
137
138
141
142void AdaptiveMomentEstimation::set_maximum_epochs_number(const Index& new_maximum_epochs_number)
143{
144#ifdef OPENNN_DEBUG
145
146 if(new_maximum_epochs_number < static_cast<type>(0.0))
147 {
148 ostringstream buffer;
149
150 buffer << "OpenNN Exception: AdaptiveMomentEstimation class.\n"
151 << "void set_maximum_epochs_number(const type&) method.\n"
152 << "Maximum epochs number must be equal or greater than 0.\n";
153
154 throw logic_error(buffer.str());
155 }
156
157#endif
158
159 // Set maximum_epochs number
160
161 maximum_epochs_number = new_maximum_epochs_number;
162}
163
164
168
169void AdaptiveMomentEstimation::set_loss_goal(const type& new_loss_goal)
170{
171 training_loss_goal = new_loss_goal;
172}
173
174
177
178void AdaptiveMomentEstimation::set_maximum_time(const type& new_maximum_time)
179{
180#ifdef OPENNN_DEBUG
181
182 if(new_maximum_time < static_cast<type>(0.0))
183 {
184 ostringstream buffer;
185
186 buffer << "OpenNN Exception: AdaptiveMomentEstimation class.\n"
187 << "void set_maximum_time(const type&) method.\n"
188 << "Maximum time must be equal or greater than 0.\n";
189
190 throw logic_error(buffer.str());
191 }
192
193#endif
194
195 // Set maximum time
196
197 maximum_time = new_maximum_time;
198}
199
200
205
207{
209
210 check();
211
212 // Start training
213
214 if(display) cout << "Training with adaptive moment estimation \"Adam\" ...\n";
215
216 // Data set
217
218 DataSet* data_set_pointer = loss_index_pointer->get_data_set_pointer();
219
220 const bool has_selection = data_set_pointer->has_selection();
221
222 const Tensor<Index, 1> input_variables_indices = data_set_pointer->get_input_variables_indices();
223 const Tensor<Index, 1> target_variables_indices = data_set_pointer->get_target_variables_indices();
224
225 const Tensor<Index, 1> training_samples_indices = data_set_pointer->get_training_samples_indices();
226 const Tensor<Index, 1> selection_samples_indices = data_set_pointer->get_selection_samples_indices();
227
228 const Tensor<string, 1> inputs_names = data_set_pointer->get_input_variables_names();
229 const Tensor<string, 1> targets_names = data_set_pointer->get_target_variables_names();
230
231 const Tensor<Scaler, 1> input_variables_scalers = data_set_pointer->get_input_variables_scalers();
232 const Tensor<Scaler, 1> target_variables_scalers = data_set_pointer->get_target_variables_scalers();
233
234 const Tensor<Descriptives, 1> input_variables_descriptives = data_set_pointer->scale_input_variables();
235 Tensor<Descriptives, 1> target_variables_descriptives;
236
237 Index batch_size_training = 0;
238 Index batch_size_selection = 0;
239
240 const Index training_samples_number = data_set_pointer->get_training_samples_number();
241 const Index selection_samples_number = data_set_pointer->get_selection_samples_number();
242
243 training_samples_number < batch_samples_number
244 ? batch_size_training = training_samples_number
245 : batch_size_training = batch_samples_number;
246
247 selection_samples_number < batch_samples_number && selection_samples_number != 0
248 ? batch_size_selection = selection_samples_number
249 : batch_size_selection = batch_samples_number;
250
251 DataSetBatch batch_training(batch_size_training, data_set_pointer);
252 DataSetBatch batch_selection(batch_size_selection, data_set_pointer);
253
254 const Index training_batches_number = training_samples_number/batch_size_training;
255 const Index selection_batches_number = selection_samples_number/batch_size_selection;
256
257 Tensor<Index, 2> training_batches(training_batches_number, batch_size_training);
258 Tensor<Index, 2> selection_batches(selection_batches_number, batch_size_selection);
259
260 // Neural network
261
263
264 neural_network_pointer->set_inputs_names(inputs_names);
265 neural_network_pointer->set_outputs_names(targets_names);
266
267 if(neural_network_pointer->has_scaling_layer())
268 {
269 ScalingLayer* scaling_layer_pointer = neural_network_pointer->get_scaling_layer_pointer();
270 scaling_layer_pointer->set(input_variables_descriptives, input_variables_scalers);
271 }
272
273 if(neural_network_pointer->has_unscaling_layer())
274 {
275 target_variables_descriptives = data_set_pointer->scale_target_variables();
276
277 UnscalingLayer* unscaling_layer_pointer = neural_network_pointer->get_unscaling_layer_pointer();
278 unscaling_layer_pointer->set(target_variables_descriptives, target_variables_scalers);
279 }
280
281 NeuralNetworkForwardPropagation training_forward_propagation(batch_size_training, neural_network_pointer);
282 NeuralNetworkForwardPropagation selection_forward_propagation(batch_size_selection, neural_network_pointer);
283
284 // Loss index
285
286 loss_index_pointer->set_normalization_coefficient();
287
288 LossIndexBackPropagation training_back_propagation(batch_size_training, loss_index_pointer);
289 LossIndexBackPropagation selection_back_propagation(batch_size_selection, loss_index_pointer);
290
291 type training_error = type(0);
292 type training_loss = type(0);
293
294 type selection_error = type(0);
295
296 Index selection_failures = 0;
297
298 // Optimization algorithm
299
300 AdaptiveMomentEstimationData optimization_data(this);
301
302 bool stop_training = false;
303
304 time_t beginning_time, current_time;
305 time(&beginning_time);
306 type elapsed_time = type(0);
307
308 bool shuffle = false;
309
310 if(neural_network_pointer->has_long_short_term_memory_layer()
311 || neural_network_pointer->has_recurrent_layer())
312 shuffle = false;
313
314 // Main loop
315
316 for(Index epoch = 0; epoch <= maximum_epochs_number; epoch++)
317 {
318 if(display && epoch%display_period == 0) cout << "Epoch: " << epoch << endl;
319
320 training_batches = data_set_pointer->get_batches(training_samples_indices, batch_size_training, shuffle);
321
322 const Index batches_number = training_batches.dimension(0);
323
324 training_loss = type(0);
325 training_error = type(0);
326
327 optimization_data.iteration = 1;
328
329 for(Index iteration = 0; iteration < batches_number; iteration++)
330 {
331 // Data set
332
333 batch_training.fill(training_batches.chip(iteration, 0), input_variables_indices, target_variables_indices);
334
335 // Neural network
336
337 neural_network_pointer->forward_propagate(batch_training, training_forward_propagation);
338
339 // Loss index
340
341 loss_index_pointer->back_propagate(batch_training, training_forward_propagation, training_back_propagation);
342
343 training_error += training_back_propagation.error;
344 training_loss += training_back_propagation.loss;
345
346 update_parameters(training_back_propagation, optimization_data);
347 }
348
349 // Loss
350
351 training_loss /= static_cast<type>(batches_number);
352 training_error /= static_cast<type>(batches_number);
353
354 results.training_error_history(epoch) = training_error;
355
356 if(has_selection)
357 {
358 selection_batches = data_set_pointer->get_batches(selection_samples_indices, batch_size_selection, shuffle);
359
360 selection_error = type(0);
361
362 for(Index iteration = 0; iteration < selection_batches_number; iteration++)
363 {
364 // Data set
365
366 batch_selection.fill(selection_batches.chip(iteration,0), input_variables_indices, target_variables_indices);
367
368 // Neural network
369
370 neural_network_pointer->forward_propagate(batch_selection, selection_forward_propagation);
371
372 // Loss
373
374 loss_index_pointer->calculate_errors(batch_selection, selection_forward_propagation, selection_back_propagation);
375
376 loss_index_pointer->calculate_error(batch_selection, selection_forward_propagation, selection_back_propagation);
377
378 selection_error += selection_back_propagation.error;
379 }
380
381 selection_error /= static_cast<type>(selection_batches_number);
382
383 results.selection_error_history(epoch) = selection_error;
384
385 if(epoch != 0 && results.selection_error_history(epoch) > results.selection_error_history(epoch-1)) selection_failures++;
386 }
387
388 // Elapsed time
389
390 time(&current_time);
391 elapsed_time = static_cast<type>(difftime(current_time, beginning_time));
392
393 if(display && epoch%display_period == 0)
394 {
395 cout << "Training error: " << training_error << endl;
396 if(has_selection) cout << "Selection error: " << selection_error << endl;
397 cout << "Elapsed time: " << write_time(elapsed_time) << endl;
398 }
399
400 // Training history
401
402 if(epoch == maximum_epochs_number)
403 {
404 if(display) cout << "Epoch " << epoch << endl << "Maximum number of epochs reached: " << epoch << endl;
405
406 stop_training = true;
407
408 results.stopping_condition = StoppingCondition::MaximumEpochsNumber;
409 }
410
411 if(elapsed_time >= maximum_time)
412 {
413 if(display) cout << "Epoch " << epoch << endl << "Maximum training time reached: " << write_time(elapsed_time) << endl;
414
415 stop_training = true;
416
417 results.stopping_condition = StoppingCondition::MaximumTime;
418 }
419
420 if(training_loss <= training_loss_goal)
421 {
422 if(display) cout << "Epoch " << epoch << endl << "Loss goal reached: " << training_loss << endl;
423
424 stop_training = true;
425
426 results.stopping_condition = StoppingCondition::LossGoal;
427 }
428
429 if(selection_failures >= maximum_selection_failures)
430 {
431 if(display) cout << "Epoch " << epoch << endl << "Maximum selection failures reached: " << selection_failures << endl;
432
433 stop_training = true;
434
435 results.stopping_condition = StoppingCondition::MaximumSelectionErrorIncreases;
436 }
437
438 if(stop_training)
439 {
440 results.resize_training_error_history(epoch+1);
441
442 if(has_selection) results.resize_selection_error_history(epoch+1);
443 else results.resize_selection_error_history(0);
444
445 results.elapsed_time = write_time(elapsed_time);
446
447 break;
448 }
449
450 if(epoch != 0 && epoch % save_period == 0) neural_network_pointer->save(neural_network_file_name);
451 }
452
453 data_set_pointer->unscale_input_variables(input_variables_descriptives);
454
455 if(neural_network_pointer->has_unscaling_layer())
456 data_set_pointer->unscale_target_variables(target_variables_descriptives);
457
458 if(display) results.print();
459
460 return results;
461}
462
463
465{
466 return "ADAPTIVE_MOMENT_ESTIMATION";
467}
468
469
471
473{
474 Tensor<string, 2> labels_values(9, 2);
475
476 // Initial learning rate
477
478 labels_values(0,0) = "Initial learning rate";
479 labels_values(0,1) = to_string(double(initial_learning_rate));
480
481 // Initial decay
482
483 labels_values(1,0) = "Initial decay";
484 labels_values(1,1) = to_string(double(initial_decay));
485
486 // Beta 1
487
488 labels_values(2,0) = "Beta 1";
489 labels_values(2,1) = to_string(double(beta_1));
490
491 // Beta 2
492
493 labels_values(3,0) = "Beta 2";
494 labels_values(3,1) = to_string(double(beta_2));
495
496 // Epsilon
497
498 labels_values(4,0) = "Epsilon";
499 labels_values(4,1) = to_string(double(epsilon));
500
501 // Training loss goal
502
503 labels_values(5,0) = "Training loss goal";
504 labels_values(5,1) = to_string(double(training_loss_goal));
505
506 // Maximum epochs number
507
508 labels_values(6,0) = "Maximum epochs number";
509 labels_values(6,1) = to_string(maximum_epochs_number);
510
511 // Maximum time
512
513 labels_values(7,0) = "Maximum time";
514 labels_values(7,1) = write_time(maximum_time);
515
516 // Batch samples number
517
518 labels_values(8,0) = "Batch samples number";
519 labels_values(8,1) = to_string(batch_samples_number);
520
521 return labels_values;
522}
523
524
527
529{
530 ostringstream buffer;
531
532 file_stream.OpenElement("AdaptiveMomentEstimation");
533
534 // DataSetBatch size
535
536 file_stream.OpenElement("BatchSize");
537
538 buffer.str("");
539 buffer << batch_samples_number;
540
541 file_stream.PushText(buffer.str().c_str());
542
543 file_stream.CloseElement();
544
545 // Loss goal
546
547 file_stream.OpenElement("LossGoal");
548
549 buffer.str("");
550 buffer << training_loss_goal;
551
552 file_stream.PushText(buffer.str().c_str());
553
554 file_stream.CloseElement();
555
556 // Maximum iterations number
557
558 file_stream.OpenElement("MaximumEpochsNumber");
559
560 buffer.str("");
561 buffer << maximum_epochs_number;
562
563 file_stream.PushText(buffer.str().c_str());
564
565 file_stream.CloseElement();
566
567 // Maximum time
568
569 file_stream.OpenElement("MaximumTime");
570
571 buffer.str("");
572 buffer << maximum_time;
573
574 file_stream.PushText(buffer.str().c_str());
575
576 file_stream.CloseElement();
577
578 // Hardware use
579
580 file_stream.OpenElement("HardwareUse");
581
582 buffer.str("");
583 buffer << this->get_hardware_use();
584
585 file_stream.PushText(buffer.str().c_str());
586
587 file_stream.CloseElement();
588
589 // End element
590
591 file_stream.CloseElement();
592}
593
594
596{
597 const tinyxml2::XMLElement* root_element = document.FirstChildElement("AdaptiveMomentEstimation");
598
599 if(!root_element)
600 {
601 ostringstream buffer;
602
603 buffer << "OpenNN Exception: AdaptiveMomentEstimation class.\n"
604 << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
605 << "Adaptive moment estimation element is nullptr.\n";
606
607 throw logic_error(buffer.str());
608 }
609
610 // DataSetBatch size
611
612 const tinyxml2::XMLElement* batch_size_element = root_element->FirstChildElement("BatchSize");
613
614 if(batch_size_element)
615 {
616 const Index new_batch_size = static_cast<Index>(atoi(batch_size_element->GetText()));
617
618 try
619 {
620 set_batch_samples_number(new_batch_size);
621 }
622 catch(const logic_error& e)
623 {
624 cerr << e.what() << endl;
625 }
626 }
627
628 // Loss goal
629 {
630 const tinyxml2::XMLElement* element = root_element->FirstChildElement("LossGoal");
631
632 if(element)
633 {
634 const type new_loss_goal = static_cast<type>(atof(element->GetText()));
635
636 try
637 {
638 set_loss_goal(new_loss_goal);
639 }
640 catch(const logic_error& e)
641 {
642 cerr << e.what() << endl;
643 }
644 }
645 }
646
647 // Maximum eochs number
648 {
649 const tinyxml2::XMLElement* element = root_element->FirstChildElement("MaximumEpochsNumber");
650
651 if(element)
652 {
653 const Index new_maximum_epochs_number = static_cast<Index>(atoi(element->GetText()));
654
655 try
656 {
657 set_maximum_epochs_number(new_maximum_epochs_number);
658 }
659 catch(const logic_error& e)
660 {
661 cerr << e.what() << endl;
662 }
663 }
664 }
665
666 // Maximum time
667 {
668 const tinyxml2::XMLElement* element = root_element->FirstChildElement("MaximumTime");
669
670 if(element)
671 {
672 const type new_maximum_time = static_cast<type>(atof(element->GetText()));
673
674 try
675 {
676 set_maximum_time(new_maximum_time);
677 }
678 catch(const logic_error& e)
679 {
680 cerr << e.what() << endl;
681 }
682 }
683 }
684
685 // Hardware use
686 {
687 const tinyxml2::XMLElement* element = root_element->FirstChildElement("HardwareUse");
688
689 if(element)
690 {
691 const string new_hardware_use = element->GetText();
692
693 try
694 {
695 set_hardware_use(new_hardware_use);
696 }
697 catch(const logic_error& e)
698 {
699 cerr << e.what() << endl;
700 }
701 }
702 }
703}
704
705
707
708void AdaptiveMomentEstimation::set_batch_samples_number(const Index& new_batch_samples_number)
709{
710 batch_samples_number = new_batch_samples_number;
711}
712
713
715{
716 display_period = 100;
717}
718
719
720Index AdaptiveMomentEstimation::get_batch_samples_number() const
721{
723}
724
725
727
729 AdaptiveMomentEstimationData& optimization_data)
730{
731 const type learning_rate =
733 sqrt(type(1) - pow(beta_2, static_cast<type>(optimization_data.iteration)))/
734 (type(1) - pow(beta_1, static_cast<type>(optimization_data.iteration))));
735
736 optimization_data.gradient_exponential_decay.device(*thread_pool_device)
737 = optimization_data.gradient_exponential_decay*beta_1
738 + back_propagation.gradient*(type(1) - beta_1);
739
740 optimization_data.square_gradient_exponential_decay.device(*thread_pool_device)
741 = optimization_data.square_gradient_exponential_decay*beta_2
742 + back_propagation.gradient*back_propagation.gradient*(type(1) - beta_2);
743
744 back_propagation.parameters.device(*thread_pool_device) -=
745 optimization_data.gradient_exponential_decay*learning_rate/(optimization_data.square_gradient_exponential_decay.sqrt() + epsilon);
746
747 optimization_data.iteration++;
748
749 // Update parameters
750
751 back_propagation.loss_index_pointer->get_neural_network_pointer()->set_parameters(back_propagation.parameters);
752}
753
754
756{
757}
758
759
761{
762 set(new_stochastic_gradient_descent_pointer);
763}
764
765
766AdaptiveMomentEstimationData::~AdaptiveMomentEstimationData()
767{
768}
769
770
771void AdaptiveMomentEstimationData::set(AdaptiveMomentEstimation* new_adaptive_moment_estimation_pointer)
772{
773 adaptive_moment_estimation_pointer = new_adaptive_moment_estimation_pointer;
774
775 LossIndex* loss_index_pointer = new_adaptive_moment_estimation_pointer->get_loss_index_pointer();
776
777 NeuralNetwork* neural_network_pointer = loss_index_pointer->get_neural_network_pointer();
778
779 const Index parameters_number = neural_network_pointer->get_parameters_number();
780
781 gradient_exponential_decay.resize(parameters_number);
782 gradient_exponential_decay.setZero();
783
784 square_gradient_exponential_decay.resize(parameters_number);
785 square_gradient_exponential_decay.setZero();
786}
787
788
789void AdaptiveMomentEstimationData::print() const
790{
791 cout << "Gradient exponential decay:" << endl
792 <<gradient_exponential_decay << endl;
793
794 cout << "Square gradient exponential decay:" << endl
795 << square_gradient_exponential_decay << endl;
796}
797
}

// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2021 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
const type & get_epsilon() const
Returns epsilon.
const type & get_maximum_time() const
Returns the maximum training time.
const type & get_beta_2() const
Returns beta 2.
void from_XML(const tinyxml2::XMLDocument &)
void set_default()
Sets the members of the optimization algorithm object to their default values.
type initial_learning_rate
Initial learning rate.
const type & get_beta_1() const
Returns beta 1.
Tensor< string, 2 > to_string_matrix() const
Writes as matrix of strings the most representative attributes.
type beta_1
Exponential decay over gradient estimates.
string write_optimization_algorithm_type() const
Return the algorithm optimum for your model.
type epsilon
Small number to prevent any division by zero.
void set_batch_samples_number(const Index &new_batch_samples_number)
Set number of samples in each batch. Default 1000.
type maximum_time
Maximum training time. It is used as a stopping criterion.
void update_parameters(LossIndexBackPropagation &, AdaptiveMomentEstimationData &)
Update iteration parameters.
type initial_decay
Learning rate decay over each update.
type training_loss_goal
Goal value for the loss. It is used as a stopping criterion.
Index maximum_epochs_number
Maximum epochs number.
void write_XML(tinyxml2::XMLPrinter &) const
Index batch_samples_number
Training and selection batch size.
Index maximum_selection_failures
Maximum number of times when selection error increases.
const type & get_initial_learning_rate() const
Returns the initial learning rate.
type beta_2
Exponential decay over square gradient estimates.
This class represents the concept of data set for data modelling problems, such as approximation,...
Definition: data_set.h:57
Index get_training_samples_number() const
Returns the number of samples in the data set which will be used for training.
Definition: data_set.cpp:1382
Tensor< Descriptives, 1 > scale_target_variables()
Definition: data_set.cpp:6298
Tensor< Index, 1 > get_training_samples_indices() const
Returns the indices of the samples which will be used for training.
Definition: data_set.cpp:1073
Tensor< Index, 1 > get_selection_samples_indices() const
Returns the indices of the samples which will be used for selection.
Definition: data_set.cpp:1098
void unscale_input_variables(const Tensor< Descriptives, 1 > &)
Definition: data_set.cpp:6351
Tensor< Index, 1 > get_target_variables_indices() const
Returns the indices of the target variables.
Definition: data_set.cpp:3094
Index get_selection_samples_number() const
Returns the number of samples in the data set which will be used for selection.
Definition: data_set.cpp:1402
void unscale_target_variables(const Tensor< Descriptives, 1 > &)
Definition: data_set.cpp:6397
Tensor< string, 1 > get_target_variables_names() const
Definition: data_set.cpp:2215
Tensor< Index, 1 > get_input_variables_indices() const
Returns the indices of the input variables.
Definition: data_set.cpp:3047
Tensor< string, 1 > get_input_variables_names() const
Definition: data_set.cpp:2184
Tensor< Index, 2 > get_batches(const Tensor< Index, 1 > &, const Index &, const bool &, const Index &buffer_size=100) const
Definition: data_set.cpp:1217
Tensor< Descriptives, 1 > scale_input_variables()
Definition: data_set.cpp:6243
This abstract class represents the concept of loss index composed of an error term and a regularizati...
Definition: loss_index.h:48
NeuralNetwork * get_neural_network_pointer() const
Returns a pointer to the neural network object associated to the error term.
Definition: loss_index.h:70
DataSet * get_data_set_pointer() const
Returns a pointer to the data set object associated to the error term.
Definition: loss_index.h:92
ScalingLayer * get_scaling_layer_pointer() const
Returns a pointer to the scaling layers object composing this neural network object.
bool has_long_short_term_memory_layer() const
bool has_scaling_layer() const
bool has_unscaling_layer() const
bool has_recurrent_layer() const
void forward_propagate(const DataSetBatch &, NeuralNetworkForwardPropagation &) const
Calculate forward propagation in neural network.
void save(const string &) const
void set_parameters(Tensor< type, 1 > &)
UnscalingLayer * get_unscaling_layer_pointer() const
Returns a pointer to the unscaling layers object composing this neural network object.
void set_inputs_names(const Tensor< string, 1 > &)
Index get_parameters_number() const
void set_outputs_names(const Tensor< string, 1 > &)
string neural_network_file_name
Path where the neural network is saved.
void set_hardware_use(const string &)
Set hardware to use. Default: Multi-core.
string get_hardware_use() const
Hardware use.
LossIndex * loss_index_pointer
Pointer to a loss index for a neural network object.
bool display
Display messages to screen.
const string write_time(const type &) const
Writes the time from seconds in format HH:mm:ss.
Index save_period
Number of iterations between the training saving progress.
Index display_period
Number of iterations between the training showing progress.
This class represents a layer of scaling neurons.
Definition: scaling_layer.h:38
void set()
Sets the scaling layer to be empty.
This class represents a layer of unscaling neurons.
void set()
Sets the unscaling layer to be empty.
void PushText(const char *text, bool cdata=false)
Add a text node.
Definition: tinyxml2.cpp:2878
virtual void CloseElement(bool compactMode=false)
If streaming, close the Element.
Definition: tinyxml2.cpp:2834
half pow(half x, half y)
Definition: half.hpp:3427
This structure contains the optimization algorithm results.
Tensor< type, 1 > selection_error_history
History of the selection error over the training iterations.
void resize_training_error_history(const Index &)
Resizes the training error history keeping the values.
OptimizationAlgorithm::StoppingCondition stopping_condition
Stopping condition of the algorithm.
void resize_selection_error_history(const Index &)
Resizes the selection error history keeping the values.
Tensor< type, 1 > training_error_history
History of the loss function loss over the training iterations.
string elapsed_time
Elapsed time of the training process.