9#include "normalized_squared_error.h"
32 :
LossIndex(new_neural_network_pointer, new_data_set_pointer)
57 return selection_normalization_coefficient;
87 const Tensor<type, 1> targets_mean =
data_set_pointer->calculate_used_targets_mean();
107 const Index rows = targets.dimension(0)-1;
108 const Index columns = targets.dimension(1);
110 Tensor<type, 2> targets_t(rows, columns);
111 Tensor<type, 2> targets_t_1(rows, columns);
113 for(Index i = 0; i < columns; i++)
115 memcpy(targets_t_1.data() + targets_t_1.dimension(0) * i,
116 targets.data() + targets.dimension(0) * i,
117 static_cast<size_t>(rows)*
sizeof(type));
120 for(Index i = 0; i < columns; i++)
122 memcpy(targets_t.data() + targets_t.dimension(0) * i,
123 targets.data() + targets.dimension(0) * i + 1,
124 static_cast<size_t>(rows)*
sizeof(type));
133type NormalizedSquaredError::calculate_time_series_normalization_coefficient(
const Tensor<type, 2>& targets_t_1,
134 const Tensor<type, 2>& targets_t)
const
140 const Index target_t_1_samples_number = targets_t_1.dimension(0);
141 const Index target_t_1_variables_number = targets_t_1.dimension(1);
142 const Index target_t_samples_number = targets_t.dimension(0);
143 const Index target_t_variables_number = targets_t.dimension(1);
145 if(target_t_1_samples_number != target_t_samples_number || target_t_1_variables_number != target_t_variables_number)
147 ostringstream buffer;
149 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
150 <<
"type calculate_time_series_normalization_coefficient(const Tensor<type, 2>& targets_t_1, const Tensor<type, 2>& targets_t) function.\n"
151 <<
" The columns number of targets("<< target_t_variables_number <<
") must be equal("<< target_t_1_variables_number<<
").\n"
152 <<
" The samples number of targets("<< target_t_1_samples_number <<
") must be equal("<< target_t_samples_number<<
").\n";
154 throw logic_error(buffer.str());
158 const Index target_samples_number = targets_t_1.dimension(0);
159 const Index target_varaibles_number = targets_t_1.dimension(1);
163 for(Index i = 0; i < target_samples_number; i++)
165 for(Index j = 0; j < target_varaibles_number; j++)
193 const Index selection_samples_number = selection_indices.size();
195 if(selection_samples_number == 0)
return;
212 selection_normalization_coefficient = new_selection_normalization_coefficient;
228 selection_normalization_coefficient = type(NAN);
239 const Tensor<type, 1>& targets_mean)
const
245 const Index means_number = targets_mean.dimension(0);
246 const Index targets_number = targets.dimension(1);
248 if(targets_number != means_number)
250 ostringstream buffer;
252 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
253 <<
"type calculate_normalization_coefficient(const Tensor<type, 2>& targets, const Tensor<type, 1>& targets_mean) function.\n"
254 <<
" The columns number of targets("<< targets_number <<
") must be equal("<< means_number<<
").\n";
256 throw logic_error(buffer.str());
260 const Index size = targets.dimension(0);
264 for(Index i = 0; i < size; i++)
266 const Tensor<type, 0> norm = (targets.chip(i,0) - targets_mean).square().sum();
290 ostringstream buffer;
292 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
293 <<
"calculate_error() method.\n"
294 <<
"Normalization coefficient is NAN.\n";
296 throw logic_error(buffer.str());
300 Tensor<type, 0> sum_squared_error;
302 sum_squared_error.device(*thread_pool_device) = back_propagation.errors.contract(back_propagation.errors, SSE);
304 const Index batch_samples_number = batch.get_samples_number();
307 const type coefficient = ((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
309 back_propagation.error
310 = sum_squared_error(0)/coefficient;
314void NormalizedSquaredError::calculate_error_lm(
const DataSetBatch& batch,
322 ostringstream buffer;
324 buffer <<
"OpenNN Exception: NormalizedquaredError class.\n"
325 <<
"calculate_error() method.\n"
326 <<
"Normalization coefficient is NAN.\n";
328 throw logic_error(buffer.str());
332 Tensor<type, 0> sum_squared_error;
334 sum_squared_error.device(*thread_pool_device) = (back_propagation.squared_errors*back_propagation.squared_errors).sum();
336 const Index batch_samples_number = batch.get_samples_number();
339 const type coefficient = ((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
341 back_propagation.error = sum_squared_error(0)/coefficient;
345void NormalizedSquaredError::calculate_output_delta(
const DataSetBatch& batch,
346 NeuralNetworkForwardPropagation&,
347 LossIndexBackPropagation& back_propagation)
const
357 LayerBackPropagation* output_layer_back_propagation = back_propagation.neural_network.layers(trainable_layers_number-1);
359 Layer* output_layer_pointer = output_layer_back_propagation->layer_pointer;
361 const Index batch_samples_number = batch.get_samples_number();
364 const type coefficient
365 =
static_cast<type
>(2)/(
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number)*
normalization_coefficient);
367 switch(output_layer_pointer->get_type())
369 case Layer::Type::Perceptron:
371 PerceptronLayerBackPropagation* perceptron_layer_back_propagation
372 =
static_cast<PerceptronLayerBackPropagation*
>(output_layer_back_propagation);
374 perceptron_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
378 case Layer::Type::Probabilistic:
380 ProbabilisticLayerBackPropagation* probabilistic_layer_back_propagation
381 =
static_cast<ProbabilisticLayerBackPropagation*
>(output_layer_back_propagation);
383 probabilistic_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
387 case Layer::Type::Recurrent:
389 RecurrentLayerBackPropagation* recurrent_layer_back_propagation
390 =
static_cast<RecurrentLayerBackPropagation*
>(output_layer_back_propagation);
392 recurrent_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
396 case Layer::Type::LongShortTermMemory:
398 LongShortTermMemoryLayerBackPropagation* long_short_term_memory_layer_back_propagation
399 =
static_cast<LongShortTermMemoryLayerBackPropagation*
>(output_layer_back_propagation);
401 long_short_term_memory_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
410void NormalizedSquaredError::calculate_output_delta_lm(
const DataSetBatch& ,
411 NeuralNetworkForwardPropagation&,
412 LossIndexBackPropagationLM & loss_index_back_propagation)
const
420 LayerBackPropagationLM* output_layer_back_propagation = loss_index_back_propagation.neural_network.layers(trainable_layers_number-1);
422 Layer* output_layer_pointer = output_layer_back_propagation->layer_pointer;
424 switch(output_layer_pointer->get_type())
426 case Layer::Type::Perceptron:
428 PerceptronLayerBackPropagationLM* perceptron_layer_back_propagation
429 =
static_cast<PerceptronLayerBackPropagationLM*
>(output_layer_back_propagation);
431 memcpy(perceptron_layer_back_propagation->delta.data(),
432 loss_index_back_propagation.errors.data(),
433 static_cast<size_t>(loss_index_back_propagation.errors.size())*
sizeof(type));
435 divide_columns(perceptron_layer_back_propagation->delta, loss_index_back_propagation.squared_errors);
439 case Layer::Type::Probabilistic:
441 ProbabilisticLayerBackPropagationLM* probabilistic_layer_back_propagation
442 =
static_cast<ProbabilisticLayerBackPropagationLM*
>(output_layer_back_propagation);
444 memcpy(probabilistic_layer_back_propagation->delta.data(),
445 loss_index_back_propagation.errors.data(),
446 static_cast<size_t>(loss_index_back_propagation.errors.size())*
sizeof(type));
448 divide_columns(probabilistic_layer_back_propagation->delta, loss_index_back_propagation.squared_errors);
454 ostringstream buffer;
456 buffer <<
"OpenNN Exception: NeuralNetwork class.\n"
457 <<
"Levenberg-Marquardt can only be used with Perceptron and Probabilistic layers.\n";
459 throw logic_error(buffer.str());
465void NormalizedSquaredError::calculate_error_gradient_lm(
const DataSetBatch& batch,
466 LossIndexBackPropagationLM& loss_index_back_propagation_lm)
const
468 const Index batch_samples_number = batch.get_samples_number();
471 const type coefficient = type(2)/((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
473 loss_index_back_propagation_lm.gradient.device(*thread_pool_device)
474 = loss_index_back_propagation_lm.squared_errors_jacobian.contract(loss_index_back_propagation_lm.squared_errors, AT_B);
476 loss_index_back_propagation_lm.gradient.device(*thread_pool_device) = coefficient * loss_index_back_propagation_lm.gradient;
480void NormalizedSquaredError::calculate_error_hessian_lm(
const DataSetBatch& batch,
481 LossIndexBackPropagationLM& loss_index_back_propagation_lm)
const
489 const Index batch_samples_number = batch.get_samples_number();
492 const type coefficient = type(2)/((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
494 loss_index_back_propagation_lm.hessian.device(*thread_pool_device) =
495 loss_index_back_propagation_lm.squared_errors_jacobian.contract(loss_index_back_propagation_lm.squared_errors_jacobian, AT_B);
497 loss_index_back_propagation_lm.hessian.device(*thread_pool_device) = coefficient*loss_index_back_propagation_lm.hessian;
505 return "NORMALIZED_SQUARED_ERROR";
513 return "Normalized squared error";
524 file_stream.OpenElement(
"NormalizedSquaredError");
535 const tinyxml2::XMLElement* root_element = document.FirstChildElement(
"NormalizedSquaredError");
539 ostringstream buffer;
541 buffer <<
"OpenNN Exception: NormalizedSquaredError class.\n"
542 <<
"void from_XML(const tinyxml2::XMLDocument&) method.\n"
543 <<
"Normalized squared element is nullptr.\n";
545 throw logic_error(buffer.str());
This class represents the concept of data set for data modelling problems, such as approximation, classification or forecasting.
Tensor< type, 2 > get_target_data() const
Tensor< Index, 1 > get_selection_samples_indices() const
Returns the indices of the samples which will be used for selection.
Tensor< type, 1 > calculate_selection_targets_mean() const
Returns the mean values of the target variables on the selection.
bool is_empty() const
Returns true if the data matrix is empty, and false otherwise.
Tensor< type, 2 > get_selection_target_data() const
This abstract class represents the concept of loss index composed of an error term and a regularization term.
DataSet * data_set_pointer
Pointer to a data set object.
NeuralNetwork * neural_network_pointer
Pointer to a neural network object.
bool has_data_set() const
bool has_neural_network() const
bool has_long_short_term_memory_layer() const
bool has_recurrent_layer() const
type normalization_coefficient
Coefficient of normalization for the calculation of the training error.
type get_selection_normalization_coefficient() const
Returns the selection normalization coefficient.
void set_normalization_coefficient()
void from_XML(const tinyxml2::XMLDocument &)
void set_default()
Sets the default values.
void calculate_error(const DataSetBatch &, const NeuralNetworkForwardPropagation &, LossIndexBackPropagation &) const
NormalizedSquaredError::calculate_error.
type get_normalization_coefficient() const
Returns the normalization coefficient.
void set_selection_normalization_coefficient()
string get_error_type() const
Returns a string with the name of the normalized squared error loss type, "NORMALIZED_SQUARED_ERROR".
type calculate_normalization_coefficient(const Tensor< type, 2 > &, const Tensor< type, 1 > &) const
void set_time_series_normalization_coefficient()
void set_data_set_pointer(DataSet *new_data_set_pointer)
set_data_set_pointer
virtual ~NormalizedSquaredError()
Destructor.
void write_XML(tinyxml2::XMLPrinter &) const
string get_error_type_text() const
Returns a string with the name of the normalized squared error loss type in text format.
virtual void CloseElement(bool compactMode=false)
If streaming, close the Element.
HALF_CONSTEXPR bool isnan(half arg)
A loss index composed of several terms, this structure represent the First Order for this function.