// Implementation extract of OpenNN's WeightedSquaredError loss.
// NOTE(review): this is a garbled extraction — the original source line numbers
// are fused into the text (e.g. "9#include"), and many lines are missing.
9#include "weighted_squared_error.h"
// Constructor fragment: delegates to the LossIndex base constructor with the
// neural network and data set pointers. The constructor signature itself is
// not visible in this extract.
33 :
LossIndex(new_neural_network_pointer, new_data_set_pointer)
// Returns the normalization coefficient of the weighted squared error.
// NOTE(review): the method name is misspelled ("normalizaton", missing an 'i').
// Renaming would break external callers — verify against the class declaration
// before fixing.
62type WeightedSquaredError::get_normalizaton_coefficient()
const
// --- fragment from set_weights(): derives the class weights from the target
// distribution. target_distribution[0] is the negative count, [1] the positive
// count (per the variable names below).
138 const Index negatives = target_distribution[0];
139 const Index positives = target_distribution[1];
// Degenerate case: one class is entirely absent. The handling branch body is
// not visible in this extract — presumably it sets default weights; confirm.
141 if(positives == 0 || negatives == 0)
// Positives are up-weighted by the class-imbalance ratio negatives/positives,
// so that the rarer class contributes comparably to the error.
150 positives_weight =
static_cast<type
>(negatives)/
static_cast<type
>(positives);
// Computes the weighted squared error for one batch and stores it in
// back_propagation.error. Lines 206-210, 212-215, 218, and 221-227 of the
// original file are missing from this extract (remaining parameters, braces,
// the probabilistic_layer_back_propagation declaration, and the f_1/f_2
// assignments).
205void WeightedSquaredError::calculate_error(
const DataSetBatch& batch,
// Output layer is assumed to be the last trainable layer.
211 LayerForwardPropagation* output_layer_forward_propagation = forward_propagation.layers(trainable_layers_number-1);
216 const Tensor<type, 2>& targets = batch.targets_2d;
// NOTE(review): outputs are read from a *back*-propagation structure's
// activations here, while calculate_squared_errors_lm below reads activations
// from the *forward* propagation — confirm this is intentional and not a bug.
217 const Tensor<type, 2>& outputs = probabilistic_layer_back_propagation->activations;
// Element-wise masks selecting positive (target==1) and negative (target==0)
// entries of the batch.
219 const Tensor<bool, 2> if_sentence = targets == targets.constant(type(1));
220 const Tensor<bool, 2> else_sentence = targets == targets.constant(type(0));
// f_1/f_2 hold the weighted squared residuals for positives/negatives; their
// assignments (original lines 223-227) are not visible in this extract.
222 Tensor<type, 2> f_1(targets.dimension(0), targets.dimension(1));
225 Tensor<type, 2> f_2(targets.dimension(0), targets.dimension(1));
// f_3 is the zero fallback for entries that are neither exactly 0 nor 1.
228 Tensor<type, 2> f_3(targets.dimension(0), targets.dimension(1));
229 f_3 = outputs.constant(type(0));
// Sum of per-element weighted squared errors over the whole batch.
231 const Tensor<type, 0> weighted_sum_squared_error = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum();
233 const Index batch_samples_number = batch.get_samples_number();
// Scale by the batch fraction of the data set times the normalization
// coefficient; the error is the weighted sum divided by this coefficient.
236 const type coefficient = (
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient;
238 back_propagation.error = weighted_sum_squared_error(0)/coefficient;
// Levenberg-Marquardt variant: the error is the sum of the squared residuals
// already stored in back_propagation.squared_errors, normalized like
// calculate_error above. Original lines 245, 248, 250-251, and 253 (braces and
// the total_samples_number declaration) are missing from this extract.
242void WeightedSquaredError::calculate_error_lm(
const DataSetBatch& batch,
243 const NeuralNetworkForwardPropagation&,
244 LossIndexBackPropagationLM &back_propagation)
const
246 Tensor<type, 0> error;
// Element-wise square then reduce, evaluated on the shared thread pool device.
247 error.device(*thread_pool_device) = (back_propagation.squared_errors*back_propagation.squared_errors).sum();
249 const Index batch_samples_number = batch.get_samples_number();
// Same normalization as calculate_error: (batch / total) * normalization_coefficient.
252 const type coefficient = (
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient;
// error() extracts the scalar from the rank-0 tensor.
254 back_propagation.error = error()/coefficient;
// Computes the output-layer delta (derivative of the error w.r.t. the output
// activations) and writes it into the probabilistic layer's back-propagation
// buffer. Original lines 261-269, 271, 274, 276, 278-281, 284, 286-290, and 293
// (braces, trainable_layers_number, and the f_1/f_2 assignments) are missing
// from this extract.
258void WeightedSquaredError::calculate_output_delta(
const DataSetBatch& batch,
259 NeuralNetworkForwardPropagation& ,
260 LossIndexBackPropagation& back_propagation)
const
270 LayerBackPropagation* output_layer_back_propagation = back_propagation.neural_network.layers(trainable_layers_number-1);
// Downcast: the output layer is assumed to be a probabilistic layer.
272 ProbabilisticLayerBackPropagation* probabilistic_layer_back_propagation
273 =
static_cast<ProbabilisticLayerBackPropagation*
>(output_layer_back_propagation);
275 const Tensor<type, 2>& targets = batch.targets_2d;
// NOTE(review): targets_2d.size() is the TOTAL element count (rows*columns),
// while every sibling method uses batch.get_samples_number() (row count).
// For multi-target data sets this changes the coefficient — likely a bug;
// confirm against the upstream implementation.
277 const Index batch_samples_number = batch.targets_2d.size();
// Derivative scaling: 2 / ((batch/total) * normalization_coefficient) — the
// 2 comes from d(e^2)/de.
280 const type coefficient =
static_cast<type
>(2.0)/((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
// Masks for positive (target==1) and negative (target==0) entries.
282 const Tensor<bool, 2> if_sentence = targets == targets.constant(type(1));
283 const Tensor<bool, 2> else_sentence = targets == targets.constant(type(0));
// f_1/f_2 hold the weighted deltas for positives/negatives; their assignments
// (original lines 286-290) are not visible in this extract.
285 Tensor<type, 2> f_1(targets.dimension(0), targets.dimension(1));
288 Tensor<type, 2> f_2(targets.dimension(0), targets.dimension(1));
// Zero fallback for entries that are neither exactly 0 nor 1.
291 Tensor<type, 2> f_3(targets.dimension(0), targets.dimension(1));
292 f_3 = targets.constant(type(0));
294 probabilistic_layer_back_propagation->delta.device(*thread_pool_device) = if_sentence.select(f_1, else_sentence.select(f_2, f_3));
// Body fragment of calculate_error_gradient_lm (signature not visible here;
// see the declaration "void calculate_error_gradient_lm(const DataSetBatch&,
// LossIndexBackPropagationLM&) const" in the member listing below).
// Gradient = coefficient * J^T * e, where J is the squared-errors Jacobian
// and e the squared-errors vector.
309 const Index batch_samples_number = batch.get_samples_number();
// Same 2/((batch/total)*normalization_coefficient) scaling as the output delta.
312 const type coefficient = type(2)/((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
// AT_B contraction: transpose(J) * e, evaluated on the thread pool device.
314 loss_index_back_propagation_lm.gradient.device(*thread_pool_device)
315 = loss_index_back_propagation_lm.squared_errors_jacobian.contract(loss_index_back_propagation_lm.squared_errors, AT_B);
317 loss_index_back_propagation_lm.gradient.device(*thread_pool_device) = coefficient * loss_index_back_propagation_lm.gradient;
// Gauss-Newton Hessian approximation for Levenberg-Marquardt:
// H = coefficient * J^T * J (second-derivative terms dropped).
// Original lines 322-329, 331-332, 334, and 337 (remaining parameters, braces)
// are missing from this extract.
321void WeightedSquaredError::calculate_error_hessian_lm(
const DataSetBatch& batch,
330 const Index batch_samples_number = batch.get_samples_number();
// Same 2/((batch/total)*normalization_coefficient) scaling as the gradient.
333 const type coefficient = type(2)/((
static_cast<type
>(batch_samples_number)/
static_cast<type
>(total_samples_number))*
normalization_coefficient);
// AT_B contraction: transpose(J) * J, evaluated on the thread pool device.
335 loss_index_back_propagation_lm.hessian.device(*thread_pool_device)
336 = loss_index_back_propagation_lm.squared_errors_jacobian.contract(loss_index_back_propagation_lm.squared_errors_jacobian, AT_B);
338 loss_index_back_propagation_lm.hessian.device(*thread_pool_device) = coefficient*loss_index_back_propagation_lm.hessian;
// Return statements of get_error_type() / get_error_type_text() (signatures
// not visible here; see the member listing below): the machine-readable and
// human-readable names of this loss.
346 return "WEIGHTED_SQUARED_ERROR";
354 return "Weighted squared error";
// Fragment of write_XML(tinyxml2::XMLPrinter&): serializes the object as
// <WeightedSquaredError><PositivesWeight>..</PositivesWeight>
// <NegativesWeight>..</NegativesWeight></WeightedSquaredError>.
// The buffer << weight lines and the CloseElement() calls (original lines
// 373-376, 378-382, 384-387, 389+) are missing from this extract.
364 ostringstream buffer;
368 file_stream.OpenElement(
"WeightedSquaredError");
372 file_stream.OpenElement(
"PositivesWeight");
// Writes the buffered positives weight as element text.
377 file_stream.
PushText(buffer.str().c_str());
383 file_stream.OpenElement(
"NegativesWeight");
// NOTE(review): buffer is reused; presumably it is reset (buffer.str(""))
// between the two weights in the missing lines — confirm.
388 file_stream.
PushText(buffer.str().c_str());
// Fragment of from_XML(const tinyxml2::XMLDocument&): loads PositivesWeight
// and NegativesWeight from the WeightedSquaredError root element. The root
// element lookup, try blocks, and the set_*_weight(stof(...)) calls are in
// lines missing from this extract.
// Error path: the root element was not found.
407 ostringstream buffer;
409 buffer <<
"OpenNN Exception: WeightedSquaredError class.\n"
410 <<
"void from_XML(const tinyxml2::XMLDocument&) method.\n"
411 <<
"Weighted squared element is nullptr.\n";
413 throw logic_error(buffer.str());
418 const tinyxml2::XMLElement* positives_weight_element = root_element->FirstChildElement(
"PositivesWeight");
420 if(positives_weight_element)
// NOTE(review): "const string string" shadows the type name std::string —
// legal but confusing; also GetText() can return nullptr for an empty
// element, which makes this construction undefined behavior — confirm the
// element is always non-empty.
422 const string string = positives_weight_element->GetText();
// Parse failures are reported to stderr and otherwise ignored (best-effort).
428 catch(
const logic_error& e)
430 cerr << e.what() << endl;
436 const tinyxml2::XMLElement* negatives_weight_element = root_element->FirstChildElement(
"NegativesWeight");
438 if(negatives_weight_element)
// Same shadowing / possible-null GetText() concern as above.
440 const string string = negatives_weight_element->GetText();
446 catch(
const logic_error& e)
448 cerr << e.what() << endl;
// Returns the weighted sum of squared differences between x (outputs) and
// y (targets): positive targets (y==1) and negative targets (y==0) are
// weighted separately; other entries contribute zero.
454type WeightedSquaredError::weighted_sum_squared_error(
const Tensor<type, 2>& x,
const Tensor<type, 2>& y)
const
// Debug-only dimension checks: x and y must have identical shapes.
456#ifdef __OPENNN_DEBUG__
458 const Index rows_number = x.dimension(0);
459 const Index columns_number = x.dimension(1);
461 const Index other_rows_number = y.dimension(0);
463 if(other_rows_number != rows_number)
465 ostringstream buffer;
// NOTE(review): both exception messages below still name "minkowski_error" —
// stale copy-paste; they should reference this method. (String literals, so
// left unchanged here.)
467 buffer <<
"OpenNN Exception: Metrics functions.\n"
468 <<
"double minkowski_error(const Matrix<double>&, const double&) method.\n"
469 <<
"Other number of rows must be equal to this number of rows.\n";
471 throw logic_error(buffer.str());
474 const Index other_columns_number = y.dimension(1);
476 if(other_columns_number != columns_number)
478 ostringstream buffer;
480 buffer <<
"OpenNN Exception: Metrics functions.\n"
481 <<
"double minkowski_error(const Matrix<double>&, const double&) method.\n"
482 <<
"Other number of columns must be equal to this number of columns.\n";
484 throw logic_error(buffer.str());
// Masks for positive (y==1) and negative (y==0) targets.
489 const Tensor<bool, 2> if_sentence = y == y.constant(type(1));
490 const Tensor<bool, 2> else_sentence = y == y.constant(type(0));
// f_1/f_2 hold the weighted squared residuals for positives/negatives; their
// assignments (original lines ~498-500) are not visible in this extract.
492 Tensor<type, 2> f_1(x.dimension(0), x.dimension(1));
494 Tensor<type, 2> f_2(x.dimension(0), x.dimension(1));
// Zero fallback for targets that are neither exactly 0 nor 1.
496 Tensor<type, 2> f_3(x.dimension(0), x.dimension(1));
502 f_3 = x.constant(type(0));
// Reduce the selected per-element contributions to a scalar and return it.
504 const Tensor<type, 0> weighted_sum_squared_error = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum();
506 return weighted_sum_squared_error(0);
// Fills loss_index_back_propagation_lm.squared_errors with the per-sample
// (row-wise) weighted error magnitudes, for Levenberg-Marquardt training.
// Original lines 513-515, 517, 519, 522, 524, 526, 528-529, and 531-532
// (braces and the f_1/f_2 assignments) are missing from this extract.
510void WeightedSquaredError::calculate_squared_errors_lm(
const DataSetBatch& batch,
511 const NeuralNetworkForwardPropagation& forward_propagation,
512 LossIndexBackPropagationLM& loss_index_back_propagation_lm)
const
516 LayerForwardPropagation* output_layer_forward_propagation = forward_propagation.layers(trainable_layers_number-1);
518 const Tensor<type, 2>& targets = batch.targets_2d;
// Downcast: the output layer is assumed to be a probabilistic layer.
520 ProbabilisticLayerForwardPropagation* probabilistic_layer_forward_propagation
521 =
static_cast<ProbabilisticLayerForwardPropagation*
>(output_layer_forward_propagation);
523 const Tensor<type, 2>& outputs = probabilistic_layer_forward_propagation->activations;
// NOTE(review): this mask compares OUTPUTS to 1, while every sibling method
// (calculate_error, calculate_output_delta, weighted_sum_squared_error)
// compares TARGETS to 1 — likely should be targets == 1; confirm upstream.
525 const Tensor<bool, 2> if_sentence = outputs == outputs.constant(type(1));
// f_1/f_2 hold the weighted residuals for the two classes; their assignments
// (original lines 528-532) are not visible in this extract.
527 Tensor<type, 2> f_1(outputs.dimension(0), outputs.dimension(1));
530 Tensor<type, 2> f_2(outputs.dimension(0), outputs.dimension(1));
// Per-row sum of squared contributions, then sqrt: one residual per sample.
533 loss_index_back_propagation_lm.squared_errors = ((if_sentence.select(f_1, f_2)).sum(rows_sum).square()).
sqrt();
This class represents the concept of data set for data modelling problems, such as approximation,...
Index get_target_variables_number() const
Returns the number of target variables of the data set.
bool is_empty() const
Returns true if the data matrix is empty, and false otherwise.
Tensor< Index, 1 > calculate_target_distribution() const
Tensor< Index, 1 > get_target_variables_indices() const
Returns the indices of the target variables.
Index calculate_used_negatives(const Index &) const
Tensor< Column, 1 > get_target_columns() const
Returns the target columns of the data set.
This abstract class represents the concept of loss index composed of an error term and a regularizati...
DataSet * data_set_pointer
Pointer to a data set object.
NeuralNetwork * neural_network_pointer
Pointer to a neural network object.
bool has_data_set() const
type positives_weight
Weight for the positives for the calculation of the error.
void set_weights()
Calculates the weights for the positive and negative values from the data of the data set.
type normalization_coefficient
Coefficient of normalization.
void set_data_set_pointer(DataSet *)
Sets the pointer to the data set object.
void set_normalization_coefficient()
Calculates the normalization coefficient from the data of the data set.
void from_XML(const tinyxml2::XMLDocument &)
void set_default()
Set the default values for the object.
type get_positives_weight() const
Returns the weight of the positives.
type negatives_weight
Weight for the negatives for the calculation of the error.
void set_negatives_weight(const type &)
string get_error_type() const
Returns a string with the name of the weighted squared error loss type, "WEIGHTED_SQUARED_ERROR".
void set_positives_weight(const type &)
void calculate_error_gradient_lm(const DataSetBatch &, LossIndexBackPropagationLM &) const
void write_XML(tinyxml2::XMLPrinter &) const
string get_error_type_text() const
Returns a string with the name of the weighted squared error loss type in text format.
type get_negatives_weight() const
Returns the weight of the negatives.
virtual ~WeightedSquaredError()
Destructor.
void PushText(const char *text, bool cdata=false)
Add a text node.
virtual void CloseElement(bool compactMode=false)
If streaming, close the Element.
uint32 sqrt(uint32 &r, int &exp)
A loss index composed of several terms, this structure represent the First Order for this function.