9#include "correlations.h"
11#include "neural_network.h"
12#include "training_strategy.h"
/// Computes a simple linear regression y = a + b*x between two vectors and
/// stores intercept (a), slope (b) and Pearson coefficient (r) in the
/// returned Correlation.
/// NOTE(review): this chunk is a garbled extraction — original line numbers
/// are fused into the code and braces/some lines are missing.  Comments
/// annotate the visible logic only; do not treat the text as compilable.
21Correlation linear_correlation(
const ThreadPoolDevice* thread_pool_device,
22 const Tensor<type, 1>& x,
23 const Tensor<type, 1>& y)
// --- Size guard: both vectors must have the same length. ---
// NOTE(review): the declaration of `buffer` (an ostringstream) is not
// visible in this chunk — presumably lost in extraction; confirm upstream.
27 const Index x_size = x.size();
31 if(x_size != y.size())
33 buffer <<
"OpenNN Exception: Vector Template.\n"
34 <<
"Correlation linear_correlation(const Tensor<type, 1>&) const method.\n"
35 <<
"Y size must be equal to X size.\n";
37 throw logic_error(buffer.str());
// Result aggregate, returned by value.
42 Correlation linear_correlation;
// --- Early exit: a = y(0), b = 0, r = 1 (perfect fit of a constant). ---
// NOTE(review): the condition guarding this branch is not visible here —
// it presumably detects constant input; confirm upstream before editing.
48 linear_correlation.
a = y(0);
49 linear_correlation.
b = type(0);
50 linear_correlation.
r = type(1);
52 return linear_correlation;
// Drop rows where either vector holds NaN, then work in double precision.
55 pair<Tensor<type, 1>, Tensor<type, 1>> filter_vectors = filter_missing_values_vector_vector(x,y);
57 const Tensor<double, 1> x_filter = filter_vectors.first.cast<
double>();
58 const Tensor<double, 1> y_filter = filter_vectors.second.cast<
double>();
// Rank-0 tensors receiving the five sufficient statistics of the fit.
60 Tensor<double, 0> s_x;
61 Tensor<double, 0> s_y;
63 Tensor<double, 0> s_xx;
64 Tensor<double, 0> s_yy;
66 Tensor<double, 0> s_xy;
// Reductions evaluated on the thread-pool device.
68 s_x.device(*thread_pool_device) = x_filter.sum();
69 s_y.device(*thread_pool_device) = y_filter.sum();
70 s_xx.device(*thread_pool_device) = x_filter.square().sum();
71 s_yy.device(*thread_pool_device) = y_filter.square().sum();
72 s_xy.device(*thread_pool_device) = (y_filter*x_filter).sum();
// Degenerate all-zero data: report a = b = 0 and r = 1.
74 if(
abs(s_x()) < NUMERIC_LIMITS_MIN
75 &&
abs(s_y()) < NUMERIC_LIMITS_MIN
76 &&
abs(s_xx()) < NUMERIC_LIMITS_MIN
77 &&
abs(s_yy()) < NUMERIC_LIMITS_MIN
78 &&
abs(s_xy()) < NUMERIC_LIMITS_MIN)
80 linear_correlation.
a = type(0);
82 linear_correlation.
b = type(0);
84 linear_correlation.
r = type(1);
// --- Ordinary least squares on the filtered data. ---
88 const Index n = x_filter.size();
// Intercept: a = (Sy*Sxx - Sx*Sxy) / (n*Sxx - Sx^2)
90 linear_correlation.
a =
91 type((s_y() * s_xx() - s_x() * s_xy())/(
static_cast<double>(n) * s_xx() - s_x() * s_x()));
// Slope: b = (n*Sxy - Sx*Sy) / (n*Sxx - Sx^2)
93 linear_correlation.
b =
94 type(((
static_cast<double>(n) * s_xy()) - (s_x() * s_y())) /((
static_cast<double>(n) * s_xx()) - (s_x() * s_x())));
// Pearson r: guard against a vanishing denominator (zero variance).
96 if(
sqrt((
static_cast<double>(n) * s_xx() - s_x() * s_x()) *(
static_cast<double>(n) * s_yy() - s_y() * s_y())) < NUMERIC_LIMITS_MIN)
98 linear_correlation.
r = type(1);
// Otherwise r = (n*Sxy - Sx*Sy) / sqrt((n*Sxx - Sx^2)(n*Syy - Sy^2))
102 linear_correlation.
r =
103 type((
static_cast<double>(n) * s_xy() - s_x() * s_y()) /
104 sqrt((
static_cast<double>(n) * s_xx() - s_x() * s_x()) *(
static_cast<double>(n) * s_yy() - s_y() * s_y())));
108 return linear_correlation;
117Correlation logarithmic_correlation(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 1>& x,
const Tensor<type, 1>& y)
123 const Index x_size = x.size();
125 ostringstream buffer;
129 buffer <<
"OpenNN Exception: Vector Template.\n"
131 "logarithmic_correlation(const Tensor<type, 1>&) const "
133 <<
"Y size must be equal to X size.\n";
135 throw logic_error(buffer.str());
142 Correlation logarithmic_correlation;
144 for(Index i = 0; i < x.dimension(0); i++)
146 if(!
isnan(x(i)) && x(i) <= type(0))
148 logarithmic_correlation.
r = type(NAN);
150 return logarithmic_correlation;
154 logarithmic_correlation = linear_correlation(thread_pool_device, x.log(), y);
158 return logarithmic_correlation;
166Correlation exponential_correlation(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 1>& x,
const Tensor<type, 1>& y)
170 ostringstream buffer;
172 if(x.size() != y.size())
174 buffer <<
"OpenNN Exception: Vector Template.\n"
176 "exponential_correlation(const Tensor<type, 1>&, const Tensor<type, 1>&) const method.\n"
177 <<
"Y size must be equal to X size.\n";
179 throw logic_error(buffer.str());
186 Correlation exponential_correlation;
188 for(Index i = 0; i < y.dimension(0); i++)
190 if(!
isnan(y(i)) && y(i) <= type(0))
192 exponential_correlation.
r = type(NAN);
194 return exponential_correlation;
198 exponential_correlation = linear_correlation(thread_pool_device, x, y.log());
202 exponential_correlation.
a =
exp(exponential_correlation.
a);
203 exponential_correlation.
b = exponential_correlation.
b;
205 return exponential_correlation;
213Correlation power_correlation(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 1>& x,
const Tensor<type, 1>& y)
217 ostringstream buffer;
219 if(x.size() != y.size())
221 buffer <<
"OpenNN Exception: Vector Template.\n"
223 "power_correlation(const Tensor<type, 1>&) const "
225 <<
"Y size must be equal to X size.\n";
227 throw logic_error(buffer.str());
234 Correlation power_correlation;
236 for(Index i = 0; i < x.dimension(0); i++)
238 if(!
isnan(x(i)) && x(i) <= type(0))
240 power_correlation.
r = type(NAN);
242 return power_correlation;
245 if(!
isnan(y(i)) && y(i) <= type(0))
247 power_correlation.
r = type(NAN);
249 return power_correlation;
253 power_correlation = linear_correlation(thread_pool_device, x.log(), y.log());
257 power_correlation.
a =
exp(power_correlation.
a);
259 return power_correlation;
267Correlation logistic_correlation_vector_vector(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 1>& x,
const Tensor<type, 1>& y)
269 Correlation correlation;
271 const Tensor<type, 2> data = assemble_vector_vector(x, y);
273 DataSet data_set(data);
274 data_set.set_training();
276 data_set.set_columns_scalers(Scaler::MinimumMaximum);
278 NeuralNetwork neural_network(NeuralNetwork::ProjectType::Classification, {1,1});
280 neural_network.get_probabilistic_layer_pointer()->set_activation_function(ProbabilisticLayer::ActivationFunction::Logistic);
282 TrainingStrategy training_strategy(&neural_network, &data_set);
283 training_strategy.set_display(
false);
284 training_strategy.set_display_period(1);
286 training_strategy.set_loss_method(TrainingStrategy::LossMethod::NORMALIZED_SQUARED_ERROR);
287 training_strategy.get_loss_index_pointer()->set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);
289 training_strategy.set_optimization_method(TrainingStrategy::OptimizationMethod::LEVENBERG_MARQUARDT_ALGORITHM);
291 training_strategy.perform_training();
293 const Tensor<type, 2> inputs = data_set.get_input_data();
294 const Tensor<type, 2> targets = data_set.get_target_data();
295 const Tensor<type, 2> outputs = neural_network.calculate_outputs(inputs);
299 const Eigen::array<Index, 1> vector{{x.size()}};
301 correlation.
r = linear_correlation(thread_pool_device, outputs.reshape(vector), targets.reshape(vector)).
r;
305 const Tensor<type, 1> coefficients = neural_network.get_parameters();
307 correlation.
a = coefficients(0);
308 correlation.
b = coefficients(1);
310 if(correlation.
b < type(0)) correlation.
r *= type(-1);
316Correlation logistic_correlation_vector_matrix(
const ThreadPoolDevice* thread_pool_device,
317 const Tensor<type, 1>& x,
318 const Tensor<type, 2>& y)
320 Correlation correlation;
322 const Tensor<type, 2> data = OpenNN::assemble_vector_matrix(x, y);
324 Tensor<Index, 1> input_columns_indices(1);
325 input_columns_indices(0) = 0;
327 Tensor<Index, 1> target_columns_indices(y.dimension(1));
328 for(Index i = 0; i < y.dimension(1); i++) target_columns_indices(i) = 1+i;
330 DataSet data_set(data);
332 data_set.set_input_target_columns(input_columns_indices, target_columns_indices);
334 data_set.set_training();
336 const Index input_variables_number = data_set.get_input_variables_number();
337 const Index target_variables_number = data_set.get_target_variables_number();
339 NeuralNetwork neural_network(NeuralNetwork::ProjectType::Classification, {input_variables_number, target_variables_number});
340 neural_network.get_probabilistic_layer_pointer()->set_activation_function(ProbabilisticLayer::ActivationFunction::Logistic);
342 TrainingStrategy training_strategy(&neural_network, &data_set);
344 training_strategy.get_loss_index_pointer()->set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);
346 training_strategy.set_optimization_method(TrainingStrategy::OptimizationMethod::LEVENBERG_MARQUARDT_ALGORITHM);
348 training_strategy.set_display(
false);
350 training_strategy.perform_training();
354 const Tensor<type, 2> inputs = data_set.get_input_data();
355 const Tensor<type, 2> targets = data_set.get_target_data();
356 const Tensor<type, 2> outputs = neural_network.calculate_outputs(inputs);
358 const Eigen::array<Index, 1> vector{{targets.size()}};
360 correlation.
r = linear_correlation(thread_pool_device, outputs.reshape(vector), targets.reshape(vector)).
r;
368Correlation logistic_correlation_matrix_vector(
const ThreadPoolDevice* thread_pool_device,
369 const Tensor<type, 2>& x,
const Tensor<type, 1>& y)
371 return OpenNN::logistic_correlation_vector_matrix(thread_pool_device, y, x);
375Correlation logistic_correlation_matrix_matrix(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 2>& x,
const Tensor<type, 2>& y)
377 Correlation correlation;
379 const Tensor<type, 2> data = OpenNN::assemble_matrix_matrix(x, y);
381 Tensor<Index, 1> input_columns_indices(1);
382 input_columns_indices(0) = 0;
384 Tensor<Index, 1> target_columns_indices(y.dimension(1));
385 for(Index i = 0; i < y.dimension(1); i++) target_columns_indices(i) = 1+i;
387 DataSet data_set(data);
389 data_set.set_input_target_columns(input_columns_indices, target_columns_indices);
391 data_set.set_training();
393 const Index input_variables_number = data_set.get_input_variables_number();
394 const Index target_variables_number = data_set.get_target_variables_number();
396 NeuralNetwork neural_network(NeuralNetwork::ProjectType::Classification, {input_variables_number, target_variables_number});
397 neural_network.get_probabilistic_layer_pointer()->set_activation_function(ProbabilisticLayer::ActivationFunction::Logistic);
399 TrainingStrategy training_strategy(&neural_network, &data_set);
401 training_strategy.get_loss_index_pointer()->set_regularization_method(LossIndex::RegularizationMethod::NoRegularization);
403 training_strategy.set_optimization_method(TrainingStrategy::OptimizationMethod::LEVENBERG_MARQUARDT_ALGORITHM);
405 training_strategy.set_display(
false);
407 training_strategy.perform_training();
411 const Tensor<type, 2> inputs = data_set.get_input_data();
412 const Tensor<type, 2> targets = data_set.get_target_data();
413 const Tensor<type, 2> outputs = neural_network.calculate_outputs(inputs);
415 const Eigen::array<Index, 1> vector{{targets.size()}};
417 correlation.
r = linear_correlation(thread_pool_device, outputs.reshape(vector), targets.reshape(vector)).
r;
425Correlation correlation(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 2>& x,
const Tensor<type, 2>& y)
427 Correlation correlation;
429 const Index x_rows = x.dimension(0);
430 const Index x_columns = x.dimension(1);
431 const Index y_columns = y.dimension(1);
433 const bool x_binary = is_binary(x);
434 const bool y_binary = is_binary(y);
436 const Eigen::array<Index, 1> vector{{x_rows}};
438 if(x_columns == 1 && y_columns == 1)
440 if(!x_binary && !y_binary)
442 const Correlation linear_correlation
443 = OpenNN::linear_correlation(thread_pool_device, x.reshape(vector), y.reshape(vector));
445 const Correlation exponential_correlation
446 = OpenNN::exponential_correlation(thread_pool_device, x.reshape(vector), y.reshape(vector));
448 const Correlation logarithmic_correlation
449 = OpenNN::logarithmic_correlation(thread_pool_device, x.reshape(vector), y.reshape(vector));
451 const Correlation power_correlation
452 = OpenNN::power_correlation(thread_pool_device, x.reshape(vector), y.reshape(vector));
454 Correlation strongest_correlation = linear_correlation;
456 if(
abs(exponential_correlation.
r) >
abs(strongest_correlation.r))
457 strongest_correlation = exponential_correlation;
459 if(
abs(logarithmic_correlation.
r) >
abs(strongest_correlation.r))
460 strongest_correlation = logarithmic_correlation;
462 if(
abs(power_correlation.
r) >
abs(strongest_correlation.r))
463 strongest_correlation = power_correlation;
465 return strongest_correlation;
467 else if(!x_binary && y_binary)
469 return OpenNN::logistic_correlation_vector_vector(thread_pool_device, x.reshape(vector), y.reshape(vector));
471 else if(x_binary && !y_binary)
473 return OpenNN::logistic_correlation_vector_vector(thread_pool_device, y.reshape(vector), x.reshape(vector));
475 else if(x_binary && y_binary)
477 return OpenNN::linear_correlation(thread_pool_device, x.reshape(vector), y.reshape(vector));
480 else if(x_columns != 1 && y_columns == 1)
482 return OpenNN::logistic_correlation_matrix_vector(thread_pool_device, x, y.reshape(vector));
484 else if(x_columns == 1 && y_columns != 1)
486 return OpenNN::logistic_correlation_vector_matrix(thread_pool_device, x.reshape(vector), y);
488 else if(x_columns != 1 && y_columns != 1)
490 return OpenNN::logistic_correlation_matrix_matrix(thread_pool_device, x, y);
494 throw logic_error(
"Correlations Exception: Unknown case.");
/// Returns copies of x and y with the positions removed where either vector
/// holds a NaN; if nothing is missing the originals are returned unchanged.
/// NOTE(review): garbled extraction — the declaration of `new_size` and the
/// bodies of both loops (the counting pass and the copy pass) are missing
/// from this chunk; confirm against the upstream source before editing.
503pair<Tensor<type, 1>, Tensor<type, 1>> filter_missing_values_vector_vector(
const Tensor<type, 1>& x,
const Tensor<type, 1>& y)
// First pass: presumably counts positions where neither x(i) nor y(i) is
// NaN into `new_size` (body not visible here).
507 for(Index i = 0; i < x.size(); i++)
// Fast path: nothing filtered — return the inputs as-is.
512 if(new_size == x.size())
514 return make_pair(x, y);
// Allocate the compacted outputs.
517 Tensor<type, 1> new_x(new_size);
519 Tensor<type, 1> new_y(new_size);
// Second pass: presumably copies the surviving pairs into new_x/new_y
// (body not visible here).
523 for(Index i = 0; i < x.size(); i++)
534 return make_pair(new_x, new_y);
/// Returns copies of x and y with every row removed in which y or any entry
/// of that row of x is NaN.
/// NOTE(review): garbled extraction — braces, the declaration of the copy
/// index `index`, and the `not_NAN_row(i)` guard around the copy loops are
/// missing from this chunk.  Also note `y(i)` applies a single index to a
/// rank-2 tensor (presumably linear/first-column access) and the conditions
/// cast isnan's result through float — confirm both against upstream.
538pair<Tensor<type, 2>, Tensor<type, 2>> filter_missing_values_matrix_matrix(
const Tensor<type, 2>& x,
const Tensor<type, 2>& y)
540 const Index rows_number = x.dimension(0);
541 const Index x_columns_number = x.dimension(1);
542 const Index y_columns_number = y.dimension(1);
// Number of rows that survive the filter.
544 Index new_rows_number = 0;
// Per-row keep/drop flags.
546 Tensor<bool, 1> not_NAN_row(rows_number);
// First pass: mark each row, dropping it when y or any x entry is NaN.
548 for(Index i = 0; i < rows_number; i++)
550 not_NAN_row(i) =
true;
552 if(
float(
isnan(y(i))))
554 not_NAN_row(i) =
false;
558 for(Index j = 0; j < x_columns_number; j++)
560 if(
float(
isnan(x(i,j))))
562 not_NAN_row(i) =
false;
568 if(not_NAN_row(i)) new_rows_number++;
// Allocate the compacted outputs.
571 Tensor<type, 2> new_x(new_rows_number, x_columns_number);
573 Tensor<type, 2> new_y(new_rows_number,y_columns_number);
// Second pass: copy surviving rows.  NOTE(review): the guard on
// not_NAN_row(i) and the increment of `index` are not visible here.
577 for(Index i = 0; i < rows_number; i++)
581 for(Index j = 0; j < y_columns_number; j++)
583 new_y(index, j) = y(i,j);
586 for(Index j = 0; j < x_columns_number; j++)
588 new_x(index, j) = x(i, j);
595 return make_pair(new_x, new_y);
603Tensor<type, 1> autocorrelations(
const ThreadPoolDevice* thread_pool_device,
const Tensor<type, 1>& x,
const Index& lags_number)
605 Tensor<type, 1> autocorrelation(lags_number);
607 const Index this_size = x.size();
609 for(Index i = 0; i < lags_number; i++)
611 Tensor<type, 1> column_x(this_size-i);
612 Tensor<type, 1> column_y(this_size-i);
614 for(Index j = 0; j < this_size - i; j++)
617 column_y(j) = x(j + i);
620 autocorrelation(i) = linear_correlation(thread_pool_device, column_x, column_y).
r;
623 return autocorrelation;
632Tensor<type, 1> cross_correlations(
const ThreadPoolDevice* thread_pool_device,
633 const Tensor<type, 1>& x,
const Tensor<type, 1>& y,
const Index& maximum_lags_number)
635 if(y.size() != x.size())
637 ostringstream buffer;
639 buffer <<
"OpenNN Exception: Correlations.\n"
640 <<
"Tensor<type, 1> calculate_cross_correlation(const Tensor<type, 1>&) method.\n"
641 <<
"Both vectors must have the same size.\n";
643 throw logic_error(buffer.str());
646 Tensor<type, 1> cross_correlation(maximum_lags_number);
648 const Index this_size = x.size();
650 for(Index i = 0; i < maximum_lags_number; i++)
652 Tensor<type, 1> column_x(this_size-i);
653 Tensor<type, 1> column_y(this_size-i);
655 for(Index j = 0; j < this_size - i; j++)
658 column_y(j) = y(j + i);
661 cross_correlation[i] = linear_correlation(thread_pool_device, column_x, column_y).
r;
664 return cross_correlation;
668Tensor<type, 2> get_correlation_values(
const Tensor<Correlation, 2>& correlations)
670 const Index rows_number = correlations.dimension(0);
671 const Index columns_number = correlations.dimension(1);
673 Tensor<type, 2> values(rows_number, columns_number);
675 for(Index i = 0; i < rows_number; i++)
677 for(Index j = 0; j < columns_number; j++)
679 values(i,j) = correlations(i,j).r;
uint32 sqrt(uint32 &r, int &exp)
HALF_CONSTEXPR half abs(half arg)
HALF_CONSTEXPR bool isnan(half arg)
type a
Independent coefficient of the logistic function.
CorrelationMethod correlation_type
Regression method type.
type r
Correlation coefficient of the regression.
type b
x coefficient of the logistic function.