9#include "testing_analysis.h"
// Mangled-extraction fragment (leading numerals are original-file line numbers;
// kept verbatim). Covers: default-constructor initializer list, destructor
// cleanup, the two pointer getters' error paths, and default device setup.
19 : neural_network_pointer(nullptr),
20 data_set_pointer(nullptr)
// Destructor body: releases the owned Eigen ThreadPoolDevice.
// NOTE(review): no matching `delete thread_pool` visible here — presumably in a
// missing line; verify against the full file.
48 delete thread_pool_device;
// get_neural_network_pointer(): throws when the neural network pointer is null.
62 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
63 <<
"NeuralNetwork* get_neural_network_pointer() const method.\n"
64 <<
"Neural network pointer is nullptr.\n";
66 throw logic_error(buffer.str());
// get_data_set_pointer(): throws when the data set pointer is null.
85 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
86 <<
"DataSet* get_data_set_pointer() const method.\n"
87 <<
"Data set pointer is nullptr.\n";
89 throw logic_error(buffer.str());
// Default setup: rebuild the thread pool/device sized to all available threads.
115 delete thread_pool_device;
117 const int n = omp_get_max_threads();
118 thread_pool =
new ThreadPool(n);
119 thread_pool_device =
new ThreadPoolDevice(thread_pool, n);
123void TestingAnalysis::set_threads_number(
const int& new_threads_number)
125 if(thread_pool !=
nullptr)
delete this->thread_pool;
126 if(thread_pool_device !=
nullptr)
delete this->thread_pool_device;
128 thread_pool =
new ThreadPool(new_threads_number);
129 thread_pool_device =
new ThreadPoolDevice(thread_pool, new_threads_number);
// Fragment: check() consistency guards plus the core of linear_correlation().
// check() throws logic_error if either owned pointer is null.
170 ostringstream buffer;
174 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
175 <<
"void check() const method.\n"
176 <<
"Neural network pointer is nullptr.\n";
178 throw logic_error(buffer.str());
183 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
184 <<
"void check() const method.\n"
185 <<
"Data set pointer is nullptr.\n";
187 throw logic_error(buffer.str());
// linear_correlation(): refuses to run with zero testing samples, then
// computes one correlation per output column against its target column.
207 ostringstream buffer;
209 if(testing_samples_number == 0)
211 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
212 <<
"Tensor<Correlation, 1> linear_correlation() const method.\n"
213 <<
"Number of testing samples is zero.\n";
215 throw logic_error(buffer.str());
238 for(Index i = 0; i < outputs_number; i++)
240 linear_correlation[i] = OpenNN::linear_correlation(thread_pool_device, output.chip(i,1), target.chip(i,1));
// Fragment: linear-regression reporting and analysis. Interior lines of each
// function are missing from this extraction.
247void TestingAnalysis::print_linear_regression_correlations()
const
255 for(Index i = 0; i < targets_number; i++)
// perform_linear_regression_analysis(): guard against empty testing set, then
// one LinearRegressionAnalysis per output column.
277 if(testing_samples_number == 0)
279 ostringstream buffer;
281 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
282 <<
"LinearRegressionResults perform_linear_regression_analysis() const method.\n"
283 <<
"Number of testing samples is zero.\n";
285 throw logic_error(buffer.str());
298 Tensor<LinearRegressionAnalysis, 1> linear_regression_results(outputs_number);
300 for(Index i = 0; i < outputs_number; i++)
302 const Tensor<type, 1> targets = testing_targets.chip(i,1);
303 const Tensor<type, 1> outputs = testing_outputs.chip(i,1);
307 linear_regression_results[i].targets = targets;
308 linear_regression_results[i].outputs = outputs;
315 return linear_regression_results;
// print_linear_regression_analysis(): prints each stored analysis result.
319void TestingAnalysis::print_linear_regression_analysis()
const
323 for(Index i = 0; i < linear_regression_analysis.size(); i++)
325 linear_regression_analysis(i).print();
// Fragment: calculate_error_data(). Builds a (samples x 3 x outputs) tensor of
// absolute error, error relative to the unscaling-layer output range, and the
// same relative error expressed as a percentage.
354 ostringstream buffer;
356 if(testing_samples_number == 0)
358 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
359 <<
"Tensor<Tensor<type, 2>, 1> calculate_error_data() const.\n"
360 <<
"Number of testing samples is zero.\n";
362 throw logic_error(buffer.str());
// Requires an unscaling layer to obtain the output value ranges.
381 if(!unscaling_layer_pointer)
383 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
384 <<
"Tensor<Tensor<type, 2>, 1> calculate_error_data() const.\n"
385 <<
"Unscaling layer is nullptr.\n";
387 throw logic_error(buffer.str());
392 const Tensor<type, 1>& outputs_minimum = unscaling_layer_pointer->
get_minimums();
394 const Tensor<type, 1>& outputs_maximum = unscaling_layer_pointer->
get_maximums();
398 Tensor<type, 3> error_data(testing_samples_number, 3, outputs_number);
402 Tensor<type, 2> difference_absolute_value = (targets - outputs).
abs();
// NOTE(review): divides by abs(max - min) with no guard for a constant output
// range (max == min) — verify upstream guarantees a non-degenerate range.
404 for(Index i = 0; i < outputs_number; i++)
406 for(Index j = 0; j < testing_samples_number; j++)
409 error_data(j,0,i) = difference_absolute_value(j,i);
411 error_data(j,1,i) = difference_absolute_value(j,i)/
abs(outputs_maximum(i)-outputs_minimum(i));
413 error_data(j,2,i) = (difference_absolute_value(j,i)*
static_cast<type
>(100.0))/
abs(outputs_maximum(i)-outputs_minimum(i));
// Fragment: calculate_percentage_error_data(). Percentage error per sample and
// output, normalized by the unscaling-layer output range.
438 ostringstream buffer;
440 if(testing_samples_number == 0)
442 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
443 <<
"Tensor<Tensor<type, 2>, 1> calculate_error_data() const.\n"
444 <<
"Number of testing samples is zero.\n"
446 throw logic_error(buffer.str());
463 if(!unscaling_layer_pointer)
465 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
466 <<
"Tensor<Tensor<type, 1>, 1> calculate_percentage_error_data() const.\n"
467 <<
"Unscaling layer is nullptr.\n";
469 throw logic_error(buffer.str());
474 const Tensor<type, 1>& outputs_minimum = unscaling_layer_pointer->
get_minimums();
475 const Tensor<type, 1>& outputs_maximum = unscaling_layer_pointer->
get_maximums();
481 Tensor<type, 2> error_data(testing_samples_number, outputs_number);
// Signed difference here (unlike calculate_error_data's absolute value).
483 Tensor<type, 2> difference_value = (targets - outputs);
485 for(Index i = 0; i < testing_samples_number; i++)
487 for(Index j = 0; j < outputs_number; j++)
489 error_data(i,j) = (difference_value(i,j)*
static_cast<type
>(100.0))/
abs(outputs_maximum(j)-outputs_minimum(j));
// Fragment: parameterless overload — gathers testing targets/outputs (missing
// lines) and delegates to the two-argument overload.
497Tensor<Descriptives, 1> TestingAnalysis::calculate_absolute_errors_descriptives()
const
510 return calculate_absolute_errors_descriptives(targets, outputs);
514Tensor<Descriptives, 1> TestingAnalysis::calculate_absolute_errors_descriptives(
const Tensor<type, 2>& targets,
515 const Tensor<type, 2>& outputs)
const
517 const Tensor<type, 2> diff = (targets-outputs).
abs();
519 return descriptives(diff);
// Fragment: parameterless overload — gathers testing targets/outputs (missing
// lines) and delegates to the two-argument overload.
523Tensor<Descriptives, 1> TestingAnalysis::calculate_percentage_errors_descriptives()
const
536 return calculate_percentage_errors_descriptives(targets,outputs);
540Tensor<Descriptives, 1> TestingAnalysis::calculate_percentage_errors_descriptives(
const Tensor<type, 2>& targets,
541 const Tensor<type, 2>& outputs)
const
543 const Tensor<type, 2> diff = (
static_cast<type
>(100)*(targets-outputs).abs())/targets;
545 return descriptives(diff);
// Fragment: calculate_error_data_descriptives(). Walks the flat 3-D error_data
// buffer in (samples x 3) slabs, one per output, and computes descriptives.
567 Tensor<Tensor<Descriptives, 1>, 1> descriptives(outputs_number);
573 for(Index i = 0; i < outputs_number; i++)
// Maps the i-th output's slab of the contiguous buffer without copying...
575 TensorMap< Tensor<type, 2> > matrix_error(error_data.data()+index, testing_samples_number, 3);
// ...then materializes a copy for the statistics routine.
577 Tensor<type, 2> matrix(matrix_error);
579 descriptives[i] = OpenNN::descriptives(matrix);
581 index += testing_samples_number*3;
// Fragment: print_error_data_descriptives(). Per target: absolute-error stats
// (index 0) then percentage-error stats (index 2) of the error-data columns.
588void TestingAnalysis::print_error_data_descriptives()
const
596 for(Index i = 0; i < targets_number; i++)
598 cout << targets_name[i] << endl;
599 cout <<
"Minimum error: " << error_data_statistics[i][0].minimum << endl;
600 cout <<
"Maximum error: " << error_data_statistics[i][0].maximum << endl;
601 cout <<
"Mean error: " << error_data_statistics[i][0].mean <<
" " << endl;
602 cout <<
"Standard deviation error: " << error_data_statistics[i][0].standard_deviation <<
" " << endl;
604 cout <<
"Minimum percentage error: " << error_data_statistics[i][2].minimum <<
" %" << endl;
605 cout <<
"Maximum percentage error: " << error_data_statistics[i][2].maximum <<
" %" << endl;
606 cout <<
"Mean percentage error: " << error_data_statistics[i][2].mean <<
" %" << endl;
607 cout <<
"Standard deviation percentage error: " << error_data_statistics[i][2].standard_deviation <<
" %" << endl;
// Fragment: error histograms (one zero-centered histogram per output column)
// and calculate_maximal_errors (indices of the largest absolute errors).
621 const Index outputs_number = error_data.dimension(1);
623 Tensor<Histogram, 1> histograms(outputs_number);
625 for(Index i = 0; i < outputs_number; i++)
627 histograms(i) = histogram_centered(error_data.chip(i,1), type(0), bins_number);
641 const Index outputs_number = error_data.dimension(2);
642 const Index testing_samples_number = error_data.dimension(0);
// NOTE(review): maximal_errors is sized with samples_number but indexed below
// by the output index i (loop bound outputs_number) — verify this sizing.
644 Tensor<Tensor<Index, 1>, 1> maximal_errors(samples_number);
648 for(Index i = 0; i < outputs_number; i++)
650 TensorMap< Tensor<type, 2> > matrix_error(error_data.data()+index, testing_samples_number, 3);
652 maximal_errors[i] = maximal_indices(matrix_error.chip(0,1), samples_number);
654 index += testing_samples_number*3;
657 return maximal_errors;
// Fragment: calculate_errors(). 5 error measures x 3 columns
// (training / selection / testing). testing_errors is declared in a line
// missing from this extraction.
// NOTE(review): training_errors(4)/selection_errors(4) are read below, but the
// visible calculate_training_errors fragment builds a 4-element tensor
// (indices 0..3) — verify the element counts match in the full file.
666 Tensor<type, 2> errors(5,3);
668 const Tensor<type, 1> training_errors = calculate_training_errors();
669 const Tensor<type, 1> selection_errors = calculate_selection_errors();
672 errors(0,0) = training_errors(0);
673 errors(1,0) = training_errors(1);
674 errors(2,0) = training_errors(2);
675 errors(3,0) = training_errors(3);
676 errors(4,0) = training_errors(4);
678 errors(0,1) = selection_errors(0);
679 errors(1,1) = selection_errors(1);
680 errors(2,1) = selection_errors(2);
681 errors(3,1) = selection_errors(3);
682 errors(4,1) = selection_errors(4);
684 errors(0,2) = testing_errors(0);
685 errors(1,2) = testing_errors(1);
686 errors(2,2) = testing_errors(2);
687 errors(3,2) = testing_errors(3);
688 errors(4,2) = testing_errors(4);
// Fragment: calculate_binary_classification_errors(). 7 measures x 3 subsets.
// The testing_errors declaration and the return statement are in missing lines.
694Tensor<type, 2> TestingAnalysis::calculate_binary_classification_errors()
const
696 Tensor<type, 2> errors(7, 3);
698 const Tensor<type, 1> training_errors = calculate_binary_classification_training_errors();
699 const Tensor<type, 1> selection_errors = calculate_binary_classification_selection_errors();
702 errors(0,0) = training_errors(0);
703 errors(1,0) = training_errors(1);
704 errors(2,0) = training_errors(2);
705 errors(3,0) = training_errors(3);
706 errors(4,0) = training_errors(4);
707 errors(5,0) = training_errors(5);
708 errors(6,0) = training_errors(6);
710 errors(0,1) = selection_errors(0);
711 errors(1,1) = selection_errors(1);
712 errors(2,1) = selection_errors(2);
713 errors(3,1) = selection_errors(3);
714 errors(4,1) = selection_errors(4);
715 errors(5,1) = selection_errors(5);
716 errors(6,1) = selection_errors(6);
718 errors(0,2) = testing_errors(0);
719 errors(1,2) = testing_errors(1);
720 errors(2,2) = testing_errors(2);
721 errors(3,2) = testing_errors(3);
722 errors(4,2) = testing_errors(4);
723 errors(5,2) = testing_errors(5);
724 errors(6,2) = testing_errors(6);
// Fragment: calculate_multiple_classification_errors(). 6 measures x 3 subsets.
// The testing_errors declaration and the return statement are in missing lines.
730Tensor<type, 2> TestingAnalysis::calculate_multiple_classification_errors()
const
732 Tensor<type, 2> errors(6,3);
734 const Tensor<type, 1> training_errors = calculate_multiple_classification_training_errors();
735 const Tensor<type, 1> selection_errors = calculate_multiple_classification_selection_errors();
738 errors(0,0) = training_errors(0);
739 errors(1,0) = training_errors(1);
740 errors(2,0) = training_errors(2);
741 errors(3,0) = training_errors(3);
742 errors(4,0) = training_errors(4);
743 errors(5,0) = training_errors(5);
745 errors(0,1) = selection_errors(0);
746 errors(1,1) = selection_errors(1);
747 errors(2,1) = selection_errors(2);
748 errors(3,1) = selection_errors(3);
749 errors(4,1) = selection_errors(4);
750 errors(5,1) = selection_errors(5);
752 errors(0,2) = testing_errors(0);
753 errors(1,2) = testing_errors(1);
754 errors(2,2) = testing_errors(2);
755 errors(3,2) = testing_errors(3);
756 errors(4,2) = testing_errors(4);
757 errors(5,2) = testing_errors(5);
// Fragment: calculate_training_errors(). Guard + the first three error
// measures; remaining measures and the return are in missing lines.
763Tensor<type, 1> TestingAnalysis::calculate_training_errors()
const
777 ostringstream buffer;
779 if(training_samples_number == 0)
781 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
782 <<
"Tensor<type, 1> calculate_training_errors() const.\n"
783 <<
"Number of training samples is zero.\n";
785 throw logic_error(buffer.str());
798 Tensor<type, 1> errors(4);
// NOTE(review): despite its name, sum_squared_error holds sqrt(SSE) — so
// errors(1) divides the *root* by the sample count and errors(2) takes a
// further sqrt. Verify against the intended SSE/MSE/RMSE definitions.
802 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
804 errors(0) = sum_squared_error(0);
805 errors(1) = errors(0)/training_samples_number;
806 errors(2) =
sqrt(errors(1));
// Fragment: binary and multiple classification training errors. Each shows the
// guard plus the first three measures; remaining measures/returns are missing.
813Tensor<type, 1> TestingAnalysis::calculate_binary_classification_training_errors()
const
827 ostringstream buffer;
829 if(training_samples_number == 0)
831 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
832 <<
"Tensor<type, 1> calculate_binary_classification_training_errors() const.\n"
833 <<
"Number of training samples is zero.\n";
835 throw logic_error(buffer.str());
848 Tensor<type, 1> errors(6);
// NOTE(review): sum_squared_error actually holds sqrt(SSE) (see the
// calculate_training_errors fragment) — same caveat applies here.
851 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
854 errors(0) = sum_squared_error(0);
857 errors(1) = errors(0)/training_samples_number;
860 errors(2) =
sqrt(errors(1));
874Tensor<type, 1> TestingAnalysis::calculate_multiple_classification_training_errors()
const
888 ostringstream buffer;
890 if(training_samples_number == 0)
892 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
893 <<
"Tensor<type, 1> calculate_multiple_classification_training_errors() const.\n"
894 <<
"Number of training samples is zero.\n";
896 throw logic_error(buffer.str());
909 Tensor<type, 1> errors(5);
913 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
915 errors(0) = sum_squared_error(0);
916 errors(1) = errors(0)/training_samples_number;
917 errors(2) =
sqrt(errors(1));
// Fragment: the three selection-subset error calculators (plain, binary
// classification, multiple classification). Structure mirrors the training
// counterparts; remaining measures and returns are in missing lines.
924Tensor<type, 1> TestingAnalysis::calculate_selection_errors()
const
938 ostringstream buffer;
940 if(selection_samples_number == 0)
942 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
943 <<
"Tensor<type, 1> calculate_selection_errors() const.\n"
944 <<
"Number of selection samples is zero.\n";
946 throw logic_error(buffer.str());
959 Tensor<type, 1> errors(4);
963 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
965 errors(0) = sum_squared_error(0);
966 errors(1) = errors(0)/selection_samples_number;
967 errors(2) =
sqrt(errors(1));
974Tensor<type, 1> TestingAnalysis::calculate_binary_classification_selection_errors()
const
988 ostringstream buffer;
990 if(selection_samples_number == 0)
992 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
993 <<
"Tensor<type, 1> calculate_binary_classification_selection_errors() const.\n"
994 <<
"Number of selection samples is zero.\n";
996 throw logic_error(buffer.str());
1009 Tensor<type, 1> errors(6);
1013 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
1015 errors(0) = sum_squared_error(0);
1016 errors(1) = errors(0)/selection_samples_number;
1017 errors(2) =
sqrt(errors(1));
1026Tensor<type, 1> TestingAnalysis::calculate_multiple_classification_selection_errors()
const
1040 ostringstream buffer;
1042 if(selection_samples_number == 0)
1044 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1045 <<
"Tensor<type, 1> calculate_multiple_classification_selection_errors() const.\n"
1046 <<
"Number of selection samples is zero.\n";
1048 throw logic_error(buffer.str());
1061 Tensor<type, 1> errors(5);
1065 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
1067 errors(0) = sum_squared_error(0);
1068 errors(1) = errors(0)/selection_samples_number;
1069 errors(2) =
sqrt(errors(1));
// Fragment: the three testing-subset error calculators (plain, binary
// classification, multiple classification). Signatures are in missing lines.
1100 ostringstream buffer;
1102 if(testing_samples_number == 0)
1104 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1105 <<
"Tensor<Tensor<type, 2>, 1> calculate_testing_errors() const.\n"
1106 <<
"Number of testing samples is zero.\n";
1108 throw logic_error(buffer.str());
1121 Tensor<type, 1> errors(4);
1125 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
1127 errors(0) = sum_squared_error(0);
1128 errors(1) = errors(0)/testing_samples_number;
1129 errors(2) = sqrt(errors(1));
1161 ostringstream buffer;
1163 if(testing_samples_number == 0)
1165 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1166 <<
"Tensor<type, 1> calculate_binary_classification_testing_errors() const.\n"
1167 <<
"Number of testing samples is zero.\n";
1169 throw logic_error(buffer.str());
1182 Tensor<type, 1> errors(6);
1186 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
1188 errors(0) = sum_squared_error(0);
1189 errors(1) = errors(0)/testing_samples_number;
1190 errors(2) = sqrt(errors(1));
1223 ostringstream buffer;
1225 if(testing_samples_number == 0)
1227 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1228 <<
"Tensor<type, 1> calculate_multiple_classification_testing_errors() const.\n"
1229 <<
"Number of testing samples is zero.\n";
1231 throw logic_error(buffer.str());
// NOTE(review): errors(4) here, but the multiple-classification training and
// selection counterparts allocate errors(5) — verify the intended size.
1244 Tensor<type, 1> errors(4);
1248 const Tensor<type, 0> sum_squared_error = (outputs-targets).square().sum().sqrt();
1250 errors(0) = sum_squared_error(0);
1251 errors(1) = errors(0)/testing_samples_number;
1252 errors(2) = sqrt(errors(1));
// Fragment: normalized squared error = SSE / sum of squared deviations of the
// targets from their mean. The function signature is in a missing line.
1265 const Index samples_number = targets.dimension(0);
1267 const Tensor<type, 1> targets_mean = mean(targets);
1269 Tensor<type, 0> sum_squared_error = (outputs - targets).square().sum();
1271 type normalization_coefficient = type(0);
1273#pragma omp parallel for reduction(+: normalization_coefficient)
1275 for(Index i = 0; i < samples_number; i++)
1277 const Tensor<type, 0> norm_1 = (targets.chip(i,0) - targets_mean).square().sum();
1279 normalization_coefficient += norm_1(0);
// NOTE(review): no guard for normalization_coefficient == 0 (constant
// targets) — division yields inf/NaN; confirm callers exclude that case.
1282 return sum_squared_error()/normalization_coefficient;
1292 const Tensor<type, 2>& outputs)
const
1294 const Index testing_samples_number = targets.dimension(0);
1295 const Index outputs_number = targets.dimension(1);
1297 Tensor<type, 1> targets_row(outputs_number);
1298 Tensor<type, 1> outputs_row(outputs_number);
1300 type cross_entropy_error = type(0);
1302#pragma omp parallel for reduction(+:cross_entropy_error)
1304 for(Index i = 0; i < testing_samples_number; i++)
1306 outputs_row = outputs.chip(i, 0);
1307 targets_row = targets.chip(i, 0);
1309 for(Index j = 0; j < outputs_number; j++)
1311 if(outputs_row(j) < type(NUMERIC_LIMITS_MIN))
1313 outputs_row(j) =
static_cast<type
>(1.0e-6);
1315 else if(
static_cast<double>(outputs_row(j)) == 1.0)
1317 outputs_row(j) = numeric_limits<type>::max();
1320 cross_entropy_error -=
1321 targets_row(j)*
log(outputs_row(j)) + (
static_cast<type
>(1) - targets_row(j))*
log(
static_cast<type
>(1) - outputs_row(j));
1325 return cross_entropy_error/
static_cast<type
>(testing_samples_number);
// Fragment: calculate_testing_weighted_squared_error(). Weighted SSE for
// binary classification, normalized by the weighted negatives count. The first
// parameter line and the target_distribution declaration are in missing lines.
1336 const Tensor<type, 2>& outputs,
1337 const Tensor<type, 1>& weights)
const
1341 const Index outputs_number = outputs.dimension(1);
1343 ostringstream buffer;
1345 if(outputs_number != 1)
1347 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1348 <<
"type calculate_testing_weighted_squared_error(const Tensor<type, 2>&, const Tensor<type, 2>&, const Tensor<type, 1>&) const.\n"
1349 <<
"Number of outputs must be one.\n";
1351 throw logic_error(buffer.str());
1356 type negatives_weight;
1357 type positives_weight;
// When no explicit weights are given, derive them from the class balance.
1359 if(weights.size() != 2)
1363 const Index negatives_number = target_distribution[0];
1364 const Index positives_number = target_distribution[1];
1366 negatives_weight = type(1);
// NOTE(review): negatives_number/positives_number is *integer* division — the
// quotient is truncated before the cast. Likely should cast each operand to
// type first; also divides by zero if there are no positives. Verify.
1367 positives_weight =
static_cast<type
>(negatives_number/positives_number);
1371 positives_weight = weights[0];
1372 negatives_weight = weights[1];
// Element-wise select: positive-class errors weighted, negative-class errors
// weighted, everything else contributes zero.
1375 const Tensor<bool, 2> if_sentence = targets == targets.constant(type(1));
1376 const Tensor<bool, 2> else_sentence = targets == targets.constant(type(0));
1378 Tensor<type, 2> f_1(targets.dimension(0), targets.dimension(1));
1380 Tensor<type, 2> f_2(targets.dimension(0), targets.dimension(1));
1382 Tensor<type, 2> f_3(targets.dimension(0), targets.dimension(1));
1384 f_1 = (targets - outputs).square() * positives_weight;
1386 f_2 = (targets - outputs).square()*negatives_weight;
1388 f_3 = targets.constant(type(0));
1390 Tensor<type, 0> sum_squared_error = (if_sentence.select(f_1, else_sentence.select(f_2, f_3))).sum();
1392 Index negatives = 0;
1394 Tensor<type, 1> target_column = targets.chip(0,1);
1396 for(Index i = 0; i < target_column.size(); i++)
1398 if(
static_cast<double>(target_column(i)) == 0.0) negatives++;
1401 const type normalization_coefficient = type(negatives)*negatives_weight*
static_cast<type
>(0.5);
1403 return sum_squared_error(0)/normalization_coefficient;
1407type TestingAnalysis::calculate_Minkowski_error(
const Tensor<type, 2>& targets,
const Tensor<type, 2>& outputs,
const type minkowski_parameter)
const
1409 Tensor<type, 0> Minkoski_error = (outputs - targets).
abs().
pow(minkowski_parameter).sum().
pow(
static_cast<type
>(1.0)/minkowski_parameter);
1411 return Minkoski_error();
// Fragment: calculate_confusion_binary_classification(). Builds the 2x2
// confusion matrix by thresholding single-column targets/outputs. The first
// parameter line and the counter-increment lines are in missing lines.
1421 const Tensor<type, 2>& outputs,
1422 const type& decision_threshold)
const
1424 const Index testing_samples_number = targets.dimension(0);
1426 Tensor<Index, 2> confusion(2, 2);
1428 Index true_positive = 0;
1429 Index false_negative = 0;
1430 Index false_positive = 0;
1431 Index true_negative = 0;
1433 type target = type(0);
1434 type output = type(0);
1436 for(Index i = 0; i < testing_samples_number; i++)
1438 target = targets(i,0);
1439 output = outputs(i,0);
// Classify the sample into one of the four confusion cells; values exactly at
// the threshold fall through to the "unknown case" error below.
1441 if(target > decision_threshold && output > decision_threshold)
1445 else if(target >= decision_threshold && output < decision_threshold)
1449 else if(target <= decision_threshold && output > decision_threshold)
1453 else if(target < decision_threshold && output < decision_threshold)
1459 ostringstream buffer;
1461 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1462 <<
"Tensor<Index, 2> calculate_confusion_binary_classification(const Tensor<type, 2>&, const Tensor<type, 2>&, const type&) const method.\n"
1463 <<
"Unknown case.\n";
1465 throw logic_error(buffer.str());
1469 confusion(0,0) = true_positive;
1470 confusion(0,1) = false_negative;
1471 confusion(1,0) = false_positive;
1472 confusion(1,1) = true_negative;
// Sanity check: every sample must land in exactly one cell.
1474 const Index confusion_sum = true_positive + false_negative + false_positive + true_negative;
1476 if(confusion_sum != testing_samples_number)
1478 ostringstream buffer;
1480 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1481 <<
"Tensor<Index, 2> calculate_confusion_binary_classification(const Tensor<type, 2>&, const Tensor<type, 2>&, const type&) const method.\n"
1482 <<
"Number of elements in confusion matrix (" << confusion_sum <<
") must be equal to number of testing samples (" << testing_samples_number <<
").\n";
1484 throw logic_error(buffer.str());
// Fragment: calculate_confusion_multiple_classification() — confusion matrix
// via argmax of each target/output row — plus calculate_positives_negatives_rate
// which sums the two rows of a binary confusion matrix.
1497 const Index samples_number = targets.dimension(0);
1498 const Index targets_number = targets.dimension(1);
1500 if(targets_number != outputs.dimension(1))
1502 ostringstream buffer;
1504 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1505 <<
"Tensor<Index, 2> calculate_confusion_multiple_classification(const Tensor<type, 2>&, const Tensor<type, 2>&) const method.\n"
1506 <<
"Number of targets (" << targets_number <<
") must be equal to number of outputs (" << outputs.dimension(1) <<
").\n";
1508 throw logic_error(buffer.str());
1511 Tensor<Index, 2> confusion(targets_number, targets_number);
1512 confusion.setZero();
1514 Index target_index = 0;
1515 Index output_index = 0;
1517 for(Index i = 0; i < samples_number; i++)
1519 target_index = maximal_index(targets.chip(i, 0));
1520 output_index = maximal_index(outputs.chip(i, 0));
1522 confusion(target_index,output_index)++;
// positives = TP + FN (row 0); negatives = FP + TN (row 1).
1540 Tensor<Index, 1> positives_negatives_rate(2);
1542 positives_negatives_rate[0] = confusion(0,0) + confusion(0,1);
1543 positives_negatives_rate[1] = confusion(1,0) + confusion(1,1);
1545 return positives_negatives_rate;
// Fragment: precondition checks of calculate_confusion() and
// perform_roc_analysis() (null network, input/output count mismatches), the
// binary decision-threshold default, and the ROC progress messages.
1563 ostringstream buffer;
1565 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1566 <<
"Tensor<Index, 2> calculate_confusion() const method.\n"
1567 <<
"Pointer to neural network in neural network is nullptr.\n";
1569 throw logic_error(buffer.str());
1578 ostringstream buffer;
1580 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
1581 <<
"Tensor<Index, 2> calculate_confusion() const method." << endl
1582 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
1584 throw logic_error(buffer.str());
1589 ostringstream buffer;
1591 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
1592 <<
"Tensor<Index, 2> calculate_confusion() const method." << endl
1593 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
1595 throw logic_error(buffer.str());
// Single output => binary classification; default threshold is 0.5.
1606 if(outputs_number == 1)
1608 type decision_threshold;
1616 decision_threshold = type(0.5);
// perform_roc_analysis(): same three precondition checks.
1644 ostringstream buffer;
1646 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1647 <<
"RocCurveResults perform_roc_analysis() const method.\n"
1648 <<
"Pointer to neural network in neural network is nullptr.\n";
1650 throw logic_error(buffer.str());
1659 ostringstream buffer;
1661 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
1662 <<
"RocCurveResults perform_roc_analysis() const method." << endl
1663 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
1665 throw logic_error(buffer.str());
1672 ostringstream buffer;
1674 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
1675 <<
"RocCurveResults perform_roc_analysis() const method." << endl
1676 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
1678 throw logic_error(buffer.str());
// Progress messages for the four stages of the ROC analysis.
1691 cout <<
"Calculating ROC curve..." << endl;
1695 cout <<
"Calculating area under curve..." << endl;
1699 cout <<
"Calculating confidence limits..." << endl;
1703 cout <<
"Calculating optimal threshold..." << endl;
1707 return roc_analysis_results;
// Fragment: calculate_roc_curve(). Validates class counts and column counts,
// subsamples to at most ~501 threshold points, sorts samples by output score,
// and fills (TPR-ish, FPR-ish, threshold) rows; final row pinned to (1,1,1).
1744 const Index total_positives = positives_negatives_rate(0);
1745 const Index total_negatives = positives_negatives_rate(1);
1747 if(total_positives == 0)
1749 ostringstream buffer;
1751 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1752 <<
"Tensor<type, 2> calculate_roc_curve(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1753 <<
"Number of positive samples ("<< total_positives <<
") must be greater than zero.\n";
1755 throw logic_error(buffer.str());
1758 if(total_negatives == 0)
1760 ostringstream buffer;
1762 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1763 <<
"Tensor<type, 2> calculate_roc_curve(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1764 <<
"Number of negative samples ("<< total_negatives <<
") must be greater than zero.\n";
1766 throw logic_error(buffer.str());
1769 const Index maximum_points_number = 501;
1773 const Index testing_samples_number = targets.dimension(0);
1774 Index points_number;
1776 if(testing_samples_number > maximum_points_number)
1778 step_size =
static_cast<Index
>(
static_cast<type
>(testing_samples_number)/
static_cast<type
>(maximum_points_number));
1779 points_number =
static_cast<Index
>(
static_cast<type
>(testing_samples_number)/
static_cast<type
>(step_size));
1783 points_number = testing_samples_number;
1787 if(targets.dimension(1) != 1)
1789 ostringstream buffer;
1791 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1792 <<
"Tensor<type, 2> calculate_roc_curve(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1793 <<
"Number of of target variables ("<< targets.dimension(1) <<
") must be one.\n";
1795 throw logic_error(buffer.str());
// NOTE(review): this branch checks outputs.dimension(1) but the message below
// prints targets.dimension(1); also "Number of of" typo in both messages.
// Cannot be fixed in a comments-only pass — flagging for follow-up.
1798 if(outputs.dimension(1) != 1)
1800 ostringstream buffer;
1802 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1803 <<
"Tensor<type, 2> calculate_roc_curve(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1804 <<
"Number of of output variables ("<< targets.dimension(1) <<
") must be one.\n";
1806 throw logic_error(buffer.str());
// Sort sample indices ascending by output score, then gather both columns.
1811 Tensor<Index, 1> sorted_indices(outputs.dimension(0));
1812 iota(sorted_indices.data(), sorted_indices.data() + sorted_indices.size(), 0);
1814 stable_sort(sorted_indices.data(), sorted_indices.data()+sorted_indices.size(), [outputs](Index i1, Index i2) {return outputs(i1,0) < outputs(i2,0);});
1816 Tensor<type, 1> sorted_targets(testing_samples_number);
1817 Tensor<type, 1> sorted_outputs(testing_samples_number);
1819 for(Index i = 0; i < testing_samples_number; i++)
1821 sorted_targets(i) = targets(sorted_indices(i),0);
1822 sorted_outputs(i) = outputs(sorted_indices(i),0);
1825 Tensor<type, 2> roc_curve(points_number+1, 3);
1826 roc_curve.setZero();
1828 #pragma omp parallel for schedule(dynamic)
1830 for(Index i = 0; i < static_cast<Index>(points_number); i++)
1832 Index positives = 0;
1833 Index negatives = 0;
1835 const Index current_index = i*step_size;
1837 const type threshold = sorted_outputs(current_index);
// Count positive/negative targets among samples scored below the threshold.
1839 for(Index j = 0; j < static_cast<Index>(current_index); j++)
1841 if(sorted_outputs(j) < threshold &&
static_cast<double>(sorted_targets(j)) == 1.0)
1845 if(sorted_outputs(j) < threshold && sorted_targets(j) < type(NUMERIC_LIMITS_MIN))
1851 roc_curve(i,0) =
static_cast<type
>(positives)/
static_cast<type
>(total_positives);
1852 roc_curve(i,1) =
static_cast<type
>(negatives)/
static_cast<type
>(total_negatives);
1853 roc_curve(i,2) =
static_cast<type
>(threshold);
1856 roc_curve(points_number, 0) = type(1);
1857 roc_curve(points_number, 1) = type(1);
1858 roc_curve(points_number, 2) = type(1);
// Fragment: two AUC computations — the pairwise (Mann–Whitney style) count
// over all positive/negative sample pairs, and the trapezoidal integration of
// an already-computed ROC curve. The `sum` accumulator of the first is
// declared in a missing line.
1872 const Index total_positives = positives_negatives_rate[0];
1873 const Index total_negatives = positives_negatives_rate[1];
1875 if(total_positives == 0)
1877 ostringstream buffer;
1879 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1880 <<
"Tensor<type, 2> calculate_roc_curve(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1881 <<
"Number of positive samples("<< total_positives <<
") must be greater than zero.\n";
1883 throw logic_error(buffer.str());
1886 if(total_negatives == 0)
1888 ostringstream buffer;
1890 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1891 <<
"Tensor<type, 2> calculate_roc_curve(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1892 <<
"Number of negative samples("<< total_negatives <<
") must be greater than zero.\n";
1894 throw logic_error(buffer.str());
1897 Index testing_samples_number = targets.dimension(0);
1901 type area_under_curve;
// O(n^2) pass over every (positive, negative) pair.
1903 #pragma omp parallel for reduction(+ : sum) schedule(dynamic)
1905 for(Index i = 0; i < testing_samples_number; i++)
1907 if(
abs(targets(i,0) -
static_cast<type
>(1.0)) < type(NUMERIC_LIMITS_MIN))
1909 for(Index j = 0; j < testing_samples_number; j++)
1911 if(
abs(targets(j,0)) < type(NUMERIC_LIMITS_MIN))
1919 area_under_curve =
static_cast<type
>(sum)/
static_cast<type
>(total_positives*total_negatives);
1921 return area_under_curve;
// Trapezoidal-rule AUC from the ROC curve points.
1930 type area_under_curve = type(0);
1932 for(Index i = 1; i < roc_curve.dimension(0); i++)
1934 area_under_curve += (roc_curve(i,0)-roc_curve(i-1,0))*(roc_curve(i,1)+roc_curve(i-1,1));
1937 return area_under_curve/ type(2);
// Fragment: two ROC confidence-limit computations using the Hanley–McNeil
// standard-error formula with z = 1.64485 (~90% two-sided / 95% one-sided).
1949 const Index total_positives = positives_negatives_rate[0];
1950 const Index total_negatives = positives_negatives_rate[1];
1952 if(total_positives == 0)
1954 ostringstream buffer;
1956 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1957 <<
"Tensor<type, 2> calculate_roc_curve_confidence_limit(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1958 <<
"Number of positive samples("<< total_positives <<
") must be greater than zero.\n";
1960 throw logic_error(buffer.str());
1963 if(total_negatives == 0)
1965 ostringstream buffer;
1967 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
1968 <<
"Tensor<type, 2> calculate_roc_curve_confidence_limit(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
1969 <<
"Number of negative samples("<< total_negatives <<
") must be greater than zero.\n";
1971 throw logic_error(buffer.str());
// NOTE(review): Hanley–McNeil defines Q_2 = 2A^2/(1 + A); this computes
// 2A^2/(1.0 * A) in both copies below — verify against the reference formula.
1976 const type Q_1 = area_under_curve/(
static_cast<type
>(2.0) - area_under_curve);
1977 const type Q_2 = (
static_cast<type
>(2.0) *area_under_curve*area_under_curve)/(
static_cast<type
>(1.0) *area_under_curve);
1979 const type confidence_limit = type(type(1.64485)*sqrt((area_under_curve*(type(1) - area_under_curve)
1980 + (type(total_positives) - type(1))*(Q_1-area_under_curve*area_under_curve)
1981 + (type(total_negatives) - type(1))*(Q_2-area_under_curve*area_under_curve))/(type(total_positives*total_negatives))));
1983 return confidence_limit;
// Second copy (area_under_curve_confidence_limit): same structure and caveat.
1996 const Index total_positives = positives_negatives_rate[0];
1997 const Index total_negatives = positives_negatives_rate[1];
1999 if(total_positives == 0)
2001 ostringstream buffer;
2003 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2004 <<
"calculate_area_under_curve_confidence_limit(const Tensor<type, 2>&, const Tensor<type, 2>&, const type&) const.\n"
2005 <<
"Number of positive samples("<< total_positives <<
") must be greater than zero.\n";
2007 throw logic_error(buffer.str());
2010 if(total_negatives == 0)
2012 ostringstream buffer;
2014 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2015 <<
"calculate_area_under_curve_confidence_limit(const Tensor<type, 2>&, const Tensor<type, 2>&, const type&) const.\n"
2016 <<
"Number of negative samples("<< total_negatives <<
") must be greater than zero.\n";
2018 throw logic_error(buffer.str());
2021 const type Q_1 = area_under_curve/(
static_cast<type
>(2.0) -area_under_curve);
2022 const type Q_2 = (
static_cast<type
>(2.0) *area_under_curve*area_under_curve)/(
static_cast<type
>(1.0) *area_under_curve);
2024 const type confidence_limit =
static_cast<type
>(1.64485) *sqrt((area_under_curve*(
static_cast<type
>(1.0) - area_under_curve)
2025 + (type(total_positives)-
static_cast<type
>(1.0))*(Q_1-area_under_curve*area_under_curve)
2026 + (type(total_negatives)-
static_cast<type
>(1.0))*(Q_2-area_under_curve*area_under_curve))/(type(total_positives*total_negatives)));
2028 return confidence_limit;
// --- calculate_optimal_threshold(targets, outputs) (fragment: signature, braces and the
//     declaration of `step_size`/`distance` lost in extraction) ---
// Picks the decision threshold whose ROC point lies closest to the perfect classifier (0,1).
// Candidate thresholds are subsampled to at most 1000 points for large testing sets.
2038 const Index maximum_points_number = 1000;
2042 const Index testing_samples_number = targets.dimension(0);
2043 Index points_number;
// NOTE(review): integer division here means the last few samples may never be inspected.
2045 if(testing_samples_number > maximum_points_number)
2047 step_size = testing_samples_number/maximum_points_number;
2048 points_number = testing_samples_number/step_size;
2052 points_number = testing_samples_number;
// Pack targets and outputs side by side so they can be sorted together.
2056 Tensor<type, 2> targets_outputs(targets.dimension(0), targets.dimension(1)+outputs.dimension(1));
2058 for(Index i = 0; i < targets.dimension(1)+outputs.dimension(1); i++)
2060 for(Index j = 0; j < targets.dimension(0); j++)
2062 if(i < targets.dimension(1)) targets_outputs(j,i) = targets(j,i);
// NOTE(review): outputs(j,i) is indexed with the combined column index i; for
// i >= outputs.dimension(1) this reads out of bounds unless a column offset was lost
// in extraction — confirm against upstream.
2063 else targets_outputs(j,i) = outputs(j,i);
// Sorts the raw buffer; the first targets.size() entries are the target block.
2069 sort(targets_outputs.data(), targets_outputs.data()+targets.size(), less<type>());
2071 const TensorMap< Tensor<type, 2> > sorted_targets(targets_outputs.data(), targets.dimension(0), targets.dimension(1));
2072 const TensorMap< Tensor<type, 2> > sorted_outputs(targets_outputs.data()+targets.size(), outputs.dimension(0), outputs.dimension(1));
2076 type threshold = type(0);
2077 type optimal_threshold = type(0.5);
// Minimize the Euclidean distance from each ROC point to the ideal corner (FPR=0, TPR=1).
2079 type minimun_distance = numeric_limits<type>::max();
2082 Index current_index;
2084 for(Index i = 0; i < points_number; i++)
2086 current_index = i*step_size;
2088 threshold = sorted_outputs(current_index, 0);
2090 distance =
static_cast<type
>(sqrt(roc_curve(i,0)*roc_curve(i,0) + (roc_curve(i,1) - type(1))*(roc_curve(i,1) - type(1))));
2092 if(distance < minimun_distance)
2094 optimal_threshold = threshold;
2096 minimun_distance = distance;
2100 return optimal_threshold;
// --- calculate_optimal_threshold(roc_curve) overload (fragment: signature/braces and the
//     declaration of `distance` lost in extraction) ---
// Returns the threshold stored in column 2 of the ROC-curve point closest to (0,1).
2111 const Index points_number = roc_curve.dimension(0);
2113 type optimal_threshold = type(0.5);
2115 type minimun_distance = numeric_limits<type>::max();
2118 for(Index i = 0; i < points_number; i++)
// Distance from (FPR, TPR) to the perfect-classifier corner (0, 1).
2120 distance = sqrt(roc_curve(i,0)*roc_curve(i,0) + (roc_curve(i,1) -
static_cast<type
>(1))*(roc_curve(i,1) -
static_cast<type
>(1)));
2122 if(distance < minimun_distance)
2124 optimal_threshold = roc_curve(i,2);
2126 minimun_distance = distance;
2130 return optimal_threshold;
// --- perform_cumulative_gain_analysis (fragment: only the precondition checks and the
//     final return survive; the computation lines were lost in extraction) ---
2149 ostringstream buffer;
2151 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2152 <<
"Tensor<type, 2> perform_cumulative_gain_analysis() const method.\n"
2153 <<
"Pointer to neural network in neural network is nullptr.\n";
2155 throw logic_error(buffer.str());
// Check: network input count must match data-set input count.
2168 ostringstream buffer;
2170 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2171 <<
"Tensor<type, 2> perform_cumulative_gain_analysis() const method." << endl
2172 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
2174 throw logic_error(buffer.str());
// Check: network output count must match data-set target count.
2181 ostringstream buffer;
2183 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2184 <<
"Tensor<type, 2> perform_cumulative_gain_analysis() const method." << endl
2185 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
2187 throw logic_error(buffer.str());
2199 return cumulative_gain;
// --- calculate_cumulative_gain(targets, outputs) (fragment: signature, braces and some
//     statements, e.g. the `positives` increment, were lost in extraction) ---
// Builds a 21-point cumulative-gain curve: column 0 = fraction of samples taken
// (descending output order), column 1 = fraction of all positives captured.
2212 if(total_positives == 0)
2214 ostringstream buffer;
2216 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2217 <<
"Tensor<type, 2> calculate_cumulative_gain(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
2218 <<
"Number of positive samples(" << total_positives <<
") must be greater than zero.\n";
2220 throw logic_error(buffer.str());
2223 const Index testing_samples_number = targets.dimension(0);
// Rank samples by descending predicted output (first column).
// NOTE(review): the lambda captures the outputs tensor by copy; by-reference capture
// would avoid copying the whole tensor.
2227 Tensor<Index, 1> sorted_indices(outputs.dimension(0));
2228 iota(sorted_indices.data(), sorted_indices.data() + sorted_indices.size(), 0);
2230 stable_sort(sorted_indices.data(),
2231 sorted_indices.data()+sorted_indices.size(),
2232 [outputs](Index i1, Index i2) {return outputs(i1,0) > outputs(i2,0);});
2234 Tensor<type, 1> sorted_targets(testing_samples_number);
2236 for(Index i = 0; i < testing_samples_number; i++)
2238 sorted_targets(i) = targets(sorted_indices(i),0);
// 21 points = origin + 20 increments of 5%.
2241 const Index points_number = 21;
2242 const type percentage_increment =
static_cast<type
>(0.05);
2244 Tensor<type, 2> cumulative_gain(points_number, 2);
2246 cumulative_gain(0,0) = type(0);
2247 cumulative_gain(0,1) = type(0);
2249 Index positives = 0;
2251 type percentage = type(0);
2253 Index maximum_index;
2255 for(Index i = 0; i < points_number - 1; i++)
2257 percentage += percentage_increment;
2261 maximum_index =
static_cast<Index
>(percentage* type(testing_samples_number));
// Count positives among the top maximum_index ranked samples.
// NOTE(review): exact float comparison with 1.0 assumes strictly binary 0/1 targets.
2263 for(Index j = 0; j < maximum_index; j++)
2265 if(
static_cast<double>(sorted_targets(j)) == 1.0)
2271 cumulative_gain(i + 1, 0) = percentage;
2272 cumulative_gain(i + 1, 1) =
static_cast<type
>(positives)/
static_cast<type
>(total_positives);
2275 return cumulative_gain;
// --- calculate_negative_cumulative_gain(targets, outputs) (fragment: signature, braces
//     and the `negatives` increment were lost in extraction) ---
// Mirror of calculate_cumulative_gain for the negative class: column 1 holds the
// fraction of all negatives captured among the top-ranked samples.
2288 if(total_negatives == 0)
2290 ostringstream buffer;
2292 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2293 <<
"Tensor<type, 2> calculate_negative_cumulative_gain(const Tensor<type, 2>&, const Tensor<type, 2>&) const.\n"
2294 <<
"Number of negative samples(" << total_negatives <<
") must be greater than zero.\n";
2296 throw logic_error(buffer.str());
2299 const Index testing_samples_number = targets.dimension(0);
// Rank samples by descending predicted output, as in the positive-gain curve.
2303 Tensor<Index, 1> sorted_indices(outputs.dimension(0));
2304 iota(sorted_indices.data(), sorted_indices.data() + sorted_indices.size(), 0);
2306 stable_sort(sorted_indices.data(), sorted_indices.data()+sorted_indices.size(), [outputs](Index i1, Index i2) {return outputs(i1,0) > outputs(i2,0);});
2308 Tensor<type, 1> sorted_targets(testing_samples_number);
2310 for(Index i = 0; i < testing_samples_number; i++)
2312 sorted_targets(i) = targets(sorted_indices(i),0);
2315 const Index points_number = 21;
2316 const type percentage_increment =
static_cast<type
>(0.05);
2318 Tensor<type, 2> negative_cumulative_gain(points_number, 2);
2320 negative_cumulative_gain(0,0) = type(0);
2321 negative_cumulative_gain(0,1) = type(0);
2323 Index negatives = 0;
2325 type percentage = type(0);
2327 Index maximum_index;
2329 for(Index i = 0; i < points_number - 1; i++)
2331 percentage += percentage_increment;
2335 maximum_index =
static_cast<Index
>(percentage* type(testing_samples_number));
// A target below NUMERIC_LIMITS_MIN is treated as the negative class (0).
2337 for(Index j = 0; j < maximum_index; j++)
2339 if(sorted_targets(j) < type(NUMERIC_LIMITS_MIN))
2345 negative_cumulative_gain(i + 1, 0) = percentage;
2347 negative_cumulative_gain(i + 1, 1) =
static_cast<type
>(negatives)/
static_cast<type
>(total_negatives);
2350 return negative_cumulative_gain;
// --- perform_lift_chart_analysis (fragment: only the precondition checks survive) ---
2365 ostringstream buffer;
2367 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2368 <<
"Tensor<type, 2> perform_lift_chart_analysis() const method.\n"
2369 <<
"Pointer to neural network in neural network is nullptr.\n";
2371 throw logic_error(buffer.str());
// Check: network inputs must match data-set inputs.
2380 ostringstream buffer;
2382 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2383 <<
"Tensor<type, 2> perform_lift_chart_analysis() const method." << endl
2384 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
2386 throw logic_error(buffer.str());
// Check: network outputs must match data-set targets.
2393 ostringstream buffer;
2395 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2396 <<
"Tensor<type, 2> perform_lift_chart_analysis() const method." << endl
2397 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
2399 throw logic_error(buffer.str());
// --- calculate_lift_chart(cumulative_gain) (fragment: signature, braces and the final
//     return were lost in extraction) ---
// Converts a cumulative-gain curve into a lift chart: lift = gain / fraction-of-samples.
2422 const Index rows_number = cumulative_gain.dimension(0);
2423 const Index columns_number = cumulative_gain.dimension(1);
2425 Tensor<type, 2> lift_chart(rows_number, columns_number);
// The origin gets lift 1 by convention; the loop below starts at i=1, which also
// avoids dividing by cumulative_gain(0,0) == 0.
2427 lift_chart(0,0) = type(0);
2428 lift_chart(0,1) = type(1);
2430 #pragma omp parallel for
2432 for(Index i = 1; i < rows_number; i++)
2434 lift_chart(i, 0) =
static_cast<type
>(cumulative_gain(i, 0));
2435 lift_chart(i, 1) =
static_cast<type
>(cumulative_gain(i, 1))/
static_cast<type
>(cumulative_gain(i, 0));
// --- perform_Kolmogorov_Smirnov_analysis (fragment: only checks and return survive) ---
2458 ostringstream buffer;
2460 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2461 <<
"Tensor<type, 2> perform_Kolmogorov_Smirnov_analysis() const method.\n"
2462 <<
"Pointer to neural network in neural network is nullptr.\n";
2464 throw logic_error(buffer.str());
// Check: network inputs must match data-set inputs.
2477 ostringstream buffer;
2479 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2480 <<
"Tensor<type, 2> perform_Kolmogorov_Smirnov_analysis() const method." << endl
2481 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
2483 throw logic_error(buffer.str());
// Check: network outputs must match data-set targets.
2490 ostringstream buffer;
2492 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2493 <<
"Tensor<type, 2> perform_Kolmogorov_Smirnov_analysis() const method." << endl
2494 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
2496 throw logic_error(buffer.str());
2513 return Kolmogorov_Smirnov_results;
// --- calculate_maximum_gain(positive_cumulative_gain, negative_cumulative_gain)
//     (fragment: signature/braces lost in extraction) ---
// Finds the sample percentage where the positive and negative cumulative-gain curves
// are furthest apart (Kolmogorov-Smirnov-style separation of the two curves).
2524 const Index points_number = positive_cumulative_gain.dimension(0);
2528 if(points_number != negative_cumulative_gain.dimension(0))
2530 ostringstream buffer;
2532 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2533 <<
"Tensor<type, 2> calculate_maximum_gain() const method.\n"
2534 <<
"Positive and negative cumulative gain matrix must have the same rows number.\n";
2536 throw logic_error(buffer.str());
// maximum_gain(0) = percentage, maximum_gain(1) = gain difference.
// NOTE(review): Eigen tensors are not zero-initialized, yet maximum_gain[1] is read
// before first assignment in the loop — confirm an initialization line was not lost.
2541 Tensor<type, 1> maximum_gain(2);
2543 const type percentage_increment =
static_cast<type
>(0.05);
2545 type percentage = type(0);
2547 for(Index i = 0; i < points_number - 1; i++)
2549 percentage += percentage_increment;
2551 if(positive_cumulative_gain(i+1,1)-negative_cumulative_gain(i+1,1) > maximum_gain[1]
2552 && positive_cumulative_gain(i+1,1)-negative_cumulative_gain(i+1,1) >
static_cast<type
>(0.0))
2554 maximum_gain(1) = positive_cumulative_gain(i+1,1)-negative_cumulative_gain(i+1,1);
2555 maximum_gain(0) = percentage;
2559 return maximum_gain;
// --- perform_calibration_plot_analysis (fragment: only checks and return survive) ---
2573 ostringstream buffer;
2575 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2576 <<
"Tensor<type, 2> perform_calibration_plot_analysis() const method.\n"
2577 <<
"Pointer to neural network in neural network is nullptr.\n";
2579 throw logic_error(buffer.str());
// Check: network inputs must match data-set inputs.
2588 ostringstream buffer;
2590 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2591 <<
"Tensor<type, 2> perform_calibration_plot_analysis() const method." << endl
2592 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
2594 throw logic_error(buffer.str());
// Check: network outputs must match data-set targets.
2601 ostringstream buffer;
2603 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2604 <<
"Tensor<type, 2> perform_calibration_plot_analysis() const method." << endl
2605 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
2607 throw logic_error(buffer.str());
2619 return calibration_plot;
// --- calculate_calibration_plot(targets, outputs) (fragment: signature, braces and the
//     declarations/increments of `sum`, `count` and `positives` were lost in extraction) ---
// Buckets predictions into 10 probability bins of width 0.1; per bin, column 0 is the
// mean predicted output and column 1 the observed positive rate. Empty bins are
// marked (-1,-1) and stripped afterwards.
2631 const Index rows_number = targets.dimension(0);
2633 const Index points_number = 10;
// +2 rows for the fixed (0,0) and (1,1) endpoints.
2635 Tensor<type, 2> calibration_plot(points_number+2, 2);
2639 calibration_plot(0,0) = type(0);
2640 calibration_plot(0,1) = type(0);
2642 Index positives = 0;
2646 type probability = type(0);
2650 for(Index i = 1; i < points_number+1; i++)
2657 probability +=
static_cast<type
>(0.1);
// Collect samples whose output falls in [probability-0.1, probability).
2659 for(Index j = 0; j < rows_number; j++)
2661 if(outputs(j, 0) >= (probability -
static_cast<type
>(0.1)) && outputs(j, 0) < probability)
2665 sum += outputs(j, 0);
2667 if(
static_cast<Index
>(targets(j, 0)) == 1)
// Empty bin: flag with -1 so it can be deleted below.
2676 calibration_plot(i, 0) = type(-1);
2677 calibration_plot(i, 1) = type(-1);
2681 calibration_plot(i, 0) = sum/
static_cast<type
>(count);
2682 calibration_plot(i, 1) =
static_cast<type
>(positives)/
static_cast<type
>(count);
2688 calibration_plot(points_number+1,0) = type(1);
2689 calibration_plot(points_number+1,1) = type(1);
// Strip the (-1,-1) placeholder rows left by empty bins.
2693 Index points_number_subtracted = 0;
2695 while(contains(calibration_plot.chip(0,1), type(-1)))
2697 for(Index i = 1; i < points_number - points_number_subtracted+1; i++)
2699 if(
abs(calibration_plot(i, 0) + type(1)) < type(NUMERIC_LIMITS_MIN))
2701 calibration_plot = delete_row(calibration_plot, i);
2703 points_number_subtracted++;
2708 return calibration_plot;
// --- calculate_output_histogram (fragment: signature/braces lost in extraction) ---
// Returns a one-element tensor holding the histogram of the first output column,
// using the caller-supplied bins_number.
2718 Tensor<Histogram, 1> output_histogram(1);
2720 Tensor<type, 1> output_column = outputs.chip(0,1);
2722 output_histogram (0) = histogram(output_column, bins_number);
2724 return output_histogram;
// --- calculate_binary_classification_rates (fragment: only the precondition checks and
//     the decision-threshold fallback survive) ---
2744 ostringstream buffer;
2746 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
2747 <<
"BinaryClassificationRates calculate_binary_classification_rates() const method.\n"
2748 <<
"Pointer to neural network in neural network is nullptr.\n";
2750 throw logic_error(buffer.str());
// Check: network inputs must match data-set inputs.
2759 ostringstream buffer;
2761 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2762 <<
"BinaryClassificationRates calculate_binary_classification_rates() const method." << endl
2763 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
2765 throw logic_error(buffer.str());
// Check: network outputs must match data-set targets.
2772 ostringstream buffer;
2774 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
2775 <<
"BinaryClassificationRates calculate_binary_classification_rates() const method." << endl
2776 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
2778 throw logic_error(buffer.str());
// Default decision threshold of 0.5 when no explicit threshold applies.
2790 type decision_threshold;
2798 decision_threshold = type(0.5);
2808 return binary_classification_rates;
// --- calculate_true_positive_samples (fragment: leading parameters, braces and the
//     declaration/increment of `index` lost in extraction) ---
// Collects the testing-sample indices predicted positive AND actually positive at the
// given decision threshold.
2820 const Tensor<Index, 1>& testing_indices,
const type& decision_threshold)
const
2822 const Index rows_number = targets.dimension(0);
// Oversized scratch buffer; the filled prefix is copied out at the end.
2824 Tensor<Index, 1> true_positives_indices_copy(rows_number);
2828 for(Index i = 0; i < rows_number; i++)
// NOTE(review): this local `copy` tensor is never used in the visible code — dead code.
2830 Tensor<Index, 1> copy;
2831 if(targets(i,0) >= decision_threshold && outputs(i,0) >= decision_threshold)
2833 true_positives_indices_copy(index) = testing_indices(i);
// Shrink to the number of hits actually found.
2838 Tensor<Index, 1> true_positives_indices(index);
2840 memcpy(true_positives_indices.data(), true_positives_indices_copy.data(),
static_cast<size_t>(index)*
sizeof(Index));
2842 return true_positives_indices;
// --- calculate_false_positive_samples (fragment: leading parameters, braces and the
//     `index` bookkeeping lost in extraction) ---
// Collects testing-sample indices predicted positive but actually negative.
2854 const Tensor<Index, 1>& testing_indices,
const type& decision_threshold)
const
2856 const Index rows_number = targets.dimension(0);
2858 Tensor<Index, 1> false_positives_indices_copy(rows_number);
2862 for(Index i = 0; i < rows_number; i++)
2864 if(targets(i,0) < decision_threshold && outputs(i,0) >= decision_threshold)
2866 false_positives_indices_copy(index) = testing_indices(i);
// Shrink to the filled prefix.
2871 Tensor<Index, 1> false_positives_indices(index);
2873 memcpy(false_positives_indices.data(), false_positives_indices_copy.data(),
static_cast<size_t>(index)*
sizeof(Index));
2875 return false_positives_indices;
// --- calculate_false_negative_samples (fragment: leading parameters, braces and the
//     `index` bookkeeping lost in extraction) ---
// Collects testing-sample indices predicted negative but actually positive.
// NOTE(review): the target test uses `>` here while the sibling methods use `>=`;
// a target exactly equal to the threshold is classified inconsistently — confirm.
2887 const Tensor<Index, 1>& testing_indices,
const type& decision_threshold)
const
2889 const Index rows_number = targets.dimension(0);
2891 Tensor<Index, 1> false_negatives_indices_copy(rows_number);
2895 for(Index i = 0; i < rows_number; i++)
2897 if(targets(i,0) > decision_threshold && outputs(i,0) < decision_threshold)
2899 false_negatives_indices_copy(index) = testing_indices(i);
// Shrink to the filled prefix.
2904 Tensor<Index, 1> false_negatives_indices(index);
2906 memcpy(false_negatives_indices.data(), false_negatives_indices_copy.data(),
static_cast<size_t>(index)*
sizeof(Index));
2908 return false_negatives_indices;
// --- calculate_true_negative_samples (fragment: leading parameters, braces and the
//     `index` bookkeeping lost in extraction) ---
// Collects testing-sample indices predicted negative AND actually negative.
2920 const Tensor<Index, 1>& testing_indices,
const type& decision_threshold)
const
2922 const Index rows_number = targets.dimension(0);
2924 Tensor<Index, 1> true_negatives_indices_copy(rows_number);
2928 for(Index i = 0; i < rows_number; i++)
2930 if(targets(i,0) < decision_threshold && outputs(i,0) < decision_threshold)
2932 true_negatives_indices_copy(index) = testing_indices(i);
// Shrink to the filled prefix.
2937 Tensor<Index, 1> true_negatives_indices(index);
2939 memcpy(true_negatives_indices.data(), true_negatives_indices_copy.data(),
static_cast<size_t>(index)*
sizeof(Index));
2941 return true_negatives_indices;
// --- calculate_multiple_classification_tests (braces and the line computing
//     `confusion_matrix` lost in extraction) ---
// Returns {accuracy, error} for multiple classification: the diagonal and off-diagonal
// mass of the confusion matrix divided by the total sample count.
2945Tensor<type, 1> TestingAnalysis::calculate_multiple_classification_tests()
const
2947 Tensor<type, 1> multiple_classification_tests(2);
// NOTE(review): `confusion_matrix` is used below but its computation is not visible
// in this fragment — confirm against upstream.
2956 type diagonal_sum = type(0);
2957 type off_diagonal_sum = type(0);
2958 const Tensor<Index, 0> total_sum = confusion_matrix.sum();
2960 for(Index i = 0; i < confusion_matrix.dimension(0); i++)
2962 for(Index j = 0; j < confusion_matrix.dimension(1); j++)
// Diagonal cells are correct classifications; everything else is an error.
2964 i == j ? diagonal_sum +=
static_cast<type
>(confusion_matrix(i,j)) : off_diagonal_sum +=
static_cast<type
>(confusion_matrix(i,j));
2968 multiple_classification_tests(0) = diagonal_sum/
static_cast<type
>(total_sum());
2969 multiple_classification_tests(1) = off_diagonal_sum/
static_cast<type
>(total_sum());
2971 return multiple_classification_tests;
// --- save_confusion (braces and the lines computing `confusion` and
//     `target_variable_names` lost in extraction) ---
// Writes the confusion matrix as CSV: a header row of target names, then one row per
// actual class with its name in the first column.
2975void TestingAnalysis::save_confusion(
const string& confusion_file_name)
const
2979 const Index columns_number = confusion.dimension(0);
2981 ofstream confusion_file(confusion_file_name);
// Leading comma leaves the top-left header cell empty.
2985 confusion_file <<
",";
2987 for(Index i = 0; i < confusion.dimension(0); i++)
2989 confusion_file << target_variable_names(i);
2991 if(i != target_variable_names.dimension(0) -1)
2993 confusion_file <<
",";
2997 confusion_file << endl;
2999 for(Index i = 0; i < columns_number; i++)
3001 confusion_file << target_variable_names(i) <<
",";
// Last column ends the row; others are comma-separated.
3003 for(Index j = 0; j < columns_number; j++)
3005 if(j == columns_number - 1)
3007 confusion_file << confusion(i,j) << endl;
3011 confusion_file << confusion(i,j) <<
",";
3015 confusion_file.close();
3019void TestingAnalysis::save_multiple_classification_tests(
const string& classification_tests_file_name)
const
3021 const Tensor<type, 1> multiple_classification_tests = calculate_multiple_classification_tests();
3023 ofstream multiple_classifiaction_tests_file(classification_tests_file_name);
3025 multiple_classifiaction_tests_file <<
"accuracy,error" << endl;
3026 multiple_classifiaction_tests_file << multiple_classification_tests(0)* type(100) <<
"," << multiple_classification_tests(1)* type(100) << endl;
3028 multiple_classifiaction_tests_file.close();
// --- precondition-checks fragment (braces and surrounding code lost in extraction).
//     NOTE(review): the messages reuse the calculate_binary_classification_rates()
//     wording even though this appears to be a different method — looks copy-pasted;
//     confirm the intended method name against upstream. ---
3042 ostringstream buffer;
3044 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
3045 <<
"BinaryClassificationRates calculate_binary_classification_rates() const method.\n"
3046 <<
"Pointer to neural network in neural network is nullptr.\n";
3048 throw logic_error(buffer.str());
// Check: network inputs must match data-set inputs.
3057 ostringstream buffer;
3059 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3060 <<
"BinaryClassificationRates calculate_binary_classification_rates() const method." << endl
3061 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
3063 throw logic_error(buffer.str());
// Check: network outputs must match data-set targets.
3070 ostringstream buffer;
3072 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3073 <<
"BinaryClassificationRates calculate_binary_classification_rates() const method." << endl
3074 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
3076 throw logic_error(buffer.str());
// --- calculate_multiple_classification_rates (fragment: leading parameters, braces and
//     the lines computing `confusion`, `target_index`, `output_index` and initializing
//     `indices` lost in extraction) ---
// For each (actual, predicted) class pair, stores the testing indices of the samples
// falling in that confusion-matrix cell.
3097 const Tensor<type, 2>& outputs,
3098 const Tensor<Index, 1>& testing_indices)
const
3100 const Index samples_number = targets.dimension(0);
3101 const Index targets_number = targets.dimension(1);
3103 Tensor< Tensor<Index, 1>, 2> multiple_classification_rates(targets_number, targets_number);
// Pre-size each cell with its confusion-matrix count.
3109 for(Index i = 0; i < targets_number; i++)
3111 for(Index j = 0; j < targets_number; j++)
3113 multiple_classification_rates(i,j).resize(confusion(i,j));
// Per-cell write cursors.
3122 Tensor<Index, 2> indices(targets_number, targets_number);
3125 for(Index i = 0; i < samples_number; i++)
// Actual and predicted classes are the argmax of each row.
3127 target_index = maximal_index(targets.chip(i, 0));
3128 output_index = maximal_index(outputs.chip(i, 0));
3130 multiple_classification_rates(target_index, output_index)(indices(target_index, output_index)) = testing_indices(i);
3132 indices(target_index, output_index)++;
3135 return multiple_classification_rates;
// --- calculate_well_classified_samples (braces and the declarations of `class_name`
//     and `target_variables_names` lost in extraction) ---
// Returns rows of {label, actual class, predicted class, probability-as-text} for the
// correctly classified samples only.
3139Tensor<string, 2> TestingAnalysis::calculate_well_classified_samples(
const Tensor<type, 2>& targets,
3140 const Tensor<type, 2>& outputs,
3141 const Tensor<string, 1>& labels)
3143 const Index samples_number = targets.dimension(0);
// Oversized; sliced down to the filled prefix before returning.
3145 Tensor<string, 2> well_lassified_samples(samples_number, 4);
3147 Index predicted_class, actual_class;
3148 Index number_of_well_classified = 0;
3153 for(Index i = 0; i < samples_number; i++)
3155 predicted_class = maximal_index(outputs.chip(i, 0));
3156 actual_class = maximal_index(targets.chip(i, 0));
// Mismatching samples are skipped (presumably a `continue` lost in extraction — confirm).
3158 if(actual_class != predicted_class)
3164 well_lassified_samples(number_of_well_classified, 0) = labels(i);
3165 class_name = target_variables_names(actual_class);
3166 well_lassified_samples(number_of_well_classified, 1) = class_name;
3167 class_name = target_variables_names(predicted_class);
3168 well_lassified_samples(number_of_well_classified, 2) = class_name;
3169 well_lassified_samples(number_of_well_classified, 3) = to_string(
double(outputs(i, predicted_class)));
3171 number_of_well_classified ++;
// Return only the rows that were actually filled.
3175 Eigen::array<Index, 2> offsets = {0, 0};
3176 Eigen::array<Index, 2> extents = {number_of_well_classified, 4};
3178 return well_lassified_samples.slice(offsets, extents);
// --- calculate_misclassified_samples (braces and the declarations of `j`, `class_name`,
//     `target_variables_names` and the j++ increment lost in extraction) ---
// Returns rows of {label, actual class, predicted class, probability-as-text} for the
// misclassified samples. A first pass counts them so the result is sized exactly.
3182Tensor<string, 2> TestingAnalysis::calculate_misclassified_samples(
const Tensor<type, 2>& targets,
3183 const Tensor<type, 2>& outputs,
3184 const Tensor<string, 1>& labels)
3186 const Index samples_number = targets.dimension(0);
3188 Index predicted_class, actual_class;
3193 Index count_misclassified = 0;
// Pass 1: count mismatches so the result tensor can be sized exactly.
3195 for(Index i = 0; i < samples_number; i++)
3197 predicted_class = maximal_index(outputs.chip(i, 0));
3198 actual_class = maximal_index(targets.chip(i, 0));
3200 if(actual_class != predicted_class) count_misclassified++;
3203 Tensor<string, 2> misclassified_samples(count_misclassified, 4);
// Pass 2: fill the rows; correctly classified samples are skipped
// (presumably via a `continue` lost in extraction — confirm upstream).
3207 for(Index i = 0; i < samples_number; i++)
3209 predicted_class = maximal_index(outputs.chip(i, 0));
3210 actual_class = maximal_index(targets.chip(i, 0));
3212 if(actual_class == predicted_class)
3218 misclassified_samples(j, 0) = labels(i);
3219 class_name = target_variables_names(actual_class);
3220 misclassified_samples(j, 1) = class_name;
3221 class_name = target_variables_names(predicted_class);
3222 misclassified_samples(j, 2) = class_name;
3223 misclassified_samples(j, 3) = to_string(
double(outputs(i, predicted_class)));
3232 return misclassified_samples;
// --- save_well_classified_samples (braces and the remaining arguments of the
//     calculate_well_classified_samples call lost in extraction) ---
// Writes the well-classified samples as CSV with a fixed four-column header.
3236void TestingAnalysis::save_well_classified_samples(
const Tensor<type, 2>& targets,
3237 const Tensor<type, 2>& outputs,
3238 const Tensor<string, 1>& labels,
3239 const string& well_classified_samples_file_name)
3241 const Tensor<string,2> well_classified_samples = calculate_well_classified_samples(targets,
3245 ofstream well_classified_samples_file(well_classified_samples_file_name);
3246 well_classified_samples_file <<
"sample_name,actual_class,predicted_class,probability" << endl;
3247 for(Index i = 0; i < well_classified_samples.dimension(0); i++)
3249 well_classified_samples_file << well_classified_samples(i, 0) <<
",";
3250 well_classified_samples_file << well_classified_samples(i, 1) <<
",";
3251 well_classified_samples_file << well_classified_samples(i, 2) <<
",";
3252 well_classified_samples_file << well_classified_samples(i, 3) << endl;
3254 well_classified_samples_file.close();
// --- save_misclassified_samples (braces and the remaining arguments of the
//     calculate_misclassified_samples call lost in extraction) ---
// Writes the misclassified samples as CSV with a fixed four-column header.
3258void TestingAnalysis::save_misclassified_samples(
const Tensor<type, 2>& targets,
3259 const Tensor<type, 2>& outputs,
3260 const Tensor<string, 1>& labels,
3261 const string& misclassified_samples_file_name)
3263 const Tensor<string,2> misclassified_samples = calculate_misclassified_samples(targets,
3267 ofstream misclassified_samples_file(misclassified_samples_file_name);
3268 misclassified_samples_file <<
"sample_name,actual_class,predicted_class,probability" << endl;
3269 for(Index i = 0; i < misclassified_samples.dimension(0); i++)
3271 misclassified_samples_file << misclassified_samples(i, 0) <<
",";
3272 misclassified_samples_file << misclassified_samples(i, 1) <<
",";
3273 misclassified_samples_file << misclassified_samples(i, 2) <<
",";
3274 misclassified_samples_file << misclassified_samples(i, 3) << endl;
3276 misclassified_samples_file.close();
// --- save_well_classified_samples_statistics (braces and the remaining call arguments
//     lost in extraction) ---
// Writes min/max/mean/std of the well-classified samples' probabilities as CSV.
3280void TestingAnalysis::save_well_classified_samples_statistics(
const Tensor<type, 2>& targets,
3281 const Tensor<type, 2>& outputs,
3282 const Tensor<string, 1>& labels,
3283 const string& statistics_file_name)
3285 const Tensor<string, 2> well_classified_samples = calculate_well_classified_samples(targets,
// Column 3 holds the probability serialized as text; parse it back to numeric.
3289 Tensor<type, 1> well_classified_numerical_probabilities(well_classified_samples.dimension(0));
3291 for(Index i = 0; i < well_classified_numerical_probabilities.size(); i++)
3293 well_classified_numerical_probabilities(i) = type(::atof(well_classified_samples(i, 3).c_str()));
3296 ofstream classification_statistics_file(statistics_file_name);
3297 classification_statistics_file <<
"minimum,maximum,mean,std" << endl;
3298 classification_statistics_file << well_classified_numerical_probabilities.minimum() <<
",";
3299 classification_statistics_file << well_classified_numerical_probabilities.maximum() <<
",";
3301 classification_statistics_file << well_classified_numerical_probabilities.mean() <<
",";
3303 classification_statistics_file << standard_deviation(well_classified_numerical_probabilities);
// --- save_misclassified_samples_statistics (braces and the remaining call arguments
//     lost in extraction) ---
// Writes statistics of the misclassified samples' probabilities as CSV.
// NOTE(review): the header promises "minimum,maximum,mean,std" but no mean value is
// written in this fragment — presumably that line was lost in extraction; confirm.
3308void TestingAnalysis::save_misclassified_samples_statistics(
const Tensor<type, 2>& targets,
3309 const Tensor<type, 2>& outputs,
3310 const Tensor<string, 1>& labels,
3311 const string& statistics_file_name)
3313 const Tensor<string, 2> misclassified_samples = calculate_misclassified_samples(targets,
// Column 3 holds the probability serialized as text; parse it back to numeric.
3317 Tensor<type, 1> misclassified_numerical_probabilities(misclassified_samples.dimension(0));
3319 for(Index i = 0; i < misclassified_numerical_probabilities.size(); i++)
3321 misclassified_numerical_probabilities(i) = type(::atof(misclassified_samples(i, 3).c_str()));
3323 ofstream classification_statistics_file(statistics_file_name);
3324 classification_statistics_file <<
"minimum,maximum,mean,std" << endl;
3325 classification_statistics_file << misclassified_numerical_probabilities.minimum() <<
",";
3326 classification_statistics_file << misclassified_numerical_probabilities.maximum() <<
",";
3330 classification_statistics_file << standard_deviation(misclassified_numerical_probabilities);
// --- save_well_classified_samples_probability_histogram(targets, outputs, labels, file)
//     (braces and the remaining call arguments lost in extraction) ---
// Parses the probability column of the well-classified samples and saves its histogram.
3334void TestingAnalysis::save_well_classified_samples_probability_histogram(
const Tensor<type, 2>& targets,
3335 const Tensor<type, 2>& outputs,
3336 const Tensor<string, 1>& labels,
3337 const string& histogram_file_name)
3339 const Tensor<string, 2> well_classified_samples = calculate_well_classified_samples(targets,
3343 Tensor<type, 1> well_classified_numerical_probabilities(well_classified_samples.dimension(0));
3345 for(Index i = 0; i < well_classified_numerical_probabilities.size(); i++)
3347 well_classified_numerical_probabilities(i) = type(::atof(well_classified_samples(i, 3).c_str()));
// NOTE(review): the local is named "misclassified_samples_histogram" although this
// method handles well-classified samples — copy-paste naming.
3350 Histogram misclassified_samples_histogram(well_classified_numerical_probabilities);
3351 misclassified_samples_histogram.save(histogram_file_name);
3355void TestingAnalysis::save_well_classified_samples_probability_histogram(
const Tensor<string, 2>& well_classified_samples,
3356 const string& histogram_file_name)
3359 Tensor<type, 1> well_classified_numerical_probabilities(well_classified_samples.dimension(0));
3361 for(Index i = 0; i < well_classified_numerical_probabilities.size(); i++)
3363 well_classified_numerical_probabilities(i) = type(::atof(well_classified_samples(i, 3).c_str()));
3366 Histogram misclassified_samples_histogram(well_classified_numerical_probabilities);
3367 misclassified_samples_histogram.save(histogram_file_name);
// --- save_misclassified_samples_probability_histogram(targets, outputs, labels, file)
//     (braces and the remaining call arguments lost in extraction) ---
// Parses the probability column of the misclassified samples and saves its histogram.
3371void TestingAnalysis::save_misclassified_samples_probability_histogram(
const Tensor<type, 2>& targets,
3372 const Tensor<type, 2>& outputs,
3373 const Tensor<string, 1>& labels,
3374 const string& histogram_file_name)
3376 const Tensor<string, 2> misclassified_samples = calculate_misclassified_samples(targets,
3380 Tensor<type, 1> misclassified_numerical_probabilities(misclassified_samples.dimension(0));
3382 for(Index i = 0; i < misclassified_numerical_probabilities.size(); i++)
3384 misclassified_numerical_probabilities(i) = type(::atof(misclassified_samples(i, 3).c_str()));
3387 Histogram misclassified_samples_histogram(misclassified_numerical_probabilities);
3388 misclassified_samples_histogram.save(histogram_file_name);
3392void TestingAnalysis::save_misclassified_samples_probability_histogram(
const Tensor<string, 2>& misclassified_samples,
3393 const string& histogram_file_name)
3396 Tensor<type, 1> misclassified_numerical_probabilities(misclassified_samples.dimension(0));
3398 for(Index i = 0; i < misclassified_numerical_probabilities.size(); i++)
3400 misclassified_numerical_probabilities(i) = type(::atof(misclassified_samples(i, 3).c_str()));
3403 Histogram misclassified_samples_histogram(misclassified_numerical_probabilities);
3404 misclassified_samples_histogram.save(histogram_file_name);
// --- calculate_error_autocorrelation() --- (fragment)
// Returns, for each target variable, the autocorrelations of the testing
// error series (targets - outputs) up to maximum_lags_number lags.
// NOTE(review): extraction artifact — the function signature and the lines
// that fetch the testing data and build `targets`, `outputs` and
// `targets_number` are missing from this dump; presumably
// Tensor<Tensor<type, 1>, 1> calculate_error_autocorrelation(const Index&) const.
// Debug guard: the neural network pointer must not be null.
3422 ostringstream buffer;
3424 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
3425 <<
"Tensor<type, 1> calculate_error_autocorrelation() const method.\n"
3426 <<
"Pointer to neural network in neural network is nullptr.\n";
3428 throw logic_error(buffer.str());
// Debug guard: network input count must match the data set's.
3437 ostringstream buffer;
3439 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3440 <<
"Tensor<type, 1> calculate_error_autocorrelation() const method." << endl
3441 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
3443 throw logic_error(buffer.str());
// Debug guard: network output count must match the data set's target count.
3450 ostringstream buffer;
3452 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3453 <<
"Tensor<type, 1> calculate_error_autocorrelation() const method." << endl
3454 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
3456 throw logic_error(buffer.str());
// Per-sample error matrix; one autocorrelation vector per target column.
3468 const Tensor<type, 2> error = targets - outputs;
3470 Tensor<Tensor<type, 1>, 1> error_autocorrelations(targets_number);
3472 for(Index i = 0; i < targets_number; i++)
// chip(i,1) extracts column i, i.e. the i-th target's error series.
3474 error_autocorrelations[i] = autocorrelations(this->thread_pool_device, error.chip(i,1), maximum_lags_number);
3477 return error_autocorrelations;
// --- calculate_inputs_errors_cross_correlation() --- (fragment)
// Returns, for each target, the cross-correlations between an input column
// and the corresponding testing-error column, up to lags_number lags.
// NOTE(review): extraction artifact — signature and the lines building
// `inputs`, `targets`, `outputs` and `targets_number` are missing here.
// Debug guard: the neural network pointer must not be null.
3495 ostringstream buffer;
3497 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
3498 <<
"Tensor<type, 1> calculate_inputs_errors_cross_correlation() const method.\n"
3499 <<
"Pointer to neural network in neural network is nullptr.\n";
3501 throw logic_error(buffer.str());
// Debug guard: network input count must match the data set's.
3511 ostringstream buffer;
3513 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3514 <<
"Tensor<type, 1> calculate_inputs_errors_cross_correlation() const method." << endl
3515 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
3517 throw logic_error(buffer.str());
// Debug guard: network output count must match the data set's target count.
3524 ostringstream buffer;
3526 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3527 <<
"Tensor<type, 1> calculate_inputs_errors_cross_correlation() const method." << endl
3528 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
3530 throw logic_error(buffer.str());
3543 const Tensor<type, 2> errors = targets - outputs;
3545 Tensor<Tensor<type, 1>, 1> inputs_errors_cross_correlation(targets_number);
3547 for(Index i = 0; i < targets_number; i++)
// NOTE(review): the loop runs over targets_number but chips `inputs` by the
// same index i — this implicitly assumes the number of inputs equals the
// number of targets; confirm that assumption against the callers.
3549 inputs_errors_cross_correlation[i] = cross_correlations(this->thread_pool_device, inputs.chip(i,1), errors.chip(i,1), lags_number);
3552 return inputs_errors_cross_correlation;
// --- calculate_binary_classification_tests() --- (fragment)
// Computes 15 binary-classification metrics from the confusion matrix and
// returns them packed in a Tensor<type, 1>(15).
// NOTE(review): extraction artifact — the signature, the lines fetching
// inputs_number/outputs_number/targets_number and the confusion matrix, and
// several scalar declarations (e.g. `type error_rate;`, `type sensitivity;`,
// `type specificity;`, `type precision;`, `type f1_score;`, `type markedness;`)
// are missing from this dump, as are most else-branch keywords.
// Debug guard: the data set pointer must not be null.
3585 ostringstream buffer;
3587 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3588 <<
"Tensor<type, 1> calculate_binary_classification_tests() const." << endl
3589 <<
"Data set is nullptr." << endl;
3591 throw logic_error(buffer.str());
// Debug guard: network input count must match the data set's.
3602 ostringstream buffer;
3604 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3605 <<
"Tensor<type, 1> calculate_binary_classification_tests() const." << endl
3606 <<
"Number of inputs in neural network is not equal to number of inputs in data set." << endl;
3608 throw logic_error(buffer.str());
// Binary classification requires exactly one network output...
3610 else if(outputs_number != 1)
3612 ostringstream buffer;
3614 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3615 <<
"Tensor<type, 1> calculate_binary_classification_tests() const." << endl
3616 <<
"Number of outputs in neural network must be one." << endl;
3618 throw logic_error(buffer.str());
// ...and exactly one data-set target.
3620 else if(targets_number != 1)
3622 ostringstream buffer;
3624 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3625 <<
"Tensor<type, 1> calculate_binary_classification_tests() const." << endl
3626 <<
"Number of targets in data set must be one." << endl;
3628 throw logic_error(buffer.str());
// Confusion matrix layout used here: (0,0)=TP, (1,0)=FP, (0,1)=FN, (1,1)=TN.
3637 const Index true_positive = confusion(0,0);
3638 const Index false_positive = confusion(1,0);
3639 const Index false_negative = confusion(0,1);
3640 const Index true_negative = confusion(1,1);
// Classification accuracy = (TP+TN)/total; defined as 0 on an empty matrix.
3644 type classification_accuracy;
3646 if(true_positive + true_negative + false_positive + false_negative == 0)
3648 classification_accuracy = type(0);
3652 classification_accuracy =
static_cast<type
>(true_positive + true_negative)/
static_cast<type
>(true_positive + true_negative + false_positive + false_negative);
// Error rate = (FP+FN)/total.
3659 if(true_positive + true_negative + false_positive + false_negative == 0)
3661 error_rate = type(0);
3665 error_rate =
static_cast<type
>(false_positive + false_negative)/
static_cast<type
>(true_positive + true_negative + false_positive + false_negative);
// Sensitivity (recall) = TP/(TP+FN).
3672 if(true_positive + false_negative == 0)
3674 sensitivity = type(0);
3678 sensitivity =
static_cast<type
>(true_positive)/
static_cast<type
>(true_positive + false_negative);
// Specificity = TN/(TN+FP).
3685 if(true_negative + false_positive == 0)
3687 specificity = type(0);
3691 specificity =
static_cast<type
>(true_negative)/
static_cast<type
>(true_negative + false_positive);
// Precision (PPV) = TP/(TP+FP).
3698 if(true_positive + false_positive == 0)
3700 precision = type(0);
3704 precision =
static_cast<type
>(true_positive) /
static_cast<type
>(true_positive + false_positive);
// Positive likelihood ratio = sensitivity/(1 - specificity);
// 1 when accuracy is (numerically) perfect, 0 when specificity is 1.
3709 type positive_likelihood;
3711 if(
abs(classification_accuracy -
static_cast<type
>(1.0)) < type(NUMERIC_LIMITS_MIN))
3713 positive_likelihood = type(1);
3715 else if(
abs(
static_cast<type
>(1.0) - specificity) < type(NUMERIC_LIMITS_MIN))
3717 positive_likelihood = type(0);
3721 positive_likelihood = sensitivity/(
static_cast<type
>(1.0) - specificity);
// Negative likelihood ratio = specificity/(1 - sensitivity).
// NOTE(review): this branch truncates the accuracy with static_cast<Index>
// instead of the epsilon comparison used for the positive likelihood above —
// confirm whether the asymmetry is intended.
3726 type negative_likelihood;
3728 if(
static_cast<Index
>(classification_accuracy) == 1)
3730 negative_likelihood = type(1);
3732 else if(
abs(type(1) - sensitivity) < type(NUMERIC_LIMITS_MIN))
3734 negative_likelihood = type(0);
3738 negative_likelihood = specificity/(
static_cast<type
>(1.0) - sensitivity);
// F1 score = 2TP/(2TP+FP+FN); the zero-denominator assignment sits on a line
// omitted by this dump.
3745 if(2*true_positive + false_positive + false_negative == 0)
3751 f1_score =
static_cast<type
>(2.0)* type(true_positive)/(
static_cast<type
>(2.0)* type(true_positive) + type(false_positive) + type(false_negative));
// False positive rate = FP/(FP+TN).
3756 type false_positive_rate;
3758 if(false_positive + true_negative == 0)
3760 false_positive_rate = type(0);
3764 false_positive_rate =
static_cast<type
>(false_positive)/
static_cast<type
>(false_positive + true_negative);
// False discovery rate = FP/(FP+TP).
3769 type false_discovery_rate;
3771 if(false_positive + true_positive == 0)
3773 false_discovery_rate = type(0);
3777 false_discovery_rate =
static_cast<type
>(false_positive) /
static_cast<type
>(false_positive + true_positive);
// False negative rate = FN/(FN+TP).
3782 type false_negative_rate;
3784 if(false_negative + true_positive == 0)
3786 false_negative_rate = type(0);
3790 false_negative_rate =
static_cast<type
>(false_negative)/
static_cast<type
>(false_negative + true_positive);
// Negative predictive value = TN/(TN+FN).
3795 type negative_predictive_value;
3797 if(true_negative + false_negative == 0)
3799 negative_predictive_value = type(0);
3803 negative_predictive_value =
static_cast<type
>(true_negative)/
static_cast<type
>(true_negative + false_negative);
// Matthews correlation coefficient =
// (TP*TN - FP*FN)/sqrt((TP+FP)(TP+FN)(TN+FP)(TN+FN)); 0 if any factor is 0.
// NOTE(review): the Index products can overflow for very large sample counts,
// and the trailing semicolon of this statement is missing in this dump
// (extraction artifact).
3808 type Matthews_correlation_coefficient;
3810 if((true_positive + false_positive) *(true_positive + false_negative) *(true_negative + false_positive) *(true_negative + false_negative) == 0)
3812 Matthews_correlation_coefficient = type(0);
3816 Matthews_correlation_coefficient =
static_cast<type
>(true_positive * true_negative - false_positive * false_negative) /
static_cast<type
>(sqrt(((true_positive + false_positive) *(true_positive + false_negative) *(true_negative + false_positive) *(true_negative + false_negative))))
// Informedness (Youden's J) = sensitivity + specificity - 1.
3822 type informedness = sensitivity + specificity - type(1);
// Markedness is defined as precision + NPV - 1.
// NOTE(review): the else branch uses TN/(TN+FP) (= specificity) rather than
// the negative_predictive_value TN/(TN+FN) computed above — confirm whether
// this denominator is intentional.
3828 if(true_negative + false_positive == 0)
3830 markedness = precision - type(1);
3834 markedness = precision +
static_cast<type
>(true_negative)/
static_cast<type
>(true_negative + false_positive) -
static_cast<type
>(1.0);
// Pack the 15 metrics in their documented order (indices 0-14).
3839 Tensor<type, 1> binary_classification_test(15);
3841 binary_classification_test[0] = classification_accuracy;
3842 binary_classification_test[1] = error_rate;
3843 binary_classification_test[2] = sensitivity;
3844 binary_classification_test[3] = specificity;
3845 binary_classification_test[4] = precision;
3846 binary_classification_test[5] = positive_likelihood;
3847 binary_classification_test[6] = negative_likelihood;
3848 binary_classification_test[7] = f1_score;
3849 binary_classification_test[8] = false_positive_rate;
3850 binary_classification_test[9] = false_discovery_rate;
3851 binary_classification_test[10] = false_negative_rate;
3852 binary_classification_test[11] = negative_predictive_value;
3853 binary_classification_test[12] = Matthews_correlation_coefficient;
3854 binary_classification_test[13] = informedness;
3855 binary_classification_test[14] = markedness;
3857 return binary_classification_test;
3861void TestingAnalysis::print_binary_classification_tests()
const
3865 cout <<
"Binary classification tests: " << endl;
3866 cout <<
"Classification accuracy : " << binary_classification_tests[0] << endl;
3867 cout <<
"Error rate : " << binary_classification_tests[1] << endl;
3868 cout <<
"Sensitivity : " << binary_classification_tests[2] << endl;
3869 cout <<
"Specificity : " << binary_classification_tests[3] << endl;
3883 ostringstream buffer;
3885 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
3886 <<
"Tensor<type, 1> calculate_inputs_errors_cross_correlation() const method.\n"
3887 <<
"Pointer to neural network in neural network is nullptr.\n";
3889 throw logic_error(buffer.str());
3898 ostringstream buffer;
3900 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3901 <<
"Tensor<type, 1> calculate_inputs_errors_cross_correlation() const method." << endl
3902 <<
"Number of inputs in neural network must be equal to number of inputs in data set." << endl;
3904 throw logic_error(buffer.str());
3911 ostringstream buffer;
3913 buffer <<
"OpenNN Exception: TestingAnalysis class." << endl
3914 <<
"Tensor<type, 1> calculate_inputs_errors_cross_correlation() const method." << endl
3915 <<
"Number of outputs in neural network must be equal to number of targets in data set." << endl;
3917 throw logic_error(buffer.str());
3929 type logloss = type(0);
3931 for(Index i = 0; i < testing_samples_number; i++)
3933 logloss += targets(i,0)*
log(outputs(i,0)) + (type(1) - targets(i,0))*
log(type(1) - outputs(i,0));
3936 return -logloss/type(testing_samples_number);
// --- write_XML() --- (fragment)
// Serializes the testing analysis under a <TestingAnalysis> root element with
// a <Display> child holding the display flag.
// NOTE(review): extraction artifact — the signature, the `buffer << display`
// line and the matching CloseElement() calls are missing from this dump.
3952 ostringstream buffer;
3953 file_stream.OpenElement(
"TestingAnalysis");
3957 file_stream.OpenElement(
"Display");
// The buffer presumably holds the textual display flag at this point —
// confirm against the omitted lines.
3962 file_stream.
PushText(buffer.str().c_str());
// --- from_XML() --- (fragment)
// Restores the object from a tinyxml2 document; throws if the root
// <TestingAnalysis> element is absent.
3976 ostringstream buffer;
3982 buffer <<
"OpenNN Exception: TestingAnalysis class.\n"
3983 <<
"void from_XML(const tinyxml2::XMLDocument&) method.\n"
3984 <<
"Testing analysis element is nullptr.\n";
3986 throw logic_error(buffer.str());
// Read the Display element's text; the set_display(...) call sits on a line
// omitted by this dump.
3995 string new_display_string = element->GetText();
// A bad display value is reported to cerr but is not fatal.
4001 catch(
const logic_error& e)
4003 cerr << e.what() << endl;
// --- save() --- (fragment)
// Opens file_name for writing ("w") to dump the XML document.
// NOTE(review): extraction artifact — the rest of save() (XML printing and
// the fclose of pFile) is missing from this dump.
4016 pFile = fopen(file_name.c_str(),
"w");
// --- load() --- (fragment)
// Loads a testing-analysis XML file; tinyxml2 LoadFile() returns a nonzero
// error code on failure, which is treated as truthy here.
4035 if(document.LoadFile(file_name.c_str()))
4037 ostringstream buffer;
4039 buffer <<
"OpenNN Exception: Testing analysis class.\n"
4040 <<
"void load(const string&) method.\n"
4041 <<
"Cannot load XML file " << file_name <<
".\n";
4043 throw logic_error(buffer.str());
4049bool TestingAnalysis::contains(
const Tensor<type, 1>& tensor,
const type& value)
const
4051 Tensor<type, 1> copy(tensor);
4053 type* it = find(copy.data(), copy.data()+copy.size(), value);
4055 return it != (copy.data()+copy.size());
4058Tensor<type, 2> TestingAnalysis::delete_row(
const Tensor<type, 2>& tensor,
const Index& row_index)
const
4060 const Index rows_number = tensor.dimension(0);
4061 const Index columns_number = tensor.dimension(1);
4064 if(row_index > rows_number)
4066 ostringstream buffer;
4068 buffer <<
"OpenNN Exception: Matrix Template.\n"
4069 <<
"Matrix<T> delete_row(const size_t&) const.\n"
4070 <<
"Index of row must be less than number of rows.\n"
4071 <<
"row index: " << row_index <<
"rows_number" << rows_number <<
"\n";
4073 throw logic_error(buffer.str());
4075 else if(rows_number < 2)
4077 ostringstream buffer;
4079 buffer <<
"OpenNN Exception: Matrix Template.\n"
4080 <<
"Matrix<T> delete_row(const size_t&) const.\n"
4081 <<
"Number of rows must be equal or greater than two.\n";
4083 throw logic_error(buffer.str());
4088 Tensor<type, 2> new_matrix(rows_number-1, columns_number);
4090 for(Index i = 0; i < row_index; i++)
4092 for(Index j = 0; j < columns_number; j++)
4094 new_matrix(i,j) = tensor(i,j);
4098 for(Index i = row_index+1; i < rows_number; i++)
4100 for(Index j = 0; j < columns_number; j++)
4102 new_matrix(i-1,j) = tensor(i,j);
This class represents the concept of a data set for data modelling problems, such as approximation,...
Index get_training_samples_number() const
Returns the number of samples in the data set which will be used for training.
Index get_target_variables_number() const
Returns the number of target variables of the data set.
Tensor< Index, 1 > get_testing_samples_indices() const
Returns the indices of the samples which will be used for testing.
Tensor< Index, 1 > calculate_target_distribution() const
Index get_selection_samples_number() const
Returns the number of samples in the data set which will be used for selection.
Tensor< type, 2 > get_selection_target_data() const
Tensor< type, 2 > get_testing_input_data() const
Index get_input_variables_number() const
Index get_testing_samples_number() const
Returns the number of samples in the data set which will be used for testing.
Tensor< type, 2 > get_selection_input_data() const
Tensor< string, 1 > get_target_variables_names() const
Tensor< type, 2 > get_training_input_data() const
Tensor< type, 2 > get_testing_target_data() const
Tensor< type, 2 > get_training_target_data() const
This class represents the concept of neural network in the OpenNN library.
const Tensor< string, 1 > & get_outputs_names() const
Returns a string vector with the names of the variables used as outputs.
Index get_inputs_number() const
Returns the number of inputs to the neural network.
Tensor< type, 2 > calculate_outputs(const Tensor< type, 2 > &)
UnscalingLayer * get_unscaling_layer_pointer() const
Returns a pointer to the unscaling layers object composing this neural network object.
ProbabilisticLayer * get_probabilistic_layer_pointer() const
Returns a pointer to the first probabilistic layer composing this neural network.
const type & get_decision_threshold() const
Returns the decision threshold.
Tensor< type, 2 > calculate_roc_curve(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< type, 2 > perform_calibration_plot_analysis() const
Performs a calibration plot analysis.
KolmogorovSmirnovResults perform_Kolmogorov_Smirnov_analysis() const
DataSet * data_set_pointer
Pointer to a data set object.
Tensor< type, 2 > calculate_lift_chart(const Tensor< type, 2 > &) const
NeuralNetwork * neural_network_pointer
Pointer to the neural network object to be tested.
type calculate_optimal_threshold(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< Tensor< Index, 1 >, 2 > calculate_multiple_classification_rates() const
Returns a matrix of subvectors which have the rates for a multiple classification problem.
Tensor< type, 2 > calculate_percentage_error_data() const
void set_data_set_pointer(DataSet *)
Tensor< type, 2 > calculate_errors() const
const bool & get_display() const
Tensor< type, 1 > calculate_multiple_classification_testing_errors() const
Tensor< Index, 1 > calculate_false_negative_samples(const Tensor< type, 2 > &, const Tensor< type, 2 > &, const Tensor< Index, 1 > &, const type &) const
Tensor< Index, 1 > calculate_true_positive_samples(const Tensor< type, 2 > &, const Tensor< type, 2 > &, const Tensor< Index, 1 > &, const type &) const
type calculate_weighted_squared_error(const Tensor< type, 2 > &, const Tensor< type, 2 > &, const Tensor< type, 1 > &=Tensor< type, 1 >()) const
type calculate_cross_entropy_error(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
virtual void from_XML(const tinyxml2::XMLDocument &)
void load(const string &)
Tensor< type, 3 > calculate_error_data() const
Tensor< Index, 1 > calculate_positives_negatives_rate(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< Index, 1 > calculate_false_positive_samples(const Tensor< type, 2 > &, const Tensor< type, 2 > &, const Tensor< Index, 1 > &, const type &) const
bool display
Display messages to screen.
Tensor< type, 2 > perform_cumulative_gain_analysis() const
Tensor< Index, 2 > calculate_confusion_multiple_classification(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< type, 2 > perform_lift_chart_analysis() const
type calculate_Wilcoxon_parameter(const type &, const type &) const
type calculate_area_under_curve_confidence_limit(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< type, 2 > calculate_calibration_plot(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
type calculate_area_under_curve(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
RocAnalysisResults perform_roc_analysis() const
Tensor< Index, 2 > calculate_confusion_binary_classification(const Tensor< type, 2 > &, const Tensor< type, 2 > &, const type &) const
Tensor< Tensor< Index, 1 >, 1 > calculate_maximal_errors(const Index &=10) const
void save(const string &) const
Tensor< Tensor< type, 1 >, 1 > calculate_inputs_errors_cross_correlation(const Index &=10) const
NeuralNetwork * get_neural_network_pointer() const
Returns a pointer to the neural network object which is to be tested.
type calculate_normalized_squared_error(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< Index, 2 > calculate_confusion() const
Tensor< type, 2 > calculate_negative_cumulative_gain(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
void set_neural_network_pointer(NeuralNetwork *)
Tensor< Tensor< type, 1 >, 1 > calculate_error_autocorrelation(const Index &=10) const
Tensor< LinearRegressionAnalysis, 1 > perform_linear_regression_analysis() const
type calculate_logloss() const
Returns the logloss for a binary classification problem.
Tensor< type, 1 > calculate_maximum_gain(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
Tensor< type, 1 > calculate_binary_classification_tests() const
void print() const
Prints to the standard output the string representation of this testing analysis object.
Tensor< Histogram, 1 > calculate_error_data_histograms(const Index &=10) const
void set_display(const bool &)
virtual void write_XML(tinyxml2::XMLPrinter &) const
Tensor< type, 1 > calculate_binary_classification_testing_errors() const
Tensor< type, 1 > calculate_testing_errors() const
Tensor< Index, 1 > calculate_true_negative_samples(const Tensor< type, 2 > &, const Tensor< type, 2 > &, const Tensor< Index, 1 > &, const type &) const
Tensor< Correlation, 1 > linear_correlation() const
BinaryClassifcationRates calculate_binary_classification_rates() const
Tensor< type, 2 > calculate_cumulative_gain(const Tensor< type, 2 > &, const Tensor< type, 2 > &) const
DataSet * get_data_set_pointer() const
Returns a pointer to the data set object on which the neural network is tested.
virtual ~TestingAnalysis()
Tensor< Histogram, 1 > calculate_output_histogram(const Tensor< type, 2 > &, const Index &=10) const
Tensor< Tensor< Descriptives, 1 >, 1 > calculate_error_data_descriptives() const
This class represents a layer of unscaling neurons.
Tensor< type, 1 > get_minimums() const
Tensor< type, 1 > get_maximums() const
friend half pow(half, half)
void PushText(const char *text, bool cdata=false)
Add a text node.
virtual void CloseElement(bool compactMode=false)
If streaming, close the Element.
uint32 sqrt(uint32 &r, int &exp)
HALF_CONSTEXPR half abs(half arg)
This structure provides the results obtained from the regression analysis.
This structure contains the binary classification rates.
Tensor< Index, 1 > true_negatives_indices
Vector with the indices of the samples which are true negative.
Tensor< Index, 1 > true_positives_indices
Vector with the indices of the samples which are true positive.
Tensor< Index, 1 > false_positives_indices
Vector with the indices of the samples which are false positive.
Tensor< Index, 1 > false_negatives_indices
Vector with the indices of the samples which are false negative.
This structure contains the results of a Kolmogorov-Smirnov analysis.
Tensor< type, 2 > positive_cumulative_gain
Matrix containing the data of a positive cumulative gain.
Tensor< type, 1 > maximum_gain
Maximum gain of the cumulative gain analysis.
Tensor< type, 2 > negative_cumulative_gain
Matrix containing the data of a negative cumulative gain.
This structure contains the results of a roc curve analysis.
type confidence_limit
Confidence limit.
type optimal_threshold
Optimal threshold of a ROC curve.
Tensor< type, 2 > roc_curve
Matrix containing the data of a ROC curve.
type area_under_curve
Area under a ROC curve.