tensor_utilities.cpp
// OpenNN: Open Neural Networks Library
// www.opennn.net
//
//   T E N S O R   U T I L I T I E S   S O U R C E
//
//   Artificial Intelligence Techniques, SL
//   artelnics@artelnics.com

#include "tensor_utilities.h"

#define GET_VARIABLE_NAME(Variable) (#Variable)


namespace OpenNN
{

void initialize_sequential(Tensor<type, 1>& vector)
{
    for(Index i = 0; i < vector.size(); i++) vector(i) = type(i);
}


void multiply_rows(Tensor<type, 2>& matrix, const Tensor<type, 1>& vector)
{
    const Index columns_number = matrix.dimension(1);
    const Index rows_number = matrix.dimension(0);

//    #pragma omp parallel for

    for(Index i = 0; i < rows_number; i++)
    {
        for(Index j = 0; j < columns_number; j++)
        {
            matrix(i,j) *= vector(j);
        }
    }
}
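
// Usage sketch (illustrative only): multiply_rows() scales every row of the
// matrix element-wise by the vector, so the vector length is assumed to equal
// the number of matrix columns.
//
//     Tensor<type, 2> matrix(2, 3);
//     matrix.setConstant(type(2));
//
//     Tensor<type, 1> scale(3);
//     scale.setValues({type(1), type(10), type(100)});
//
//     multiply_rows(matrix, scale);   // each row becomes {2, 20, 200}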


void divide_columns(Tensor<type, 2>& matrix, const Tensor<type, 1>& vector)
{
    const Index columns_number = matrix.dimension(1);
    const Index rows_number = matrix.dimension(0);

//    #pragma omp parallel for

    for(Index j = 0; j < columns_number; j++)
    {
        for(Index i = 0; i < rows_number; i++)
        {
            matrix(i,j) /= (vector(i) == type(0) ? type(1) : vector(i));
        }
    }
}
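
// Usage sketch (illustrative only): divide_columns() divides every column of
// the matrix element-wise by the vector, so the vector length is assumed to
// equal the number of matrix rows; zero entries in the divisor are treated as
// one to avoid division by zero.
//
//     Tensor<type, 2> matrix(3, 2);
//     matrix.setConstant(type(6));
//
//     Tensor<type, 1> divisor(3);
//     divisor.setValues({type(2), type(3), type(0)});
//
//     divide_columns(matrix, divisor);   // rows become {3, 3}, {2, 2}, {6, 6}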


bool is_zero(const Tensor<type, 1>& tensor)
{
    const Index size = tensor.size();

    for(Index i = 0; i < size; i++)
    {
        if(abs(tensor[i]) > type(NUMERIC_LIMITS_MIN)) return false;
    }

    return true;
}


bool is_zero(const Tensor<type, 1>& tensor, const type& limit)
{
    const Index size = tensor.size();

    for(Index i = 0; i < size; i++)
    {
        if(abs(tensor[i]) > type(limit)) return false;
    }

    return true;
}


bool is_false(const Tensor<bool, 1>& tensor)
{
    const Index size = tensor.size();

    for(Index i = 0; i < size; i++)
    {
        if(tensor(i)) return false;
    }

    return true;
}


bool is_binary(const Tensor<type, 2>& matrix)
{
    const Index size = matrix.size();

    for(Index i = 0; i < size; i++)
    {
        if(matrix(i) != type(0) && matrix(i) != type(1)) return false;
    }

    return true;
}


bool is_constant(const Tensor<type, 1>& vector)
{
    const Index size = vector.size();

    // Comparing every element against the first is equivalent to comparing
    // all pairs, but avoids the quadratic loop.

    for(Index i = 1; i < size; i++)
    {
        if((vector(i) - vector(0)) != type(0)) return false;
    }

    return true;
}


bool is_equal(const Tensor<type, 2>& matrix, const type& value, const type& tolerance)
{
    const Index size = matrix.size();

    for(Index i = 0; i < size; i++)
    {
        if(abs(matrix(i) - value) > tolerance) return false;
    }

    return true;
}


bool are_equal(const Tensor<type, 1>& vector_1, const Tensor<type, 1>& vector_2, const type& tolerance)
{
    const Index size = vector_1.size();

    for(Index i = 0; i < size; i++)
    {
        if(abs(vector_1(i) - vector_2(i)) > tolerance) return false;
    }

    return true;
}


bool are_equal(const Tensor<type, 2>& matrix_1, const Tensor<type, 2>& matrix_2, const type& tolerance)
{
    const Index size = matrix_1.size();

    for(Index i = 0; i < size; i++)
    {
        if(abs(matrix_1(i) - matrix_2(i)) > tolerance) return false;
    }

    return true;
}


void save_csv(const Tensor<type, 2>& data, const string& filename)
{
    ofstream file(filename);

    if(!file.is_open())
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: Matrix template." << endl
               << "void save_csv(const Tensor<type, 2>&, const string&) method." << endl
               << "Cannot open matrix data file: " << filename << endl;

        throw logic_error(buffer.str());
    }

    file.precision(20);

    const Index data_rows = data.dimension(0);
    const Index data_columns = data.dimension(1);

    const char separator_char = ';';

    for(Index i = 0; i < data_rows; i++)
    {
        for(Index j = 0; j < data_columns; j++)
        {
            file << data(i,j);

            if(j != data_columns-1)
            {
                file << separator_char;
            }
        }

        file << endl;
    }

    file.close();
}
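
// Note: save_csv() writes the matrix row by row with 20-digit precision,
// separating values with ';' and throwing logic_error if the file cannot be
// opened. Sketch of a call (the file name is illustrative):
//
//     Tensor<type, 2> data(2, 2);
//     data.setValues({{type(1), type(2)}, {type(3), type(4)}});
//
//     save_csv(data, "data.csv");   // produces the lines "1;2" and "3;4"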


Tensor<Index, 1> calculate_rank_greater(const Tensor<type, 1>& vector)
{
    const Index size = vector.size();

    Tensor<Index, 1> rank(size);
    iota(rank.data(), rank.data() + rank.size(), 0);

    sort(rank.data(),
         rank.data() + rank.size(),
         [&](Index i, Index j){return vector[i] > vector[j];});

    return rank;
}


Tensor<Index, 1> calculate_rank_less(const Tensor<type, 1>& vector)
{
    const Index size = vector.size();

    Tensor<Index, 1> rank(size);
    iota(rank.data(), rank.data() + rank.size(), 0);

    sort(rank.data(),
         rank.data() + rank.size(),
         [&](Index i, Index j){return vector[i] < vector[j];});

    return rank;
}
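
// Usage sketch (illustrative only): the two rank functions return the index
// permutation that would sort the vector, in descending order for
// calculate_rank_greater() and ascending order for calculate_rank_less().
//
//     Tensor<type, 1> values(3);
//     values.setValues({type(0.2), type(0.9), type(0.5)});
//
//     const Tensor<Index, 1> greater = calculate_rank_greater(values);   // {1, 2, 0}
//     const Tensor<Index, 1> less = calculate_rank_less(values);         // {0, 2, 1}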


void scrub_missing_values(Tensor<type, 2>& matrix, const type& value)
{
    std::replace_if(matrix.data(), matrix.data() + matrix.size(), [](type x){return isnan(x);}, value);
}
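
// Note: scrub_missing_values() overwrites every NaN entry of the matrix with
// the given value in place, e.g. scrub_missing_values(matrix, type(0)) turns
// missing entries into zeros.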


Tensor<type, 2> kronecker_product(const Tensor<type, 1>& vector, const Tensor<type, 1>& other_vector)
{
    const Index size = vector.size();

    Tensor<type, 2> direct(size, size);

    #pragma omp parallel for

    for(Index i = 0; i < size; i++)
    {
        for(Index j = 0; j < size; j++)
        {
            direct(i, j) = vector(i) * other_vector(j);
        }
    }

    return direct;
}
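
// Note: for two rank-1 tensors this "Kronecker product" is the outer product,
// i.e. direct(i, j) = vector(i) * other_vector(j). Both arguments are assumed
// to have the same size, which holds for its use in l2_norm_hessian() below.
// Sketch:
//
//     Tensor<type, 1> u(2);
//     u.setValues({type(1), type(2)});
//
//     const Tensor<type, 2> outer = kronecker_product(u, u);   // {{1, 2}, {2, 4}}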


type l1_norm(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector)
{
    Tensor<type, 0> norm;

    norm.device(*thread_pool_device) = vector.abs().sum();

    return norm(0);
}


void l1_norm_gradient(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector, Tensor<type, 1>& gradient)
{
    gradient.device(*thread_pool_device) = vector.sign();
}


void l1_norm_hessian(const ThreadPoolDevice*, const Tensor<type, 1>&, Tensor<type, 2>& hessian)
{
    hessian.setZero();
}


type l2_norm(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector)
{
    Tensor<type, 0> norm;

    norm.device(*thread_pool_device) = vector.square().sum().sqrt();

    if(isnan(norm(0)))
    {
//        cout << "OpenNN Warning: l2 norm of vector is NaN" << endl;

//        ostringstream buffer;

//        buffer << "OpenNN Exception: l2 norm of vector is not a number" << endl;

//        throw logic_error(buffer.str());
    }

    return norm(0);
}
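
// Usage sketch (illustrative only): both norms reduce the vector on the given
// thread pool device; for v = {3, 4}, l1_norm() returns 7 and l2_norm()
// returns 5. The thread pool objects below follow the usual Eigen/OpenNN
// pattern and are assumptions for this example, not part of this file.
//
//     ThreadPool thread_pool(2);
//     ThreadPoolDevice thread_pool_device(&thread_pool, 2);
//
//     Tensor<type, 1> v(2);
//     v.setValues({type(3), type(4)});
//
//     const type l1 = l1_norm(&thread_pool_device, v);   // 7
//     const type l2 = l2_norm(&thread_pool_device, v);   // 5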


void l2_norm_gradient(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector, Tensor<type, 1>& gradient)
{
    const type norm = l2_norm(thread_pool_device, vector);

    if(norm < type(NUMERIC_LIMITS_MIN))
    {
        gradient.setZero();

        return;
    }

    gradient.device(*thread_pool_device) = vector/norm;
}


void l2_norm_hessian(const ThreadPoolDevice* thread_pool_device, const Tensor<type, 1>& vector, Tensor<type, 2>& hessian)
{
    const type norm = l2_norm(thread_pool_device, vector);

    if(norm < type(NUMERIC_LIMITS_MIN))
    {
        hessian.setZero();

        return;
    }

    hessian.device(*thread_pool_device) = kronecker_product(vector, vector)/(norm*norm*norm);
}
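
// Note: l2_norm_gradient() fills gradient with vector/norm, and
// l2_norm_hessian() fills hessian with kronecker_product(vector, vector)
// divided by norm^3; both fall back to zero when the norm is below
// NUMERIC_LIMITS_MIN.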


void sum_diagonal(Tensor<type, 2>& matrix, const type& value)
{
    const Index rows_number = matrix.dimension(0);

    #pragma omp parallel for
    for(Index i = 0; i < rows_number; i++)
        matrix(i,i) += value;
}


Tensor<type, 1> perform_Householder_QR_decomposition(const Tensor<type, 2>& A, const Tensor<type, 1>& b)
{
    const Index n = A.dimension(0);

    Tensor<type, 1> x(n);

    // Map the tensors as Eigen matrices without copying or casting away constness.

    const Map<const Matrix<type, Dynamic, Dynamic>> A_eigen(A.data(), n, n);
    const Map<const Matrix<type, Dynamic, 1>> b_eigen(b.data(), n, 1);
    Map<Matrix<type, Dynamic, 1>> x_eigen(x.data(), n);

    x_eigen = A_eigen.colPivHouseholderQr().solve(b_eigen);

    return x;
}
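
// Usage sketch (illustrative only): perform_Householder_QR_decomposition()
// solves the square linear system A*x = b with Eigen's column-pivoting
// Householder QR. A is assumed to be n x n and b of size n.
//
//     Tensor<type, 2> A(2, 2);
//     A.setValues({{type(2), type(0)}, {type(0), type(4)}});
//
//     Tensor<type, 1> b(2);
//     b.setValues({type(2), type(8)});
//
//     const Tensor<type, 1> x = perform_Householder_QR_decomposition(A, b);   // {1, 2}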


void fill_submatrix(const Tensor<type, 2>& matrix,
                    const Tensor<Index, 1>& rows_indices,
                    const Tensor<Index, 1>& columns_indices,
                    type* submatrix_pointer)
{
    const Index rows_number = rows_indices.size();
    const Index columns_number = columns_indices.size();

    const type* matrix_pointer = matrix.data();

    #pragma omp parallel for

    for(Index j = 0; j < columns_number; j++)
    {
        const type* matrix_column_pointer = matrix_pointer + matrix.dimension(0)*columns_indices[j];
        type* submatrix_column_pointer = submatrix_pointer + rows_number*j;

        const type* value_pointer = nullptr;
        const Index* rows_indices_pointer = rows_indices.data();

        for(Index i = 0; i < rows_number; i++)
        {
            value_pointer = matrix_column_pointer + *rows_indices_pointer;
            rows_indices_pointer++;
            *submatrix_column_pointer = *value_pointer;
            submatrix_column_pointer++;
        }
    }
}
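
// Note: fill_submatrix() gathers the requested rows and columns of the matrix
// into the buffer pointed to by submatrix_pointer, written column by column
// (column-major order, matching Eigen's default layout). The buffer is assumed
// to hold at least rows_indices.size() * columns_indices.size() elements.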


Index count_NAN(const Tensor<type, 1>& x)
{
    Index NAN_number = 0;

    for(Index i = 0; i < x.size(); i++)
    {
        if(isnan(x(i))) NAN_number++;
    }

    return NAN_number;
}


void check_size(const Tensor<type, 1>& vector, const Index& size, const string& log)
{
    if(vector.size() != size)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: " << log
               << "Size of vector is " << vector.size() << ", but must be " << size << ".";

        throw logic_error(buffer.str());
    }
}


void check_dimensions(const Tensor<type, 2>& matrix, const Index& rows_number, const Index& columns_number, const string& log)
{
    if(matrix.dimension(0) != rows_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: " << log
               << "Number of rows in matrix is " << matrix.dimension(0) << ", but must be " << rows_number << ".";

        throw logic_error(buffer.str());
    }

    if(matrix.dimension(1) != columns_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: " << log
               << "Number of columns in matrix is " << matrix.dimension(1) << ", but must be " << columns_number << ".";

        throw logic_error(buffer.str());
    }
}


void check_columns_number(const Tensor<type, 2>& matrix, const Index& columns_number, const string& log)
{
    if(matrix.dimension(1) != columns_number)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: " << log
               << "Number of columns in matrix is " << matrix.dimension(1) << ", but must be " << columns_number << ".";

        throw logic_error(buffer.str());
    }
}


Tensor<type, 2> assemble_vector_vector(const Tensor<type, 1>& x, const Tensor<type, 1>& y)
{
    const Index rows_number = x.size();
    const Index columns_number = 2;

    Tensor<type, 2> data(rows_number, columns_number);

    for(Index i = 0; i < rows_number; i++)
    {
        data(i, 0) = x(i);
        data(i, 1) = y(i);
    }

    return data;
}


Tensor<type, 2> assemble_vector_matrix(const Tensor<type, 1>& x, const Tensor<type, 2>& y)
{
    const Index rows_number = x.size();
    const Index columns_number = 1 + y.dimension(1);

    Tensor<type, 2> data(rows_number, columns_number);

    for(Index i = 0; i < rows_number; i++)
    {
        data(i, 0) = x(i);

        for(Index j = 0; j < y.dimension(1); j++)
        {
            data(i, 1+j) = y(i,j);
        }
    }

    return data;
}


Tensor<type, 2> assemble_matrix_vector(const Tensor<type, 2>& x, const Tensor<type, 1>& y)
{
    const Index rows_number = x.dimension(0);
    const Index columns_number = x.dimension(1) + 1;

    Tensor<type, 2> data(rows_number, columns_number);

    for(Index i = 0; i < rows_number; i++)
    {
        for(Index j = 0; j < x.dimension(1); j++)
        {
            data(i, j) = x(i,j);
        }

        data(i, columns_number-1) = y(i);
    }

    return data;
}


Tensor<type, 2> assemble_matrix_matrix(const Tensor<type, 2>& x, const Tensor<type, 2>& y)
{
    const Index rows_number = x.dimension(0);
    const Index columns_number = x.dimension(1) + y.dimension(1);

    Tensor<type, 2> data(rows_number, columns_number);

    for(Index i = 0; i < rows_number; i++)
    {
        for(Index j = 0; j < x.dimension(1); j++)
        {
            data(i,j) = x(i,j);
        }

        for(Index j = 0; j < y.dimension(1); j++)
        {
            data(i, x.dimension(1) + j) = y(i,j);
        }
    }

    return data;
}
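
// Usage sketch (illustrative only): the assemble_* functions concatenate their
// arguments column-wise, so the row counts are assumed to match.
//
//     Tensor<type, 2> x(2, 2);
//     x.setConstant(type(1));
//
//     Tensor<type, 2> y(2, 3);
//     y.setConstant(type(2));
//
//     const Tensor<type, 2> data = assemble_matrix_matrix(x, y);   // 2 x 5 matrix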


bool is_less_than(const Tensor<type, 1>& column, const type& value)
{
    // Note: despite the name, the comparison is inclusive; the function
    // returns true when any element of the column is less than or equal to
    // the given value.

    const Tensor<bool, 1> if_sentence = column <= column.constant(value);

    const Tensor<bool, 0> is_less = if_sentence.any();

    return is_less(0);
}


}


// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2021 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA