// OpenNN: Open Neural Networks Library
// www.opennn.net
//
//   P O O L I N G   L A Y E R   C L A S S
//
// Artificial Intelligence Techniques SL
// artelnics@artelnics.com

#include "pooling_layer.h"

namespace OpenNN
{

/// Default constructor.
/// It creates an empty PoolingLayer object.

PoolingLayer::PoolingLayer() : Layer()
{
    set_default();
}


/// Input dimensions constructor.
/// It creates a pooling layer for inputs with the given dimensions.
/// @param new_input_variables_dimensions Input dimensions (channels, rows, columns).

PoolingLayer::PoolingLayer(const Tensor<Index, 1>& new_input_variables_dimensions) : Layer()
{
    set_input_variables_dimensions(new_input_variables_dimensions);

    set_default();
}


/// Input dimensions and pool size constructor.
/// @param new_input_variables_dimensions Input dimensions (channels, rows, columns).
/// @param pool_dimensions Numbers of rows and columns of the pooling window.

PoolingLayer::PoolingLayer(const Tensor<Index, 1>& new_input_variables_dimensions, const Tensor<Index, 1>& pool_dimensions) : Layer()
{
    pool_rows_number = pool_dimensions[0];

    pool_columns_number = pool_dimensions[1];

    set_default();
}


/// Destructor.

PoolingLayer::~PoolingLayer()
{
}


/// Returns the result of applying the layer's pooling method to a batch of inputs.
/// @param inputs Batch of inputs, with dimensions (batch, channels, rows, columns).

Tensor<type, 4> PoolingLayer::calculate_outputs(const Tensor<type, 4>& inputs)
{
#ifdef OPENNN_DEBUG

    const Index input_variables_dimensions_number = inputs.rank();

    if(input_variables_dimensions_number != 4)
    {
        ostringstream buffer;

        buffer << "OpenNN Exception: PoolingLayer class.\n"
               << "Tensor<type, 4> calculate_outputs(const Tensor<type, 4>&) method.\n"
               << "Number of inputs dimensions (" << input_variables_dimensions_number << ") must be 4 (batch, channels, rows, columns).\n";

        throw logic_error(buffer.str());
    }

#endif

    switch(pooling_method)
    {
        case PoolingMethod::NoPooling:
            return calculate_no_pooling_outputs(inputs);

        case PoolingMethod::MaxPooling:
            return calculate_max_pooling_outputs(inputs);

        case PoolingMethod::AveragePooling:
            return calculate_average_pooling_outputs(inputs);
    }

    return Tensor<type, 4>();
}
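

// Usage sketch (illustrative only): how a pooling layer might be set up and run.
// type, Tensor and Index come from the OpenNN configuration, and the dimensions
// below are hypothetical.
//
//     Tensor<Index, 1> input_dimensions(3);                      // channels, rows, columns
//     input_dimensions.setValues({1, 6, 6});
//
//     PoolingLayer pooling_layer(input_dimensions);
//     pooling_layer.set_pool_size(2, 2);
//     pooling_layer.set_row_stride(2);
//     pooling_layer.set_column_stride(2);
//     pooling_layer.set_pooling_method(PoolingLayer::PoolingMethod::MaxPooling);
//
//     Tensor<type, 4> inputs(1, 1, 6, 6);                        // batch, channels, rows, columns
//     inputs.setRandom();
//
//     const Tensor<type, 4> outputs = pooling_layer.calculate_outputs(inputs);   // 1 x 1 x 3 x 3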


/// Returns the result of applying average pooling to a batch of images.
/// @param inputs Batch of images, with dimensions (batch, channels, rows, columns).

Tensor<type, 4> PoolingLayer::calculate_average_pooling_outputs(const Tensor<type, 4>& inputs) const
{
    const Index images_number = inputs.dimension(0);

    const Index channels_number = inputs.dimension(1);

    const Index inputs_rows_number = inputs.dimension(2);

    const Index inputs_columns_number = inputs.dimension(3);

    const Index outputs_rows_number = (inputs_rows_number - pool_rows_number)/row_stride + 1;

    const Index outputs_columns_number = (inputs_columns_number - pool_columns_number)/column_stride + 1;
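
    // Each output dimension is (input - pool)/stride + 1 (integer division, no padding);
    // for example, a hypothetical 6x6 input with a 2x2 pool and stride 2 gives a 3x3 output.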

    Tensor<type, 4> outputs(images_number, channels_number, outputs_rows_number, outputs_columns_number);

    for(Index image_index = 0; image_index < images_number; image_index++)
    {
        for(Index channel_index = 0; channel_index < channels_number; channel_index++)
        {
            for(Index row_index = 0; row_index < outputs_rows_number; row_index++)
            {
                for(Index column_index = 0; column_index < outputs_columns_number; column_index++)
                {
                    outputs(image_index, channel_index, row_index, column_index) = type(0);

                    for(Index window_row = 0; window_row < pool_rows_number; window_row++)
                    {
                        const Index row = row_index*row_stride + window_row;

                        for(Index window_column = 0; window_column < pool_columns_number; window_column++)
                        {
                            const Index column = column_index*column_stride + window_column;

                            outputs(image_index, channel_index, row_index, column_index) += inputs(image_index, channel_index, row, column);
                        }
                    }

                    outputs(image_index, channel_index, row_index, column_index) /= type(pool_rows_number*pool_columns_number);
                }
            }
        }
    }

    return outputs;
}


/// Returns the result of applying no pooling to a batch of images.
/// @param inputs Batch of images, with dimensions (batch, channels, rows, columns).

Tensor<type, 4> PoolingLayer::calculate_no_pooling_outputs(const Tensor<type, 4>& inputs) const
{
    return inputs;
}


/// Returns the result of applying max pooling to a batch of images.
/// @param inputs Batch of images, with dimensions (batch, channels, rows, columns).

Tensor<type, 4> PoolingLayer::calculate_max_pooling_outputs(const Tensor<type, 4>& inputs) const
{
    const Index images_number = inputs.dimension(0);

    const Index channels_number = inputs.dimension(1);

    const Index inputs_rows_number = inputs.dimension(2);

    const Index inputs_columns_number = inputs.dimension(3);

    const Index outputs_rows_number = (inputs_rows_number - pool_rows_number)/row_stride + 1;

    const Index outputs_columns_number = (inputs_columns_number - pool_columns_number)/column_stride + 1;

    Tensor<type, 4> outputs(images_number, channels_number, outputs_rows_number, outputs_columns_number);

    for(Index image_index = 0; image_index < images_number; image_index++)
    {
        for(Index channel_index = 0; channel_index < channels_number; channel_index++)
        {
            for(Index row_index = 0; row_index < outputs_rows_number; row_index++)
            {
                for(Index column_index = 0; column_index < outputs_columns_number; column_index++)
                {
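                    // Seed the running maximum with the first element of the pooling
                    // window, then scan the rest of the window for larger values.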
                    outputs(image_index, channel_index, row_index, column_index) =
                            inputs(image_index, channel_index, row_index*row_stride, column_index*column_stride);

                    for(Index window_row = 0; window_row < pool_rows_number; window_row++)
                    {
                        const Index row = row_index*row_stride + window_row;

                        for(Index window_column = 0; window_column < pool_columns_number; window_column++)
                        {
                            const Index column = column_index*column_stride + window_column;

                            if(inputs(image_index, channel_index, row, column) > outputs(image_index, channel_index, row_index, column_index))
                            {
                                outputs(image_index, channel_index, row_index, column_index) = inputs(image_index, channel_index, row, column);
                            }
                        }
                    }
                }
            }
        }
    }

    return outputs;
}


Tensor<type, 4> PoolingLayer::calculate_hidden_delta(Layer* next_layer_pointer,
                                                     const Tensor<type, 4>& activations,
                                                     const Tensor<type, 4>& activations_derivatives,
                                                     const Tensor<type, 4>& next_layer_delta) const
{
    if(pooling_method == PoolingMethod::NoPooling) return next_layer_delta;
    else
    {
        const Type layer_type = next_layer_pointer->get_type();

        if(layer_type == Type::Convolutional)
        {
            ConvolutionalLayer* convolutional_layer = dynamic_cast<ConvolutionalLayer*>(next_layer_pointer);

            return calculate_hidden_delta_convolutional(convolutional_layer, activations, activations_derivatives, next_layer_delta);
        }
        else if(layer_type == Type::Pooling)
        {
            PoolingLayer* pooling_layer = dynamic_cast<PoolingLayer*>(next_layer_pointer);

            return calculate_hidden_delta_pooling(pooling_layer, activations, activations_derivatives, next_layer_delta);
        }
        else if(layer_type == Type::Perceptron)
        {
            PerceptronLayer* perceptron_layer = static_cast<PerceptronLayer*>(next_layer_pointer);

            return calculate_hidden_delta_perceptron(perceptron_layer, activations, activations_derivatives, next_layer_delta);
        }
        else if(layer_type == Type::Probabilistic)
        {
            ProbabilisticLayer* probabilistic_layer = dynamic_cast<ProbabilisticLayer*>(next_layer_pointer);

            return calculate_hidden_delta_probabilistic(probabilistic_layer, activations, activations_derivatives, next_layer_delta);
        }
    }

    return Tensor<type, 4>();
}


Tensor<type, 4> PoolingLayer::calculate_hidden_delta_convolutional(ConvolutionalLayer* next_layer_pointer,
                                                                   const Tensor<type, 4>&,
                                                                   const Tensor<type, 4>&,
                                                                   const Tensor<type, 4>& next_layer_delta) const
{
    // Current layer's values

    const Index images_number = next_layer_delta.dimension(0);
    const Index channels_number = get_inputs_channels_number();
    const Index output_rows_number = get_outputs_rows_number();
    const Index output_columns_number = get_outputs_columns_number();

    // Next layer's values

    const Index next_layers_filters_number = next_layer_pointer->get_kernels_number();
    const Index next_layers_filter_rows = next_layer_pointer->get_kernels_rows_number();
    const Index next_layers_filter_columns = next_layer_pointer->get_kernels_columns_number();
    const Index next_layers_output_rows = next_layer_pointer->get_outputs_rows_number();
    const Index next_layers_output_columns = next_layer_pointer->get_outputs_columns_number();
    const Index next_layers_row_stride = next_layer_pointer->get_row_stride();
    const Index next_layers_column_stride = next_layer_pointer->get_column_stride();

    const Tensor<type, 4> next_layers_weights = next_layer_pointer->get_synaptic_weights();

    // Hidden delta calculation

    Tensor<type, 4> hidden_delta(images_number, channels_number, output_rows_number, output_columns_number);

    const Index size = hidden_delta.size();

    #pragma omp parallel for

    for(Index tensor_index = 0; tensor_index < size; tensor_index++)
    {
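        // Recover the (image, channel, row, column) coordinates of this element of
        // the current layer from the flat index used by the parallel loop.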
        const Index image_index = tensor_index/(channels_number*output_rows_number*output_columns_number);
        const Index channel_index = (tensor_index/(output_rows_number*output_columns_number))%channels_number;
        const Index row_index = (tensor_index/output_columns_number)%output_rows_number;
        const Index column_index = tensor_index%output_columns_number;

        type sum = type(0);

        const Index lower_row_index = max(Index(0), (row_index - next_layers_filter_rows)/next_layers_row_stride + 1);
        const Index upper_row_index = min(row_index/next_layers_row_stride + 1, next_layers_output_rows);
        const Index lower_column_index = max(Index(0), (column_index - next_layers_filter_columns)/next_layers_column_stride + 1);
        const Index upper_column_index = min(column_index/next_layers_column_stride + 1, next_layers_output_columns);
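        // Only the next layer's outputs whose receptive field contains this element
        // can contribute to its delta; the bounds above restrict the sum to that window.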

        for(Index i = 0; i < next_layers_filters_number; i++)
        {
            for(Index j = lower_row_index; j < upper_row_index; j++)
            {
                for(Index k = lower_column_index; k < upper_column_index; k++)
                {
                    const type delta_element = next_layer_delta(image_index, i, j, k);

                    const type weight = next_layers_weights(i, channel_index, row_index - j*next_layers_row_stride, column_index - k*next_layers_column_stride);

                    sum += delta_element*weight;
                }
            }
        }

        hidden_delta(image_index, channel_index, row_index, column_index) = sum;
    }

    return hidden_delta;
}


Tensor<type, 4> PoolingLayer::calculate_hidden_delta_pooling(PoolingLayer* next_layer_pointer,
                                                             const Tensor<type, 4>& activations,
                                                             const Tensor<type, 4>&,
                                                             const Tensor<type, 4>& next_layer_delta) const
{
    switch(next_layer_pointer->get_pooling_method())
    {
        case OpenNN::PoolingLayer::PoolingMethod::NoPooling:
        {
            return next_layer_delta;
        }

        case OpenNN::PoolingLayer::PoolingMethod::AveragePooling:
        {
            // Current layer's values

            const Index images_number = next_layer_delta.dimension(0);
            const Index channels_number = get_inputs_channels_number();
            const Index output_rows_number = get_outputs_rows_number();
            const Index output_columns_number = get_outputs_columns_number();

            // Next layer's values

            const Index next_layers_pool_rows = next_layer_pointer->get_pool_rows_number();
            const Index next_layers_pool_columns = next_layer_pointer->get_pool_columns_number();
            const Index next_layers_output_rows = next_layer_pointer->get_outputs_rows_number();
            const Index next_layers_output_columns = next_layer_pointer->get_outputs_columns_number();
            const Index next_layers_row_stride = next_layer_pointer->get_row_stride();
            const Index next_layers_column_stride = next_layer_pointer->get_column_stride();

            // Hidden delta calculation

            Tensor<type, 4> hidden_delta(images_number, channels_number, output_rows_number, output_columns_number);

            const Index size = hidden_delta.size();

            #pragma omp parallel for

            for(Index tensor_index = 0; tensor_index < size; tensor_index++)
            {
                const Index image_index = tensor_index/(channels_number*output_rows_number*output_columns_number);
                const Index channel_index = (tensor_index/(output_rows_number*output_columns_number))%channels_number;
                const Index row_index = (tensor_index/output_columns_number)%output_rows_number;
                const Index column_index = tensor_index%output_columns_number;

                type sum = type(0);

                const Index lower_row_index = max(Index(0), (row_index - next_layers_pool_rows)/next_layers_row_stride + 1);
                const Index upper_row_index = min(row_index/next_layers_row_stride + 1, next_layers_output_rows);
                const Index lower_column_index = max(Index(0), (column_index - next_layers_pool_columns)/next_layers_column_stride + 1);
                const Index upper_column_index = min(column_index/next_layers_column_stride + 1, next_layers_output_columns);
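                // Each element of this layer receives a share of the delta of every
                // next-layer pooling window that contains it.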

                for(Index i = lower_row_index; i < upper_row_index; i++)
                {
                    for(Index j = lower_column_index; j < upper_column_index; j++)
                    {
                        const type delta_element = next_layer_delta(image_index, channel_index, i, j);

                        sum += delta_element;
                    }
                }

                hidden_delta(image_index, channel_index, row_index, column_index) = sum/type(next_layers_pool_rows*next_layers_pool_columns);
            }

            return hidden_delta;
        }

        case OpenNN::PoolingLayer::PoolingMethod::MaxPooling:
        {
            // Current layer's values

            const Index images_number = next_layer_delta.dimension(0);
            const Index channels_number = get_inputs_channels_number();
            const Index output_rows_number = get_outputs_rows_number();
            const Index output_columns_number = get_outputs_columns_number();

            // Next layer's values

            const Index next_layers_pool_rows = next_layer_pointer->get_pool_rows_number();
            const Index next_layers_pool_columns = next_layer_pointer->get_pool_columns_number();
            const Index next_layers_output_rows = next_layer_pointer->get_outputs_rows_number();
            const Index next_layers_output_columns = next_layer_pointer->get_outputs_columns_number();
            const Index next_layers_row_stride = next_layer_pointer->get_row_stride();
            const Index next_layers_column_stride = next_layer_pointer->get_column_stride();

            // Hidden delta calculation

            Tensor<type, 4> hidden_delta(images_number, channels_number, output_rows_number, output_columns_number);

            const Index size = hidden_delta.size();

            #pragma omp parallel for

            for(Index tensor_index = 0; tensor_index < size; tensor_index++)
            {
                const Index image_index = tensor_index/(channels_number*output_rows_number*output_columns_number);
                const Index channel_index = (tensor_index/(output_rows_number*output_columns_number))%channels_number;
                const Index row_index = (tensor_index/output_columns_number)%output_rows_number;
                const Index column_index = tensor_index%output_columns_number;

                type sum = type(0);

                const Index lower_row_index = max(Index(0), (row_index - next_layers_pool_rows)/next_layers_row_stride + 1);
                const Index upper_row_index = min(row_index/next_layers_row_stride + 1, next_layers_output_rows);
                const Index lower_column_index = max(Index(0), (column_index - next_layers_pool_columns)/next_layers_column_stride + 1);
                const Index upper_column_index = min(column_index/next_layers_column_stride + 1, next_layers_output_columns);

                for(Index i = lower_row_index; i < upper_row_index; i++)
                {
                    for(Index j = lower_column_index; j < upper_column_index; j++)
                    {
                        Tensor<type, 2> activations_current_submatrix(next_layers_pool_rows, next_layers_pool_columns);

                        for(Index submatrix_row_index = 0; submatrix_row_index < next_layers_pool_rows; submatrix_row_index++)
                        {
                            for(Index submatrix_column_index = 0; submatrix_column_index < next_layers_pool_columns; submatrix_column_index++)
                            {
                                activations_current_submatrix(submatrix_row_index, submatrix_column_index) =
                                        activations(image_index, channel_index, i*next_layers_row_stride + submatrix_row_index, j*next_layers_column_stride + submatrix_column_index);
                            }
                        }

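                        // Build a one-hot mask over the pooling window marking the position
                        // of its maximum; in max pooling only the maximum propagates the delta.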
                        Tensor<type, 2> multiply_not_multiply(next_layers_pool_rows, next_layers_pool_columns);
                        multiply_not_multiply.setZero();
                        multiply_not_multiply(0, 0) = type(1);

                        type max_value = activations_current_submatrix(0, 0);

                        for(Index submatrix_row_index = 0; submatrix_row_index < next_layers_pool_rows; submatrix_row_index++)
                        {
                            for(Index submatrix_column_index = 0; submatrix_column_index < next_layers_pool_columns; submatrix_column_index++)
                            {
                                if(activations_current_submatrix(submatrix_row_index, submatrix_column_index) > max_value)
                                {
                                    max_value = activations_current_submatrix(submatrix_row_index, submatrix_column_index);

                                    multiply_not_multiply.setZero();
                                    multiply_not_multiply(submatrix_row_index, submatrix_column_index) = type(1);
                                }
                            }
                        }

                        const type delta_element = next_layer_delta(image_index, channel_index, i, j);

                        const type max_derivative = multiply_not_multiply(row_index - i*next_layers_row_stride, column_index - j*next_layers_column_stride);

                        sum += delta_element*max_derivative;
                    }
                }

                hidden_delta(image_index, channel_index, row_index, column_index) = sum;
            }

            return hidden_delta;
        }
    }

    return Tensor<type, 4>();
}


Tensor<type, 4> PoolingLayer::calculate_hidden_delta_perceptron(PerceptronLayer* next_layer_pointer,
                                                                const Tensor<type, 4>&,
                                                                const Tensor<type, 4>&,
                                                                const Tensor<type, 4>& next_layer_delta) const
{
    // Current layer's values

    const Index images_number = next_layer_delta.dimension(0);
    const Index channels_number = get_inputs_channels_number();
    const Index output_rows_number = get_outputs_rows_number();
    const Index output_columns_number = get_outputs_columns_number();

    // Next layer's values

    const Index next_layers_output_columns = next_layer_delta.dimension(1);

    const Tensor<type, 2> next_layers_weights = next_layer_pointer->get_synaptic_weights();

    // Hidden delta calculation.
    // The mapping from the perceptron layer's flattened inputs back to this layer's
    // outputs is not implemented in this version; the intended computation is kept
    // below as comments and a zero delta is returned.

    Tensor<type, 4> hidden_delta(images_number, channels_number, output_rows_number, output_columns_number);

    hidden_delta.setZero();

    const Index size = hidden_delta.size();

    #pragma omp parallel for

    for(Index tensor_index = 0; tensor_index < size; tensor_index++)
    {
        //const Index image_index = tensor_index/(channels_number*output_rows_number*output_columns_number);
        //const Index channel_index = (tensor_index/(output_rows_number*output_columns_number))%channels_number;
        //const Index row_index = (tensor_index/output_columns_number)%output_rows_number;
        //const Index column_index = tensor_index%output_columns_number;

        //type sum = type(0);

        for(Index sum_index = 0; sum_index < next_layers_output_columns; sum_index++)
        {
            //const type delta_element = next_layer_delta(image_index, sum_index);

            //const type weight = next_layers_weights(channel_index + row_index*channels_number + column_index*channels_number*output_rows_number, sum_index);

            //sum += delta_element*weight;
        }

        //hidden_delta(image_index, channel_index, row_index, column_index) = sum;
    }

    return hidden_delta;
}


Tensor<type, 4> PoolingLayer::calculate_hidden_delta_probabilistic(ProbabilisticLayer* next_layer_pointer,
                                                                   const Tensor<type, 4>&,
                                                                   const Tensor<type, 4>&,
                                                                   const Tensor<type, 4>& next_layer_delta) const
{
    // Current layer's values

    const Index images_number = next_layer_delta.dimension(0);
    const Index channels_number = get_inputs_channels_number();
    const Index output_rows_number = get_outputs_rows_number();
    const Index output_columns_number = get_outputs_columns_number();

    // Next layer's values

    const Index next_layers_output_columns = next_layer_delta.dimension(1);

    const Tensor<type, 2> next_layers_weights = next_layer_pointer->get_synaptic_weights();

    // Hidden delta calculation.
    // As in the perceptron case, the mapping from the probabilistic layer's flattened
    // inputs back to this layer's outputs is not implemented in this version; the
    // intended computation is kept below as comments and a zero delta is returned.

    Tensor<type, 4> hidden_delta(images_number, channels_number, output_rows_number, output_columns_number);

    hidden_delta.setZero();

    const Index size = hidden_delta.size();

    #pragma omp parallel for

    for(Index tensor_index = 0; tensor_index < size; tensor_index++)
    {
        //const Index image_index = tensor_index/(channels_number*output_rows_number*output_columns_number);
        //const Index channel_index = (tensor_index/(output_rows_number*output_columns_number))%channels_number;
        //const Index row_index = (tensor_index/output_columns_number)%output_rows_number;
        //const Index column_index = tensor_index%output_columns_number;

        //type sum = type(0);

        for(Index sum_index = 0; sum_index < next_layers_output_columns; sum_index++)
        {
            //const type delta_element = next_layer_delta(image_index, sum_index);

            //const type weight = next_layers_weights(channel_index + row_index*channels_number + column_index*channels_number*output_rows_number, sum_index);

            //sum += delta_element*weight;
        }

        //hidden_delta(image_index, channel_index, row_index, column_index) = sum;
    }

    return hidden_delta;
}


Tensor<type, 1> PoolingLayer::calculate_error_gradient(const Tensor<type, 2>&,
                                                       const LayerForwardPropagation&,
                                                       const Tensor<type, 2>&)
{
    return Tensor<type, 1>();
}


/// Returns the number of neurons the layer applies to an image.

Index PoolingLayer::get_neurons_number() const
{
    return get_outputs_rows_number()*get_outputs_columns_number();
}


/// Returns the layer's outputs dimensions.

Tensor<Index, 1> PoolingLayer::get_outputs_dimensions() const
{
    Tensor<Index, 1> outputs_dimensions(3);
    outputs_dimensions.setZero();

//    outputs_dimensions[0] = input_variables_dimensions[0];
//    outputs_dimensions[1] = get_outputs_rows_number();
//    outputs_dimensions[2] = get_outputs_columns_number();

    return outputs_dimensions;
}


/// Returns the number of inputs of the layer.

Index PoolingLayer::get_inputs_number() const
{
    return input_variables_dimensions.size();
}


/// Returns the number of channels of the layer's input.

Index PoolingLayer::get_inputs_channels_number() const
{
//    return input_variables_dimensions[0];

    return 0;
}


/// Returns the number of rows of the layer's input.

Index PoolingLayer::get_inputs_rows_number() const
{
//    return input_variables_dimensions[1];

    return 0;
}


/// Returns the number of columns of the layer's input.

Index PoolingLayer::get_inputs_columns_number() const
{
//    return input_variables_dimensions[2];

    return 0;
}


/// Returns the number of rows of the layer's output.

Index PoolingLayer::get_outputs_rows_number() const
{
//    return (input_variables_dimensions[1] - pool_rows_number)/row_stride + 1;

    return 0;
}


/// Returns the number of columns of the layer's output.

Index PoolingLayer::get_outputs_columns_number() const
{
//    return (input_variables_dimensions[2] - pool_columns_number)/column_stride + 1;

    return 0;
}


/// Returns the padding width.

Index PoolingLayer::get_padding_width() const
{
    return padding_width;
}


/// Returns the pooling filter's row stride.

Index PoolingLayer::get_row_stride() const
{
    return row_stride;
}


/// Returns the pooling filter's column stride.

Index PoolingLayer::get_column_stride() const
{
    return column_stride;
}


/// Returns the number of rows of the pooling filter.

Index PoolingLayer::get_pool_rows_number() const
{
    return pool_rows_number;
}


/// Returns the number of columns of the pooling filter.

Index PoolingLayer::get_pool_columns_number() const
{
    return pool_columns_number;
}


/// Returns the number of parameters of the layer.

Index PoolingLayer::get_parameters_number() const
{
    return 0;
}


/// Returns the layer's parameters.

Tensor<type, 1> PoolingLayer::get_parameters() const
{
    return Tensor<type, 1>();
}


/// Returns the pooling method.

PoolingLayer::PoolingMethod PoolingLayer::get_pooling_method() const
{
    return pooling_method;
}


/// Sets the dimensions of the layer's input.
/// @param new_input_variables_dimensions New input dimensions (channels, rows, columns).

void PoolingLayer::set_input_variables_dimensions(const Tensor<Index, 1>& new_input_variables_dimensions)
{
    input_variables_dimensions = new_input_variables_dimensions;
}


/// Sets the padding width.
/// @param new_padding_width New padding width.

void PoolingLayer::set_padding_width(const Index& new_padding_width)
{
    padding_width = new_padding_width;
}


/// Sets the pooling filter's row stride.
/// @param new_row_stride New row stride.

void PoolingLayer::set_row_stride(const Index& new_row_stride)
{
    row_stride = new_row_stride;
}


/// Sets the pooling filter's column stride.
/// @param new_column_stride New column stride.

void PoolingLayer::set_column_stride(const Index& new_column_stride)
{
    column_stride = new_column_stride;
}


/// Sets the pooling filter's size.
/// @param new_pool_rows_number New number of rows of the pooling filter.
/// @param new_pool_columns_number New number of columns of the pooling filter.

void PoolingLayer::set_pool_size(const Index& new_pool_rows_number,
                                 const Index& new_pool_columns_number)
{
    pool_rows_number = new_pool_rows_number;

    pool_columns_number = new_pool_columns_number;
}


/// Sets the pooling method.
/// @param new_pooling_method New pooling method.

void PoolingLayer::set_pooling_method(const PoolingMethod& new_pooling_method)
{
    pooling_method = new_pooling_method;
}


/// Sets the layer type to Layer::Pooling.

void PoolingLayer::set_default()
{
    layer_type = Layer::Type::Pooling;
}

}

// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2021 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.

// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA