sum_squared_error.cpp
// OpenNN: Open Neural Networks Library
// www.opennn.net
//
//   S U M   S Q U A R E D   E R R O R   C L A S S
//
//   Artificial Intelligence Techniques SL
//   artelnics@artelnics.com

9#include "sum_squared_error.h"
10
11namespace OpenNN
12{
13
17
19{
20}
21
22
28
29SumSquaredError::SumSquaredError(NeuralNetwork* new_neural_network_pointer, DataSet* new_data_set_pointer)
30 : LossIndex(new_neural_network_pointer, new_data_set_pointer)
31{
32}
33
34
36
38{
39}
40
41
42void SumSquaredError::calculate_error(const DataSetBatch&,
44 LossIndexBackPropagation& back_propagation) const
45{
46 Tensor<type, 0> sum_squared_error;
47
48 sum_squared_error.device(*thread_pool_device) = back_propagation.errors.contract(back_propagation.errors, SSE);
49
50 back_propagation.error = sum_squared_error(0);
51}
52
53
54void SumSquaredError::calculate_error_lm(const DataSetBatch&,
55 const NeuralNetworkForwardPropagation&,
56 LossIndexBackPropagationLM& back_propagation) const
57{
58 Tensor<type, 0> sum_squared_error;
59
60 sum_squared_error.device(*thread_pool_device) = (back_propagation.squared_errors*back_propagation.squared_errors).sum();
61
62 back_propagation.error = sum_squared_error(0);
63}
64
65
66void SumSquaredError::calculate_output_delta(const DataSetBatch&,
67 NeuralNetworkForwardPropagation&,
68 LossIndexBackPropagation& back_propagation) const
69{
70 #ifdef OPENNN_DEBUG
71
72 check();
73
74 #endif
75
76 const Index trainable_layers_number = neural_network_pointer->get_trainable_layers_number();
77
78 LayerBackPropagation* output_layer_back_propagation = back_propagation.neural_network.layers(trainable_layers_number-1);
79
80 Layer* output_layer_pointer = output_layer_back_propagation->layer_pointer;
81
82 const type coefficient = static_cast<type>(2.0);
83
84 switch(output_layer_pointer->get_type())
85 {
86 case Layer::Type::Perceptron:
87 {
88 PerceptronLayerBackPropagation* perceptron_layer_back_propagation
89 = static_cast<PerceptronLayerBackPropagation*>(output_layer_back_propagation);
90
91 perceptron_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
92 }
93 break;
94
95 case Layer::Type::Probabilistic:
96 {
97 ProbabilisticLayerBackPropagation* probabilistic_layer_back_propagation
98 = static_cast<ProbabilisticLayerBackPropagation*>(output_layer_back_propagation);
99
100 probabilistic_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
101 }
102 break;
103
104 case Layer::Type::Recurrent:
105 {
106 RecurrentLayerBackPropagation* recurrent_layer_back_propagation
107 = static_cast<RecurrentLayerBackPropagation*>(output_layer_back_propagation);
108
109 recurrent_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
110 }
111 break;
112
113 case Layer::Type::LongShortTermMemory:
114 {
115 LongShortTermMemoryLayerBackPropagation* long_short_term_memory_layer_back_propagation
116 = static_cast<LongShortTermMemoryLayerBackPropagation*>(output_layer_back_propagation);
117
118 long_short_term_memory_layer_back_propagation->delta.device(*thread_pool_device) = coefficient*back_propagation.errors;
119 }
120 break;
121
122 default: break;
123 }
124}
125
126
127void SumSquaredError::calculate_output_delta_lm(const DataSetBatch&,
128 NeuralNetworkForwardPropagation&,
129 LossIndexBackPropagationLM& loss_index_back_propagation) const
130{
131#ifdef OPENNN_DEBUG
132
133 check();
134
135#endif
136
137 const Index trainable_layers_number = neural_network_pointer->get_trainable_layers_number();
138
139 LayerBackPropagationLM* output_layer_back_propagation = loss_index_back_propagation.neural_network.layers(trainable_layers_number-1);
140
141 Layer* output_layer_pointer = output_layer_back_propagation->layer_pointer;
142
143 switch(output_layer_pointer->get_type())
144 {
145 case Layer::Type::Perceptron:
146 {
147 PerceptronLayerBackPropagationLM* perceptron_layer_back_propagation
148 = static_cast<PerceptronLayerBackPropagationLM*>(output_layer_back_propagation);
149
150 memcpy(perceptron_layer_back_propagation->delta.data(),
151 loss_index_back_propagation.errors.data(),
152 static_cast<size_t>(loss_index_back_propagation.errors.size())*sizeof(type));
153
154 divide_columns(perceptron_layer_back_propagation->delta, loss_index_back_propagation.squared_errors);
155 }
156 break;
157
158 case Layer::Type::Probabilistic:
159 {
160 ProbabilisticLayerBackPropagationLM* probabilistic_layer_back_propagation
161 = static_cast<ProbabilisticLayerBackPropagationLM*>(output_layer_back_propagation);
162
163 memcpy(probabilistic_layer_back_propagation->delta.data(),
164 loss_index_back_propagation.errors.data(),
165 static_cast<size_t>(loss_index_back_propagation.errors.size())*sizeof(type));
166
167 divide_columns(probabilistic_layer_back_propagation->delta, loss_index_back_propagation.squared_errors);
168 }
169 break;
170
171 default:
172 {
173 ostringstream buffer;
174
175 buffer << "OpenNN Exception: MeanSquaredError class.\n"
176 << "Levenberg-Marquardt can only be used with Perceptron and Probabilistic layers.\n";
177
178 throw logic_error(buffer.str());
179 }
180 }
181}
182
183
184void SumSquaredError::calculate_error_gradient_lm(const DataSetBatch& ,
185 LossIndexBackPropagationLM& loss_index_back_propagation_lm) const
186{
187#ifdef OPENNN_DEBUG
188
189 check();
190
191#endif
192
193 const type coefficient = (static_cast<type>(2.0));
194
195 loss_index_back_propagation_lm.gradient.device(*thread_pool_device)
196 = loss_index_back_propagation_lm.squared_errors_jacobian.contract(loss_index_back_propagation_lm.squared_errors, AT_B);
197
198 loss_index_back_propagation_lm.gradient.device(*thread_pool_device)
199 = coefficient*loss_index_back_propagation_lm.gradient;
200}
201
202
203void SumSquaredError::calculate_error_hessian_lm(const DataSetBatch&,
204 LossIndexBackPropagationLM& loss_index_back_propagation_lm) const
205{
206 #ifdef OPENNN_DEBUG
207
208 check();
209
210 #endif
211
212 const type coefficient = static_cast<type>(2.0);
213
214 loss_index_back_propagation_lm.hessian.device(*thread_pool_device)
215 = loss_index_back_propagation_lm.squared_errors_jacobian.contract(loss_index_back_propagation_lm.squared_errors_jacobian, AT_B);
216
217 loss_index_back_propagation_lm.hessian.device(*thread_pool_device)
218 = coefficient*loss_index_back_propagation_lm.hessian;
219}
220
221
223
225{
226 return "SUM_SQUARED_ERROR";
227}
228
229
231
233{
234 return "Sum squared error";
235}
236
237
240
242{
243 // Error type
244
245 file_stream.OpenElement("SumSquaredError");
246
247 file_stream.CloseElement();
248}
249
250
253
255{
256 const tinyxml2::XMLElement* root_element = document.FirstChildElement("SumSquaredError");
257
258 if(!root_element)
259 {
260 ostringstream buffer;
261
262 buffer << "OpenNN Exception: SumSquaredError class.\n"
263 << "void from_XML(const tinyxml2::XMLDocument&) method.\n"
264 << "Sum squared element is nullptr.\n";
265
266 throw logic_error(buffer.str());
267 }
268}
269
270}
271
// OpenNN: Open Neural Networks Library.
// Copyright(C) 2005-2021 Artificial Intelligence Techniques, SL.
//
// This library is free software; you can redistribute it and/or
// modify it under the terms of the GNU Lesser General Public
// License as published by the Free Software Foundation; either
// version 2.1 of the License, or any later version.
//
// This library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
// Lesser General Public License for more details.
// You should have received a copy of the GNU Lesser General Public
// License along with this library; if not, write to the Free Software
// Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
This class represents the concept of data set for data modelling problems, such as approximation,...
Definition: data_set.h:57
This abstract class represents the concept of loss index composed of an error term and a regularizati...
Definition: loss_index.h:48
NeuralNetwork * neural_network_pointer
Pointer to a neural network object.
Definition: loss_index.h:254
void check() const
Definition: loss_index.cpp:295
virtual ~SumSquaredError()
Destructor.
void from_XML(const tinyxml2::XMLDocument &)
string get_error_type() const
Returns a string with the name of the sum squared error loss type, "SUM_SQUARED_ERROR".
void write_XML(tinyxml2::XMLPrinter &) const
string get_error_type_text() const
Returns a string with the name of the sum squared error loss type in text format.
virtual void CloseElement(bool compactMode=false)
If streaming, close the Element.
Definition: tinyxml2.cpp:2834