/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NENormalizationLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/AccessWindowStatic.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEFixedPoint.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/NormalizationHelpers.h"
#include "src/core/helpers/WindowHelpers.h"

namespace arm_compute
{
namespace
{
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *input_squared, const ITensorInfo *output, const NormalizationLayerInfo &norm_info)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, input_squared, output);
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);

    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, input_squared);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, input_squared);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(!(norm_info.norm_size() % 2), "Normalization size should be odd");

    // Checks performed when output is configured
    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
    }

    return Status{};
}

} // namespace

NENormalizationLayerKernel::NENormalizationLayerKernel()
    : _func(nullptr), _input(nullptr), _input_squared(nullptr), _output(nullptr), _norm_info(NormType::IN_MAP_1D)
{
}

void NENormalizationLayerKernel::configure(const ITensor *input, const ITensor *input_squared, ITensor *output, NormalizationLayerInfo norm_info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, input_squared, output);
    // Output tensor auto initialization if not yet initialized
    auto_init_if_empty(*output->info(), *input->info());

    // Perform validation step
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), input_squared->info(), output->info(), norm_info));

    const unsigned int norm_idx = get_normalization_dimension_index(input->info()->data_layout(), norm_info);

    _input         = input;
    _input_squared = input_squared;
    _output        = output;
    _norm_info     = norm_info;

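    // Select the normalization function: the template arguments encode the data type,
    // the number of elements processed per vector iteration, the dimension the
    // normalization window slides along (norm_idx) and whether 2D in-map
    // normalization (accumulation over neighbouring rows as well) is required.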
    switch(_input->info()->data_type())
    {
        case DataType::F32:
        {
            switch(norm_idx)
            {
                case 0:
                {
                    if(norm_info.type() == NormType::IN_MAP_2D)
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, true>;
                    }
                    else
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 0, false>;
                    }
                    break;
                }
                case 1:
                    if(norm_info.type() == NormType::IN_MAP_2D)
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, true>;
                    }
                    else
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float, 4, 1, false>;
                    }
                    break;
                case 2:
                    _func = &NENormalizationLayerKernel::normalize_float<float, 4, 2, false>;
                    break;
                default:
                    break;
            }
            break;
        }
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
        {
            switch(norm_idx)
            {
                case 0:
                {
                    if(norm_info.type() == NormType::IN_MAP_2D)
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, true>;
                    }
                    else
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 0, false>;
                    }
                    break;
                }
                case 1:
                    if(norm_info.type() == NormType::IN_MAP_2D)
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, true>;
                    }
                    else
                    {
                        _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 1, false>;
                    }
                    break;
                case 2:
                    _func = &NENormalizationLayerKernel::normalize_float<float16_t, 8, 2, false>;
                    break;
                default:
                    break;
            }
            break;
        }
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */
        default:
            ARM_COMPUTE_ERROR("NOT SUPPORTED!");
    }

    // Configure kernel window
    Window      win = calculate_max_window(*input->info(), Steps());
    Coordinates coord;
    coord.set_num_dimensions(output->info()->num_dimensions());
    output->info()->set_valid_region(ValidRegion(coord, output->info()->tensor_shape()));
    INEKernel::configure(win);
}

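/** Apply normalization over the elements selected by the given window.
 *
 * For each element the kernel computes
 *     out = in / (kappa + scale_coeff * sum(in_squared))^beta,
 * where the sum runs over a neighbourhood of norm_size elements along the
 * dimension selected by @p dim (and, when @p do_2D_norm is true, over the
 * neighbouring rows as well).
 *
 * @tparam T          Element data type (float or float16_t).
 * @tparam S          Number of elements processed per vector iteration.
 * @tparam dim        Dimension the normalization window slides along.
 * @tparam do_2D_norm True to perform 2D in-map normalization.
 */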
template <typename T, unsigned int S, unsigned int dim, bool do_2D_norm>
void NENormalizationLayerKernel::normalize_float(const Window &window)
{
    /** NEON vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;

    Window win(window);
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());
    const int  window_step_x  = S;

    Iterator input(_input, win);
    Iterator input_squared(_input_squared, win);
    Iterator output(_output, win);

    const int dim_y                      = _input->info()->data_layout() == DataLayout::NCHW ? 1 : 2;
    const int radius                     = _norm_info.norm_size() / 2;
    const int input_squared_stride_x     = _input_squared->info()->strides_in_bytes()[0];
    const int input_squared_stride_slice = _input_squared->info()->strides_in_bytes()[dim];
    const int input_squared_stride_row   = _input_squared->info()->strides_in_bytes()[dim_y];

    const int max_right  = _input->info()->dimension(dim) - 1;
    const int max_bottom = _input->info()->dimension(dim_y) - 1;

    const auto coeff_vec = wrapper::vdup_n(static_cast<T>(_norm_info.scale_coeff()), ExactTagType{});
    const auto beta_vec  = wrapper::vdup_n(static_cast<T>(_norm_info.beta()), ExactTagType{});
    const auto kappa_vec = wrapper::vdup_n(static_cast<T>(_norm_info.kappa()), ExactTagType{});

    auto sequential_normalization = [&](const int x, const Coordinates & id, const int current_row, const int first_row, const int last_row, const T * input_ptr, const uint8_t *input_squared_start_ptr,
                                        T * output_ptr)
    {
        const int current_slice = dim == 0 ? x : id[dim];
        const int first_slice   = std::max(current_slice - radius, 0);
        const int last_slice    = std::min(current_slice + radius, max_right);

        const uint8_t *const input_squared_x_ptr = input_squared_start_ptr + x * input_squared_stride_x;
        // Accumulate 2D In-Map values
        auto accu = static_cast<T>(0.f);
        for(int j = first_row; j <= last_row; ++j)
        {
            // Compute row displacement
            const uint8_t *const input_squared_ptr = input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
            for(int i = first_slice; i <= last_slice; ++i)
            {
                accu += *reinterpret_cast<const T *>(input_squared_ptr + (i - current_slice) * input_squared_stride_slice);
            }
        }

        // Normalize
        const auto normalized       = std::pow(accu * static_cast<T>(_norm_info.scale_coeff()) + static_cast<T>(_norm_info.kappa()), _norm_info.beta());
        const auto normalized_pixel = (*(input_ptr + x)) / normalized;
        *(output_ptr + x)           = normalized_pixel;
    };

    execute_window_loop(win, [&](const Coordinates & id)
    {
        const auto input_ptr  = reinterpret_cast<const T *>(input.ptr());
        auto       output_ptr = reinterpret_cast<T *>(output.ptr());

        // Get range to normalize
        const int current_row = do_2D_norm ? id[dim_y] : 0;
        const int first_row   = do_2D_norm ? std::max(current_row - radius, 0) : 0;
        const int last_row    = do_2D_norm ? std::min(current_row + radius, max_bottom) : 0;

        int x = window_start_x;
        // Compute the first elements serially when the normalization runs along the x dimension
        for(; x < radius && x < window_end_x && dim == 0; ++x)
        {
            sequential_normalization(x, id, current_row, first_row, last_row, input_ptr, input_squared.ptr(), output_ptr);
        }

        // Compute vectorized
        for(; x <= window_end_x - window_step_x - radius; x += window_step_x)
        {
            const int current_slice = dim == 0 ? x : id[dim];
            const int first_slice   = std::max(current_slice - radius, 0);
            const int last_slice    = std::min(current_slice + radius, max_right);

            const uint8_t *const input_squared_x_ptr = input_squared.ptr() + x * input_squared_stride_x;
            // Accumulate 2D In-Map values
            auto accu = wrapper::vdup_n(static_cast<T>(0.f), ExactTagType{});
            for(int j = first_row; j <= last_row; ++j)
            {
                // Compute row displacement
                const uint8_t *const input_squared_ptr = input_squared_x_ptr + (j - current_row) * input_squared_stride_row;
                for(int i = first_slice; i <= last_slice; ++i)
                {
                    accu = wrapper::vadd(accu, wrapper::vloadq(reinterpret_cast<const T *>(input_squared_ptr + (i - current_slice) * input_squared_stride_slice)));
                }
            }

            // Normalize
            const auto normalized       = wrapper::vpow(wrapper::vmla(kappa_vec, coeff_vec, accu), beta_vec);
            const auto normalized_pixel = wrapper::vmul(wrapper::vloadq(input_ptr + x), wrapper::vinv(normalized));
            wrapper::vstore(reinterpret_cast<T *>(output_ptr + x), normalized_pixel);
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            sequential_normalization(x, id, current_row, first_row, last_row, input_ptr, input_squared.ptr(), output_ptr);
        }
    },
    input, input_squared, output);
}

Status NENormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *input_squared, const ITensorInfo *output, const NormalizationLayerInfo norm_info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, input_squared, output, norm_info));

    return Status{};
}

void NENormalizationLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_func == nullptr);

    // Run function
    (this->*_func)(window);
}
} // namespace arm_compute