/*
 * Copyright (c) 2017-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEL2NormalizeLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include "src/core/NEON/wrapper/wrapper.h"
#include <arm_neon.h>
#include <cmath>

namespace arm_compute
{
namespace
{
constexpr int max_input_tensor_dim = 3;

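/** Normalize the input along the X axis.
 *
 * The reduction along X yields one sum-of-squares value per row, so the
 * reciprocal square root is computed once per row, clamped below by epsilon
 * to avoid division by zero, and broadcast across the vector lanes.
 */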
template <typename T, int S>
void l2_normalize_X(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window)
{
    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;

    const int  window_step_x  = 16 / data_size_from_type(in->info()->data_type());
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

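    // Collapse the window over the higher dimensions where possible and force a
    // single iteration in X; the X extent is traversed manually below so the
    // inner loop can be vectorized.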
    Window win_collapsed = window.collapse_if_possible(window, Window::DimZ);
    win_collapsed.set(Window::DimX, Window::Dimension(0, 1, 1));

    Iterator input_it(in, win_collapsed);
    Iterator sum_it(sum, win_collapsed);
    Iterator output_it(out, win_collapsed);

    execute_window_loop(win_collapsed, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const T *>(input_it.ptr());
        const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());

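        // One reduction value per X row: compute 1 / sqrt(max(sum, epsilon)) once
        // and broadcast it to every vector lane.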
        const T    sum_value      = *reinterpret_cast<const T *>(sum_it.ptr());
        const T    norm_value     = static_cast<T>(1.f) / std::sqrt(std::max(sum_value, static_cast<T>(epsilon)));
        const auto vec_norm_value = wrapper::vdup_n(norm_value, ExactTagType{});

        // Compute elements over vector steps
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            wrapper::vstore(out_ptr + x, wrapper::vmul(wrapper::vloadq(in_ptr + x), vec_norm_value));
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            out_ptr[x] = in_ptr[x] * norm_value;
        }
    },
    input_it, sum_it, output_it);
}

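/** Normalize the input along the Y or Z axis.
 *
 * Unlike the X case, each X position has its own reduction value, so the
 * reciprocal square root is evaluated per vector step with wrapper::vinvsqrt.
 */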
template <typename T, int S>
void l2_normalize_YZ(const ITensor *in, const ITensor *sum, ITensor *out, float epsilon, const Window &window, size_t axis)
{
    using ExactTagType = typename wrapper::traits::neon_vector<T, S>::tag_type;

    const int  window_step_x  = 16 / data_size_from_type(in->info()->data_type());
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x   = static_cast<int>(window.x().end());

    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

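    // The sum tensor has extent 1 along the normalization axis, so pin that
    // dimension of its window to stop the iterator from stepping along it.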
    Window window_sum(win);
    window_sum.set(axis, Window::Dimension(0, 0, 0));

    Iterator input_it(in, win);
    Iterator sum_it(sum, window_sum);
    Iterator output_it(out, win);

    const auto vec_eps = wrapper::vdup_n(static_cast<T>(epsilon), ExactTagType{});

    execute_window_loop(win, [&](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const T *>(input_it.ptr());
        const auto sum_ptr = reinterpret_cast<const T *>(sum_it.ptr());
        const auto out_ptr = reinterpret_cast<T *>(output_it.ptr());

        // Compute elements over vector steps
        int x = window_start_x;
        for(; x <= (window_end_x - window_step_x); x += window_step_x)
        {
            const auto vec_norm_value = wrapper::vinvsqrt(wrapper::vmax(wrapper::vloadq(sum_ptr + x), vec_eps));
            wrapper::vstore(out_ptr + x, wrapper::vmul(wrapper::vloadq(in_ptr + x), vec_norm_value));
        }

        // Compute left-over elements
        for(; x < window_end_x; ++x)
        {
            const T norm_value = static_cast<T>(1.f) / std::sqrt(std::max(sum_ptr[x], static_cast<T>(epsilon)));
            out_ptr[x] = in_ptr[x] * norm_value;
        }
    },
    input_it, sum_it, output_it);
}

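/** Validate the input, sum and output tensor infos.
 *
 * Checks for matching F16/F32 data types, a supported normalization axis
 * (at most 2 after wrap-around) and a sum shape equal to the input shape
 * reduced to 1 along that axis.
 */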
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
{
    ARM_COMPUTE_UNUSED(epsilon);

    const uint32_t actual_axis = wrap_around(axis, max_input_tensor_dim);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input, sum, output);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, sum);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(actual_axis > 2, "Actual axis greater than 2 is not supported");
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(actual_axis >= TensorShape::num_max_dimensions, "Actual normalization axis greater than max number of dimensions");

    // Reduce shape on axis
    TensorShape sum_shape = input->tensor_shape();
    sum_shape.set(actual_axis, 1);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(sum->tensor_shape(), sum_shape);

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DIMENSIONS(input->tensor_shape(), output->tensor_shape());
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
    }

    return Status{};
}

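/** Auto-initialize the output tensor info if empty and compute the maximum
 * execution window. The kernel works element-wise without padding, so no
 * border or padding update is needed.
 */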
std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
    Window win = calculate_max_window(*input, Steps());

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());

    // NEL2NormalizeLayerKernel doesn't need padding so update_window_and_padding() can be skipped
    Coordinates coord;
    coord.set_num_dimensions(output->num_dimensions());
    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));

    return std::make_tuple(Status{}, win);
}
} // namespace

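// The default epsilon of 1e-12 matches the lower bound commonly used for L2
// normalization in other frameworks; configure() overwrites it with the
// caller's value.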
NEL2NormalizeLayerKernel::NEL2NormalizeLayerKernel()
    : _input(nullptr), _sum(nullptr), _output(nullptr), _actual_axis(0), _epsilon(1e-12)
{
}

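// Note: "sum" is expected to already contain the sum of squares of "input"
// along "axis"; the NEL2NormalizeLayer function computes it with a SUM_SQUARE
// reduction before scheduling this kernel.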
void NEL2NormalizeLayerKernel::configure(const ITensor *input, const ITensor *sum, ITensor *output, int axis, float epsilon)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, sum, output);
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(input->info(), sum->info(), output->info(), axis, epsilon));

    _input       = input;
    _sum         = sum;
    _output      = output;
    _actual_axis = wrap_around(axis, max_input_tensor_dim);
    _epsilon     = epsilon;

    // Configure kernel window
    auto win_config = validate_and_configure_window(_input->info(), _output->info());
    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

    INEKernel::configure(std::get<1>(win_config));
}

Status NEL2NormalizeLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *sum, const ITensorInfo *output, int axis, float epsilon)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, sum, output, axis, epsilon));
    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), output->clone().get())));

    return Status{};
}

void NEL2NormalizeLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);

    if(_actual_axis > 2)
    {
        ARM_COMPUTE_ERROR("Unsupported normalization axis");
    }

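    // Dispatch on data type and axis: the X path broadcasts one norm per row,
    // the Y/Z path loads a vector of reduction values per step. F16 is only
    // available when built with FP16 vector arithmetic support.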
    switch(_input->info()->data_type())
    {
        case DataType::F32:
            (_actual_axis == Window::DimX) ? l2_normalize_X<float, 4>(_input, _sum, _output, _epsilon, window) : l2_normalize_YZ<float, 4>(_input, _sum, _output, _epsilon, window, _actual_axis);
            break;
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        case DataType::F16:
            (_actual_axis == Window::DimX) ? l2_normalize_X<float16_t, 8>(_input, _sum, _output, _epsilon, window) : l2_normalize_YZ<float16_t, 8>(_input, _sum, _output, _epsilon, window, _actual_axis);
            break;
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
        default:
            ARM_COMPUTE_ERROR("Not implemented");
    }
}
} // namespace arm_compute