/*
 * Copyright (c) 2019-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEInstanceNormalizationLayerKernel.h"

#include "arm_compute/core/Error.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/KernelDescriptors.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>

namespace arm_compute
{
namespace
{
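// Accumulates a vector of input values into running vector sums of x and x^2,
// which are later reduced to compute the per-plane mean and variance.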
template <typename InputType, typename AccType = InputType>
void vector_float_sum(AccType &result, AccType &result_square, const InputType &inputs)
{
    result        = wrapper::vadd(result, inputs);
    result_square = wrapper::vadd(result_square, wrapper::vmul(inputs, inputs));
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
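// FP16 specialization: accumulate in FP32 to limit rounding error. The 8-lane
// half-precision vector is split into two 4-lane halves, each converted to FP32
// before being added to the FP32 accumulators.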
template <>
inline void vector_float_sum(float32x4_t &result, float32x4_t &result_square, const float16x8_t &inputs)
{
    vector_float_sum(result, result_square, wrapper::vcvt<float>(wrapper::vgetlow(inputs)));
    vector_float_sum(result, result_square, wrapper::vcvt<float>(wrapper::vgethigh(inputs)));
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

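// Applies the normalization to a vector of input values:
// out = (in - mean) * multiplier + beta, where multiplier = gamma / sqrt(var + epsilon).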
template <typename InputType, typename AccType = InputType>
InputType vector_float_norm(const InputType &inputs, const AccType &vec_mean, const AccType &vec_multip, const AccType &vec_beta)
{
    return wrapper::vadd(wrapper::vmul(wrapper::vsub(inputs, vec_mean), vec_multip), vec_beta);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
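// FP16 specialization: normalize in FP32 for accuracy, then convert the two
// halves back to FP16 and recombine them into a single 8-lane vector.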
template <>
inline float16x8_t vector_float_norm(const float16x8_t &inputs, const float32x4_t &vec_mean, const float32x4_t &vec_multip, const float32x4_t &vec_beta)
{
    const auto input_low   = wrapper::vcvt<float>(wrapper::vgetlow(inputs));
    const auto input_high  = wrapper::vcvt<float>(wrapper::vgethigh(inputs));
    const auto result_low  = wrapper::vcvt<float16_t>(vector_float_norm(input_low, vec_mean, vec_multip, vec_beta));
    const auto result_high = wrapper::vcvt<float16_t>(vector_float_norm(input_high, vec_mean, vec_multip, vec_beta));
    float16x8_t result     = wrapper::vcombine(result_low, result_high);

    return result;
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

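// Instance normalization for NCHW tensors: for each (channel, batch) plane the mean
// and variance are computed over the H x W elements, then every element is transformed
// as out = gamma * (in - mean) / sqrt(var + epsilon) + beta.
// AccType selects the accumulation type; with T = float16_t and AccType = float the
// statistics are accumulated in FP32 (mixed precision).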
template <typename T, typename AccType = T>
void instance_normalization_nchw(ITensor *input, ITensor *output, float gamma, float beta, float epsilon, const Window &window)
{
    /** NEON vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<T, wrapper::traits::BitWidth::W128>;

    // Clear X and Y dimensions on the execution window as we handle the planes manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));
    win.set(Window::DimY, Window::Dimension(0, 1, 1));

    constexpr int      window_step_x  = 16 / sizeof(T);
    const unsigned int elements_plane = input->info()->dimension(0) * output->info()->dimension(1);

    Iterator input_it(input, win);
    execute_window_loop(win, [&](const Coordinates & id)
    {
        Window win_plane = window;
        win_plane.set(Window::DimX, Window::Dimension(0, 1, 1));
        win_plane.set(Window::DimZ, Window::Dimension(id[2], id[2] + 1, 1));
        win_plane.set(3, Window::Dimension(id[3], id[3] + 1, 1));

        Iterator input_plane_it(input, win_plane);
        Iterator output_plane_it(output, win_plane);

        auto sum_h_w         = static_cast<AccType>(0.f);
        auto sum_squares_h_w = static_cast<AccType>(0.f);

        execute_window_loop(win_plane, [&](const Coordinates &)
        {
            const auto input_ptr = reinterpret_cast<const T *>(input_plane_it.ptr());

            auto vec_sum_h_w         = wrapper::vdup_n(static_cast<AccType>(0.f), ExactTagType{});
            auto vec_sum_squares_h_w = wrapper::vdup_n(static_cast<AccType>(0.f), ExactTagType{});

            // Compute window_step_x elements per iteration
            int x = window.x().start();
            for(; x <= (window.x().end() - window_step_x); x += window_step_x)
            {
                auto vec_input_val = wrapper::vloadq(input_ptr + x);
                vector_float_sum(vec_sum_h_w, vec_sum_squares_h_w, vec_input_val);
            }

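            // Reduce the lane-wise partial sums to scalars: pairwise-add the high and
            // low halves, then pairwise-add again so lane 0 holds the full horizontal sum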
            auto vec2_sum_h_w         = wrapper::vpadd(wrapper::vgethigh(vec_sum_h_w), wrapper::vgetlow(vec_sum_h_w));
            auto vec2_sum_squares_h_w = wrapper::vpadd(wrapper::vgethigh(vec_sum_squares_h_w), wrapper::vgetlow(vec_sum_squares_h_w));

            vec2_sum_h_w         = wrapper::vpadd(vec2_sum_h_w, vec2_sum_h_w);
            vec2_sum_squares_h_w = wrapper::vpadd(vec2_sum_squares_h_w, vec2_sum_squares_h_w);

            sum_h_w += wrapper::vgetlane(vec2_sum_h_w, 0);
            sum_squares_h_w += wrapper::vgetlane(vec2_sum_squares_h_w, 0);

            // Compute left-over elements
            for(; x < window.x().end(); ++x)
            {
                const auto value = static_cast<AccType>(*(input_ptr + x));
                sum_h_w += value;
                sum_squares_h_w += value * value;
            }
        },
        input_plane_it, output_plane_it);

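        // Mean and variance over the H x W plane: mean = sum / N, var = E[x^2] - mean^2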
        const auto mean_h_w = sum_h_w / elements_plane;
        const auto var_h_w  = sum_squares_h_w / elements_plane - mean_h_w * mean_h_w;

        const auto multip_h_w     = gamma / std::sqrt(var_h_w + epsilon);
        const auto vec_mean_h_w   = wrapper::vdup_n(static_cast<AccType>(mean_h_w), ExactTagType{});
        const auto vec_multip_h_w = wrapper::vdup_n(static_cast<AccType>(multip_h_w), ExactTagType{});
        const auto vec_beta       = wrapper::vdup_n(static_cast<AccType>(beta), ExactTagType{});

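        // Second pass: apply out = (in - mean) * multip + beta to every element of the plane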
        execute_window_loop(win_plane, [&](const Coordinates &)
        {
            auto input_ptr  = reinterpret_cast<T *>(input_plane_it.ptr());
            auto output_ptr = reinterpret_cast<T *>(output_plane_it.ptr());

            // Compute window_step_x elements per iteration
            int x = window.x().start();
            for(; x <= (window.x().end() - window_step_x); x += window_step_x)
            {
                const auto vec_val        = wrapper::vloadq(input_ptr + x);
                const auto normalized_vec = vector_float_norm(vec_val, vec_mean_h_w, vec_multip_h_w, vec_beta);
                wrapper::vstore(output_ptr + x, normalized_vec);
            }

            // Compute left-over elements
            for(; x < window.x().end(); ++x)
            {
                const auto val    = static_cast<AccType>(*(input_ptr + x));
                *(output_ptr + x) = static_cast<T>((val - mean_h_w) * multip_h_w + beta);
            }
        },
        input_plane_it, output_plane_it);
    },
    input_it);
}

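// Validates the kernel arguments: F16/F32 data types, NCHW layout, non-zero epsilon,
// and matching shape, data type, layout and channel count between input and output
// when the output has already been initialized.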
Status validate_arguments(const ITensorInfo *input, const ITensorInfo *output, float gamma, float beta, float epsilon)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(input);
    ARM_COMPUTE_UNUSED(gamma);
    ARM_COMPUTE_UNUSED(beta);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(epsilon == 0.f, "Epsilon must not be equal to 0");

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_NOT_IN(input, DataType::F16, DataType::F32);
    ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->data_layout() == DataLayout::NHWC, "NHWC data layout is not supported by the kernel directly");

    if(output != nullptr && output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_LAYOUT(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(input->num_channels() != output->num_channels(), "Input and output have different number of channels");
    }
    return Status{};
}

std::tuple<Status, Window> validate_and_configure_window(ITensorInfo *input, ITensorInfo *output)
{
    // We handle the planes manually
    Window win = calculate_max_window(*input, Steps(1));

    // Output auto initialization if not yet initialized
    auto_init_if_empty(*output, input->tensor_shape(), 1, input->data_type());

    // NEInstanceNormalizationLayerKernel doesn't need padding so update_window_and_padding() can be skipped
    Coordinates coord;
    coord.set_num_dimensions(output->num_dimensions());
    output->set_valid_region(ValidRegion(coord, output->tensor_shape()));
    return std::make_tuple(Status{}, win);
}
} // namespace

NEInstanceNormalizationLayerKernel::NEInstanceNormalizationLayerKernel()
    : _func(nullptr), _input(nullptr), _output(nullptr), _gamma(1), _beta(0), _epsilon(1e-12)
{
}

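// A rough usage sketch (illustrative only; "src", "dst" and the parameter values are
// placeholders, and the kernel is normally driven through NEInstanceNormalizationLayer):
//   NEInstanceNormalizationLayerKernel kernel;
//   kernel.configure(&src, &dst, InstanceNormalizationLayerKernelInfo(1.f /*gamma*/, 0.f /*beta*/, 1e-12 /*epsilon*/, false /*use_mixed_precision*/));
//   NEScheduler::get().schedule(&kernel, Window::DimZ);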
void NEInstanceNormalizationLayerKernel::configure(ITensor *input, ITensor *output, const InstanceNormalizationLayerKernelInfo &info)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input);

    _input               = input;
    _output              = output == nullptr ? input : output;
    _gamma               = info.gamma;
    _beta                = info.beta;
    _epsilon             = info.epsilon;
    _use_mixed_precision = info.use_mixed_precision;

    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(_input->info(), _output->info(), _gamma, _beta, _epsilon));

    // Select the kernel function based on the data type and the mixed-precision flag
    if(_input->info()->data_type() == DataType::F32)
    {
        _func = &instance_normalization_nchw<float>;
    }
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    else if(_input->info()->data_type() == DataType::F16)
    {
        if(_use_mixed_precision)
        {
            _func = &instance_normalization_nchw<float16_t, float>;
        }
        else
        {
            _func = &instance_normalization_nchw<float16_t>;
        }
    }
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    else
    {
        ARM_COMPUTE_ERROR("Unsupported data type");
    }

    // Configure kernel window
    auto win_config = validate_and_configure_window(_input->info(), _output->info());
    ARM_COMPUTE_ERROR_THROW_ON(std::get<0>(win_config));

    INEKernel::configure(std::get<1>(win_config));
}

Status NEInstanceNormalizationLayerKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const InstanceNormalizationLayerKernelInfo &info)
{
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(input, output, info.gamma, info.beta, info.epsilon));
    ARM_COMPUTE_RETURN_ON_ERROR(std::get<0>(validate_and_configure_window(input->clone().get(), (output == nullptr ? input->clone().get() : output->clone().get()))));
    return Status{};
}

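// Executes the kernel: dispatches the function selected in configure() over the given window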
void NEInstanceNormalizationLayerKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    (*_func)(_input, _output, _gamma, _beta, _epsilon, window);
}
} // namespace arm_compute