/*
 * Copyright (c) 2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEQLSTMLayerNormalizationKernel.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/TensorInfo.h"
#include "arm_compute/core/Utils.h"
#include "arm_compute/core/Validate.h"
#include "arm_compute/core/Window.h"
#include "arm_compute/core/utils/quantization/AsymmHelpers.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEFixedPoint.h"
#include "src/core/NEON/NEMath.h"
#include "src/core/NEON/NESymm.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include "src/core/NEON/kernels/detail/NEActivationFunctionDetail.h"

#include <map>
namespace arm_compute
{
namespace
{
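// Compute the mean and variance of one row from its running sum and sum of squares.
// The mean is kept in Q10 fixed point (scaled by 1024); dividing by 0x100000 (1024^2)
// cancels that scaling, so the variance is E[x^2] - E[x]^2 in plain integer units.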
inline std::pair<int64_t, int64_t> compute_mean_variance(int64_t sum, int64_t sum_sq, uint32_t num_input)
{
    const auto    temp     = static_cast<int64_t>(0x100000) / num_input;
    const auto    mean     = sum * 1024 / static_cast<int64_t>(num_input);
    const int64_t variance = ((sum_sq * temp) - (mean * mean)) / 0x100000;

    return std::make_pair(mean, variance);
}

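// Widen two int32x4 vectors to 64 bits, multiply them lane by lane and add the
// widened 32-bit bias, returning the four 64-bit results as an int64x2x2_t.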
inline int64x2x2_t mul_add(const int32x4_t &a, const int32x4_t &b, const int32x4_t &bias)
{
    using namespace wrapper;
    const int64x2_t a_low  = vmovl(vgetlow(a));
    const int64x2_t a_high = vmovl(vgethigh(a));
    const int64x2_t b_low  = vmovl(vgetlow(b));
    const int64x2_t b_high = vmovl(vgethigh(b));

    const int64_t a_0 = vgetlane(a_low, 0);
    const int64_t a_1 = vgetlane(a_low, 1);
    const int64_t a_2 = vgetlane(a_high, 0);
    const int64_t a_3 = vgetlane(a_high, 1);

    const int64_t b_0 = vgetlane(b_low, 0);
    const int64_t b_1 = vgetlane(b_low, 1);
    const int64_t b_2 = vgetlane(b_high, 0);
    const int64_t b_3 = vgetlane(b_high, 1);

    int64x2x2_t     result;
    const int64x2_t result_0{ a_0 * b_0, a_1 * b_1 };
    const int64x2_t result_1{ a_2 * b_2, a_3 * b_3 };
    result.val[0] = vadd(vmovl(vgetlow(bias)), result_0);
    result.val[1] = vadd(vmovl(vgethigh(bias)), result_1);

    return result;
}
} // namespace

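// Select the compute function for the input data type, initialize the output tensor
// if it is empty, and derive the requantization multiplier/shift from the weight scale.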
void NEQLSTMLayerNormalizationKernel::configure(const ITensor *input, ITensor *output, const ITensor *weight, const ITensor *bias)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);
    ARM_COMPUTE_ERROR_ON(input == output);
    ARM_COMPUTE_ERROR_THROW_ON(validate(input->info(), output->info(), weight->info(), bias->info()));

    static const std::map<DataType, ComputeFuncType> fn_map =
    {
        { DataType::QSYMM16, std::mem_fn(&NEQLSTMLayerNormalizationKernel::compute_qsymm16) },
    };

    _input  = input;
    _output = output;
    _weight = weight;
    _bias   = bias;
    _fn     = fn_map.at(_input->info()->data_type());

    auto_init_if_empty(*_output->info(), *_input->info());
    _output->info()->set_quantization_info(compute_output_qinfo());

    const UniformQuantizationInfo wq_info = _weight->info()->quantization_info().uniform();
    const Status                  s       = quantization::calculate_quantized_multiplier(wq_info.scale, &_output_multiplier, &_output_shift);
    _output_shift *= -1;

    if(!bool(s))
    {
        _output_multiplier = 0;
        _output_shift      = 0;
    }

    Window win = configure_window(output);
    INEKernel::configure(win);
}

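// Record the x-axis range that the compute functions walk manually and collapse
// the x-dimension of the iteration windows accordingly.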
Window NEQLSTMLayerNormalizationKernel::configure_window(ITensor *target)
{
    Window      window = calculate_max_window(*target->info(), Steps());
    Coordinates coord;
    coord.set_num_dimensions(target->info()->num_dimensions());
    target->info()->set_valid_region(ValidRegion(coord, target->info()->tensor_shape()));

    _window_start_x = static_cast<int32_t>(window.x().start());
    _window_end_x   = static_cast<int32_t>(window.x().end());
    _window_step_x  = static_cast<int32_t>(vector_size_byte) / _output->info()->element_size();

    // The input and output windows iterate over the y-axis; the x-axis is handled manually inside the compute functions.
    _inout_window = window;
    _inout_window.set(Window::DimX, Window::Dimension(0, 1, 1));

    // The weight and bias windows must not iterate along the y-axis since those tensors are 1D.
    _weight_window = _inout_window;
    _weight_window.set(Window::DimY, Window::Dimension(0, 1, 1));

    return window;
}

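// Check data types, dimensions and shapes of all tensors before configuration.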
Status NEQLSTMLayerNormalizationKernel::validate(const ITensorInfo *input, const ITensorInfo *output, const ITensorInfo *weight, const ITensorInfo *bias)
{
    ARM_COMPUTE_UNUSED(output, bias, weight, input);

    ARM_COMPUTE_ERROR_ON_NULLPTR(input, weight, bias, output);

    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(input, 1, DataType::QSYMM16);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(weight, 1, DataType::QSYMM16);
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(bias, 1, DataType::S32);

    ARM_COMPUTE_RETURN_ERROR_ON(input->num_dimensions() > max_input_dimension);
    ARM_COMPUTE_RETURN_ERROR_ON(weight->num_dimensions() > max_weight_dimension);
    ARM_COMPUTE_RETURN_ERROR_ON(bias->num_dimensions() > max_bias_dimension);

    ARM_COMPUTE_RETURN_ERROR_ON(input->tensor_shape().x() != weight->tensor_shape().x());
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(weight, bias);

    if(output->total_size() != 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(input, output);
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_SHAPES(input, output);
    }

    return Status{};
}

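// Dispatch to the compute function selected during configure().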
void NEQLSTMLayerNormalizationKernel::run(const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(window, info);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON_MSG(!_fn, "internal function is not defined for computation");

    _fn(*this);
}

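// The output always uses a fixed quantization scale of 1/4096 (2^-12).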
inline QuantizationInfo NEQLSTMLayerNormalizationKernel::compute_output_qinfo()
{
    return QuantizationInfo(1.f / 4096);
}

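// Accumulate the sum and the sum of squares of one row of QSYMM16 values.
// AArch64 uses across-vector adds (vaddv); AArch32 falls back to pairwise long
// adds. A scalar tail loop handles the leftover elements.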
inline std::pair<int64_t, int64_t> NEQLSTMLayerNormalizationKernel::sum_qsymm16(const int16_t *input_ptr)
{
    ARM_COMPUTE_ERROR_ON(!input_ptr);

    using AccType       = int64_t;
    using InputDataType = int16_t;

    AccType sum{ 0 };
    AccType sum_sq{ 0 };

    int32_t x = _window_start_x;
    for(; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
    {
        using namespace wrapper;
        const int16x8_t val      = vloadq(input_ptr + x);
        const int32x4_t val_low  = vmovl(vgetlow(val));
        const int32x4_t val_high = vmovl(vgethigh(val));

#if defined(__aarch64__)
        sum += static_cast<AccType>(vaddv(val_low));
        sum += static_cast<AccType>(vaddv(val_high));

        sum_sq += static_cast<AccType>(vaddv(vmul(val_low, val_low)));
        sum_sq += static_cast<AccType>(vaddv(vmul(val_high, val_high)));
#else  // __aarch64__
        // only AArch64 supports vaddv
        const int64x2_t pair_sum_low  = vpaddl(val_low);
        const int64x2_t pair_sum_high = vpaddl(val_high);
        const int64x2_t pair_sum      = vadd(pair_sum_low, pair_sum_high);
        sum += vgetlane(pair_sum, 0) + vgetlane(pair_sum, 1);

        const int32x4_t square_low       = vmul(val_low, val_low);
        const int32x4_t square_high      = vmul(val_high, val_high);
        const int64x2_t pair_sum_sq_low  = vpaddl(square_low);
        const int64x2_t pair_sum_sq_high = vpaddl(square_high);
        const int64x2_t pair_sum_sq      = vadd(pair_sum_sq_low, pair_sum_sq_high);
        sum_sq += vgetlane(pair_sum_sq, 0) + vgetlane(pair_sum_sq, 1);
#endif // __aarch64__
    }

    for(; x < _window_end_x; ++x)
    {
        const InputDataType val = input_ptr[x];
        sum += static_cast<AccType>(val);
        sum_sq += static_cast<AccType>(val * val);
    }

    return std::make_pair(sum, sum_sq);
}

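// Normalize one row: promote each value to Q10 fixed point, subtract the Q10 mean,
// scale by the quantized inverse standard deviation, apply weight and bias, undo
// the Q10 shift with rounding, then requantize with saturation; the "+ 12" on the
// output shift accounts for the fixed 2^-12 output scale.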
inline void NEQLSTMLayerNormalizationKernel::normalize_qasymm16(const int16_t *input_ptr,
                                                                int16_t       *output_ptr,
                                                                const int16_t *weight_ptr,
                                                                const int32_t *bias_ptr,
                                                                int32_t mean, int32_t inv_std_mul, int32_t inv_std_shift)
{
    using OutputDataType = int16_t;

    using namespace wrapper;
    const int32x4_t mean_vec = vdup_n(mean, wrapper::traits::vector_128_tag{});

    int32_t x = _window_start_x;
    for(; x <= _window_end_x && _window_step_x <= (_window_end_x - x); x += _window_step_x)
    {
        const int16x8_t val = vloadq(input_ptr + x);
        int32x4x2_t     shifted;
        shifted.val[0] = vsub(vshlq_n_s32(vmovl(vgetlow(val)), 10), mean_vec);
        shifted.val[1] = vsub(vshlq_n_s32(vmovl(vgethigh(val)), 10), mean_vec);

        int32x4x2_t rescaled = multiply_by_quantized_multiplier_2row(shifted, inv_std_mul, inv_std_shift);

        const int16x8_t weight_val  = vloadq(weight_ptr + x);
        const int32x4_t weight_low  = vmovl(vgetlow(weight_val));
        const int32x4_t weight_high = vmovl(vgethigh(weight_val));

        const int32x4_t bias_low  = vloadq(bias_ptr + x);
        const int32x4_t bias_high = vloadq(bias_ptr + 4 + x);

        int64x2x2_t result_0 = mul_add(rescaled.val[0], weight_low, bias_low);
        int64x2x2_t result_1 = mul_add(rescaled.val[1], weight_high, bias_high);

        int32x4x2_t combined;
        combined.val[0] = vcombine(vmovn(vrshrq_n_s64(result_0.val[0], 10)), vmovn(vrshrq_n_s64(result_0.val[1], 10)));
        combined.val[1] = vcombine(vmovn(vrshrq_n_s64(result_1.val[0], 10)), vmovn(vrshrq_n_s64(result_1.val[1], 10)));

        int32x4x2_t out_val = multiply_by_quantized_multiplier_2row(combined, _output_multiplier, _output_shift + 12);

        vstore(output_ptr + x, vqmovn(out_val.val[0]));
        vstore(output_ptr + x + 4, vqmovn(out_val.val[1]));
    }

    for(; x < _window_end_x; ++x)
    {
        const auto    val             = static_cast<int32_t>(input_ptr[x]);
        const int32_t shifted         = (val << 10) - mean;
        const int32_t rescaled        = quantization::multiply_by_quantized_multiplier(shifted, inv_std_mul, inv_std_shift);
        const int64_t weighted        = rescaled * weight_ptr[x] + bias_ptr[x];
        const auto    reverse_shifted = static_cast<int32_t>((weighted + 512) >> 10);
        int32_t       out_val         = quantization::multiply_by_quantized_multiplier(reverse_shifted, _output_multiplier, _output_shift + 12);
        out_val                       = utility::clamp<decltype(out_val), OutputDataType>(out_val, std::numeric_limits<OutputDataType>::min());
        output_ptr[x]                 = static_cast<OutputDataType>(out_val);
    }
}

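// QSYMM16 entry point: for every row, accumulate the sum and sum of squares,
// derive the mean and variance, obtain a quantized multiplier for the inverse
// standard deviation, and normalize the row into the output tensor.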
void NEQLSTMLayerNormalizationKernel::compute_qsymm16()
{
    using InputDataType  = int16_t;
    using OutputDataType = int16_t;
    using BiasDataType   = int32_t;
    using AccType        = int64_t;

    Iterator input_iterator{ _input, _inout_window };
    Iterator output_iterator{ _output, _inout_window };
    Iterator weight_iterator{ _weight, _weight_window };
    Iterator bias_iterator{ _bias, _weight_window };

    const auto weight_ptr = reinterpret_cast<const InputDataType *>(weight_iterator.ptr());
    const auto bias_ptr   = reinterpret_cast<const BiasDataType *>(bias_iterator.ptr());

    const uint32_t column_size = _input->info()->tensor_shape()[0];

    execute_window_loop(_inout_window, [&, this](const Coordinates &)
    {
        const auto in_ptr  = reinterpret_cast<const InputDataType *>(input_iterator.ptr());
        auto       out_ptr = reinterpret_cast<OutputDataType *>(output_iterator.ptr());

        AccType sum{ 0 };
        AccType sum_sq{ 0 };
        std::tie(sum, sum_sq) = sum_qsymm16(in_ptr);

        AccType mean{ 0 };
        AccType variance{ 0 };
        std::tie(mean, variance) = compute_mean_variance(sum, sum_sq, column_size);

        int32_t stddev_invsqrt_mul{};
        int32_t stddev_invsqrt_shift{};
        quantization::get_invsqrt_quantized_multiplier_exp(static_cast<int32_t>(variance), -1, stddev_invsqrt_mul, stddev_invsqrt_shift);

        normalize_qasymm16(in_ptr, out_ptr, weight_ptr, bias_ptr, mean, stddev_invsqrt_mul, stddev_invsqrt_shift);
    },
    input_iterator, output_iterator);
}
} // namespace arm_compute