/*
 * Copyright (c) 2020-2022 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include "src/cpu/kernels/add/generic/neon/impl.h"
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/utils/misc/Traits.h"
#include "src/core/NEON/wrapper/wrapper.h"

namespace arm_compute
{
namespace cpu
{
template <typename ScalarType>
void add_same_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    /** SIMD vector tag type. */
    using ExactTagType = typename wrapper::traits::neon_bitvector_tag_t<ScalarType, wrapper::traits::BitWidth::W128>;

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

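    // A 128-bit NEON register holds 16 bytes, so each vectorized iteration processes
    // 16 / sizeof(ScalarType) elements: 4 float32, 8 int16 or 16 int8/uint8 values.
    // Elements beyond the last full vector are handled by the scalar leftover loop.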
    constexpr int window_step_x         = 16 / sizeof(ScalarType);
    const auto    window_start_x        = static_cast<int>(window.x().start());
    const auto    window_end_x          = static_cast<int>(window.x().end());
    const bool    is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

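    // If one input has extent 1 along x, broadcast_if_dimension_le_one() collapsed its
    // window to a zero step so the same scalar is reused across the whole row; a zero
    // x-step is therefore how the broadcast input is identified below.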
    if(is_broadcast_across_x)
    {
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const ScalarType *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<ScalarType *>(output.ptr());

            const ScalarType broadcast_value     = *reinterpret_cast<const ScalarType *>(broadcast_input.ptr());
            const auto       broadcast_value_vec = wrapper::vdup_n(broadcast_value, ExactTagType{});

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto non_broadcast_v = wrapper::vloadq(non_broadcast_input_ptr + x);
                const auto res             = (policy == ConvertPolicy::SATURATE) ? wrapper::vqadd(broadcast_value_vec, non_broadcast_v) : wrapper::vadd(broadcast_value_vec, non_broadcast_v);
                wrapper::vstore(output_ptr + x, res);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto non_broadcast_v = *(non_broadcast_input_ptr + x);
                *(output_ptr + x)          = (policy == ConvertPolicy::SATURATE) ? wrapper::add_sat(broadcast_value, non_broadcast_v) : broadcast_value + non_broadcast_v;
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const ScalarType *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const ScalarType *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<ScalarType *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const auto val1 = wrapper::vloadq(input1_ptr + x);
                const auto val2 = wrapper::vloadq(input2_ptr + x);
                const auto res  = (policy == ConvertPolicy::SATURATE) ? wrapper::vqadd(val1, val2) : wrapper::vadd(val1, val2);
                wrapper::vstore(output_ptr + x, res);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto val1   = *(input1_ptr + x);
                const auto val2   = *(input2_ptr + x);
                *(output_ptr + x) = (policy == ConvertPolicy::SATURATE) ? wrapper::add_sat(val1, val2) : val1 + val2;
            }
        },
        input1, input2, output);
    }
}

bool sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
{
    return add_sub_q8_neon_fixedpoint_possible(src0, src1, dst, false);
}

bool add_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst)
{
    return add_sub_q8_neon_fixedpoint_possible(src0, src1, dst, true);
}

bool add_sub_q8_neon_fixedpoint_possible(const ITensorInfo *src0, const ITensorInfo *src1, const ITensorInfo *dst, bool is_addition)
{
    const auto iq0 = src0->quantization_info().uniform();
    const auto iq1 = src1->quantization_info().uniform();
    const auto oq  = dst->quantization_info().uniform();

    const auto scale0 = iq0.scale / oq.scale;
    const auto scale1 = iq1.scale / oq.scale;

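    // The scale factors are later stored in 5.11 signed fixed-point format: 1 sign bit,
    // 4 integer bits and 11 fractional bits, i.e. a representable range of roughly
    // [-16, 16). Bounding the scales to +/-15 keeps them safely inside that range.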
    if(scale0 < -15.f || scale0 > 15.f || scale1 < -15.f || scale1 > 15.f)
    {
        // The scale factor cannot be stored as a 5.11 signed fixed-point number.
        return false;
    }

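    // With r = scale * (q - offset) for each uniformly quantized tensor, requantizing
    // q_out = r_out / oq.scale + oq.offset for an elementwise add/sub expands to
    //   q_out = scale0 * q0 + scale1 * q1 + (oq.offset - scale0 * iq0.offset - scale1 * iq1.offset),
    // so the three zero points fold into the single constant computed below.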
    const auto offset = float(oq.offset) - scale0 * float(iq0.offset) - scale1 * float(iq1.offset);

    const auto max_acc = is_addition ? ((std::abs(scale0) + std::abs(scale1)) * 256.f + std::abs(offset))
                                     : ((std::abs(scale0) - std::abs(scale1)) * 256.f + std::abs(offset));

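    // Intermediate results are kept in 21.11 signed fixed-point format inside a 32-bit
    // accumulator: 1 sign bit, 20 integer bits and 11 fractional bits, so the integer
    // part can hold magnitudes up to 2^20 - 1 = 1048575. The 256.f factor upper-bounds
    // the magnitude of an 8-bit input value.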
    if(max_acc > 1048575.f) // 2^20 - 1
    {
        // It might not be possible to store the result as a 21.11 signed fixed-point number.
        return false;
    }

    return true;
}

template <typename ScalarType>
void add_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    add_sub_q8_neon_fixedpoint<ScalarType>(src0, src1, dst, policy, window, true /*is_addition*/);
}

template <typename ScalarType>
void add_sub_q8_neon_fixedpoint(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
{
    ARM_COMPUTE_UNUSED(policy);

    const auto in0_info = src0->info();
    const auto in1_info = src1->info();

    const auto &in0_shape = in0_info->tensor_shape();
    const auto &in1_shape = in1_info->tensor_shape();

    // Create input windows.
    Window in0_win = window.broadcast_if_dimension_le_one(in0_shape);
    Window in1_win = window.broadcast_if_dimension_le_one(in1_shape);

    // Clear the x dimension on the execution window as we process the whole row each iteration.
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    constexpr int window_step_x         = 16;
    const auto    window_start_x        = window.x().start();
    const auto    window_end_x          = window.x().end();
    const auto    is_broadcast_across_x = in0_shape.x() != in1_shape.x();

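    // Subtraction reuses the addition kernel by negating the second scale factor:
    // in0 * in0_scale - in1 * (iq1.scale / oq.scale) == in0 * in0_scale + in1 * in1_scale
    // with in1_scale negated below.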
    const auto iq0_info  = in0_info->quantization_info().uniform();
    const auto iq1_info  = in1_info->quantization_info().uniform();
    const auto oq_info   = dst->info()->quantization_info().uniform();
    const auto in0_scale = iq0_info.scale / oq_info.scale;
    const auto in1_scale = is_addition ? (iq1_info.scale / oq_info.scale) : (-(iq1_info.scale / oq_info.scale));
    const auto offset    = float(oq_info.offset) - in0_scale * float(iq0_info.offset) - in1_scale * float(iq1_info.offset);

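    // Convert the floating-point scales and offset to fixed point by multiplying by
    // 2^11 and rounding; e.g. a scale of 1.5 becomes lround(1.5 * 2048) = 3072 in
    // 5.11 format.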
    constexpr float _2pow11        = 2048;
    const auto      in0_scale_5p11 = static_cast<int16_t>(support::cpp11::lround(in0_scale * _2pow11));
    const auto      in1_scale_5p11 = static_cast<int16_t>(support::cpp11::lround(in1_scale * _2pow11));
    const auto      offset_21p11   = static_cast<int32_t>(support::cpp11::lround(offset * _2pow11));

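    // The 11 fractional bits are removed in two saturating narrowing steps, since each
    // step halves the element width: 32-bit 21.11 -> 16-bit 8.8 (shift right by 3),
    // then 16-bit 8.8 -> 8-bit integer (shift right by 8).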
    constexpr uint8_t shift_amount_remainder = 3;

    if(is_broadcast_across_x)
    {
        // Prefix: a = non-broadcast, b = broadcast.

        const auto is_broadcast_input_1 = in1_win.x().step() == 0;
        auto       a_win                = is_broadcast_input_1 ? in0_win : in1_win;
        auto       b_win                = is_broadcast_input_1 ? in1_win : in0_win;
        const auto a_tensor             = is_broadcast_input_1 ? src0 : src1;
        const auto b_tensor             = is_broadcast_input_1 ? src1 : src0;

        const auto a_scale_5p11  = is_broadcast_input_1 ? in0_scale_5p11 : in1_scale_5p11;
        const auto b_scale       = is_broadcast_input_1 ? in1_scale : in0_scale;
        const auto a_vscale_5p11 = wrapper::vdup_n(a_scale_5p11, wrapper::traits::vector_64_tag());

#ifndef __aarch64__
        const auto a_scale = is_broadcast_input_1 ? in0_scale : in1_scale;
#endif // __aarch64__
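        // On armv7 the leftover-element loop below falls back to floating-point
        // arithmetic, so the original float scale is kept around as well.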

        // Clear the x dimension on the execution window as we process the whole row each iteration.
        a_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator a_input_it(a_tensor, a_win);
        Iterator b_input_it(b_tensor, b_win);
        Iterator out_it(dst, win);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto a_ptr   = reinterpret_cast<const ScalarType *>(a_input_it.ptr());
            const auto b_ptr   = reinterpret_cast<const ScalarType *>(b_input_it.ptr());
            const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());

            const auto b_val                    = *b_ptr;
            const auto b_scaled                 = b_scale * b_val;
            const auto b_scaled_21p11           = static_cast<int32_t>(support::cpp11::lround(b_scaled * _2pow11));
            const auto b_scaled_offseted_21p11  = b_scaled_21p11 + offset_21p11;
            const auto b_vscaled_offseted_21p11 = wrapper::vdup_n(b_scaled_offseted_21p11, wrapper::traits::vector_128_tag());

#ifndef __aarch64__
            const auto b_scaled_offseted = b_scaled + offset;
#endif // __aarch64__

            int x = window_start_x;

            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                // Load the input.
                const auto a_vin_8p0 = wrapper::vloadq(a_ptr + x);

                // Widen the non-broadcast elements to signed 16-bit regardless of the input signedness.
                const auto a_vin_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(a_vin_8p0)));
                const auto a_vin_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(a_vin_8p0)));

                // Multiply the non-broadcast elements by the scale factor, add the scaled broadcast elements and the offset.
                // Widen and store the result in 32-bit integer.
                const auto vout_21p11_00 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_0), a_vscale_5p11);
                const auto vout_21p11_01 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_0), a_vscale_5p11);
                const auto vout_21p11_10 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgetlow(a_vin_16p0_1), a_vscale_5p11);
                const auto vout_21p11_11 = wrapper::vmlal(b_vscaled_offseted_21p11, wrapper::vgethigh(a_vin_16p0_1), a_vscale_5p11);

                // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
                const auto vout_8p8_0 = wrapper::vcombine(
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
                const auto vout_8p8_1 = wrapper::vcombine(
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));

                // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
                const auto vout_8p0 = wrapper::vcombine(
                                          wrapper::vqrshrn<8>(vout_8p8_0),
                                          wrapper::vqrshrn<8>(vout_8p8_1));

                // Store the result.
                wrapper::vstore(out_ptr + x, vout_8p0);
            }

            // Process the left-over elements.
            for(; x < window_end_x; ++x)
            {
#ifdef __aarch64__
                out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(int32_t(a_ptr[x]) * a_scale_5p11 + b_scaled_offseted_21p11));
#else  // __aarch64__
                out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(a_ptr[x]) * a_scale + b_scaled_offseted));
#endif // __aarch64__
            }
        },
        b_input_it, a_input_it, out_it);
    }
    else
    {
        const auto vscale0_5p11  = wrapper::vdup_n(in0_scale_5p11, wrapper::traits::vector_64_tag());
        const auto vscale1_5p11  = wrapper::vdup_n(in1_scale_5p11, wrapper::traits::vector_64_tag());
        const auto voffset_21p11 = wrapper::vdup_n(offset_21p11, wrapper::traits::vector_128_tag());

        // Clear the x dimension on the execution window as we process the whole row each iteration.
        in0_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        in1_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator in0_it(src0, in0_win);
        Iterator in1_it(src1, in1_win);
        Iterator out_it(dst, win);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto in0_ptr = reinterpret_cast<const ScalarType *>(in0_it.ptr());
            const auto in1_ptr = reinterpret_cast<const ScalarType *>(in1_it.ptr());
            const auto out_ptr = reinterpret_cast<ScalarType *>(out_it.ptr());

            int x = window_start_x;

            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                // Load the inputs.
                const auto vin0_8p0 = wrapper::vloadq(in0_ptr + x);
                const auto vin1_8p0 = wrapper::vloadq(in1_ptr + x);

                // Widen the input elements to signed 16-bit regardless of the input signedness.
                const auto vin0_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin0_8p0)));
                const auto vin0_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin0_8p0)));
                const auto vin1_16p0_0 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgetlow(vin1_8p0)));
                const auto vin1_16p0_1 = wrapper::vreinterpret(wrapper::vmovl(wrapper::vgethigh(vin1_8p0)));

                // Multiply the input elements by the scale factor and add the offset.
                // Widen and store the result in 32-bit integer.
                const auto vscaled0_offseted_21p11_00 = wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_0), vscale0_5p11);
                const auto vscaled0_offseted_21p11_01 = wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_0), vscale0_5p11);
                const auto vscaled0_offseted_21p11_10 = wrapper::vmlal(voffset_21p11, wrapper::vgetlow(vin0_16p0_1), vscale0_5p11);
                const auto vscaled0_offseted_21p11_11 = wrapper::vmlal(voffset_21p11, wrapper::vgethigh(vin0_16p0_1), vscale0_5p11);

                const auto vout_21p11_00 = wrapper::vmlal(vscaled0_offseted_21p11_00, wrapper::vgetlow(vin1_16p0_0), vscale1_5p11);
                const auto vout_21p11_01 = wrapper::vmlal(vscaled0_offseted_21p11_01, wrapper::vgethigh(vin1_16p0_0), vscale1_5p11);
                const auto vout_21p11_10 = wrapper::vmlal(vscaled0_offseted_21p11_10, wrapper::vgetlow(vin1_16p0_1), vscale1_5p11);
                const auto vout_21p11_11 = wrapper::vmlal(vscaled0_offseted_21p11_11, wrapper::vgethigh(vin1_16p0_1), vscale1_5p11);

                // Remove 3 bits of the fractional part, round, narrow to 16-bit and saturate the result.
                const auto vout_8p8_0 = wrapper::vcombine(
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_00),
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_01));
                const auto vout_8p8_1 = wrapper::vcombine(
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_10),
                                            wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(vout_21p11_11));

                // Remove 8 bits of the fractional part, round, narrow to 8-bit and saturate the result.
                const auto vout_8p0 = wrapper::vcombine(
                                          wrapper::vqrshrn<8>(vout_8p8_0),
                                          wrapper::vqrshrn<8>(vout_8p8_1));

                // Store the result.
                wrapper::vstore(out_ptr + x, vout_8p0);
            }

            // Process the left-over elements.
            for(; x < window_end_x; ++x)
            {
#ifdef __aarch64__
                out_ptr[x] = wrapper::vqrshrn<8>(wrapper::vqrshrn_ex<shift_amount_remainder, ScalarType>(int32_t(in0_ptr[x]) * in0_scale_5p11 + int32_t(in1_ptr[x]) * in1_scale_5p11 + offset_21p11));
#else  // __aarch64__
                out_ptr[x] = utility::clamp<int, ScalarType>(support::cpp11::lround(float(in0_ptr[x]) * in0_scale + float(in1_ptr[x]) * in1_scale + offset));
#endif // __aarch64__
            }
        },
        in0_it, in1_it, out_it);
    }
}

void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    constexpr int window_step_x         = 16;
    const auto    window_start_x        = static_cast<int>(window.x().start());
    const auto    window_end_x          = static_cast<int>(window.x().end());
    const bool    is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

    const auto scale1 = iq1_info.scale / oq_info.scale;
    const auto scale2 = is_addition ? (iq2_info.scale / oq_info.scale) : (-(iq2_info.scale / oq_info.scale));
    const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);

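    // Illustrative example (not taken from the library): with iq1_info.scale = 0.5f,
    // iq2_info.scale = 0.25f, oq_info.scale = 1.f and all offsets zero, an addition
    // computes q_out = round(0.5f * q1 + 0.25f * q2).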
    if(is_broadcast_across_x)
    {
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;

        const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
        const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
        const auto vscale1  = vdupq_n_f32(af_scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = non_broadcast_input.ptr();
            const auto output_ptr              = output.ptr();

            const auto broadcast_value = *broadcast_input.ptr();
            // Use the broadcast input's own scale (bf_scale) so the vector path agrees
            // with the scalar leftover path below.
            const auto bf              = vdupq_n_f32(float(broadcast_value) * bf_scale + offset);
            const auto bfs             = float(broadcast_value) * bf_scale + offset;

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const uint8x16_t a = vld1q_u8(non_broadcast_input_ptr + x);

                const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
                const auto a_u16_1 = vmovl_u8(vget_high_u8(a));

                const auto af_0 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
                const auto af_1 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
                const auto af_2 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
                const auto af_3 = vmlaq_f32(bf, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
                int32x4_t rf_2{};
                int32x4_t rf_3{};

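                // On aarch64, vcvtnq_s32_f32 rounds to nearest with ties to even; the
                // armv7 path only has the truncating vcvtq_s32_f32, so results can
                // differ by one unit in the last place between the two builds.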
#ifdef __aarch64__
                rf_0 = vcvtnq_s32_f32(af_0);
                rf_1 = vcvtnq_s32_f32(af_1);
                rf_2 = vcvtnq_s32_f32(af_2);
                rf_3 = vcvtnq_s32_f32(af_3);
#else  //__aarch64__
                rf_0 = vcvtq_s32_f32(af_0);
                rf_1 = vcvtq_s32_f32(af_1);
                rf_2 = vcvtq_s32_f32(af_2);
                rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__

                const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
                const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
                vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
#ifdef __aarch64__
                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
#else  // __aarch64__
                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        const auto vscale1 = vdupq_n_f32(scale1);
        const auto vscale2 = vdupq_n_f32(scale2);
        const auto voffset = vdupq_n_f32(offset);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto input1_ptr = input1.ptr();
            const auto input2_ptr = input2.ptr();
            const auto output_ptr = output.ptr();

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const uint8x16_t a = vld1q_u8(input1_ptr + x);
                const uint8x16_t b = vld1q_u8(input2_ptr + x);

                const auto a_u16_0 = vmovl_u8(vget_low_u8(a));
                const auto a_u16_1 = vmovl_u8(vget_high_u8(a));
                const auto b_u16_0 = vmovl_u8(vget_low_u8(b));
                const auto b_u16_1 = vmovl_u8(vget_high_u8(b));

                const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_0))), vscale1);
                const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_0))), vscale1);
                const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_low_u16(a_u16_1))), vscale1);
                const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_u32(vmovl_u16(vget_high_u16(a_u16_1))), vscale1);

                const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_0))), vscale2);
                const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_0))), vscale2);
                const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_u32(vmovl_u16(vget_low_u16(b_u16_1))), vscale2);
                const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_u32(vmovl_u16(vget_high_u16(b_u16_1))), vscale2);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
                int32x4_t rf_2{};
                int32x4_t rf_3{};

#ifdef __aarch64__
                rf_0 = vcvtnq_s32_f32(bf_0);
                rf_1 = vcvtnq_s32_f32(bf_1);
                rf_2 = vcvtnq_s32_f32(bf_2);
                rf_3 = vcvtnq_s32_f32(bf_3);
#else  //__aarch64__
                rf_0 = vcvtq_s32_f32(bf_0);
                rf_1 = vcvtq_s32_f32(bf_1);
                rf_2 = vcvtq_s32_f32(bf_2);
                rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__

                const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
                const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
                vst1q_u8(output_ptr + x, vcombine_u8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
#ifdef __aarch64__
                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::lround(result));
#else  // __aarch64__
                output_ptr[x] = utility::clamp<int, uint8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
            }
        },
        input1, input2, output);
    }
}

void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    constexpr int window_step_x         = 16;
    const auto    window_start_x        = static_cast<int>(window.x().start());
    const auto    window_end_x          = static_cast<int>(window.x().end());
    const bool    is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

    const auto scale1 = iq1_info.scale / oq_info.scale;
    const auto scale2 = is_addition ? (iq2_info.scale / oq_info.scale) : (-(iq2_info.scale / oq_info.scale));
    const auto offset = float(oq_info.offset) - scale1 * float(iq1_info.offset) - scale2 * float(iq2_info.offset);

    if(is_broadcast_across_x)
    {
        const bool     is_broadcast_input_2 = input2_win.x().step() == 0;
        Window         broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window         non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;

        const auto af_scale = is_broadcast_input_2 ? scale1 : scale2;
        const auto bf_scale = is_broadcast_input_2 ? scale2 : scale1;
        const auto vscale1  = vdupq_n_f32(af_scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int8_t *>(output.ptr());

            const auto broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            // Use the broadcast input's own scale (bf_scale) so the vector path agrees
            // with the scalar leftover path below.
            const auto bf              = vdupq_n_f32(float(broadcast_value) * bf_scale + offset);
            const auto bfs             = float(broadcast_value) * bf_scale + offset;

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int8x16_t a = vld1q_s8(non_broadcast_input_ptr + x);

                const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
                const auto a_s16_1 = vmovl_s8(vget_high_s8(a));

                const auto af_0 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
                const auto af_1 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
                const auto af_2 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
                const auto af_3 = vmlaq_f32(bf, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
                int32x4_t rf_2{};
                int32x4_t rf_3{};

#ifdef __aarch64__
                rf_0 = vcvtnq_s32_f32(af_0);
                rf_1 = vcvtnq_s32_f32(af_1);
                rf_2 = vcvtnq_s32_f32(af_2);
                rf_3 = vcvtnq_s32_f32(af_3);
#else  //__aarch64__
                rf_0 = vcvtq_s32_f32(af_0);
                rf_1 = vcvtq_s32_f32(af_1);
                rf_2 = vcvtq_s32_f32(af_2);
                rf_3 = vcvtq_s32_f32(af_3);
#endif //__aarch64__

                const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
                const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
                vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto result = float(non_broadcast_input_ptr[x]) * af_scale + bfs;
#ifdef __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
#else  // __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

        const auto vscale1 = vdupq_n_f32(scale1);
        const auto vscale2 = vdupq_n_f32(scale2);
        const auto voffset = vdupq_n_f32(offset);

        execute_window_loop(
            win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int8x16_t a = vld1q_s8(input1_ptr + x);
                const int8x16_t b = vld1q_s8(input2_ptr + x);

                const auto a_s16_0 = vmovl_s8(vget_low_s8(a));
                const auto a_s16_1 = vmovl_s8(vget_high_s8(a));
                const auto b_s16_0 = vmovl_s8(vget_low_s8(b));
                const auto b_s16_1 = vmovl_s8(vget_high_s8(b));

                const auto af_0 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_0))), vscale1);
                const auto af_1 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_0))), vscale1);
                const auto af_2 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_low_s16(a_s16_1))), vscale1);
                const auto af_3 = vmlaq_f32(voffset, vcvtq_f32_s32(vmovl_s16(vget_high_s16(a_s16_1))), vscale1);

                const auto bf_0 = vmlaq_f32(af_0, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_0))), vscale2);
                const auto bf_1 = vmlaq_f32(af_1, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_0))), vscale2);
                const auto bf_2 = vmlaq_f32(af_2, vcvtq_f32_s32(vmovl_s16(vget_low_s16(b_s16_1))), vscale2);
                const auto bf_3 = vmlaq_f32(af_3, vcvtq_f32_s32(vmovl_s16(vget_high_s16(b_s16_1))), vscale2);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
                int32x4_t rf_2{};
                int32x4_t rf_3{};

#ifdef __aarch64__
                rf_0 = vcvtnq_s32_f32(bf_0);
                rf_1 = vcvtnq_s32_f32(bf_1);
                rf_2 = vcvtnq_s32_f32(bf_2);
                rf_3 = vcvtnq_s32_f32(bf_3);
#else  //__aarch64__
                rf_0 = vcvtq_s32_f32(bf_0);
                rf_1 = vcvtq_s32_f32(bf_1);
                rf_2 = vcvtq_s32_f32(bf_2);
                rf_3 = vcvtq_s32_f32(bf_3);
#endif //__aarch64__

                const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1)));
                const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(rf_2), vqmovn_s32(rf_3)));
                vst1q_s8(output_ptr + x, vcombine_s8(pa, pb));
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const auto result = float(input1_ptr[x]) * scale1 + float(input2_ptr[x]) * scale2 + offset;
#ifdef __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::lround(result));
#else  // __aarch64__
                output_ptr[x] = utility::clamp<int, int8_t>(support::cpp11::trunc(result));
#endif // __aarch64__
            }
        },
        input1, input2, output);
    }
}

template void add_same_neon<float>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<int32_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_same_neon<int16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);

#if defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS)
template void add_same_neon<float16_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
#endif /* defined(__ARM_FEATURE_FP16_VECTOR_ARITHMETIC) && defined(ENABLE_FP16_KERNELS) */

template void add_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);
template void add_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window);

template void add_sub_q8_neon_fixedpoint<int8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
template void add_sub_q8_neon_fixedpoint<uint8_t>(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);

void add_sub_qasymm8_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);
void add_sub_qasymm8_signed_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window, bool is_addition);

} // namespace cpu
} // namespace arm_compute