/*
 * Copyright (c) 2018-2020 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "src/core/NEON/kernels/NEElementwiseOperationKernel.h"

#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/IAccessWindow.h"
#include "src/core/CPP/Validate.h"
#include "src/core/NEON/NEAsymm.h"
#include "src/core/NEON/NEFixedPoint.h"
#include "src/core/NEON/wrapper/wrapper.h"
#include "src/core/helpers/AutoConfiguration.h"
#include "src/core/helpers/WindowHelpers.h"

#include <arm_neon.h>
#include <map>

namespace arm_compute
{
namespace
{
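// Dequantize 16 QASYMM8 values into four float32x4_t vectors: each lane is
// widened u8 -> u16 -> s32 and mapped through f = (q - offset) * scale.
// For example, with offset = 10 and scale = 0.5f, a lane holding 14
// dequantizes to (14 - 10) * 0.5f = 2.0f.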
float32x4x4_t load_quantized(const uint8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale)
{
    qasymm8x16_t x = vld1q_u8(input1_ptr);
    const float32x4x4_t out =
    {
        {
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_low_u8(x))))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_low_u8(x))))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_low_u16(vmovl_u8(vget_high_u8(x))))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vreinterpretq_s32_u32(vmovl_u16(vget_high_u16(vmovl_u8(vget_high_u8(x))))), offset)), scale),
        }
    };
    return out;
}

float32x4x4_t load_quantized_signed(const int8_t *input1_ptr, const int32x4_t &offset, const float32x4_t &scale)
{
    qasymm8x16_signed_t x = vld1q_s8(input1_ptr);
    const float32x4x4_t out =
    {
        {
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_low_s8(x)))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_low_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale),
            vmulq_f32(vcvtq_f32_s32(vsubq_s32(vmovl_s16(vget_high_s16(vmovl_s8(vget_high_s8(x)))), offset)), scale),
        }
    };
    return out;
}

void store_quantized(uint8_t *output_ptr, const uint32x4x4_t &out)
{
    const uint8x8_t pa = vqmovn_u16(vcombine_u16(vqmovn_u32(out.val[0]), vqmovn_u32(out.val[1])));
    const uint8x8_t pb = vqmovn_u16(vcombine_u16(vqmovn_u32(out.val[2]), vqmovn_u32(out.val[3])));
    vst1q_u8(output_ptr, vcombine_u8(pa, pb));
}

void store_quantized(uint8_t *output_ptr, const int32x4x4_t &out)
{
    const uint8x8_t pa = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[0]), vqmovn_s32(out.val[1])));
    const uint8x8_t pb = vqmovun_s16(vcombine_s16(vqmovn_s32(out.val[2]), vqmovn_s32(out.val[3])));
    vst1q_u8(output_ptr, vcombine_u8(pa, pb));
}

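// Requantize four float32x4_t vectors back to 16 quantized values:
// q = saturating_narrow(s32(f * invscale + offset)). The float-to-int
// conversion truncates, so elementwise_op_quantized() below folds a +0.5f
// rounding bias into the offset vector it passes in here.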
void store_quantized(uint8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale)
{
    int32x4x4_t out =
    {
        {
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)),
        }
    };
    store_quantized(output_ptr, out);
}

void store_quantized_signed(int8_t *output_ptr, const int32x4x4_t &out)
{
    const int8x8_t pa = vqmovn_s16(vcombine_s16(vqmovn_s32(out.val[0]), vqmovn_s32(out.val[1])));
    const int8x8_t pb = vqmovn_s16(vcombine_s16(vqmovn_s32(out.val[2]), vqmovn_s32(out.val[3])));
    vst1q_s8(output_ptr, vcombine_s8(pa, pb));
}

void store_quantized_signed(int8_t *output_ptr, const float32x4x4_t &rf, const float32x4_t &offset, const float32x4_t &invscale)
{
    int32x4x4_t out =
    {
        {
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[0], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[1], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[2], invscale)),
            vcvtq_s32_f32(vmlaq_f32(offset, rf.val[3], invscale)),
        }
    };
    store_quantized_signed(output_ptr, out);
}

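// Scalar reference ops, used for the leftover elements the vectorized loops
// below do not cover. Note that integer DIV is floor division: C++ truncation
// gives -7 / 2 == -3, and the --res adjustment below turns it into
// floor(-3.5) == -4, matching the floored float computation in the vectorized
// DIV specialization.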
template <ArithmeticOperation op, typename ScalarType>
inline ScalarType elementwise_arithm_op_scalar(const ScalarType &a, const ScalarType &b)
{
    auto res = ScalarType(0);

    switch(op)
    {
        case ArithmeticOperation::MAX:
            res = std::max(a, b);
            break;
        case ArithmeticOperation::MIN:
            res = std::min(a, b);
            break;
        case ArithmeticOperation::SQUARED_DIFF:
        {
            res = (a - b) * (a - b);
            break;
        }
        case ArithmeticOperation::PRELU:
        {
            res = (a > 0 ? a : a * b);
            break;
        }
        case ArithmeticOperation::DIV:
        {
            res = a / b;
            if(std::is_integral<ScalarType>::value)
            {
                res = (b == 0) ? 0 : res;
                if(static_cast<int32_t>(a) % static_cast<int32_t>(b) != 0 && ((a < 0) != (b < 0)))
                {
                    --res;
                }
            }
            break;
        }
        case ArithmeticOperation::POWER:
        {
            res = std::pow(a, b);
            break;
        }
        default:
            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
    }
    return res;
}

template <ArithmeticOperation op>
inline uint8_t elementwise_arithm_op_quantized_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo)
{
    return quantize_qasymm8(elementwise_arithm_op_scalar<op>(a, b), qinfo);
}

template <ArithmeticOperation op>
inline int8_t elementwise_arithm_op_quantized_signed_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo)
{
    return quantize_qasymm8_signed(elementwise_arithm_op_scalar<op>(a, b), qinfo);
}

template <ArithmeticOperation op, typename VectorType>
inline typename VectorType::type elementwise_arithm_op(const typename VectorType::type &a, const typename VectorType::type &b)
{
    using vec_type = typename VectorType::type;
    using scalar_type = typename VectorType::scalar_type;
    using tag_type = typename VectorType::tag_type;

    vec_type res = wrapper::vdup_n(static_cast<scalar_type>(0), tag_type{});

    switch(op)
    {
        case ArithmeticOperation::MAX:
            res = wrapper::vmax(a, b);
            break;
        case ArithmeticOperation::MIN:
            res = wrapper::vmin(a, b);
            break;
        case ArithmeticOperation::SQUARED_DIFF:
        {
            const vec_type tmp = wrapper::vsub(a, b);
            res = wrapper::vmul(tmp, tmp);
            break;
        }
        case ArithmeticOperation::PRELU:
        {
            const vec_type zero = wrapper::vdup_n(static_cast<scalar_type>(0), tag_type{});
            const vec_type tmp = wrapper::vmul(a, b);
            const auto gt = wrapper::vcgt(a, zero);

            res = wrapper::vbsl(gt, a, tmp);
            break;
        }

        default:
            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
    }

    return res;
}

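// Specializations for DIV and POWER, which are not expressible through the
// generic wrapper ops above. Integer DIV is computed in float and floored so
// that it agrees with the scalar floor-division tail; note this is exact only
// while both operands fit within float32's 24-bit significand.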
template <>
inline int32x4_t elementwise_arithm_op<ArithmeticOperation::DIV, typename wrapper::traits::neon_vector<int32_t, 4>>(const int32x4_t &a, const int32x4_t &b)
{
    return vcvtq_s32_f32(vfloorq_f32(wrapper::vdiv(vcvtq_f32_s32(a), vcvtq_f32_s32(b))));
}

template <>
inline float32x4_t elementwise_arithm_op<ArithmeticOperation::DIV, typename wrapper::traits::neon_vector<float, 4>>(const float32x4_t &a, const float32x4_t &b)
{
    return wrapper::vdiv(a, b);
}

template <>
inline float32x4_t elementwise_arithm_op<ArithmeticOperation::POWER, typename wrapper::traits::neon_vector<float, 4>>(const float32x4_t &a, const float32x4_t &b)
{
    return wrapper::vpow(a, b);
}

#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
template <>
inline float16x8_t elementwise_arithm_op<ArithmeticOperation::DIV, typename wrapper::traits::neon_vector<float16_t, 8>>(const float16x8_t &a, const float16x8_t &b)
{
    return wrapper::vdiv(a, b);
}

template <>
inline float16x8_t elementwise_arithm_op<ArithmeticOperation::POWER, typename wrapper::traits::neon_vector<float16_t, 8>>(const float16x8_t &a, const float16x8_t &b)
{
    return wrapper::vpow(a, b);
}
#endif // __ARM_FEATURE_FP16_VECTOR_ARITHMETIC

template <ArithmeticOperation op>
inline float32x4x4_t elementwise_arithm_op(const float32x4x4_t &a, const float32x4x4_t &b)
{
    using neon_vector_float = wrapper::traits::neon_vector<float, 4>;
    float32x4x4_t out =
    {
        {
            elementwise_arithm_op<op, neon_vector_float>(a.val[0], b.val[0]),
            elementwise_arithm_op<op, neon_vector_float>(a.val[1], b.val[1]),
            elementwise_arithm_op<op, neon_vector_float>(a.val[2], b.val[2]),
            elementwise_arithm_op<op, neon_vector_float>(a.val[3], b.val[3]),
        }
    };
    return out;
}

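// Broadcast variant: the scalar is duplicated across a full vector and the
// binary op above is reused. The reorder flag swaps the operand order so that
// non-commutative ops (DIV, PRELU, POWER) stay correct when input1, rather
// than input2, is the broadcast tensor.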
template <ArithmeticOperation op, typename ScalarType, typename VectorType>
inline typename VectorType::type elementwise_arithm_op_broadcast(const typename VectorType::type &a, const ScalarType &broadcast_value, const bool reorder)
{
    using tag_type = typename VectorType::tag_type;
    using vec_type = typename VectorType::type;

    vec_type broadcast_vector = wrapper::vdup_n(broadcast_value, tag_type{});
    return elementwise_arithm_op<op, VectorType>(reorder ? broadcast_vector : a, reorder ? a : broadcast_vector);
}

template <ComparisonOperation op, typename InputScalarType>
inline uint8_t elementwise_comp_op_scalar(const InputScalarType &a, const InputScalarType &b)
{
    bool res = false;

    switch(op)
    {
        case ComparisonOperation::Equal:
            res = (a == b);
            break;
        case ComparisonOperation::NotEqual:
            res = (a != b);
            break;
        case ComparisonOperation::Greater:
            res = (a > b);
            break;
        case ComparisonOperation::GreaterEqual:
            res = (a >= b);
            break;
        case ComparisonOperation::Less:
            res = (a < b);
            break;
        case ComparisonOperation::LessEqual:
            res = (a <= b);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
    }
    return res ? ~static_cast<uint8_t>(0) : static_cast<uint8_t>(0);
}

template <ComparisonOperation op>
inline uint8_t elementwise_comp_op_quantized_scalar(const float &a, const float &b, UniformQuantizationInfo qinfo)
{
    ARM_COMPUTE_UNUSED(qinfo);
    return elementwise_comp_op_scalar<op>(a, b);
}

template <ComparisonOperation op, typename InputVectorType, typename OutputVectorType>
inline OutputVectorType elementwise_comp_op(const InputVectorType &a, const InputVectorType &b)
{
    OutputVectorType res = { 0, 0, 0, 0 };

    switch(op)
    {
        case ComparisonOperation::Equal:
            res = wrapper::vceq(a, b);
            break;
        case ComparisonOperation::NotEqual:
            res = wrapper::vnot(wrapper::vceq(a, b));
            break;
        case ComparisonOperation::Greater:
            res = wrapper::vcgt(a, b);
            break;
        case ComparisonOperation::GreaterEqual:
            res = wrapper::vcge(a, b);
            break;
        case ComparisonOperation::Less:
            res = wrapper::vcgt(b, a);
            break;
        case ComparisonOperation::LessEqual:
            res = wrapper::vcge(b, a);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
    }

    return res;
}

template <ComparisonOperation op>
inline uint32x4x4_t elementwise_comp_op(const float32x4x4_t &a, const float32x4x4_t &b)
{
    uint32x4x4_t out =
    {
        {
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[0], b.val[0]),
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[1], b.val[1]),
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[2], b.val[2]),
            elementwise_comp_op<op, float32x4_t, uint32x4_t>(a.val[3], b.val[3])
        }
    };
    return out;
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType, typename OutputVectorType>
inline OutputVectorType elementwise_comp_op_broadcast(const InputVectorType &a, const InputScalarType &broadcast_value, const bool reorder)
{
    InputVectorType broadcast_vector = wrapper::vdup_n(broadcast_value, wrapper::traits::vector_128_tag());
    return elementwise_comp_op<op, InputVectorType, OutputVectorType>(reorder ? broadcast_vector : a, reorder ? a : broadcast_vector);
}

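// Each *_loop helper below consumes as many whole vectors as fit in
// [window_start_x, window_end_x) and returns the first index it did not
// process; the elementwise_op*() drivers then finish the row element by
// element with the scalar function.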
template <ArithmeticOperation op, typename ScalarType, typename VectorType>
inline int elementwise_arithm_op_loop(int window_start_x, int window_end_x, int window_step_x,
                                      const ScalarType *input1_ptr, const ScalarType *input2_ptr, ScalarType *output_ptr)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = wrapper::vloadq(input1_ptr + x);
        const auto b = wrapper::vloadq(input2_ptr + x);
        wrapper::vstore(output_ptr + x, elementwise_arithm_op<op, VectorType>(a, b));
    }
    return x;
}

template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x,
                                                const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr,
                                                int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                                float32x4_t voffseto, float32x4_t invvscaleo)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        // Get inputs and compute output
        const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
        const float32x4x4_t rf = elementwise_arithm_op<op>(af, bf);
        store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_signed_loop(int window_start_x, int window_end_x, int window_step_x,
                                                       const int8_t *input1_ptr, const int8_t *input2_ptr, int8_t *output_ptr,
                                                       int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                                       float32x4_t voffseto, float32x4_t invvscaleo)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        // Get inputs and compute output
        const float32x4x4_t af = load_quantized_signed(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized_signed(input2_ptr + x, voffset2, vscale2);
        const float32x4x4_t rf = elementwise_arithm_op<op>(af, bf);
        store_quantized_signed(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

template <ArithmeticOperation op, typename ScalarType, typename VectorType>
inline int elementwise_arithm_op_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                const ScalarType *non_broadcast_input_ptr, const ScalarType &broadcast_value, ScalarType *output_ptr, const bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = wrapper::vloadq((non_broadcast_input_ptr + x));
        wrapper::vstore(output_ptr + x, elementwise_arithm_op_broadcast<op, ScalarType, VectorType>(a, broadcast_value, reorder));
    }
    return x;
}

template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                          const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
                                                          int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                          float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const float32x4x4_t rf = elementwise_arithm_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

template <ArithmeticOperation op>
inline int elementwise_arithm_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                                 const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, int8_t *output_ptr,
                                                                 int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                                 float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const float32x4x4_t rf = elementwise_arithm_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized_signed(output_ptr + x, rf, voffseto, invvscaleo);
    }
    return x;
}

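// Comparison loops write a uint8_t mask (0x00 or 0xFF per element) whatever
// the input width: 8-bit results are stored directly, 16-bit results are
// narrowed once, and 32-bit results are narrowed twice (two vectors per
// iteration) with a 4-element scalar-store tail.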
template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
inline int elementwise_comp_op_8_loop(int window_start_x, int window_end_x, int window_step_x,
                                      const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = wrapper::vloadq(input1_ptr + x);
        const auto b = wrapper::vloadq(input2_ptr + x);
        const auto res = elementwise_comp_op<op, InputVectorType, uint8x16_t>(a, b);
        wrapper::vstore(output_ptr + x, res);
    }
    return x;
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
inline int elementwise_comp_op_16_loop(int window_start_x, int window_end_x, int window_step_x,
                                       const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = wrapper::vloadq(input1_ptr + x);
        const auto b = wrapper::vloadq(input2_ptr + x);
        const auto res = elementwise_comp_op<op, InputVectorType, uint16x8_t>(a, b);
        wrapper::vstore(output_ptr + x, wrapper::vmovn(res));
    }
    return x;
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
inline int elementwise_comp_op_32_loop(int window_start_x, int window_end_x, int window_step_x,
                                       const InputScalarType *input1_ptr, const InputScalarType *input2_ptr, uint8_t *output_ptr)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        auto a = wrapper::vloadq(input1_ptr + x);
        auto b = wrapper::vloadq(input2_ptr + x);
        const auto res = elementwise_comp_op<op, InputVectorType, uint32x4_t>(a, b);
        a = wrapper::vloadq(input1_ptr + x + 4);
        b = wrapper::vloadq(input2_ptr + x + 4);
        const auto res2 = elementwise_comp_op<op, InputVectorType, uint32x4_t>(a, b);
        wrapper::vstore(output_ptr + x, wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(res), wrapper::vmovn(res2))));
    }
    if(x <= window_end_x - 4)
    {
        const auto a = wrapper::vloadq(input1_ptr + x);
        const auto b = wrapper::vloadq(input2_ptr + x);
        const auto res = elementwise_comp_op<op, InputVectorType, uint32x4_t>(a, b);
        for(int i = 0; i < 4; i++)
        {
            *(output_ptr + x + i) = wrapper::vgetlane(res, i);
        }
        x += 4;
    }
    return x;
}

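// Quantized comparisons dequantize both inputs to float, compare, and store
// the raw mask. voffseto / invvscaleo exist only so these loops match the
// arithmetic loops' function-pointer signature, and are deliberately unused.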
template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_loop(int window_start_x, int window_end_x, int window_step_x,
                                              const uint8_t *input1_ptr, const uint8_t *input2_ptr, uint8_t *output_ptr,
                                              int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                              float32x4_t voffseto, float32x4_t invvscaleo)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized(input2_ptr + x, voffset2, vscale2);
        const uint32x4x4_t rf = elementwise_comp_op<op>(af, bf);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_signed_loop(int window_start_x, int window_end_x, int window_step_x,
                                                     const int8_t *input1_ptr, const int8_t *input2_ptr, uint8_t *output_ptr,
                                                     int32x4_t voffset1, int32x4_t voffset2, float32x4_t vscale1, float32x4_t vscale2,
                                                     float32x4_t voffseto, float32x4_t invvscaleo)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized_signed(input1_ptr + x, voffset1, vscale1);
        const float32x4x4_t bf = load_quantized_signed(input2_ptr + x, voffset2, vscale2);
        const uint32x4x4_t rf = elementwise_comp_op<op>(af, bf);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
inline int elementwise_comp_op_broadcast_8_loop(int window_start_x, int window_end_x, int window_step_x,
                                                const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint8x16_t>(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder);
        wrapper::vstore(output_ptr + x, a);
    }
    return x;
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
inline int elementwise_comp_op_broadcast_16_loop(int window_start_x, int window_end_x, int window_step_x,
                                                 const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint16x8_t>(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder);
        wrapper::vstore(output_ptr + x, wrapper::vmovn(a));
    }
    return x;
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
inline int elementwise_comp_op_broadcast_32_loop(int window_start_x, int window_end_x, int window_step_x,
                                                 const InputScalarType *non_broadcast_input_ptr, const InputScalarType &broadcast_value, uint8_t *output_ptr, const bool reorder)
{
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint32x4_t>(wrapper::vloadq(non_broadcast_input_ptr + x), broadcast_value, reorder);
        const auto b = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint32x4_t>(wrapper::vloadq(non_broadcast_input_ptr + x + 4), broadcast_value, reorder);
        wrapper::vstore(output_ptr + x, wrapper::vmovn(wrapper::vcombine(wrapper::vmovn(a), wrapper::vmovn(b))));
    }
    if(x <= window_end_x - 4)
    {
        const auto a = elementwise_comp_op_broadcast<op, InputScalarType, InputVectorType, uint32x4_t>(wrapper::vloadq((non_broadcast_input_ptr + x)), broadcast_value, reorder);
        for(int i = 0; i < 4; i++)
        {
            *(output_ptr + x + i) = wrapper::vgetlane(a, i);
        }
        x += 4;
    }
    return x;
}

template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                        const uint8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
                                                        int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                        float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const uint32x4x4_t rf = elementwise_comp_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

template <ComparisonOperation op>
inline int elementwise_comp_op_quantized_signed_broadcast_loop(int window_start_x, int window_end_x, int window_step_x,
                                                               const int8_t *non_broadcast_input_ptr, float32x4x4_t broadcast_vector, uint8_t *output_ptr,
                                                               int32x4_t voffset_non_broadcast, float32x4_t vscale_non_broadcast,
                                                               float32x4_t voffseto, float32x4_t invvscaleo, bool reorder)
{
    ARM_COMPUTE_UNUSED(voffseto, invvscaleo);
    int x = window_start_x;
    for(; x <= (window_end_x - window_step_x); x += window_step_x)
    {
        const float32x4x4_t af = load_quantized_signed(non_broadcast_input_ptr + x, voffset_non_broadcast, vscale_non_broadcast);
        const uint32x4x4_t rf = elementwise_comp_op<op>(reorder ? broadcast_vector : af, reorder ? af : broadcast_vector);
        store_quantized(output_ptr + x, rf);
    }
    return x;
}

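// Generic driver shared by all non-quantized kernels: builds per-input
// windows, takes the broadcast path when the X dimensions of the two inputs
// differ, runs the vectorized loop over each row, and hands the remaining
// elements to scalar_func.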
template <typename InputScalarType, typename OutputScalarType, typename InputVectorType>
void elementwise_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                    OutputScalarType (*scalar_func)(const InputScalarType &, const InputScalarType &),
                    int (*broadcast_func)(int, int, int, const InputScalarType *, const InputScalarType &, OutputScalarType *, const bool),
                    int (*neon_func)(int, int, int, const InputScalarType *, const InputScalarType *, OutputScalarType *))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int window_step_x = std::min(16 / static_cast<int>(sizeof(OutputScalarType)), 8);
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    if(is_broadcast_across_x)
    {
        const bool is_broadcast_input_2 = input2_win.x().step() == 0;
        Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
        Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            auto output_ptr = reinterpret_cast<OutputScalarType *>(output.ptr());
            const auto non_broadcast_input_ptr = reinterpret_cast<const InputScalarType *>(non_broadcast_input.ptr());
            const InputScalarType broadcast_value = *reinterpret_cast<const InputScalarType *>(broadcast_input.ptr());

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_value, output_ptr, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const auto a = *(non_broadcast_input_ptr + x);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? broadcast_value : a, !is_broadcast_input_2 ? a : broadcast_value);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            auto output_ptr = reinterpret_cast<OutputScalarType *>(output.ptr());
            const auto input1_ptr = reinterpret_cast<const InputScalarType *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const InputScalarType *>(input2.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr);
            for(; x < window_end_x; ++x)
            {
                const auto a = *(input1_ptr + x);
                const auto b = *(input2_ptr + x);
                *(output_ptr + x) = (*scalar_func)(a, b);
            }
        },
        input1, input2, output);
    }
}

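// QASYMM8 driver: same structure as elementwise_op(), with the inputs
// dequantized using per-tensor (offset, scale) vectors, the op evaluated in
// float, and the result requantized via voffseto / invvscaleo (see the
// rounding note on voffseto below).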
void elementwise_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                              uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
                              int (*broadcast_func)(int, int, int, const uint8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t,
                                                    float32x4_t, float32x4_t, const bool),
                              int (*neon_func)(int, int, int, const uint8_t *, const uint8_t *, uint8_t *,
                                               int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                               float32x4_t, float32x4_t))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int window_step_x = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();

    // Output quantization info (add 0.5 to round toward the nearest integer - 0.5 rounds away from zero)
    const float32x4_t voffseto = vdupq_n_f32(output_qinfo.offset + 0.5f);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);

    if(is_broadcast_across_x)
    {
        // Select the broadcast input on the X axis
        const bool is_broadcast_input_2 = input2_win.x().step() == 0;
        Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
        Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();

        const int32x4_t voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
        const float32x4_t vscale_non_broadcast = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const uint8_t *>(non_broadcast_input.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            const uint8_t broadcast_value = *reinterpret_cast<const uint8_t *>(broadcast_input.ptr());
            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_u8(broadcast_value), broadcast_qinfo);

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const float afs = dequantize_qasymm8(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
                const float bfs = dequantize_qasymm8(broadcast_value, broadcast_qinfo);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
        const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();

        // Input1 quantization info
        const int32x4_t voffset1 = vdupq_n_s32(input1_qinfo.offset);
        const float32x4_t vscale1 = vdupq_n_f32(input1_qinfo.scale);

        // Input2 quantization info
        const int32x4_t voffset2 = vdupq_n_s32(input2_qinfo.offset);
        const float32x4_t vscale2 = vdupq_n_f32(input2_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const uint8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const uint8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
                                 vscale1, vscale2, voffseto, invvscaleo);
            for(; x < window_end_x; ++x)
            {
                const float afs = dequantize_qasymm8(*(input1_ptr + x), input1_qinfo);
                const float bfs = dequantize_qasymm8(*(input2_ptr + x), input2_qinfo);
                *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo);
            }
        },
        input1, input2, output);
    }
}

void elementwise_comp_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                                       uint8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
                                       int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, uint8_t *, int32x4_t, float32x4_t,
                                                             float32x4_t, float32x4_t, const bool),
                                       int (*neon_func)(int, int, int, const int8_t *, const int8_t *, uint8_t *,
                                                        int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                                        float32x4_t, float32x4_t))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int window_step_x = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();

    const float32x4_t voffseto = vdupq_n_f32(output_qinfo.offset);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);

    if(is_broadcast_across_x)
    {
        // Select the broadcast input on the X axis
        const bool is_broadcast_input_2 = input2_win.x().step() == 0;
        Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
        Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();

        const int32x4_t voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
        const float32x4_t vscale_non_broadcast = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            const int8_t broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo);

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
                const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
        const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();

        // Input1 quantization info
        const int32x4_t voffset1 = vdupq_n_s32(input1_qinfo.offset);
        const float32x4_t vscale1 = vdupq_n_f32(input1_qinfo.scale);

        // Input2 quantization info
        const int32x4_t voffset2 = vdupq_n_s32(input2_qinfo.offset);
        const float32x4_t vscale2 = vdupq_n_f32(input2_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<uint8_t *>(output.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
                                 vscale1, vscale2, voffseto, invvscaleo);
            for(; x < window_end_x; ++x)
            {
                const float afs = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo);
                const float bfs = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo);
                *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo);
            }
        },
        input1, input2, output);
    }
}

void elementwise_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window,
                                     int8_t (*scalar_func)(const float &, const float &, UniformQuantizationInfo),
                                     int (*broadcast_func)(int, int, int, const int8_t *, float32x4x4_t, int8_t *, int32x4_t, float32x4_t,
                                                           float32x4_t, float32x4_t, const bool),
                                     int (*neon_func)(int, int, int, const int8_t *, const int8_t *, int8_t *,
                                                      int32x4_t, int32x4_t, float32x4_t, float32x4_t,
                                                      float32x4_t, float32x4_t))
{
    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(in1->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(in2->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int window_step_x = 16;
    const auto window_start_x = static_cast<int>(window.x().start());
    const auto window_end_x = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = in1->info()->tensor_shape().x() != in2->info()->tensor_shape().x();

    const UniformQuantizationInfo output_qinfo = out->info()->quantization_info().uniform();

    const float32x4_t voffseto = vdupq_n_f32(output_qinfo.offset);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / output_qinfo.scale);

    if(is_broadcast_across_x)
    {
        // Select the broadcast input on the X axis
        const bool is_broadcast_input_2 = input2_win.x().step() == 0;
        Window broadcast_win = is_broadcast_input_2 ? input2_win : input1_win;
        Window non_broadcast_win = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor *broadcast_tensor = is_broadcast_input_2 ? in2 : in1;
        const ITensor *non_broadcast_tensor = !is_broadcast_input_2 ? in2 : in1;

        const UniformQuantizationInfo broadcast_qinfo = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo = non_broadcast_tensor->info()->quantization_info().uniform();

        const int32x4_t voffset_non_broadcast = vdupq_n_s32(non_broadcast_qinfo.offset);
        const float32x4_t vscale_non_broadcast = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int8_t *>(non_broadcast_input.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            const int8_t broadcast_value = *reinterpret_cast<const int8_t *>(broadcast_input.ptr());
            const float32x4x4_t broadcast_vector = vdequantize(vdupq_n_s8(broadcast_value), broadcast_qinfo);

            int x = (*broadcast_func)(window_start_x, window_end_x, window_step_x, non_broadcast_input_ptr, broadcast_vector, output_ptr,
                                      voffset_non_broadcast, vscale_non_broadcast, voffseto, invvscaleo, !is_broadcast_input_2);
            for(; x < window_end_x; ++x)
            {
                const float afs = dequantize_qasymm8_signed(*(non_broadcast_input_ptr + x), non_broadcast_qinfo);
                const float bfs = dequantize_qasymm8_signed(broadcast_value, broadcast_qinfo);
                *(output_ptr + x) = (*scalar_func)(!is_broadcast_input_2 ? bfs : afs, !is_broadcast_input_2 ? afs : bfs, output_qinfo);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        const UniformQuantizationInfo input1_qinfo = in1->info()->quantization_info().uniform();
        const UniformQuantizationInfo input2_qinfo = in2->info()->quantization_info().uniform();

        // Input1 quantization info
        const int32x4_t voffset1 = vdupq_n_s32(input1_qinfo.offset);
        const float32x4_t vscale1 = vdupq_n_f32(input1_qinfo.scale);

        // Input2 quantization info
        const int32x4_t voffset2 = vdupq_n_s32(input2_qinfo.offset);
        const float32x4_t vscale2 = vdupq_n_f32(input2_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(in1, input1_win);
        Iterator input2(in2, input2_win);
        Iterator output(out, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int8_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int8_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int8_t *>(output.ptr());

            int x = (*neon_func)(window_start_x, window_end_x, window_step_x, input1_ptr, input2_ptr, output_ptr, voffset1, voffset2,
                                 vscale1, vscale2, voffseto, invvscaleo);
            for(; x < window_end_x; ++x)
            {
                const float afs = dequantize_qasymm8_signed(*(input1_ptr + x), input1_qinfo);
                const float bfs = dequantize_qasymm8_signed(*(input2_ptr + x), input2_qinfo);
                *(output_ptr + x) = (*scalar_func)(afs, bfs, output_qinfo);
            }
        },
        input1, input2, output);
    }
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
void elementwise_comp_op_8(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op<InputScalarType, uint8_t, InputVectorType>(in1, in2, out, window,
                                                              &elementwise_comp_op_scalar<op, InputScalarType>,
                                                              &elementwise_comp_op_broadcast_8_loop<op, InputScalarType, InputVectorType>,
                                                              &elementwise_comp_op_8_loop<op, InputScalarType, InputVectorType>);
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
void elementwise_comp_op_16(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op<InputScalarType, uint8_t, InputVectorType>(in1, in2, out, window,
                                                              &elementwise_comp_op_scalar<op, InputScalarType>,
                                                              &elementwise_comp_op_broadcast_16_loop<op, InputScalarType, InputVectorType>,
                                                              &elementwise_comp_op_16_loop<op, InputScalarType, InputVectorType>);
}

template <ComparisonOperation op, typename InputScalarType, typename InputVectorType>
void elementwise_comp_op_32(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op<InputScalarType, uint8_t, InputVectorType>(in1, in2, out, window,
                                                              &elementwise_comp_op_scalar<op, InputScalarType>,
                                                              &elementwise_comp_op_broadcast_32_loop<op, InputScalarType, InputVectorType>,
                                                              &elementwise_comp_op_32_loop<op, InputScalarType, InputVectorType>);
}

template <ArithmeticOperation op, typename VectorType>
void elementwise_arithm_op(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    using scalar_type = typename VectorType::scalar_type;

    elementwise_op<scalar_type, scalar_type, VectorType>(in1, in2, out, window,
                                                         &elementwise_arithm_op_scalar<op, scalar_type>,
                                                         &elementwise_arithm_op_broadcast_loop<op, scalar_type, VectorType>,
                                                         &elementwise_arithm_op_loop<op, scalar_type, VectorType>);
}

template <ArithmeticOperation op>
void elementwise_arithm_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op_quantized(in1, in2, out, window, &elementwise_arithm_op_quantized_scalar<op>,
                             &elementwise_arithm_op_quantized_broadcast_loop<op>,
                             &elementwise_arithm_op_quantized_loop<op>);
}

template <ArithmeticOperation op>
void elementwise_arithm_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op_quantized_signed(in1, in2, out, window, &elementwise_arithm_op_quantized_signed_scalar<op>,
                                    &elementwise_arithm_op_quantized_signed_broadcast_loop<op>,
                                    &elementwise_arithm_op_quantized_signed_loop<op>);
1092 }
1093
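/** Comparison dispatchers for quantized types.
 *
 * As with the quantized arithmetic variants, inputs are dequantized to float
 * before the comparison; the result is stored as U8.
 */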
template <ComparisonOperation op>
void elementwise_comp_op_quantized(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_op_quantized(in1, in2, out, window, &elementwise_comp_op_quantized_scalar<op>,
                             &elementwise_comp_op_quantized_broadcast_loop<op>,
                             &elementwise_comp_op_quantized_loop<op>);
}

template <ComparisonOperation op>
void elementwise_comp_op_quantized_signed(const ITensor *in1, const ITensor *in2, ITensor *out, const Window &window)
{
    elementwise_comp_quantized_signed(in1, in2, out, window, &elementwise_comp_op_quantized_scalar<op>,
                                      &elementwise_comp_op_quantized_signed_broadcast_loop<op>,
                                      &elementwise_comp_op_quantized_signed_loop<op>);
}

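/** Look up the kernel implementation for a given data-type combination.
 *
 * The key is built as "op_<src0>_<src1>_<dst>" from the data-type names,
 * e.g. "op_F32_F32_F32" for a float arithmetic operation or "op_F32_F32_U8"
 * for a float comparison. Returns nullptr when no matching entry exists.
 */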
std::function<void(const ITensor *, const ITensor *, ITensor *, const Window &)>
configure_func(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output,
               std::map<std::string, NEElementwiseOperationKernel::ElementwiseFunction *> map_function)
{
    std::string function_to_call("op_");
    function_to_call += string_from_data_type(input1->data_type()) + "_";
    function_to_call += string_from_data_type(input2->data_type()) + "_";
    function_to_call += string_from_data_type(output->data_type());

    auto it = map_function.find(function_to_call);

    if(it != map_function.end())
    {
        auto func = it->second;
        return [func](const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window)
        {
            func(input1, input2, output, window);
        };
    }
    return nullptr;
}

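/** Build the dispatch table for an ArithmeticOperation and resolve it for
 * the given tensor infos. The F16 entry is only registered when the target
 * provides FP16 vector arithmetic.
 */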
template <ArithmeticOperation op>
std::function<void(const ITensor *, const ITensor *, ITensor *, const Window &)>
configure_arithm_func(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    static std::map<std::string, NEElementwiseOperationKernel::ElementwiseFunction *> map_function =
    {
        { "op_F32_F32_F32", &elementwise_arithm_op<op, typename wrapper::traits::neon_vector<float, 4>> },
        { "op_S16_S16_S16", &elementwise_arithm_op<op, typename wrapper::traits::neon_vector<int16_t, 8>> },
        { "op_S32_S32_S32", &elementwise_arithm_op<op, typename wrapper::traits::neon_vector<int32_t, 4>> },
        { "op_QASYMM8_QASYMM8_QASYMM8", &elementwise_arithm_op_quantized<op> },
        { "op_QASYMM8_SIGNED_QASYMM8_SIGNED_QASYMM8_SIGNED", &elementwise_arithm_op_quantized_signed<op> }
    };
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    map_function["op_F16_F16_F16"] = &elementwise_arithm_op<op, typename wrapper::traits::neon_vector<float16_t, 8>>;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

    return configure_func(input1, input2, output, map_function);
}

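/** Build the dispatch table for a ComparisonOperation and resolve it for
 * the given tensor infos. All comparison kernels produce a U8 output.
 */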
template <ComparisonOperation op>
std::function<void(const ITensor *input1, const ITensor *input2, ITensor *output, const Window &window)>
configure_comp_func(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    static std::map<std::string, NEElementwiseOperationKernel::ElementwiseFunction *> map_function =
    {
        { "op_U8_U8_U8", &elementwise_comp_op_8<op, uint8_t, uint8x16_t> },
        { "op_F32_F32_U8", &elementwise_comp_op_32<op, float, float32x4_t> },
        { "op_S16_S16_U8", &elementwise_comp_op_16<op, int16_t, int16x8_t> },
        { "op_S32_S32_U8", &elementwise_comp_op_32<op, int32_t, int32x4_t> },
        { "op_QASYMM8_SIGNED_QASYMM8_SIGNED_U8", &elementwise_comp_op_quantized_signed<op> },
        { "op_QASYMM8_QASYMM8_U8", &elementwise_comp_op_quantized<op> }
    };
#ifdef __ARM_FEATURE_FP16_VECTOR_ARITHMETIC
    map_function["op_F16_F16_U8"] = &elementwise_comp_op_16<op, float16_t, float16x8_t>;
#endif /* __ARM_FEATURE_FP16_VECTOR_ARITHMETIC */

    return configure_func(input1, input2, output, map_function);
}
} // namespace

NEElementwiseOperationKernel::NEElementwiseOperationKernel()
    : _function(nullptr), _input1(nullptr), _input2(nullptr), _output(nullptr)
{
}

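/** Checks shared by all elementwise kernels: matching input data types and
 * broadcast-compatible shapes, plus an output shape check when the output
 * is already configured.
 */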
Status NEElementwiseOperationKernel::validate_arguments_common(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_CPU_F16_UNSUPPORTED(&input1);
    ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &input2);

    const TensorShape out_shape = TensorShape::broadcast_shape(input1.tensor_shape(), input2.tensor_shape());

    ARM_COMPUTE_RETURN_ERROR_ON_MSG(out_shape.total_size() == 0, "Inputs are not broadcast compatible");

    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MSG(detail::have_different_dimensions(out_shape, output.tensor_shape(), 0),
                                        "Wrong shape for output");
    }

    return Status{};
}

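/** Shared configuration: computes the broadcast output shape and valid
 * region, auto-initializes the output if it is empty, and configures the
 * maximum execution window.
 */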
void NEElementwiseOperationKernel::configure_common(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_ON_NULLPTR(input1, input2, output);

    // Configure kernel window
    const std::pair<TensorShape, ValidRegion> broadcast_pair = ITensorInfo::broadcast_shape_and_valid_region(*input1, *input2);
    const TensorShape &out_shape    = broadcast_pair.first;
    const ValidRegion &valid_region = broadcast_pair.second;

    // Auto initialize output if not initialized
    auto_init_if_empty(*output, out_shape, 1, input1->data_type());

    Window win = calculate_max_window(valid_region);

    INEKernel::configure(win);
}

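/** Executes the function resolved at configure() time on the tensors packed
 * as ACL_SRC_0, ACL_SRC_1 and ACL_DST.
 */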
void NEElementwiseOperationKernel::run_op(ITensorPack &tensors, const Window &window, const ThreadInfo &info)
{
    ARM_COMPUTE_UNUSED(info, window);
    ARM_COMPUTE_ERROR_ON_UNCONFIGURED_KERNEL(this);
    ARM_COMPUTE_ERROR_ON_INVALID_SUBWINDOW(INEKernel::window(), window);
    ARM_COMPUTE_ERROR_ON(_function == nullptr);
    _function(tensors.get_const_tensor(TensorType::ACL_SRC_0),
              tensors.get_const_tensor(TensorType::ACL_SRC_1),
              tensors.get_tensor(TensorType::ACL_DST), window);
}

/** Arithmetic operators (min, max, squared_diff, prelu) */
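/* Illustrative usage sketch (hypothetical tensor-info names, assumed to be
 * already initialized with compatible shapes and data types):
 *
 *   NEArithmeticOperationKernel kernel;
 *   kernel.configure(ArithmeticOperation::MAX, &src0_info, &src1_info, &dst_info);
 */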
void NEArithmeticOperationKernel::configure(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1, *input2, *output));
    configure_common(input1, input2, output);
    switch(op)
    {
        case ArithmeticOperation::MAX:
            _function = configure_arithm_func<ArithmeticOperation::MAX>(input1, input2, output);
            break;
        case ArithmeticOperation::MIN:
            _function = configure_arithm_func<ArithmeticOperation::MIN>(input1, input2, output);
            break;
        case ArithmeticOperation::SQUARED_DIFF:
            _function = configure_arithm_func<ArithmeticOperation::SQUARED_DIFF>(input1, input2, output);
            break;
        case ArithmeticOperation::PRELU:
            _function = configure_arithm_func<ArithmeticOperation::PRELU>(input1, input2, output);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
    }
}

Status NEArithmeticOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::F16, DataType::S32, DataType::F32);
    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_MISMATCHING_DATA_TYPES(&input1, &output);
    }
    return validate_arguments_common(input1, input2, output);
}

Status NEArithmeticOperationKernel::validate(ArithmeticOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_UNUSED(op);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output));
    return Status{};
}

/** The division operator */
void NEDivisionOperationKernel::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1, *input2, *output));
    configure_common(input1, input2, output);
    _function = configure_arithm_func<ArithmeticOperation::DIV>(input1, input2, output);
}

Status NEDivisionOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::S32, DataType::F16, DataType::F32);
    return NEArithmeticOperationKernel::validate_arguments(input1, input2, output);
}

Status NEDivisionOperationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output));
    return Status{};
}

/** The power operator */
void NEPowerOperationKernel::configure(const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1, *input2, *output));
    configure_common(input1, input2, output);
    _function = configure_arithm_func<ArithmeticOperation::POWER>(input1, input2, output);
}

Status NEPowerOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::F16, DataType::F32);
    return NEArithmeticOperationKernel::validate_arguments(input1, input2, output);
}

Status NEPowerOperationKernel::validate(const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output));
    return Status{};
}

/** Comparison operators (equal, not equal, less than, greater than, less than or equal, greater than or equal) */
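/* Illustrative usage sketch (hypothetical names; the U8 output holds the
 * per-element comparison result):
 *
 *   NEComparisonOperationKernel kernel;
 *   kernel.configure(ComparisonOperation::Greater, &src0_info, &src1_info, &dst_info);
 */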
void NEComparisonOperationKernel::configure(ComparisonOperation op, const ITensorInfo *input1, const ITensorInfo *input2, ITensorInfo *output)
{
    ARM_COMPUTE_ERROR_THROW_ON(validate_arguments(*input1, *input2, *output));
    configure_common(input1, input2, output);
    switch(op)
    {
        case ComparisonOperation::Equal:
            _function = configure_comp_func<ComparisonOperation::Equal>(input1, input2, output);
            break;
        case ComparisonOperation::NotEqual:
            _function = configure_comp_func<ComparisonOperation::NotEqual>(input1, input2, output);
            break;
        case ComparisonOperation::Greater:
            _function = configure_comp_func<ComparisonOperation::Greater>(input1, input2, output);
            break;
        case ComparisonOperation::GreaterEqual:
            _function = configure_comp_func<ComparisonOperation::GreaterEqual>(input1, input2, output);
            break;
        case ComparisonOperation::Less:
            _function = configure_comp_func<ComparisonOperation::Less>(input1, input2, output);
            break;
        case ComparisonOperation::LessEqual:
            _function = configure_comp_func<ComparisonOperation::LessEqual>(input1, input2, output);
            break;
        default:
            ARM_COMPUTE_ERROR("NOT_SUPPORTED!");
    }
}

Status NEComparisonOperationKernel::validate_arguments(const ITensorInfo &input1, const ITensorInfo &input2, const ITensorInfo &output)
{
    ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&input1, 1, DataType::U8, DataType::QASYMM8, DataType::QASYMM8_SIGNED, DataType::S16, DataType::F16, DataType::S32, DataType::F32);
    // Validate in case of configured output
    if(output.total_size() > 0)
    {
        ARM_COMPUTE_RETURN_ERROR_ON_DATA_TYPE_CHANNEL_NOT_IN(&output, 1, DataType::U8);
    }
    return validate_arguments_common(input1, input2, output);
}

Status NEComparisonOperationKernel::validate(ComparisonOperation op, const ITensorInfo *input1, const ITensorInfo *input2, const ITensorInfo *output)
{
    ARM_COMPUTE_UNUSED(op);
    ARM_COMPUTE_RETURN_ERROR_ON_NULLPTR(input1, input2, output);
    ARM_COMPUTE_RETURN_ON_ERROR(validate_arguments(*input1, *input2, *output));
    return Status{};
}
} // namespace arm_compute