/*
 * Copyright (c) 2020-2021 Arm Limited.
 *
 * SPDX-License-Identifier: MIT
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in all
 * copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */
#include "arm_compute/core/Helpers.h"
#include "arm_compute/core/ITensor.h"
#include "arm_compute/core/Types.h"
#include "arm_compute/core/utils/misc/Traits.h"
#include "src/core/NEON/wrapper/intrinsics/intrinsics.h"
#include "src/core/helpers/WindowHelpers.h"

namespace arm_compute
{
namespace cpu
{
void add_qsymm16_neon(const ITensor *src0, const ITensor *src1, ITensor *dst, const ConvertPolicy &policy, const Window &window)
{
    ARM_COMPUTE_UNUSED(policy);

    // Create input windows
    Window input1_win = window.broadcast_if_dimension_le_one(src0->info()->tensor_shape());
    Window input2_win = window.broadcast_if_dimension_le_one(src1->info()->tensor_shape());

    // Clear X Dimension on execution window as we handle manually
    Window win = window;
    win.set(Window::DimX, Window::Dimension(0, 1, 1));

    const int  window_step_x         = 8;
    const auto window_start_x        = static_cast<int>(window.x().start());
    const auto window_end_x          = static_cast<int>(window.x().end());
    const bool is_broadcast_across_x = src0->info()->tensor_shape().x() != src1->info()->tensor_shape().x();

    const UniformQuantizationInfo iq1_info = src0->info()->quantization_info().uniform();
    const UniformQuantizationInfo iq2_info = src1->info()->quantization_info().uniform();
    const UniformQuantizationInfo oq_info  = dst->info()->quantization_info().uniform();

    const float32x4_t vscale1    = vdupq_n_f32(iq1_info.scale);
    const float32x4_t vscale2    = vdupq_n_f32(iq2_info.scale);
    const float32x4_t invvscaleo = vdupq_n_f32(1.f / oq_info.scale);

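    // Both branches below follow the same scheme: dequantize the int16 inputs to float, add them, and
    // requantize the sum with 1 / oq_info.scale, i.e.
    //   out = saturate_to_int16(round((a * scale_a + b * scale_b) / oq_info.scale))
    // Rounding is round-to-nearest on AArch64 (vcvtnq_s32_f32) and truncation toward zero otherwise
    // (vcvtq_s32_f32); vqmovn_s32 provides the saturating narrow to int16.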
    if(is_broadcast_across_x)
    {
        const bool                    is_broadcast_input_2 = input2_win.x().step() == 0;
        Window                        broadcast_win        = is_broadcast_input_2 ? input2_win : input1_win;
        Window                        non_broadcast_win    = !is_broadcast_input_2 ? input2_win : input1_win;
        const ITensor                *broadcast_tensor     = is_broadcast_input_2 ? src1 : src0;
        const ITensor                *non_broadcast_tensor = !is_broadcast_input_2 ? src1 : src0;
        const UniformQuantizationInfo broadcast_qinfo      = broadcast_tensor->info()->quantization_info().uniform();
        const UniformQuantizationInfo non_broadcast_qinfo  = non_broadcast_tensor->info()->quantization_info().uniform();

        // Use the scales of the actual broadcast / non-broadcast inputs so the vectorized path
        // agrees with the scalar leftover path when the broadcast input is input 1
        const float32x4_t vscale_bc     = vdupq_n_f32(broadcast_qinfo.scale);
        const float32x4_t vscale_non_bc = vdupq_n_f32(non_broadcast_qinfo.scale);

        // Clear X Dimension on execution window as we handle manually
        non_broadcast_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator broadcast_input(broadcast_tensor, broadcast_win);
        Iterator non_broadcast_input(non_broadcast_tensor, non_broadcast_win);
        Iterator output(dst, win);

        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto non_broadcast_input_ptr = reinterpret_cast<const int16_t *>(non_broadcast_input.ptr());
            const auto output_ptr              = reinterpret_cast<int16_t *>(output.ptr());

            const int16_t   broadcast_value     = *reinterpret_cast<const int16_t *>(broadcast_input.ptr());
            const int16x8_t broadcast_value_vec = vdupq_n_s16(broadcast_value);

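            // Dequantize the broadcast value once here, outside the inner loop over x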
            const auto  bf_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(broadcast_value_vec))), vscale_bc);
            const auto  bf_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(broadcast_value_vec))), vscale_bc);
            const float bfs  = static_cast<int32_t>(broadcast_value) * broadcast_qinfo.scale;

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int16x8_t a    = vld1q_s16(non_broadcast_input_ptr + x);
                const auto      af_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale_non_bc);
                const auto      af_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale_non_bc);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
#ifdef __aarch64__
                rf_0 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
                rf_1 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#else  //__aarch64__
                rf_0 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
                rf_1 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#endif //__aarch64__

                const int16x8_t pa = vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1));
                vst1q_s16(output_ptr + x, pa);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>(*(non_broadcast_input_ptr + x)) * non_broadcast_qinfo.scale;
                *(output_ptr + x) = quantize_qsymm16((afs + bfs), oq_info);
            }
        },
        broadcast_input, non_broadcast_input, output);
    }
    else
    {
        // Clear X Dimension on execution window as we handle manually
        input1_win.set(Window::DimX, Window::Dimension(0, 1, 1));
        input2_win.set(Window::DimX, Window::Dimension(0, 1, 1));

        Iterator input1(src0, input1_win);
        Iterator input2(src1, input2_win);
        Iterator output(dst, win);

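        // No broadcasting: both inputs are traversed element by element with the same window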
        execute_window_loop(win, [&](const Coordinates &)
        {
            const auto input1_ptr = reinterpret_cast<const int16_t *>(input1.ptr());
            const auto input2_ptr = reinterpret_cast<const int16_t *>(input2.ptr());
            const auto output_ptr = reinterpret_cast<int16_t *>(output.ptr());

            // Compute window_step_x elements per iteration
            int x = window_start_x;
            for(; x <= (window_end_x - window_step_x); x += window_step_x)
            {
                const int16x8_t a = vld1q_s16(input1_ptr + x);
                const int16x8_t b = vld1q_s16(input2_ptr + x);

                const auto af_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(a))), vscale1);
                const auto af_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(a))), vscale1);
                const auto bf_0 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_low_s16(b))), vscale2);
                const auto bf_1 = vmulq_f32(vcvtq_f32_s32(vmovl_s16(vget_high_s16(b))), vscale2);

                int32x4_t rf_0{};
                int32x4_t rf_1{};
#ifdef __aarch64__
                rf_0 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
                rf_1 = vcvtnq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#else  //__aarch64__
                rf_0 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_0, bf_0), invvscaleo));
                rf_1 = vcvtq_s32_f32(vmulq_f32(vaddq_f32(af_1, bf_1), invvscaleo));
#endif //__aarch64__

                const int16x8_t pa = vcombine_s16(vqmovn_s32(rf_0), vqmovn_s32(rf_1));
                vst1q_s16(output_ptr + x, pa);
            }

            // Compute left-over elements
            for(; x < window_end_x; ++x)
            {
                const float afs   = static_cast<int32_t>((*(input1_ptr + x))) * iq1_info.scale;
                const float bfs   = static_cast<int32_t>((*(input2_ptr + x))) * iq2_info.scale;
                *(output_ptr + x) = quantize_qsymm16((afs + bfs), dst->info()->quantization_info());
            }
        },
        input1, input2, output);
    }
}
} // namespace cpu
} // namespace arm_compute