/*
 * Copyright (c) 2019, Alliance for Open Media. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AOM_AOM_DSP_ARM_SUM_NEON_H_
#define AOM_AOM_DSP_ARM_SUM_NEON_H_

#include <arm_neon.h>

#include "config/aom_dsp_rtcd.h"
#include "config/aom_config.h"

#include "aom/aom_integer.h"
#include "aom_ports/mem.h"

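// Widening sum of the eight u8 lanes of a.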
static INLINE int horizontal_add_u8x8(const uint8x8_t a) {
#if AOM_ARCH_AARCH64
  return vaddlv_u8(a);
#else
  uint16x4_t b = vpaddl_u8(a);
  uint32x2_t c = vpaddl_u16(b);
  return vget_lane_u32(c, 0) + vget_lane_u32(c, 1);
#endif
}

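// Sum of the eight s16 lanes of a.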
static INLINE int horizontal_add_s16x8(const int16x8_t a) {
#if AOM_ARCH_AARCH64
  return vaddlvq_s16(a);
#else
  const int32x4_t b = vpaddlq_s16(a);
  const int64x2_t c = vpaddlq_s32(b);
  const int32x2_t d = vadd_s32(vreinterpret_s32_s64(vget_low_s64(c)),
                               vreinterpret_s32_s64(vget_high_s64(c)));
  return vget_lane_s32(d, 0);
#endif
}

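// Sum of the four s32 lanes of a.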
static INLINE int horizontal_add_s32x4(const int32x4_t a) {
#if AOM_ARCH_AARCH64
  return vaddvq_s32(a);
#else
  const int64x2_t b = vpaddlq_s32(a);
  const int32x2_t c = vadd_s32(vreinterpret_s32_s64(vget_low_s64(b)),
                               vreinterpret_s32_s64(vget_high_s64(b)));
  return vget_lane_s32(c, 0);
#endif
}

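// Sum of the two s64 lanes of a.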
static INLINE int64_t horizontal_add_s64x2(const int64x2_t a) {
#if AOM_ARCH_AARCH64
  return vaddvq_s64(a);
#else
  return vgetq_lane_s64(a, 0) + vgetq_lane_s64(a, 1);
#endif
}

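// Sum of the two u64 lanes of a.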
static INLINE uint64_t horizontal_add_u64x2(const uint64x2_t a) {
#if AOM_ARCH_AARCH64
  return vaddvq_u64(a);
#else
  return vgetq_lane_u64(a, 0) + vgetq_lane_u64(a, 1);
#endif
}

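// Widening sum of the four u32 lanes of a, returned as a u64.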
static INLINE uint64_t horizontal_long_add_u32x4(const uint32x4_t a) {
#if AOM_ARCH_AARCH64
  return vaddlvq_u32(a);
#else
  const uint64x2_t b = vpaddlq_u32(a);
  return vgetq_lane_u64(b, 0) + vgetq_lane_u64(b, 1);
#endif
}

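// Widening sum of the four s32 lanes of a, returned as an s64.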
static INLINE int64_t horizontal_long_add_s32x4(const int32x4_t a) {
#if AOM_ARCH_AARCH64
  return vaddlvq_s32(a);
#else
  const int64x2_t b = vpaddlq_s32(a);
  return vgetq_lane_s64(b, 0) + vgetq_lane_s64(b, 1);
#endif
}

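// Sum of the four u32 lanes of a.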
static INLINE uint32_t horizontal_add_u32x4(const uint32x4_t a) {
#if AOM_ARCH_AARCH64
  return vaddvq_u32(a);
#else
  const uint64x2_t b = vpaddlq_u32(a);
  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                vreinterpret_u32_u64(vget_high_u64(b)));
  return vget_lane_u32(c, 0);
#endif
}

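// Reduce four vectors at once: lane i of the result holds the sum of the
// four u32 lanes of sum[i].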
static INLINE uint32x4_t horizontal_add_4d_u32x4(const uint32x4_t sum[4]) {
#if AOM_ARCH_AARCH64
  uint32x4_t res01 = vpaddq_u32(sum[0], sum[1]);
  uint32x4_t res23 = vpaddq_u32(sum[2], sum[3]);
  return vpaddq_u32(res01, res23);
#else
  uint32x4_t res = vdupq_n_u32(0);
  res = vsetq_lane_u32(horizontal_add_u32x4(sum[0]), res, 0);
  res = vsetq_lane_u32(horizontal_add_u32x4(sum[1]), res, 1);
  res = vsetq_lane_u32(horizontal_add_u32x4(sum[2]), res, 2);
  res = vsetq_lane_u32(horizontal_add_u32x4(sum[3]), res, 3);
  return res;
#endif
}

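// Reduce four vectors at once: lane i of the result holds the sum of the
// four s32 lanes of sum[i].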
static INLINE int32x4_t horizontal_add_4d_s32x4(const int32x4_t sum[4]) {
#if AOM_ARCH_AARCH64
  int32x4_t res01 = vpaddq_s32(sum[0], sum[1]);
  int32x4_t res23 = vpaddq_s32(sum[2], sum[3]);
  return vpaddq_s32(res01, res23);
#else
  int32x4_t res = vdupq_n_s32(0);
  res = vsetq_lane_s32(horizontal_add_s32x4(sum[0]), res, 0);
  res = vsetq_lane_s32(horizontal_add_s32x4(sum[1]), res, 1);
  res = vsetq_lane_s32(horizontal_add_s32x4(sum[2]), res, 2);
  res = vsetq_lane_s32(horizontal_add_s32x4(sum[3]), res, 3);
  return res;
#endif
}

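// Widening sum of all sixteen u16 lanes across vec_lo and vec_hi.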
static INLINE uint32_t horizontal_long_add_u16x8(const uint16x8_t vec_lo,
                                                 const uint16x8_t vec_hi) {
#if AOM_ARCH_AARCH64
  return vaddlvq_u16(vec_lo) + vaddlvq_u16(vec_hi);
#else
  const uint32x4_t vec_l_lo =
      vaddl_u16(vget_low_u16(vec_lo), vget_high_u16(vec_lo));
  const uint32x4_t vec_l_hi =
      vaddl_u16(vget_low_u16(vec_hi), vget_high_u16(vec_hi));
  const uint32x4_t a = vaddq_u32(vec_l_lo, vec_l_hi);
  const uint64x2_t b = vpaddlq_u32(a);
  const uint32x2_t c = vadd_u32(vreinterpret_u32_u64(vget_low_u64(b)),
                                vreinterpret_u32_u64(vget_high_u64(b)));
  return vget_lane_u32(c, 0);
#endif
}

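// Reduce four pairs of vectors at once: lane i of the result holds the
// widened u32 sum of all sixteen u16 lanes of sum_lo[i] and sum_hi[i].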
static INLINE uint32x4_t horizontal_long_add_4d_u16x8(
    const uint16x8_t sum_lo[4], const uint16x8_t sum_hi[4]) {
  const uint32x4_t a0 = vpaddlq_u16(sum_lo[0]);
  const uint32x4_t a1 = vpaddlq_u16(sum_lo[1]);
  const uint32x4_t a2 = vpaddlq_u16(sum_lo[2]);
  const uint32x4_t a3 = vpaddlq_u16(sum_lo[3]);
  const uint32x4_t b0 = vpadalq_u16(a0, sum_hi[0]);
  const uint32x4_t b1 = vpadalq_u16(a1, sum_hi[1]);
  const uint32x4_t b2 = vpadalq_u16(a2, sum_hi[2]);
  const uint32x4_t b3 = vpadalq_u16(a3, sum_hi[3]);
#if AOM_ARCH_AARCH64
  const uint32x4_t c0 = vpaddq_u32(b0, b1);
  const uint32x4_t c1 = vpaddq_u32(b2, b3);
  return vpaddq_u32(c0, c1);
#else
  const uint32x2_t c0 = vadd_u32(vget_low_u32(b0), vget_high_u32(b0));
  const uint32x2_t c1 = vadd_u32(vget_low_u32(b1), vget_high_u32(b1));
  const uint32x2_t c2 = vadd_u32(vget_low_u32(b2), vget_high_u32(b2));
  const uint32x2_t c3 = vadd_u32(vget_low_u32(b3), vget_high_u32(b3));
  const uint32x2_t d0 = vpadd_u32(c0, c1);
  const uint32x2_t d1 = vpadd_u32(c2, c3);
  return vcombine_u32(d0, d1);
#endif
}

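// Widening sum of the eight u16 lanes of a.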
static INLINE uint32_t horizontal_add_u16x8(const uint16x8_t a) {
#if AOM_ARCH_AARCH64
  return vaddlvq_u16(a);
#else
  const uint32x4_t b = vpaddlq_u16(a);
  const uint64x2_t c = vpaddlq_u32(b);
  const uint32x2_t d = vadd_u32(vreinterpret_u32_u64(vget_low_u64(c)),
                                vreinterpret_u32_u64(vget_high_u64(c)));
  return vget_lane_u32(d, 0);
#endif
}

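// Reduce four vectors at once: lane i of the result holds the u32 sum of
// the eight u16 lanes of sum[i]. Note the intermediate additions are
// performed in 16 bits, so the inputs must be small enough not to wrap.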
static INLINE uint32x4_t horizontal_add_4d_u16x8(const uint16x8_t sum[4]) {
#if AOM_ARCH_AARCH64
  const uint16x8_t a0 = vpaddq_u16(sum[0], sum[1]);
  const uint16x8_t a1 = vpaddq_u16(sum[2], sum[3]);
  const uint16x8_t b0 = vpaddq_u16(a0, a1);
  return vpaddlq_u16(b0);
#else
  const uint16x4_t a0 = vadd_u16(vget_low_u16(sum[0]), vget_high_u16(sum[0]));
  const uint16x4_t a1 = vadd_u16(vget_low_u16(sum[1]), vget_high_u16(sum[1]));
  const uint16x4_t a2 = vadd_u16(vget_low_u16(sum[2]), vget_high_u16(sum[2]));
  const uint16x4_t a3 = vadd_u16(vget_low_u16(sum[3]), vget_high_u16(sum[3]));
  const uint16x4_t b0 = vpadd_u16(a0, a1);
  const uint16x4_t b1 = vpadd_u16(a2, a3);
  return vpaddlq_u16(vcombine_u16(b0, b1));
#endif
}

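// Reduce four vectors at once: lane i of the result holds the s32 sum of
// the eight s16 lanes of sum[i]. Note the intermediate additions are
// performed in 16 bits, so the inputs must be small enough not to wrap.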
static INLINE int32x4_t horizontal_add_4d_s16x8(const int16x8_t sum[4]) {
#if AOM_ARCH_AARCH64
  const int16x8_t a0 = vpaddq_s16(sum[0], sum[1]);
  const int16x8_t a1 = vpaddq_s16(sum[2], sum[3]);
  const int16x8_t b0 = vpaddq_s16(a0, a1);
  return vpaddlq_s16(b0);
#else
  const int16x4_t a0 = vadd_s16(vget_low_s16(sum[0]), vget_high_s16(sum[0]));
  const int16x4_t a1 = vadd_s16(vget_low_s16(sum[1]), vget_high_s16(sum[1]));
  const int16x4_t a2 = vadd_s16(vget_low_s16(sum[2]), vget_high_s16(sum[2]));
  const int16x4_t a3 = vadd_s16(vget_low_s16(sum[3]), vget_high_s16(sum[3]));
  const int16x4_t b0 = vpadd_s16(a0, a1);
  const int16x4_t b1 = vpadd_s16(a2, a3);
  return vpaddlq_s16(vcombine_s16(b0, b1));
#endif
}

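// Sum of the two u32 lanes of a.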
static INLINE uint32_t horizontal_add_u32x2(const uint32x2_t a) {
#if AOM_ARCH_AARCH64
  return vaddv_u32(a);
#else
  const uint64x1_t b = vpaddl_u32(a);
  return vget_lane_u32(vreinterpret_u32_u64(b), 0);
#endif
}

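// Widening sum of the two u32 lanes of a, returned as a u64.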
static INLINE uint64_t horizontal_long_add_u32x2(const uint32x2_t a) {
#if AOM_ARCH_AARCH64
  return vaddlv_u32(a);
#else
  const uint64x1_t b = vpaddl_u32(a);
  return vget_lane_u64(b, 0);
#endif
}

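// Widening sum of the four u16 lanes of a.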
static INLINE uint32_t horizontal_add_u16x4(const uint16x4_t a) {
#if AOM_ARCH_AARCH64
  return vaddlv_u16(a);
#else
  const uint32x2_t b = vpaddl_u16(a);
  const uint64x1_t c = vpaddl_u32(b);
  return vget_lane_u32(vreinterpret_u32_u64(c), 0);
#endif
}

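// Pairwise add across two vectors: returns { a0+a1, a2+a3, b0+b1, b2+b3 }.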
static INLINE int32x4_t horizontal_add_2d_s32(int32x4_t a, int32x4_t b) {
#if AOM_ARCH_AARCH64
  return vpaddq_s32(a, b);
#else
  const int32x2_t a0 = vpadd_s32(vget_low_s32(a), vget_high_s32(a));
  const int32x2_t b0 = vpadd_s32(vget_low_s32(b), vget_high_s32(b));
  return vcombine_s32(a0, b0);
#endif
}

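// Pairwise add within one vector: returns { a0+a1, a2+a3 }.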
static INLINE int32x2_t add_pairwise_s32x4(int32x4_t a) {
#if AOM_ARCH_AARCH64
  return vget_low_s32(vpaddq_s32(a, a));
#else
  return vpadd_s32(vget_low_s32(a), vget_high_s32(a));
#endif
}

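// Widening sum of all u32 lanes across two vectors.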
static INLINE uint64_t horizontal_long_add_u32x4_x2(const uint32x4_t a[2]) {
  return horizontal_long_add_u32x4(a[0]) + horizontal_long_add_u32x4(a[1]);
}

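// Widening sum of all u32 lanes across four vectors, accumulated into a
// u64 vector before the final reduction.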
static INLINE uint64_t horizontal_long_add_u32x4_x4(const uint32x4_t a[4]) {
  uint64x2_t sum = vpaddlq_u32(a[0]);
  sum = vpadalq_u32(sum, a[1]);
  sum = vpadalq_u32(sum, a[2]);
  sum = vpadalq_u32(sum, a[3]);

  return horizontal_add_u64x2(sum);
}

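// Widening sum of all u32 lanes across eight vectors. Two independent
// accumulators are used to shorten the dependency chain.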
static INLINE uint64_t horizontal_long_add_u32x4_x8(const uint32x4_t a[8]) {
  uint64x2_t sum[2];
  sum[0] = vpaddlq_u32(a[0]);
  sum[1] = vpaddlq_u32(a[1]);
  sum[0] = vpadalq_u32(sum[0], a[2]);
  sum[1] = vpadalq_u32(sum[1], a[3]);
  sum[0] = vpadalq_u32(sum[0], a[4]);
  sum[1] = vpadalq_u32(sum[1], a[5]);
  sum[0] = vpadalq_u32(sum[0], a[6]);
  sum[1] = vpadalq_u32(sum[1], a[7]);

  return horizontal_add_u64x2(vaddq_u64(sum[0], sum[1]));
}

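// Widening sum of all u32 lanes across sixteen vectors. Two independent
// accumulators are used to shorten the dependency chain.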
static INLINE uint64_t horizontal_long_add_u32x4_x16(const uint32x4_t a[16]) {
  uint64x2_t sum[2];
  sum[0] = vpaddlq_u32(a[0]);
  sum[1] = vpaddlq_u32(a[1]);
  sum[0] = vpadalq_u32(sum[0], a[2]);
  sum[1] = vpadalq_u32(sum[1], a[3]);
  sum[0] = vpadalq_u32(sum[0], a[4]);
  sum[1] = vpadalq_u32(sum[1], a[5]);
  sum[0] = vpadalq_u32(sum[0], a[6]);
  sum[1] = vpadalq_u32(sum[1], a[7]);
  sum[0] = vpadalq_u32(sum[0], a[8]);
  sum[1] = vpadalq_u32(sum[1], a[9]);
  sum[0] = vpadalq_u32(sum[0], a[10]);
  sum[1] = vpadalq_u32(sum[1], a[11]);
  sum[0] = vpadalq_u32(sum[0], a[12]);
  sum[1] = vpadalq_u32(sum[1], a[13]);
  sum[0] = vpadalq_u32(sum[0], a[14]);
  sum[1] = vpadalq_u32(sum[1], a[15]);

  return horizontal_add_u64x2(vaddq_u64(sum[0], sum[1]));
}

#endif  // AOM_AOM_DSP_ARM_SUM_NEON_H_