/*
 * Copyright (c) 2018, Alliance for Open Media. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_
#define AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_

#include <arm_neon.h>

#include "config/aom_config.h"

#include "aom_dsp/arm/mem_neon.h"
#include "av1/common/convolve.h"
#include "av1/common/filter.h"

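// Vertical pass of a 12-tap 2D convolution over a single column of four
// pixels. The twelve weighted rows are accumulated at 32-bit precision;
// rounding and narrowing are left to the caller.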
static INLINE int32x4_t
convolve12_4_2d_v(const int16x4_t s0, const int16x4_t s1, const int16x4_t s2,
                  const int16x4_t s3, const int16x4_t s4, const int16x4_t s5,
                  const int16x4_t s6, const int16x4_t s7, const int16x4_t s8,
                  const int16x4_t s9, const int16x4_t s10, const int16x4_t s11,
                  const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) {
  const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7);
  const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7);

  int32x4_t sum = vmull_lane_s16(s0, y_filter_0_3, 0);
  sum = vmlal_lane_s16(sum, s1, y_filter_0_3, 1);
  sum = vmlal_lane_s16(sum, s2, y_filter_0_3, 2);
  sum = vmlal_lane_s16(sum, s3, y_filter_0_3, 3);
  sum = vmlal_lane_s16(sum, s4, y_filter_4_7, 0);
  sum = vmlal_lane_s16(sum, s5, y_filter_4_7, 1);
  sum = vmlal_lane_s16(sum, s6, y_filter_4_7, 2);
  sum = vmlal_lane_s16(sum, s7, y_filter_4_7, 3);
  sum = vmlal_lane_s16(sum, s8, y_filter_8_11, 0);
  sum = vmlal_lane_s16(sum, s9, y_filter_8_11, 1);
  sum = vmlal_lane_s16(sum, s10, y_filter_8_11, 2);
  sum = vmlal_lane_s16(sum, s11, y_filter_8_11, 3);

  return sum;
}

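// As above, but for a row of eight pixels: accumulate in two 32-bit halves,
// narrow each half with a saturating rounding shift, subtract the offset
// carried over from the horizontal pass and saturate to u8.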
static INLINE uint8x8_t
convolve12_8_2d_v(const int16x8_t s0, const int16x8_t s1, const int16x8_t s2,
                  const int16x8_t s3, const int16x8_t s4, const int16x8_t s5,
                  const int16x8_t s6, const int16x8_t s7, const int16x8_t s8,
                  const int16x8_t s9, const int16x8_t s10, const int16x8_t s11,
                  const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11,
                  const int16x8_t sub_const) {
  const int16x4_t y_filter_0_3 = vget_low_s16(y_filter_0_7);
  const int16x4_t y_filter_4_7 = vget_high_s16(y_filter_0_7);

  int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_0_3, 0);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_0_3, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_0_3, 2);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_0_3, 3);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_4_7, 0);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_4_7, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_4_7, 2);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_4_7, 3);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s8), y_filter_8_11, 0);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s9), y_filter_8_11, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s10), y_filter_8_11, 2);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s11), y_filter_8_11, 3);

  int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_0_3, 0);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_0_3, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_0_3, 2);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_0_3, 3);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_4_7, 0);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_4_7, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_4_7, 2);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_4_7, 3);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s8), y_filter_8_11, 0);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s9), y_filter_8_11, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s10), y_filter_8_11, 2);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s11), y_filter_8_11, 3);

  int16x8_t res =
      vcombine_s16(vqrshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS),
                   vqrshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS));
  res = vsubq_s16(res, sub_const);

  return vqmovun_s16(res);
}

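// 12-tap vertical pass of the 2D convolution, consuming the 16-bit
// intermediate buffer written by the preceding horizontal pass. sub_const
// (1 << (bd - 1)) removes the residual of the offset that the horizontal
// pass added to keep the intermediate values non-negative.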
static INLINE void convolve_2d_sr_vert_12tap_neon(
    int16_t *src_ptr, int src_stride, uint8_t *dst_ptr, int dst_stride, int w,
    int h, const int16x8_t y_filter_0_7, const int16x4_t y_filter_8_11) {
  const int bd = 8;
  const int16x8_t sub_const = vdupq_n_s16(1 << (bd - 1));

  if (w <= 4) {
    int16x4_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
    load_s16_4x11(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7,
                  &s8, &s9, &s10);
    src_ptr += 11 * src_stride;

    do {
      int16x4_t s11, s12, s13, s14;
      load_s16_4x4(src_ptr, src_stride, &s11, &s12, &s13, &s14);

      int32x4_t d0 = convolve12_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9,
                                       s10, s11, y_filter_0_7, y_filter_8_11);
      int32x4_t d1 = convolve12_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10,
                                       s11, s12, y_filter_0_7, y_filter_8_11);
      int32x4_t d2 = convolve12_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, s10,
                                       s11, s12, s13, y_filter_0_7,
                                       y_filter_8_11);
      int32x4_t d3 =
          convolve12_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,
                            s14, y_filter_0_7, y_filter_8_11);

      int16x8_t dd01 =
          vcombine_s16(vqrshrn_n_s32(d0, 2 * FILTER_BITS - ROUND0_BITS),
                       vqrshrn_n_s32(d1, 2 * FILTER_BITS - ROUND0_BITS));
      int16x8_t dd23 =
          vcombine_s16(vqrshrn_n_s32(d2, 2 * FILTER_BITS - ROUND0_BITS),
                       vqrshrn_n_s32(d3, 2 * FILTER_BITS - ROUND0_BITS));

      dd01 = vsubq_s16(dd01, sub_const);
      dd23 = vsubq_s16(dd23, sub_const);

      uint8x8_t d01 = vqmovun_s16(dd01);
      uint8x8_t d23 = vqmovun_s16(dd23);

      store_u8x4_strided_x2(dst_ptr + 0 * dst_stride, dst_stride, d01);
      store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23);

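      // Slide the sample window down four rows for the next iteration.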
      s0 = s4;
      s1 = s5;
      s2 = s6;
      s3 = s7;
      s4 = s8;
      s5 = s9;
      s6 = s10;
      s7 = s11;
      s8 = s12;
      s9 = s13;
      s10 = s14;
      src_ptr += 4 * src_stride;
      dst_ptr += 4 * dst_stride;
      h -= 4;
    } while (h != 0);

  } else {
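    // Width is a multiple of 8 and height is a multiple of 4.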
    do {
      int height = h;
      int16_t *s = src_ptr;
      uint8_t *d = dst_ptr;

      int16x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
      load_s16_8x11(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7, &s8,
                    &s9, &s10);
      s += 11 * src_stride;

      do {
        int16x8_t s11, s12, s13, s14;
        load_s16_8x4(s, src_stride, &s11, &s12, &s13, &s14);

        uint8x8_t d0 =
            convolve12_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11,
                              y_filter_0_7, y_filter_8_11, sub_const);
        uint8x8_t d1 =
            convolve12_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, s9, s10, s11,
                              s12, y_filter_0_7, y_filter_8_11, sub_const);
        uint8x8_t d2 =
            convolve12_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, s10, s11, s12,
                              s13, y_filter_0_7, y_filter_8_11, sub_const);
        uint8x8_t d3 =
            convolve12_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, s11, s12, s13,
                              s14, y_filter_0_7, y_filter_8_11, sub_const);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s0 = s4;
        s1 = s5;
        s2 = s6;
        s3 = s7;
        s4 = s8;
        s5 = s9;
        s6 = s10;
        s7 = s11;
        s8 = s12;
        s9 = s13;
        s10 = s14;
        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src_ptr += 8;
      dst_ptr += 8;
      w -= 8;
    } while (w != 0);
  }
}

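// Vertical pass of an 8-tap 2D convolution for a column of four pixels,
// accumulated at 32-bit precision and narrowed back to 16 bits with a
// saturating rounding shift.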
static INLINE int16x4_t convolve8_4_2d_v(const int16x4_t s0, const int16x4_t s1,
                                         const int16x4_t s2, const int16x4_t s3,
                                         const int16x4_t s4, const int16x4_t s5,
                                         const int16x4_t s6, const int16x4_t s7,
                                         const int16x8_t y_filter) {
  const int16x4_t y_filter_lo = vget_low_s16(y_filter);
  const int16x4_t y_filter_hi = vget_high_s16(y_filter);

  int32x4_t sum = vmull_lane_s16(s0, y_filter_lo, 0);
  sum = vmlal_lane_s16(sum, s1, y_filter_lo, 1);
  sum = vmlal_lane_s16(sum, s2, y_filter_lo, 2);
  sum = vmlal_lane_s16(sum, s3, y_filter_lo, 3);
  sum = vmlal_lane_s16(sum, s4, y_filter_hi, 0);
  sum = vmlal_lane_s16(sum, s5, y_filter_hi, 1);
  sum = vmlal_lane_s16(sum, s6, y_filter_hi, 2);
  sum = vmlal_lane_s16(sum, s7, y_filter_hi, 3);

  return vqrshrn_n_s32(sum, 2 * FILTER_BITS - ROUND0_BITS);
}

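// As above, but for a row of eight pixels; additionally removes the
// horizontal-pass offset and saturates the result to u8.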
static INLINE uint8x8_t convolve8_8_2d_v(const int16x8_t s0, const int16x8_t s1,
                                         const int16x8_t s2, const int16x8_t s3,
                                         const int16x8_t s4, const int16x8_t s5,
                                         const int16x8_t s6, const int16x8_t s7,
                                         const int16x8_t y_filter,
                                         const int16x8_t sub_const) {
  const int16x4_t y_filter_lo = vget_low_s16(y_filter);
  const int16x4_t y_filter_hi = vget_high_s16(y_filter);

  int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_lo, 0);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_lo, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_lo, 2);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_lo, 3);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_hi, 0);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_hi, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s6), y_filter_hi, 2);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s7), y_filter_hi, 3);

  int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_lo, 0);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_lo, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_lo, 2);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_lo, 3);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_hi, 0);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_hi, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s6), y_filter_hi, 2);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s7), y_filter_hi, 3);

  int16x8_t res =
      vcombine_s16(vqrshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS),
                   vqrshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS));
  res = vsubq_s16(res, sub_const);

  return vqmovun_s16(res);
}

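// 8-tap vertical pass of the 2D convolution over the 16-bit intermediate
// buffer produced by the horizontal pass.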
static INLINE void convolve_2d_sr_vert_8tap_neon(int16_t *src_ptr,
                                                 int src_stride,
                                                 uint8_t *dst_ptr,
                                                 int dst_stride, int w, int h,
                                                 const int16x8_t y_filter) {
  const int bd = 8;
  const int16x8_t sub_const = vdupq_n_s16(1 << (bd - 1));

  if (w <= 4) {
    int16x4_t s0, s1, s2, s3, s4, s5, s6;
    load_s16_4x7(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
    src_ptr += 7 * src_stride;

    do {
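      // AArch64 has twice as many SIMD registers available, so process four
      // rows per iteration; on 32-bit Arm process a single row at a time.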
#if AOM_ARCH_AARCH64
      int16x4_t s7, s8, s9, s10;
      load_s16_4x4(src_ptr, src_stride, &s7, &s8, &s9, &s10);

      int16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
      int16x4_t d1 = convolve8_4_2d_v(s1, s2, s3, s4, s5, s6, s7, s8, y_filter);
      int16x4_t d2 = convolve8_4_2d_v(s2, s3, s4, s5, s6, s7, s8, s9, y_filter);
      int16x4_t d3 =
          convolve8_4_2d_v(s3, s4, s5, s6, s7, s8, s9, s10, y_filter);

      uint8x8_t d01 = vqmovun_s16(vsubq_s16(vcombine_s16(d0, d1), sub_const));
      uint8x8_t d23 = vqmovun_s16(vsubq_s16(vcombine_s16(d2, d3), sub_const));

      store_u8x4_strided_x2(dst_ptr + 0 * dst_stride, dst_stride, d01);
      store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23);

      s0 = s4;
      s1 = s5;
      s2 = s6;
      s3 = s7;
      s4 = s8;
      s5 = s9;
      s6 = s10;
      src_ptr += 4 * src_stride;
      dst_ptr += 4 * dst_stride;
      h -= 4;
#else  // !AOM_ARCH_AARCH64
      int16x4_t s7 = vld1_s16(src_ptr);
      int16x4_t d0 = convolve8_4_2d_v(s0, s1, s2, s3, s4, s5, s6, s7, y_filter);
      uint8x8_t d01 =
          vqmovun_s16(vsubq_s16(vcombine_s16(d0, vdup_n_s16(0)), sub_const));

      store_u8_4x1(dst_ptr, d01);

      s0 = s1;
      s1 = s2;
      s2 = s3;
      s3 = s4;
      s4 = s5;
      s5 = s6;
      s6 = s7;
      src_ptr += src_stride;
      dst_ptr += dst_stride;
      h--;
#endif  // AOM_ARCH_AARCH64
    } while (h != 0);
  } else {
    // Width is a multiple of 8 and height is a multiple of 4.
    do {
      int height = h;
      int16_t *s = src_ptr;
      uint8_t *d = dst_ptr;

      int16x8_t s0, s1, s2, s3, s4, s5, s6;
      load_s16_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
      s += 7 * src_stride;

      do {
#if AOM_ARCH_AARCH64
        int16x8_t s7, s8, s9, s10;
        load_s16_8x4(s, src_stride, &s7, &s8, &s9, &s10);

        uint8x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7,
                                        y_filter, sub_const);
        uint8x8_t d1 = convolve8_8_2d_v(s1, s2, s3, s4, s5, s6, s7, s8,
                                        y_filter, sub_const);
        uint8x8_t d2 = convolve8_8_2d_v(s2, s3, s4, s5, s6, s7, s8, s9,
                                        y_filter, sub_const);
        uint8x8_t d3 = convolve8_8_2d_v(s3, s4, s5, s6, s7, s8, s9, s10,
                                        y_filter, sub_const);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s0 = s4;
        s1 = s5;
        s2 = s6;
        s3 = s7;
        s4 = s8;
        s5 = s9;
        s6 = s10;
        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
#else  // !AOM_ARCH_AARCH64
        int16x8_t s7 = vld1q_s16(s);
        uint8x8_t d0 = convolve8_8_2d_v(s0, s1, s2, s3, s4, s5, s6, s7,
                                        y_filter, sub_const);
        vst1_u8(d, d0);

        s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        s4 = s5;
        s5 = s6;
        s6 = s7;
        s += src_stride;
        d += dst_stride;
        height--;
#endif  // AOM_ARCH_AARCH64
      } while (height != 0);
      src_ptr += 8;
      dst_ptr += 8;
      w -= 8;
    } while (w != 0);
  }
}

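// Vertical pass of a 6-tap 2D convolution for a column of four pixels. The
// six coefficients occupy lanes 1-6 of the 8-lane filter vector (lanes 0 and
// 7 are zero), hence the lane indices start at 1.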
static INLINE int16x4_t convolve6_4_2d_v(const int16x4_t s0, const int16x4_t s1,
                                         const int16x4_t s2, const int16x4_t s3,
                                         const int16x4_t s4, const int16x4_t s5,
                                         const int16x8_t y_filter) {
  const int16x4_t y_filter_lo = vget_low_s16(y_filter);
  const int16x4_t y_filter_hi = vget_high_s16(y_filter);

  int32x4_t sum = vmull_lane_s16(s0, y_filter_lo, 1);
  sum = vmlal_lane_s16(sum, s1, y_filter_lo, 2);
  sum = vmlal_lane_s16(sum, s2, y_filter_lo, 3);
  sum = vmlal_lane_s16(sum, s3, y_filter_hi, 0);
  sum = vmlal_lane_s16(sum, s4, y_filter_hi, 1);
  sum = vmlal_lane_s16(sum, s5, y_filter_hi, 2);

  return vqrshrn_n_s32(sum, 2 * FILTER_BITS - ROUND0_BITS);
}

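// Eight-pixel variant of the 6-tap vertical convolution; narrows with a
// saturating rounding shift, removes the horizontal-pass offset and
// saturates to u8.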
static INLINE uint8x8_t convolve6_8_2d_v(const int16x8_t s0, const int16x8_t s1,
                                         const int16x8_t s2, const int16x8_t s3,
                                         const int16x8_t s4, const int16x8_t s5,
                                         const int16x8_t y_filter,
                                         const int16x8_t sub_const) {
  const int16x4_t y_filter_lo = vget_low_s16(y_filter);
  const int16x4_t y_filter_hi = vget_high_s16(y_filter);

  int32x4_t sum0 = vmull_lane_s16(vget_low_s16(s0), y_filter_lo, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s1), y_filter_lo, 2);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s2), y_filter_lo, 3);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s3), y_filter_hi, 0);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s4), y_filter_hi, 1);
  sum0 = vmlal_lane_s16(sum0, vget_low_s16(s5), y_filter_hi, 2);

  int32x4_t sum1 = vmull_lane_s16(vget_high_s16(s0), y_filter_lo, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s1), y_filter_lo, 2);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s2), y_filter_lo, 3);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s3), y_filter_hi, 0);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s4), y_filter_hi, 1);
  sum1 = vmlal_lane_s16(sum1, vget_high_s16(s5), y_filter_hi, 2);

  int16x8_t res =
      vcombine_s16(vqrshrn_n_s32(sum0, 2 * FILTER_BITS - ROUND0_BITS),
                   vqrshrn_n_s32(sum1, 2 * FILTER_BITS - ROUND0_BITS));
  res = vsubq_s16(res, sub_const);

  return vqmovun_s16(res);
}

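// 6-tap vertical pass of the 2D convolution; the structure mirrors the 8-tap
// version above, but only five rows of context are needed before the loop.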
static INLINE void convolve_2d_sr_vert_6tap_neon(int16_t *src_ptr,
                                                 int src_stride,
                                                 uint8_t *dst_ptr,
                                                 int dst_stride, int w, int h,
                                                 const int16x8_t y_filter) {
  const int bd = 8;
  const int16x8_t sub_const = vdupq_n_s16(1 << (bd - 1));

  if (w <= 4) {
    int16x4_t s0, s1, s2, s3, s4;
    load_s16_4x5(src_ptr, src_stride, &s0, &s1, &s2, &s3, &s4);
    src_ptr += 5 * src_stride;

    do {
#if AOM_ARCH_AARCH64
      int16x4_t s5, s6, s7, s8;
      load_s16_4x4(src_ptr, src_stride, &s5, &s6, &s7, &s8);

      int16x4_t d0 = convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter);
      int16x4_t d1 = convolve6_4_2d_v(s1, s2, s3, s4, s5, s6, y_filter);
      int16x4_t d2 = convolve6_4_2d_v(s2, s3, s4, s5, s6, s7, y_filter);
      int16x4_t d3 = convolve6_4_2d_v(s3, s4, s5, s6, s7, s8, y_filter);

      uint8x8_t d01 = vqmovun_s16(vsubq_s16(vcombine_s16(d0, d1), sub_const));
      uint8x8_t d23 = vqmovun_s16(vsubq_s16(vcombine_s16(d2, d3), sub_const));

      store_u8x4_strided_x2(dst_ptr + 0 * dst_stride, dst_stride, d01);
      store_u8x4_strided_x2(dst_ptr + 2 * dst_stride, dst_stride, d23);

      s0 = s4;
      s1 = s5;
      s2 = s6;
      s3 = s7;
      s4 = s8;
      src_ptr += 4 * src_stride;
      dst_ptr += 4 * dst_stride;
      h -= 4;
#else  // !AOM_ARCH_AARCH64
      int16x4_t s5 = vld1_s16(src_ptr);
      int16x4_t d0 = convolve6_4_2d_v(s0, s1, s2, s3, s4, s5, y_filter);
      uint8x8_t d01 =
          vqmovun_s16(vsubq_s16(vcombine_s16(d0, vdup_n_s16(0)), sub_const));

      store_u8_4x1(dst_ptr, d01);

      s0 = s1;
      s1 = s2;
      s2 = s3;
      s3 = s4;
      s4 = s5;
      src_ptr += src_stride;
      dst_ptr += dst_stride;
      h--;
#endif  // AOM_ARCH_AARCH64
    } while (h != 0);
  } else {
    // Width is a multiple of 8 and height is a multiple of 4.
    do {
      int height = h;
      int16_t *s = src_ptr;
      uint8_t *d = dst_ptr;

      int16x8_t s0, s1, s2, s3, s4;
      load_s16_8x5(s, src_stride, &s0, &s1, &s2, &s3, &s4);
      s += 5 * src_stride;

      do {
#if AOM_ARCH_AARCH64
        int16x8_t s5, s6, s7, s8;
        load_s16_8x4(s, src_stride, &s5, &s6, &s7, &s8);

        uint8x8_t d0 =
            convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, sub_const);
        uint8x8_t d1 =
            convolve6_8_2d_v(s1, s2, s3, s4, s5, s6, y_filter, sub_const);
        uint8x8_t d2 =
            convolve6_8_2d_v(s2, s3, s4, s5, s6, s7, y_filter, sub_const);
        uint8x8_t d3 =
            convolve6_8_2d_v(s3, s4, s5, s6, s7, s8, y_filter, sub_const);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s0 = s4;
        s1 = s5;
        s2 = s6;
        s3 = s7;
        s4 = s8;
        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
#else  // !AOM_ARCH_AARCH64
        int16x8_t s5 = vld1q_s16(s);
        uint8x8_t d0 =
            convolve6_8_2d_v(s0, s1, s2, s3, s4, s5, y_filter, sub_const);
        vst1_u8(d, d0);

        s0 = s1;
        s1 = s2;
        s2 = s3;
        s3 = s4;
        s4 = s5;
        s += src_stride;
        d += dst_stride;
        height--;
#endif  // AOM_ARCH_AARCH64
      } while (height != 0);
      src_ptr += 8;
      dst_ptr += 8;
      w -= 8;
    } while (w != 0);
  }
}

#endif  // AOM_AV1_COMMON_ARM_CONVOLVE_NEON_H_