/*
 *  Copyright (c) 2023 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <arm_neon.h>
#include <assert.h>

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/arm/mem_neon.h"
#include "vpx_dsp/arm/transpose_neon.h"
#include "vpx_dsp/arm/vpx_convolve8_neon.h"
#include "vpx_dsp/vpx_filter.h"
#include "vpx_ports/mem.h"

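/* Each 16-byte row of this permute table gathers four overlapping 4-byte
 * windows from a source vector (bytes n..n+3, n+1..n+4, n+2..n+5, n+3..n+6),
 * arranging the samples needed by four adjacent output pixels for one 4-tap
 * half of an 8-tap dot product.
 */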
DECLARE_ALIGNED(16, static const uint8_t, dot_prod_permute_tbl[48]) = {
  0, 1, 2,  3,  1, 2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6,
  4, 5, 6,  7,  5, 6,  7,  8,  6,  7,  8,  9,  7,  8,  9,  10,
  8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};

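/* Indices for vqtbl2q_u8 that interleave the bytes of four 8-byte rows held
 * in a pair of 16-byte vectors (bytes 0-7 = row 0, 8-15 = row 1, 16-23 =
 * row 2, 24-31 = row 3), i.e. a byte-wise transpose: indices 0, 8, 16, 24
 * select byte 0 of each row, and so on.
 */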
DECLARE_ALIGNED(16, static const uint8_t, dot_prod_tran_concat_tbl[32]) = {
  0, 8,  16, 24, 1, 9,  17, 25, 2, 10, 18, 26, 3, 11, 19, 27,
  4, 12, 20, 28, 5, 13, 21, 29, 6, 14, 22, 30, 7, 15, 23, 31
};

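/* For vqtbl2q_u8, indices 0-15 select bytes from the first vector of the
 * table pair (the transposed block from the previous iteration) and indices
 * 16-31 from the second (the newly loaded rows), so each 16-byte row below
 * shifts a transposed 4x4 block left and appends fresh columns.
 */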
DECLARE_ALIGNED(16, static const uint8_t, dot_prod_merge_block_tbl[48]) = {
  /* Shift left and insert new last column in transposed 4x4 block. */
  1, 2, 3, 16, 5, 6, 7, 20, 9, 10, 11, 24, 13, 14, 15, 28,
  /* Shift left and insert two new columns in transposed 4x4 block. */
  2, 3, 16, 17, 6, 7, 20, 21, 10, 11, 24, 25, 14, 15, 28, 29,
  /* Shift left and insert three new columns in transposed 4x4 block. */
  3, 16, 17, 18, 7, 20, 21, 22, 11, 24, 25, 26, 15, 28, 29, 30
};

void vpx_convolve8_2d_horiz_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const InterpKernel *filter, int x0_q4,
                                      int x_step_q4, int y0_q4, int y_step_q4,
                                      int w, int h) {
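  /* Narrow the 16-bit filter taps to 8 bits, as required by the USDOT
   * (mixed-sign dot product) instructions used by the convolve8_*_usdot
   * helpers. */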
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[x0_q4]));
  uint8x16_t s0, s1, s2, s3;

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(x_step_q4 == 16);
  assert(h % 4 == 3);

  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

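  /* Back up the source pointer by 3 samples so that all 8 taps of the filter
   * have the context they need around each output pixel. */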
  src -= 3;

  if (w == 4) {
    const uint8x16x2_t perm_tbl = vld1q_u8_x2(dot_prod_permute_tbl);
    int16x4_t d0, d1, d2, d3;
    uint8x8_t d01, d23;

    do {
      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      d0 = convolve8_4_usdot(s0, filters, perm_tbl);
      d1 = convolve8_4_usdot(s1, filters, perm_tbl);
      d2 = convolve8_4_usdot(s2, filters, perm_tbl);
      d3 = convolve8_4_usdot(s3, filters, perm_tbl);
      d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
      d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h > 3);

    /* Process final three rows (h % 4 == 3). See vpx_convolve_neon.c for
     * further details on possible values of block height. */
    load_u8_16x3(src, src_stride, &s0, &s1, &s2);

    d0 = convolve8_4_usdot(s0, filters, perm_tbl);
    d1 = convolve8_4_usdot(s1, filters, perm_tbl);
    d2 = convolve8_4_usdot(s2, filters, perm_tbl);
    d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
    d23 = vqrshrun_n_s16(vcombine_s16(d2, vdup_n_s16(0)), FILTER_BITS);

    store_u8(dst + 0 * dst_stride, dst_stride, d01);
    store_u8_4x1(dst + 2 * dst_stride, d23);
  } else {
    const uint8x16x3_t perm_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
    const uint8_t *s;
    uint8_t *d;
    int width;
    uint8x8_t d0, d1, d2, d3;

    do {
      width = w;
      s = src;
      d = dst;
      do {
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        d0 = convolve8_8_usdot(s0, filters, perm_tbl);
        d1 = convolve8_8_usdot(s1, filters, perm_tbl);
        d2 = convolve8_8_usdot(s2, filters, perm_tbl);
        d3 = convolve8_8_usdot(s3, filters, perm_tbl);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width > 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h > 3);

    /* Process final three rows (h % 4 == 3). See vpx_convolve_neon.c for
     * further details on possible values of block height. */
    width = w;
    s = src;
    d = dst;
    do {
      load_u8_16x3(s, src_stride, &s0, &s1, &s2);

      d0 = convolve8_8_usdot(s0, filters, perm_tbl);
      d1 = convolve8_8_usdot(s1, filters, perm_tbl);
      d2 = convolve8_8_usdot(s2, filters, perm_tbl);

      store_u8_8x3(d, dst_stride, d0, d1, d2);

      s += 8;
      d += 8;
      width -= 8;
    } while (width > 0);
  }
}

void vpx_convolve8_horiz_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                   uint8_t *dst, ptrdiff_t dst_stride,
                                   const InterpKernel *filter, int x0_q4,
                                   int x_step_q4, int y0_q4, int y_step_q4,
                                   int w, int h) {
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[x0_q4]));
  uint8x16_t s0, s1, s2, s3;

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(x_step_q4 == 16);

  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

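  /* Back up the source pointer by 3 samples so that all 8 taps of the filter
   * have the context they need around each output pixel. */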
  src -= 3;

  if (w == 4) {
    const uint8x16x2_t perm_tbl = vld1q_u8_x2(dot_prod_permute_tbl);
    do {
      int16x4_t t0, t1, t2, t3;
      uint8x8_t d01, d23;

      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      t0 = convolve8_4_usdot(s0, filters, perm_tbl);
      t1 = convolve8_4_usdot(s1, filters, perm_tbl);
      t2 = convolve8_4_usdot(s2, filters, perm_tbl);
      t3 = convolve8_4_usdot(s3, filters, perm_tbl);
      d01 = vqrshrun_n_s16(vcombine_s16(t0, t1), FILTER_BITS);
      d23 = vqrshrun_n_s16(vcombine_s16(t2, t3), FILTER_BITS);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x3_t perm_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
    const uint8_t *s;
    uint8_t *d;
    int width;
    uint8x8_t d0, d1, d2, d3;

    do {
      width = w;
      s = src;
      d = dst;
      do {
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        d0 = convolve8_8_usdot(s0, filters, perm_tbl);
        d1 = convolve8_8_usdot(s1, filters, perm_tbl);
        d2 = convolve8_8_usdot(s2, filters, perm_tbl);
        d3 = convolve8_8_usdot(s3, filters, perm_tbl);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width != 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  }
}

void vpx_convolve8_avg_horiz_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                       uint8_t *dst, ptrdiff_t dst_stride,
                                       const InterpKernel *filter, int x0_q4,
                                       int x_step_q4, int y0_q4, int y_step_q4,
                                       int w, int h) {
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[x0_q4]));
  uint8x16_t s0, s1, s2, s3;

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(x_step_q4 == 16);

  (void)x_step_q4;
  (void)y0_q4;
  (void)y_step_q4;

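  /* Back up the source pointer by 3 samples so that all 8 taps of the filter
   * have the context they need around each output pixel. */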
  src -= 3;

  if (w == 4) {
    const uint8x16x2_t perm_tbl = vld1q_u8_x2(dot_prod_permute_tbl);
    do {
      int16x4_t t0, t1, t2, t3;
      uint8x8_t d01, d23, dd01, dd23;

      load_u8_16x4(src, src_stride, &s0, &s1, &s2, &s3);

      t0 = convolve8_4_usdot(s0, filters, perm_tbl);
      t1 = convolve8_4_usdot(s1, filters, perm_tbl);
      t2 = convolve8_4_usdot(s2, filters, perm_tbl);
      t3 = convolve8_4_usdot(s3, filters, perm_tbl);
      d01 = vqrshrun_n_s16(vcombine_s16(t0, t1), FILTER_BITS);
      d23 = vqrshrun_n_s16(vcombine_s16(t2, t3), FILTER_BITS);

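      /* Load the existing dst pixels and average them with the filter output
       * (vrhadd_u8 is a rounding halving add). */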
      dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
      dd23 = load_u8(dst + 2 * dst_stride, dst_stride);

      d01 = vrhadd_u8(d01, dd01);
      d23 = vrhadd_u8(d23, dd23);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x3_t perm_tbl = vld1q_u8_x3(dot_prod_permute_tbl);
    const uint8_t *s;
    uint8_t *d;
    int width;
    uint8x8_t d0, d1, d2, d3, dd0, dd1, dd2, dd3;

    do {
      width = w;
      s = src;
      d = dst;
      do {
        load_u8_16x4(s, src_stride, &s0, &s1, &s2, &s3);

        d0 = convolve8_8_usdot(s0, filters, perm_tbl);
        d1 = convolve8_8_usdot(s1, filters, perm_tbl);
        d2 = convolve8_8_usdot(s2, filters, perm_tbl);
        d3 = convolve8_8_usdot(s3, filters, perm_tbl);

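        /* Load the existing dst pixels and average them with the filter
         * output. */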
        load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);

        d0 = vrhadd_u8(d0, dd0);
        d1 = vrhadd_u8(d1, dd1);
        d2 = vrhadd_u8(d2, dd2);
        d3 = vrhadd_u8(d3, dd3);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        s += 8;
        d += 8;
        width -= 8;
      } while (width != 0);
      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  }
}

static INLINE void transpose_concat_4x4(uint8x8_t a0, uint8x8_t a1,
                                        uint8x8_t a2, uint8x8_t a3,
                                        uint8x16_t *b,
                                        const uint8x16_t permute_tbl) {
  /* Transpose 8-bit elements and concatenate result rows as follows:
   * a0: 00, 01, 02, 03, XX, XX, XX, XX
   * a1: 10, 11, 12, 13, XX, XX, XX, XX
   * a2: 20, 21, 22, 23, XX, XX, XX, XX
   * a3: 30, 31, 32, 33, XX, XX, XX, XX
   *
   * b: 00, 10, 20, 30, 01, 11, 21, 31, 02, 12, 22, 32, 03, 13, 23, 33
   *
   * The 'permute_tbl' is always 'dot_prod_tran_concat_tbl' above. Passing it
   * as an argument is preferable to loading it directly from memory as this
   * inline helper is called many times from the same parent function.
   */

  uint8x16x2_t samples = { { vcombine_u8(a0, a1), vcombine_u8(a2, a3) } };
  *b = vqtbl2q_u8(samples, permute_tbl);
}

static INLINE void transpose_concat_8x4(uint8x8_t a0, uint8x8_t a1,
                                        uint8x8_t a2, uint8x8_t a3,
                                        uint8x16_t *b0, uint8x16_t *b1,
                                        const uint8x16x2_t permute_tbl) {
  /* Transpose 8-bit elements and concatenate result rows as follows:
   * a0: 00, 01, 02, 03, 04, 05, 06, 07
   * a1: 10, 11, 12, 13, 14, 15, 16, 17
   * a2: 20, 21, 22, 23, 24, 25, 26, 27
   * a3: 30, 31, 32, 33, 34, 35, 36, 37
   *
   * b0: 00, 10, 20, 30, 01, 11, 21, 31, 02, 12, 22, 32, 03, 13, 23, 33
   * b1: 04, 14, 24, 34, 05, 15, 25, 35, 06, 16, 26, 36, 07, 17, 27, 37
   *
   * The 'permute_tbl' is always 'dot_prod_tran_concat_tbl' above. Passing it
   * as an argument is preferable to loading it directly from memory as this
   * inline helper is called many times from the same parent function.
   */

  uint8x16x2_t samples = { { vcombine_u8(a0, a1), vcombine_u8(a2, a3) } };
  *b0 = vqtbl2q_u8(samples, permute_tbl.val[0]);
  *b1 = vqtbl2q_u8(samples, permute_tbl.val[1]);
}

void vpx_convolve8_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                  uint8_t *dst, ptrdiff_t dst_stride,
                                  const InterpKernel *filter, int x0_q4,
                                  int x_step_q4, int y0_q4, int y_step_q4,
                                  int w, int h) {
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[y0_q4]));
  const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
  uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
  uint8x16x2_t samples_LUT;

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(y_step_q4 == 16);

  (void)x0_q4;
  (void)x_step_q4;
  (void)y_step_q4;

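  /* Back up the source pointer by 3 rows so that all 8 taps of the filter
   * have the context they need around each output row. */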
  src -= 3 * src_stride;

  if (w == 4) {
    const uint8x16_t tran_concat_tbl = vld1q_u8(dot_prod_tran_concat_tbl);
    uint8x16_t s0123, s1234, s2345, s3456, s4567, s5678, s6789, s78910;
    int16x4_t d0, d1, d2, d3;
    uint8x8_t d01, d23;

    load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
    src += 7 * src_stride;

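    /* Rows s7-s9 have not been loaded yet; zero them so that the transposed
     * blocks built below are fully defined. The blocks that depend on them
     * (s4567, s5678, s6789) are regenerated from real data by the merge step
     * at the top of the first loop iteration, before they are used. */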
    s7 = vdup_n_u8(0);
    s8 = vdup_n_u8(0);
    s9 = vdup_n_u8(0);

    /* This operation combines a conventional transpose and the sample permute
     * (see horizontal case) required before computing the dot product.
     */
    transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
    transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
    transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
    transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
    transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
    transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
    transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);

    do {
      load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);

      transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);

      /* Merge new data into block from previous iteration. */
      samples_LUT.val[0] = s3456;
      samples_LUT.val[1] = s78910;
      s4567 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
      s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
      s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

      d0 = convolve8_4_usdot_partial(s0123, s4567, filters);
      d1 = convolve8_4_usdot_partial(s1234, s5678, filters);
      d2 = convolve8_4_usdot_partial(s2345, s6789, filters);
      d3 = convolve8_4_usdot_partial(s3456, s78910, filters);
      d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
      d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      /* Prepare block for next iteration - re-using as much as possible. */
      /* Shuffle everything up four rows. */
      s0123 = s4567;
      s1234 = s5678;
      s2345 = s6789;
      s3456 = s78910;

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x2_t tran_concat_tbl = vld1q_u8_x2(dot_prod_tran_concat_tbl);
    uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
        s3456_lo, s3456_hi, s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo,
        s6789_hi, s78910_lo, s78910_hi;
    uint8x8_t d0, d1, d2, d3;
    const uint8_t *s;
    uint8_t *d;
    int height;

    do {
      height = h;
      s = src;
      d = dst;

      load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
      s += 7 * src_stride;

      s7 = vdup_n_u8(0);
      s8 = vdup_n_u8(0);
      s9 = vdup_n_u8(0);

      /* This operation combines a conventional transpose and the sample permute
       * (see horizontal case) required before computing the dot product.
       */
      transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
                           tran_concat_tbl);

      do {
        load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);

        transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
                             tran_concat_tbl);

        /* Merge new data into block from previous iteration. */
        samples_LUT.val[0] = s3456_lo;
        samples_LUT.val[1] = s78910_lo;
        s4567_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        s5678_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        s6789_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        samples_LUT.val[0] = s3456_hi;
        samples_LUT.val[1] = s78910_hi;
        s4567_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        d0 = convolve8_8_usdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
                                       filters);
        d1 = convolve8_8_usdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
                                       filters);
        d2 = convolve8_8_usdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
                                       filters);
        d3 = convolve8_8_usdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
                                       filters);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        /* Prepare block for next iteration - re-using as much as possible. */
        /* Shuffle everything up four rows. */
        s0123_lo = s4567_lo;
        s0123_hi = s4567_hi;
        s1234_lo = s5678_lo;
        s1234_hi = s5678_hi;
        s2345_lo = s6789_lo;
        s2345_hi = s6789_hi;
        s3456_lo = s78910_lo;
        s3456_hi = s78910_hi;

        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src += 8;
      dst += 8;
      w -= 8;
    } while (w != 0);
  }
}

void vpx_convolve8_avg_vert_neon_i8mm(const uint8_t *src, ptrdiff_t src_stride,
                                      uint8_t *dst, ptrdiff_t dst_stride,
                                      const InterpKernel *filter, int x0_q4,
                                      int x_step_q4, int y0_q4, int y_step_q4,
                                      int w, int h) {
  const int8x8_t filters = vmovn_s16(vld1q_s16(filter[y0_q4]));
  const uint8x16x3_t merge_block_tbl = vld1q_u8_x3(dot_prod_merge_block_tbl);
  uint8x8_t s0, s1, s2, s3, s4, s5, s6, s7, s8, s9, s10;
  uint8x16x2_t samples_LUT;

  assert((intptr_t)dst % 4 == 0);
  assert(dst_stride % 4 == 0);
  assert(y_step_q4 == 16);

  (void)x0_q4;
  (void)x_step_q4;
  (void)y_step_q4;

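  /* Back up the source pointer by 3 rows so that all 8 taps of the filter
   * have the context they need around each output row. */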
  src -= 3 * src_stride;

  if (w == 4) {
    const uint8x16_t tran_concat_tbl = vld1q_u8(dot_prod_tran_concat_tbl);
    uint8x16_t s0123, s1234, s2345, s3456, s4567, s5678, s6789, s78910;
    int16x4_t d0, d1, d2, d3;
    uint8x8_t d01, d23, dd01, dd23;

    load_u8_8x7(src, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
    src += 7 * src_stride;

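    /* Rows s7-s9 have not been loaded yet; zero them so that the transposed
     * blocks built below are fully defined. The blocks that depend on them
     * (s4567, s5678, s6789) are regenerated from real data by the merge step
     * at the top of the first loop iteration, before they are used. */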
    s7 = vdup_n_u8(0);
    s8 = vdup_n_u8(0);
    s9 = vdup_n_u8(0);

    /* This operation combines a conventional transpose and the sample permute
     * (see horizontal case) required before computing the dot product.
     */
    transpose_concat_4x4(s0, s1, s2, s3, &s0123, tran_concat_tbl);
    transpose_concat_4x4(s1, s2, s3, s4, &s1234, tran_concat_tbl);
    transpose_concat_4x4(s2, s3, s4, s5, &s2345, tran_concat_tbl);
    transpose_concat_4x4(s3, s4, s5, s6, &s3456, tran_concat_tbl);
    transpose_concat_4x4(s4, s5, s6, s7, &s4567, tran_concat_tbl);
    transpose_concat_4x4(s5, s6, s7, s8, &s5678, tran_concat_tbl);
    transpose_concat_4x4(s6, s7, s8, s9, &s6789, tran_concat_tbl);

    do {
      load_u8_8x4(src, src_stride, &s7, &s8, &s9, &s10);

      transpose_concat_4x4(s7, s8, s9, s10, &s78910, tran_concat_tbl);

      /* Merge new data into block from previous iteration. */
      samples_LUT.val[0] = s3456;
      samples_LUT.val[1] = s78910;
      s4567 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
      s5678 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
      s6789 = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

      d0 = convolve8_4_usdot_partial(s0123, s4567, filters);
      d1 = convolve8_4_usdot_partial(s1234, s5678, filters);
      d2 = convolve8_4_usdot_partial(s2345, s6789, filters);
      d3 = convolve8_4_usdot_partial(s3456, s78910, filters);
      d01 = vqrshrun_n_s16(vcombine_s16(d0, d1), FILTER_BITS);
      d23 = vqrshrun_n_s16(vcombine_s16(d2, d3), FILTER_BITS);

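      /* Load the existing dst pixels and average them with the filter output
       * (vrhadd_u8 is a rounding halving add). */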
      dd01 = load_u8(dst + 0 * dst_stride, dst_stride);
      dd23 = load_u8(dst + 2 * dst_stride, dst_stride);

      d01 = vrhadd_u8(d01, dd01);
      d23 = vrhadd_u8(d23, dd23);

      store_u8(dst + 0 * dst_stride, dst_stride, d01);
      store_u8(dst + 2 * dst_stride, dst_stride, d23);

      /* Prepare block for next iteration - re-using as much as possible. */
      /* Shuffle everything up four rows. */
      s0123 = s4567;
      s1234 = s5678;
      s2345 = s6789;
      s3456 = s78910;

      src += 4 * src_stride;
      dst += 4 * dst_stride;
      h -= 4;
    } while (h != 0);
  } else {
    const uint8x16x2_t tran_concat_tbl = vld1q_u8_x2(dot_prod_tran_concat_tbl);
    uint8x16_t s0123_lo, s0123_hi, s1234_lo, s1234_hi, s2345_lo, s2345_hi,
        s3456_lo, s3456_hi, s4567_lo, s4567_hi, s5678_lo, s5678_hi, s6789_lo,
        s6789_hi, s78910_lo, s78910_hi;
    uint8x8_t d0, d1, d2, d3, dd0, dd1, dd2, dd3;
    const uint8_t *s;
    uint8_t *d;
    int height;

    do {
      height = h;
      s = src;
      d = dst;

      load_u8_8x7(s, src_stride, &s0, &s1, &s2, &s3, &s4, &s5, &s6);
      s += 7 * src_stride;

      s7 = vdup_n_u8(0);
      s8 = vdup_n_u8(0);
      s9 = vdup_n_u8(0);

      /* This operation combines a conventional transpose and the sample permute
       * (see horizontal case) required before computing the dot product.
       */
      transpose_concat_8x4(s0, s1, s2, s3, &s0123_lo, &s0123_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s1, s2, s3, s4, &s1234_lo, &s1234_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s2, s3, s4, s5, &s2345_lo, &s2345_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s3, s4, s5, s6, &s3456_lo, &s3456_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s4, s5, s6, s7, &s4567_lo, &s4567_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s5, s6, s7, s8, &s5678_lo, &s5678_hi,
                           tran_concat_tbl);
      transpose_concat_8x4(s6, s7, s8, s9, &s6789_lo, &s6789_hi,
                           tran_concat_tbl);

      do {
        load_u8_8x4(s, src_stride, &s7, &s8, &s9, &s10);

        transpose_concat_8x4(s7, s8, s9, s10, &s78910_lo, &s78910_hi,
                             tran_concat_tbl);

        /* Merge new data into block from previous iteration. */
        samples_LUT.val[0] = s3456_lo;
        samples_LUT.val[1] = s78910_lo;
        s4567_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        s5678_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        s6789_lo = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        samples_LUT.val[0] = s3456_hi;
        samples_LUT.val[1] = s78910_hi;
        s4567_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[0]);
        s5678_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[1]);
        s6789_hi = vqtbl2q_u8(samples_LUT, merge_block_tbl.val[2]);

        d0 = convolve8_8_usdot_partial(s0123_lo, s4567_lo, s0123_hi, s4567_hi,
                                       filters);
        d1 = convolve8_8_usdot_partial(s1234_lo, s5678_lo, s1234_hi, s5678_hi,
                                       filters);
        d2 = convolve8_8_usdot_partial(s2345_lo, s6789_lo, s2345_hi, s6789_hi,
                                       filters);
        d3 = convolve8_8_usdot_partial(s3456_lo, s78910_lo, s3456_hi, s78910_hi,
                                       filters);

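        /* Load the existing dst pixels and average them with the filter
         * output. */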
        load_u8_8x4(d, dst_stride, &dd0, &dd1, &dd2, &dd3);

        d0 = vrhadd_u8(d0, dd0);
        d1 = vrhadd_u8(d1, dd1);
        d2 = vrhadd_u8(d2, dd2);
        d3 = vrhadd_u8(d3, dd3);

        store_u8_8x4(d, dst_stride, d0, d1, d2, d3);

        /* Prepare block for next iteration - re-using as much as possible. */
        /* Shuffle everything up four rows. */
        s0123_lo = s4567_lo;
        s0123_hi = s4567_hi;
        s1234_lo = s5678_lo;
        s1234_hi = s5678_hi;
        s2345_lo = s6789_lo;
        s2345_hi = s6789_hi;
        s3456_lo = s78910_lo;
        s3456_hi = s78910_hi;

        s += 4 * src_stride;
        d += 4 * dst_stride;
        height -= 4;
      } while (height != 0);
      src += 8;
      dst += 8;
      w -= 8;
    } while (w != 0);
  }
}