/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include "warp_plane_neon.h"

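// Permute indices used with vqtbl1q_u8 to build the overlapping 4-byte
// windows ({0..3, 1..4, 2..5, 3..6}, and so on) that the USDOT instructions
// consume in the *_f1 horizontal filters below.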
DECLARE_ALIGNED(16, static const uint8_t, usdot_permute_idx[48]) = {
  0, 1, 2,  3,  1, 2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6,
  4, 5, 6,  7,  5, 6,  7,  8,  6,  7,  8,  9,  7,  8,  9,  10,
  8, 9, 10, 11, 9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14
};

static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f4(const uint8x16_t in,
                                                           int sx, int alpha) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  // Load the four 8-tap filters, one per output pixel.
  int16x8_t f[4];
  load_filters_4(f, sx, alpha);

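  // Narrow each 8-tap filter to int8 and pack two filters per vector; the
  // truncating vmovn_s16 relies on every warp filter tap fitting in 8 bits.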
  int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1]));
  int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3]));

  uint8x8_t in0 = vget_low_u8(in);
  uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1));
  uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2));
  uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3));

  int32x4_t m01 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in0, in1), f01_u8);
  int32x4_t m23 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in2, in3), f23_u8);

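  // Each USDOT lane holds half of an 8-tap sum; pairwise addition leaves one
  // complete per-pixel sum in each lane.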
  int32x4_t tmp_res_low = vpaddq_s32(m01, m23);

  tmp_res_low = vaddq_s32(tmp_res_low, add_const);

  uint16x8_t res =
      vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0));
  return vreinterpretq_s16_u16(res);
}

static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f8(const uint8x16_t in,
                                                           int sx, int alpha) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  // Load the eight 8-tap filters, one per output pixel.
  int16x8_t f[8];
  load_filters_8(f, sx, alpha);

  int8x16_t f01_u8 = vcombine_s8(vmovn_s16(f[0]), vmovn_s16(f[1]));
  int8x16_t f23_u8 = vcombine_s8(vmovn_s16(f[2]), vmovn_s16(f[3]));
  int8x16_t f45_u8 = vcombine_s8(vmovn_s16(f[4]), vmovn_s16(f[5]));
  int8x16_t f67_u8 = vcombine_s8(vmovn_s16(f[6]), vmovn_s16(f[7]));

  uint8x8_t in0 = vget_low_u8(in);
  uint8x8_t in1 = vget_low_u8(vextq_u8(in, in, 1));
  uint8x8_t in2 = vget_low_u8(vextq_u8(in, in, 2));
  uint8x8_t in3 = vget_low_u8(vextq_u8(in, in, 3));
  uint8x8_t in4 = vget_low_u8(vextq_u8(in, in, 4));
  uint8x8_t in5 = vget_low_u8(vextq_u8(in, in, 5));
  uint8x8_t in6 = vget_low_u8(vextq_u8(in, in, 6));
  uint8x8_t in7 = vget_low_u8(vextq_u8(in, in, 7));

  int32x4_t m01 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in0, in1), f01_u8);
  int32x4_t m23 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in2, in3), f23_u8);
  int32x4_t m45 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in4, in5), f45_u8);
  int32x4_t m67 = vusdotq_s32(vdupq_n_s32(0), vcombine_u8(in6, in7), f67_u8);

  int32x4_t tmp_res_low = vpaddq_s32(m01, m23);
  int32x4_t tmp_res_high = vpaddq_s32(m45, m67);

  tmp_res_low = vaddq_s32(tmp_res_low, add_const);
  tmp_res_high = vaddq_s32(tmp_res_high, add_const);

  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS),
                                vqrshrun_n_s32(tmp_res_high, ROUND0_BITS));
  return vreinterpretq_s16_u16(res);
}

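// The *_f1 variants below apply a single filter to every output pixel: one
// 8-tap filter is loaded, narrowed and applied with lane-indexed USDOTs
// against pre-permuted sliding windows of the input.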
static AOM_FORCE_INLINE int16x8_t horizontal_filter_4x1_f1(const uint8x16_t in,
                                                           int sx) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  int16x8_t f_s16 =
      vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS)));

  int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16));

  uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]);
  uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]);

  // Permute samples ready for dot product.
  // { 0,  1,  2,  3,  1,  2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6 }
  // { 4,  5,  6,  7,  5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10 }
  uint8x16_t in_0123 = vqtbl1q_u8(in, perm0);
  uint8x16_t in_4567 = vqtbl1q_u8(in, perm1);

  int32x4_t m0123 = vusdotq_laneq_s32(vdupq_n_s32(0), in_0123, f_s8, 0);
  m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1);

  int32x4_t tmp_res_low = m0123;

  tmp_res_low = vaddq_s32(tmp_res_low, add_const);

  uint16x8_t res =
      vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS), vdup_n_u16(0));
  return vreinterpretq_s16_u16(res);
}

static AOM_FORCE_INLINE int16x8_t horizontal_filter_8x1_f1(const uint8x16_t in,
                                                           int sx) {
  const int32x4_t add_const = vdupq_n_s32(1 << (8 + FILTER_BITS - 1));

  int16x8_t f_s16 =
      vld1q_s16((int16_t *)(av1_warped_filter + (sx >> WARPEDDIFF_PREC_BITS)));

  int8x16_t f_s8 = vcombine_s8(vmovn_s16(f_s16), vmovn_s16(f_s16));

  uint8x16_t perm0 = vld1q_u8(&usdot_permute_idx[0]);
  uint8x16_t perm1 = vld1q_u8(&usdot_permute_idx[16]);
  uint8x16_t perm2 = vld1q_u8(&usdot_permute_idx[32]);

  // Permute samples ready for dot product.
  // { 0,  1,  2,  3,  1,  2,  3,  4,  2,  3,  4,  5,  3,  4,  5,  6 }
  // { 4,  5,  6,  7,  5,  6,  7,  8,  6,  7,  8,  9,  7,  8,  9, 10 }
  // { 8,  9, 10, 11,  9, 10, 11, 12, 10, 11, 12, 13, 11, 12, 13, 14 }
  uint8x16_t in_0123 = vqtbl1q_u8(in, perm0);
  uint8x16_t in_4567 = vqtbl1q_u8(in, perm1);
  uint8x16_t in_89ab = vqtbl1q_u8(in, perm2);

  int32x4_t m0123 = vusdotq_laneq_s32(vdupq_n_s32(0), in_0123, f_s8, 0);
  m0123 = vusdotq_laneq_s32(m0123, in_4567, f_s8, 1);

  int32x4_t m4567 = vusdotq_laneq_s32(vdupq_n_s32(0), in_4567, f_s8, 0);
  m4567 = vusdotq_laneq_s32(m4567, in_89ab, f_s8, 1);

  int32x4_t tmp_res_low = m0123;
  int32x4_t tmp_res_high = m4567;

  tmp_res_low = vaddq_s32(tmp_res_low, add_const);
  tmp_res_high = vaddq_s32(tmp_res_high, add_const);

  uint16x8_t res = vcombine_u16(vqrshrun_n_s32(tmp_res_low, ROUND0_BITS),
                                vqrshrun_n_s32(tmp_res_high, ROUND0_BITS));
  return vreinterpretq_s16_u16(res);
}

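// The vertical filters consume the int16x8_t rows produced by the horizontal
// pass. The f1 variants apply one shared filter down every column; the f4/f8
// variants transpose first so that each vector holds the eight taps of a
// single output column, then apply per-column filters.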
static AOM_FORCE_INLINE void vertical_filter_4x1_f1(const int16x8_t *src,
                                                    int32x4_t *res, int sy) {
  int16x4_t s0 = vget_low_s16(src[0]);
  int16x4_t s1 = vget_low_s16(src[1]);
  int16x4_t s2 = vget_low_s16(src[2]);
  int16x4_t s3 = vget_low_s16(src[3]);
  int16x4_t s4 = vget_low_s16(src[4]);
  int16x4_t s5 = vget_low_s16(src[5]);
  int16x4_t s6 = vget_low_s16(src[6]);
  int16x4_t s7 = vget_low_s16(src[7]);

  int16x8_t f =
      vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));

  int32x4_t m0123 = vmull_lane_s16(s0, vget_low_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, s1, vget_low_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, s2, vget_low_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, s3, vget_low_s16(f), 3);
  m0123 = vmlal_lane_s16(m0123, s4, vget_high_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, s5, vget_high_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, s6, vget_high_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, s7, vget_high_s16(f), 3);

  *res = m0123;
}

static AOM_FORCE_INLINE void vertical_filter_4x1_f4(const int16x8_t *src,
                                                    int32x4_t *res, int sy,
                                                    int gamma) {
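  // Transpose the 8 rows x 4 columns of samples so that each of s0..s3 holds
  // the eight vertical taps for one output pixel.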
  int16x8_t s0, s1, s2, s3;
  transpose_elems_s16_4x8(
      vget_low_s16(src[0]), vget_low_s16(src[1]), vget_low_s16(src[2]),
      vget_low_s16(src[3]), vget_low_s16(src[4]), vget_low_s16(src[5]),
      vget_low_s16(src[6]), vget_low_s16(src[7]), &s0, &s1, &s2, &s3);

  int16x8_t f[4];
  load_filters_4(f, sy, gamma);

  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));
  m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0]));
  int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1]));
  m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1]));
  int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2]));
  m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2]));
  int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3]));
  m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3]));
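
  // Reduce each per-pixel accumulator to a single 32-bit sum and gather the
  // four sums into one vector.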
  int32x4_t m0123_pairs[] = { m0, m1, m2, m3 };

  *res = horizontal_add_4d_s32x4(m0123_pairs);
}

static AOM_FORCE_INLINE void vertical_filter_8x1_f1(const int16x8_t *src,
                                                    int32x4_t *res_low,
                                                    int32x4_t *res_high,
                                                    int sy) {
  int16x8_t s0 = src[0];
  int16x8_t s1 = src[1];
  int16x8_t s2 = src[2];
  int16x8_t s3 = src[3];
  int16x8_t s4 = src[4];
  int16x8_t s5 = src[5];
  int16x8_t s6 = src[6];
  int16x8_t s7 = src[7];

  int16x8_t f =
      vld1q_s16((int16_t *)(av1_warped_filter + (sy >> WARPEDDIFF_PREC_BITS)));

  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(s0), vget_low_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s1), vget_low_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s2), vget_low_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s3), vget_low_s16(f), 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s4), vget_high_s16(f), 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s5), vget_high_s16(f), 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s6), vget_high_s16(f), 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(s7), vget_high_s16(f), 3);

  int32x4_t m4567 = vmull_lane_s16(vget_high_s16(s0), vget_low_s16(f), 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s1), vget_low_s16(f), 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s2), vget_low_s16(f), 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s3), vget_low_s16(f), 3);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s4), vget_high_s16(f), 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s5), vget_high_s16(f), 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s6), vget_high_s16(f), 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(s7), vget_high_s16(f), 3);

  *res_low = m0123;
  *res_high = m4567;
}

static AOM_FORCE_INLINE void vertical_filter_8x1_f8(const int16x8_t *src,
                                                    int32x4_t *res_low,
                                                    int32x4_t *res_high, int sy,
                                                    int gamma) {
  int16x8_t s0 = src[0];
  int16x8_t s1 = src[1];
  int16x8_t s2 = src[2];
  int16x8_t s3 = src[3];
  int16x8_t s4 = src[4];
  int16x8_t s5 = src[5];
  int16x8_t s6 = src[6];
  int16x8_t s7 = src[7];
  transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  int16x8_t f[8];
  load_filters_8(f, sy, gamma);

  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));
  m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0]));
  int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1]));
  m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1]));
  int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2]));
  m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2]));
  int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3]));
  m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3]));
  int32x4_t m4 = vmull_s16(vget_low_s16(s4), vget_low_s16(f[4]));
  m4 = vmlal_s16(m4, vget_high_s16(s4), vget_high_s16(f[4]));
  int32x4_t m5 = vmull_s16(vget_low_s16(s5), vget_low_s16(f[5]));
  m5 = vmlal_s16(m5, vget_high_s16(s5), vget_high_s16(f[5]));
  int32x4_t m6 = vmull_s16(vget_low_s16(s6), vget_low_s16(f[6]));
  m6 = vmlal_s16(m6, vget_high_s16(s6), vget_high_s16(f[6]));
  int32x4_t m7 = vmull_s16(vget_low_s16(s7), vget_low_s16(f[7]));
  m7 = vmlal_s16(m7, vget_high_s16(s7), vget_high_s16(f[7]));

  int32x4_t m0123_pairs[] = { m0, m1, m2, m3 };
  int32x4_t m4567_pairs[] = { m4, m5, m6, m7 };

  *res_low = horizontal_add_4d_s32x4(m0123_pairs);
  *res_high = horizontal_add_4d_s32x4(m4567_pairs);
}

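// Entry point: av1_warp_affine_common() (from warp_plane_neon.h) runs the
// shared warp loop in terms of the horizontal and vertical helper kernels
// defined above.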
void av1_warp_affine_neon_i8mm(const int32_t *mat, const uint8_t *ref,
                               int width, int height, int stride, uint8_t *pred,
                               int p_col, int p_row, int p_width, int p_height,
                               int p_stride, int subsampling_x,
                               int subsampling_y, ConvolveParams *conv_params,
                               int16_t alpha, int16_t beta, int16_t gamma,
                               int16_t delta) {
  av1_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row,
                         p_width, p_height, p_stride, subsampling_x,
                         subsampling_y, conv_params, alpha, beta, gamma, delta);
}