/*
 * Copyright (c) 2023, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>
#include <stdbool.h>

#include "aom_dsp/aom_dsp_common.h"
#include "aom_dsp/arm/mem_neon.h"
#include "aom_dsp/arm/sum_neon.h"
#include "aom_dsp/arm/transpose_neon.h"
#include "aom_ports/mem.h"
#include "av1/common/scale.h"
#include "av1/common/warped_motion.h"
#include "config/av1_rtcd.h"
#include "highbd_warp_plane_neon.h"

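// Horizontal warp filter producing one row of 4 output pixels at high bit
// depth. Four 8-tap filters are loaded from (sx, alpha), one per output
// pixel, and applied to overlapping 8-sample windows of `in`. Each sum is
// offset by 1 << (bd + FILTER_BITS - 1), rounded down by the first-stage
// shift and narrowed to 16 bits; only the low 4 lanes of the returned
// vector are valid.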
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_4x1_f4(uint16x8x2_t in, int bd, int sx, int alpha) {
  int16x8_t f[4];
  load_filters_4(f, sx, alpha);

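  // Extract the four overlapping 8-sample input windows, one per output
  // pixel.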
  int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 0);
  int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 1);
  int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 2);
  int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 3);

  int32x4_t m0 = vmull_s16(vget_low_s16(f[0]), vget_low_s16(rv0));
  m0 = vmlal_s16(m0, vget_high_s16(f[0]), vget_high_s16(rv0));
  int32x4_t m1 = vmull_s16(vget_low_s16(f[1]), vget_low_s16(rv1));
  m1 = vmlal_s16(m1, vget_high_s16(f[1]), vget_high_s16(rv1));
  int32x4_t m2 = vmull_s16(vget_low_s16(f[2]), vget_low_s16(rv2));
  m2 = vmlal_s16(m2, vget_high_s16(f[2]), vget_high_s16(rv2));
  int32x4_t m3 = vmull_s16(vget_low_s16(f[3]), vget_low_s16(rv3));
  m3 = vmlal_s16(m3, vget_high_s16(f[3]), vget_high_s16(rv3));

  int32x4_t m0123[] = { m0, m1, m2, m3 };

  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

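  // Reduce each 8-tap accumulation to a single sum, add the horizontal
  // offset and apply the first-stage rounding shift (two extra bits when
  // bd == 12).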
  int32x4_t res = horizontal_add_4d_s32x4(m0123);
  res = vaddq_s32(res, vdupq_n_s32(1 << offset_bits_horiz));
  res = vrshlq_s32(res, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}

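// As above, but producing 8 output pixels per row: eight per-pixel filters
// are loaded and all 8 lanes of the returned vector are valid.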
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_8x1_f8(uint16x8x2_t in, int bd, int sx, int alpha) {
  int16x8_t f[8];
  load_filters_8(f, sx, alpha);

  int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 0);
  int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 1);
  int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 2);
  int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 3);
  int16x8_t rv4 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 4);
  int16x8_t rv5 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 5);
  int16x8_t rv6 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 6);
  int16x8_t rv7 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 7);

  int32x4_t m0 = vmull_s16(vget_low_s16(f[0]), vget_low_s16(rv0));
  m0 = vmlal_s16(m0, vget_high_s16(f[0]), vget_high_s16(rv0));
  int32x4_t m1 = vmull_s16(vget_low_s16(f[1]), vget_low_s16(rv1));
  m1 = vmlal_s16(m1, vget_high_s16(f[1]), vget_high_s16(rv1));
  int32x4_t m2 = vmull_s16(vget_low_s16(f[2]), vget_low_s16(rv2));
  m2 = vmlal_s16(m2, vget_high_s16(f[2]), vget_high_s16(rv2));
  int32x4_t m3 = vmull_s16(vget_low_s16(f[3]), vget_low_s16(rv3));
  m3 = vmlal_s16(m3, vget_high_s16(f[3]), vget_high_s16(rv3));
  int32x4_t m4 = vmull_s16(vget_low_s16(f[4]), vget_low_s16(rv4));
  m4 = vmlal_s16(m4, vget_high_s16(f[4]), vget_high_s16(rv4));
  int32x4_t m5 = vmull_s16(vget_low_s16(f[5]), vget_low_s16(rv5));
  m5 = vmlal_s16(m5, vget_high_s16(f[5]), vget_high_s16(rv5));
  int32x4_t m6 = vmull_s16(vget_low_s16(f[6]), vget_low_s16(rv6));
  m6 = vmlal_s16(m6, vget_high_s16(f[6]), vget_high_s16(rv6));
  int32x4_t m7 = vmull_s16(vget_low_s16(f[7]), vget_low_s16(rv7));
  m7 = vmlal_s16(m7, vget_high_s16(f[7]), vget_high_s16(rv7));

  int32x4_t m0123[] = { m0, m1, m2, m3 };
  int32x4_t m4567[] = { m4, m5, m6, m7 };

  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res0 = horizontal_add_4d_s32x4(m0123);
  int32x4_t res1 = horizontal_add_4d_s32x4(m4567);
  res0 = vaddq_s32(res0, vdupq_n_s32(1 << offset_bits_horiz));
  res1 = vaddq_s32(res1, vdupq_n_s32(1 << offset_bits_horiz));
  res0 = vrshlq_s32(res0, vdupq_n_s32(-round0));
  res1 = vrshlq_s32(res1, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}

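// Horizontal filter for 4 output pixels where a single 8-tap filter, loaded
// from sx, is shared by every output pixel (the case where the filter phase
// does not vary across the row, i.e. alpha == 0).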
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_4x1_f1(uint16x8x2_t in, int bd, int sx) {
  int16x8_t f = load_filters_1(sx);

  int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 0);
  int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 1);
  int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 2);
  int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 3);

  int32x4_t m0 = vmull_s16(vget_low_s16(f), vget_low_s16(rv0));
  m0 = vmlal_s16(m0, vget_high_s16(f), vget_high_s16(rv0));
  int32x4_t m1 = vmull_s16(vget_low_s16(f), vget_low_s16(rv1));
  m1 = vmlal_s16(m1, vget_high_s16(f), vget_high_s16(rv1));
  int32x4_t m2 = vmull_s16(vget_low_s16(f), vget_low_s16(rv2));
  m2 = vmlal_s16(m2, vget_high_s16(f), vget_high_s16(rv2));
  int32x4_t m3 = vmull_s16(vget_low_s16(f), vget_low_s16(rv3));
  m3 = vmlal_s16(m3, vget_high_s16(f), vget_high_s16(rv3));

  int32x4_t m0123[] = { m0, m1, m2, m3 };

  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res = horizontal_add_4d_s32x4(m0123);
  res = vaddq_s32(res, vdupq_n_s32(1 << offset_bits_horiz));
  res = vrshlq_s32(res, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res), vdup_n_s16(0));
}

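// As above, but for 8 output pixels sharing a single filter.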
static AOM_FORCE_INLINE int16x8_t
highbd_horizontal_filter_8x1_f1(uint16x8x2_t in, int bd, int sx) {
  int16x8_t f = load_filters_1(sx);

  int16x8_t rv0 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 0);
  int16x8_t rv1 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 1);
  int16x8_t rv2 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 2);
  int16x8_t rv3 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 3);
  int16x8_t rv4 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 4);
  int16x8_t rv5 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 5);
  int16x8_t rv6 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 6);
  int16x8_t rv7 = vextq_s16(vreinterpretq_s16_u16(in.val[0]),
                            vreinterpretq_s16_u16(in.val[1]), 7);

  int32x4_t m0 = vmull_s16(vget_low_s16(f), vget_low_s16(rv0));
  m0 = vmlal_s16(m0, vget_high_s16(f), vget_high_s16(rv0));
  int32x4_t m1 = vmull_s16(vget_low_s16(f), vget_low_s16(rv1));
  m1 = vmlal_s16(m1, vget_high_s16(f), vget_high_s16(rv1));
  int32x4_t m2 = vmull_s16(vget_low_s16(f), vget_low_s16(rv2));
  m2 = vmlal_s16(m2, vget_high_s16(f), vget_high_s16(rv2));
  int32x4_t m3 = vmull_s16(vget_low_s16(f), vget_low_s16(rv3));
  m3 = vmlal_s16(m3, vget_high_s16(f), vget_high_s16(rv3));
  int32x4_t m4 = vmull_s16(vget_low_s16(f), vget_low_s16(rv4));
  m4 = vmlal_s16(m4, vget_high_s16(f), vget_high_s16(rv4));
  int32x4_t m5 = vmull_s16(vget_low_s16(f), vget_low_s16(rv5));
  m5 = vmlal_s16(m5, vget_high_s16(f), vget_high_s16(rv5));
  int32x4_t m6 = vmull_s16(vget_low_s16(f), vget_low_s16(rv6));
  m6 = vmlal_s16(m6, vget_high_s16(f), vget_high_s16(rv6));
  int32x4_t m7 = vmull_s16(vget_low_s16(f), vget_low_s16(rv7));
  m7 = vmlal_s16(m7, vget_high_s16(f), vget_high_s16(rv7));

  int32x4_t m0123[] = { m0, m1, m2, m3 };
  int32x4_t m4567[] = { m4, m5, m6, m7 };

  const int round0 = (bd == 12) ? ROUND0_BITS + 2 : ROUND0_BITS;
  const int offset_bits_horiz = bd + FILTER_BITS - 1;

  int32x4_t res0 = horizontal_add_4d_s32x4(m0123);
  int32x4_t res1 = horizontal_add_4d_s32x4(m4567);
  res0 = vaddq_s32(res0, vdupq_n_s32(1 << offset_bits_horiz));
  res1 = vaddq_s32(res1, vdupq_n_s32(1 << offset_bits_horiz));
  res0 = vrshlq_s32(res0, vdupq_n_s32(-round0));
  res1 = vrshlq_s32(res1, vdupq_n_s32(-round0));
  return vcombine_s16(vmovn_s32(res0), vmovn_s32(res1));
}

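// Vertical warp filter for 4 output pixels: a single 8-tap filter loaded
// from sy is applied down the eight intermediate rows in `tmp`. Returns
// unrounded 32-bit accumulator sums.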
static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f1(const int16x8_t *tmp,
                                                         int sy) {
  const int16x8_t f = load_filters_1(sy);
  const int16x4_t f0123 = vget_low_s16(f);
  const int16x4_t f4567 = vget_high_s16(f);

  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(tmp[0]), f0123, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[1]), f0123, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[2]), f0123, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[3]), f0123, 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[4]), f4567, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[5]), f4567, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[6]), f4567, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[7]), f4567, 3);
  return m0123;
}

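// As above, but for 8 output pixels: the low and high halves of each
// intermediate row are accumulated separately and returned as a pair of
// 32-bit sum vectors.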
static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f1(const int16x8_t *tmp,
                                                           int sy) {
  const int16x8_t f = load_filters_1(sy);
  const int16x4_t f0123 = vget_low_s16(f);
  const int16x4_t f4567 = vget_high_s16(f);

  int32x4_t m0123 = vmull_lane_s16(vget_low_s16(tmp[0]), f0123, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[1]), f0123, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[2]), f0123, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[3]), f0123, 3);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[4]), f4567, 0);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[5]), f4567, 1);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[6]), f4567, 2);
  m0123 = vmlal_lane_s16(m0123, vget_low_s16(tmp[7]), f4567, 3);

  int32x4_t m4567 = vmull_lane_s16(vget_high_s16(tmp[0]), f0123, 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[1]), f0123, 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[2]), f0123, 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[3]), f0123, 3);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[4]), f4567, 0);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[5]), f4567, 1);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[6]), f4567, 2);
  m4567 = vmlal_lane_s16(m4567, vget_high_s16(tmp[7]), f4567, 3);
  return (int32x4x2_t){ { m0123, m4567 } };
}

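// Vertical filter for 4 output pixels with per-pixel filters loaded from
// (sy, gamma).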
static AOM_FORCE_INLINE int32x4_t vertical_filter_4x1_f4(const int16x8_t *tmp,
                                                         int sy, int gamma) {
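  // Transpose the low halves of the eight intermediate rows so that each
  // output pixel's eight taps sit in a single vector.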
  int16x8_t s0, s1, s2, s3;
  transpose_elems_s16_4x8(
      vget_low_s16(tmp[0]), vget_low_s16(tmp[1]), vget_low_s16(tmp[2]),
      vget_low_s16(tmp[3]), vget_low_s16(tmp[4]), vget_low_s16(tmp[5]),
      vget_low_s16(tmp[6]), vget_low_s16(tmp[7]), &s0, &s1, &s2, &s3);

  int16x8_t f[4];
  load_filters_4(f, sy, gamma);

  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));
  m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0]));
  int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1]));
  m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1]));
  int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2]));
  m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2]));
  int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3]));
  m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3]));

  int32x4_t m0123[] = { m0, m1, m2, m3 };
  return horizontal_add_4d_s32x4(m0123);
}

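// As above, but for 8 output pixels: eight per-pixel filters are loaded and
// the full 8x8 block of intermediate values is transposed in place.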
static AOM_FORCE_INLINE int32x4x2_t vertical_filter_8x1_f8(const int16x8_t *tmp,
                                                           int sy, int gamma) {
  int16x8_t s0 = tmp[0];
  int16x8_t s1 = tmp[1];
  int16x8_t s2 = tmp[2];
  int16x8_t s3 = tmp[3];
  int16x8_t s4 = tmp[4];
  int16x8_t s5 = tmp[5];
  int16x8_t s6 = tmp[6];
  int16x8_t s7 = tmp[7];
  transpose_elems_inplace_s16_8x8(&s0, &s1, &s2, &s3, &s4, &s5, &s6, &s7);

  int16x8_t f[8];
  load_filters_8(f, sy, gamma);

  int32x4_t m0 = vmull_s16(vget_low_s16(s0), vget_low_s16(f[0]));
  m0 = vmlal_s16(m0, vget_high_s16(s0), vget_high_s16(f[0]));
  int32x4_t m1 = vmull_s16(vget_low_s16(s1), vget_low_s16(f[1]));
  m1 = vmlal_s16(m1, vget_high_s16(s1), vget_high_s16(f[1]));
  int32x4_t m2 = vmull_s16(vget_low_s16(s2), vget_low_s16(f[2]));
  m2 = vmlal_s16(m2, vget_high_s16(s2), vget_high_s16(f[2]));
  int32x4_t m3 = vmull_s16(vget_low_s16(s3), vget_low_s16(f[3]));
  m3 = vmlal_s16(m3, vget_high_s16(s3), vget_high_s16(f[3]));
  int32x4_t m4 = vmull_s16(vget_low_s16(s4), vget_low_s16(f[4]));
  m4 = vmlal_s16(m4, vget_high_s16(s4), vget_high_s16(f[4]));
  int32x4_t m5 = vmull_s16(vget_low_s16(s5), vget_low_s16(f[5]));
  m5 = vmlal_s16(m5, vget_high_s16(s5), vget_high_s16(f[5]));
  int32x4_t m6 = vmull_s16(vget_low_s16(s6), vget_low_s16(f[6]));
  m6 = vmlal_s16(m6, vget_high_s16(s6), vget_high_s16(f[6]));
  int32x4_t m7 = vmull_s16(vget_low_s16(s7), vget_low_s16(f[7]));
  m7 = vmlal_s16(m7, vget_high_s16(s7), vget_high_s16(f[7]));

  int32x4_t m0123[] = { m0, m1, m2, m3 };
  int32x4_t m4567[] = { m4, m5, m6, m7 };

  int32x4x2_t ret;
  ret.val[0] = horizontal_add_4d_s32x4(m0123);
  ret.val[1] = horizontal_add_4d_s32x4(m4567);
  return ret;
}

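// High bit-depth affine warp entry point for Neon. The per-block work is
// done by the shared highbd_warp_affine_common() helper (from
// highbd_warp_plane_neon.h), which is expected to dispatch to the filter
// helpers defined above.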
void av1_highbd_warp_affine_neon(const int32_t *mat, const uint16_t *ref,
                                 int width, int height, int stride,
                                 uint16_t *pred, int p_col, int p_row,
                                 int p_width, int p_height, int p_stride,
                                 int subsampling_x, int subsampling_y, int bd,
                                 ConvolveParams *conv_params, int16_t alpha,
                                 int16_t beta, int16_t gamma, int16_t delta) {
  highbd_warp_affine_common(mat, ref, width, height, stride, pred, p_col, p_row,
                            p_width, p_height, p_stride, subsampling_x,
                            subsampling_y, bd, conv_params, alpha, beta, gamma,
                            delta);
}