1 /*
2 * Copyright (c) 2017, Alliance for Open Media. All rights reserved
3 *
4 * This source code is subject to the terms of the BSD 2 Clause License and
5 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
6 * was not distributed with this source code in the LICENSE file, you can
7 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
8 * Media Patent License 1.0 was not distributed with this source code in the
9 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
10 */
11 #include <arm_neon.h>
12
13 #include "config/av1_rtcd.h"
14
15 #include "av1/common/cfl.h"
16
vldsubstq_s16(int16_t * dst,const uint16_t * src,int offset,int16x8_t sub)17 static INLINE void vldsubstq_s16(int16_t *dst, const uint16_t *src, int offset,
18 int16x8_t sub) {
19 vst1q_s16(dst + offset,
20 vsubq_s16(vreinterpretq_s16_u16(vld1q_u16(src + offset)), sub));
21 }
22
vldaddq_u16(const uint16_t * buf,size_t offset)23 static INLINE uint16x8_t vldaddq_u16(const uint16_t *buf, size_t offset) {
24 return vaddq_u16(vld1q_u16(buf), vld1q_u16(buf + offset));
25 }
26
// Load half of a vector and duplicate it in the other half.
static INLINE uint8x8_t vldh_dup_u8(const uint8_t *ptr) {
  return vreinterpret_u8_u32(vld1_dup_u32((const uint32_t *)ptr));
}

// Store half of a vector.
static INLINE void vsth_u16(uint16_t *ptr, uint16x4_t val) {
  *((uint32_t *)ptr) = vreinterpret_u32_u16(val)[0];
}

// Store half of a vector.
static INLINE void vsth_u8(uint8_t *ptr, uint8x8_t val) {
  *((uint32_t *)ptr) = vreinterpret_u32_u8(val)[0];
}

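// CfL prediction buffer format: each entry holds a luma value averaged over
// the chroma subsampling region and scaled by 8 (Q3). For 4:2:0 the 2x2 sum
// is shifted left by 1 ((a + b + c + d) << 1 == avg << 3), for 4:2:2 the
// horizontal pair sum is shifted left by 2, and for 4:4:4 each pixel is
// shifted left by 3.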
static void cfl_luma_subsampling_420_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vldh_dup_u8(input + input_stride));
      vsth_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      const uint16x4_t sum = vpadal_u8(top, vld1_u8(input + input_stride));
      vst1_u16(pred_buf_q3, vshl_n_u16(sum, 1));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      const uint16x8_t sum = vpadalq_u8(top, vld1q_u8(input + input_stride));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(sum, 1));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      const uint8x8x4_t bot = vld4_u8(input + input_stride);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddl_u8(top.val[0], top.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddl_u8(bot.val[0], bot.val[1]);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddl_u8(top.val[2], top.val[3]);
      // equivalent to a vpaddlq_u8 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddl_u8(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_422_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vpaddl_u8(vldh_dup_u8(input));
      vsth_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 8) {
      const uint16x4_t top = vpaddl_u8(vld1_u8(input));
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 2));
    } else if (width == 16) {
      const uint16x8_t top = vpaddlq_u8(vld1q_u8(input));
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 2));
    } else {
      const uint8x8x4_t top = vld4_u8(input);
      uint16x8x2_t sum;
      // vaddl_u8 is equivalent to a vpaddlq_u8 (because vld4q interleaves)
      sum.val[0] = vshlq_n_u16(vaddl_u8(top.val[0], top.val[1]), 2);
      sum.val[1] = vshlq_n_u16(vaddl_u8(top.val[2], top.val[3]), 2);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_lbd_neon(const uint8_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x8_t top = vshll_n_u8(vldh_dup_u8(input), 3);
      vst1_u16(pred_buf_q3, vget_low_u16(top));
    } else if (width == 8) {
      const uint16x8_t top = vshll_n_u8(vld1_u8(input), 3);
      vst1q_u16(pred_buf_q3, top);
    } else {
      const uint8x16_t top = vld1q_u8(input);
      vst1q_u16(pred_buf_q3, vshll_n_u8(vget_low_u8(top), 3));
      vst1q_u16(pred_buf_q3 + 8, vshll_n_u8(vget_high_u8(top), 3));
      if (width == 32) {
        const uint8x16_t next_top = vld1q_u8(input + 16);
        vst1q_u16(pred_buf_q3 + 16, vshll_n_u8(vget_low_u8(next_top), 3));
        vst1q_u16(pred_buf_q3 + 24, vshll_n_u8(vget_high_u8(next_top), 3));
      }
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

#if CONFIG_AV1_HIGHBITDEPTH
#ifndef __aarch64__
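// vpaddq_u16 is an AArch64-only intrinsic; provide an equivalent built from
// two 64-bit pairwise adds for 32-bit Arm builds.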
uint16x8_t vpaddq_u16(uint16x8_t a, uint16x8_t b) {
  return vcombine_u16(vpadd_u16(vget_low_u16(a), vget_high_u16(a)),
                      vpadd_u16(vget_low_u16(b), vget_high_u16(b)));
}
#endif

static void cfl_luma_subsampling_420_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + (height >> 1) * CFL_BUF_LINE;
  const int luma_stride = input_stride << 1;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t bot = vld1_u16(input + input_stride);
      const uint16x4_t sum = vadd_u16(top, bot);
      const uint16x4_t hsum = vpadd_u16(sum, sum);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
    } else if (width < 32) {
      const uint16x8_t top = vld1q_u16(input);
      const uint16x8_t bot = vld1q_u16(input + input_stride);
      const uint16x8_t sum = vaddq_u16(top, bot);
      if (width == 8) {
        const uint16x4_t hsum = vget_low_u16(vpaddq_u16(sum, sum));
        vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 1));
      } else {
        const uint16x8_t top_1 = vld1q_u16(input + 8);
        const uint16x8_t bot_1 = vld1q_u16(input + 8 + input_stride);
        const uint16x8_t sum_1 = vaddq_u16(top_1, bot_1);
        const uint16x8_t hsum = vpaddq_u16(sum, sum_1);
        vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 1));
      }
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      const uint16x8x4_t bot = vld4q_u16(input + input_stride);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_0 = vaddq_u16(bot.val[0], bot.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t top_1 = vaddq_u16(top.val[2], top.val[3]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t bot_1 = vaddq_u16(bot.val[2], bot.val[3]);
      uint16x8x2_t sum;
      sum.val[0] = vshlq_n_u16(vaddq_u16(top_0, bot_0), 1);
      sum.val[1] = vshlq_n_u16(vaddq_u16(top_1, bot_1), 1);
      vst2q_u16(pred_buf_q3, sum);
    }
    input += luma_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_422_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      const uint16x4_t hsum = vpadd_u16(top, top);
      vsth_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 8) {
      const uint16x4x2_t top = vld2_u16(input);
      // equivalent to a vpadd_u16 (because vld2 interleaves)
      const uint16x4_t hsum = vadd_u16(top.val[0], top.val[1]);
      vst1_u16(pred_buf_q3, vshl_n_u16(hsum, 2));
    } else if (width == 16) {
      const uint16x8x2_t top = vld2q_u16(input);
      // equivalent to a vpaddq_u16 (because vld2q interleaves)
      const uint16x8_t hsum = vaddq_u16(top.val[0], top.val[1]);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(hsum, 2));
    } else {
      const uint16x8x4_t top = vld4q_u16(input);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_0 = vaddq_u16(top.val[0], top.val[1]);
      // equivalent to a vpaddq_u16 (because vld4q interleaves)
      const uint16x8_t hsum_1 = vaddq_u16(top.val[2], top.val[3]);
      uint16x8x2_t result = { { vshlq_n_u16(hsum_0, 2),
                                vshlq_n_u16(hsum_1, 2) } };
      vst2q_u16(pred_buf_q3, result);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}

static void cfl_luma_subsampling_444_hbd_neon(const uint16_t *input,
                                              int input_stride,
                                              uint16_t *pred_buf_q3, int width,
                                              int height) {
  const uint16_t *end = pred_buf_q3 + height * CFL_BUF_LINE;
  do {
    if (width == 4) {
      const uint16x4_t top = vld1_u16(input);
      vst1_u16(pred_buf_q3, vshl_n_u16(top, 3));
    } else if (width == 8) {
      const uint16x8_t top = vld1q_u16(input);
      vst1q_u16(pred_buf_q3, vshlq_n_u16(top, 3));
    } else if (width == 16) {
      uint16x8x2_t top = vld2q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      vst2q_u16(pred_buf_q3, top);
    } else {
      uint16x8x4_t top = vld4q_u16(input);
      top.val[0] = vshlq_n_u16(top.val[0], 3);
      top.val[1] = vshlq_n_u16(top.val[1], 3);
      top.val[2] = vshlq_n_u16(top.val[2], 3);
      top.val[3] = vshlq_n_u16(top.val[3], 3);
      vst4q_u16(pred_buf_q3, top);
    }
    input += input_stride;
  } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
}
#endif  // CONFIG_AV1_HIGHBITDEPTH

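// CFL_GET_SUBSAMPLE_FUNCTION (see av1/common/cfl.h) expands to the
// per-block-size wrappers built around the subsampling kernels above.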
CFL_GET_SUBSAMPLE_FUNCTION(neon)

static INLINE void subtract_average_neon(const uint16_t *src, int16_t *dst,
                                         int width, int height,
                                         int round_offset,
                                         const int num_pel_log2) {
  const uint16_t *const end = src + height * CFL_BUF_LINE;

  // Round offset is not needed, because the rounding is folded into the
  // vqrshrn_n_u32 (rounding shift) used to compute the average below.
  (void)round_offset;

  // To optimize the use of the CPU pipeline, we process 4 rows per iteration
  const int step = 4 * CFL_BUF_LINE;

  // At this stage, the prediction buffer contains scaled reconstructed luma
  // pixels, which are positive integers and only require 15 bits. By using
  // an unsigned integer for the sum, we can do one addition operation inside
  // 16 bits (8 lanes) before having to convert to 32 bits (4 lanes).
  const uint16_t *sum_buf = src;
  uint32x4_t sum_32x4 = { 0, 0, 0, 0 };
  do {
    // For all widths, we load, add and combine the data so it fits in 4 lanes.
    if (width == 4) {
      const uint16x4_t a0 =
          vadd_u16(vld1_u16(sum_buf), vld1_u16(sum_buf + CFL_BUF_LINE));
      const uint16x4_t a1 = vadd_u16(vld1_u16(sum_buf + 2 * CFL_BUF_LINE),
                                     vld1_u16(sum_buf + 3 * CFL_BUF_LINE));
      sum_32x4 = vaddq_u32(sum_32x4, vaddl_u16(a0, a1));
    } else if (width == 8) {
      const uint16x8_t a0 = vldaddq_u16(sum_buf, CFL_BUF_LINE);
      const uint16x8_t a1 =
          vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, CFL_BUF_LINE);
      sum_32x4 = vpadalq_u16(sum_32x4, a0);
      sum_32x4 = vpadalq_u16(sum_32x4, a1);
    } else {
      const uint16x8_t row0 = vldaddq_u16(sum_buf, 8);
      const uint16x8_t row1 = vldaddq_u16(sum_buf + CFL_BUF_LINE, 8);
      const uint16x8_t row2 = vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE, 8);
      const uint16x8_t row3 = vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE, 8);
      sum_32x4 = vpadalq_u16(sum_32x4, row0);
      sum_32x4 = vpadalq_u16(sum_32x4, row1);
      sum_32x4 = vpadalq_u16(sum_32x4, row2);
      sum_32x4 = vpadalq_u16(sum_32x4, row3);

      if (width == 32) {
        const uint16x8_t row0_1 = vldaddq_u16(sum_buf + 16, 8);
        const uint16x8_t row1_1 = vldaddq_u16(sum_buf + CFL_BUF_LINE + 16, 8);
        const uint16x8_t row2_1 =
            vldaddq_u16(sum_buf + 2 * CFL_BUF_LINE + 16, 8);
        const uint16x8_t row3_1 =
            vldaddq_u16(sum_buf + 3 * CFL_BUF_LINE + 16, 8);

        sum_32x4 = vpadalq_u16(sum_32x4, row0_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row1_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row2_1);
        sum_32x4 = vpadalq_u16(sum_32x4, row3_1);
      }
    }
    sum_buf += step;
  } while (sum_buf < end);

  // Permute and add in such a way that each lane contains the block sum.
  // [A+C+B+D, B+D+A+C, C+A+D+B, D+B+C+A]
#ifdef __aarch64__
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
  sum_32x4 = vpaddq_u32(sum_32x4, sum_32x4);
#else
  uint32x4_t flip =
      vcombine_u32(vget_high_u32(sum_32x4), vget_low_u32(sum_32x4));
  sum_32x4 = vaddq_u32(sum_32x4, flip);
  sum_32x4 = vaddq_u32(sum_32x4, vrev64q_u32(sum_32x4));
#endif

  // Computing the average could be done using scalars, but getting off the
  // NEON engine introduces latency, so we use vqrshrn.
  int16x4_t avg_16x4;
  // vqrshrn_n_u32 requires an immediate shift amount, so constant propagation
  // through the switch below makes for some ugly code.
  switch (num_pel_log2) {
    case 4: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 4)); break;
    case 5: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 5)); break;
    case 6: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 6)); break;
    case 7: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 7)); break;
    case 8: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 8)); break;
    case 9: avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 9)); break;
    case 10:
      avg_16x4 = vreinterpret_s16_u16(vqrshrn_n_u32(sum_32x4, 10));
      break;
    default: assert(0);
  }

  if (width == 4) {
    do {
      vst1_s16(dst, vsub_s16(vreinterpret_s16_u16(vld1_u16(src)), avg_16x4));
      src += CFL_BUF_LINE;
      dst += CFL_BUF_LINE;
    } while (src < end);
  } else {
    const int16x8_t avg_16x8 = vcombine_s16(avg_16x4, avg_16x4);
    do {
      vldsubstq_s16(dst, src, 0, avg_16x8);
      vldsubstq_s16(dst, src, CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 2 * CFL_BUF_LINE, avg_16x8);
      vldsubstq_s16(dst, src, 3 * CFL_BUF_LINE, avg_16x8);

      if (width > 8) {
        vldsubstq_s16(dst, src, 8, avg_16x8);
        vldsubstq_s16(dst, src, 8 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 8 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      if (width == 32) {
        vldsubstq_s16(dst, src, 16, avg_16x8);
        vldsubstq_s16(dst, src, 16 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 16 + 3 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24, avg_16x8);
        vldsubstq_s16(dst, src, 24 + CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 2 * CFL_BUF_LINE, avg_16x8);
        vldsubstq_s16(dst, src, 24 + 3 * CFL_BUF_LINE, avg_16x8);
      }
      src += step;
      dst += step;
    } while (src < end);
  }
}

CFL_SUB_AVG_FN(neon)

// Negate 16-bit integers in a when the corresponding signed 16-bit integer
// in b is negative. (The negation is plain two's complement, not saturating.)
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//     practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//     corresponding elements in b are zero. Because vsign is used twice in a
//     row, with b in the first call becoming a in the second call, there's no
//     impact from not zeroing out.
static int16x4_t vsign_s16(int16x4_t a, int16x4_t b) {
  const int16x4_t mask = vshr_n_s16(b, 15);
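  // mask is 0 or -1: (a + mask) ^ mask leaves a unchanged when mask == 0, and
  // gives ~(a - 1) == -a when mask == -1.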
  return veor_s16(vadd_s16(a, mask), mask);
}

// Negate 16-bit integers in a when the corresponding signed 16-bit integer
// in b is negative. (The negation is plain two's complement, not saturating.)
// Notes:
//   * Negating INT16_MIN results in INT16_MIN. However, this cannot occur in
//     practice, as scaled_luma is the multiplication of two absolute values.
//   * In the Intel equivalent, elements in a are zeroed out when the
//     corresponding elements in b are zero. Because vsignq is used twice in a
//     row, with b in the first call becoming a in the second call, there's no
//     impact from not zeroing out.
static int16x8_t vsignq_s16(int16x8_t a, int16x8_t b) {
  const int16x8_t mask = vshrq_n_s16(b, 15);
  return veorq_s16(vaddq_s16(a, mask), mask);
}

static INLINE int16x4_t predict_w4(const int16_t *pred_buf_q3,
                                   int16x4_t alpha_sign, int abs_alpha_q12,
                                   int16x4_t dc) {
  const int16x4_t ac_q3 = vld1_s16(pred_buf_q3);
  const int16x4_t ac_sign = veor_s16(alpha_sign, ac_q3);
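  // vqrdmulh computes (2 * a * b + (1 << 15)) >> 16, so multiplying the Q3 AC
  // value by abs_alpha_q12 == abs(alpha_q3) << 9 yields the rounded value of
  // (ac_q3 * abs(alpha_q3)) / 64, i.e. the Q3 * Q3 product back in pixel units.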
  int16x4_t scaled_luma = vqrdmulh_n_s16(vabs_s16(ac_q3), abs_alpha_q12);
  return vadd_s16(vsign_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8_t predict_w8(const int16_t *pred_buf_q3,
                                   int16x8_t alpha_sign, int abs_alpha_q12,
                                   int16x8_t dc) {
  const int16x8_t ac_q3 = vld1q_s16(pred_buf_q3);
  const int16x8_t ac_sign = veorq_s16(alpha_sign, ac_q3);
  int16x8_t scaled_luma = vqrdmulhq_n_s16(vabsq_s16(ac_q3), abs_alpha_q12);
  return vaddq_s16(vsignq_s16(scaled_luma, ac_sign), dc);
}

static INLINE int16x8x2_t predict_w16(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld2q_s16 interleaves, which is not useful for prediction. vld1q_s16_x2
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x2_t ac_q3 = vld2q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  int16x8x2_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  return result;
}

static INLINE int16x8x4_t predict_w32(const int16_t *pred_buf_q3,
                                      int16x8_t alpha_sign, int abs_alpha_q12,
                                      int16x8_t dc) {
  // vld4q_s16 interleaves, which is not useful for prediction. vld1q_s16_x4
  // does not interleave, but is not currently available in the compiler used
  // by the AOM build system.
  const int16x8x4_t ac_q3 = vld4q_s16(pred_buf_q3);
  const int16x8_t ac_sign_0 = veorq_s16(alpha_sign, ac_q3.val[0]);
  const int16x8_t ac_sign_1 = veorq_s16(alpha_sign, ac_q3.val[1]);
  const int16x8_t ac_sign_2 = veorq_s16(alpha_sign, ac_q3.val[2]);
  const int16x8_t ac_sign_3 = veorq_s16(alpha_sign, ac_q3.val[3]);
  const int16x8_t scaled_luma_0 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[0]), abs_alpha_q12);
  const int16x8_t scaled_luma_1 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[1]), abs_alpha_q12);
  const int16x8_t scaled_luma_2 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[2]), abs_alpha_q12);
  const int16x8_t scaled_luma_3 =
      vqrdmulhq_n_s16(vabsq_s16(ac_q3.val[3]), abs_alpha_q12);
  int16x8x4_t result;
  result.val[0] = vaddq_s16(vsignq_s16(scaled_luma_0, ac_sign_0), dc);
  result.val[1] = vaddq_s16(vsignq_s16(scaled_luma_1, ac_sign_1), dc);
  result.val[2] = vaddq_s16(vsignq_s16(scaled_luma_2, ac_sign_2), dc);
  result.val[3] = vaddq_s16(vsignq_s16(scaled_luma_3, ac_sign_3), dc);
  return result;
}

static INLINE void cfl_predict_lbd_neon(const int16_t *pred_buf_q3,
                                        uint8_t *dst, int dst_stride,
                                        int alpha_q3, int width, int height) {
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    do {
      const int16x4_t pred =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vsth_u8(dst, vqmovun_s16(vcombine_s16(pred, pred)));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    do {
      if (width == 8) {
        vst1_u8(dst, vqmovun_s16(predict_w8(pred_buf_q3, alpha_sign,
                                            abs_alpha_q12, dc)));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x2_t predun = { { vqmovun_s16(pred.val[0]),
                                       vqmovun_s16(pred.val[1]) } };
        vst2_u8(dst, predun);
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        const uint8x8x4_t predun = {
          { vqmovun_s16(pred.val[0]), vqmovun_s16(pred.val[1]),
            vqmovun_s16(pred.val[2]), vqmovun_s16(pred.val[3]) }
        };
        vst4_u8(dst, predun);
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, lbd)

#if CONFIG_AV1_HIGHBITDEPTH
static INLINE uint16x4_t clamp_s16(int16x4_t a, int16x4_t max) {
  return vreinterpret_u16_s16(vmax_s16(vmin_s16(a, max), vdup_n_s16(0)));
}

static INLINE uint16x8_t clampq_s16(int16x8_t a, int16x8_t max) {
  return vreinterpretq_u16_s16(vmaxq_s16(vminq_s16(a, max), vdupq_n_s16(0)));
}

static INLINE uint16x8x2_t clamp2q_s16(int16x8x2_t a, int16x8_t max) {
  uint16x8x2_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  return result;
}

static INLINE uint16x8x4_t clamp4q_s16(int16x8x4_t a, int16x8_t max) {
  uint16x8x4_t result;
  result.val[0] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[0], max), vdupq_n_s16(0)));
  result.val[1] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[1], max), vdupq_n_s16(0)));
  result.val[2] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[2], max), vdupq_n_s16(0)));
  result.val[3] = vreinterpretq_u16_s16(
      vmaxq_s16(vminq_s16(a.val[3], max), vdupq_n_s16(0)));
  return result;
}

static INLINE void cfl_predict_hbd_neon(const int16_t *pred_buf_q3,
                                        uint16_t *dst, int dst_stride,
                                        int alpha_q3, int bd, int width,
                                        int height) {
  const int max = (1 << bd) - 1;
  const int16_t abs_alpha_q12 = abs(alpha_q3) << 9;
  const int16_t *const end = pred_buf_q3 + height * CFL_BUF_LINE;
  if (width == 4) {
    const int16x4_t alpha_sign = vdup_n_s16(alpha_q3);
    const int16x4_t dc = vdup_n_s16(*dst);
    const int16x4_t max_16x4 = vdup_n_s16(max);
    do {
      const int16x4_t scaled_luma =
          predict_w4(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
      vst1_u16(dst, clamp_s16(scaled_luma, max_16x4));
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  } else {
    const int16x8_t alpha_sign = vdupq_n_s16(alpha_q3);
    const int16x8_t dc = vdupq_n_s16(*dst);
    const int16x8_t max_16x8 = vdupq_n_s16(max);
    do {
      if (width == 8) {
        const int16x8_t pred =
            predict_w8(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst1q_u16(dst, clampq_s16(pred, max_16x8));
      } else if (width == 16) {
        const int16x8x2_t pred =
            predict_w16(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst2q_u16(dst, clamp2q_s16(pred, max_16x8));
      } else {
        const int16x8x4_t pred =
            predict_w32(pred_buf_q3, alpha_sign, abs_alpha_q12, dc);
        vst4q_u16(dst, clamp4q_s16(pred, max_16x8));
      }
      dst += dst_stride;
    } while ((pred_buf_q3 += CFL_BUF_LINE) < end);
  }
}

CFL_PREDICT_FN(neon, hbd)
#endif  // CONFIG_AV1_HIGHBITDEPTH