/*
 * Copyright (c) 2016, Alliance for Open Media. All rights reserved
 *
 * This source code is subject to the terms of the BSD 2 Clause License and
 * the Alliance for Open Media Patent License 1.0. If the BSD 2 Clause License
 * was not distributed with this source code in the LICENSE file, you can
 * obtain it at www.aomedia.org/license/software. If the Alliance for Open
 * Media Patent License 1.0 was not distributed with this source code in the
 * PATENTS file, you can obtain it at www.aomedia.org/license/patent.
 */

#include <arm_neon.h>
#include <assert.h>

#include "config/aom_config.h"
#include "config/aom_dsp_rtcd.h"

#include "aom/aom_integer.h"

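// IS_POWER_OF_TWO is used by highbd_dc_predictor() below and is assumed to
// come from a libaom header not listed here; this guarded fallback is an
// assumption added so the file stands alone.
#ifndef IS_POWER_OF_TWO
#define IS_POWER_OF_TWO(x) (((x) & ((x)-1)) == 0)
#endif
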
//------------------------------------------------------------------------------
// DC 4x4
// 'do_above' and 'do_left' facilitate branch removal when inlined.
static INLINE void dc_4x4(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
                          const uint8_t *left, int do_above, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_above) {
    const uint8x8_t A = vld1_u8(above);  // top row
    const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    sum_top = vcombine_u16(p1, p1);
  }

  if (do_left) {
    const uint8x8_t L = vld1_u8(left);   // left border
    const uint16x4_t p0 = vpaddl_u8(L);  // cascading summation of the left
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    sum_left = vcombine_u16(p1, p1);
  }

  if (do_above && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 3);  // (sum + 4) >> 3: average of 8 border pixels
  } else if (do_above) {
    dc0 = vrshrn_n_u16(sum_top, 2);  // (sum + 2) >> 2: average of 4
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 2);
  } else {
    dc0 = vdup_n_u8(0x80);  // no borders: use the 8-bit midpoint
  }

  {
    const uint8x8_t dc = vdup_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 4; ++i) {
      vst1_lane_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc), 0);
    }
  }
}

void aom_dc_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
  dc_4x4(dst, stride, above, left, 1, 1);
}

void aom_dc_left_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
  (void)above;
  dc_4x4(dst, stride, NULL, left, 0, 1);
}

void aom_dc_top_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
  (void)left;
  dc_4x4(dst, stride, above, NULL, 1, 0);
}

void aom_dc_128_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
  (void)above;
  (void)left;
  dc_4x4(dst, stride, NULL, NULL, 0, 0);
}

//------------------------------------------------------------------------------
// DC 8x8
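//
// Same scheme as dc_4x4, with one more level of pairwise summation for the
// eight border pixels per side: dc = (sum_top + sum_left + 8) >> 4 when both
// borders are available.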

// 'do_above' and 'do_left' facilitate branch removal when inlined.
static INLINE void dc_8x8(uint8_t *dst, ptrdiff_t stride, const uint8_t *above,
                          const uint8_t *left, int do_above, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_above) {
    const uint8x8_t A = vld1_u8(above);  // top row
    const uint16x4_t p0 = vpaddl_u8(A);  // cascading summation of the top
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    sum_top = vcombine_u16(p2, p2);
  }

  if (do_left) {
    const uint8x8_t L = vld1_u8(left);   // left border
    const uint16x4_t p0 = vpaddl_u8(L);  // cascading summation of the left
    const uint16x4_t p1 = vpadd_u16(p0, p0);
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    sum_left = vcombine_u16(p2, p2);
  }

  if (do_above && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 4);  // (sum + 8) >> 4: average of 16 border pixels
  } else if (do_above) {
    dc0 = vrshrn_n_u16(sum_top, 3);  // (sum + 4) >> 3: average of 8
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 3);
  } else {
    dc0 = vdup_n_u8(0x80);  // no borders: use the 8-bit midpoint
  }

  {
    const uint8x8_t dc = vdup_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 8; ++i) {
      // Stores all 8 bytes of the duplicated DC value to one row.
      vst1_u32((uint32_t *)(dst + i * stride), vreinterpret_u32_u8(dc));
    }
  }
}

void aom_dc_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                               const uint8_t *above, const uint8_t *left) {
  dc_8x8(dst, stride, above, left, 1, 1);
}

void aom_dc_left_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                    const uint8_t *above, const uint8_t *left) {
  (void)above;
  dc_8x8(dst, stride, NULL, left, 0, 1);
}

void aom_dc_top_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
  (void)left;
  dc_8x8(dst, stride, above, NULL, 1, 0);
}

void aom_dc_128_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                                   const uint8_t *above, const uint8_t *left) {
  (void)above;
  (void)left;
  dc_8x8(dst, stride, NULL, NULL, 0, 0);
}

//------------------------------------------------------------------------------
// DC 16x16
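//
// The 16-pixel borders are loaded into 128-bit vectors; vpaddlq_u8 widens
// and pair-sums them, and the two halves are folded together before the
// final pairwise reductions. With both borders, dc = (sum + 16) >> 5.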

// 'do_above' and 'do_left' facilitate branch removal when inlined.
static INLINE void dc_16x16(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *above, const uint8_t *left,
                            int do_above, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_above) {
    const uint8x16_t A = vld1q_u8(above);  // top row
    const uint16x8_t p0 = vpaddlq_u8(A);   // cascading summation of the top
    const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    const uint16x4_t p3 = vpadd_u16(p2, p2);
    sum_top = vcombine_u16(p3, p3);
  }

  if (do_left) {
    const uint8x16_t L = vld1q_u8(left);  // left column
    const uint16x8_t p0 = vpaddlq_u8(L);  // cascading summation of the left
    const uint16x4_t p1 = vadd_u16(vget_low_u16(p0), vget_high_u16(p0));
    const uint16x4_t p2 = vpadd_u16(p1, p1);
    const uint16x4_t p3 = vpadd_u16(p2, p2);
    sum_left = vcombine_u16(p3, p3);
  }

  if (do_above && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 5);  // (sum + 16) >> 5: average of 32 border pixels
  } else if (do_above) {
    dc0 = vrshrn_n_u16(sum_top, 4);  // (sum + 8) >> 4: average of 16
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 4);
  } else {
    dc0 = vdup_n_u8(0x80);  // no borders: use the 8-bit midpoint
  }

  {
    const uint8x16_t dc = vdupq_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 16; ++i) {
      vst1q_u8(dst + i * stride, dc);
    }
  }
}

void aom_dc_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  dc_16x16(dst, stride, above, left, 1, 1);
}

void aom_dc_left_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  (void)above;
  dc_16x16(dst, stride, NULL, left, 0, 1);
}

void aom_dc_top_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  (void)left;
  dc_16x16(dst, stride, above, NULL, 1, 0);
}

void aom_dc_128_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  (void)above;
  (void)left;
  dc_16x16(dst, stride, NULL, NULL, 0, 0);
}

//------------------------------------------------------------------------------
// DC 32x32
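//
// Each 32-pixel border takes two 16-byte loads whose pairwise sums are added
// together before the reduction. With both borders, dc = (sum + 32) >> 6.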

// 'do_above' and 'do_left' facilitate branch removal when inlined.
static INLINE void dc_32x32(uint8_t *dst, ptrdiff_t stride,
                            const uint8_t *above, const uint8_t *left,
                            int do_above, int do_left) {
  uint16x8_t sum_top;
  uint16x8_t sum_left;
  uint8x8_t dc0;

  if (do_above) {
    const uint8x16_t A0 = vld1q_u8(above);  // top row
    const uint8x16_t A1 = vld1q_u8(above + 16);
    const uint16x8_t p0 = vpaddlq_u8(A0);  // cascading summation of the top
    const uint16x8_t p1 = vpaddlq_u8(A1);
    const uint16x8_t p2 = vaddq_u16(p0, p1);
    const uint16x4_t p3 = vadd_u16(vget_low_u16(p2), vget_high_u16(p2));
    const uint16x4_t p4 = vpadd_u16(p3, p3);
    const uint16x4_t p5 = vpadd_u16(p4, p4);
    sum_top = vcombine_u16(p5, p5);
  }

  if (do_left) {
    const uint8x16_t L0 = vld1q_u8(left);  // left column
    const uint8x16_t L1 = vld1q_u8(left + 16);
    const uint16x8_t p0 = vpaddlq_u8(L0);  // cascading summation of the left
    const uint16x8_t p1 = vpaddlq_u8(L1);
    const uint16x8_t p2 = vaddq_u16(p0, p1);
    const uint16x4_t p3 = vadd_u16(vget_low_u16(p2), vget_high_u16(p2));
    const uint16x4_t p4 = vpadd_u16(p3, p3);
    const uint16x4_t p5 = vpadd_u16(p4, p4);
    sum_left = vcombine_u16(p5, p5);
  }

  if (do_above && do_left) {
    const uint16x8_t sum = vaddq_u16(sum_left, sum_top);
    dc0 = vrshrn_n_u16(sum, 6);  // (sum + 32) >> 6: average of 64 border pixels
  } else if (do_above) {
    dc0 = vrshrn_n_u16(sum_top, 5);  // (sum + 16) >> 5: average of 32
  } else if (do_left) {
    dc0 = vrshrn_n_u16(sum_left, 5);
  } else {
    dc0 = vdup_n_u8(0x80);  // no borders: use the 8-bit midpoint
  }

  {
    const uint8x16_t dc = vdupq_lane_u8(dc0, 0);
    int i;
    for (i = 0; i < 32; ++i) {
      vst1q_u8(dst + i * stride, dc);
      vst1q_u8(dst + i * stride + 16, dc);
    }
  }
}

void aom_dc_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  dc_32x32(dst, stride, above, left, 1, 1);
}

void aom_dc_left_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                      const uint8_t *above,
                                      const uint8_t *left) {
  (void)above;
  dc_32x32(dst, stride, NULL, left, 0, 1);
}

void aom_dc_top_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  (void)left;
  dc_32x32(dst, stride, above, NULL, 1, 0);
}

void aom_dc_128_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                     const uint8_t *above,
                                     const uint8_t *left) {
  (void)above;
  (void)left;
  dc_32x32(dst, stride, NULL, NULL, 0, 0);
}

// -----------------------------------------------------------------------------

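// D135 (down-right diagonal) prediction for a 4x4 block. The borders are
// packed into one vector ordered L K J I X A B C: the left column reversed,
// the top-left pixel X, then the top row. Each predicted pixel is the 3-tap
// smoothed value (a + 2 * b + c + 2) >> 2 of three consecutive border pixels,
// computed below as a halving add followed by a rounding halving add. Each
// destination row is then a 4-byte window into the filtered vector, sliding
// by one byte per row.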
void aom_d135_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                                 const uint8_t *above, const uint8_t *left) {
  const uint8x8_t XABCD_u8 = vld1_u8(above - 1);
  const uint64x1_t XABCD = vreinterpret_u64_u8(XABCD_u8);
  const uint64x1_t ____XABC = vshl_n_u64(XABCD, 32);
  const uint32x2_t zero = vdup_n_u32(0);
  const uint32x2_t IJKL = vld1_lane_u32((const uint32_t *)left, zero, 0);
  const uint8x8_t IJKL_u8 = vreinterpret_u8_u32(IJKL);
  const uint64x1_t LKJI____ = vreinterpret_u64_u8(vrev32_u8(IJKL_u8));
  const uint64x1_t LKJIXABC = vorr_u64(LKJI____, ____XABC);
  const uint8x8_t KJIXABC_ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 8));
  const uint8x8_t JIXABC__ = vreinterpret_u8_u64(vshr_n_u64(LKJIXABC, 16));
  const uint8_t D = vget_lane_u8(XABCD_u8, 4);
  const uint8x8_t JIXABCD_ = vset_lane_u8(D, JIXABC__, 6);
  const uint8x8_t LKJIXABC_u8 = vreinterpret_u8_u64(LKJIXABC);
  const uint8x8_t avg1 = vhadd_u8(JIXABCD_, LKJIXABC_u8);
  const uint8x8_t avg2 = vrhadd_u8(avg1, KJIXABC_);
  const uint64x1_t avg2_u64 = vreinterpret_u64_u8(avg2);
  const uint32x2_t r3 = vreinterpret_u32_u8(avg2);
  const uint32x2_t r2 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 8));
  const uint32x2_t r1 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 16));
  const uint32x2_t r0 = vreinterpret_u32_u64(vshr_n_u64(avg2_u64, 24));
  vst1_lane_u32((uint32_t *)(dst + 0 * stride), r0, 0);
  vst1_lane_u32((uint32_t *)(dst + 1 * stride), r1, 0);
  vst1_lane_u32((uint32_t *)(dst + 2 * stride), r2, 0);
  vst1_lane_u32((uint32_t *)(dst + 3 * stride), r3, 0);
}

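// Vertical prediction: every row of the block is a copy of the row above the
// block, so the border is loaded once and stored to each destination row.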
void aom_v_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  int i;
  uint32x2_t d0u32 = vdup_n_u32(0);
  (void)left;

  d0u32 = vld1_lane_u32((const uint32_t *)above, d0u32, 0);
  for (i = 0; i < 4; i++, dst += stride)
    vst1_lane_u32((uint32_t *)dst, d0u32, 0);
}

void aom_v_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  int i;
  uint8x8_t d0u8 = vdup_n_u8(0);
  (void)left;

  d0u8 = vld1_u8(above);
  for (i = 0; i < 8; i++, dst += stride) vst1_u8(dst, d0u8);
}

void aom_v_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  int i;
  uint8x16_t q0u8 = vdupq_n_u8(0);
  (void)left;

  q0u8 = vld1q_u8(above);
  for (i = 0; i < 16; i++, dst += stride) vst1q_u8(dst, q0u8);
}

void aom_v_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  int i;
  uint8x16_t q0u8 = vdupq_n_u8(0);
  uint8x16_t q1u8 = vdupq_n_u8(0);
  (void)left;

  q0u8 = vld1q_u8(above);
  q1u8 = vld1q_u8(above + 16);
  for (i = 0; i < 32; i++, dst += stride) {
    vst1q_u8(dst, q0u8);
    vst1q_u8(dst + 16, q1u8);
  }
}

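// Horizontal prediction: each destination row is filled with that row's
// left-border pixel, broadcast across the row with a lane duplicate.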
void aom_h_predictor_4x4_neon(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  uint8x8_t d0u8 = vdup_n_u8(0);
  uint32x2_t d1u32 = vdup_n_u32(0);
  (void)above;

  d1u32 = vld1_lane_u32((const uint32_t *)left, d1u32, 0);

  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 0);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 1);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 2);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u32(d1u32), 3);
  vst1_lane_u32((uint32_t *)dst, vreinterpret_u32_u8(d0u8), 0);
}

void aom_h_predictor_8x8_neon(uint8_t *dst, ptrdiff_t stride,
                              const uint8_t *above, const uint8_t *left) {
  uint8x8_t d0u8 = vdup_n_u8(0);
  uint64x1_t d1u64 = vdup_n_u64(0);
  (void)above;

  d1u64 = vld1_u64((const uint64_t *)left);

  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 0);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 1);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 2);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 3);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 4);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 5);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 6);
  vst1_u8(dst, d0u8);
  dst += stride;
  d0u8 = vdup_lane_u8(vreinterpret_u8_u64(d1u64), 7);
  vst1_u8(dst, d0u8);
}

void aom_h_predictor_16x16_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  int j;
  uint8x8_t d2u8 = vdup_n_u8(0);
  uint8x16_t q0u8 = vdupq_n_u8(0);
  uint8x16_t q1u8 = vdupq_n_u8(0);
  (void)above;

  q1u8 = vld1q_u8(left);
  d2u8 = vget_low_u8(q1u8);
  for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
    q0u8 = vdupq_lane_u8(d2u8, 0);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 1);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 2);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 3);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 4);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 5);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 6);
    vst1q_u8(dst, q0u8);
    dst += stride;
    q0u8 = vdupq_lane_u8(d2u8, 7);
    vst1q_u8(dst, q0u8);
    dst += stride;
  }
}

void aom_h_predictor_32x32_neon(uint8_t *dst, ptrdiff_t stride,
                                const uint8_t *above, const uint8_t *left) {
  int j, k;
  uint8x8_t d2u8 = vdup_n_u8(0);
  uint8x16_t q0u8 = vdupq_n_u8(0);
  uint8x16_t q1u8 = vdupq_n_u8(0);
  (void)above;

  for (k = 0; k < 2; k++, left += 16) {
    q1u8 = vld1q_u8(left);
    d2u8 = vget_low_u8(q1u8);
    for (j = 0; j < 2; j++, d2u8 = vget_high_u8(q1u8)) {
      q0u8 = vdupq_lane_u8(d2u8, 0);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 1);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 2);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 3);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 4);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 5);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 6);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
      q0u8 = vdupq_lane_u8(d2u8, 7);
      vst1q_u8(dst, q0u8);
      vst1q_u8(dst + 16, q0u8);
      dst += stride;
    }
  }
}

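// High-bitdepth DC prediction over 16-bit pixels. Border sums are gathered
// with pairwise add-accumulate into 32-bit lanes, reduced to a scalar, and
// the rounded average (sum + count / 2) / count is broadcast to the block.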
static INLINE void highbd_dc_predictor(uint16_t *dst, ptrdiff_t stride, int bw,
                                       const uint16_t *above,
                                       const uint16_t *left) {
  assert(bw >= 4);
  assert(IS_POWER_OF_TWO(bw));
  int expected_dc, sum = 0;
  const int count = bw * 2;
  uint32x4_t sum_q = vdupq_n_u32(0);
  uint32x2_t sum_d;
  uint16_t *dst_1;
  if (bw >= 8) {
    for (int i = 0; i < bw; i += 8) {
      // Pairwise add-accumulate: 16-bit border pixels into 32-bit lanes.
      sum_q = vpadalq_u16(sum_q, vld1q_u16(above));
      sum_q = vpadalq_u16(sum_q, vld1q_u16(left));
      above += 8;
      left += 8;
    }
    // Reduce the four 32-bit lanes to a scalar sum.
    sum_d = vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
    sum = vget_lane_s32(vreinterpret_s32_u64(vpaddl_u32(sum_d)), 0);
    expected_dc = (sum + (count >> 1)) / count;  // rounded average
    const uint16x8_t dc = vdupq_n_u16((uint16_t)expected_dc);
    for (int r = 0; r < bw; r++) {
      dst_1 = dst;
      for (int i = 0; i < bw; i += 8) {
        vst1q_u16(dst_1, dc);
        dst_1 += 8;
      }
      dst += stride;
    }
  } else {  // 4x4
    sum_q = vaddl_u16(vld1_u16(above), vld1_u16(left));
    sum_d = vadd_u32(vget_low_u32(sum_q), vget_high_u32(sum_q));
    sum = vget_lane_s32(vreinterpret_s32_u64(vpaddl_u32(sum_d)), 0);
    expected_dc = (sum + (count >> 1)) / count;
    const uint16x4_t dc = vdup_n_u16((uint16_t)expected_dc);
    for (int r = 0; r < bw; r++) {
      vst1_u16(dst, dc);
      dst += stride;
    }
  }
}

#define intra_pred_highbd_sized_neon(type, width)               \
  void aom_highbd_##type##_predictor_##width##x##width##_neon(  \
      uint16_t *dst, ptrdiff_t stride, const uint16_t *above,   \
      const uint16_t *left, int bd) {                           \
    (void)bd;                                                   \
    highbd_##type##_predictor(dst, stride, width, above, left); \
  }
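
// For example, intra_pred_highbd_sized_neon(dc, 4) defines
// aom_highbd_dc_predictor_4x4_neon(), which discards the bit-depth argument
// and forwards to highbd_dc_predictor() with bw = 4.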

#define intra_pred_square(type)           \
  intra_pred_highbd_sized_neon(type, 4);  \
  intra_pred_highbd_sized_neon(type, 8);  \
  intra_pred_highbd_sized_neon(type, 16); \
  intra_pred_highbd_sized_neon(type, 32); \
  intra_pred_highbd_sized_neon(type, 64);

intra_pred_square(dc);
#undef intra_pred_square

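// Usage sketch (hypothetical caller, not part of libaom): fill a 4x4 block
// from its reconstructed borders with the DC predictor.
//
//   // Border buffers are padded to 8 bytes because dc_4x4() loads a full
//   // 64-bit vector; only the first four values contribute to the result.
//   const uint8_t above[8] = { 10, 20, 30, 40 };
//   const uint8_t left[8] = { 50, 60, 70, 80 };
//   uint8_t dst[4 * 4];
//   aom_dc_predictor_4x4_neon(dst, 4 /* stride */, above, left);
//   // Every dst pixel is now (10+20+30+40 + 50+60+70+80 + 4) >> 3 = 45.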