1 /*
2 * Copyright (c) 2016 The WebM project authors. All Rights Reserved.
3 *
4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree.
9 */
10
11 #ifndef VPX_VPX_DSP_ARM_IDCT_NEON_H_
12 #define VPX_VPX_DSP_ARM_IDCT_NEON_H_
13
14 #include <arm_neon.h>
15
16 #include "./vpx_config.h"
17 #include "vpx_dsp/arm/transpose_neon.h"
18 #include "vpx_dsp/txfm_common.h"
19 #include "vpx_dsp/vpx_dsp_common.h"
20
21 static const int16_t kCospi[16] = {
22 16384 /* cospi_0_64 */, 15137 /* cospi_8_64 */,
23 11585 /* cospi_16_64 */, 6270 /* cospi_24_64 */,
24 16069 /* cospi_4_64 */, 13623 /* cospi_12_64 */,
25 -9102 /* -cospi_20_64 */, 3196 /* cospi_28_64 */,
26 16305 /* cospi_2_64 */, 1606 /* cospi_30_64 */,
27 14449 /* cospi_10_64 */, 7723 /* cospi_22_64 */,
28 15679 /* cospi_6_64 */, -4756 /* -cospi_26_64 */,
29 12665 /* cospi_14_64 */, -10394 /* -cospi_18_64 */
30 };
31
32 static const int32_t kCospi32[16] = {
33 16384 /* cospi_0_64 */, 15137 /* cospi_8_64 */,
34 11585 /* cospi_16_64 */, 6270 /* cospi_24_64 */,
35 16069 /* cospi_4_64 */, 13623 /* cospi_12_64 */,
36 -9102 /* -cospi_20_64 */, 3196 /* cospi_28_64 */,
37 16305 /* cospi_2_64 */, 1606 /* cospi_30_64 */,
38 14449 /* cospi_10_64 */, 7723 /* cospi_22_64 */,
39 15679 /* cospi_6_64 */, -4756 /* -cospi_26_64 */,
40 12665 /* cospi_14_64 */, -10394 /* -cospi_18_64 */
41 };
42
43 //------------------------------------------------------------------------------
44 // Use saturating add/sub to avoid overflow in 2nd pass in high bit-depth
final_add(const int16x8_t a,const int16x8_t b)45 static INLINE int16x8_t final_add(const int16x8_t a, const int16x8_t b) {
46 #if CONFIG_VP9_HIGHBITDEPTH
47 return vqaddq_s16(a, b);
48 #else
49 return vaddq_s16(a, b);
50 #endif
51 }
52
final_sub(const int16x8_t a,const int16x8_t b)53 static INLINE int16x8_t final_sub(const int16x8_t a, const int16x8_t b) {
54 #if CONFIG_VP9_HIGHBITDEPTH
55 return vqsubq_s16(a, b);
56 #else
57 return vsubq_s16(a, b);
58 #endif
59 }
60
61 //------------------------------------------------------------------------------
62
highbd_idct_add_dual(const int32x4x2_t s0,const int32x4x2_t s1)63 static INLINE int32x4x2_t highbd_idct_add_dual(const int32x4x2_t s0,
64 const int32x4x2_t s1) {
65 int32x4x2_t t;
66 t.val[0] = vaddq_s32(s0.val[0], s1.val[0]);
67 t.val[1] = vaddq_s32(s0.val[1], s1.val[1]);
68 return t;
69 }
70
highbd_idct_sub_dual(const int32x4x2_t s0,const int32x4x2_t s1)71 static INLINE int32x4x2_t highbd_idct_sub_dual(const int32x4x2_t s0,
72 const int32x4x2_t s1) {
73 int32x4x2_t t;
74 t.val[0] = vsubq_s32(s0.val[0], s1.val[0]);
75 t.val[1] = vsubq_s32(s0.val[1], s1.val[1]);
76 return t;
77 }
78
79 //------------------------------------------------------------------------------
80
dct_const_round_shift_low_8(const int32x4_t * const in)81 static INLINE int16x8_t dct_const_round_shift_low_8(const int32x4_t *const in) {
82 return vcombine_s16(vrshrn_n_s32(in[0], DCT_CONST_BITS),
83 vrshrn_n_s32(in[1], DCT_CONST_BITS));
84 }
85
dct_const_round_shift_low_8_dual(const int32x4_t * const t32,int16x8_t * const d0,int16x8_t * const d1)86 static INLINE void dct_const_round_shift_low_8_dual(const int32x4_t *const t32,
87 int16x8_t *const d0,
88 int16x8_t *const d1) {
89 *d0 = dct_const_round_shift_low_8(t32 + 0);
90 *d1 = dct_const_round_shift_low_8(t32 + 2);
91 }
92
93 static INLINE int32x4x2_t
dct_const_round_shift_high_4x2(const int64x2_t * const in)94 dct_const_round_shift_high_4x2(const int64x2_t *const in) {
95 int32x4x2_t out;
96 out.val[0] = vcombine_s32(vrshrn_n_s64(in[0], DCT_CONST_BITS),
97 vrshrn_n_s64(in[1], DCT_CONST_BITS));
98 out.val[1] = vcombine_s32(vrshrn_n_s64(in[2], DCT_CONST_BITS),
99 vrshrn_n_s64(in[3], DCT_CONST_BITS));
100 return out;
101 }
102
103 // Multiply a by a_const. Saturate, shift and narrow by DCT_CONST_BITS.
multiply_shift_and_narrow_s16(const int16x8_t a,const int16_t a_const)104 static INLINE int16x8_t multiply_shift_and_narrow_s16(const int16x8_t a,
105 const int16_t a_const) {
106 // Shift by DCT_CONST_BITS + rounding will be within 16 bits for well formed
107 // streams. See WRAPLOW and dct_const_round_shift for details.
108 // This instruction doubles the result and returns the high half, essentially
109 // resulting in a right shift by 15. By multiplying the constant first that
110 // becomes a right shift by DCT_CONST_BITS.
111 // The largest possible value used here is
112 // vpx_dsp/txfm_common.h:cospi_1_64 = 16364 (* 2 = 32728) a which falls *just*
113 // within the range of int16_t (+32767 / -32768) even when negated.
114 return vqrdmulhq_n_s16(a, a_const * 2);
115 }
116
117 // Add a and b, then multiply by ab_const. Shift and narrow by DCT_CONST_BITS.
add_multiply_shift_and_narrow_s16(const int16x8_t a,const int16x8_t b,const int16_t ab_const)118 static INLINE int16x8_t add_multiply_shift_and_narrow_s16(
119 const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
120 // In both add_ and it's pair, sub_, the input for well-formed streams will be
121 // well within 16 bits (input to the idct is the difference between two frames
122 // and will be within -255 to 255, or 9 bits)
123 // However, for inputs over about 25,000 (valid for int16_t, but not for idct
124 // input) this function can not use vaddq_s16.
125 // In order to match existing behavior and intentionally out of range tests,
126 // expand the addition up to 32 bits to prevent truncation.
127 int32x4_t t[2];
128 t[0] = vaddl_s16(vget_low_s16(a), vget_low_s16(b));
129 t[1] = vaddl_s16(vget_high_s16(a), vget_high_s16(b));
130 t[0] = vmulq_n_s32(t[0], ab_const);
131 t[1] = vmulq_n_s32(t[1], ab_const);
132 return dct_const_round_shift_low_8(t);
133 }
134
135 // Subtract b from a, then multiply by ab_const. Shift and narrow by
136 // DCT_CONST_BITS.
sub_multiply_shift_and_narrow_s16(const int16x8_t a,const int16x8_t b,const int16_t ab_const)137 static INLINE int16x8_t sub_multiply_shift_and_narrow_s16(
138 const int16x8_t a, const int16x8_t b, const int16_t ab_const) {
139 int32x4_t t[2];
140 t[0] = vsubl_s16(vget_low_s16(a), vget_low_s16(b));
141 t[1] = vsubl_s16(vget_high_s16(a), vget_high_s16(b));
142 t[0] = vmulq_n_s32(t[0], ab_const);
143 t[1] = vmulq_n_s32(t[1], ab_const);
144 return dct_const_round_shift_low_8(t);
145 }
146
147 // Multiply a by a_const and b by b_const, then accumulate. Shift and narrow by
148 // DCT_CONST_BITS.
multiply_accumulate_shift_and_narrow_s16(const int16x8_t a,const int16_t a_const,const int16x8_t b,const int16_t b_const)149 static INLINE int16x8_t multiply_accumulate_shift_and_narrow_s16(
150 const int16x8_t a, const int16_t a_const, const int16x8_t b,
151 const int16_t b_const) {
152 int32x4_t t[2];
153 t[0] = vmull_n_s16(vget_low_s16(a), a_const);
154 t[1] = vmull_n_s16(vget_high_s16(a), a_const);
155 t[0] = vmlal_n_s16(t[0], vget_low_s16(b), b_const);
156 t[1] = vmlal_n_s16(t[1], vget_high_s16(b), b_const);
157 return dct_const_round_shift_low_8(t);
158 }
159
160 //------------------------------------------------------------------------------
161
162 // Note: The following 4 functions could use 32-bit operations for bit-depth 10.
163 // However, although it's 20% faster with gcc, it's 20% slower with clang.
164 // Use 64-bit operations for now.
165
166 // Multiply a by a_const. Saturate, shift and narrow by DCT_CONST_BITS.
167 static INLINE int32x4x2_t
multiply_shift_and_narrow_s32_dual(const int32x4x2_t a,const int32_t a_const)168 multiply_shift_and_narrow_s32_dual(const int32x4x2_t a, const int32_t a_const) {
169 int64x2_t b[4];
170
171 b[0] = vmull_n_s32(vget_low_s32(a.val[0]), a_const);
172 b[1] = vmull_n_s32(vget_high_s32(a.val[0]), a_const);
173 b[2] = vmull_n_s32(vget_low_s32(a.val[1]), a_const);
174 b[3] = vmull_n_s32(vget_high_s32(a.val[1]), a_const);
175 return dct_const_round_shift_high_4x2(b);
176 }
177
178 // Add a and b, then multiply by ab_const. Shift and narrow by DCT_CONST_BITS.
add_multiply_shift_and_narrow_s32_dual(const int32x4x2_t a,const int32x4x2_t b,const int32_t ab_const)179 static INLINE int32x4x2_t add_multiply_shift_and_narrow_s32_dual(
180 const int32x4x2_t a, const int32x4x2_t b, const int32_t ab_const) {
181 int32x4_t t[2];
182 int64x2_t c[4];
183
184 t[0] = vaddq_s32(a.val[0], b.val[0]);
185 t[1] = vaddq_s32(a.val[1], b.val[1]);
186 c[0] = vmull_n_s32(vget_low_s32(t[0]), ab_const);
187 c[1] = vmull_n_s32(vget_high_s32(t[0]), ab_const);
188 c[2] = vmull_n_s32(vget_low_s32(t[1]), ab_const);
189 c[3] = vmull_n_s32(vget_high_s32(t[1]), ab_const);
190 return dct_const_round_shift_high_4x2(c);
191 }
192
193 // Subtract b from a, then multiply by ab_const. Shift and narrow by
194 // DCT_CONST_BITS.
sub_multiply_shift_and_narrow_s32_dual(const int32x4x2_t a,const int32x4x2_t b,const int32_t ab_const)195 static INLINE int32x4x2_t sub_multiply_shift_and_narrow_s32_dual(
196 const int32x4x2_t a, const int32x4x2_t b, const int32_t ab_const) {
197 int32x4_t t[2];
198 int64x2_t c[4];
199
200 t[0] = vsubq_s32(a.val[0], b.val[0]);
201 t[1] = vsubq_s32(a.val[1], b.val[1]);
202 c[0] = vmull_n_s32(vget_low_s32(t[0]), ab_const);
203 c[1] = vmull_n_s32(vget_high_s32(t[0]), ab_const);
204 c[2] = vmull_n_s32(vget_low_s32(t[1]), ab_const);
205 c[3] = vmull_n_s32(vget_high_s32(t[1]), ab_const);
206 return dct_const_round_shift_high_4x2(c);
207 }
208
209 // Multiply a by a_const and b by b_const, then accumulate. Shift and narrow by
210 // DCT_CONST_BITS.
multiply_accumulate_shift_and_narrow_s32_dual(const int32x4x2_t a,const int32_t a_const,const int32x4x2_t b,const int32_t b_const)211 static INLINE int32x4x2_t multiply_accumulate_shift_and_narrow_s32_dual(
212 const int32x4x2_t a, const int32_t a_const, const int32x4x2_t b,
213 const int32_t b_const) {
214 int64x2_t c[4];
215 c[0] = vmull_n_s32(vget_low_s32(a.val[0]), a_const);
216 c[1] = vmull_n_s32(vget_high_s32(a.val[0]), a_const);
217 c[2] = vmull_n_s32(vget_low_s32(a.val[1]), a_const);
218 c[3] = vmull_n_s32(vget_high_s32(a.val[1]), a_const);
219 c[0] = vmlal_n_s32(c[0], vget_low_s32(b.val[0]), b_const);
220 c[1] = vmlal_n_s32(c[1], vget_high_s32(b.val[0]), b_const);
221 c[2] = vmlal_n_s32(c[2], vget_low_s32(b.val[1]), b_const);
222 c[3] = vmlal_n_s32(c[3], vget_high_s32(b.val[1]), b_const);
223 return dct_const_round_shift_high_4x2(c);
224 }
225
226 // Shift the output down by 6 and add it to the destination buffer.
add_and_store_u8_s16(const int16x8_t * const a,uint8_t * d,const int stride)227 static INLINE void add_and_store_u8_s16(const int16x8_t *const a, uint8_t *d,
228 const int stride) {
229 uint8x8_t b[8];
230 int16x8_t c[8];
231
232 b[0] = vld1_u8(d);
233 d += stride;
234 b[1] = vld1_u8(d);
235 d += stride;
236 b[2] = vld1_u8(d);
237 d += stride;
238 b[3] = vld1_u8(d);
239 d += stride;
240 b[4] = vld1_u8(d);
241 d += stride;
242 b[5] = vld1_u8(d);
243 d += stride;
244 b[6] = vld1_u8(d);
245 d += stride;
246 b[7] = vld1_u8(d);
247 d -= (7 * stride);
248
249 // c = b + (a >> 6)
250 c[0] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[0])), a[0], 6);
251 c[1] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[1])), a[1], 6);
252 c[2] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[2])), a[2], 6);
253 c[3] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[3])), a[3], 6);
254 c[4] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[4])), a[4], 6);
255 c[5] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[5])), a[5], 6);
256 c[6] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[6])), a[6], 6);
257 c[7] = vrsraq_n_s16(vreinterpretq_s16_u16(vmovl_u8(b[7])), a[7], 6);
258
259 b[0] = vqmovun_s16(c[0]);
260 b[1] = vqmovun_s16(c[1]);
261 b[2] = vqmovun_s16(c[2]);
262 b[3] = vqmovun_s16(c[3]);
263 b[4] = vqmovun_s16(c[4]);
264 b[5] = vqmovun_s16(c[5]);
265 b[6] = vqmovun_s16(c[6]);
266 b[7] = vqmovun_s16(c[7]);
267
268 vst1_u8(d, b[0]);
269 d += stride;
270 vst1_u8(d, b[1]);
271 d += stride;
272 vst1_u8(d, b[2]);
273 d += stride;
274 vst1_u8(d, b[3]);
275 d += stride;
276 vst1_u8(d, b[4]);
277 d += stride;
278 vst1_u8(d, b[5]);
279 d += stride;
280 vst1_u8(d, b[6]);
281 d += stride;
282 vst1_u8(d, b[7]);
283 }
284
create_dcq(const int16_t dc)285 static INLINE uint8x16_t create_dcq(const int16_t dc) {
286 // Clip both sides and gcc may compile to assembly 'usat'.
287 const int16_t t = (dc < 0) ? 0 : ((dc > 255) ? 255 : dc);
288 return vdupq_n_u8((uint8_t)t);
289 }
290
idct4x4_16_kernel_bd8(int16x8_t * const a)291 static INLINE void idct4x4_16_kernel_bd8(int16x8_t *const a) {
292 const int16x4_t cospis = vld1_s16(kCospi);
293 int16x4_t b[4];
294 int32x4_t c[4];
295 int16x8_t d[2];
296
297 b[0] = vget_low_s16(a[0]);
298 b[1] = vget_high_s16(a[0]);
299 b[2] = vget_low_s16(a[1]);
300 b[3] = vget_high_s16(a[1]);
301 c[0] = vmull_lane_s16(b[0], cospis, 2);
302 c[2] = vmull_lane_s16(b[1], cospis, 2);
303 c[1] = vsubq_s32(c[0], c[2]);
304 c[0] = vaddq_s32(c[0], c[2]);
305 c[3] = vmull_lane_s16(b[2], cospis, 3);
306 c[2] = vmull_lane_s16(b[2], cospis, 1);
307 c[3] = vmlsl_lane_s16(c[3], b[3], cospis, 1);
308 c[2] = vmlal_lane_s16(c[2], b[3], cospis, 3);
309 dct_const_round_shift_low_8_dual(c, &d[0], &d[1]);
310 a[0] = vaddq_s16(d[0], d[1]);
311 a[1] = vsubq_s16(d[0], d[1]);
312 }
313
transpose_idct4x4_16_bd8(int16x8_t * const a)314 static INLINE void transpose_idct4x4_16_bd8(int16x8_t *const a) {
315 transpose_s16_4x4q(&a[0], &a[1]);
316 idct4x4_16_kernel_bd8(a);
317 }
318
idct8x8_12_pass1_bd8(const int16x4_t cospis0,const int16x4_t cospisd0,const int16x4_t cospisd1,int16x4_t * const io)319 static INLINE void idct8x8_12_pass1_bd8(const int16x4_t cospis0,
320 const int16x4_t cospisd0,
321 const int16x4_t cospisd1,
322 int16x4_t *const io) {
323 int16x4_t step1[8], step2[8];
324 int32x4_t t32[2];
325
326 transpose_s16_4x4d(&io[0], &io[1], &io[2], &io[3]);
327
328 // stage 1
329 step1[4] = vqrdmulh_lane_s16(io[1], cospisd1, 3);
330 step1[5] = vqrdmulh_lane_s16(io[3], cospisd1, 2);
331 step1[6] = vqrdmulh_lane_s16(io[3], cospisd1, 1);
332 step1[7] = vqrdmulh_lane_s16(io[1], cospisd1, 0);
333
334 // stage 2
335 step2[1] = vqrdmulh_lane_s16(io[0], cospisd0, 2);
336 step2[2] = vqrdmulh_lane_s16(io[2], cospisd0, 3);
337 step2[3] = vqrdmulh_lane_s16(io[2], cospisd0, 1);
338
339 step2[4] = vadd_s16(step1[4], step1[5]);
340 step2[5] = vsub_s16(step1[4], step1[5]);
341 step2[6] = vsub_s16(step1[7], step1[6]);
342 step2[7] = vadd_s16(step1[7], step1[6]);
343
344 // stage 3
345 step1[0] = vadd_s16(step2[1], step2[3]);
346 step1[1] = vadd_s16(step2[1], step2[2]);
347 step1[2] = vsub_s16(step2[1], step2[2]);
348 step1[3] = vsub_s16(step2[1], step2[3]);
349
350 t32[1] = vmull_lane_s16(step2[6], cospis0, 2);
351 t32[0] = vmlsl_lane_s16(t32[1], step2[5], cospis0, 2);
352 t32[1] = vmlal_lane_s16(t32[1], step2[5], cospis0, 2);
353 step1[5] = vrshrn_n_s32(t32[0], DCT_CONST_BITS);
354 step1[6] = vrshrn_n_s32(t32[1], DCT_CONST_BITS);
355
356 // stage 4
357 io[0] = vadd_s16(step1[0], step2[7]);
358 io[1] = vadd_s16(step1[1], step1[6]);
359 io[2] = vadd_s16(step1[2], step1[5]);
360 io[3] = vadd_s16(step1[3], step2[4]);
361 io[4] = vsub_s16(step1[3], step2[4]);
362 io[5] = vsub_s16(step1[2], step1[5]);
363 io[6] = vsub_s16(step1[1], step1[6]);
364 io[7] = vsub_s16(step1[0], step2[7]);
365 }
366
idct8x8_12_pass2_bd8(const int16x4_t cospis0,const int16x4_t cospisd0,const int16x4_t cospisd1,const int16x4_t * const input,int16x8_t * const output)367 static INLINE void idct8x8_12_pass2_bd8(const int16x4_t cospis0,
368 const int16x4_t cospisd0,
369 const int16x4_t cospisd1,
370 const int16x4_t *const input,
371 int16x8_t *const output) {
372 int16x8_t in[4];
373 int16x8_t step1[8], step2[8];
374 int32x4_t t32[8];
375
376 transpose_s16_4x8(input[0], input[1], input[2], input[3], input[4], input[5],
377 input[6], input[7], &in[0], &in[1], &in[2], &in[3]);
378
379 // stage 1
380 step1[4] = vqrdmulhq_lane_s16(in[1], cospisd1, 3);
381 step1[5] = vqrdmulhq_lane_s16(in[3], cospisd1, 2);
382 step1[6] = vqrdmulhq_lane_s16(in[3], cospisd1, 1);
383 step1[7] = vqrdmulhq_lane_s16(in[1], cospisd1, 0);
384
385 // stage 2
386 step2[1] = vqrdmulhq_lane_s16(in[0], cospisd0, 2);
387 step2[2] = vqrdmulhq_lane_s16(in[2], cospisd0, 3);
388 step2[3] = vqrdmulhq_lane_s16(in[2], cospisd0, 1);
389
390 step2[4] = vaddq_s16(step1[4], step1[5]);
391 step2[5] = vsubq_s16(step1[4], step1[5]);
392 step2[6] = vsubq_s16(step1[7], step1[6]);
393 step2[7] = vaddq_s16(step1[7], step1[6]);
394
395 // stage 3
396 step1[0] = vaddq_s16(step2[1], step2[3]);
397 step1[1] = vaddq_s16(step2[1], step2[2]);
398 step1[2] = vsubq_s16(step2[1], step2[2]);
399 step1[3] = vsubq_s16(step2[1], step2[3]);
400
401 t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
402 t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
403 t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
404 t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
405 t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
406 t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
407 dct_const_round_shift_low_8_dual(t32, &step1[5], &step1[6]);
408
409 // stage 4
410 output[0] = vaddq_s16(step1[0], step2[7]);
411 output[1] = vaddq_s16(step1[1], step1[6]);
412 output[2] = vaddq_s16(step1[2], step1[5]);
413 output[3] = vaddq_s16(step1[3], step2[4]);
414 output[4] = vsubq_s16(step1[3], step2[4]);
415 output[5] = vsubq_s16(step1[2], step1[5]);
416 output[6] = vsubq_s16(step1[1], step1[6]);
417 output[7] = vsubq_s16(step1[0], step2[7]);
418 }
419
idct8x8_64_1d_bd8_kernel(const int16x4_t cospis0,const int16x4_t cospis1,int16x8_t * const io)420 static INLINE void idct8x8_64_1d_bd8_kernel(const int16x4_t cospis0,
421 const int16x4_t cospis1,
422 int16x8_t *const io) {
423 int16x4_t input1l, input1h, input3l, input3h, input5l, input5h, input7l,
424 input7h;
425 int16x4_t step1l[4], step1h[4];
426 int16x8_t step1[8], step2[8];
427 int32x4_t t32[8];
428
429 // stage 1
430 input1l = vget_low_s16(io[1]);
431 input1h = vget_high_s16(io[1]);
432 input3l = vget_low_s16(io[3]);
433 input3h = vget_high_s16(io[3]);
434 input5l = vget_low_s16(io[5]);
435 input5h = vget_high_s16(io[5]);
436 input7l = vget_low_s16(io[7]);
437 input7h = vget_high_s16(io[7]);
438 step1l[0] = vget_low_s16(io[0]);
439 step1h[0] = vget_high_s16(io[0]);
440 step1l[1] = vget_low_s16(io[2]);
441 step1h[1] = vget_high_s16(io[2]);
442 step1l[2] = vget_low_s16(io[4]);
443 step1h[2] = vget_high_s16(io[4]);
444 step1l[3] = vget_low_s16(io[6]);
445 step1h[3] = vget_high_s16(io[6]);
446
447 t32[0] = vmull_lane_s16(input1l, cospis1, 3);
448 t32[1] = vmull_lane_s16(input1h, cospis1, 3);
449 t32[2] = vmull_lane_s16(input3l, cospis1, 2);
450 t32[3] = vmull_lane_s16(input3h, cospis1, 2);
451 t32[4] = vmull_lane_s16(input3l, cospis1, 1);
452 t32[5] = vmull_lane_s16(input3h, cospis1, 1);
453 t32[6] = vmull_lane_s16(input1l, cospis1, 0);
454 t32[7] = vmull_lane_s16(input1h, cospis1, 0);
455 t32[0] = vmlsl_lane_s16(t32[0], input7l, cospis1, 0);
456 t32[1] = vmlsl_lane_s16(t32[1], input7h, cospis1, 0);
457 t32[2] = vmlal_lane_s16(t32[2], input5l, cospis1, 1);
458 t32[3] = vmlal_lane_s16(t32[3], input5h, cospis1, 1);
459 t32[4] = vmlsl_lane_s16(t32[4], input5l, cospis1, 2);
460 t32[5] = vmlsl_lane_s16(t32[5], input5h, cospis1, 2);
461 t32[6] = vmlal_lane_s16(t32[6], input7l, cospis1, 3);
462 t32[7] = vmlal_lane_s16(t32[7], input7h, cospis1, 3);
463 dct_const_round_shift_low_8_dual(&t32[0], &step1[4], &step1[5]);
464 dct_const_round_shift_low_8_dual(&t32[4], &step1[6], &step1[7]);
465
466 // stage 2
467 t32[2] = vmull_lane_s16(step1l[0], cospis0, 2);
468 t32[3] = vmull_lane_s16(step1h[0], cospis0, 2);
469 t32[4] = vmull_lane_s16(step1l[1], cospis0, 3);
470 t32[5] = vmull_lane_s16(step1h[1], cospis0, 3);
471 t32[6] = vmull_lane_s16(step1l[1], cospis0, 1);
472 t32[7] = vmull_lane_s16(step1h[1], cospis0, 1);
473 t32[0] = vmlal_lane_s16(t32[2], step1l[2], cospis0, 2);
474 t32[1] = vmlal_lane_s16(t32[3], step1h[2], cospis0, 2);
475 t32[2] = vmlsl_lane_s16(t32[2], step1l[2], cospis0, 2);
476 t32[3] = vmlsl_lane_s16(t32[3], step1h[2], cospis0, 2);
477 t32[4] = vmlsl_lane_s16(t32[4], step1l[3], cospis0, 1);
478 t32[5] = vmlsl_lane_s16(t32[5], step1h[3], cospis0, 1);
479 t32[6] = vmlal_lane_s16(t32[6], step1l[3], cospis0, 3);
480 t32[7] = vmlal_lane_s16(t32[7], step1h[3], cospis0, 3);
481 dct_const_round_shift_low_8_dual(&t32[0], &step2[0], &step2[1]);
482 dct_const_round_shift_low_8_dual(&t32[4], &step2[2], &step2[3]);
483
484 step2[4] = vaddq_s16(step1[4], step1[5]);
485 step2[5] = vsubq_s16(step1[4], step1[5]);
486 step2[6] = vsubq_s16(step1[7], step1[6]);
487 step2[7] = vaddq_s16(step1[7], step1[6]);
488
489 // stage 3
490 step1[0] = vaddq_s16(step2[0], step2[3]);
491 step1[1] = vaddq_s16(step2[1], step2[2]);
492 step1[2] = vsubq_s16(step2[1], step2[2]);
493 step1[3] = vsubq_s16(step2[0], step2[3]);
494
495 t32[2] = vmull_lane_s16(vget_low_s16(step2[6]), cospis0, 2);
496 t32[3] = vmull_lane_s16(vget_high_s16(step2[6]), cospis0, 2);
497 t32[0] = vmlsl_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
498 t32[1] = vmlsl_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
499 t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(step2[5]), cospis0, 2);
500 t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(step2[5]), cospis0, 2);
501 dct_const_round_shift_low_8_dual(t32, &step1[5], &step1[6]);
502
503 // stage 4
504 io[0] = vaddq_s16(step1[0], step2[7]);
505 io[1] = vaddq_s16(step1[1], step1[6]);
506 io[2] = vaddq_s16(step1[2], step1[5]);
507 io[3] = vaddq_s16(step1[3], step2[4]);
508 io[4] = vsubq_s16(step1[3], step2[4]);
509 io[5] = vsubq_s16(step1[2], step1[5]);
510 io[6] = vsubq_s16(step1[1], step1[6]);
511 io[7] = vsubq_s16(step1[0], step2[7]);
512 }
513
idct8x8_64_1d_bd8(const int16x4_t cospis0,const int16x4_t cospis1,int16x8_t * const io)514 static INLINE void idct8x8_64_1d_bd8(const int16x4_t cospis0,
515 const int16x4_t cospis1,
516 int16x8_t *const io) {
517 transpose_s16_8x8(&io[0], &io[1], &io[2], &io[3], &io[4], &io[5], &io[6],
518 &io[7]);
519 idct8x8_64_1d_bd8_kernel(cospis0, cospis1, io);
520 }
521
idct_cospi_8_24_q_kernel(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_0_8_16_24,int32x4_t * const t32)522 static INLINE void idct_cospi_8_24_q_kernel(const int16x8_t s0,
523 const int16x8_t s1,
524 const int16x4_t cospi_0_8_16_24,
525 int32x4_t *const t32) {
526 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_0_8_16_24, 3);
527 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_0_8_16_24, 3);
528 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_0_8_16_24, 3);
529 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_0_8_16_24, 3);
530 t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_0_8_16_24, 1);
531 t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_0_8_16_24, 1);
532 t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_0_8_16_24, 1);
533 t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_0_8_16_24, 1);
534 }
535
idct_cospi_8_24_q(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_0_8_16_24,int16x8_t * const d0,int16x8_t * const d1)536 static INLINE void idct_cospi_8_24_q(const int16x8_t s0, const int16x8_t s1,
537 const int16x4_t cospi_0_8_16_24,
538 int16x8_t *const d0, int16x8_t *const d1) {
539 int32x4_t t32[4];
540
541 idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t32);
542 dct_const_round_shift_low_8_dual(t32, d0, d1);
543 }
544
idct_cospi_8_24_neg_q(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_0_8_16_24,int16x8_t * const d0,int16x8_t * const d1)545 static INLINE void idct_cospi_8_24_neg_q(const int16x8_t s0, const int16x8_t s1,
546 const int16x4_t cospi_0_8_16_24,
547 int16x8_t *const d0,
548 int16x8_t *const d1) {
549 int32x4_t t32[4];
550
551 idct_cospi_8_24_q_kernel(s0, s1, cospi_0_8_16_24, t32);
552 t32[2] = vnegq_s32(t32[2]);
553 t32[3] = vnegq_s32(t32[3]);
554 dct_const_round_shift_low_8_dual(t32, d0, d1);
555 }
556
idct_cospi_16_16_q(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_0_8_16_24,int16x8_t * const d0,int16x8_t * const d1)557 static INLINE void idct_cospi_16_16_q(const int16x8_t s0, const int16x8_t s1,
558 const int16x4_t cospi_0_8_16_24,
559 int16x8_t *const d0,
560 int16x8_t *const d1) {
561 int32x4_t t32[6];
562
563 t32[4] = vmull_lane_s16(vget_low_s16(s1), cospi_0_8_16_24, 2);
564 t32[5] = vmull_lane_s16(vget_high_s16(s1), cospi_0_8_16_24, 2);
565 t32[0] = vmlsl_lane_s16(t32[4], vget_low_s16(s0), cospi_0_8_16_24, 2);
566 t32[1] = vmlsl_lane_s16(t32[5], vget_high_s16(s0), cospi_0_8_16_24, 2);
567 t32[2] = vmlal_lane_s16(t32[4], vget_low_s16(s0), cospi_0_8_16_24, 2);
568 t32[3] = vmlal_lane_s16(t32[5], vget_high_s16(s0), cospi_0_8_16_24, 2);
569 dct_const_round_shift_low_8_dual(t32, d0, d1);
570 }
571
idct_cospi_2_30(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_2_30_10_22,int16x8_t * const d0,int16x8_t * const d1)572 static INLINE void idct_cospi_2_30(const int16x8_t s0, const int16x8_t s1,
573 const int16x4_t cospi_2_30_10_22,
574 int16x8_t *const d0, int16x8_t *const d1) {
575 int32x4_t t32[4];
576
577 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 1);
578 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 1);
579 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 1);
580 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 1);
581 t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 0);
582 t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 0);
583 t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 0);
584 t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 0);
585 dct_const_round_shift_low_8_dual(t32, d0, d1);
586 }
587
idct_cospi_4_28(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_4_12_20N_28,int16x8_t * const d0,int16x8_t * const d1)588 static INLINE void idct_cospi_4_28(const int16x8_t s0, const int16x8_t s1,
589 const int16x4_t cospi_4_12_20N_28,
590 int16x8_t *const d0, int16x8_t *const d1) {
591 int32x4_t t32[4];
592
593 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 3);
594 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 3);
595 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 3);
596 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 3);
597 t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 0);
598 t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 0);
599 t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 0);
600 t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 0);
601 dct_const_round_shift_low_8_dual(t32, d0, d1);
602 }
603
idct_cospi_6_26(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_6_26N_14_18N,int16x8_t * const d0,int16x8_t * const d1)604 static INLINE void idct_cospi_6_26(const int16x8_t s0, const int16x8_t s1,
605 const int16x4_t cospi_6_26N_14_18N,
606 int16x8_t *const d0, int16x8_t *const d1) {
607 int32x4_t t32[4];
608
609 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26N_14_18N, 0);
610 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26N_14_18N, 0);
611 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26N_14_18N, 0);
612 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26N_14_18N, 0);
613 t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26N_14_18N, 1);
614 t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26N_14_18N, 1);
615 t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26N_14_18N, 1);
616 t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26N_14_18N, 1);
617 dct_const_round_shift_low_8_dual(t32, d0, d1);
618 }
619
idct_cospi_10_22(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_2_30_10_22,int16x8_t * const d0,int16x8_t * const d1)620 static INLINE void idct_cospi_10_22(const int16x8_t s0, const int16x8_t s1,
621 const int16x4_t cospi_2_30_10_22,
622 int16x8_t *const d0, int16x8_t *const d1) {
623 int32x4_t t32[4];
624
625 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_2_30_10_22, 3);
626 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_2_30_10_22, 3);
627 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_2_30_10_22, 3);
628 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_2_30_10_22, 3);
629 t32[0] = vmlsl_lane_s16(t32[0], vget_low_s16(s1), cospi_2_30_10_22, 2);
630 t32[1] = vmlsl_lane_s16(t32[1], vget_high_s16(s1), cospi_2_30_10_22, 2);
631 t32[2] = vmlal_lane_s16(t32[2], vget_low_s16(s0), cospi_2_30_10_22, 2);
632 t32[3] = vmlal_lane_s16(t32[3], vget_high_s16(s0), cospi_2_30_10_22, 2);
633 dct_const_round_shift_low_8_dual(t32, d0, d1);
634 }
635
idct_cospi_12_20(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_4_12_20N_28,int16x8_t * const d0,int16x8_t * const d1)636 static INLINE void idct_cospi_12_20(const int16x8_t s0, const int16x8_t s1,
637 const int16x4_t cospi_4_12_20N_28,
638 int16x8_t *const d0, int16x8_t *const d1) {
639 int32x4_t t32[4];
640
641 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_4_12_20N_28, 1);
642 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_4_12_20N_28, 1);
643 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_4_12_20N_28, 1);
644 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_4_12_20N_28, 1);
645 t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_4_12_20N_28, 2);
646 t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_4_12_20N_28, 2);
647 t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_4_12_20N_28, 2);
648 t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_4_12_20N_28, 2);
649 dct_const_round_shift_low_8_dual(t32, d0, d1);
650 }
651
idct_cospi_14_18(const int16x8_t s0,const int16x8_t s1,const int16x4_t cospi_6_26N_14_18N,int16x8_t * const d0,int16x8_t * const d1)652 static INLINE void idct_cospi_14_18(const int16x8_t s0, const int16x8_t s1,
653 const int16x4_t cospi_6_26N_14_18N,
654 int16x8_t *const d0, int16x8_t *const d1) {
655 int32x4_t t32[4];
656
657 t32[0] = vmull_lane_s16(vget_low_s16(s0), cospi_6_26N_14_18N, 2);
658 t32[1] = vmull_lane_s16(vget_high_s16(s0), cospi_6_26N_14_18N, 2);
659 t32[2] = vmull_lane_s16(vget_low_s16(s1), cospi_6_26N_14_18N, 2);
660 t32[3] = vmull_lane_s16(vget_high_s16(s1), cospi_6_26N_14_18N, 2);
661 t32[0] = vmlal_lane_s16(t32[0], vget_low_s16(s1), cospi_6_26N_14_18N, 3);
662 t32[1] = vmlal_lane_s16(t32[1], vget_high_s16(s1), cospi_6_26N_14_18N, 3);
663 t32[2] = vmlsl_lane_s16(t32[2], vget_low_s16(s0), cospi_6_26N_14_18N, 3);
664 t32[3] = vmlsl_lane_s16(t32[3], vget_high_s16(s0), cospi_6_26N_14_18N, 3);
665 dct_const_round_shift_low_8_dual(t32, d0, d1);
666 }
667
idct16x16_add_stage7(const int16x8_t * const step2,int16x8_t * const out)668 static INLINE void idct16x16_add_stage7(const int16x8_t *const step2,
669 int16x8_t *const out) {
670 #if CONFIG_VP9_HIGHBITDEPTH
671 // Use saturating add/sub to avoid overflow in 2nd pass
672 out[0] = vqaddq_s16(step2[0], step2[15]);
673 out[1] = vqaddq_s16(step2[1], step2[14]);
674 out[2] = vqaddq_s16(step2[2], step2[13]);
675 out[3] = vqaddq_s16(step2[3], step2[12]);
676 out[4] = vqaddq_s16(step2[4], step2[11]);
677 out[5] = vqaddq_s16(step2[5], step2[10]);
678 out[6] = vqaddq_s16(step2[6], step2[9]);
679 out[7] = vqaddq_s16(step2[7], step2[8]);
680 out[8] = vqsubq_s16(step2[7], step2[8]);
681 out[9] = vqsubq_s16(step2[6], step2[9]);
682 out[10] = vqsubq_s16(step2[5], step2[10]);
683 out[11] = vqsubq_s16(step2[4], step2[11]);
684 out[12] = vqsubq_s16(step2[3], step2[12]);
685 out[13] = vqsubq_s16(step2[2], step2[13]);
686 out[14] = vqsubq_s16(step2[1], step2[14]);
687 out[15] = vqsubq_s16(step2[0], step2[15]);
688 #else
689 out[0] = vaddq_s16(step2[0], step2[15]);
690 out[1] = vaddq_s16(step2[1], step2[14]);
691 out[2] = vaddq_s16(step2[2], step2[13]);
692 out[3] = vaddq_s16(step2[3], step2[12]);
693 out[4] = vaddq_s16(step2[4], step2[11]);
694 out[5] = vaddq_s16(step2[5], step2[10]);
695 out[6] = vaddq_s16(step2[6], step2[9]);
696 out[7] = vaddq_s16(step2[7], step2[8]);
697 out[8] = vsubq_s16(step2[7], step2[8]);
698 out[9] = vsubq_s16(step2[6], step2[9]);
699 out[10] = vsubq_s16(step2[5], step2[10]);
700 out[11] = vsubq_s16(step2[4], step2[11]);
701 out[12] = vsubq_s16(step2[3], step2[12]);
702 out[13] = vsubq_s16(step2[2], step2[13]);
703 out[14] = vsubq_s16(step2[1], step2[14]);
704 out[15] = vsubq_s16(step2[0], step2[15]);
705 #endif
706 }
707
idct16x16_store_pass1(const int16x8_t * const out,int16_t * output)708 static INLINE void idct16x16_store_pass1(const int16x8_t *const out,
709 int16_t *output) {
710 // Save the result into output
711 vst1q_s16(output, out[0]);
712 output += 16;
713 vst1q_s16(output, out[1]);
714 output += 16;
715 vst1q_s16(output, out[2]);
716 output += 16;
717 vst1q_s16(output, out[3]);
718 output += 16;
719 vst1q_s16(output, out[4]);
720 output += 16;
721 vst1q_s16(output, out[5]);
722 output += 16;
723 vst1q_s16(output, out[6]);
724 output += 16;
725 vst1q_s16(output, out[7]);
726 output += 16;
727 vst1q_s16(output, out[8]);
728 output += 16;
729 vst1q_s16(output, out[9]);
730 output += 16;
731 vst1q_s16(output, out[10]);
732 output += 16;
733 vst1q_s16(output, out[11]);
734 output += 16;
735 vst1q_s16(output, out[12]);
736 output += 16;
737 vst1q_s16(output, out[13]);
738 output += 16;
739 vst1q_s16(output, out[14]);
740 output += 16;
741 vst1q_s16(output, out[15]);
742 }
743
idct8x8_add8x1(const int16x8_t a,uint8_t ** const dest,const int stride)744 static INLINE void idct8x8_add8x1(const int16x8_t a, uint8_t **const dest,
745 const int stride) {
746 const uint8x8_t s = vld1_u8(*dest);
747 const int16x8_t res = vrshrq_n_s16(a, 5);
748 const uint16x8_t q = vaddw_u8(vreinterpretq_u16_s16(res), s);
749 const uint8x8_t d = vqmovun_s16(vreinterpretq_s16_u16(q));
750 vst1_u8(*dest, d);
751 *dest += stride;
752 }
753
idct8x8_add8x8_neon(int16x8_t * const out,uint8_t * dest,const int stride)754 static INLINE void idct8x8_add8x8_neon(int16x8_t *const out, uint8_t *dest,
755 const int stride) {
756 idct8x8_add8x1(out[0], &dest, stride);
757 idct8x8_add8x1(out[1], &dest, stride);
758 idct8x8_add8x1(out[2], &dest, stride);
759 idct8x8_add8x1(out[3], &dest, stride);
760 idct8x8_add8x1(out[4], &dest, stride);
761 idct8x8_add8x1(out[5], &dest, stride);
762 idct8x8_add8x1(out[6], &dest, stride);
763 idct8x8_add8x1(out[7], &dest, stride);
764 }
765
idct16x16_add8x1(const int16x8_t a,uint8_t ** const dest,const int stride)766 static INLINE void idct16x16_add8x1(const int16x8_t a, uint8_t **const dest,
767 const int stride) {
768 const uint8x8_t s = vld1_u8(*dest);
769 const int16x8_t res = vrshrq_n_s16(a, 6);
770 const uint16x8_t q = vaddw_u8(vreinterpretq_u16_s16(res), s);
771 const uint8x8_t d = vqmovun_s16(vreinterpretq_s16_u16(q));
772 vst1_u8(*dest, d);
773 *dest += stride;
774 }
775
idct16x16_add_store(const int16x8_t * const out,uint8_t * dest,const int stride)776 static INLINE void idct16x16_add_store(const int16x8_t *const out,
777 uint8_t *dest, const int stride) {
778 // Add the result to dest
779 idct16x16_add8x1(out[0], &dest, stride);
780 idct16x16_add8x1(out[1], &dest, stride);
781 idct16x16_add8x1(out[2], &dest, stride);
782 idct16x16_add8x1(out[3], &dest, stride);
783 idct16x16_add8x1(out[4], &dest, stride);
784 idct16x16_add8x1(out[5], &dest, stride);
785 idct16x16_add8x1(out[6], &dest, stride);
786 idct16x16_add8x1(out[7], &dest, stride);
787 idct16x16_add8x1(out[8], &dest, stride);
788 idct16x16_add8x1(out[9], &dest, stride);
789 idct16x16_add8x1(out[10], &dest, stride);
790 idct16x16_add8x1(out[11], &dest, stride);
791 idct16x16_add8x1(out[12], &dest, stride);
792 idct16x16_add8x1(out[13], &dest, stride);
793 idct16x16_add8x1(out[14], &dest, stride);
794 idct16x16_add8x1(out[15], &dest, stride);
795 }
796
highbd_idct16x16_add8x1(const int16x8_t a,const int16x8_t max,uint16_t ** const dest,const int stride)797 static INLINE void highbd_idct16x16_add8x1(const int16x8_t a,
798 const int16x8_t max,
799 uint16_t **const dest,
800 const int stride) {
801 const uint16x8_t s = vld1q_u16(*dest);
802 const int16x8_t res0 = vqaddq_s16(a, vreinterpretq_s16_u16(s));
803 const int16x8_t res1 = vminq_s16(res0, max);
804 const uint16x8_t d = vqshluq_n_s16(res1, 0);
805 vst1q_u16(*dest, d);
806 *dest += stride;
807 }
808
idct16x16_add_store_bd8(int16x8_t * const out,uint16_t * dest,const int stride)809 static INLINE void idct16x16_add_store_bd8(int16x8_t *const out, uint16_t *dest,
810 const int stride) {
811 // Add the result to dest
812 const int16x8_t max = vdupq_n_s16((1 << 8) - 1);
813 out[0] = vrshrq_n_s16(out[0], 6);
814 out[1] = vrshrq_n_s16(out[1], 6);
815 out[2] = vrshrq_n_s16(out[2], 6);
816 out[3] = vrshrq_n_s16(out[3], 6);
817 out[4] = vrshrq_n_s16(out[4], 6);
818 out[5] = vrshrq_n_s16(out[5], 6);
819 out[6] = vrshrq_n_s16(out[6], 6);
820 out[7] = vrshrq_n_s16(out[7], 6);
821 out[8] = vrshrq_n_s16(out[8], 6);
822 out[9] = vrshrq_n_s16(out[9], 6);
823 out[10] = vrshrq_n_s16(out[10], 6);
824 out[11] = vrshrq_n_s16(out[11], 6);
825 out[12] = vrshrq_n_s16(out[12], 6);
826 out[13] = vrshrq_n_s16(out[13], 6);
827 out[14] = vrshrq_n_s16(out[14], 6);
828 out[15] = vrshrq_n_s16(out[15], 6);
829 highbd_idct16x16_add8x1(out[0], max, &dest, stride);
830 highbd_idct16x16_add8x1(out[1], max, &dest, stride);
831 highbd_idct16x16_add8x1(out[2], max, &dest, stride);
832 highbd_idct16x16_add8x1(out[3], max, &dest, stride);
833 highbd_idct16x16_add8x1(out[4], max, &dest, stride);
834 highbd_idct16x16_add8x1(out[5], max, &dest, stride);
835 highbd_idct16x16_add8x1(out[6], max, &dest, stride);
836 highbd_idct16x16_add8x1(out[7], max, &dest, stride);
837 highbd_idct16x16_add8x1(out[8], max, &dest, stride);
838 highbd_idct16x16_add8x1(out[9], max, &dest, stride);
839 highbd_idct16x16_add8x1(out[10], max, &dest, stride);
840 highbd_idct16x16_add8x1(out[11], max, &dest, stride);
841 highbd_idct16x16_add8x1(out[12], max, &dest, stride);
842 highbd_idct16x16_add8x1(out[13], max, &dest, stride);
843 highbd_idct16x16_add8x1(out[14], max, &dest, stride);
844 highbd_idct16x16_add8x1(out[15], max, &dest, stride);
845 }
846
highbd_idct16x16_add8x1_bd8(const int16x8_t a,uint16_t ** const dest,const int stride)847 static INLINE void highbd_idct16x16_add8x1_bd8(const int16x8_t a,
848 uint16_t **const dest,
849 const int stride) {
850 const uint16x8_t s = vld1q_u16(*dest);
851 const int16x8_t res = vrsraq_n_s16(vreinterpretq_s16_u16(s), a, 6);
852 const uint16x8_t d = vmovl_u8(vqmovun_s16(res));
853 vst1q_u16(*dest, d);
854 *dest += stride;
855 }
856
highbd_add_and_store_bd8(const int16x8_t * const a,uint16_t * out,const int stride)857 static INLINE void highbd_add_and_store_bd8(const int16x8_t *const a,
858 uint16_t *out, const int stride) {
859 highbd_idct16x16_add8x1_bd8(a[0], &out, stride);
860 highbd_idct16x16_add8x1_bd8(a[1], &out, stride);
861 highbd_idct16x16_add8x1_bd8(a[2], &out, stride);
862 highbd_idct16x16_add8x1_bd8(a[3], &out, stride);
863 highbd_idct16x16_add8x1_bd8(a[4], &out, stride);
864 highbd_idct16x16_add8x1_bd8(a[5], &out, stride);
865 highbd_idct16x16_add8x1_bd8(a[6], &out, stride);
866 highbd_idct16x16_add8x1_bd8(a[7], &out, stride);
867 highbd_idct16x16_add8x1_bd8(a[8], &out, stride);
868 highbd_idct16x16_add8x1_bd8(a[9], &out, stride);
869 highbd_idct16x16_add8x1_bd8(a[10], &out, stride);
870 highbd_idct16x16_add8x1_bd8(a[11], &out, stride);
871 highbd_idct16x16_add8x1_bd8(a[12], &out, stride);
872 highbd_idct16x16_add8x1_bd8(a[13], &out, stride);
873 highbd_idct16x16_add8x1_bd8(a[14], &out, stride);
874 highbd_idct16x16_add8x1_bd8(a[15], &out, stride);
875 highbd_idct16x16_add8x1_bd8(a[16], &out, stride);
876 highbd_idct16x16_add8x1_bd8(a[17], &out, stride);
877 highbd_idct16x16_add8x1_bd8(a[18], &out, stride);
878 highbd_idct16x16_add8x1_bd8(a[19], &out, stride);
879 highbd_idct16x16_add8x1_bd8(a[20], &out, stride);
880 highbd_idct16x16_add8x1_bd8(a[21], &out, stride);
881 highbd_idct16x16_add8x1_bd8(a[22], &out, stride);
882 highbd_idct16x16_add8x1_bd8(a[23], &out, stride);
883 highbd_idct16x16_add8x1_bd8(a[24], &out, stride);
884 highbd_idct16x16_add8x1_bd8(a[25], &out, stride);
885 highbd_idct16x16_add8x1_bd8(a[26], &out, stride);
886 highbd_idct16x16_add8x1_bd8(a[27], &out, stride);
887 highbd_idct16x16_add8x1_bd8(a[28], &out, stride);
888 highbd_idct16x16_add8x1_bd8(a[29], &out, stride);
889 highbd_idct16x16_add8x1_bd8(a[30], &out, stride);
890 highbd_idct16x16_add8x1_bd8(a[31], &out, stride);
891 }
892
893 void vpx_idct16x16_256_add_half1d(const void *const input, int16_t *output,
894 void *const dest, const int stride,
895 const int highbd_flag);
896
897 void vpx_idct16x16_38_add_half1d(const void *const input, int16_t *const output,
898 void *const dest, const int stride,
899 const int highbd_flag);
900
901 void vpx_idct16x16_10_add_half1d_pass1(const tran_low_t *input,
902 int16_t *output);
903
904 void vpx_idct16x16_10_add_half1d_pass2(const int16_t *input,
905 int16_t *const output, void *const dest,
906 const int stride, const int highbd_flag);
907
908 void vpx_idct32_32_neon(const tran_low_t *input, uint8_t *dest,
909 const int stride, const int highbd_flag);
910
911 void vpx_idct32_12_neon(const tran_low_t *const input, int16_t *output);
912 void vpx_idct32_16_neon(const int16_t *const input, void *const output,
913 const int stride, const int highbd_flag);
914
915 void vpx_idct32_6_neon(const tran_low_t *input, int16_t *output);
916 void vpx_idct32_8_neon(const int16_t *input, void *const output, int stride,
917 const int highbd_flag);
918
919 #endif // VPX_VPX_DSP_ARM_IDCT_NEON_H_
920