/*
 *  Copyright (c) 2018 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS. All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_config.h"
#include "./vpx_dsp_rtcd.h"

#include "vpx_dsp/ppc/types_vsx.h"
#include "vpx_dsp/ppc/txfm_common_vsx.h"
#include "vpx_dsp/ppc/transpose_vsx.h"

// Returns ((a +/- b) * cospi16 + (1 << 13)) >> 14.
static INLINE void single_butterfly(int16x8_t a, int16x8_t b, int16x8_t *add,
                                    int16x8_t *sub) {
  // Since a + b can overflow 16 bits, the multiplication is distributed
  // (a * c +/- b * c).
  const int32x4_t ac_e = vec_mule(a, cospi16_v);
  const int32x4_t ac_o = vec_mulo(a, cospi16_v);
  const int32x4_t bc_e = vec_mule(b, cospi16_v);
  const int32x4_t bc_o = vec_mulo(b, cospi16_v);

  // Reuse the same multiplies for sum and difference.
  const int32x4_t sum_e = vec_add(ac_e, bc_e);
  const int32x4_t sum_o = vec_add(ac_o, bc_o);
  const int32x4_t diff_e = vec_sub(ac_e, bc_e);
  const int32x4_t diff_o = vec_sub(ac_o, bc_o);

  // Add the rounding offset.
  const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding);
  const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding);
  const int32x4_t rdiff_o = vec_add(diff_o, vec_dct_const_rounding);
  const int32x4_t rdiff_e = vec_add(diff_e, vec_dct_const_rounding);

  const int32x4_t ssum_o = vec_sra(rsum_o, vec_dct_const_bits);
  const int32x4_t ssum_e = vec_sra(rsum_e, vec_dct_const_bits);
  const int32x4_t sdiff_o = vec_sra(rdiff_o, vec_dct_const_bits);
  const int32x4_t sdiff_e = vec_sra(rdiff_e, vec_dct_const_bits);

  // There's no pack operation for even and odd, so we need to permute.
  *add = (int16x8_t)vec_perm(ssum_e, ssum_o, vec_perm_odd_even_pack);
  *sub = (int16x8_t)vec_perm(sdiff_e, sdiff_o, vec_perm_odd_even_pack);
}
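
// For clarity, a scalar sketch of what single_butterfly() computes per lane.
// Illustrative only, not part of the hot path; it assumes
// DCT_CONST_BITS == 14 and cospi_16_64 from vpx_dsp/txfm_common.h, the
// values replicated into vec_dct_const_bits and cospi16_v.
static INLINE void single_butterfly_ref(int16_t a, int16_t b, int16_t *add,
                                        int16_t *sub) {
  // Keep the products in 32 bits, since a + b itself could overflow 16 bits.
  const int32_t ac = a * cospi_16_64;
  const int32_t bc = b * cospi_16_64;
  *add = (int16_t)((ac + bc + (1 << 13)) >> 14);
  *sub = (int16_t)((ac - bc + (1 << 13)) >> 14);
}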

// Returns (a * c1 + b * c2 + (1 << 13)) >> 14 in *add and
// (a * c2 - b * c1 + (1 << 13)) >> 14 in *sub.
static INLINE void double_butterfly(int16x8_t a, int16x8_t c1, int16x8_t b,
                                    int16x8_t c2, int16x8_t *add,
                                    int16x8_t *sub) {
  const int32x4_t ac1_o = vec_mulo(a, c1);
  const int32x4_t ac1_e = vec_mule(a, c1);
  const int32x4_t ac2_o = vec_mulo(a, c2);
  const int32x4_t ac2_e = vec_mule(a, c2);

  const int32x4_t bc1_o = vec_mulo(b, c1);
  const int32x4_t bc1_e = vec_mule(b, c1);
  const int32x4_t bc2_o = vec_mulo(b, c2);
  const int32x4_t bc2_e = vec_mule(b, c2);

  const int32x4_t sum_o = vec_add(ac1_o, bc2_o);
  const int32x4_t sum_e = vec_add(ac1_e, bc2_e);
  const int32x4_t diff_o = vec_sub(ac2_o, bc1_o);
  const int32x4_t diff_e = vec_sub(ac2_e, bc1_e);

  // Add the rounding offset.
  const int32x4_t rsum_o = vec_add(sum_o, vec_dct_const_rounding);
  const int32x4_t rsum_e = vec_add(sum_e, vec_dct_const_rounding);
  const int32x4_t rdiff_o = vec_add(diff_o, vec_dct_const_rounding);
  const int32x4_t rdiff_e = vec_add(diff_e, vec_dct_const_rounding);

  const int32x4_t ssum_o = vec_sra(rsum_o, vec_dct_const_bits);
  const int32x4_t ssum_e = vec_sra(rsum_e, vec_dct_const_bits);
  const int32x4_t sdiff_o = vec_sra(rdiff_o, vec_dct_const_bits);
  const int32x4_t sdiff_e = vec_sra(rdiff_e, vec_dct_const_bits);

  // There's no pack operation for even and odd, so we need to permute.
  *add = (int16x8_t)vec_perm(ssum_e, ssum_o, vec_perm_odd_even_pack);
  *sub = (int16x8_t)vec_perm(sdiff_e, sdiff_o, vec_perm_odd_even_pack);
}
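
// Likewise, a scalar sketch of double_butterfly() per lane, illustrative
// only, under the same DCT_CONST_BITS == 14 assumption.
static INLINE void double_butterfly_ref(int16_t a, int16_t c1, int16_t b,
                                        int16_t c2, int16_t *add,
                                        int16_t *sub) {
  *add = (int16_t)((a * c1 + b * c2 + (1 << 13)) >> 14);
  *sub = (int16_t)((a * c2 - b * c1 + (1 << 13)) >> 14);
}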

// While other architectures combine the load and the stage 1 operations,
// Power9 benchmarking shows no benefit from such an approach.
static INLINE void load(const int16_t *a, int stride, int16x8_t *b) {
  // We tried different combinations of load and shift instructions; this is
  // the fastest one.
  {
    const int16x8_t l0 = vec_vsx_ld(0, a);
    const int16x8_t l1 = vec_vsx_ld(0, a + stride);
    const int16x8_t l2 = vec_vsx_ld(0, a + 2 * stride);
    const int16x8_t l3 = vec_vsx_ld(0, a + 3 * stride);
    const int16x8_t l4 = vec_vsx_ld(0, a + 4 * stride);
    const int16x8_t l5 = vec_vsx_ld(0, a + 5 * stride);
    const int16x8_t l6 = vec_vsx_ld(0, a + 6 * stride);
    const int16x8_t l7 = vec_vsx_ld(0, a + 7 * stride);

    const int16x8_t l8 = vec_vsx_ld(0, a + 8 * stride);
    const int16x8_t l9 = vec_vsx_ld(0, a + 9 * stride);
    const int16x8_t l10 = vec_vsx_ld(0, a + 10 * stride);
    const int16x8_t l11 = vec_vsx_ld(0, a + 11 * stride);
    const int16x8_t l12 = vec_vsx_ld(0, a + 12 * stride);
    const int16x8_t l13 = vec_vsx_ld(0, a + 13 * stride);
    const int16x8_t l14 = vec_vsx_ld(0, a + 14 * stride);
    const int16x8_t l15 = vec_vsx_ld(0, a + 15 * stride);

    b[0] = vec_sl(l0, vec_dct_scale_log2);
    b[1] = vec_sl(l1, vec_dct_scale_log2);
    b[2] = vec_sl(l2, vec_dct_scale_log2);
    b[3] = vec_sl(l3, vec_dct_scale_log2);
    b[4] = vec_sl(l4, vec_dct_scale_log2);
    b[5] = vec_sl(l5, vec_dct_scale_log2);
    b[6] = vec_sl(l6, vec_dct_scale_log2);
    b[7] = vec_sl(l7, vec_dct_scale_log2);

    b[8] = vec_sl(l8, vec_dct_scale_log2);
    b[9] = vec_sl(l9, vec_dct_scale_log2);
    b[10] = vec_sl(l10, vec_dct_scale_log2);
    b[11] = vec_sl(l11, vec_dct_scale_log2);
    b[12] = vec_sl(l12, vec_dct_scale_log2);
    b[13] = vec_sl(l13, vec_dct_scale_log2);
    b[14] = vec_sl(l14, vec_dct_scale_log2);
    b[15] = vec_sl(l15, vec_dct_scale_log2);
  }
  {
    const int16x8_t l16 = vec_vsx_ld(0, a + 16 * stride);
    const int16x8_t l17 = vec_vsx_ld(0, a + 17 * stride);
    const int16x8_t l18 = vec_vsx_ld(0, a + 18 * stride);
    const int16x8_t l19 = vec_vsx_ld(0, a + 19 * stride);
    const int16x8_t l20 = vec_vsx_ld(0, a + 20 * stride);
    const int16x8_t l21 = vec_vsx_ld(0, a + 21 * stride);
    const int16x8_t l22 = vec_vsx_ld(0, a + 22 * stride);
    const int16x8_t l23 = vec_vsx_ld(0, a + 23 * stride);

    const int16x8_t l24 = vec_vsx_ld(0, a + 24 * stride);
    const int16x8_t l25 = vec_vsx_ld(0, a + 25 * stride);
    const int16x8_t l26 = vec_vsx_ld(0, a + 26 * stride);
    const int16x8_t l27 = vec_vsx_ld(0, a + 27 * stride);
    const int16x8_t l28 = vec_vsx_ld(0, a + 28 * stride);
    const int16x8_t l29 = vec_vsx_ld(0, a + 29 * stride);
    const int16x8_t l30 = vec_vsx_ld(0, a + 30 * stride);
    const int16x8_t l31 = vec_vsx_ld(0, a + 31 * stride);

    b[16] = vec_sl(l16, vec_dct_scale_log2);
    b[17] = vec_sl(l17, vec_dct_scale_log2);
    b[18] = vec_sl(l18, vec_dct_scale_log2);
    b[19] = vec_sl(l19, vec_dct_scale_log2);
    b[20] = vec_sl(l20, vec_dct_scale_log2);
    b[21] = vec_sl(l21, vec_dct_scale_log2);
    b[22] = vec_sl(l22, vec_dct_scale_log2);
    b[23] = vec_sl(l23, vec_dct_scale_log2);

    b[24] = vec_sl(l24, vec_dct_scale_log2);
    b[25] = vec_sl(l25, vec_dct_scale_log2);
    b[26] = vec_sl(l26, vec_dct_scale_log2);
    b[27] = vec_sl(l27, vec_dct_scale_log2);
    b[28] = vec_sl(l28, vec_dct_scale_log2);
    b[29] = vec_sl(l29, vec_dct_scale_log2);
    b[30] = vec_sl(l30, vec_dct_scale_log2);
    b[31] = vec_sl(l31, vec_dct_scale_log2);
  }
}
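
// A scalar sketch of load(), illustrative only: each input coefficient is
// scaled up by 4 on its way in (the vec_sl by vec_dct_scale_log2, i.e.
// << 2), mirroring the input[j * stride + i] * 4 scaling in the generic C
// vpx_fdct32x32_rd_c().
static INLINE void load_ref(const int16_t *a, int stride, int16_t b[32][8]) {
  for (int i = 0; i < 32; i++) {
    for (int j = 0; j < 8; j++) b[i][j] = a[i * stride + j] * 4;
  }
}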

static INLINE void store(tran_low_t *a, const int16x8_t *b) {
  vec_vsx_st(b[0], 0, a);
  vec_vsx_st(b[8], 0, a + 8);
  vec_vsx_st(b[16], 0, a + 16);
  vec_vsx_st(b[24], 0, a + 24);

  vec_vsx_st(b[1], 0, a + 32);
  vec_vsx_st(b[9], 0, a + 40);
  vec_vsx_st(b[17], 0, a + 48);
  vec_vsx_st(b[25], 0, a + 56);

  vec_vsx_st(b[2], 0, a + 64);
  vec_vsx_st(b[10], 0, a + 72);
  vec_vsx_st(b[18], 0, a + 80);
  vec_vsx_st(b[26], 0, a + 88);

  vec_vsx_st(b[3], 0, a + 96);
  vec_vsx_st(b[11], 0, a + 104);
  vec_vsx_st(b[19], 0, a + 112);
  vec_vsx_st(b[27], 0, a + 120);

  vec_vsx_st(b[4], 0, a + 128);
  vec_vsx_st(b[12], 0, a + 136);
  vec_vsx_st(b[20], 0, a + 144);
  vec_vsx_st(b[28], 0, a + 152);

  vec_vsx_st(b[5], 0, a + 160);
  vec_vsx_st(b[13], 0, a + 168);
  vec_vsx_st(b[21], 0, a + 176);
  vec_vsx_st(b[29], 0, a + 184);

  vec_vsx_st(b[6], 0, a + 192);
  vec_vsx_st(b[14], 0, a + 200);
  vec_vsx_st(b[22], 0, a + 208);
  vec_vsx_st(b[30], 0, a + 216);

  vec_vsx_st(b[7], 0, a + 224);
  vec_vsx_st(b[15], 0, a + 232);
  vec_vsx_st(b[23], 0, a + 240);
  vec_vsx_st(b[31], 0, a + 248);
}
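
// A scalar sketch of store(), illustrative only: b[] holds an 8x32 strip
// with its four 8-vector column blocks laid out back to back, so output row
// r collects b[r], b[r + 8], b[r + 16] and b[r + 24] at column offsets 0, 8,
// 16 and 24.
static INLINE void store_ref(tran_low_t *a, const int16_t b[32][8]) {
  for (int r = 0; r < 8; r++) {
    for (int k = 0; k < 4; k++) {
      for (int j = 0; j < 8; j++) a[r * 32 + k * 8 + j] = b[r + 8 * k][j];
    }
  }
}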

// Returns 1 if negative, 0 if positive.
static INLINE int16x8_t vec_sign_s16(int16x8_t a) {
  return vec_sr(a, vec_shift_sign_s16);
}

// Add 2 if positive, 1 if negative, and shift by 2.
static INLINE int16x8_t sub_round_shift(const int16x8_t a) {
  const int16x8_t sign = vec_sign_s16(a);
  return vec_sra(vec_sub(vec_add(a, vec_twos_s16), sign), vec_dct_scale_log2);
}

// Add 1 if positive, 2 if negative, and shift by 2.
// In practice, add 1, then add the sign bit, then shift without rounding.
static INLINE int16x8_t add_round_shift_s16(const int16x8_t a) {
  const int16x8_t sign = vec_sign_s16(a);
  return vec_sra(vec_add(vec_add(a, vec_ones_s16), sign), vec_dct_scale_log2);
}
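
// Scalar sketches of the two rounding helpers above, illustrative only.
// Both divide by 4 but differ in how the rounding offset depends on sign:
static INLINE int16_t sub_round_shift_ref(int16_t a) {
  const int16_t sign = (a < 0);            // 1 if negative, 0 if positive
  return (int16_t)((a + 2 - sign) >> 2);   // (a + 2) >> 2 or (a + 1) >> 2
}

static INLINE int16_t add_round_shift_s16_ref(int16_t a) {
  const int16_t sign = (a < 0);
  return (int16_t)((a + 1 + sign) >> 2);   // (a + 1) >> 2 or (a + 2) >> 2
}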

void vpx_fdct32_vsx(const int16x8_t *in, int16x8_t *out, int pass) {
  int16x8_t temp0[32];  // Holds stages: 1, 4, 7
  int16x8_t temp1[32];  // Holds stages: 2, 5
  int16x8_t temp2[32];  // Holds stages: 3, 6

  // Stage 1
  // Unrolling this loop actually slows down Power9 benchmarks.
  for (int i = 0; i < 16; i++) {
    temp0[i] = vec_add(in[i], in[31 - i]);
    // pass through to stage 3.
    temp1[i + 16] = vec_sub(in[15 - i], in[i + 16]);
  }

  // Stage 2
  // Unrolling this loop actually slows down Power9 benchmarks.
  for (int i = 0; i < 8; i++) {
    temp1[i] = vec_add(temp0[i], temp0[15 - i]);
    temp1[i + 8] = vec_sub(temp0[7 - i], temp0[i + 8]);
  }

  // Apply butterflies (in place) on pass through to stage 3.
  single_butterfly(temp1[27], temp1[20], &temp1[27], &temp1[20]);
  single_butterfly(temp1[26], temp1[21], &temp1[26], &temp1[21]);
  single_butterfly(temp1[25], temp1[22], &temp1[25], &temp1[22]);
  single_butterfly(temp1[24], temp1[23], &temp1[24], &temp1[23]);

  // Scale the magnitude down by 4 so that the intermediate values stay
  // within the range of 16 bits.
  if (pass) {
    temp1[0] = add_round_shift_s16(temp1[0]);
    temp1[1] = add_round_shift_s16(temp1[1]);
    temp1[2] = add_round_shift_s16(temp1[2]);
    temp1[3] = add_round_shift_s16(temp1[3]);
    temp1[4] = add_round_shift_s16(temp1[4]);
    temp1[5] = add_round_shift_s16(temp1[5]);
    temp1[6] = add_round_shift_s16(temp1[6]);
    temp1[7] = add_round_shift_s16(temp1[7]);
    temp1[8] = add_round_shift_s16(temp1[8]);
    temp1[9] = add_round_shift_s16(temp1[9]);
    temp1[10] = add_round_shift_s16(temp1[10]);
    temp1[11] = add_round_shift_s16(temp1[11]);
    temp1[12] = add_round_shift_s16(temp1[12]);
    temp1[13] = add_round_shift_s16(temp1[13]);
    temp1[14] = add_round_shift_s16(temp1[14]);
    temp1[15] = add_round_shift_s16(temp1[15]);

    temp1[16] = add_round_shift_s16(temp1[16]);
    temp1[17] = add_round_shift_s16(temp1[17]);
    temp1[18] = add_round_shift_s16(temp1[18]);
    temp1[19] = add_round_shift_s16(temp1[19]);
    temp1[20] = add_round_shift_s16(temp1[20]);
    temp1[21] = add_round_shift_s16(temp1[21]);
    temp1[22] = add_round_shift_s16(temp1[22]);
    temp1[23] = add_round_shift_s16(temp1[23]);
    temp1[24] = add_round_shift_s16(temp1[24]);
    temp1[25] = add_round_shift_s16(temp1[25]);
    temp1[26] = add_round_shift_s16(temp1[26]);
    temp1[27] = add_round_shift_s16(temp1[27]);
    temp1[28] = add_round_shift_s16(temp1[28]);
    temp1[29] = add_round_shift_s16(temp1[29]);
    temp1[30] = add_round_shift_s16(temp1[30]);
    temp1[31] = add_round_shift_s16(temp1[31]);
  }

  // Stage 3
  temp2[0] = vec_add(temp1[0], temp1[7]);
  temp2[1] = vec_add(temp1[1], temp1[6]);
  temp2[2] = vec_add(temp1[2], temp1[5]);
  temp2[3] = vec_add(temp1[3], temp1[4]);
  temp2[5] = vec_sub(temp1[2], temp1[5]);
  temp2[6] = vec_sub(temp1[1], temp1[6]);
  temp2[8] = temp1[8];
  temp2[9] = temp1[9];

  single_butterfly(temp1[13], temp1[10], &temp2[13], &temp2[10]);
  single_butterfly(temp1[12], temp1[11], &temp2[12], &temp2[11]);
  temp2[14] = temp1[14];
  temp2[15] = temp1[15];

  temp2[18] = vec_add(temp1[18], temp1[21]);
  temp2[19] = vec_add(temp1[19], temp1[20]);

  temp2[20] = vec_sub(temp1[19], temp1[20]);
  temp2[21] = vec_sub(temp1[18], temp1[21]);

  temp2[26] = vec_sub(temp1[29], temp1[26]);
  temp2[27] = vec_sub(temp1[28], temp1[27]);

  temp2[28] = vec_add(temp1[28], temp1[27]);
  temp2[29] = vec_add(temp1[29], temp1[26]);

  // Pass through Stage 4
  temp0[7] = vec_sub(temp1[0], temp1[7]);
  temp0[4] = vec_sub(temp1[3], temp1[4]);
  temp0[16] = vec_add(temp1[16], temp1[23]);
  temp0[17] = vec_add(temp1[17], temp1[22]);
  temp0[22] = vec_sub(temp1[17], temp1[22]);
  temp0[23] = vec_sub(temp1[16], temp1[23]);
  temp0[24] = vec_sub(temp1[31], temp1[24]);
  temp0[25] = vec_sub(temp1[30], temp1[25]);
  temp0[30] = vec_add(temp1[30], temp1[25]);
  temp0[31] = vec_add(temp1[31], temp1[24]);

  // Stage 4
  temp0[0] = vec_add(temp2[0], temp2[3]);
  temp0[1] = vec_add(temp2[1], temp2[2]);
  temp0[2] = vec_sub(temp2[1], temp2[2]);
  temp0[3] = vec_sub(temp2[0], temp2[3]);
  single_butterfly(temp2[6], temp2[5], &temp0[6], &temp0[5]);

  temp0[9] = vec_add(temp2[9], temp2[10]);
  temp0[10] = vec_sub(temp2[9], temp2[10]);
  temp0[13] = vec_sub(temp2[14], temp2[13]);
  temp0[14] = vec_add(temp2[14], temp2[13]);

  double_butterfly(temp2[29], cospi8_v, temp2[18], cospi24_v, &temp0[29],
                   &temp0[18]);
  double_butterfly(temp2[28], cospi8_v, temp2[19], cospi24_v, &temp0[28],
                   &temp0[19]);
  double_butterfly(temp2[27], cospi24_v, temp2[20], cospi8m_v, &temp0[27],
                   &temp0[20]);
  double_butterfly(temp2[26], cospi24_v, temp2[21], cospi8m_v, &temp0[26],
                   &temp0[21]);

  // Pass through Stage 5
  temp1[8] = vec_add(temp2[8], temp2[11]);
  temp1[11] = vec_sub(temp2[8], temp2[11]);
  temp1[12] = vec_sub(temp2[15], temp2[12]);
  temp1[15] = vec_add(temp2[15], temp2[12]);

  // Stage 5
  // 0 and 1 pass through to 0 and 16 at the end
  single_butterfly(temp0[0], temp0[1], &out[0], &out[16]);

  // 2 and 3 pass through to 8 and 24 at the end
  double_butterfly(temp0[3], cospi8_v, temp0[2], cospi24_v, &out[8], &out[24]);

  temp1[4] = vec_add(temp0[4], temp0[5]);
  temp1[5] = vec_sub(temp0[4], temp0[5]);
  temp1[6] = vec_sub(temp0[7], temp0[6]);
  temp1[7] = vec_add(temp0[7], temp0[6]);

  double_butterfly(temp0[14], cospi8_v, temp0[9], cospi24_v, &temp1[14],
                   &temp1[9]);
  double_butterfly(temp0[13], cospi24_v, temp0[10], cospi8m_v, &temp1[13],
                   &temp1[10]);

  temp1[17] = vec_add(temp0[17], temp0[18]);
  temp1[18] = vec_sub(temp0[17], temp0[18]);

  temp1[21] = vec_sub(temp0[22], temp0[21]);
  temp1[22] = vec_add(temp0[22], temp0[21]);

  temp1[25] = vec_add(temp0[25], temp0[26]);
  temp1[26] = vec_sub(temp0[25], temp0[26]);

  temp1[29] = vec_sub(temp0[30], temp0[29]);
  temp1[30] = vec_add(temp0[30], temp0[29]);

  // Pass through Stage 6
  temp2[16] = vec_add(temp0[16], temp0[19]);
  temp2[19] = vec_sub(temp0[16], temp0[19]);
  temp2[20] = vec_sub(temp0[23], temp0[20]);
  temp2[23] = vec_add(temp0[23], temp0[20]);
  temp2[24] = vec_add(temp0[24], temp0[27]);
  temp2[27] = vec_sub(temp0[24], temp0[27]);
  temp2[28] = vec_sub(temp0[31], temp0[28]);
  temp2[31] = vec_add(temp0[31], temp0[28]);

  // Stage 6
  // 4 and 7 pass through to 4 and 28 at the end
  double_butterfly(temp1[7], cospi4_v, temp1[4], cospi28_v, &out[4], &out[28]);
  // 5 and 6 pass through to 20 and 12 at the end
  double_butterfly(temp1[6], cospi20_v, temp1[5], cospi12_v, &out[20],
                   &out[12]);
  temp2[8] = vec_add(temp1[8], temp1[9]);
  temp2[9] = vec_sub(temp1[8], temp1[9]);
  temp2[10] = vec_sub(temp1[11], temp1[10]);
  temp2[11] = vec_add(temp1[11], temp1[10]);
  temp2[12] = vec_add(temp1[12], temp1[13]);
  temp2[13] = vec_sub(temp1[12], temp1[13]);
  temp2[14] = vec_sub(temp1[15], temp1[14]);
  temp2[15] = vec_add(temp1[15], temp1[14]);

  double_butterfly(temp1[30], cospi4_v, temp1[17], cospi28_v, &temp2[30],
                   &temp2[17]);
  double_butterfly(temp1[29], cospi28_v, temp1[18], cospi4m_v, &temp2[29],
                   &temp2[18]);
  double_butterfly(temp1[26], cospi20_v, temp1[21], cospi12_v, &temp2[26],
                   &temp2[21]);
  double_butterfly(temp1[25], cospi12_v, temp1[22], cospi20m_v, &temp2[25],
                   &temp2[22]);

  // Stage 7
  double_butterfly(temp2[15], cospi2_v, temp2[8], cospi30_v, &out[2], &out[30]);
  double_butterfly(temp2[14], cospi18_v, temp2[9], cospi14_v, &out[18],
                   &out[14]);
  double_butterfly(temp2[13], cospi10_v, temp2[10], cospi22_v, &out[10],
                   &out[22]);
  double_butterfly(temp2[12], cospi26_v, temp2[11], cospi6_v, &out[26],
                   &out[6]);

  temp0[16] = vec_add(temp2[16], temp2[17]);
  temp0[17] = vec_sub(temp2[16], temp2[17]);
  temp0[18] = vec_sub(temp2[19], temp2[18]);
  temp0[19] = vec_add(temp2[19], temp2[18]);
  temp0[20] = vec_add(temp2[20], temp2[21]);
  temp0[21] = vec_sub(temp2[20], temp2[21]);
  temp0[22] = vec_sub(temp2[23], temp2[22]);
  temp0[23] = vec_add(temp2[23], temp2[22]);
  temp0[24] = vec_add(temp2[24], temp2[25]);
  temp0[25] = vec_sub(temp2[24], temp2[25]);
  temp0[26] = vec_sub(temp2[27], temp2[26]);
  temp0[27] = vec_add(temp2[27], temp2[26]);
  temp0[28] = vec_add(temp2[28], temp2[29]);
  temp0[29] = vec_sub(temp2[28], temp2[29]);
  temp0[30] = vec_sub(temp2[31], temp2[30]);
  temp0[31] = vec_add(temp2[31], temp2[30]);

  // Final stage: output indices are bit-reversed.
  double_butterfly(temp0[31], cospi1_v, temp0[16], cospi31_v, &out[1],
                   &out[31]);
  double_butterfly(temp0[30], cospi17_v, temp0[17], cospi15_v, &out[17],
                   &out[15]);
  double_butterfly(temp0[29], cospi9_v, temp0[18], cospi23_v, &out[9],
                   &out[23]);
  double_butterfly(temp0[28], cospi25_v, temp0[19], cospi7_v, &out[25],
                   &out[7]);
  double_butterfly(temp0[27], cospi5_v, temp0[20], cospi27_v, &out[5],
                   &out[27]);
  double_butterfly(temp0[26], cospi21_v, temp0[21], cospi11_v, &out[21],
                   &out[11]);
  double_butterfly(temp0[25], cospi13_v, temp0[22], cospi19_v, &out[13],
                   &out[19]);
  double_butterfly(temp0[24], cospi29_v, temp0[23], cospi3_v, &out[29],
                   &out[3]);

  if (pass == 0) {
    for (int i = 0; i < 32; i++) {
      out[i] = sub_round_shift(out[i]);
    }
  }
}

void vpx_fdct32x32_rd_vsx(const int16_t *input, tran_low_t *out, int stride) {
  int16x8_t temp0[32];
  int16x8_t temp1[32];
  int16x8_t temp2[32];
  int16x8_t temp3[32];
  int16x8_t temp4[32];
  int16x8_t temp5[32];
  int16x8_t temp6[32];

  // Process in 8x32 columns.
  load(input, stride, temp0);
  vpx_fdct32_vsx(temp0, temp1, 0);

  load(input + 8, stride, temp0);
  vpx_fdct32_vsx(temp0, temp2, 0);

  load(input + 16, stride, temp0);
  vpx_fdct32_vsx(temp0, temp3, 0);

  load(input + 24, stride, temp0);
  vpx_fdct32_vsx(temp0, temp4, 0);

  // Generate the top band of rows by transposing the first set of 8 vectors
  // from each column strip together.
  transpose_8x8(&temp1[0], &temp0[0]);
  transpose_8x8(&temp2[0], &temp0[8]);
  transpose_8x8(&temp3[0], &temp0[16]);
  transpose_8x8(&temp4[0], &temp0[24]);

  vpx_fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out, temp6);

  // Second row of 8x32.
  transpose_8x8(&temp1[8], &temp0[0]);
  transpose_8x8(&temp2[8], &temp0[8]);
  transpose_8x8(&temp3[8], &temp0[16]);
  transpose_8x8(&temp4[8], &temp0[24]);

  vpx_fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out + 8 * 32, temp6);

  // Third row of 8x32.
  transpose_8x8(&temp1[16], &temp0[0]);
  transpose_8x8(&temp2[16], &temp0[8]);
  transpose_8x8(&temp3[16], &temp0[16]);
  transpose_8x8(&temp4[16], &temp0[24]);

  vpx_fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out + 16 * 32, temp6);

  // Final row of 8x32.
  transpose_8x8(&temp1[24], &temp0[0]);
  transpose_8x8(&temp2[24], &temp0[8]);
  transpose_8x8(&temp3[24], &temp0[16]);
  transpose_8x8(&temp4[24], &temp0[24]);

  vpx_fdct32_vsx(temp0, temp5, 1);

  transpose_8x8(&temp5[0], &temp6[0]);
  transpose_8x8(&temp5[8], &temp6[8]);
  transpose_8x8(&temp5[16], &temp6[16]);
  transpose_8x8(&temp5[24], &temp6[24]);

  store(out + 24 * 32, temp6);
}
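
// A minimal usage sketch, illustrative only (the buffer names below are
// hypothetical): the function reads a 32x32 block of residuals at the given
// stride and writes 32 * 32 coefficients row by row.
//
//   DECLARE_ALIGNED(16, int16_t, residual[32 * 32]);
//   DECLARE_ALIGNED(16, tran_low_t, coeff[32 * 32]);
//   vpx_fdct32x32_rd_vsx(residual, coeff, 32);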