/*
 * Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 * Use of this source code is governed by a BSD-style license
 * that can be found in the LICENSE file in the root of the source
 * tree. An additional intellectual property rights grant can be found
 * in the file PATENTS. All contributing project authors may
 * be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_X86_INV_TXFM_SSE2_H_
#define VPX_VPX_DSP_X86_INV_TXFM_SSE2_H_

#include <emmintrin.h>  // SSE2

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

static INLINE void idct8x8_12_transpose_16bit_4x8(const __m128i *const in,
                                                  __m128i *const out) {
  // Unpack 16 bit elements. Goes from:
  // in[0]: 30 31 32 33 00 01 02 03
  // in[1]: 20 21 22 23 10 11 12 13
  // in[2]: 40 41 42 43 70 71 72 73
  // in[3]: 50 51 52 53 60 61 62 63
  // to:
  // tr0_0: 00 10 01 11 02 12 03 13
  // tr0_1: 20 30 21 31 22 32 23 33
  // tr0_2: 40 50 41 51 42 52 43 53
  // tr0_3: 60 70 61 71 62 72 63 73
  const __m128i tr0_0 = _mm_unpackhi_epi16(in[0], in[1]);
  const __m128i tr0_1 = _mm_unpacklo_epi16(in[1], in[0]);
  const __m128i tr0_2 = _mm_unpacklo_epi16(in[2], in[3]);
  const __m128i tr0_3 = _mm_unpackhi_epi16(in[3], in[2]);

  // Unpack 32 bit elements resulting in:
  // tr1_0: 00 10 20 30 01 11 21 31
  // tr1_1: 40 50 60 70 41 51 61 71
  // tr1_2: 02 12 22 32 03 13 23 33
  // tr1_3: 42 52 62 72 43 53 63 73
  const __m128i tr1_0 = _mm_unpacklo_epi32(tr0_0, tr0_1);
  const __m128i tr1_1 = _mm_unpacklo_epi32(tr0_2, tr0_3);
  const __m128i tr1_2 = _mm_unpackhi_epi32(tr0_0, tr0_1);
  const __m128i tr1_3 = _mm_unpackhi_epi32(tr0_2, tr0_3);

  // Unpack 64 bit elements resulting in:
  // out[0]: 00 10 20 30 40 50 60 70
  // out[1]: 01 11 21 31 41 51 61 71
  // out[2]: 02 12 22 32 42 52 62 72
  // out[3]: 03 13 23 33 43 53 63 73
  out[0] = _mm_unpacklo_epi64(tr1_0, tr1_1);
  out[1] = _mm_unpackhi_epi64(tr1_0, tr1_1);
  out[2] = _mm_unpacklo_epi64(tr1_2, tr1_3);
  out[3] = _mm_unpackhi_epi64(tr1_2, tr1_3);
}

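// SIMD counterpart of the scalar dct_const_round_shift(): each 32-bit lane
// becomes (in + DCT_CONST_ROUNDING) >> DCT_CONST_BITS, a round-to-nearest
// divide by 1 << DCT_CONST_BITS since DCT_CONST_ROUNDING is
// 1 << (DCT_CONST_BITS - 1).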
static INLINE __m128i dct_const_round_shift_sse2(const __m128i in) {
  const __m128i t = _mm_add_epi32(in, _mm_set1_epi32(DCT_CONST_ROUNDING));
  return _mm_srai_epi32(t, DCT_CONST_BITS);
}

static INLINE __m128i idct_madd_round_shift_sse2(const __m128i in,
                                                 const __m128i cospi) {
  const __m128i t = _mm_madd_epi16(in, cospi);
  return dct_const_round_shift_sse2(t);
}

// Calculate the dot products of in0/in1 with x, round shift, then pack
// (saturate) the two 32-bit results down to 16-bit.
static INLINE __m128i idct_calc_wraplow_sse2(const __m128i in0,
                                             const __m128i in1,
                                             const __m128i x) {
  const __m128i t0 = idct_madd_round_shift_sse2(in0, x);
  const __m128i t1 = idct_madd_round_shift_sse2(in1, x);
  return _mm_packs_epi32(t0, t1);
}

// Butterfly: multiply the paired 16-bit elements by constants and combine:
//   *out0 = round_shift(in0 * c0 - in1 * c1)
//   *out1 = round_shift(in0 * c1 + in1 * c0)
static INLINE void butterfly(const __m128i in0, const __m128i in1, const int c0,
                             const int c1, __m128i *const out0,
                             __m128i *const out1) {
  const __m128i cst0 = pair_set_epi16(c0, -c1);
  const __m128i cst1 = pair_set_epi16(c1, c0);
  const __m128i lo = _mm_unpacklo_epi16(in0, in1);
  const __m128i hi = _mm_unpackhi_epi16(in0, in1);
  *out0 = idct_calc_wraplow_sse2(lo, hi, cst0);
  *out1 = idct_calc_wraplow_sse2(lo, hi, cst1);
}

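// Multiply each 16-bit element of 'in' by cospi_16_64 and round shift, i.e.
// a butterfly with the second input fixed at zero.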
static INLINE __m128i butterfly_cospi16(const __m128i in) {
  const __m128i cst = pair_set_epi16(cospi_16_64, cospi_16_64);
  const __m128i lo = _mm_unpacklo_epi16(in, _mm_setzero_si128());
  const __m128i hi = _mm_unpackhi_epi16(in, _mm_setzero_si128());
  return idct_calc_wraplow_sse2(lo, hi, cst);
}

// Functions to allow 8-bit optimisations to be used when profile 0 is used
// with highbitdepth enabled. With CONFIG_VP9_HIGHBITDEPTH, tran_low_t is
// 32 bits wide, so the coefficients are packed down to 16 bits on load.
static INLINE __m128i load_input_data4(const tran_low_t *data) {
#if CONFIG_VP9_HIGHBITDEPTH
  const __m128i zero = _mm_setzero_si128();
  const __m128i in = _mm_load_si128((const __m128i *)data);
  return _mm_packs_epi32(in, zero);
#else
  return _mm_loadl_epi64((const __m128i *)data);
#endif
}

static INLINE __m128i load_input_data8(const tran_low_t *data) {
#if CONFIG_VP9_HIGHBITDEPTH
  const __m128i in0 = _mm_load_si128((const __m128i *)data);
  const __m128i in1 = _mm_load_si128((const __m128i *)(data + 4));
  return _mm_packs_epi32(in0, in1);
#else
  return _mm_load_si128((const __m128i *)data);
#endif
}

static INLINE void load_transpose_16bit_8x8(const tran_low_t *input,
                                            const int stride,
                                            __m128i *const in) {
  in[0] = load_input_data8(input + 0 * stride);
  in[1] = load_input_data8(input + 1 * stride);
  in[2] = load_input_data8(input + 2 * stride);
  in[3] = load_input_data8(input + 3 * stride);
  in[4] = load_input_data8(input + 4 * stride);
  in[5] = load_input_data8(input + 5 * stride);
  in[6] = load_input_data8(input + 6 * stride);
  in[7] = load_input_data8(input + 7 * stride);
  transpose_16bit_8x8(in, in);
}

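// Add the eight 16-bit residuals in 'in_x' to eight destination pixels,
// saturate to [0, 255], and store the result back as eight bytes.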
static INLINE void recon_and_store(uint8_t *const dest, const __m128i in_x) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d0 = _mm_loadl_epi64((__m128i *)(dest));
  d0 = _mm_unpacklo_epi8(d0, zero);
  d0 = _mm_add_epi16(in_x, d0);
  d0 = _mm_packus_epi16(d0, d0);
  _mm_storel_epi64((__m128i *)(dest), d0);
}

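// Final rounding for the 8x8 idct: out = (in + 16) >> 5, the vector
// equivalent of ROUND_POWER_OF_TWO(in, 5) in the scalar code.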
static INLINE void round_shift_8x8(const __m128i *const in,
                                   __m128i *const out) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 4);

  out[0] = _mm_add_epi16(in[0], final_rounding);
  out[1] = _mm_add_epi16(in[1], final_rounding);
  out[2] = _mm_add_epi16(in[2], final_rounding);
  out[3] = _mm_add_epi16(in[3], final_rounding);
  out[4] = _mm_add_epi16(in[4], final_rounding);
  out[5] = _mm_add_epi16(in[5], final_rounding);
  out[6] = _mm_add_epi16(in[6], final_rounding);
  out[7] = _mm_add_epi16(in[7], final_rounding);

  out[0] = _mm_srai_epi16(out[0], 5);
  out[1] = _mm_srai_epi16(out[1], 5);
  out[2] = _mm_srai_epi16(out[2], 5);
  out[3] = _mm_srai_epi16(out[3], 5);
  out[4] = _mm_srai_epi16(out[4], 5);
  out[5] = _mm_srai_epi16(out[5], 5);
  out[6] = _mm_srai_epi16(out[6], 5);
  out[7] = _mm_srai_epi16(out[7], 5);
}

static INLINE void write_buffer_8x8(const __m128i *const in,
                                    uint8_t *const dest, const int stride) {
  __m128i t[8];

  round_shift_8x8(in, t);

  recon_and_store(dest + 0 * stride, t[0]);
  recon_and_store(dest + 1 * stride, t[1]);
  recon_and_store(dest + 2 * stride, t[2]);
  recon_and_store(dest + 3 * stride, t[3]);
  recon_and_store(dest + 4 * stride, t[4]);
  recon_and_store(dest + 5 * stride, t[5]);
  recon_and_store(dest + 6 * stride, t[6]);
  recon_and_store(dest + 7 * stride, t[7]);
}

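// Reconstruct a 4x4 block: in[0] carries rows 0-1 and in[1] rows 2-3 of the
// 16-bit residual. Add them to the destination pixels, saturate, and store
// four bytes per row.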
static INLINE void recon_and_store4x4_sse2(const __m128i *const in,
                                           uint8_t *const dest,
                                           const int stride) {
  const __m128i zero = _mm_setzero_si128();
  __m128i d[2];

  // Reconstruction and Store
  d[0] = _mm_cvtsi32_si128(*(const int *)(dest));
  d[1] = _mm_cvtsi32_si128(*(const int *)(dest + stride * 3));
  d[0] = _mm_unpacklo_epi32(d[0],
                            _mm_cvtsi32_si128(*(const int *)(dest + stride)));
  d[1] = _mm_unpacklo_epi32(
      _mm_cvtsi32_si128(*(const int *)(dest + stride * 2)), d[1]);
  d[0] = _mm_unpacklo_epi8(d[0], zero);
  d[1] = _mm_unpacklo_epi8(d[1], zero);
  d[0] = _mm_add_epi16(d[0], in[0]);
  d[1] = _mm_add_epi16(d[1], in[1]);
  d[0] = _mm_packus_epi16(d[0], d[1]);

  *(int *)dest = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 2) = _mm_cvtsi128_si32(d[0]);
  d[0] = _mm_srli_si128(d[0], 4);
  *(int *)(dest + stride * 3) = _mm_cvtsi128_si32(d[0]);
}

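// Round shift all 32 rows by 6 ((x + 32) >> 6, with a saturating add) and
// accumulate each row into the destination.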
static INLINE void store_buffer_8x32(__m128i *in, uint8_t *dst, int stride) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
  int j = 0;
  while (j < 32) {
    in[j] = _mm_adds_epi16(in[j], final_rounding);
    in[j + 1] = _mm_adds_epi16(in[j + 1], final_rounding);

    in[j] = _mm_srai_epi16(in[j], 6);
    in[j + 1] = _mm_srai_epi16(in[j + 1], 6);

    recon_and_store(dst, in[j]);
    dst += stride;
    recon_and_store(dst, in[j + 1]);
    dst += stride;
    j += 2;
  }
}

static INLINE void write_buffer_8x1(uint8_t *const dest, const __m128i in) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
  __m128i out;
  out = _mm_adds_epi16(in, final_rounding);
  out = _mm_srai_epi16(out, 6);
  recon_and_store(dest, out);
}

// Addition/subtraction butterfly only (no multiplies):
//   out[i] = in[i] + in[size - 1 - i]
//   out[size - 1 - i] = in[i] - in[size - 1 - i]
// for i in [0, size / 2). size is 16 or 32.
static INLINE void add_sub_butterfly(const __m128i *in, __m128i *out,
                                     int size) {
  int i = 0;
  const int num = size >> 1;
  const int bound = size - 1;
  while (i < num) {
    out[i] = _mm_add_epi16(in[i], in[bound - i]);
    out[bound - i] = _mm_sub_epi16(in[i], in[bound - i]);
    i++;
  }
}

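// 8-point inverse DCT on eight columns at once (one 16-bit lane per column),
// mirroring the stage structure of the scalar idct8_c().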
static INLINE void idct8(const __m128i *const in /*in[8]*/,
                         __m128i *const out /*out[8]*/) {
  __m128i step1[8], step2[8];

  // stage 1
  butterfly(in[1], in[7], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  butterfly(in[5], in[3], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);

  // stage 2
  butterfly(in[0], in[4], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
  butterfly(in[2], in[6], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);

  step2[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step2[7] = _mm_add_epi16(step1[7], step1[6]);

  // stage 3
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[1], step2[2]);
  step1[2] = _mm_sub_epi16(step2[1], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);

  // stage 4
  out[0] = _mm_add_epi16(step1[0], step2[7]);
  out[1] = _mm_add_epi16(step1[1], step1[6]);
  out[2] = _mm_add_epi16(step1[2], step1[5]);
  out[3] = _mm_add_epi16(step1[3], step2[4]);
  out[4] = _mm_sub_epi16(step1[3], step2[4]);
  out[5] = _mm_sub_epi16(step1[2], step1[5]);
  out[6] = _mm_sub_epi16(step1[1], step1[6]);
  out[7] = _mm_sub_epi16(step1[0], step2[7]);
}

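// 8x8 idct kernel for the case where only the top-left 4x4 quarter of the
// coefficients can be non-zero (the "12" is the eob bound this path
// handles). The first pass computes two packed results per register; the
// second pass runs the full idct8().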
static INLINE void idct8x8_12_add_kernel_sse2(__m128i *const io /*io[8]*/) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i cp_16_16 = pair_set_epi16(cospi_16_64, cospi_16_64);
  const __m128i cp_16_n16 = pair_set_epi16(cospi_16_64, -cospi_16_64);
  __m128i step1[8], step2[8], tmp[4];

  transpose_16bit_4x4(io, io);
  // io[0]: 00 10 20 30 01 11 21 31
  // io[1]: 02 12 22 32 03 13 23 33

  // stage 1
  {
    const __m128i cp_28_n4 = pair_set_epi16(cospi_28_64, -cospi_4_64);
    const __m128i cp_4_28 = pair_set_epi16(cospi_4_64, cospi_28_64);
    const __m128i cp_n20_12 = pair_set_epi16(-cospi_20_64, cospi_12_64);
    const __m128i cp_12_20 = pair_set_epi16(cospi_12_64, cospi_20_64);
    const __m128i lo_1 = _mm_unpackhi_epi16(io[0], zero);
    const __m128i lo_3 = _mm_unpackhi_epi16(io[1], zero);
    step1[4] = idct_calc_wraplow_sse2(cp_28_n4, cp_4_28, lo_1);    // step1 4&7
    step1[5] = idct_calc_wraplow_sse2(cp_n20_12, cp_12_20, lo_3);  // step1 5&6
  }

  // stage 2
  {
    const __m128i cp_24_n8 = pair_set_epi16(cospi_24_64, -cospi_8_64);
    const __m128i cp_8_24 = pair_set_epi16(cospi_8_64, cospi_24_64);
    const __m128i lo_0 = _mm_unpacklo_epi16(io[0], zero);
    const __m128i lo_2 = _mm_unpacklo_epi16(io[1], zero);
    const __m128i t = idct_madd_round_shift_sse2(cp_16_16, lo_0);
    step2[0] = _mm_packs_epi32(t, t);                            // step2 0&1
    step2[2] = idct_calc_wraplow_sse2(cp_8_24, cp_24_n8, lo_2);  // step2 3&2
    step2[4] = _mm_add_epi16(step1[4], step1[5]);                // step2 4&7
    step2[5] = _mm_sub_epi16(step1[4], step1[5]);                // step2 5&6
    step2[6] = _mm_unpackhi_epi64(step2[5], zero);               // step2 6
  }

  // stage 3
  {
    const __m128i lo_65 = _mm_unpacklo_epi16(step2[6], step2[5]);
    tmp[0] = _mm_add_epi16(step2[0], step2[2]);                     // step1 0&1
    tmp[1] = _mm_sub_epi16(step2[0], step2[2]);                     // step1 3&2
    step1[2] = _mm_unpackhi_epi64(tmp[1], tmp[0]);                  // step1 2&1
    step1[3] = _mm_unpacklo_epi64(tmp[1], tmp[0]);                  // step1 3&0
    step1[5] = idct_calc_wraplow_sse2(cp_16_n16, cp_16_16, lo_65);  // step1 5&6
  }

  // stage 4
  tmp[0] = _mm_add_epi16(step1[3], step2[4]);  // output 3&0
  tmp[1] = _mm_add_epi16(step1[2], step1[5]);  // output 2&1
  tmp[2] = _mm_sub_epi16(step1[3], step2[4]);  // output 4&7
  tmp[3] = _mm_sub_epi16(step1[2], step1[5]);  // output 5&6

  idct8x8_12_transpose_16bit_4x8(tmp, io);
  io[4] = io[5] = io[6] = io[7] = zero;

  idct8(io, io);
}

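// 16-point inverse DCT on eight columns at once, mirroring the stage
// structure of the scalar idct16_c().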
static INLINE void idct16_8col(const __m128i *const in /*in[16]*/,
                               __m128i *const out /*out[16]*/) {
  __m128i step1[16], step2[16];

  // stage 2
  butterfly(in[1], in[15], cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly(in[9], in[7], cospi_14_64, cospi_18_64, &step2[9], &step2[14]);
  butterfly(in[5], in[11], cospi_22_64, cospi_10_64, &step2[10], &step2[13]);
  butterfly(in[13], in[3], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  butterfly(in[2], in[14], cospi_28_64, cospi_4_64, &step1[4], &step1[7]);
  butterfly(in[10], in[6], cospi_12_64, cospi_20_64, &step1[5], &step1[6]);
  step1[8] = _mm_add_epi16(step2[8], step2[9]);
  step1[9] = _mm_sub_epi16(step2[8], step2[9]);
  step1[10] = _mm_sub_epi16(step2[11], step2[10]);
  step1[11] = _mm_add_epi16(step2[10], step2[11]);
  step1[12] = _mm_add_epi16(step2[12], step2[13]);
  step1[13] = _mm_sub_epi16(step2[12], step2[13]);
  step1[14] = _mm_sub_epi16(step2[15], step2[14]);
  step1[15] = _mm_add_epi16(step2[14], step2[15]);

  // stage 4
  butterfly(in[0], in[8], cospi_16_64, cospi_16_64, &step2[1], &step2[0]);
  butterfly(in[4], in[12], cospi_24_64, cospi_8_64, &step2[2], &step2[3]);
  butterfly(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
            &step2[14]);
  butterfly(step1[10], step1[13], -cospi_8_64, -cospi_24_64, &step2[13],
            &step2[10]);
  step2[5] = _mm_sub_epi16(step1[4], step1[5]);
  step1[4] = _mm_add_epi16(step1[4], step1[5]);
  step2[6] = _mm_sub_epi16(step1[7], step1[6]);
  step1[7] = _mm_add_epi16(step1[6], step1[7]);
  step2[8] = step1[8];
  step2[11] = step1[11];
  step2[12] = step1[12];
  step2[15] = step1[15];

  // stage 5
  step1[0] = _mm_add_epi16(step2[0], step2[3]);
  step1[1] = _mm_add_epi16(step2[1], step2[2]);
  step1[2] = _mm_sub_epi16(step2[1], step2[2]);
  step1[3] = _mm_sub_epi16(step2[0], step2[3]);
  butterfly(step2[6], step2[5], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[8] = _mm_add_epi16(step2[8], step2[11]);
  step1[9] = _mm_add_epi16(step2[9], step2[10]);
  step1[10] = _mm_sub_epi16(step2[9], step2[10]);
  step1[11] = _mm_sub_epi16(step2[8], step2[11]);
  step1[12] = _mm_sub_epi16(step2[15], step2[12]);
  step1[13] = _mm_sub_epi16(step2[14], step2[13]);
  step1[14] = _mm_add_epi16(step2[14], step2[13]);
  step1[15] = _mm_add_epi16(step2[15], step2[12]);

  // stage 6
  step2[0] = _mm_add_epi16(step1[0], step1[7]);
  step2[1] = _mm_add_epi16(step1[1], step1[6]);
  step2[2] = _mm_add_epi16(step1[2], step1[5]);
  step2[3] = _mm_add_epi16(step1[3], step1[4]);
  step2[4] = _mm_sub_epi16(step1[3], step1[4]);
  step2[5] = _mm_sub_epi16(step1[2], step1[5]);
  step2[6] = _mm_sub_epi16(step1[1], step1[6]);
  step2[7] = _mm_sub_epi16(step1[0], step1[7]);
  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &step2[10],
            &step2[13]);
  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &step2[11],
            &step2[12]);

  // stage 7
  out[0] = _mm_add_epi16(step2[0], step1[15]);
  out[1] = _mm_add_epi16(step2[1], step1[14]);
  out[2] = _mm_add_epi16(step2[2], step2[13]);
  out[3] = _mm_add_epi16(step2[3], step2[12]);
  out[4] = _mm_add_epi16(step2[4], step2[11]);
  out[5] = _mm_add_epi16(step2[5], step2[10]);
  out[6] = _mm_add_epi16(step2[6], step1[9]);
  out[7] = _mm_add_epi16(step2[7], step1[8]);
  out[8] = _mm_sub_epi16(step2[7], step1[8]);
  out[9] = _mm_sub_epi16(step2[6], step1[9]);
  out[10] = _mm_sub_epi16(step2[5], step2[10]);
  out[11] = _mm_sub_epi16(step2[4], step2[11]);
  out[12] = _mm_sub_epi16(step2[3], step2[12]);
  out[13] = _mm_sub_epi16(step2[2], step2[13]);
  out[14] = _mm_sub_epi16(step2[1], step1[14]);
  out[15] = _mm_sub_epi16(step2[0], step1[15]);
}

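// First pass of the 16x16 idct when only the top-left 4x4 quarter of the
// coefficients can be non-zero (the "10" is the eob bound this path
// handles). Produces the left 8x16 half of the intermediate result.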
static INLINE void idct16x16_10_pass1(const __m128i *const input /*input[4]*/,
                                      __m128i *const output /*output[16]*/) {
  const __m128i zero = _mm_setzero_si128();
  const __m128i k__cospi_p16_p16 = pair_set_epi16(cospi_16_64, cospi_16_64);
  const __m128i k__cospi_m16_p16 = pair_set_epi16(-cospi_16_64, cospi_16_64);
  __m128i step1[16], step2[16];

  transpose_16bit_4x4(input, output);

  // stage 2
  {
    const __m128i k__cospi_p30_m02 = pair_set_epi16(cospi_30_64, -cospi_2_64);
    const __m128i k__cospi_p02_p30 = pair_set_epi16(cospi_2_64, cospi_30_64);
    const __m128i k__cospi_p06_m26 = pair_set_epi16(cospi_6_64, -cospi_26_64);
    const __m128i k__cospi_p26_p06 = pair_set_epi16(cospi_26_64, cospi_6_64);
    const __m128i lo_1_15 = _mm_unpackhi_epi16(output[0], zero);
    const __m128i lo_13_3 = _mm_unpackhi_epi16(zero, output[1]);
    step2[8] = idct_calc_wraplow_sse2(k__cospi_p30_m02, k__cospi_p02_p30,
                                      lo_1_15);  // step2 8&15
    step2[11] = idct_calc_wraplow_sse2(k__cospi_p06_m26, k__cospi_p26_p06,
                                       lo_13_3);  // step2 11&12
  }

  // stage 3
  {
    const __m128i k__cospi_p28_m04 = pair_set_epi16(cospi_28_64, -cospi_4_64);
    const __m128i k__cospi_p04_p28 = pair_set_epi16(cospi_4_64, cospi_28_64);
    const __m128i lo_2_14 = _mm_unpacklo_epi16(output[1], zero);
    step1[4] = idct_calc_wraplow_sse2(k__cospi_p28_m04, k__cospi_p04_p28,
                                      lo_2_14);  // step1 4&7
    step1[13] = _mm_unpackhi_epi64(step2[11], zero);
    step1[14] = _mm_unpackhi_epi64(step2[8], zero);
  }

  // stage 4
  {
    const __m128i k__cospi_m08_p24 = pair_set_epi16(-cospi_8_64, cospi_24_64);
    const __m128i k__cospi_p24_p08 = pair_set_epi16(cospi_24_64, cospi_8_64);
    const __m128i k__cospi_m24_m08 = pair_set_epi16(-cospi_24_64, -cospi_8_64);
    const __m128i lo_0_8 = _mm_unpacklo_epi16(output[0], zero);
    const __m128i lo_9_14 = _mm_unpacklo_epi16(step2[8], step1[14]);
    const __m128i lo_10_13 = _mm_unpacklo_epi16(step2[11], step1[13]);
    const __m128i t = idct_madd_round_shift_sse2(lo_0_8, k__cospi_p16_p16);
    step1[0] = _mm_packs_epi32(t, t);  // step2 0&1
    step2[9] = idct_calc_wraplow_sse2(k__cospi_m08_p24, k__cospi_p24_p08,
                                      lo_9_14);  // step2 9&14
    step2[10] = idct_calc_wraplow_sse2(k__cospi_m24_m08, k__cospi_m08_p24,
                                       lo_10_13);  // step2 10&13
    step2[6] = _mm_unpackhi_epi64(step1[4], zero);
  }

  // stage 5
  {
    const __m128i lo_5_6 = _mm_unpacklo_epi16(step1[4], step2[6]);
    step1[6] = idct_calc_wraplow_sse2(k__cospi_p16_p16, k__cospi_m16_p16,
                                      lo_5_6);  // step1 6&5
    step1[8] = _mm_add_epi16(step2[8], step2[11]);
    step1[9] = _mm_add_epi16(step2[9], step2[10]);
    step1[10] = _mm_sub_epi16(step2[9], step2[10]);
    step1[11] = _mm_sub_epi16(step2[8], step2[11]);
    step1[12] = _mm_unpackhi_epi64(step1[11], zero);
    step1[13] = _mm_unpackhi_epi64(step1[10], zero);
    step1[14] = _mm_unpackhi_epi64(step1[9], zero);
    step1[15] = _mm_unpackhi_epi64(step1[8], zero);
  }

  // stage 6
  {
    const __m128i lo_10_13 = _mm_unpacklo_epi16(step1[10], step1[13]);
    const __m128i lo_11_12 = _mm_unpacklo_epi16(step1[11], step1[12]);
    step2[10] = idct_calc_wraplow_sse2(k__cospi_m16_p16, k__cospi_p16_p16,
                                       lo_10_13);  // step2 10&13
    step2[11] = idct_calc_wraplow_sse2(k__cospi_m16_p16, k__cospi_p16_p16,
                                       lo_11_12);  // step2 11&12
    step2[13] = _mm_unpackhi_epi64(step2[10], zero);
    step2[12] = _mm_unpackhi_epi64(step2[11], zero);
    step2[3] = _mm_add_epi16(step1[0], step1[4]);
    step2[1] = _mm_add_epi16(step1[0], step1[6]);
    step2[6] = _mm_sub_epi16(step1[0], step1[6]);
    step2[4] = _mm_sub_epi16(step1[0], step1[4]);
    step2[0] = _mm_unpackhi_epi64(step2[3], zero);
    step2[2] = _mm_unpackhi_epi64(step2[1], zero);
    step2[5] = _mm_unpackhi_epi64(step2[6], zero);
    step2[7] = _mm_unpackhi_epi64(step2[4], zero);
  }

  // stage 7. Left 8x16 only.
  output[0] = _mm_add_epi16(step2[0], step1[15]);
  output[1] = _mm_add_epi16(step2[1], step1[14]);
  output[2] = _mm_add_epi16(step2[2], step2[13]);
  output[3] = _mm_add_epi16(step2[3], step2[12]);
  output[4] = _mm_add_epi16(step2[4], step2[11]);
  output[5] = _mm_add_epi16(step2[5], step2[10]);
  output[6] = _mm_add_epi16(step2[6], step1[9]);
  output[7] = _mm_add_epi16(step2[7], step1[8]);
  output[8] = _mm_sub_epi16(step2[7], step1[8]);
  output[9] = _mm_sub_epi16(step2[6], step1[9]);
  output[10] = _mm_sub_epi16(step2[5], step2[10]);
  output[11] = _mm_sub_epi16(step2[4], step2[11]);
  output[12] = _mm_sub_epi16(step2[3], step2[12]);
  output[13] = _mm_sub_epi16(step2[2], step2[13]);
  output[14] = _mm_sub_epi16(step2[1], step1[14]);
  output[15] = _mm_sub_epi16(step2[0], step1[15]);
}

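// Second pass of the 16x16 eob<=10 idct: after the first pass only rows 0-3
// of the intermediate are non-zero, so io[0]..io[3] are the only real inputs
// and the stage 2/3 butterflies substitute zero for the missing ones.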
static INLINE void idct16x16_10_pass2(__m128i *const l /*l[8]*/,
                                      __m128i *const io /*io[16]*/) {
  const __m128i zero = _mm_setzero_si128();
  __m128i step1[16], step2[16];

  transpose_16bit_4x8(l, io);

  // stage 2
  butterfly(io[1], zero, cospi_30_64, cospi_2_64, &step2[8], &step2[15]);
  butterfly(zero, io[3], cospi_6_64, cospi_26_64, &step2[11], &step2[12]);

  // stage 3
  butterfly(io[2], zero, cospi_28_64, cospi_4_64, &step1[4], &step1[7]);

  // stage 4
  step1[0] = butterfly_cospi16(io[0]);
  butterfly(step2[15], step2[8], cospi_24_64, cospi_8_64, &step2[9],
            &step2[14]);
  butterfly(step2[11], step2[12], -cospi_8_64, -cospi_24_64, &step2[13],
            &step2[10]);

  // stage 5
  butterfly(step1[7], step1[4], cospi_16_64, cospi_16_64, &step1[5], &step1[6]);
  step1[8] = _mm_add_epi16(step2[8], step2[11]);
  step1[9] = _mm_add_epi16(step2[9], step2[10]);
  step1[10] = _mm_sub_epi16(step2[9], step2[10]);
  step1[11] = _mm_sub_epi16(step2[8], step2[11]);
  step1[12] = _mm_sub_epi16(step2[15], step2[12]);
  step1[13] = _mm_sub_epi16(step2[14], step2[13]);
  step1[14] = _mm_add_epi16(step2[14], step2[13]);
  step1[15] = _mm_add_epi16(step2[15], step2[12]);

  // stage 6
  step2[0] = _mm_add_epi16(step1[0], step1[7]);
  step2[1] = _mm_add_epi16(step1[0], step1[6]);
  step2[2] = _mm_add_epi16(step1[0], step1[5]);
  step2[3] = _mm_add_epi16(step1[0], step1[4]);
  step2[4] = _mm_sub_epi16(step1[0], step1[4]);
  step2[5] = _mm_sub_epi16(step1[0], step1[5]);
  step2[6] = _mm_sub_epi16(step1[0], step1[6]);
  step2[7] = _mm_sub_epi16(step1[0], step1[7]);
  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &step2[10],
            &step2[13]);
  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &step2[11],
            &step2[12]);

  // stage 7
  io[0] = _mm_add_epi16(step2[0], step1[15]);
  io[1] = _mm_add_epi16(step2[1], step1[14]);
  io[2] = _mm_add_epi16(step2[2], step2[13]);
  io[3] = _mm_add_epi16(step2[3], step2[12]);
  io[4] = _mm_add_epi16(step2[4], step2[11]);
  io[5] = _mm_add_epi16(step2[5], step2[10]);
  io[6] = _mm_add_epi16(step2[6], step1[9]);
  io[7] = _mm_add_epi16(step2[7], step1[8]);
  io[8] = _mm_sub_epi16(step2[7], step1[8]);
  io[9] = _mm_sub_epi16(step2[6], step1[9]);
  io[10] = _mm_sub_epi16(step2[5], step2[10]);
  io[11] = _mm_sub_epi16(step2[4], step2[11]);
  io[12] = _mm_sub_epi16(step2[3], step2[12]);
  io[13] = _mm_sub_epi16(step2[2], step2[13]);
  io[14] = _mm_sub_epi16(step2[1], step1[14]);
  io[15] = _mm_sub_epi16(step2[0], step1[15]);
}

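// Stages 4 to 6 of the 32-point idct for outputs 8-15 (the second quarter).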
static INLINE void idct32_8x32_quarter_2_stage_4_to_6(
    __m128i *const step1 /*step1[16]*/, __m128i *const out /*out[16]*/) {
  __m128i step2[32];

  // stage 4
  step2[8] = step1[8];
  step2[15] = step1[15];
  butterfly(step1[14], step1[9], cospi_24_64, cospi_8_64, &step2[9],
            &step2[14]);
  butterfly(step1[13], step1[10], -cospi_8_64, cospi_24_64, &step2[10],
            &step2[13]);
  step2[11] = step1[11];
  step2[12] = step1[12];

  // stage 5
  step1[8] = _mm_add_epi16(step2[8], step2[11]);
  step1[9] = _mm_add_epi16(step2[9], step2[10]);
  step1[10] = _mm_sub_epi16(step2[9], step2[10]);
  step1[11] = _mm_sub_epi16(step2[8], step2[11]);
  step1[12] = _mm_sub_epi16(step2[15], step2[12]);
  step1[13] = _mm_sub_epi16(step2[14], step2[13]);
  step1[14] = _mm_add_epi16(step2[14], step2[13]);
  step1[15] = _mm_add_epi16(step2[15], step2[12]);

  // stage 6
  out[8] = step1[8];
  out[9] = step1[9];
  butterfly(step1[13], step1[10], cospi_16_64, cospi_16_64, &out[10], &out[13]);
  butterfly(step1[12], step1[11], cospi_16_64, cospi_16_64, &out[11], &out[12]);
  out[14] = step1[14];
  out[15] = step1[15];
}

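// Stages 4 to 7 of the 32-point idct for outputs 16-31 (the second half).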
static INLINE void idct32_8x32_quarter_3_4_stage_4_to_7(
    __m128i *const step1 /*step1[32]*/, __m128i *const out /*out[32]*/) {
  __m128i step2[32];

  // stage 4
  step2[16] = _mm_add_epi16(step1[16], step1[19]);
  step2[17] = _mm_add_epi16(step1[17], step1[18]);
  step2[18] = _mm_sub_epi16(step1[17], step1[18]);
  step2[19] = _mm_sub_epi16(step1[16], step1[19]);
  step2[20] = _mm_sub_epi16(step1[23], step1[20]);
  step2[21] = _mm_sub_epi16(step1[22], step1[21]);
  step2[22] = _mm_add_epi16(step1[22], step1[21]);
  step2[23] = _mm_add_epi16(step1[23], step1[20]);

  step2[24] = _mm_add_epi16(step1[24], step1[27]);
  step2[25] = _mm_add_epi16(step1[25], step1[26]);
  step2[26] = _mm_sub_epi16(step1[25], step1[26]);
  step2[27] = _mm_sub_epi16(step1[24], step1[27]);
  step2[28] = _mm_sub_epi16(step1[31], step1[28]);
  step2[29] = _mm_sub_epi16(step1[30], step1[29]);
  step2[30] = _mm_add_epi16(step1[29], step1[30]);
  step2[31] = _mm_add_epi16(step1[28], step1[31]);

  // stage 5
  step1[16] = step2[16];
  step1[17] = step2[17];
  butterfly(step2[29], step2[18], cospi_24_64, cospi_8_64, &step1[18],
            &step1[29]);
  butterfly(step2[28], step2[19], cospi_24_64, cospi_8_64, &step1[19],
            &step1[28]);
  butterfly(step2[27], step2[20], -cospi_8_64, cospi_24_64, &step1[20],
            &step1[27]);
  butterfly(step2[26], step2[21], -cospi_8_64, cospi_24_64, &step1[21],
            &step1[26]);
  step1[22] = step2[22];
  step1[23] = step2[23];
  step1[24] = step2[24];
  step1[25] = step2[25];
  step1[30] = step2[30];
  step1[31] = step2[31];

  // stage 6
  out[16] = _mm_add_epi16(step1[16], step1[23]);
  out[17] = _mm_add_epi16(step1[17], step1[22]);
  out[18] = _mm_add_epi16(step1[18], step1[21]);
  out[19] = _mm_add_epi16(step1[19], step1[20]);
  step2[20] = _mm_sub_epi16(step1[19], step1[20]);
  step2[21] = _mm_sub_epi16(step1[18], step1[21]);
  step2[22] = _mm_sub_epi16(step1[17], step1[22]);
  step2[23] = _mm_sub_epi16(step1[16], step1[23]);

  step2[24] = _mm_sub_epi16(step1[31], step1[24]);
  step2[25] = _mm_sub_epi16(step1[30], step1[25]);
  step2[26] = _mm_sub_epi16(step1[29], step1[26]);
  step2[27] = _mm_sub_epi16(step1[28], step1[27]);
  out[28] = _mm_add_epi16(step1[27], step1[28]);
  out[29] = _mm_add_epi16(step1[26], step1[29]);
  out[30] = _mm_add_epi16(step1[25], step1[30]);
  out[31] = _mm_add_epi16(step1[24], step1[31]);

  // stage 7
  butterfly(step2[27], step2[20], cospi_16_64, cospi_16_64, &out[20], &out[27]);
  butterfly(step2[26], step2[21], cospi_16_64, cospi_16_64, &out[21], &out[26]);
  butterfly(step2[25], step2[22], cospi_16_64, cospi_16_64, &out[22], &out[25]);
  butterfly(step2[24], step2[23], cospi_16_64, cospi_16_64, &out[23], &out[24]);
}

void idct4_sse2(__m128i *const in);
void vpx_idct8_sse2(__m128i *const in);
void idct16_sse2(__m128i *const in0, __m128i *const in1);
void iadst4_sse2(__m128i *const in);
void iadst8_sse2(__m128i *const in);
void vpx_iadst16_8col_sse2(__m128i *const in);
void iadst16_sse2(__m128i *const in0, __m128i *const in1);
void idct32_1024_8x32(const __m128i *const in, __m128i *const out);
void idct32_34_8x32_sse2(const __m128i *const in, __m128i *const out);
void idct32_34_8x32_ssse3(const __m128i *const in, __m128i *const out);

#endif  // VPX_VPX_DSP_X86_INV_TXFM_SSE2_H_