/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#ifndef VPX_VPX_DSP_X86_HIGHBD_INV_TXFM_SSE2_H_
#define VPX_VPX_DSP_X86_HIGHBD_INV_TXFM_SSE2_H_

#include <assert.h>     // assert()
#include <emmintrin.h>  // SSE2

#include "./vpx_config.h"
#include "vpx/vpx_integer.h"
#include "vpx_dsp/inv_txfm.h"
#include "vpx_dsp/x86/transpose_sse2.h"
#include "vpx_dsp/x86/txfm_common_sse2.h"

// Note: There is no 64-bit bit-level shifting SIMD instruction. All
// coefficients are left shifted by 2, so that dct_const_round_shift() can be
// done by right shifting 2 bytes.

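// A sketch of the arithmetic behind that pre-shift, assuming libvpx's
// DCT_CONST_BITS == 14 (and thus DCT_CONST_ROUNDING == 1 << 13):
//   dct_const_round_shift(x) = (x + DCT_CONST_ROUNDING) >> DCT_CONST_BITS
// With every coefficient pre-multiplied by 4, this becomes
//   (4 * x + (DCT_CONST_ROUNDING << 2)) >> 16
// and a 16-bit right shift is a 2-byte shift, which _mm_srli_si128() can do.
// The byte shift leaves each rounded 64-bit product in 32-bit elements 0 and
// 2 of the register, which pack_4() below then gathers.
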
static INLINE void extend_64bit(const __m128i in,
                                __m128i *const out /*out[2]*/) {
  out[0] = _mm_unpacklo_epi32(in, in);  // 0, 0, 1, 1
  out[1] = _mm_unpackhi_epi32(in, in);  // 2, 2, 3, 3
}

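// The wraplow helpers compute ROUND_POWER_OF_TWO(x, 4) or
// ROUND_POWER_OF_TWO(x, 5) on 32-bit lanes, then saturating-pack the two
// results into one register of 16-bit values; the caller supplies the
// matching rounding constant (highbd_idct8x8_final_round() below passes
// 1 << 4 for the shift-by-5 variant).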
static INLINE __m128i wraplow_16bit_shift4(const __m128i in0, const __m128i in1,
                                           const __m128i rounding) {
  __m128i temp[2];
  temp[0] = _mm_add_epi32(in0, rounding);
  temp[1] = _mm_add_epi32(in1, rounding);
  temp[0] = _mm_srai_epi32(temp[0], 4);
  temp[1] = _mm_srai_epi32(temp[1], 4);
  return _mm_packs_epi32(temp[0], temp[1]);
}

static INLINE __m128i wraplow_16bit_shift5(const __m128i in0, const __m128i in1,
                                           const __m128i rounding) {
  __m128i temp[2];
  temp[0] = _mm_add_epi32(in0, rounding);
  temp[1] = _mm_add_epi32(in1, rounding);
  temp[0] = _mm_srai_epi32(temp[0], 5);
  temp[1] = _mm_srai_epi32(temp[1], 5);
  return _mm_packs_epi32(temp[0], temp[1]);
}

static INLINE __m128i dct_const_round_shift_64bit(const __m128i in) {
  const __m128i t =
      _mm_add_epi64(in, pair_set_epi32(DCT_CONST_ROUNDING << 2, 0));
  return _mm_srli_si128(t, 2);
}

static INLINE __m128i pack_4(const __m128i in0, const __m128i in1) {
  const __m128i t0 = _mm_unpacklo_epi32(in0, in1);  // 0, 2
  const __m128i t1 = _mm_unpackhi_epi32(in0, in1);  // 1, 3
  return _mm_unpacklo_epi32(t0, t1);                // 0, 1, 2, 3
}

static INLINE void abs_extend_64bit_sse2(const __m128i in,
                                         __m128i *const out /*out[2]*/,
                                         __m128i *const sign /*sign[2]*/) {
  sign[0] = _mm_srai_epi32(in, 31);
  out[0] = _mm_xor_si128(in, sign[0]);
  out[0] = _mm_sub_epi32(out[0], sign[0]);
  sign[1] = _mm_unpackhi_epi32(sign[0], sign[0]);  // 64-bit sign of 2, 3
  sign[0] = _mm_unpacklo_epi32(sign[0], sign[0]);  // 64-bit sign of 0, 1
  out[1] = _mm_unpackhi_epi32(out[0], out[0]);     // 2, 3
  out[0] = _mm_unpacklo_epi32(out[0], out[0]);     // 0, 1
}

// Note: cospi must be non-negative.
static INLINE __m128i multiply_apply_sign_sse2(const __m128i in,
                                               const __m128i sign,
                                               const __m128i cospi) {
  __m128i out = _mm_mul_epu32(in, cospi);
  out = _mm_xor_si128(out, sign);
  return _mm_sub_epi64(out, sign);
}

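// Why the abs/sign split above: SSE2 has only an unsigned 32x32 -> 64-bit
// multiply (_mm_mul_epu32; the signed _mm_mul_epi32 requires SSE4.1). So the
// inputs are made non-negative first and the sign is restored after the
// multiply using the two's complement identity -x == (x ^ -1) + 1, here in
// the form (x ^ sign) - sign with sign being 0 or -1.
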
// Note: c must be non-negative.
static INLINE __m128i multiplication_round_shift_sse2(
    const __m128i *const in /*in[2]*/, const __m128i *const sign /*sign[2]*/,
    const int c) {
  const __m128i pair_c = pair_set_epi32(c << 2, 0);
  __m128i t0, t1;

  assert(c >= 0);
  t0 = multiply_apply_sign_sse2(in[0], sign[0], pair_c);
  t1 = multiply_apply_sign_sse2(in[1], sign[1], pair_c);
  t0 = dct_const_round_shift_64bit(t0);
  t1 = dct_const_round_shift_64bit(t1);

  return pack_4(t0, t1);
}

// Note: c must be non-negative.
static INLINE __m128i multiplication_neg_round_shift_sse2(
    const __m128i *const in /*in[2]*/, const __m128i *const sign /*sign[2]*/,
    const int c) {
  const __m128i pair_c = pair_set_epi32(c << 2, 0);
  __m128i t0, t1;

  assert(c >= 0);
  t0 = multiply_apply_sign_sse2(in[0], sign[0], pair_c);
  t1 = multiply_apply_sign_sse2(in[1], sign[1], pair_c);
  t0 = _mm_sub_epi64(_mm_setzero_si128(), t0);
  t1 = _mm_sub_epi64(_mm_setzero_si128(), t1);
  t0 = dct_const_round_shift_64bit(t0);
  t1 = dct_const_round_shift_64bit(t1);

  return pack_4(t0, t1);
}

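// highbd_butterfly_sse2() is the inverse-transform butterfly on 32-bit
// coefficients:
//   *out0 = dct_const_round_shift(in0 * c0 - in1 * c1)
//   *out1 = dct_const_round_shift(in0 * c1 + in1 * c0)
// evaluated with 64-bit intermediates via the helpers above.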
// Note: c0 and c1 must be non-negative.
static INLINE void highbd_butterfly_sse2(const __m128i in0, const __m128i in1,
                                         const int c0, const int c1,
                                         __m128i *const out0,
                                         __m128i *const out1) {
  const __m128i pair_c0 = pair_set_epi32(c0 << 2, 0);
  const __m128i pair_c1 = pair_set_epi32(c1 << 2, 0);
  __m128i temp1[4], temp2[4], sign1[2], sign2[2];

  assert(c0 >= 0);
  assert(c1 >= 0);
  abs_extend_64bit_sse2(in0, temp1, sign1);
  abs_extend_64bit_sse2(in1, temp2, sign2);
  temp1[2] = multiply_apply_sign_sse2(temp1[0], sign1[0], pair_c1);
  temp1[3] = multiply_apply_sign_sse2(temp1[1], sign1[1], pair_c1);
  temp1[0] = multiply_apply_sign_sse2(temp1[0], sign1[0], pair_c0);
  temp1[1] = multiply_apply_sign_sse2(temp1[1], sign1[1], pair_c0);
  temp2[2] = multiply_apply_sign_sse2(temp2[0], sign2[0], pair_c0);
  temp2[3] = multiply_apply_sign_sse2(temp2[1], sign2[1], pair_c0);
  temp2[0] = multiply_apply_sign_sse2(temp2[0], sign2[0], pair_c1);
  temp2[1] = multiply_apply_sign_sse2(temp2[1], sign2[1], pair_c1);
  temp1[0] = _mm_sub_epi64(temp1[0], temp2[0]);
  temp1[1] = _mm_sub_epi64(temp1[1], temp2[1]);
  temp2[0] = _mm_add_epi64(temp1[2], temp2[2]);
  temp2[1] = _mm_add_epi64(temp1[3], temp2[3]);
  temp1[0] = dct_const_round_shift_64bit(temp1[0]);
  temp1[1] = dct_const_round_shift_64bit(temp1[1]);
  temp2[0] = dct_const_round_shift_64bit(temp2[0]);
  temp2[1] = dct_const_round_shift_64bit(temp2[1]);
  *out0 = pack_4(temp1[0], temp1[1]);
  *out1 = pack_4(temp2[0], temp2[1]);
}

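// The "partial" butterflies below are the single-input special cases of the
// butterfly above: with in1 == 0 it reduces to
//   *out0 = dct_const_round_shift(in * c0)
//   *out1 = dct_const_round_shift(in * c1)
// and with in0 == 0 (the _neg variant, in taking the place of in1) to
//   *out0 = dct_const_round_shift(-in * c1)
//   *out1 = dct_const_round_shift(in * c0)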
// Note: c0 and c1 must be non-negative.
static INLINE void highbd_partial_butterfly_sse2(const __m128i in, const int c0,
                                                 const int c1,
                                                 __m128i *const out0,
                                                 __m128i *const out1) {
  __m128i temp[2], sign[2];

  assert(c0 >= 0);
  assert(c1 >= 0);
  abs_extend_64bit_sse2(in, temp, sign);
  *out0 = multiplication_round_shift_sse2(temp, sign, c0);
  *out1 = multiplication_round_shift_sse2(temp, sign, c1);
}

// Note: c0 and c1 must be non-negative.
static INLINE void highbd_partial_butterfly_neg_sse2(const __m128i in,
                                                     const int c0, const int c1,
                                                     __m128i *const out0,
                                                     __m128i *const out1) {
  __m128i temp[2], sign[2];

  assert(c0 >= 0);
  assert(c1 >= 0);
  abs_extend_64bit_sse2(in, temp, sign);
  *out0 = multiplication_neg_round_shift_sse2(temp, sign, c1);
  *out1 = multiplication_round_shift_sse2(temp, sign, c0);
}

static INLINE void highbd_butterfly_cospi16_sse2(const __m128i in0,
                                                 const __m128i in1,
                                                 __m128i *const out0,
                                                 __m128i *const out1) {
  __m128i temp1[2], temp2, sign[2];

  temp2 = _mm_add_epi32(in0, in1);
  abs_extend_64bit_sse2(temp2, temp1, sign);
  *out0 = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
  temp2 = _mm_sub_epi32(in0, in1);
  abs_extend_64bit_sse2(temp2, temp1, sign);
  *out1 = multiplication_round_shift_sse2(temp1, sign, cospi_16_64);
}

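// A small worked example of the mirrored add/sub pattern used below
// (size == 4 shown for illustration only):
//   out = { in[0] + in[3], in[1] + in[2], in[1] - in[2], in[0] - in[3] }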
// Only does the addition and subtraction butterfly; size = 16 or 32.
static INLINE void highbd_add_sub_butterfly(const __m128i *in, __m128i *out,
                                            int size) {
  int i = 0;
  const int num = size >> 1;
  const int bound = size - 1;
  while (i < num) {
    out[i] = _mm_add_epi32(in[i], in[bound - i]);
    out[bound - i] = _mm_sub_epi32(in[i], in[bound - i]);
    i++;
  }
}

static INLINE void highbd_idct8_stage4(const __m128i *const in,
                                       __m128i *const out) {
  out[0] = _mm_add_epi32(in[0], in[7]);
  out[1] = _mm_add_epi32(in[1], in[6]);
  out[2] = _mm_add_epi32(in[2], in[5]);
  out[3] = _mm_add_epi32(in[3], in[4]);
  out[4] = _mm_sub_epi32(in[3], in[4]);
  out[5] = _mm_sub_epi32(in[2], in[5]);
  out[6] = _mm_sub_epi32(in[1], in[6]);
  out[7] = _mm_sub_epi32(in[0], in[7]);
}

static INLINE void highbd_idct8x8_final_round(__m128i *const io) {
  io[0] = wraplow_16bit_shift5(io[0], io[8], _mm_set1_epi32(16));
  io[1] = wraplow_16bit_shift5(io[1], io[9], _mm_set1_epi32(16));
  io[2] = wraplow_16bit_shift5(io[2], io[10], _mm_set1_epi32(16));
  io[3] = wraplow_16bit_shift5(io[3], io[11], _mm_set1_epi32(16));
  io[4] = wraplow_16bit_shift5(io[4], io[12], _mm_set1_epi32(16));
  io[5] = wraplow_16bit_shift5(io[5], io[13], _mm_set1_epi32(16));
  io[6] = wraplow_16bit_shift5(io[6], io[14], _mm_set1_epi32(16));
  io[7] = wraplow_16bit_shift5(io[7], io[15], _mm_set1_epi32(16));
}

static INLINE void highbd_idct16_4col_stage7(const __m128i *const in,
                                             __m128i *const out) {
  out[0] = _mm_add_epi32(in[0], in[15]);
  out[1] = _mm_add_epi32(in[1], in[14]);
  out[2] = _mm_add_epi32(in[2], in[13]);
  out[3] = _mm_add_epi32(in[3], in[12]);
  out[4] = _mm_add_epi32(in[4], in[11]);
  out[5] = _mm_add_epi32(in[5], in[10]);
  out[6] = _mm_add_epi32(in[6], in[9]);
  out[7] = _mm_add_epi32(in[7], in[8]);
  out[8] = _mm_sub_epi32(in[7], in[8]);
  out[9] = _mm_sub_epi32(in[6], in[9]);
  out[10] = _mm_sub_epi32(in[5], in[10]);
  out[11] = _mm_sub_epi32(in[4], in[11]);
  out[12] = _mm_sub_epi32(in[3], in[12]);
  out[13] = _mm_sub_epi32(in[2], in[13]);
  out[14] = _mm_sub_epi32(in[1], in[14]);
  out[15] = _mm_sub_epi32(in[0], in[15]);
}

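// add_clamp() reconstructs pixels: a saturating 16-bit add of residual and
// prediction, clamped to the valid pixel range [0, (1 << bd) - 1] for bit
// depth bd.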
static INLINE __m128i add_clamp(const __m128i in0, const __m128i in1,
                                const int bd) {
  const __m128i zero = _mm_set1_epi16(0);
  // Faster than _mm_set1_epi16((1 << bd) - 1).
  const __m128i one = _mm_set1_epi16(1);
  const __m128i max = _mm_sub_epi16(_mm_slli_epi16(one, bd), one);
  __m128i d;

  d = _mm_adds_epi16(in0, in1);
  d = _mm_max_epi16(d, zero);
  d = _mm_min_epi16(d, max);

  return d;
}

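// DC-only path: when only input[0] is nonzero, each 1-D transform pass
// reduces to a multiply by cospi_16_64 with rounding, so the whole inverse
// transform collapses to one constant a1 added (with clamping) to every
// pixel.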
static INLINE void highbd_idct_1_add_kernel(const tran_low_t *input,
                                            uint16_t *dest, int stride, int bd,
                                            const int size) {
  int a1, i, j;
  tran_low_t out;
  __m128i dc, d;

  out = HIGHBD_WRAPLOW(
      dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
  out =
      HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
  a1 = ROUND_POWER_OF_TWO(out, (size == 8) ? 5 : 6);
  dc = _mm_set1_epi16(a1);

  for (i = 0; i < size; ++i) {
    for (j = 0; j < size; j += 8) {
      d = _mm_load_si128((const __m128i *)(&dest[j]));
      d = add_clamp(d, dc, bd);
      _mm_store_si128((__m128i *)(&dest[j]), d);
    }
    dest += stride;
  }
}

static INLINE void recon_and_store_4(const __m128i in, uint16_t *const dest,
                                     const int bd) {
  __m128i d;

  d = _mm_loadl_epi64((const __m128i *)dest);
  d = add_clamp(d, in, bd);
  _mm_storel_epi64((__m128i *)dest, d);
}

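// recon_and_store_4x2() reconstructs two 4-pixel rows at once: loadl/loadh
// place row 0 in the low and row 1 in the high 64 bits of one register, so a
// single add_clamp() covers both rows.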
static INLINE void recon_and_store_4x2(const __m128i in, uint16_t *const dest,
                                       const int stride, const int bd) {
  __m128i d;

  d = _mm_loadl_epi64((const __m128i *)(dest + 0 * stride));
  d = _mm_castps_si128(
      _mm_loadh_pi(_mm_castsi128_ps(d), (const __m64 *)(dest + 1 * stride)));
  d = add_clamp(d, in, bd);
  _mm_storel_epi64((__m128i *)(dest + 0 * stride), d);
  _mm_storeh_pi((__m64 *)(dest + 1 * stride), _mm_castsi128_ps(d));
}

static INLINE void recon_and_store_4x4(const __m128i *const in, uint16_t *dest,
                                       const int stride, const int bd) {
  recon_and_store_4x2(in[0], dest, stride, bd);
  dest += 2 * stride;
  recon_and_store_4x2(in[1], dest, stride, bd);
}

static INLINE void recon_and_store_8(const __m128i in, uint16_t **const dest,
                                     const int stride, const int bd) {
  __m128i d;

  d = _mm_load_si128((const __m128i *)(*dest));
  d = add_clamp(d, in, bd);
  _mm_store_si128((__m128i *)(*dest), d);
  *dest += stride;
}

static INLINE void recon_and_store_8x8(const __m128i *const in, uint16_t *dest,
                                       const int stride, const int bd) {
  recon_and_store_8(in[0], &dest, stride, bd);
  recon_and_store_8(in[1], &dest, stride, bd);
  recon_and_store_8(in[2], &dest, stride, bd);
  recon_and_store_8(in[3], &dest, stride, bd);
  recon_and_store_8(in[4], &dest, stride, bd);
  recon_and_store_8(in[5], &dest, stride, bd);
  recon_and_store_8(in[6], &dest, stride, bd);
  recon_and_store_8(in[7], &dest, stride, bd);
}

static INLINE __m128i load_pack_8_32bit(const tran_low_t *const input) {
  const __m128i t0 = _mm_load_si128((const __m128i *)(input + 0));
  const __m128i t1 = _mm_load_si128((const __m128i *)(input + 4));
  return _mm_packs_epi32(t0, t1);
}

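// Loads an 8x8 block of 32-bit coefficients, saturating-packs each row to
// 16 bits, and transposes the result in place. (The pack only preserves
// values when the coefficients fit in 16 bits, which the caller is assumed
// to have established.)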
static INLINE void highbd_load_pack_transpose_32bit_8x8(const tran_low_t *input,
                                                        const int stride,
                                                        __m128i *const in) {
  in[0] = load_pack_8_32bit(input + 0 * stride);
  in[1] = load_pack_8_32bit(input + 1 * stride);
  in[2] = load_pack_8_32bit(input + 2 * stride);
  in[3] = load_pack_8_32bit(input + 3 * stride);
  in[4] = load_pack_8_32bit(input + 4 * stride);
  in[5] = load_pack_8_32bit(input + 5 * stride);
  in[6] = load_pack_8_32bit(input + 6 * stride);
  in[7] = load_pack_8_32bit(input + 7 * stride);
  transpose_16bit_8x8(in, in);
}

static INLINE void highbd_load_transpose_32bit_8x4(const tran_low_t *input,
                                                   const int stride,
                                                   __m128i *in) {
  in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride + 0));
  in[1] = _mm_load_si128((const __m128i *)(input + 0 * stride + 4));
  in[2] = _mm_load_si128((const __m128i *)(input + 1 * stride + 0));
  in[3] = _mm_load_si128((const __m128i *)(input + 1 * stride + 4));
  in[4] = _mm_load_si128((const __m128i *)(input + 2 * stride + 0));
  in[5] = _mm_load_si128((const __m128i *)(input + 2 * stride + 4));
  in[6] = _mm_load_si128((const __m128i *)(input + 3 * stride + 0));
  in[7] = _mm_load_si128((const __m128i *)(input + 3 * stride + 4));
  transpose_32bit_8x4(in, in);
}

static INLINE void highbd_load_transpose_32bit_4x4(const tran_low_t *input,
                                                   const int stride,
                                                   __m128i *in) {
  in[0] = _mm_load_si128((const __m128i *)(input + 0 * stride));
  in[1] = _mm_load_si128((const __m128i *)(input + 1 * stride));
  in[2] = _mm_load_si128((const __m128i *)(input + 2 * stride));
  in[3] = _mm_load_si128((const __m128i *)(input + 3 * stride));
  transpose_32bit_4x4(in, in);
}

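// Final-round write-out helpers for the larger transforms: add 1 << 5 and
// shift right by 6, i.e. ROUND_POWER_OF_TWO(x, 6), then clamp into dest.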
static INLINE void highbd_write_buffer_8(uint16_t *dest, const __m128i in,
                                         const int bd) {
  const __m128i final_rounding = _mm_set1_epi16(1 << 5);
  __m128i out;

  out = _mm_adds_epi16(in, final_rounding);
  out = _mm_srai_epi16(out, 6);
  recon_and_store_8(out, &dest, 0, bd);
}

static INLINE void highbd_write_buffer_4(uint16_t *const dest, const __m128i in,
                                         const int bd) {
  const __m128i final_rounding = _mm_set1_epi32(1 << 5);
  __m128i out;

  out = _mm_add_epi32(in, final_rounding);
  out = _mm_srai_epi32(out, 6);
  out = _mm_packs_epi32(out, out);
  recon_and_store_4(out, dest, bd);
}

#endif  // VPX_VPX_DSP_X86_HIGHBD_INV_TXFM_SSE2_H_