/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include <emmintrin.h>  // SSE2

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/x86/highbd_inv_txfm_sse2.h"
#include "vpx_dsp/x86/inv_txfm_sse2.h"
#include "vpx_dsp/x86/transpose_sse2.h"

dct_const_round_shift_4_sse2(const __m128i in0,const __m128i in1)18 static INLINE __m128i dct_const_round_shift_4_sse2(const __m128i in0,
19                                                    const __m128i in1) {
20   const __m128i t0 = _mm_unpacklo_epi32(in0, in1);  // 0, 1
21   const __m128i t1 = _mm_unpackhi_epi32(in0, in1);  // 2, 3
22   const __m128i t2 = _mm_unpacklo_epi64(t0, t1);    // 0, 1, 2, 3
23   return dct_const_round_shift_sse2(t2);
24 }
25 
// 4-point high-bitdepth inverse DCT, "small" path: the caller guarantees
// the coefficients are narrow enough that the lower 32 bits of every
// intermediate product are meaningful, so unsigned 32x32->64 multiplies
// suffice.  io[0..3] holds four rows of four 32-bit coefficients; they are
// transposed in place, transformed, and written back to io.
static INLINE void highbd_idct4_small_sse2(__m128i *const io) {
  // Constants placed in the even 32-bit lanes only, matching the lanes
  // that _mm_mul_epu32() reads.
  const __m128i cospi_p16_p16 = _mm_setr_epi32(cospi_16_64, 0, cospi_16_64, 0);
  const __m128i cospi_p08_p08 = _mm_setr_epi32(cospi_8_64, 0, cospi_8_64, 0);
  const __m128i cospi_p24_p24 = _mm_setr_epi32(cospi_24_64, 0, cospi_24_64, 0);
  __m128i temp1[4], temp2[4], step[4];

  transpose_32bit_4x4(io, io);

  // Note: There is no 32-bit signed multiply SIMD instruction in SSE2.
  //       _mm_mul_epu32() is used which can only guarantee the lower 32-bit
  //       (signed) result is meaningful, which is enough in this function.

  // stage 1
  temp1[0] = _mm_add_epi32(io[0], io[2]);             // input[0] + input[2]
  temp2[0] = _mm_sub_epi32(io[0], io[2]);             // input[0] - input[2]
  // Shift the odd lanes down into even positions so _mm_mul_epu32() can
  // multiply them; results are recombined by dct_const_round_shift_4_sse2().
  temp1[1] = _mm_srli_si128(temp1[0], 4);             // 1, 3
  temp2[1] = _mm_srli_si128(temp2[0], 4);             // 1, 3
  temp1[0] = _mm_mul_epu32(temp1[0], cospi_p16_p16);  // ([0] + [2])*cospi_16_64
  temp1[1] = _mm_mul_epu32(temp1[1], cospi_p16_p16);  // ([0] + [2])*cospi_16_64
  temp2[0] = _mm_mul_epu32(temp2[0], cospi_p16_p16);  // ([0] - [2])*cospi_16_64
  temp2[1] = _mm_mul_epu32(temp2[1], cospi_p16_p16);  // ([0] - [2])*cospi_16_64
  step[0] = dct_const_round_shift_4_sse2(temp1[0], temp1[1]);
  step[1] = dct_const_round_shift_4_sse2(temp2[0], temp2[1]);

  // temp1[3] and temp2[3] first hold the odd lanes of io[1] and io[3] as
  // scratch; they are overwritten with products once both uses are done.
  temp1[3] = _mm_srli_si128(io[1], 4);
  temp2[3] = _mm_srli_si128(io[3], 4);
  temp1[0] = _mm_mul_epu32(io[1], cospi_p24_p24);     // input[1] * cospi_24_64
  temp1[1] = _mm_mul_epu32(temp1[3], cospi_p24_p24);  // input[1] * cospi_24_64
  temp2[0] = _mm_mul_epu32(io[1], cospi_p08_p08);     // input[1] * cospi_8_64
  temp2[1] = _mm_mul_epu32(temp1[3], cospi_p08_p08);  // input[1] * cospi_8_64
  temp1[2] = _mm_mul_epu32(io[3], cospi_p08_p08);     // input[3] * cospi_8_64
  temp1[3] = _mm_mul_epu32(temp2[3], cospi_p08_p08);  // input[3] * cospi_8_64
  temp2[2] = _mm_mul_epu32(io[3], cospi_p24_p24);     // input[3] * cospi_24_64
  temp2[3] = _mm_mul_epu32(temp2[3], cospi_p24_p24);  // input[3] * cospi_24_64
  temp1[0] = _mm_sub_epi64(temp1[0], temp1[2]);  // [1]*cospi_24 - [3]*cospi_8
  temp1[1] = _mm_sub_epi64(temp1[1], temp1[3]);  // [1]*cospi_24 - [3]*cospi_8
  temp2[0] = _mm_add_epi64(temp2[0], temp2[2]);  // [1]*cospi_8 + [3]*cospi_24
  temp2[1] = _mm_add_epi64(temp2[1], temp2[3]);  // [1]*cospi_8 + [3]*cospi_24
  step[2] = dct_const_round_shift_4_sse2(temp1[0], temp1[1]);
  step[3] = dct_const_round_shift_4_sse2(temp2[0], temp2[1]);

  // stage 2
  io[0] = _mm_add_epi32(step[0], step[3]);  // step[0] + step[3]
  io[1] = _mm_add_epi32(step[1], step[2]);  // step[1] + step[2]
  io[2] = _mm_sub_epi32(step[1], step[2]);  // step[1] - step[2]
  io[3] = _mm_sub_epi32(step[0], step[3]);  // step[0] - step[3]
}
73 
highbd_idct4_large_sse2(__m128i * const io)74 static INLINE void highbd_idct4_large_sse2(__m128i *const io) {
75   __m128i step[4];
76 
77   transpose_32bit_4x4(io, io);
78 
79   // stage 1
80   highbd_butterfly_cospi16_sse2(io[0], io[2], &step[0], &step[1]);
81   highbd_butterfly_sse2(io[1], io[3], cospi_24_64, cospi_8_64, &step[2],
82                         &step[3]);
83 
84   // stage 2
85   io[0] = _mm_add_epi32(step[0], step[3]);  // step[0] + step[3]
86   io[1] = _mm_add_epi32(step[1], step[2]);  // step[1] + step[2]
87   io[2] = _mm_sub_epi32(step[1], step[2]);  // step[1] - step[2]
88   io[3] = _mm_sub_epi32(step[0], step[3]);  // step[0] - step[3]
89 }
90 
vpx_highbd_idct4x4_16_add_sse2(const tran_low_t * input,uint16_t * dest,int stride,int bd)91 void vpx_highbd_idct4x4_16_add_sse2(const tran_low_t *input, uint16_t *dest,
92                                     int stride, int bd) {
93   int16_t max = 0, min = 0;
94   __m128i io[4], io_short[2];
95 
96   io[0] = _mm_load_si128((const __m128i *)(input + 0));
97   io[1] = _mm_load_si128((const __m128i *)(input + 4));
98   io[2] = _mm_load_si128((const __m128i *)(input + 8));
99   io[3] = _mm_load_si128((const __m128i *)(input + 12));
100 
101   io_short[0] = _mm_packs_epi32(io[0], io[1]);
102   io_short[1] = _mm_packs_epi32(io[2], io[3]);
103 
104   if (bd != 8) {
105     __m128i max_input, min_input;
106 
107     max_input = _mm_max_epi16(io_short[0], io_short[1]);
108     min_input = _mm_min_epi16(io_short[0], io_short[1]);
109     max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 8));
110     min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 8));
111     max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 4));
112     min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 4));
113     max_input = _mm_max_epi16(max_input, _mm_srli_si128(max_input, 2));
114     min_input = _mm_min_epi16(min_input, _mm_srli_si128(min_input, 2));
115     max = _mm_extract_epi16(max_input, 0);
116     min = _mm_extract_epi16(min_input, 0);
117   }
118 
119   if (bd == 8 || (max < 4096 && min >= -4096)) {
120     idct4_sse2(io_short);
121     idct4_sse2(io_short);
122     io_short[0] = _mm_add_epi16(io_short[0], _mm_set1_epi16(8));
123     io_short[1] = _mm_add_epi16(io_short[1], _mm_set1_epi16(8));
124     io[0] = _mm_srai_epi16(io_short[0], 4);
125     io[1] = _mm_srai_epi16(io_short[1], 4);
126   } else {
127     if (max < 32767 && min > -32768) {
128       highbd_idct4_small_sse2(io);
129       highbd_idct4_small_sse2(io);
130     } else {
131       highbd_idct4_large_sse2(io);
132       highbd_idct4_large_sse2(io);
133     }
134     io[0] = wraplow_16bit_shift4(io[0], io[1], _mm_set1_epi32(8));
135     io[1] = wraplow_16bit_shift4(io[2], io[3], _mm_set1_epi32(8));
136   }
137 
138   recon_and_store_4x4(io, dest, stride, bd);
139 }
140 
vpx_highbd_idct4x4_1_add_sse2(const tran_low_t * input,uint16_t * dest,int stride,int bd)141 void vpx_highbd_idct4x4_1_add_sse2(const tran_low_t *input, uint16_t *dest,
142                                    int stride, int bd) {
143   int a1, i;
144   tran_low_t out;
145   __m128i dc, d;
146 
147   out = HIGHBD_WRAPLOW(
148       dct_const_round_shift(input[0] * (tran_high_t)cospi_16_64), bd);
149   out =
150       HIGHBD_WRAPLOW(dct_const_round_shift(out * (tran_high_t)cospi_16_64), bd);
151   a1 = ROUND_POWER_OF_TWO(out, 4);
152   dc = _mm_set1_epi16(a1);
153 
154   for (i = 0; i < 4; ++i) {
155     d = _mm_loadl_epi64((const __m128i *)dest);
156     d = add_clamp(d, dc, bd);
157     _mm_storel_epi64((__m128i *)dest, d);
158     dest += stride;
159   }
160 }
161