/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "./vpx_dsp_rtcd.h"
#include "vpx_dsp/mips/fwd_txfm_msa.h"

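/* DC-only 8x8 forward transform: sum all 64 input samples with vector
 * horizontal adds and write the total as out[0]; out[1] is zeroed. */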
void vpx_fdct8x8_1_msa(const int16_t *input, tran_low_t *out, int32_t stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v4i32 vec_w;

  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);
  ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);
  ADD2(in0, in2, in4, in6, in0, in4);
  vec_w = __msa_hadd_s_w(in0, in0);
  vec_w += __msa_hadd_s_w(in4, in4);
  out[0] = HADD_SW_S32(vec_w);
  out[1] = 0;
}

#if !CONFIG_VP9_HIGHBITDEPTH
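/* One 16-point forward DCT pass down the columns, eight columns at a
 * time.  Inputs are up-shifted by 2, the even half is handled by
 * FDCT8x16_EVEN, and the odd half is computed below with the cospi
 * coefficient vectors and written to interleaved rows of tmp_ptr. */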
void fdct8x16_1d_column(const int16_t *input, int16_t *tmp_ptr,
                        int32_t src_stride) {
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;
  v8i16 stp21, stp22, stp23, stp24, stp25, stp26, stp30;
  v8i16 stp31, stp32, stp33, stp34, stp35, stp36, stp37;
  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, cnst0, cnst1, cnst4, cnst5;
  v8i16 coeff = { cospi_16_64, -cospi_16_64, cospi_8_64,  cospi_24_64,
                  -cospi_8_64, -cospi_24_64, cospi_12_64, cospi_20_64 };
  v8i16 coeff1 = { cospi_2_64,  cospi_30_64, cospi_14_64, cospi_18_64,
                   cospi_10_64, cospi_22_64, cospi_6_64,  cospi_26_64 };
  v8i16 coeff2 = {
    -cospi_2_64, -cospi_10_64, -cospi_18_64, -cospi_26_64, 0, 0, 0, 0
  };

  LD_SH16(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7, in8, in9,
          in10, in11, in12, in13, in14, in15);
  SLLI_4V(in0, in1, in2, in3, 2);
  SLLI_4V(in4, in5, in6, in7, 2);
  SLLI_4V(in8, in9, in10, in11, 2);
  SLLI_4V(in12, in13, in14, in15, 2);
  ADD4(in0, in15, in1, in14, in2, in13, in3, in12, tmp0, tmp1, tmp2, tmp3);
  ADD4(in4, in11, in5, in10, in6, in9, in7, in8, tmp4, tmp5, tmp6, tmp7);
  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);
  SUB4(in0, in15, in1, in14, in2, in13, in3, in12, in15, in14, in13, in12);
  SUB4(in4, in11, in5, in10, in6, in9, in7, in8, in11, in10, in9, in8);

  tmp_ptr += 16;

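  /* The even-indexed results above land in rows 0, 2, ..., 14 of the
   * 16-wide temporary buffer (the element stride of 32 spans two rows);
   * the odd half computed below is scattered to rows 1, 3, ..., 15
   * through the 0..224 element offsets used by the ST_SH stores. */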
  /* stp 1 */
  ILVL_H2_SH(in10, in13, in11, in12, vec2, vec4);
  ILVR_H2_SH(in10, in13, in11, in12, vec3, vec5);

  cnst4 = __msa_splati_h(coeff, 0);
  stp25 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst4);

  cnst5 = __msa_splati_h(coeff, 1);
  cnst5 = __msa_ilvev_h(cnst5, cnst4);
  stp22 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst5);
  stp24 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst4);
  stp23 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst5);

  /* stp2 */
  BUTTERFLY_4(in8, in9, stp22, stp23, stp30, stp31, stp32, stp33);
  BUTTERFLY_4(in15, in14, stp25, stp24, stp37, stp36, stp35, stp34);
  ILVL_H2_SH(stp36, stp31, stp35, stp32, vec2, vec4);
  ILVR_H2_SH(stp36, stp31, stp35, stp32, vec3, vec5);
  SPLATI_H2_SH(coeff, 2, 3, cnst0, cnst1);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  stp26 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst0);

  cnst0 = __msa_splati_h(coeff, 4);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);
  stp21 = DOT_SHIFT_RIGHT_PCK_H(vec2, vec3, cnst1);

  BUTTERFLY_4(stp30, stp37, stp26, stp21, in8, in15, in14, in9);
  ILVRL_H2_SH(in15, in8, vec1, vec0);
  SPLATI_H2_SH(coeff1, 0, 1, cnst0, cnst1);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);

  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr);

  cnst0 = __msa_splati_h(coeff2, 0);
  cnst0 = __msa_ilvev_h(cnst1, cnst0);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 224);

  ILVRL_H2_SH(in14, in9, vec1, vec0);
  SPLATI_H2_SH(coeff1, 2, 3, cnst0, cnst1);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);

  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
  ST_SH(in8, tmp_ptr + 128);

  cnst1 = __msa_splati_h(coeff2, 2);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 96);

  SPLATI_H2_SH(coeff, 2, 5, cnst0, cnst1);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);

  stp25 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);

  cnst1 = __msa_splati_h(coeff, 3);
  cnst1 = __msa_ilvev_h(cnst0, cnst1);
  stp22 = DOT_SHIFT_RIGHT_PCK_H(vec4, vec5, cnst1);

  /* stp4 */
  ADD2(stp34, stp25, stp33, stp22, in13, in10);

  ILVRL_H2_SH(in13, in10, vec1, vec0);
  SPLATI_H2_SH(coeff1, 4, 5, cnst0, cnst1);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 64);

  cnst0 = __msa_splati_h(coeff2, 1);
  cnst0 = __msa_ilvev_h(cnst1, cnst0);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 160);

  SUB2(stp34, stp25, stp33, stp22, in12, in11);
  ILVRL_H2_SH(in12, in11, vec1, vec0);
  SPLATI_H2_SH(coeff1, 6, 7, cnst0, cnst1);
  cnst1 = __msa_ilvev_h(cnst1, cnst0);

  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst1);
  ST_SH(in8, tmp_ptr + 192);

  cnst1 = __msa_splati_h(coeff2, 3);
  cnst0 = __msa_ilvev_h(cnst0, cnst1);
  in8 = DOT_SHIFT_RIGHT_PCK_H(vec0, vec1, cnst0);
  ST_SH(in8, tmp_ptr + 32);
}

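/* One 16-point forward DCT pass across the rows of the 16x8 block held
 * in 'input': transpose the two 8x8 halves, round each coefficient with
 * (x + 1) >> 2, run the 16-point even/odd butterflies, and transpose
 * the results into 'output'. */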
void fdct16x8_1d_row(int16_t *input, int16_t *output) {
  v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 in8, in9, in10, in11, in12, in13, in14, in15;

  LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7);
  LD_SH8((input + 8), 16, in8, in9, in10, in11, in12, in13, in14, in15);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in8, in9, in10, in11, in12, in13, in14, in15, in8, in9,
                     in10, in11, in12, in13, in14, in15);
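  /* Round each intermediate coefficient with (x + 1) >> 2 before the
   * second-pass butterflies. */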
  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
  ADD4(in4, 1, in5, 1, in6, 1, in7, 1, in4, in5, in6, in7);
  ADD4(in8, 1, in9, 1, in10, 1, in11, 1, in8, in9, in10, in11);
  ADD4(in12, 1, in13, 1, in14, 1, in15, 1, in12, in13, in14, in15);
  SRA_4V(in0, in1, in2, in3, 2);
  SRA_4V(in4, in5, in6, in7, 2);
  SRA_4V(in8, in9, in10, in11, 2);
  SRA_4V(in12, in13, in14, in15, 2);
  BUTTERFLY_16(in0, in1, in2, in3, in4, in5, in6, in7, in8, in9, in10, in11,
               in12, in13, in14, in15, tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6,
               tmp7, in8, in9, in10, in11, in12, in13, in14, in15);
  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, input, 16);
  FDCT8x16_EVEN(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp0, tmp1,
                tmp2, tmp3, tmp4, tmp5, tmp6, tmp7);
  LD_SH8(input, 16, in8, in9, in10, in11, in12, in13, in14, in15);
  FDCT8x16_ODD(in8, in9, in10, in11, in12, in13, in14, in15, in0, in1, in2, in3,
               in4, in5, in6, in7);
  TRANSPOSE8x8_SH_SH(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, tmp0, in0,
                     tmp1, in1, tmp2, in2, tmp3, in3);
  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, output, 16);
  TRANSPOSE8x8_SH_SH(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, tmp4, in4,
                     tmp5, in5, tmp6, in6, tmp7, in7);
  ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16);
}

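/* Full 4x4 forward DCT: two VP9_FDCT4 passes with a transpose between
 * them, followed by a final (x + 1) >> 2 rounding shift. */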
void vpx_fdct4x4_msa(const int16_t *input, int16_t *output,
                     int32_t src_stride) {
  v8i16 in0, in1, in2, in3;

  LD_SH4(input, src_stride, in0, in1, in2, in3);

  /* fdct4 pre-process */
  {
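    /* Add 1 to the first element when input[0] is nonzero, after the
     * << 4 up-shift, matching the scalar reference behavior
     * (input[0] * 16 plus a +1 correction for a nonzero DC sample):
     * 'mask' isolates lane 0, and 'vec' is 1 in that lane only when
     * the original value was nonzero. */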
    v8i16 vec, mask;
    v16i8 zero = { 0 };
    v16i8 one = __msa_ldi_b(1);

    mask = (v8i16)__msa_sldi_b(zero, one, 15);
    SLLI_4V(in0, in1, in2, in3, 4);
    vec = __msa_ceqi_h(in0, 0);
    vec = vec ^ 255;
    vec = mask & vec;
    in0 += vec;
  }

  VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
  VP9_FDCT4(in0, in1, in2, in3, in0, in1, in2, in3);
  TRANSPOSE4x4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
  ADD4(in0, 1, in1, 1, in2, 1, in3, 1, in0, in1, in2, in3);
  SRA_4V(in0, in1, in2, in3, 2);
  PCKEV_D2_SH(in1, in0, in3, in2, in0, in2);
  ST_SH2(in0, in2, output, 8);
}

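/* Full 8x8 forward DCT: inputs up-shifted by 2, two VP9_FDCT8 passes
 * with transposes, then SRLI_AVE_S_4V_H applies the final rounding
 * down-shift before the coefficients are stored. */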
void vpx_fdct8x8_msa(const int16_t *input, int16_t *output,
                     int32_t src_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);
  SLLI_4V(in0, in1, in2, in3, 2);
  SLLI_4V(in4, in5, in6, in7, 2);
  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
            in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  VP9_FDCT8(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3, in4,
            in5, in6, in7);
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  SRLI_AVE_S_4V_H(in0, in1, in2, in3, in4, in5, in6, in7);
  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);
}

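/* Full 16x16 forward DCT, split into a column pass over two 8-column
 * stripes (into a 32-byte-aligned temporary buffer) and a row pass over
 * two 8-row stripes of that buffer. */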
void vpx_fdct16x16_msa(const int16_t *input, int16_t *output,
                       int32_t src_stride) {
  int32_t i;
  DECLARE_ALIGNED(32, int16_t, tmp_buf[16 * 16]);

  /* column transform */
  for (i = 0; i < 2; ++i) {
    fdct8x16_1d_column((input + 8 * i), (&tmp_buf[0] + 8 * i), src_stride);
  }

  /* row transform */
  for (i = 0; i < 2; ++i) {
    fdct16x8_1d_row((&tmp_buf[0] + (128 * i)), (output + (128 * i)));
  }
}

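/* DC-only 16x16 forward transform: accumulate all 256 input samples in
 * a v4i32 accumulator, one 16-sample row per LD_SH2, then write
 * (sum >> 1) as the single output coefficient. */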
void vpx_fdct16x16_1_msa(const int16_t *input, int16_t *out, int32_t stride) {
  int sum, i;
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v4i32 vec_w = { 0 };

  for (i = 0; i < 4; ++i) {
    LD_SH2(input, 8, in0, in1);
    input += stride;
    LD_SH2(input, 8, in2, in3);
    input += stride;
    LD_SH2(input, 8, in4, in5);
    input += stride;
    LD_SH2(input, 8, in6, in7);
    input += stride;
    ADD4(in0, in1, in2, in3, in4, in5, in6, in7, in0, in2, in4, in6);
    ADD2(in0, in2, in4, in6, in0, in4);
    vec_w += __msa_hadd_s_w(in0, in0);
    vec_w += __msa_hadd_s_w(in4, in4);
  }

  sum = HADD_SW_S32(vec_w);
  out[0] = (int16_t)(sum >> 1);
}
#endif  // !CONFIG_VP9_HIGHBITDEPTH