• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */
10 
11 #include "./vpx_dsp_rtcd.h"
12 #include "vpx_dsp/mips/inv_txfm_msa.h"
13 
vpx_idct8x8_64_add_msa(const int16_t * input,uint8_t * dst,int32_t dst_stride)14 void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
15                             int32_t dst_stride) {
16   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
17 
18   /* load vector elements of 8x8 block */
19   LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
20 
21   /* rows transform */
22   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
23                      in4, in5, in6, in7);
24   /* 1D idct8x8 */
25   VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
26                  in4, in5, in6, in7);
27   /* columns transform */
28   TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
29                      in4, in5, in6, in7);
30   /* 1D idct8x8 */
31   VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
32                  in4, in5, in6, in7);
33   /* final rounding (add 2^4, divide by 2^5) and shift */
34   SRARI_H4_SH(in0, in1, in2, in3, 5);
35   SRARI_H4_SH(in4, in5, in6, in7, 5);
36   /* add block and store 8x8 */
37   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
38   dst += (4 * dst_stride);
39   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
40 }
41 
vpx_idct8x8_12_add_msa(const int16_t * input,uint8_t * dst,int32_t dst_stride)42 void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
43                             int32_t dst_stride) {
44   v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
45   v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
46   v4i32 tmp0, tmp1, tmp2, tmp3;
47   v8i16 zero = { 0 };
48 
49   /* load vector elements of 8x8 block */
50   LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
51   TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
52 
53   /* stage1 */
54   ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
55   k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
56   k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
57   k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
58   k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
59   DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
60   SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
61   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
62   PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
63   BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);
64 
65   /* stage2 */
66   ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
67   k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
68   k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
69   k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
70   k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
71   DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
72   SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
73   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
74   PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
75   BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);
76 
77   /* stage3 */
78   s0 = __msa_ilvr_h(s6, s5);
79 
80   k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
81   DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
82   SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
83   PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);
84 
85   /* stage4 */
86   BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7, in0, in1, in2, in3, in4, in5, in6,
87               in7);
88   TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
89                      in4, in5, in6, in7);
90   VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
91                  in4, in5, in6, in7);
92 
93   /* final rounding (add 2^4, divide by 2^5) and shift */
94   SRARI_H4_SH(in0, in1, in2, in3, 5);
95   SRARI_H4_SH(in4, in5, in6, in7, 5);
96 
97   /* add block and store 8x8 */
98   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
99   dst += (4 * dst_stride);
100   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
101 }
102 
vpx_idct8x8_1_add_msa(const int16_t * input,uint8_t * dst,int32_t dst_stride)103 void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
104                            int32_t dst_stride) {
105   int16_t out;
106   int32_t val;
107   v8i16 vec;
108 
109   out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
110   out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
111   val = ROUND_POWER_OF_TWO(out, 5);
112   vec = __msa_fill_h(val);
113 
114   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
115   dst += (4 * dst_stride);
116   VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
117 }
118