/*
 *  Copyright (c) 2015 The WebM project authors. All Rights Reserved.
 *
 *  Use of this source code is governed by a BSD-style license
 *  that can be found in the LICENSE file in the root of the source
 *  tree. An additional intellectual property rights grant can be found
 *  in the file PATENTS.  All contributing project authors may
 *  be found in the AUTHORS file in the root of the source tree.
 */

#include "vpx_dsp/mips/inv_txfm_msa.h"

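/* Full inverse 8x8 transform: all 64 coefficients may be nonzero, so both
 * 1-D passes operate on all eight rows/columns, and the result is added to
 * the prediction in dst. */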
void vpx_idct8x8_64_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);

  /* rows transform */
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  /* 1D idct8x8 */
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                 in4, in5, in6, in7);
  /* columns transform */
  TRANSPOSE8x8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  /* 1D idct8x8 */
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                 in4, in5, in6, in7);
  /* final rounding: add 2^4, then shift right by 5 (divide by 2^5) */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);
  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

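/* Reduced inverse 8x8 transform: used when only the first 12 coefficients
 * (the top-left 4x4 block in scan order) can be nonzero, so the row pass is
 * evaluated directly on four rows before a full column pass. */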
void vpx_idct8x8_12_add_msa(const int16_t *input, uint8_t *dst,
                            int32_t dst_stride) {
  v8i16 in0, in1, in2, in3, in4, in5, in6, in7;
  v8i16 s0, s1, s2, s3, s4, s5, s6, s7, k0, k1, k2, k3, m0, m1, m2, m3;
  v4i32 tmp0, tmp1, tmp2, tmp3;
  v8i16 zero = { 0 };

  /* load vector elements of 8x8 block */
  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);
  TRANSPOSE8X4_SH_SH(in0, in1, in2, in3, in0, in1, in2, in3);
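
  /* The four stages below evaluate the first 1-D IDCT pass directly with
   * dot products against cospi pair constants; rows 4-7 of the input are
   * zero in this path, so they never enter the computation. */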

  /* stage1 */
  ILVL_H2_SH(in3, in0, in2, in1, s0, s1);
  k0 = VP9_SET_COSPI_PAIR(cospi_28_64, -cospi_4_64);
  k1 = VP9_SET_COSPI_PAIR(cospi_4_64, cospi_28_64);
  k2 = VP9_SET_COSPI_PAIR(-cospi_20_64, cospi_12_64);
  k3 = VP9_SET_COSPI_PAIR(cospi_12_64, cospi_20_64);
  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
  BUTTERFLY_4(s0, s1, s3, s2, s4, s7, s6, s5);
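
  /* The recurring DOTP/SRARI/PCKEV sequence computes 32-bit rotations
   * against the cospi pairs, rounds by DCT_CONST_BITS, and narrows the
   * results back to 16 bits before each butterfly. */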

  /* stage2 */
  ILVR_H2_SH(in3, in1, in2, in0, s1, s0);
  k0 = VP9_SET_COSPI_PAIR(cospi_16_64, cospi_16_64);
  k1 = VP9_SET_COSPI_PAIR(cospi_16_64, -cospi_16_64);
  k2 = VP9_SET_COSPI_PAIR(cospi_24_64, -cospi_8_64);
  k3 = VP9_SET_COSPI_PAIR(cospi_8_64, cospi_24_64);
  DOTP_SH4_SW(s0, s0, s1, s1, k0, k1, k2, k3, tmp0, tmp1, tmp2, tmp3);
  SRARI_W4_SW(tmp0, tmp1, tmp2, tmp3, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s0, s1);
  PCKEV_H2_SH(zero, tmp2, zero, tmp3, s2, s3);
  BUTTERFLY_4(s0, s1, s2, s3, m0, m1, m2, m3);

  /* stage3 */
  s0 = __msa_ilvr_h(s6, s5);

  k1 = VP9_SET_COSPI_PAIR(-cospi_16_64, cospi_16_64);
  DOTP_SH2_SW(s0, s0, k1, k0, tmp0, tmp1);
  SRARI_W2_SW(tmp0, tmp1, DCT_CONST_BITS);
  PCKEV_H2_SH(zero, tmp0, zero, tmp1, s2, s3);

  /* stage4 */
  BUTTERFLY_8(m0, m1, m2, m3, s4, s2, s3, s7, in0, in1, in2, in3, in4, in5, in6,
              in7);
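
  /* transpose back to column order and run the second (column) 1-D pass */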
  TRANSPOSE4X8_SH_SH(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                     in4, in5, in6, in7);
  VP9_IDCT8x8_1D(in0, in1, in2, in3, in4, in5, in6, in7, in0, in1, in2, in3,
                 in4, in5, in6, in7);

  /* final rounding: add 2^4, then shift right by 5 (divide by 2^5) */
  SRARI_H4_SH(in0, in1, in2, in3, 5);
  SRARI_H4_SH(in4, in5, in6, in7, 5);

  /* add block and store 8x8 */
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in0, in1, in2, in3);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, in4, in5, in6, in7);
}

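/* DC-only inverse 8x8 transform: a single nonzero (DC) coefficient yields
 * the same value at every output position, so it is computed once as a
 * scalar, broadcast into a vector, and added to the 8x8 block. */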
void vpx_idct8x8_1_add_msa(const int16_t *input, uint8_t *dst,
                           int32_t dst_stride) {
  int16_t out;
  int32_t val;
  v8i16 vec;

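  /* each 1-D pass scales the DC term by cospi_16_64 with DCT_CONST_BITS
   * rounding; the final round shift by 5 matches the full transform */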
  out = ROUND_POWER_OF_TWO((input[0] * cospi_16_64), DCT_CONST_BITS);
  out = ROUND_POWER_OF_TWO((out * cospi_16_64), DCT_CONST_BITS);
  val = ROUND_POWER_OF_TWO(out, 5);
  vec = __msa_fill_h(val);

  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
  dst += (4 * dst_stride);
  VP9_ADDBLK_ST8x4_UB(dst, dst_stride, vec, vec, vec, vec);
}