/external/libvpx/vp9/encoder/mips/msa/
vp9_fdct16x16_msa.c
  169  ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);  in fadst16_transpose_postproc_msa()
  179  ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);  in fadst16_transpose_postproc_msa()
  191  ST_SH8(r0, r1, r2, r3, r4, r5, r6, r7, out, 8);  in fadst16_transpose_postproc_msa()
  201  ST_SH8(r8, r9, r10, r11, r12, r13, r14, r15, out, 8);  in fadst16_transpose_postproc_msa()
  349  ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);  in fadst16_transpose_msa()
  350  ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);  in fadst16_transpose_msa()
  361  ST_SH8(r0, r8, r1, r9, r2, r10, r3, r11, out, 8);  in fadst16_transpose_msa()
  362  ST_SH8(r4, r12, r5, r13, r6, r14, r7, r15, (out + 64), 8);  in fadst16_transpose_msa()
  391  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, temp, 16);  in postproc_fdct16x8_1d_row()
  400  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, out, 16);  in postproc_fdct16x8_1d_row()
  [all …]

vp9_fdct8x8_msa.c
  64   ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);  in vp9_fht8x8_msa()

/external/libvpx/vpx_dsp/mips/
fwd_txfm_msa.c
  54   ST_SH8(tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7, tmp_ptr, 32);  in fdct8x16_1d_column()
  170  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, input, 16);  in fdct16x8_1d_row()
  178  ST_SH8(tmp0, in0, tmp1, in1, tmp2, in2, tmp3, in3, output, 16);  in fdct16x8_1d_row()
  181  ST_SH8(tmp4, in4, tmp5, in5, tmp6, in6, tmp7, in7, output + 8, 16);  in fdct16x8_1d_row()
  230  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 8);  in vpx_fdct8x8_msa()

fwd_dct32x32_msa.c
  269  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7, output, 8);  in fdct8x32_1d_row_load_butterfly()
  270  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 24 * 8), 8);  in fdct8x32_1d_row_load_butterfly()
  282  ST_SH8(step0, step1, step2, step3, step4, step5, step6, step7,  in fdct8x32_1d_row_load_butterfly()
  284  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, (output + 16 * 8), 8);  in fdct8x32_1d_row_load_butterfly()
  304  ST_SH8(vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, interm_ptr, 8);  in fdct8x32_1d_row_even_4x()
  305  ST_SH8(in8, in9, in10, in11, in12, in13, in14, in15, interm_ptr + 64, 8);  in fdct8x32_1d_row_even_4x()
  623  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output, 32);  in fdct8x32_1d_row_transpose_store()
  637  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 8,  in fdct8x32_1d_row_transpose_store()
  641  ST_SH8(in0, in1, in2, in3, in4, in5, in6, in7, output + 16, 32);  in fdct8x32_1d_row_transpose_store()
  655  ST_SH8(in0_1, in1_1, in2_1, in3_1, in4_1, in5_1, in6_1, in7_1, output + 24,  in fdct8x32_1d_row_transpose_store()

idct16x16_msa.c
  99   ST_SH8(reg0, reg2, reg4, reg6, reg8, reg10, reg12, reg14, output, 16);  in vpx_idct16_1d_rows_msa()
  104  ST_SH8(reg3, reg13, reg11, reg5, reg7, reg9, reg1, reg15, (output + 8), 16);  in vpx_idct16_1d_rows_msa()
  317  ST_SH8(l0, l1, l2, l3, l4, l5, l6, l7, output, 16);  in vpx_iadst16_1d_rows_msa()
  320  ST_SH8(l8, l9, l10, l11, l12, l13, l14, l15, (output + 8), 16);  in vpx_iadst16_1d_rows_msa()

avg_msa.c
  83   ST_SH8(src0, src1, src2, src3, src4, src5, src6, src7, dst, 8);  in vpx_hadamard_8x8_msa()
  129  ST_SH8(src0, src1, src2, src11, src4, src5, src6, src7, dst, 8);  in vpx_hadamard_16x16_msa()
  155  ST_SH8(res0, res1, res2, res3, res4, res5, res6, res7, dst + 64, 8);  in vpx_hadamard_16x16_msa()
  185  ST_SH8(src0, src1, src2, src3, src4, src5, src6, src7, dst + 2 * 64, 8);  in vpx_hadamard_16x16_msa()
  201  ST_SH8(res0, res1, res2, res3, res4, res5, res6, res7, dst + 3 * 64, 8);  in vpx_hadamard_16x16_msa()

idct32x32_msa.c
  25   ST_SH8(m0, n0, m1, n1, m2, n2, m3, n3, (tmp_buf), 8);  in idct32x8_row_transpose_store()

macros_msa.h
  339  #define ST_SH8(...) ST_V8(v8i16, __VA_ARGS__)  (macro definition)
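
The definition at macros_msa.h:339 forwards to ST_V8 with element type v8i16, so ST_SH8 stores eight vectors of eight int16_t lanes each, advancing the int16_t destination pointer by `stride` elements between stores; the 8, 16, and 32 strides in the call sites above therefore step one output row of an 8x8, 16x16, or 32x32 coefficient buffer. Below is a minimal sketch of that access pattern, assuming GCC's <msa.h> vector types; the loop form and the name st_sh8_sketch are illustrative, not the literal unrolled ST_V chain in libvpx.

  #include <msa.h>
  #include <stdint.h>

  /* Illustrative sketch (not the libvpx macro itself): store eight
   * v8i16 vectors at pdst, advancing `stride` int16_t elements per
   * store -- the access pattern an ST_SH8() call expands to. */
  static void st_sh8_sketch(const v8i16 in[8], int16_t *pdst,
                            int32_t stride) {
    int32_t i;
    for (i = 0; i < 8; ++i) {
      *(v8i16 *)(pdst + i * stride) = in[i];  /* 16 bytes per store */
    }
  }

For example, the vpx_fdct8x8_msa() call listed above passes stride 8, writing a contiguous 8x8 block of int16_t coefficients, while the fdct16x8_1d_row() calls pass stride 16 so each vector lands on its own row of a 16-wide buffer.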