/external/libvpx/libvpx/vpx_dsp/mips/
sum_squares_msa.c
   37  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   49  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   54  LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   60  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   65  LD_SH8(src + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   81  LD_SH8(src_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   87  LD_SH8(src_ptr + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
   94  LD_SH8(src_ptr, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
  100  LD_SH8(src_ptr + 8, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_sum_squares_2d_i16_msa()
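The loads above pull the residual block into v8i16 registers eight rows of eight int16_t values at a time, so the squares can be accumulated with vector multiplies. As a reference for what is being vectorized, here is a minimal scalar sketch in the shape of libvpx's generic vpx_sum_squares_2d_i16 (the _sketch name is mine, not from the source):

#include <stdint.h>

/* Scalar sketch: sum of squares over a size x size block of int16_t
 * residuals; 'stride' is in elements, like the src_stride fed to LD_SH8. */
static uint64_t sum_squares_2d_i16_sketch(const int16_t *src, int stride,
                                          int size) {
  uint64_t ss = 0;
  for (int r = 0; r < size; ++r) {
    for (int c = 0; c < size; ++c) {
      const int v = src[r * stride + c];
      ss += (uint64_t)(v * v);
    }
  }
  return ss;
}
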
idct32x32_msa.c
   18  LD_SH8(input, 32, m0, n0, m1, n1, m2, n2, m3, n3);  in idct32x8_row_transpose_store()
   19  LD_SH8((input + 8), 32, m4, n4, m5, n5, m6, n6, m7, n7);  in idct32x8_row_transpose_store()
   29  LD_SH8((input + 16), 32, m0, n0, m1, n1, m2, n2, m3, n3);  in idct32x8_row_transpose_store()
   30  LD_SH8((input + 24), 32, m4, n4, m5, n5, m6, n6, m7, n7);  in idct32x8_row_transpose_store()
   48  LD_SH8(tmp_buf, 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct32x8_row_even_process_store()
   65  LD_SH8((tmp_buf + 16), 32, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct32x8_row_even_process_store()
  326  LD_SH8((tmp_buf + 8 * 16), 8, m0, n0, m1, n1, m2, n2, m3, n3);  in idct_butterfly_transpose_store()
  327  LD_SH8((tmp_buf + 12 * 16), 8, m4, n4, m5, n5, m6, n6, m7, n7);  in idct_butterfly_transpose_store()
  358  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct8x32_column_even_process_store()
  377  LD_SH8(tmp_buf, (4 * 32), reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in idct8x32_column_even_process_store()
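The stride of 32 in idct32x8_row_transpose_store() comes from picking 8x8 tiles out of a 32-coefficient-wide row buffer; each pair of LD_SH8 loads is followed by an in-register transpose before the row transform runs. A scalar sketch of that load-and-transpose step (helper name and output layout are illustrative assumptions):

#include <stdint.h>

/* Scalar sketch: read one 8x8 tile from a 32-wide int16_t coefficient
 * buffer and transpose it into a contiguous 8x8 output, which is what
 * LD_SH8(input, 32, ...) plus the vector transpose accomplishes. */
static void load_transpose_8x8_sketch(const int16_t *input /* 32-wide rows */,
                                      int16_t out[8 * 8]) {
  for (int r = 0; r < 8; ++r) {
    for (int c = 0; c < 8; ++c) {
      out[c * 8 + r] = input[r * 32 + c];
    }
  }
}
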
fwd_txfm_msa.c
  138  LD_SH8(input, 16, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct16x8_1d_row()
  139  LD_SH8((input + 8), 16, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct16x8_1d_row()
  158  LD_SH8(input, 16, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct16x8_1d_row()
  203  LD_SH8(input, src_stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_fdct8x8_msa()
  222  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_fdct8x8_1_msa()
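The single load at line 222 brings the whole 8x8 input into registers because the DC-only forward transform just sums the block. A scalar sketch of that variant, with int32_t standing in for libvpx's tran_low_t (an assumption about the build configuration, not the exact implementation):

#include <stdint.h>

/* Scalar sketch of the DC-only 8x8 forward transform: only the DC
 * coefficient is produced, and for this block size it is the plain sum
 * of the 64 input samples. */
static void fdct8x8_1_sketch(const int16_t *input, int32_t *output,
                             int stride) {
  int32_t sum = 0;
  for (int r = 0; r < 8; ++r) {
    for (int c = 0; c < 8; ++c) sum += input[r * stride + c];
  }
  output[0] = sum;
  output[1] = 0;
}
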
idct8x8_msa.c
   18  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_64_add_msa()
   49  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vpx_idct8x8_12_add_msa()
fwd_dct32x32_msa.c
  259  LD_SH8(temp_buff, 32, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct8x32_1d_row_load_butterfly()
  260  LD_SH8(temp_buff + 24, 32, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct8x32_1d_row_load_butterfly()
  272  LD_SH8(temp_buff + 8, 32, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct8x32_1d_row_load_butterfly()
  273  LD_SH8(temp_buff + 16, 32, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct8x32_1d_row_load_butterfly()
  297  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct8x32_1d_row_even_4x()
  298  LD_SH8(input + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct8x32_1d_row_even_4x()
  344  LD_SH8(interm_ptr, 8, vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7);  in fdct8x32_1d_row_even_4x()
  359  LD_SH8(interm_ptr + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct8x32_1d_row_even_4x()
  398  LD_SH8(temp, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in fdct8x32_1d_row_even()
  399  LD_SH8(temp + 64, 8, in8, in9, in10, in11, in12, in13, in14, in15);  in fdct8x32_1d_row_even()
  [all …]
avg_msa.c
   63  LD_SH8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_hadamard_8x8_msa()
  271  LD_SH8(data, 8, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_satd_msa()
  294  LD_SH8(data, 8, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_satd_msa()
  296  LD_SH8(data, 8, src8, src9, src10, src11, src12, src13, src14, src15);  in vpx_satd_msa()
  339  LD_SH8(data, 8, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_satd_msa()
  341  LD_SH8(data, 8, src8, src9, src10, src11, src12, src13, src14, src15);  in vpx_satd_msa()
  619  LD_SH8(src, 8, src0, src1, src2, src3, src4, src5, src6, src7);  in vpx_vector_var_msa()
  620  LD_SH8(ref, 8, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7);  in vpx_vector_var_msa()
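In vpx_satd_msa() the loads stream blocks of transform coefficients through v8i16 registers so their absolute values can be summed. The scalar operation being vectorized is just a sum of absolute values; a sketch (int16_t matches what LD_SH8 loads here, the _sketch name is illustrative):

#include <stdlib.h>
#include <stdint.h>

/* Scalar sketch of SATD as used here: sum of the absolute values of a
 * block of already-transformed coefficients. */
static int satd_sketch(const int16_t *coeff, int length) {
  int satd = 0;
  for (int i = 0; i < length; ++i) satd += abs(coeff[i]);
  return satd;
}
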
idct16x16_msa.c
   19  LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in vpx_idct16_1d_rows_msa()
   21  LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);  in vpx_idct16_1d_rows_msa()
  114  LD_SH8(input, 16, reg0, reg1, reg2, reg3, reg4, reg5, reg6, reg7);  in vpx_idct16_1d_columns_addblk_msa()
  117  LD_SH8(input, 16, reg8, reg9, reg10, reg11, reg12, reg13, reg14, reg15);  in vpx_idct16_1d_columns_addblk_msa()
macros_msa.h
  366  #define LD_SH8(...) LD_H8(v8i16, __VA_ARGS__)  (macro definition)
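LD_SH8 is LD_H8 specialized to the v8i16 (eight signed 16-bit lanes) vector type: every call site above loads eight rows of eight int16_t values, spaced 'stride' elements apart, into eight vector registers. A flattened sketch of that effect, assuming GCC's <msa.h> for the v8i16 type (the real macro is built from smaller load helpers in macros_msa.h; the _SKETCH name is mine):

#include <msa.h>     /* MIPS MSA vector types such as v8i16 */
#include <stdint.h>

/* Flattened sketch of LD_SH8(psrc, stride, o0..o7): eight v8i16 row loads,
 * 'stride' int16_t elements (not bytes) apart. */
#define LD_SH8_SKETCH(psrc, stride, o0, o1, o2, o3, o4, o5, o6, o7) \
  do {                                                              \
    const int16_t *p_ = (psrc);                                     \
    o0 = *(const v8i16 *)(p_ + 0 * (stride));                       \
    o1 = *(const v8i16 *)(p_ + 1 * (stride));                       \
    o2 = *(const v8i16 *)(p_ + 2 * (stride));                       \
    o3 = *(const v8i16 *)(p_ + 3 * (stride));                       \
    o4 = *(const v8i16 *)(p_ + 4 * (stride));                       \
    o5 = *(const v8i16 *)(p_ + 5 * (stride));                       \
    o6 = *(const v8i16 *)(p_ + 6 * (stride));                       \
    o7 = *(const v8i16 *)(p_ + 7 * (stride));                       \
  } while (0)

So a call like LD_SH8(input, 8, in0, ..., in7) in the idct8x8 paths reads a contiguous 8x8 int16_t block, while a stride of 16 or 32 picks one 8x8 tile out of a wider row buffer.
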
/external/libvpx/libvpx/vp9/encoder/mips/msa/
vp9_fdct16x16_msa.c
  161  LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);  in fadst16_transpose_postproc_msa()
  171  LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);  in fadst16_transpose_postproc_msa()
  183  LD_SH8(input, 16, l0, l1, l2, l3, l4, l5, l6, l7);  in fadst16_transpose_postproc_msa()
  193  LD_SH8(input + 8, 16, l8, l9, l10, l11, l12, l13, l14, l15);  in fadst16_transpose_postproc_msa()
  371  LD_SH8(temp, 16, in0, in1, in2, in3, in4, in5, in6, in7);  in postproc_fdct16x8_1d_row()
  373  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);  in postproc_fdct16x8_1d_row()
  394  LD_SH8(temp, 16, in8, in9, in10, in11, in12, in13, in14, in15);  in postproc_fdct16x8_1d_row()
vp9_fdct8x8_msa.c
   20  LD_SH8(input, stride, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_fht8x8_msa()
/external/libvpx/libvpx/vp9/common/mips/msa/
vp9_idct8x8_msa.c
   21  LD_SH8(input, 8, in0, in1, in2, in3, in4, in5, in6, in7);  in vp9_iht8x8_64_add_msa()