/external/libvpx/libvpx/vpx_dsp/mips/

fwd_dct32x32_msa.c
    22   LD_SH4(input, src_stride, in0, in1, in2, in3);  in fdct8x32_1d_column_load_butterfly()
    23   LD_SH4(input + (28 * src_stride), src_stride, in4, in5, in6, in7);  in fdct8x32_1d_column_load_butterfly()
    24   LD_SH4(input + (4 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);  in fdct8x32_1d_column_load_butterfly()
    25   LD_SH4(input + (24 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);  in fdct8x32_1d_column_load_butterfly()
    40   LD_SH4(input + (8 * src_stride), src_stride, in0, in1, in2, in3);  in fdct8x32_1d_column_load_butterfly()
    41   LD_SH4(input + (20 * src_stride), src_stride, in4, in5, in6, in7);  in fdct8x32_1d_column_load_butterfly()
    42   LD_SH4(input + (12 * src_stride), src_stride, in0_1, in1_1, in2_1, in3_1);  in fdct8x32_1d_column_load_butterfly()
    43   LD_SH4(input + (16 * src_stride), src_stride, in4_1, in5_1, in6_1, in7_1);  in fdct8x32_1d_column_load_butterfly()
    65   LD_SH4(input, 8, in0, in1, in2, in3);  in fdct8x32_1d_column_even_store()
    66   LD_SH4(input + 96, 8, in12, in13, in14, in15);  in fdct8x32_1d_column_even_store()
    [all …]

avg_msa.c
    201  LD_SH4(dst, 64, src0, src1, src2, src3);  in vpx_hadamard_16x16_msa()
    202  LD_SH4(dst + 8, 64, src4, src5, src6, src7);  in vpx_hadamard_16x16_msa()
    215  LD_SH4(dst, 64, src0, src1, src2, src3);  in vpx_hadamard_16x16_msa()
    216  LD_SH4(dst + 8, 64, src4, src5, src6, src7);  in vpx_hadamard_16x16_msa()
    229  LD_SH4(dst, 64, src0, src1, src2, src3);  in vpx_hadamard_16x16_msa()
    230  LD_SH4(dst + 8, 64, src4, src5, src6, src7);  in vpx_hadamard_16x16_msa()
    243  LD_SH4(dst, 64, src0, src1, src2, src3);  in vpx_hadamard_16x16_msa()
    244  LD_SH4(dst + 8, 64, src4, src5, src6, src7);  in vpx_hadamard_16x16_msa()
    595  LD_SH4(src, 8, src0, src1, src2, src3);  in vpx_vector_var_msa()
    596  LD_SH4(ref, 8, ref0, ref1, ref2, ref3);  in vpx_vector_var_msa()

idct32x32_msa.c
    211  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);  in idct32x8_row_odd_process_store()
    212  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);  in idct32x8_row_odd_process_store()
    225  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);  in idct32x8_row_odd_process_store()
    226  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);  in idct32x8_row_odd_process_store()
    510  LD_SH4(tmp_odd_buf, 8, reg0, reg1, reg2, reg3);  in idct8x32_column_odd_process_store()
    511  LD_SH4((tmp_odd_buf + 8 * 8), 8, reg4, reg5, reg6, reg7);  in idct8x32_column_odd_process_store()
    524  LD_SH4((tmp_odd_buf + 4 * 8), 8, reg1, reg2, reg0, reg3);  in idct8x32_column_odd_process_store()
    525  LD_SH4((tmp_odd_buf + 12 * 8), 8, reg4, reg5, reg6, reg7);  in idct8x32_column_odd_process_store()

fwd_txfm_msa.c
    188  LD_SH4(input, src_stride, in0, in1, in2, in3);  in vpx_fdct4x4_msa()

variance_msa.c
    313  LD_SH4(src, 8, src0, src1, src2, src3);  in get_mb_ss_msa()

macros_msa.h
    259  #define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)  (macro definition)
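
The macros_msa.h entry above shows that LD_SH4 is a thin wrapper: it forwards to the generic four-vector load helper with the v8i16 element type (eight signed 16-bit lanes per vector), so one call pulls in four rows of 16-bit data spaced `stride` elements apart. Below is a minimal sketch of that pattern for a MIPS target with MSA enabled; the `_EX`-suffixed macros and load_four_rows() are hypothetical names for illustration, not the library's exact expansion.

#include <msa.h>    /* MSA vector types (v8i16) and intrinsics; MIPS-only header */
#include <stdint.h>

/* Illustrative stand-ins for the library's LD_SH / LD_SH4 helpers: load four
 * v8i16 vectors (8 signed 16-bit lanes each) from rows that sit `stride`
 * int16_t elements apart, matching the call sites listed above. */
#define LD_SH_EX(psrc) (*(const v8i16 *)(psrc))
#define LD_SH4_EX(psrc, stride, out0, out1, out2, out3) \
  {                                                     \
    out0 = LD_SH_EX((psrc));                            \
    out1 = LD_SH_EX((psrc) + (stride));                 \
    out2 = LD_SH_EX((psrc) + 2 * (stride));             \
    out3 = LD_SH_EX((psrc) + 3 * (stride));             \
  }

/* Mirrors a call site such as vpx_fdct4x4_msa(): fetch the first four rows
 * of a 16-bit residual block in one step. */
static void load_four_rows(const int16_t *input, int32_t src_stride) {
  v8i16 in0, in1, in2, in3;
  LD_SH4_EX(input, src_stride, in0, in1, in2, in3);
  (void)in0;
  (void)in1;
  (void)in2;
  (void)in3;
}

Note that the stride argument is in int16_t elements, which is consistent with the vp8 call sites below passing pitch / 2 when pitch is given in bytes.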

/external/libvpx/libvpx/vp9/encoder/mips/msa/

vp9_fdct4x4_msa.c
    20   LD_SH4(input, src_stride, in0, in1, in2, in3);  in vp9_fwht4x4_msa()
    52   LD_SH4(input, stride, in0, in1, in2, in3);  in vp9_fht4x4_msa()

/external/libvpx/libvpx/vp8/encoder/mips/msa/

dct_msa.c
    77   LD_SH4(input, pitch / 2, in0, in1, in2, in3);  in vp8_short_fdct4x4_msa()
    123  LD_SH4(input, pitch / 2, in0, in1, in2, in3);  in vp8_short_fdct8x4_msa()
    172  LD_SH4(input, pitch / 2, in0_h, in1_h, in2_h, in3_h);  in vp8_short_walsh4x4_msa()

/external/libaom/libaom/av1/encoder/mips/msa/

fdct4x4_msa.c
    20   LD_SH4(input, src_stride, in0, in1, in2, in3);  in av1_fwht4x4_msa()

/external/libvpx/libvpx/vp8/common/mips/msa/

idct_msa.c
    224  LD_SH4(input, 8, in0, in1, in2, in3);  in dequant_idct4x4_addblk_2x_msa()

vp8_macros_msa.h
    327  #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)  (macro definition)

/external/libaom/libaom/aom_dsp/mips/

variance_msa.c
    315  LD_SH4(src, 8, src0, src1, src2, src3);  in get_mb_ss_msa()

macros_msa.h
    360  #define LD_SH4(...) LD_H4(v8i16, __VA_ARGS__)  (macro definition)