Searched refs:LD_UB8 (Results 1 – 25 of 25) sorted by relevance

/external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_4_msa.c
23 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_msa()
51 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_4_dual_msa()
80 LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_4_msa()
114 LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in vpx_lpf_vertical_4_dual_msa()
115 LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13, … in vpx_lpf_vertical_4_dual_msa()
vpx_convolve_copy_msa.c
23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
109 LD_UB8(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6, … in copy_16multx8mult_msa()
130 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width16_msa()
loopfilter_8_msa.c
27 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_msa()
96 LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_horizontal_8_dual_msa()
166 LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3); in vpx_lpf_vertical_8_msa()
247 LD_UB8(temp_src, pitch, p0, p1, p2, p3, row4, row5, row6, row7); in vpx_lpf_vertical_8_dual_msa()
249 LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15); in vpx_lpf_vertical_8_dual_msa()
loopfilter_16_msa.c
30 LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in hz_lpf_t4_and_t8_16w()
95 LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0); in hz_lpf_t16_16w()
96 LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7); in hz_lpf_t16_16w()
445 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in mb_lpf_horizontal_edge()
669 LD_UB8(input, in_pitch, p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, … in transpose_16x8_to_8x16()
693 LD_UB8(input, in_pitch, p7, p6, p5, p4, p3, p2, p1, p0); in transpose_8x16_to_16x8()
694 LD_UB8(input + (8 * in_pitch), in_pitch, q0, q1, q2, q3, q4, q5, q6, q7); in transpose_8x16_to_16x8()
708 LD_UB8(input, in_pitch, row0, row1, row2, row3, row4, row5, row6, row7); in transpose_16x16()
710 LD_UB8(input, in_pitch, row8, row9, row10, row11, row12, row13, row14, row15); in transpose_16x16()
772 LD_UB8(src - (4 * 16), 16, p3, p2, p1, p0, q0, q1, q2, q3); in vt_lpf_t4_and_t8_8w()
[all …]
avg_msa.c
21 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vpx_avg_8x8_msa()
406 LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in vpx_int_pro_row_msa()
430 LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in vpx_int_pro_row_msa()
448 LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in vpx_int_pro_row_msa()
472 LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in vpx_int_pro_row_msa()
490 LD_UB8(ref, ref_stride, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in vpx_int_pro_row_msa()
688 LD_UB8(s, p, s0, s1, s2, s3, s4, s5, s6, s7); in vpx_minmax_8x8_msa()
689 LD_UB8(d, dp, d0, d1, d2, d3, d4, d5, d6, d7); in vpx_minmax_8x8_msa()
vpx_convolve_avg_msa.c
88 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in avg_width16_msa()
90 LD_UB8(dst, dst_stride, dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7); in avg_width16_msa()
deblock_msa.c
220 LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5, … in postproc_down_across_chroma_msa()
413 LD_UB8(p_dst, dst_stride, inter0, inter1, inter2, inter3, inter4, inter5, … in postproc_down_across_luma_msa()
415 LD_UB8(p_dst + 8 * dst_stride, dst_stride, inter8, inter9, inter10, inter11, … in postproc_down_across_luma_msa()
vpx_convolve8_msa.c
833 LD_UB8(src, 16, in0, in1, in2, in3, in4, in5, in6, in7); in transpose16x16_to_dst()
834 LD_UB8(src + 16 * 8, 16, in8, in9, in10, in11, in12, in13, in14, in15); in transpose16x16_to_dst()
vpx_convolve8_avg_vert_msa.c
374 LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_and_aver_dst_8x8mult_msa()
vpx_convolve8_vert_msa.c
407 LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_8x8mult_msa()
macros_msa.h
282 #define LD_UB8(...) LD_V8(v16u8, __VA_ARGS__) (macro definition)
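Each tree's macros header defines LD_UB8 the same way: eight strided loads of v16u8 (one 16-byte unsigned-byte vector per row). Below is a minimal sketch of what a call expands to, assuming the GCC/Clang MSA intrinsics from <msa.h>; ld_ub8_sketch and the single-vector LD_UB helper here are illustrative stand-ins, not the literal header code:

#include <stdint.h>
#include <msa.h> /* MIPS SIMD Architecture intrinsics (build with -mmsa) */

/* Load one 16-byte unsigned vector from an arbitrarily aligned pointer. */
#define LD_UB(psrc) ((v16u8)__msa_ld_b((const void *)(psrc), 0))

/* What LD_UB8(psrc, stride, out0..out7) boils down to: eight rows,
 * each `stride` bytes apart, loaded into eight vector registers. */
static void ld_ub8_sketch(const uint8_t *psrc, int32_t stride,
                          v16u8 *out0, v16u8 *out1, v16u8 *out2, v16u8 *out3,
                          v16u8 *out4, v16u8 *out5, v16u8 *out6, v16u8 *out7) {
  *out0 = LD_UB(psrc + 0 * stride);
  *out1 = LD_UB(psrc + 1 * stride);
  *out2 = LD_UB(psrc + 2 * stride);
  *out3 = LD_UB(psrc + 3 * stride);
  *out4 = LD_UB(psrc + 4 * stride);
  *out5 = LD_UB(psrc + 5 * stride);
  *out6 = LD_UB(psrc + 6 * stride);
  *out7 = LD_UB(psrc + 7 * stride);
}

This is why every call site above passes a base pointer, a pitch/stride, and exactly eight output operands; the libvpx LD_V8 spelling and the LD_B8 spelling in the libaom, vp8, and libwebp headers below appear to differ in name only.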
/external/libaom/libaom/aom_dsp/mips/
loopfilter_4_msa.c
23 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_4_msa()
51 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_4_dual_msa()
80 LD_UB8((src - 4), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_vertical_4_msa()
114 LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in aom_lpf_vertical_4_dual_msa()
115 LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13, … in aom_lpf_vertical_4_dual_msa()
aom_convolve_copy_msa.c
23 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
52 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width8_msa()
109 LD_UB8(src_tmp, src_stride, src0, src1, src2, src3, src4, src5, src6, … in copy_16multx8mult_msa()
130 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_width16_msa()
loopfilter_8_msa.c
27 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_8_msa()
96 LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_horizontal_8_dual_msa()
166 LD_UB8(src - 4, pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_lpf_vertical_8_msa()
247 LD_UB8(temp_src, pitch, p0, p1, p2, p3, row4, row5, row6, row7); in aom_lpf_vertical_8_dual_msa()
249 LD_UB8(temp_src, pitch, q3, q2, q1, q0, row12, row13, row14, row15); in aom_lpf_vertical_8_dual_msa()
loopfilter_16_msa.c
29 LD_UB8(src - (4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in aom_hz_lpf_t4_and_t8_16w()
94 LD_UB8((src - 8 * pitch), pitch, p7, p6, p5, p4, p3, p2, p1, p0); in aom_hz_lpf_t16_16w()
95 LD_UB8(src, pitch, q0, q1, q2, q3, q4, q5, q6, q7); in aom_hz_lpf_t16_16w()
444 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in mb_lpf_horizontal_edge()
668 LD_UB8(input, in_pitch, p7_org, p6_org, p5_org, p4_org, p3_org, p2_org, … in transpose_16x8_to_8x16()
692 LD_UB8(input, in_pitch, p7, p6, p5, p4, p3, p2, p1, p0); in transpose_8x16_to_16x8()
693 LD_UB8(input + (8 * in_pitch), in_pitch, q0, q1, q2, q3, q4, q5, q6, q7); in transpose_8x16_to_16x8()
707 LD_UB8(input, in_pitch, row0, row1, row2, row3, row4, row5, row6, row7); in transpose_16x16()
709 LD_UB8(input, in_pitch, row8, row9, row10, row11, row12, row13, row14, row15); in transpose_16x16()
771 LD_UB8(src - (4 * 16), 16, p3, p2, p1, p0, q0, q1, q2, q3); in aom_vt_lpf_t4_and_t8_8w()
[all …]
aom_convolve8_vert_msa.c
410 LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_8x8mult_msa()
macros_msa.h
339 #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__) (macro definition)
/external/libvpx/libvpx/vp8/common/mips/msa/
loopfilter_filters_msa.c
220 LD_UB8((src - 4 * pitch), pitch, p3, p2, p1, p0, q0, q1, q2, q3); in loop_filter_horizontal_4_dual_msa()
254 LD_UB8(src - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in loop_filter_vertical_4_dual_msa()
255 LD_UB8(src - 4 + (8 * pitch), pitch, row8, row9, row10, row11, row12, row13, … in loop_filter_vertical_4_dual_msa()
299 LD_UB8(temp_src, pitch, p3, p2, p1, p0, q0, q1, q2, q3); in mbloop_filter_horizontal_edge_y_msa()
326 LD_UB8(temp_src, pitch, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); in mbloop_filter_horizontal_edge_uv_msa()
328 LD_UB8(temp_src, pitch, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); in mbloop_filter_horizontal_edge_uv_msa()
378 LD_UB8(temp_src, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in mbloop_filter_vertical_edge_y_msa()
380 LD_UB8(temp_src, pitch, row8, row9, row10, row11, row12, row13, row14, row15); in mbloop_filter_vertical_edge_y_msa()
443 LD_UB8(src_u - 4, pitch, row0, row1, row2, row3, row4, row5, row6, row7); in mbloop_filter_vertical_edge_uv_msa()
444 LD_UB8(src_v - 4, pitch, row8, row9, row10, row11, row12, row13, row14, … in mbloop_filter_vertical_edge_uv_msa()
[all …]
copymem_msa.c
40 LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7); in copy_16x16_msa()
42 LD_UB8(src, src_stride, src8, src9, src10, src11, src12, src13, src14, src15); in copy_16x16_msa()
bilinear_filter_msa.c
349 LD_UB8(src, src_stride, src1, src2, src3, src4, src5, src6, src7, src8); in common_vt_2t_8x8mult_msa()
vp8_macros_msa.h
306 #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__) (macro definition)
/external/webp/src/dsp/
dec_msa.c
349 LD_UB8(ptemp, stride, p3, p2, p1, p0, q0, q1, q2, q3); in VFilter16()
371 LD_UB8(ptmp, stride, row0, row1, row2, row3, row4, row5, row6, row7); in HFilter16()
373 LD_UB8(ptmp, stride, row8, row9, row10, row11, row12, row13, row14, row15); in HFilter16()
428 LD_UB8((src - 4 * stride), stride, p3, p2, p1, p0, q0, q1, q2, q3); in VFilterHorEdge16i()
453 LD_UB8(src - 4, stride, row0, row1, row2, row3, row4, row5, row6, row7); in HFilterVertEdge16i()
454 LD_UB8(src - 4 + (8 * stride), stride, … in HFilterVertEdge16i()
492 LD_UB8(ptmp_src_u, stride, p3_u, p2_u, p1_u, p0_u, q0_u, q1_u, q2_u, q3_u); in VFilter8()
493 LD_UB8(ptmp_src_v, stride, p3_v, p2_v, p1_v, p0_v, q0_v, q1_v, q2_v, q3_v); in VFilter8()
537 LD_UB8(ptmp_src_u, stride, row0, row1, row2, row3, row4, row5, row6, row7); in HFilter8()
538 LD_UB8(ptmp_src_v, stride, … in HFilter8()
[all …]
enc_msa.c
721 LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); in SSE16x16_MSA()
722 LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in SSE16x16_MSA()
729 LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); in SSE16x16_MSA()
730 LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in SSE16x16_MSA()
748 LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); in SSE16x8_MSA()
749 LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in SSE16x8_MSA()
768 LD_UB8(a, BPS, src0, src1, src2, src3, src4, src5, src6, src7); in SSE8x8_MSA()
769 LD_UB8(b, BPS, ref0, ref1, ref2, ref3, ref4, ref5, ref6, ref7); in SSE8x8_MSA()
msa_macro.h
246 #define LD_UB8(...) LD_B8(v16u8, __VA_ARGS__) (macro definition)
/external/libvpx/libvpx/vp8/encoder/mips/msa/
denoising_msa.c
293 LD_UB8(sig_start, sig_stride, src0, src1, src2, src3, src4, src5, src6, src7); in vp8_denoiser_filter_msa()
295 LD_UB8(sig_start, sig_stride, src8, src9, src10, src11, src12, src13, src14, … in vp8_denoiser_filter_msa()
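Taken together, the call pattern is uniform across all of these files: a base pointer, a row stride, and eight output vectors. A usage sketch modeled on the copy_width16_msa() hits above, assuming LD_UB8 and a matching ST_UB8 store macro from the same macros_msa.h header; copy_16x8_sketch is an illustrative name, not a function from these trees:

#include <stdint.h>
#include "macros_msa.h" /* assumed: provides v16u8, LD_UB8, ST_UB8 */

/* Copy a 16-byte-wide, 8-row block: one LD_UB8 burst in, one ST_UB8 burst out. */
static void copy_16x8_sketch(const uint8_t *src, int32_t src_stride,
                             uint8_t *dst, int32_t dst_stride) {
  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;

  LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);
  ST_UB8(src0, src1, src2, src3, src4, src5, src6, src7, dst, dst_stride);
}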