/external/libyuv/files/source/
row_msa.cc
      48  out_y = (v16u8)__msa_insert_d((v2i64)zero_m, 0, (int64)y_m); \
      49  out_u = (v16u8)__msa_insert_w(zero_m, 0, (int32)u_m); \
      50  out_v = (v16u8)__msa_insert_w(zero_m, 0, (int32)v_m); \
     127  v16u8 dst0_m, dst1_m; \
     130  dst0_m = (v16u8)__msa_ilvr_h(vec1_m, vec0_m); \
     131  dst1_m = (v16u8)__msa_ilvl_h(vec1_m, vec0_m); \
     139  v16u8 vec0_m, vec1_m, vec2_m, vec3_m; \
     142  vec0_m = (v16u8)__msa_pckev_h((v8i16)argb1, (v8i16)argb0); \
     143  vec1_m = (v16u8)__msa_pckev_h((v8i16)argb3, (v8i16)argb2); \
     144  vec2_m = (v16u8)__msa_pckod_h((v8i16)argb1, (v8i16)argb0); \
    [all …]
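Throughout these files, v16u8 is the 128-bit, 16-lane unsigned-byte vector type from <msa.h>; the pervasive casts exist because each MSA intrinsic is typed for exactly one element width. A minimal, standalone illustration of the insert-and-reinterpret idiom shown above (hypothetical values; assumes a little-endian MIPS toolchain with -mmsa):

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      v4i32 zero_m = { 0 };
      /* Drop a 32-bit scalar into word lane 0; lanes 1..3 stay zero. */
      v4i32 w = __msa_insert_w(zero_m, 0, 0x64584C40);
      /* Reinterpret the same 128 bits as 16 unsigned bytes. */
      v16u8 out = (v16u8)w;
      printf("%u %u %u %u\n", out[0], out[1], out[2], out[3]); /* 64 76 88 100 */
      return 0;
    }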
scale_msa.cc
      29  v16u8 src0, src1, dst0;                                   in ScaleARGBRowDown2_MSA()
      33  src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);            in ScaleARGBRowDown2_MSA()
      34  src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);           in ScaleARGBRowDown2_MSA()
      35  dst0 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);    in ScaleARGBRowDown2_MSA()
      47  v16u8 src0, src1, vec0, vec1, dst0;                       in ScaleARGBRowDown2Linear_MSA()
      51  src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);            in ScaleARGBRowDown2Linear_MSA()
      52  src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);           in ScaleARGBRowDown2Linear_MSA()
      53  vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);    in ScaleARGBRowDown2Linear_MSA()
      54  vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);    in ScaleARGBRowDown2Linear_MSA()
      55  dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);   in ScaleARGBRowDown2Linear_MSA()
    [all …]
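The matches above are essentially the whole down-scaler: each ARGB pixel occupies one 32-bit lane, __msa_pckod_w keeps every second pixel, and the Linear variant averages the even/odd pixel pairs instead. A self-contained sketch of that idiom, not libyuv's exact routine; the function name is hypothetical, and it assumes dst_width is a multiple of 4 and an -mmsa build:

    #include <msa.h>
    #include <stdint.h>

    void ScaleRowDown2LinearSketch(const uint8_t* src_argb, uint8_t* dst_argb,
                                   int dst_width) {
      int x;
      /* Each iteration reads 8 source pixels (32 bytes) and writes 4. */
      for (x = 0; x < dst_width; x += 4) {
        v16u8 src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);
        v16u8 src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);
        /* Even- and odd-indexed 32-bit pixels across the two loads. */
        v16u8 vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);
        v16u8 vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);
        /* Rounded per-byte average of each horizontal pixel pair. */
        v16u8 dst0 = __msa_aver_u_b(vec0, vec1);
        __msa_st_b((v16i8)dst0, dst_argb, 0);
        src_argb += 32;
        dst_argb += 16;
      }
    }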
rotate_msa.cc
      24  out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0); \
      25  out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0); \
      26  out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2); \
      27  out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2); \
      32  out0 = (v16u8)__msa_ilvr_h((v8i16)in1, (v8i16)in0); \
      33  out1 = (v16u8)__msa_ilvl_h((v8i16)in1, (v8i16)in0); \
      34  out2 = (v16u8)__msa_ilvr_h((v8i16)in3, (v8i16)in2); \
      35  out3 = (v16u8)__msa_ilvl_h((v8i16)in3, (v8i16)in2); \
      40  out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0); \
      41  out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0); \
    [all …]
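These interleaves are the standard MSA transpose ladder: ilvr_b/ilvl_b merge byte columns, then the _h and _w variants merge those results at doubling widths until a 16x16 byte transpose falls out. What a single rung does, in isolation (assumes -mmsa; lane order shown for a little-endian target):

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      v16i8 in0 = { 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15 };
      v16i8 in1 = { 16, 17, 18, 19, 20, 21, 22, 23,
                    24, 25, 26, 27, 28, 29, 30, 31 };
      /* ilvr_b interleaves the low halves: { in0[0], in1[0], in0[1], ... } */
      v16i8 lo = __msa_ilvr_b(in1, in0);
      /* ilvl_b does the same with the high halves. */
      v16i8 hi = __msa_ilvl_b(in1, in0);
      int i;
      for (i = 0; i < 16; ++i) printf("%d ", lo[i]); /* 0 16 1 17 ... 7 23 */
      printf("\n");
      for (i = 0; i < 16; ++i) printf("%d ", hi[i]); /* 8 24 9 25 ... 15 31 */
      printf("\n");
      return 0;
    }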
/external/libvpx/libvpx/third_party/libyuv/source/
row_msa.cc
      48  out_y = (v16u8)__msa_insert_d((v2i64)zero_m, 0, (int64_t)y_m); \
      49  out_u = (v16u8)__msa_insert_w(zero_m, 0, (int32_t)u_m); \
      50  out_v = (v16u8)__msa_insert_w(zero_m, 0, (int32_t)v_m); \
     127  v16u8 dst0_m, dst1_m; \
     130  dst0_m = (v16u8)__msa_ilvr_h(vec1_m, vec0_m); \
     131  dst1_m = (v16u8)__msa_ilvl_h(vec1_m, vec0_m); \
     139  v16u8 vec0_m, vec1_m, vec2_m, vec3_m; \
     142  vec0_m = (v16u8)__msa_pckev_h((v8i16)argb1, (v8i16)argb0); \
     143  vec1_m = (v16u8)__msa_pckev_h((v8i16)argb3, (v8i16)argb2); \
     144  vec2_m = (v16u8)__msa_pckod_h((v8i16)argb1, (v8i16)argb0); \
    [all …]
scale_msa.cc
      37  v16u8 src0, src1, dst0;                                   in ScaleARGBRowDown2_MSA()
      41  src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);            in ScaleARGBRowDown2_MSA()
      42  src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);           in ScaleARGBRowDown2_MSA()
      43  dst0 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);    in ScaleARGBRowDown2_MSA()
      55  v16u8 src0, src1, vec0, vec1, dst0;                       in ScaleARGBRowDown2Linear_MSA()
      59  src0 = (v16u8)__msa_ld_b((v16i8*)src_argb, 0);            in ScaleARGBRowDown2Linear_MSA()
      60  src1 = (v16u8)__msa_ld_b((v16i8*)src_argb, 16);           in ScaleARGBRowDown2Linear_MSA()
      61  vec0 = (v16u8)__msa_pckev_w((v4i32)src1, (v4i32)src0);    in ScaleARGBRowDown2Linear_MSA()
      62  vec1 = (v16u8)__msa_pckod_w((v4i32)src1, (v4i32)src0);    in ScaleARGBRowDown2Linear_MSA()
      63  dst0 = (v16u8)__msa_aver_u_b((v16u8)vec0, (v16u8)vec1);   in ScaleARGBRowDown2Linear_MSA()
    [all …]
rotate_msa.cc
      24  out0 = (v16u8)__msa_ilvr_b((v16i8)in1, (v16i8)in0); \
      25  out1 = (v16u8)__msa_ilvl_b((v16i8)in1, (v16i8)in0); \
      26  out2 = (v16u8)__msa_ilvr_b((v16i8)in3, (v16i8)in2); \
      27  out3 = (v16u8)__msa_ilvl_b((v16i8)in3, (v16i8)in2); \
      32  out0 = (v16u8)__msa_ilvr_h((v8i16)in1, (v8i16)in0); \
      33  out1 = (v16u8)__msa_ilvl_h((v8i16)in1, (v8i16)in0); \
      34  out2 = (v16u8)__msa_ilvr_h((v8i16)in3, (v8i16)in2); \
      35  out3 = (v16u8)__msa_ilvl_h((v8i16)in3, (v8i16)in2); \
      40  out0 = (v16u8)__msa_ilvr_w((v4i32)in1, (v4i32)in0); \
      41  out1 = (v16u8)__msa_ilvl_w((v4i32)in1, (v4i32)in0); \
    [all …]
compare_msa.cc
      30  v16u8 src0, src1, src2, src3;                  in HammingDistance_MSA()
      34  src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);    in HammingDistance_MSA()
      35  src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);   in HammingDistance_MSA()
      36  src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);    in HammingDistance_MSA()
      37  src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);   in HammingDistance_MSA()
      57  v16u8 src0, src1, src2, src3;                  in SumSquareError_MSA()
      63  src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);    in SumSquareError_MSA()
      64  src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);   in SumSquareError_MSA()
      65  src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);    in SumSquareError_MSA()
      66  src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);   in SumSquareError_MSA()
    [all …]
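HammingDistance_MSA loads 32 bytes from each input per step; the elided remainder XORs the pairs and counts the differing bits. A hedged reconstruction of that approach using pcnt_b plus a widening horizontal add; the function name is hypothetical and this is not the verbatim upstream kernel:

    #include <msa.h>
    #include <stdint.h>

    uint32_t HammingDistance32(const uint8_t* src_a, const uint8_t* src_b) {
      v16u8 src0 = (v16u8)__msa_ld_b((v16i8*)src_a, 0);
      v16u8 src1 = (v16u8)__msa_ld_b((v16i8*)src_a, 16);
      v16u8 src2 = (v16u8)__msa_ld_b((v16i8*)src_b, 0);
      v16u8 src3 = (v16u8)__msa_ld_b((v16i8*)src_b, 16);
      v16u8 diff0 = __msa_xor_v(src0, src2);   /* differing bits */
      v16u8 diff1 = __msa_xor_v(src1, src3);
      v16i8 cnt0 = __msa_pcnt_b((v16i8)diff0); /* per-byte popcount, 0..8 */
      v16i8 cnt1 = __msa_pcnt_b((v16i8)diff1);
      /* Widen pairwise into halfwords, then finish the reduction in scalar code. */
      v8u16 sum0 = __msa_hadd_u_h((v16u8)cnt0, (v16u8)cnt0);
      v8u16 sum1 = __msa_hadd_u_h((v16u8)cnt1, (v16u8)cnt1);
      uint32_t total = 0;
      int i;
      for (i = 0; i < 8; ++i) total += sum0[i] + sum1[i];
      return total;
    }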
/external/libvpx/libvpx/vp8/encoder/mips/msa/
denoising_msa.c
      29  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;                          in vp8_denoiser_filter_msa()
      30  v16u8 src8, src9, src10, src11, src12, src13, src14, src15;                    in vp8_denoiser_filter_msa()
      31  v16u8 mc_running_avg_y0, running_avg_y, sig0;                                  in vp8_denoiser_filter_msa()
      32  v16u8 mc_running_avg_y1, running_avg_y1, sig1;                                 in vp8_denoiser_filter_msa()
      33  v16u8 coeff0, coeff1;                                                          in vp8_denoiser_filter_msa()
     109  adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);   in vp8_denoiser_filter_msa()
     110  adjust1 = (v8i16)__msa_bmnz_v((v16u8)adjust1, (v16u8)diff1, (v16u8)temp3_h);   in vp8_denoiser_filter_msa()
     117  running_avg_y = (v16u8)__msa_pckev_b((v16i8)temp1_h, (v16i8)temp0_h);          in vp8_denoiser_filter_msa()
     119  __msa_bmnz_v(running_avg_y, mc_running_avg_y0, (v16u8)temp2_h);                in vp8_denoiser_filter_msa()
     157  adjust0 = (v8i16)__msa_bmnz_v((v16u8)adjust0, (v16u8)diff0, (v16u8)temp2_h);   in vp8_denoiser_filter_msa()
    [all …]
/external/libvpx/libvpx/vpx_dsp/mips/
loopfilter_4_msa.c
      19  v16u8 mask, hev, flat, thresh, b_limit, limit;                                 in vpx_lpf_horizontal_4_msa()
      20  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;          in vpx_lpf_horizontal_4_msa()
      25  thresh = (v16u8)__msa_fill_b(*thresh_ptr);                                     in vpx_lpf_horizontal_4_msa()
      26  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);                                   in vpx_lpf_horizontal_4_msa()
      27  limit = (v16u8)__msa_fill_b(*limit_ptr);                                       in vpx_lpf_horizontal_4_msa()
      47  v16u8 mask, hev, flat, thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;   in vpx_lpf_horizontal_4_dual_msa()
      48  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;                                          in vpx_lpf_horizontal_4_dual_msa()
      53  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);                                   in vpx_lpf_horizontal_4_dual_msa()
      54  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);                                   in vpx_lpf_horizontal_4_dual_msa()
      55  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);                 in vpx_lpf_horizontal_4_dual_msa()
    [all …]
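The fill_b calls broadcast a single threshold byte into all 16 lanes, and the dual variant then splices two such vectors with ilvr_d so one register drives two 8-pixel edge filters at once. Both steps in isolation (hypothetical threshold values; -mmsa):

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      unsigned char t0 = 4, t1 = 9;
      v16u8 thresh0 = (v16u8)__msa_fill_b(t0);  /* t0 in every lane */
      v16u8 thresh1 = (v16u8)__msa_fill_b(t1);
      /* Join the low doublewords: lanes 0-7 carry t0, lanes 8-15 carry t1. */
      thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);
      int i;
      for (i = 0; i < 16; ++i) printf("%u ", thresh0[i]); /* 4 x8, then 9 x8 */
      printf("\n");
      return 0;
    }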
loopfilter_8_msa.c
      19  v16u8 mask, hev, flat, thresh, b_limit, limit;            in vpx_lpf_horizontal_8_msa()
      20  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;                     in vpx_lpf_horizontal_8_msa()
      21  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;     in vpx_lpf_horizontal_8_msa()
      29  thresh = (v16u8)__msa_fill_b(*thresh_ptr);                in vpx_lpf_horizontal_8_msa()
      30  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);              in vpx_lpf_horizontal_8_msa()
      31  limit = (v16u8)__msa_fill_b(*limit_ptr);                  in vpx_lpf_horizontal_8_msa()
      38  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);     in vpx_lpf_horizontal_8_msa()
      58  p2_out = __msa_bmnz_v(p2, (v16u8)p2_filter8, flat);       in vpx_lpf_horizontal_8_msa()
      59  p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filter8, flat);   in vpx_lpf_horizontal_8_msa()
      60  p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filter8, flat);   in vpx_lpf_horizontal_8_msa()
    [all …]
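__msa_bmnz_v is the bit-select that merges the two filter strengths: where the "flat" mask byte is all-ones the strong-filter result wins, elsewhere the first operand survives untouched. A minimal demonstration with a hand-built mask (-mmsa):

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      v16u8 original = (v16u8)__msa_fill_b(10);
      v16u8 filtered = (v16u8)__msa_fill_b(99);
      v16u8 mask = { 0xFF, 0xFF, 0, 0, 0xFF, 0, 0, 0,
                     0, 0, 0, 0, 0, 0, 0, 0 };
      /* Bits come from the 2nd operand where the mask is 1, 1st elsewhere. */
      v16u8 out = __msa_bmnz_v(original, filtered, mask);
      int i;
      for (i = 0; i < 16; ++i) printf("%u ", out[i]); /* 99 99 10 10 99 10 ... */
      printf("\n");
      return 0;
    }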
loopfilter_16_msa.c
      20  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;                     in hz_lpf_t4_and_t8_16w()
      21  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;     in hz_lpf_t4_and_t8_16w()
      22  v16u8 flat, mask, hev, thresh, b_limit, limit;            in hz_lpf_t4_and_t8_16w()
      27  v16u8 zero = { 0 };                                       in hz_lpf_t4_and_t8_16w()
      32  thresh = (v16u8)__msa_fill_b(*thresh_ptr);                in hz_lpf_t4_and_t8_16w()
      33  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);              in hz_lpf_t4_and_t8_16w()
      34  limit = (v16u8)__msa_fill_b(*limit_ptr);                  in hz_lpf_t4_and_t8_16w()
      65  p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);       in hz_lpf_t4_and_t8_16w()
      66  p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);   in hz_lpf_t4_and_t8_16w()
      67  p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);   in hz_lpf_t4_and_t8_16w()
    [all …]
deblock_msa.c
      36  out0 = (v16u8)temp6; \
      37  out2 = (v16u8)temp7; \
      38  out4 = (v16u8)temp8; \
      39  out6 = (v16u8)temp9; \
      40  out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \
      41  out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \
      42  out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \
      43  out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \
      44  out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
      45  out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
    [all …]
sub_pixel_variance_msa.c
      23  v16u8 src_l0_m, src_l1_m; \
      47  v16u8 pred, src = { 0 };                        in avg_sse_diff_4width_msa()
      48  v16u8 ref = { 0 };                              in avg_sse_diff_4width_msa()
      80  v16u8 src0, src1, src2, src3;                   in avg_sse_diff_8width_msa()
      81  v16u8 ref0, ref1, ref2, ref3;                   in avg_sse_diff_8width_msa()
      82  v16u8 pred0, pred1;                             in avg_sse_diff_8width_msa()
     114  v16u8 src, ref, pred;                           in avg_sse_diff_16width_msa()
     169  v16u8 src0, src1, ref0, ref1, pred0, pred1;     in avg_sse_diff_32width_msa()
     227  v16u8 src0, src1, ref0, ref1, pred0, pred1;     in avg_sse_diff_32x64_msa()
     287  v16u8 src0, src1, src2, src3;                   in avg_sse_diff_64x32_msa()
    [all …]
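All of the avg_sse_diff helpers reduce to one core step: widen pixels to 16-bit lanes, subtract, then square-and-accumulate with a dot product. A sketch of that core over a single 16-byte block; the function name is hypothetical, it assumes a little-endian target and -mmsa, and it is not the exact upstream helper:

    #include <msa.h>
    #include <stdint.h>

    uint32_t SumSquareError16(const uint8_t* a, const uint8_t* b) {
      v16u8 zero = { 0 };
      v16u8 src = (v16u8)__msa_ld_b((v16i8*)a, 0);
      v16u8 ref = (v16u8)__msa_ld_b((v16i8*)b, 0);
      /* Interleave with zero to zero-extend bytes into halfword lanes. */
      v8i16 src_l = (v8i16)__msa_ilvr_b((v16i8)zero, (v16i8)src);
      v8i16 src_h = (v8i16)__msa_ilvl_b((v16i8)zero, (v16i8)src);
      v8i16 ref_l = (v8i16)__msa_ilvr_b((v16i8)zero, (v16i8)ref);
      v8i16 ref_h = (v8i16)__msa_ilvl_b((v16i8)zero, (v16i8)ref);
      v8i16 d_l = src_l - ref_l;
      v8i16 d_h = src_h - ref_h;
      /* dotp squares each difference and pairwise-sums into 32-bit lanes. */
      v4i32 sq = __msa_dotp_s_w(d_l, d_l);
      sq += __msa_dotp_s_w(d_h, d_h);
      return (uint32_t)(sq[0] + sq[1] + sq[2] + sq[3]);
    }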
/external/libaom/libaom/aom_dsp/mips/
loopfilter_4_msa.c
      19  v16u8 mask, hev, flat, thresh, b_limit, limit;                                 in aom_lpf_horizontal_4_msa()
      20  v16u8 p3, p2, p1, p0, q3, q2, q1, q0, p1_out, p0_out, q0_out, q1_out;          in aom_lpf_horizontal_4_msa()
      25  thresh = (v16u8)__msa_fill_b(*thresh_ptr);                                     in aom_lpf_horizontal_4_msa()
      26  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);                                   in aom_lpf_horizontal_4_msa()
      27  limit = (v16u8)__msa_fill_b(*limit_ptr);                                       in aom_lpf_horizontal_4_msa()
      47  v16u8 mask, hev, flat, thresh0, b_limit0, limit0, thresh1, b_limit1, limit1;   in aom_lpf_horizontal_4_dual_msa()
      48  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;                                          in aom_lpf_horizontal_4_dual_msa()
      53  thresh0 = (v16u8)__msa_fill_b(*thresh0_ptr);                                   in aom_lpf_horizontal_4_dual_msa()
      54  thresh1 = (v16u8)__msa_fill_b(*thresh1_ptr);                                   in aom_lpf_horizontal_4_dual_msa()
      55  thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0);                 in aom_lpf_horizontal_4_dual_msa()
    [all …]
loopfilter_8_msa.c
      19  v16u8 mask, hev, flat, thresh, b_limit, limit;            in aom_lpf_horizontal_8_msa()
      20  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;                     in aom_lpf_horizontal_8_msa()
      21  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;     in aom_lpf_horizontal_8_msa()
      29  thresh = (v16u8)__msa_fill_b(*thresh_ptr);                in aom_lpf_horizontal_8_msa()
      30  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);              in aom_lpf_horizontal_8_msa()
      31  limit = (v16u8)__msa_fill_b(*limit_ptr);                  in aom_lpf_horizontal_8_msa()
      38  flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat);     in aom_lpf_horizontal_8_msa()
      58  p2_out = __msa_bmnz_v(p2, (v16u8)p2_filter8, flat);       in aom_lpf_horizontal_8_msa()
      59  p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filter8, flat);   in aom_lpf_horizontal_8_msa()
      60  p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filter8, flat);   in aom_lpf_horizontal_8_msa()
    [all …]
loopfilter_16_msa.c
      19  v16u8 p3, p2, p1, p0, q3, q2, q1, q0;                     in aom_hz_lpf_t4_and_t8_16w()
      20  v16u8 p2_out, p1_out, p0_out, q0_out, q1_out, q2_out;     in aom_hz_lpf_t4_and_t8_16w()
      21  v16u8 flat, mask, hev, thresh, b_limit, limit;            in aom_hz_lpf_t4_and_t8_16w()
      26  v16u8 zero = { 0 };                                       in aom_hz_lpf_t4_and_t8_16w()
      31  thresh = (v16u8)__msa_fill_b(*thresh_ptr);                in aom_hz_lpf_t4_and_t8_16w()
      32  b_limit = (v16u8)__msa_fill_b(*b_limit_ptr);              in aom_hz_lpf_t4_and_t8_16w()
      33  limit = (v16u8)__msa_fill_b(*limit_ptr);                  in aom_hz_lpf_t4_and_t8_16w()
      64  p2_out = __msa_bmnz_v(p2, (v16u8)p2_filt8_r, flat);       in aom_hz_lpf_t4_and_t8_16w()
      65  p1_out = __msa_bmnz_v(p1_out, (v16u8)p1_filt8_r, flat);   in aom_hz_lpf_t4_and_t8_16w()
      66  p0_out = __msa_bmnz_v(p0_out, (v16u8)p0_filt8_r, flat);   in aom_hz_lpf_t4_and_t8_16w()
    [all …]
add_noise_msa.c
      29  v16u8 temp00, temp01, black_clamp, white_clamp;                       in aom_plane_add_noise_msa()
      30  v16u8 pos0, ref0, pos1, ref1;                                         in aom_plane_add_noise_msa()
      37  black_clamp = (v16u8)__msa_fill_b(blackclamp[0]);                     in aom_plane_add_noise_msa()
      38  white_clamp = (v16u8)__msa_fill_b(whiteclamp[0]);                     in aom_plane_add_noise_msa()
      45  temp00 = (v16u8)(temp00_s < pos0);                                    in aom_plane_add_noise_msa()
      46  pos0 = (v16u8)__msa_bmnz_v((v16u8)pos0, (v16u8)temp00_s, temp00);     in aom_plane_add_noise_msa()
      49  pos1 = (v16u8)__msa_bmnz_v((v16u8)pos1, (v16u8)temp01_s, temp01);     in aom_plane_add_noise_msa()
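The `temp00 = (v16u8)(temp00_s < pos0)` match is worth pausing on: a GNU C vector comparison yields all-ones or all-zeros per lane, which feeds straight into bmnz_v as a select mask, here clamping each value against a bound. The compare-then-select pair as a lane-wise min (-mmsa):

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      v16i8 a = (v16i8)__msa_fill_b(5);
      v16i8 b = (v16i8)__msa_fill_b(7);
      /* Lane-wise compare: 0xFF where a < b, 0x00 elsewhere. */
      v16u8 mask = (v16u8)(a < b);
      /* Take a where the mask is set, keep b elsewhere: min(a, b). */
      v16u8 vmin = __msa_bmnz_v((v16u8)b, (v16u8)a, mask);
      printf("%u\n", vmin[0]); /* 5 */
      return 0;
    }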
sub_pixel_variance_msa.c
      21  v16u8 src_l0_m, src_l1_m; \
      45  v16u8 pred, src = { 0 };                        in avg_sse_diff_4width_msa()
      46  v16u8 ref = { 0 };                              in avg_sse_diff_4width_msa()
      78  v16u8 src0, src1, src2, src3;                   in avg_sse_diff_8width_msa()
      79  v16u8 ref0, ref1, ref2, ref3;                   in avg_sse_diff_8width_msa()
      80  v16u8 pred0, pred1;                             in avg_sse_diff_8width_msa()
     112  v16u8 src, ref, pred;                           in avg_sse_diff_16width_msa()
     167  v16u8 src0, src1, ref0, ref1, pred0, pred1;     in avg_sse_diff_32width_msa()
     225  v16u8 src0, src1, ref0, ref1, pred0, pred1;     in avg_sse_diff_32x64_msa()
     285  v16u8 src0, src1, src2, src3;                   in avg_sse_diff_64x32_msa()
    [all …]
loopfilter_msa.h
      54  q0_out = __msa_xori_b((v16u8)q0_m, 0x80); \
      56  p0_out = __msa_xori_b((v16u8)p0_m, 0x80); \
      59  hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \
      63  q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \
      65  p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \
     112  q0_out = __msa_xori_b((v16u8)q0_m, 0x80); \
     114  p0_out = __msa_xori_b((v16u8)p0_m, 0x80); \
     117  hev_in = __msa_xori_b((v16u8)hev_in, 0xff); \
     121  q1_out = __msa_xori_b((v16u8)q1_m, 0x80); \
     123  p1_out = __msa_xori_b((v16u8)p1_m, 0x80); \
    [all …]
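The recurring xori 0x80 flips the top bit of every byte, translating unsigned pixels [0,255] into an offset-signed range [-128,127] so the filter math can use signed saturating ops; the same operation undoes the bias on the way out. The round trip in isolation (-mmsa):

    #include <msa.h>
    #include <stdint.h>
    #include <stdio.h>

    int main(void) {
      v16u8 pixels = (v16u8)__msa_fill_b(200);
      /* Unsigned 200 becomes offset-signed 72 (i.e. 200 - 128). */
      v16u8 biased = __msa_xori_b(pixels, 0x80);
      printf("%d\n", (int)(int8_t)biased[0]); /* 72 */
      /* Applying xori 0x80 again restores the unsigned value. */
      v16u8 back = __msa_xori_b(biased, 0x80);
      printf("%u\n", (unsigned)back[0]);      /* 200 */
      return 0;
    }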
/external/gemmlowp/fixedpoint/
fixedpoint_msa.h
      39  return reinterpret_cast<v4i32>(__builtin_msa_and_v(reinterpret_cast<v16u8>(a),
      40                                                     reinterpret_cast<v16u8>(b)));
      45  return reinterpret_cast<v8i16>(__builtin_msa_and_v(reinterpret_cast<v16u8>(a),
      46                                                     reinterpret_cast<v16u8>(b)));
      51  return reinterpret_cast<v4i32>(__builtin_msa_or_v(reinterpret_cast<v16u8>(a),
      52                                                    reinterpret_cast<v16u8>(b)));
      57  return reinterpret_cast<v8i16>(__builtin_msa_or_v(reinterpret_cast<v16u8>(a),
      58                                                    reinterpret_cast<v16u8>(b)));
      63  return reinterpret_cast<v4i32>(__builtin_msa_xor_v(reinterpret_cast<v16u8>(a),
      64                                                     reinterpret_cast<v16u8>(b)));
    [all …]
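MSA's AND.V/OR.V/XOR.V operate on raw bits, and compilers expose them only at v16u8, which is why gemmlowp reinterpret_casts every operand in and out. The same wrappers in C, with hypothetical helper names:

    #include <msa.h>

    /* Bitwise AND over 4 x int32 lanes via the untyped and_v builtin. */
    static inline v4i32 BitAndInt32x4(v4i32 a, v4i32 b) {
      return (v4i32)__msa_and_v((v16u8)a, (v16u8)b);
    }

    /* The or_v and xor_v wrappers in the listing follow the same shape. */
    static inline v4i32 BitOrInt32x4(v4i32 a, v4i32 b) {
      return (v4i32)__msa_or_v((v16u8)a, (v16u8)b);
    }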
/external/webp/src/dsp/
dec_msa.c
     138  v16u8 dest0, dest1, dest2, dest3;                                in TransformAC3()
     186  q = __msa_xori_b((v16u8)q_m, 0x80); \
     187  p = __msa_xori_b((v16u8)p_m, 0x80); \
     205  q0 = __msa_xori_b((v16u8)q0_m, 0x80); \
     207  p0 = __msa_xori_b((v16u8)p0_m, 0x80); \
     212  q1 = __msa_xori_b((v16u8)q1_m, 0x80); \
     214  p1 = __msa_xori_b((v16u8)p1_m, 0x80); \
     263  v16u8 p3_asub_p2_m, p2_asub_p1_m, p1_asub_p0_m, q1_asub_q0_m; \
     264  v16u8 p1_asub_q1_m, p0_asub_q0_m, q3_asub_q2_m, q2_asub_q1_m; \
     265  v16u8 flat_out; \
    [all …]
enc_msa.c
      86  v16u8 srcl0, srcl1, src0 = { 0 }, src1 = { 0 };                in FTransform_MSA()
     122  tmp4 = (v4i32)__msa_nor_v((v16u8)tmp5, (v16u8)tmp5);           in FTransform_MSA()
     124  tmp5 = (v4i32)__msa_and_v((v16u8)tmp5, (v16u8)tmp4);           in FTransform_MSA()
     263  const v16u8 A1 = { 0 };                                        in VE4()
     265  const v16u8 A = (v16u8)__msa_insert_d((v2i64)A1, 0, val_m);    in VE4()
     266  const v16u8 B = SLDI_UB(A, A, 1);                              in VE4()
     267  const v16u8 C = SLDI_UB(A, A, 2);                              in VE4()
     268  const v16u8 AC = __msa_ave_u_b(A, C);                          in VE4()
     269  const v16u8 B2 = __msa_ave_u_b(B, B);                          in VE4()
     270  const v16u8 R = __msa_aver_u_b(AC, B2);                        in VE4()
    [all …]
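VE4 leans on the difference between the two byte-average intrinsics: ave_u_b truncates ((a + b) >> 1) while aver_u_b rounds ((a + b + 1) >> 1); chaining them is how the 1-2-1 smoothing filter keeps correct rounding. The distinction in two lines (-mmsa):

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      v16u8 a = (v16u8)__msa_fill_b(10);
      v16u8 b = (v16u8)__msa_fill_b(13);
      v16u8 trunc = __msa_ave_u_b(a, b);  /* (10 + 13) >> 1     = 11 */
      v16u8 round = __msa_aver_u_b(a, b); /* (10 + 13 + 1) >> 1 = 12 */
      printf("%u %u\n", trunc[0], round[0]);
      return 0;
    }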
upsampling_msa.c
      31  v16u8 t0, t1; \
      60  dst = (v16u8)__msa_pckev_b((v16i8)b1, (v16i8)b0); \
      69  dst = (v16u8)__msa_pckev_b((v16i8)b0, (v16i8)b0); \
      82  dst = (v16u8)__msa_pckev_b((v16i8)a1, (v16i8)a0); \
      92  dst = (v16u8)__msa_pckev_b((v16i8)a0, (v16i8)a0); \
     103  dst = (v16u8)__msa_pckev_b((v16i8)b1, (v16i8)b0); \
     112  dst = (v16u8)__msa_pckev_b((v16i8)b0, (v16i8)b0); \
     116  const v16u8 zero = { 0 }; \
     119  const v16u8 in_y = LD_UB(y); \
     120  const v16u8 in_u = LD_UB(u); \
    [all …]
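Every one of those pckev_b lines is the same narrowing step: the upsampler's arithmetic runs in 16-bit lanes, and packing the even-indexed bytes of two halfword vectors collapses the results back into one byte row (valid when every value already fits in 8 bits). In isolation, on a little-endian target:

    #include <msa.h>
    #include <stdio.h>

    int main(void) {
      v8i16 b0 = { 10, 20, 30, 40, 50, 60, 70, 80 };
      v8i16 b1 = { 90, 100, 110, 120, 130, 140, 150, 160 };
      /* Even bytes of each halfword are the low bytes: the values themselves. */
      v16u8 dst = (v16u8)__msa_pckev_b((v16i8)b1, (v16i8)b0);
      int i;
      for (i = 0; i < 16; ++i) printf("%u ", dst[i]); /* 10 20 ... 160 */
      printf("\n");
      return 0;
    }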
/external/libvpx/libvpx/vp8/common/mips/msa/
loopfilter_filters_msa.c
      17  v16u8 p1_a_sub_q1, p0_a_sub_q0; \
      21  p1_a_sub_q1 = (v16u8)__msa_srli_b((v16i8)p1_a_sub_q1, 1); \
      24  mask = ((v16u8)mask <= b_limit); \
      50  q0 = __msa_xori_b((v16u8)q0_m, 0x80); \
      52  p0 = __msa_xori_b((v16u8)p0_m, 0x80); \
      57  q1 = __msa_xori_b((v16u8)q1_m, 0x80); \
      59  p1 = __msa_xori_b((v16u8)p1_m, 0x80); \
      86  q0_in = __msa_xori_b((v16u8)q0_m, 0x80); \
      87  p0_in = __msa_xori_b((v16u8)p0_m, 0x80); \
     138  q2 = __msa_xori_b((v16u8)q2_m, 0x80); \
    [all …]
/external/libpng/mips/
filter_msa_intrinsics.c
     251  #define LD_UB(...) LD_B(v16u8, __VA_ARGS__)
     257  #define LD_UB2(...) LD_B2(v16u8, __VA_ARGS__)
     263  #define LD_UB4(...) LD_B4(v16u8, __VA_ARGS__)
     266  #define ST_UB(...) ST_B(v16u8, __VA_ARGS__)
     272  #define ST_UB2(...) ST_B2(v16u8, __VA_ARGS__)
     278  #define ST_UB4(...) ST_B4(v16u8, __VA_ARGS__)
     307  out0 = (RTYPE) __msa_hsub_u_h((v16u8) in0, (v16u8) in0); \
     308  out1 = (RTYPE) __msa_hsub_u_h((v16u8) in1, (v16u8) in1); \
     318  #define SLDI_B2_0_UB(...) SLDI_B2_0(v16u8, __VA_ARGS__)
     326  #define SLDI_B3_0_UB(...) SLDI_B3_0(v16u8, __VA_ARGS__)
    [all …]
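libpng hides the raw builtins behind one-line macros so its filter kernels read as LD_UB(ptr) / ST_UB(vec, ptr). A plausible reconstruction of the LD_B/ST_B pair these wrappers expand to; this is an assumption modeled on common MSA macro headers, not a verbatim copy of filter_msa_intrinsics.c:

    #include <msa.h>

    /* Load/store one vector through a typed pointer; the _UB aliases pin
       the vector type to v16u8 so call sites stay terse. */
    #define LD_B(RTYPE, psrc) *((const RTYPE*)(psrc))
    #define LD_UB(...) LD_B(v16u8, __VA_ARGS__)

    #define ST_B(RTYPE, in, pdst) *((RTYPE*)(pdst)) = (in)
    #define ST_UB(...) ST_B(v16u8, __VA_ARGS__)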