/third_party/openh264/codec/common/mips/ |
D | deblock_msa.c |
    49: v8i16 p0_l, p0_r, p1_l, p1_r, p2_l, p2_r, q0_l, q0_r, q1_l, q1_r, q2_l, q2_r;  in DeblockLumaLt4V_msa()
    50: v8i16 tc_l, tc_r, negTc_l, negTc_r;  in DeblockLumaLt4V_msa()
    51: v8i16 iTc_l, iTc_r, negiTc_l, negiTc_r;  in DeblockLumaLt4V_msa()
    53: v8i16 t0, t1, t2, t3;  in DeblockLumaLt4V_msa()
    57: v8i16 const_1_h = __msa_ldi_h(1);  in DeblockLumaLt4V_msa()
    58: v8i16 const_4_h = __msa_ldi_h(4);  in DeblockLumaLt4V_msa()
    59: v8i16 const_not_255_h = __msa_ldi_h(~255);  in DeblockLumaLt4V_msa()
    86: MSA_ILVRL_B4(v8i16, zero, p0, zero, p1,  in DeblockLumaLt4V_msa()
    88: MSA_ILVRL_B4(v8i16, zero, p2, zero, q0,  in DeblockLumaLt4V_msa()
    90: MSA_ILVRL_B4(v8i16, zero, q1, zero, q2,  in DeblockLumaLt4V_msa()
    [all …]
|
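The deblock matches above lean on one recurring MSA idiom: 8-bit pixels are zero-extended into v8i16 lanes by interleaving with a zero register, so the filter arithmetic runs at 16-bit precision. A minimal sketch of that widening step, assuming only <msa.h>; the function and buffer names are illustrative, not from the file:

    #include <stdint.h>
    #include <msa.h>

    /* Zero-extend 16 unsigned pixel bytes into two v8i16 vectors (low and
     * high halves) -- the ILVR/ILVL-against-zero pattern that the
     * MSA_ILVRL_B4 calls above wrap. */
    static void widen_u8_to_h(const uint8_t *src, v8i16 *lo, v8i16 *hi) {
        const v16i8 zero = { 0 };
        const v16i8 px = __msa_ld_b((void *) src, 0);
        *lo = (v8i16) __msa_ilvr_b(zero, px);   /* bytes 0..7  -> halfwords */
        *hi = (v8i16) __msa_ilvl_b(zero, px);   /* bytes 8..15 -> halfwords */
    }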
/third_party/ffmpeg/libavcodec/mips/ |
D | mpegvideo_msa.c |
    30: v8i16 block_vec, qmul_vec, qadd_vec, sub;  in h263_dct_unquantize_msa()
    31: v8i16 add, mask, mul, zero_mask;  in h263_dct_unquantize_msa()
    42: add = (v8i16) __msa_bmnz_v((v16u8) add, (v16u8) sub, (v16u8) mask);  in h263_dct_unquantize_msa()
    43: block_vec = (v8i16) __msa_bmnz_v((v16u8) add, (v16u8) block_vec,  in h263_dct_unquantize_msa()
    69: v8i16 block_vec, block_neg, qscale_vec, mask;  in mpeg2_dct_unquantize_inter_msa()
    70: v8i16 block_org0, block_org1, block_org2, block_org3;  in mpeg2_dct_unquantize_inter_msa()
    71: v8i16 quant_m0, quant_m1, quant_m2, quant_m3;  in mpeg2_dct_unquantize_inter_msa()
    72: v8i16 sum, mul, zero_mask;  in mpeg2_dct_unquantize_inter_msa()
    83: block_vec = (v8i16) __msa_bmnz_v((v16u8) block_org0, (v16u8) block_neg,  in mpeg2_dct_unquantize_inter_msa()
    96: mul = (v8i16) __msa_pckev_h((v8i16) block_l, (v8i16) block_r);  in mpeg2_dct_unquantize_inter_msa()
    [all …]
|
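Both unquantize routines merge their add and subtract paths with __msa_bmnz_v, the MSA branchless select. A hedged sketch of the idiom (the helper name is mine, not the file's):

    #include <msa.h>

    /* Per-lane select: BMNZ.V computes (a & ~mask) | (b & mask), so lanes
     * whose mask bits are all-ones take 'b' and the rest keep 'a'.  A
     * comparison such as __msa_clt_s_h() is the usual mask source. */
    static v8i16 select_h(v8i16 a, v8i16 b, v8i16 mask) {
        return (v8i16) __msa_bmnz_v((v16u8) a, (v16u8) b, (v16u8) mask);
    }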
D | h263dsp_msa.c |
    33: v8i16 temp0, temp1, temp2;  in h263_h_loop_filter_msa()
    34: v8i16 diff0, diff2, diff4, diff6, diff8;  in h263_h_loop_filter_msa()
    35: v8i16 d0, a_d0, str_x2, str;  in h263_h_loop_filter_msa()
    42: temp0 = (v8i16) __msa_ilvr_b((v16i8) in0, (v16i8) in1);  in h263_h_loop_filter_msa()
    44: temp2 = (v8i16) __msa_ilvr_b((v16i8) in2, (v16i8) in3);  in h263_h_loop_filter_msa()
    51: diff2 = (v8i16) __msa_bmz_v((v16u8) diff2, (v16u8) temp0, (v16u8) temp0);  in h263_h_loop_filter_msa()
    55: diff2 = (v8i16) __msa_bmnz_v((v16u8) diff2, (v16u8) temp2, (v16u8) temp0);  in h263_h_loop_filter_msa()
    59: diff4 = (v8i16) __msa_bmz_v((v16u8) diff4, (v16u8) temp0, (v16u8) temp0);  in h263_h_loop_filter_msa()
    63: diff4 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) temp2, (v16u8) temp0);  in h263_h_loop_filter_msa()
    65: d0 = (v8i16) __msa_bmnz_v((v16u8) diff4, (v16u8) diff2, (v16u8) temp0);  in h263_h_loop_filter_msa()
    [all …]
|
D | vp8_idct_msa.c |
    50: v8i16 input0, input1;  in ff_vp8_idct_add_msa()
    84: v8i16 vec;  in ff_vp8_idct_dc_add_msa()
    85: v8i16 res0, res1, res2, res3;  in ff_vp8_idct_dc_add_msa()
    107: v8i16 input0, input1;  in ff_vp8_luma_dc_wht_msa()
    123: mb_dq_coeff[0] = __msa_copy_s_h((v8i16) vt0, 0);  in ff_vp8_luma_dc_wht_msa()
    124: mb_dq_coeff[16] = __msa_copy_s_h((v8i16) vt1, 0);  in ff_vp8_luma_dc_wht_msa()
    125: mb_dq_coeff[32] = __msa_copy_s_h((v8i16) vt2, 0);  in ff_vp8_luma_dc_wht_msa()
    126: mb_dq_coeff[48] = __msa_copy_s_h((v8i16) vt3, 0);  in ff_vp8_luma_dc_wht_msa()
    127: mb_dq_coeff[64] = __msa_copy_s_h((v8i16) vt0, 2);  in ff_vp8_luma_dc_wht_msa()
    128: mb_dq_coeff[80] = __msa_copy_s_h((v8i16) vt1, 2);  in ff_vp8_luma_dc_wht_msa()
    [all …]
|
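ff_vp8_luma_dc_wht_msa scatters single halfword lanes back to scalar memory with __msa_copy_s_h. A small sketch of the extraction, under the same <msa.h> assumption:

    #include <stdint.h>
    #include <msa.h>

    /* COPY_S.H sign-extends one element into a general register; the lane
     * index must be a compile-time constant in 0..7. */
    static int16_t lane0(v8i16 v) {
        return (int16_t) __msa_copy_s_h(v, 0);
    }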
D | hevc_lpf_sao_msa.c |
    47: v8i16 temp2;  in hevc_loopfilter_luma_hor_msa()
    48: v8i16 tc_pos, tc_neg;  in hevc_loopfilter_luma_hor_msa()
    49: v8i16 diff0, diff1, delta0, delta1, delta2, abs_delta0;  in hevc_loopfilter_luma_hor_msa()
    119: tc_pos = (v8i16) __msa_ilvev_d(cmp1, cmp0);  in hevc_loopfilter_luma_hor_msa()
    141: temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 3);  in hevc_loopfilter_luma_hor_msa()
    142: temp2 = (v8i16) (temp1 - p2_src);  in hevc_loopfilter_luma_hor_msa()
    144: dst0 = (v16u8) (temp2 + (v8i16) p2_src);  in hevc_loopfilter_luma_hor_msa()
    147: temp1 = (v8u16) __msa_srari_h((v8i16) temp1, 2);  in hevc_loopfilter_luma_hor_msa()
    148: temp2 = (v8i16) (temp1 - p1_src);  in hevc_loopfilter_luma_hor_msa()
    150: dst1 = (v16u8) (temp2 + (v8i16) p1_src);  in hevc_loopfilter_luma_hor_msa()
    [all …]
|
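The delta terms above are built with __msa_srari_h, an arithmetic right shift that adds 1 << (shift - 1) before shifting, so divisions by powers of two round to nearest instead of flooring. A minimal sketch, assuming <msa.h> (helper name illustrative):

    #include <msa.h>

    /* Rounded halving per lane: (a + b + 1) >> 1.  GCC's vector extension
     * allows '+' directly on v8i16 operands, as the matches above do. */
    static v8i16 avg2_round_h(v8i16 a, v8i16 b) {
        return __msa_srari_h(a + b, 1);
    }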
D | vp9_lpf_msa.c |
    131: p2_filt8_out = (v8i16) __msa_srari_h((v8i16) tmp1, 3); \
    134: p1_filt8_out = (v8i16) __msa_srari_h((v8i16) tmp1, 3); \
    140: p0_filt8_out = (v8i16) __msa_srari_h((v8i16) tmp0, 3); \
    146: q2_filt8_out = (v8i16) __msa_srari_h((v8i16) tmp1, 3); \
    150: q0_filt8_out = (v8i16) __msa_srari_h((v8i16) tmp1, 3); \
    155: q1_filt8_out = (v8i16) __msa_srari_h((v8i16) tmp1, 3); \
    264: v8i16 p2_filter8, p1_filter8, p0_filter8;  in ff_loop_filter_v_8_8_msa()
    265: v8i16 q0_filter8, q1_filter8, q2_filter8;  in ff_loop_filter_v_8_8_msa()
    339: v8i16 p2_filt8_r, p1_filt8_r, p0_filt8_r;  in ff_loop_filter_v_88_16_msa()
    340: v8i16 q0_filt8_r, q1_filt8_r, q2_filt8_r;  in ff_loop_filter_v_88_16_msa()
    [all …]
|
D | simple_idct_msa.c |
    27: v8i16 weights = { 0, 22725, 21407, 19266, 16383, 12873, 8867, 4520 };  in simple_idct_msa()
    28: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in simple_idct_msa()
    29: v8i16 w1, w3, w5, w7;  in simple_idct_msa()
    30: v8i16 const0, const1, const2, const3, const4, const5, const6, const7;  in simple_idct_msa()
    36: v8i16 select_vec, temp;  in simple_idct_msa()
    37: v8i16 zero = { 0 };  in simple_idct_msa()
    54: w2 = (v4i32) __msa_ilvr_h(zero, (v8i16) w2);  in simple_idct_msa()
    56: w4 = (v4i32) __msa_ilvr_h(zero, (v8i16) w4);  in simple_idct_msa()
    58: w6 = (v4i32) __msa_ilvr_h(zero, (v8i16) w6);  in simple_idct_msa()
    105: in0 = (v8i16) __msa_bmnz_v((v16u8) temp0_r, (v16u8) temp,  in simple_idct_msa()
    [all …]
|
D | hevc_macros_msa.h |
    27: v8i16 out_m; \
    40: out_m = __msa_dotp_s_w((v8i16) in0, (v8i16) filt0); \
    41: out_m = __msa_dpadd_s_w(out_m, (v8i16) in1, (v8i16) filt1); \
    48: v8i16 out_m; \
    59: out_m = __msa_dotp_s_w(in0, (v8i16) filt0); \
    60: out_m = __msa_dpadd_s_w(out_m, in1, (v8i16) filt1); \
|
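Those macros are built from the MSA multiply-accumulate pair: __msa_dotp_s_w multiplies adjacent signed halfword pairs and sums each pair into a 32-bit lane, and __msa_dpadd_s_w accumulates further pairs on top. A sketch of a 4-tap filter step in that shape, assuming the taps were pre-interleaved pairwise into filt0/filt1 (names illustrative):

    #include <msa.h>

    /* 4-tap FIR step: in0/in1 hold interleaved sample pairs and
     * filt0/filt1 the matching coefficient pairs. */
    static v4i32 fir4(v8i16 in0, v8i16 in1, v8i16 filt0, v8i16 filt1) {
        v4i32 acc = __msa_dotp_s_w(in0, filt0);   /* pairwise mul + add  */
        acc = __msa_dpadd_s_w(acc, in1, filt1);   /* accumulate 2nd pair */
        return acc;
    }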
D | h264dsp_msa.c |
    31: v8i16 src0_r, tmp0, wgt, denom, offset;  in avc_wgt_4x2_msa()
    41: src0_r = (v8i16) __msa_ilvr_b((v16i8) zero, (v16i8) src0);  in avc_wgt_4x2_msa()
    46: tmp0 = (v8i16) __msa_sat_u_h((v8u16) tmp0, 7);  in avc_wgt_4x2_msa()
    57: v8i16 src0_r, src1_r, tmp0, tmp1, wgt, denom, offset;  in avc_wgt_4x4_msa()
    84: v8i16 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;  in avc_wgt_4x8_msa()
    85: v8i16 wgt, denom, offset;  in avc_wgt_4x8_msa()
    117: v8i16 src0_r, src1_r, src2_r, src3_r, tmp0, tmp1, tmp2, tmp3;  in avc_wgt_8x4_msa()
    118: v8i16 wgt, denom, offset;  in avc_wgt_8x4_msa()
    148: v8i16 src0_r, src1_r, src2_r, src3_r, src4_r, src5_r, src6_r, src7_r;  in avc_wgt_8x8_msa()
    149: v8i16 tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7;  in avc_wgt_8x8_msa()
    [all …]
|
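The avc_wgt_* helpers end the same way: __msa_sat_u_h(x, 7) saturates each unsigned halfword to 8 significant bits before the lanes are packed back to bytes. A sketch of that clamp-and-pack tail, assuming the lanes are already non-negative (as they are after the weighted adds; the helper name is mine):

    #include <msa.h>

    /* Clamp 16-bit lanes to [0, 255] and keep the low byte of each lane;
     * SAT_U.H with immediate 7 saturates to (7 + 1) = 8 bits. */
    static v16i8 clamp_pack_u8(v8i16 lo, v8i16 hi) {
        lo = (v8i16) __msa_sat_u_h((v8u16) lo, 7);
        hi = (v8i16) __msa_sat_u_h((v8u16) hi, 7);
        return __msa_pckev_b((v16i8) hi, (v16i8) lo);
    }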
D | h264qpel_msa.c |
    53: v8i16 out0_m; \
    72: v8i16 out0_m; \
    85: out0_m = __msa_dotp_s_w((v8i16) in0, (v8i16) coeff0); \
    86: out0_m = __msa_dpadd_s_w(out0_m, (v8i16) in1, (v8i16) coeff1); \
    87: out0_m = __msa_dpadd_s_w(out0_m, (v8i16) in2, (v8i16) coeff2); \
    104: v8i16 hz_out0, hz_out1, vt_out0, vt_out1, out0, out1;  in avc_luma_hv_qrt_4x4_msa()
    168: v8i16 hz_out0, hz_out1, hz_out2, hz_out3, vt_out0, vt_out1, vt_out2;  in avc_luma_hv_qrt_8x8_msa()
    169: v8i16 vt_out3, tmp0, tmp1, tmp2, tmp3;  in avc_luma_hv_qrt_8x8_msa()
    279: v8i16 hz_out0, hz_out1, hz_out2, hz_out3, vt_out0, vt_out1, vt_out2;  in avc_luma_hv_qrt_16x16_msa()
    280: v8i16 vt_out3, out0, out1, out2, out3;  in avc_luma_hv_qrt_16x16_msa()
    [all …]
|
D | hevc_mc_biw_msa.c |
    39: out0_r = __msa_dpadd_s_w(offset, (v8i16) out0_r, (v8i16) wgt); \
    40: out1_r = __msa_dpadd_s_w(offset, (v8i16) out1_r, (v8i16) wgt); \
    41: out0_l = __msa_dpadd_s_w(offset, (v8i16) out0_l, (v8i16) wgt); \
    42: out1_l = __msa_dpadd_s_w(offset, (v8i16) out1_l, (v8i16) wgt); \
    63: out0_r = __msa_dpadd_s_w(offset, (v8i16) out0_r, (v8i16) wgt); \
    64: out1_r = __msa_dpadd_s_w(offset, (v8i16) out1_r, (v8i16) wgt); \
    65: out0_l = __msa_dpadd_s_w(offset, (v8i16) out0_l, (v8i16) wgt); \
    66: out1_l = __msa_dpadd_s_w(offset, (v8i16) out1_l, (v8i16) wgt); \
    101: v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };  in hevc_biwgt_copy_4w_msa()
    102: v8i16 dst0, dst1, dst2, dst3, weight_vec;  in hevc_biwgt_copy_4w_msa()
    [all …]
|
D | hevc_mc_bi_msa.c |
    73: v8i16 in0 = { 0 }, in1 = { 0 }, in2 = { 0 }, in3 = { 0 };  in hevc_bi_copy_4w_msa()
    74: v8i16 dst0, dst1, dst2, dst3;  in hevc_bi_copy_4w_msa()
    82: dst0 = (v8i16) __msa_ilvr_b(zero, src0);  in hevc_bi_copy_4w_msa()
    88: dst0 = (v8i16) __msa_pckev_b((v16i8) dst0, (v16i8) dst0);  in hevc_bi_copy_4w_msa()
    99: dst0 = (v8i16) __msa_pckev_b((v16i8) dst1, (v16i8) dst0);  in hevc_bi_copy_4w_msa()
    142: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in hevc_bi_copy_6w_msa()
    143: v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;  in hevc_bi_copy_6w_msa()
    193: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in hevc_bi_copy_8w_msa()
    194: v8i16 dst0, dst1, dst2, dst3, dst4, dst5, dst6, dst7;  in hevc_bi_copy_8w_msa()
    281: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in hevc_bi_copy_12w_msa()
    [all …]
|
D | idctdsp_msa.c |
    28: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in put_pixels_clamped_msa()
    52: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in put_signed_pixels_clamped_msa()
    86: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in add_pixels_clamped_msa()
    90: v8i16 zero = { 0 };  in add_pixels_clamped_msa()
    101: in0 += (v8i16) pix0;  in add_pixels_clamped_msa()
    102: in1 += (v8i16) pix1;  in add_pixels_clamped_msa()
    103: in2 += (v8i16) pix2;  in add_pixels_clamped_msa()
    104: in3 += (v8i16) pix3;  in add_pixels_clamped_msa()
    105: in4 += (v8i16) pix4;  in add_pixels_clamped_msa()
    106: in5 += (v8i16) pix5;  in add_pixels_clamped_msa()
    [all …]
|
D | vp9_idct_msa.c |
    69: v8i16 k0_m = __msa_fill_h(cnst0); \
    73: k0_m = __msa_ilvev_h((v8i16) s0_m, k0_m); \
    79: out0 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m); \
    83: out1 = __msa_pckev_h((v8i16) s0_m, (v8i16) s1_m); \
    106: v8i16 dst_m; \
    111: dst_m = __msa_pckev_h((v8i16) tp1_m, (v8i16) tp0_m); \
    119: v8i16 cnst0_m, cnst1_m, cnst2_m, cnst3_m, cnst4_m; \
    120: v8i16 vec0_m, vec1_m, vec2_m, vec3_m, s0_m, s1_m; \
    121: v8i16 coeff0_m = { cospi_2_64, cospi_6_64, cospi_10_64, cospi_14_64, \
    123: v8i16 coeff1_m = { cospi_8_64, -cospi_8_64, cospi_16_64, \
    [all …]
|
D | me_cmp_msa.c |
    252: comp0 = (v8u16) __msa_srari_h((v8i16) comp0, 2);  in sad_hv_bilinear_filter_8width_msa()
    258: comp1 = (v8u16) __msa_srari_h((v8i16) comp1, 2);  in sad_hv_bilinear_filter_8width_msa()
    267: comp2 = (v8u16) __msa_srari_h((v8i16) comp2, 2);  in sad_hv_bilinear_filter_8width_msa()
    273: comp3 = (v8u16) __msa_srari_h((v8i16) comp3, 2);  in sad_hv_bilinear_filter_8width_msa()
    399: v8i16 res_l0_m, res_l1_m; \
    508: v8i16 sum = { 0 };  in hadamard_diff_8x8_msa()
    509: v8i16 zero = { 0 };  in hadamard_diff_8x8_msa()
    534: sum = __msa_asub_s_h((v8i16) temp3, (v8i16) temp7);  in hadamard_diff_8x8_msa()
    535: sum += __msa_asub_s_h((v8i16) temp2, (v8i16) temp6);  in hadamard_diff_8x8_msa()
    536: sum += __msa_asub_s_h((v8i16) temp1, (v8i16) temp5);  in hadamard_diff_8x8_msa()
    [all …]
|
D | hevcdsp_msa.c |
    40: v8i16 in0;  in hevc_copy_4w_msa()
    45: in0 = (v8i16) __msa_ilvr_b(zero, src0);  in hevc_copy_4w_msa()
    50: v8i16 in0, in1;  in hevc_copy_4w_msa()
    61: v8i16 in0, in1, in2, in3;  in hevc_copy_4w_msa()
    87: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in hevc_copy_6w_msa()
    112: v8i16 in0, in1;  in hevc_copy_8w_msa()
    122: v8i16 in0, in1, in2, in3;  in hevc_copy_8w_msa()
    132: v8i16 in0, in1, in2, in3, in4, in5;  in hevc_copy_8w_msa()
    146: v8i16 in0, in1, in2, in3, in4, in5, in6, in7;  in hevc_copy_8w_msa()
    172: v8i16 in0, in1, in0_r, in1_r, in2_r, in3_r;  in hevc_copy_12w_msa()
    [all …]
|
/third_party/ffmpeg/libavutil/mips/ |
D | generic_macros_msa.h |
    39: #define LD_SH(...) LD_V(v8i16, __VA_ARGS__)
    47: #define ST_SH(...) ST_V(v8i16, __VA_ARGS__)
    284: #define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)
    303: #define LD_SH4(...) LD_V4(v8i16, __VA_ARGS__)
    322: #define LD_SH6(...) LD_V6(v8i16, __VA_ARGS__)
    342: #define LD_SH8(...) LD_V8(v8i16, __VA_ARGS__)
    354: #define LD_SH16(...) LD_V16(v8i16, __VA_ARGS__)
    370: #define ST_SH2(...) ST_V2(v8i16, __VA_ARGS__)
    380: #define ST_SH4(...) ST_V4(v8i16, __VA_ARGS__)
    388: #define ST_SH6(...) ST_V6(v8i16, __VA_ARGS__)
    [all …]
|
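The LD_SH*/ST_SH* names above are thin aliases that pin the vector type of generic LD_V*/ST_V* macros, which is why every generic macro takes the type as its first parameter. A simplified reconstruction of the pattern (not the verbatim header):

    #include <msa.h>

    #define LD_V(RTYPE, psrc) (*((RTYPE *) (psrc)))
    #define LD_SH(...) LD_V(v8i16, __VA_ARGS__)     /* signed halfwords */

    #define LD_V2(RTYPE, psrc, stride, out0, out1)  \
    {                                               \
        out0 = LD_V(RTYPE, (psrc));                 \
        out1 = LD_V(RTYPE, (psrc) + (stride));      \
    }
    #define LD_SH2(...) LD_V2(v8i16, __VA_ARGS__)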
/third_party/skia/third_party/externals/libwebp/src/dsp/ |
D | msa_macro.h |
    26: #define ADDVI_H(a, b) __msa_addvi_h((v8i16)a, b)
    29: #define SRAI_H(a, b) __msa_srai_h((v8i16)a, b)
    31: #define SRLI_H(a, b) __msa_srli_h((v8i16)a, b)
    54: #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
    66: #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
    260: #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
    355: #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
    370: const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \
    371: const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \
    372: const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \
    [all …]
|
D | enc_msa.c |
    46: v8i16 input0, input1;  in ITransformOne()
    85: v8i16 t0, t1, t2, t3;  in FTransform_MSA()
    87: const v8i16 mask0 = { 0, 4, 8, 12, 1, 5, 9, 13 };  in FTransform_MSA()
    88: const v8i16 mask1 = { 3, 7, 11, 15, 2, 6, 10, 14 };  in FTransform_MSA()
    89: const v8i16 mask2 = { 4, 0, 5, 1, 6, 2, 7, 3 };  in FTransform_MSA()
    90: const v8i16 mask3 = { 0, 4, 1, 5, 2, 6, 3, 7 };  in FTransform_MSA()
    91: const v8i16 cnst0 = { 2217, -5352, 2217, -5352, 2217, -5352, 2217, -5352 };  in FTransform_MSA()
    92: const v8i16 cnst1 = { 5352, 2217, 5352, 2217, 5352, 2217, 5352, 2217 };  in FTransform_MSA()
    135: v8i16 in0 = { 0 };  in FTransformWHT_MSA()
    136: v8i16 in1 = { 0 };  in FTransformWHT_MSA()
    [all …]
|
D | upsampling_msa.c |
    25: const v8i16 t0 = (v8i16)__msa_ilvr_b((v16i8)zero, (v16i8)in); \
    26: out0 = (v4u32)__msa_ilvr_h((v8i16)zero, t0); \
    27: out1 = (v4u32)__msa_ilvl_h((v8i16)zero, t0); \
    49: out0 = (v8u16)__msa_pckod_h((v8i16)temp1, (v8i16)temp0); \
    53: const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
    54: const v8i16 a0 = __msa_adds_s_h((v8i16)y0, (v8i16)v0); \
    55: const v8i16 a1 = __msa_adds_s_h((v8i16)y1, (v8i16)v1); \
    56: v8i16 b0 = __msa_subs_s_h(a0, const_a); \
    57: v8i16 b1 = __msa_subs_s_h(a1, const_a); \
    64: const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
    [all …]
|
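The YUV-to-RGB macros above depend on saturating halfword arithmetic: __msa_adds_s_h and __msa_subs_s_h clamp at the int16 limits instead of wrapping, so subtracting the bias constant (the 14234 above) cannot overflow mid-computation. A minimal sketch under those assumptions (the helper name is mine):

    #include <stdint.h>
    #include <msa.h>

    /* Saturating (y + v) - bias, the shape of the calc macros above. */
    static v8i16 biased_sum(v8i16 y, v8i16 v, int16_t bias) {
        const v8i16 b = __msa_fill_h(bias);
        return __msa_subs_s_h(__msa_adds_s_h(y, v), b);
    }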
/third_party/flutter/skia/third_party/externals/libwebp/src/dsp/ |
D | msa_macro.h |
    26: #define ADDVI_H(a, b) __msa_addvi_h((v8i16)a, b)
    29: #define SRAI_H(a, b) __msa_srai_h((v8i16)a, b)
    31: #define SRLI_H(a, b) __msa_srli_h((v8i16)a, b)
    54: #define LD_SH(...) LD_H(v8i16, __VA_ARGS__)
    66: #define ST_SH(...) ST_H(v8i16, __VA_ARGS__)
    260: #define LD_SH2(...) LD_H2(v8i16, __VA_ARGS__)
    355: #define ST_SH2(...) ST_H2(v8i16, __VA_ARGS__)
    370: const uint16_t out0_m = __msa_copy_s_h((v8i16)in, stidx); \
    371: const uint16_t out1_m = __msa_copy_s_h((v8i16)in, stidx + 1); \
    372: const uint16_t out2_m = __msa_copy_s_h((v8i16)in, stidx + 2); \
    [all …]
|
D | enc_msa.c |
    46: v8i16 input0, input1;  in ITransformOne()
    85: v8i16 t0, t1, t2, t3;  in FTransform_MSA()
    87: const v8i16 mask0 = { 0, 4, 8, 12, 1, 5, 9, 13 };  in FTransform_MSA()
    88: const v8i16 mask1 = { 3, 7, 11, 15, 2, 6, 10, 14 };  in FTransform_MSA()
    89: const v8i16 mask2 = { 4, 0, 5, 1, 6, 2, 7, 3 };  in FTransform_MSA()
    90: const v8i16 mask3 = { 0, 4, 1, 5, 2, 6, 3, 7 };  in FTransform_MSA()
    91: const v8i16 cnst0 = { 2217, -5352, 2217, -5352, 2217, -5352, 2217, -5352 };  in FTransform_MSA()
    92: const v8i16 cnst1 = { 5352, 2217, 5352, 2217, 5352, 2217, 5352, 2217 };  in FTransform_MSA()
    135: v8i16 in0 = { 0 };  in FTransformWHT_MSA()
    136: v8i16 in1 = { 0 };  in FTransformWHT_MSA()
    [all …]
|
D | upsampling_msa.c |
    25: const v8i16 t0 = (v8i16)__msa_ilvr_b((v16i8)zero, (v16i8)in); \
    26: out0 = (v4u32)__msa_ilvr_h((v8i16)zero, t0); \
    27: out1 = (v4u32)__msa_ilvl_h((v8i16)zero, t0); \
    49: out0 = (v8u16)__msa_pckod_h((v8i16)temp1, (v8i16)temp0); \
    53: const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
    54: const v8i16 a0 = __msa_adds_s_h((v8i16)y0, (v8i16)v0); \
    55: const v8i16 a1 = __msa_adds_s_h((v8i16)y1, (v8i16)v1); \
    56: v8i16 b0 = __msa_subs_s_h(a0, const_a); \
    57: v8i16 b1 = __msa_subs_s_h(a1, const_a); \
    64: const v8i16 const_a = (v8i16)__msa_fill_h(14234); \
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/PowerPC/ |
D | PPCInstrAltivec.td |
    409: [(set v8i16:$vD, (int_ppc_altivec_mfvscr))]>;
    420: [(set v8i16:$vD, (int_ppc_altivec_lvehx xoaddr:$src))]>;
    447: [(int_ppc_altivec_stvehx v8i16:$rS, xoaddr:$dst)]>;
    473: def VMHADDSHS : VA1a_Int_Ty<32, "vmhaddshs", int_ppc_altivec_vmhaddshs, v8i16>;
    475: v8i16>;
    476: def VMLADDUHM : VA1a_Int_Ty<34, "vmladduhm", int_ppc_altivec_vmladduhm, v8i16>;
    500: [(set v8i16:$vD, (add v8i16:$vA, v8i16:$vB))]>;
    507: def VADDSHS : VX1_Int_Ty<832, "vaddshs", int_ppc_altivec_vaddshs, v8i16>;
    510: def VADDUHS : VX1_Int_Ty<576, "vadduhs", int_ppc_altivec_vadduhs, v8i16>;
    566: def VAVGSH : VX1_Int_Ty<1346, "vavgsh", int_ppc_altivec_vavgsh, v8i16>;
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/ARM/ |
D | ARMGenDAGISel.inc |
    2773: /* 5579*/ /*SwitchType*/ 25, MVT::v8i16,// ->5606
    2780: MVT::v8i16, 5/*#Ops*/, 0, 1, 2, 3, 4,
    2781: …// Src: (or:{ *:[v8i16] } MQPR:{ *:[v8i16] }:$Qm, (xor:{ *:[v8i16] } MQPR:{ *:[v8i16] }:$Qn, (bitc…
    2782: … // Dst: (MVE_VORN:{ *:[v8i16] } MQPR:{ *:[v8i16] }:$Qm, MQPR:{ *:[v8i16] }:$Qn)
    2960: /* 5938*/ OPC_CheckType, MVT::v8i16,
    2967: MVT::v8i16, 5/*#Ops*/, 0, 1, 2, 3, 4,
    2968: …v8i16] } MQPR:{ *:[v8i16] }:$Qm, (xor:{ *:[v8i16] } (bitconvert:{ *:[v8i16] } (ARMvmovImm:{ *:[v16…
    2969: // Dst: (MVE_VORN:{ *:[v8i16] } MQPR:{ *:[v8i16] }:$Qm, MQPR:{ *:[v8i16] }:$Qn)
    2988: /* 5994*/ OPC_CheckType, MVT::v8i16,
    2995: MVT::v8i16, 5/*#Ops*/, 1, 0, 2, 3, 4,
    [all …]
|