/third_party/cmsis/CMSIS/DSP/Source/QuaternionMathFunctions/
D | arm_quaternion2rotation_f32.c |
   85  f32x4_t vec0, vec1, vec2, vec3;   in arm_quaternion2rotation_f32() local
   92  vec0 = vld1q(pInputQuaternions);   in arm_quaternion2rotation_f32()
   95  vec1 = vmulq(vec0, vec0);   in arm_quaternion2rotation_f32()
   98  vec2 = vmulq_n_f32(vec0, vgetq_lane(vec0, 0));   in arm_quaternion2rotation_f32()
  105  q2q3 = vgetq_lane(vec0, 2) * vgetq_lane(vec0, 3);   in arm_quaternion2rotation_f32()
  109  vec3 = vmulq_n_f32(vec0, vgetq_lane(vec0, 1));   in arm_quaternion2rotation_f32()
  114  vec0 = vsetq_lane(vgetq_lane(vec1, 0) + vgetq_lane(vec1, 1), vec0, 0);   in arm_quaternion2rotation_f32()
  115  vec0 = vsetq_lane(vgetq_lane(vec0, 0) - vgetq_lane(vec1, 2), vec0, 0);   in arm_quaternion2rotation_f32()
  116  vec0 = vsetq_lane(vgetq_lane(vec0, 0) - vgetq_lane(vec1, 3), vec0, 0);   in arm_quaternion2rotation_f32()
  117  vec0 = vsetq_lane(vgetq_lane(vec3, 2) - vgetq_lane(vec2, 3), vec0, 1);   in arm_quaternion2rotation_f32()
  [all …]
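These hits are from the MVE/Helium build of arm_quaternion2rotation_f32(): one quaternion is loaded into an f32x4_t, squared elementwise, and the rotation-matrix entries are assembled lane by lane (lines 114-116 build the first diagonal term, line 117 the q1*q2 - q0*q3 term behind the next entry, with the doubling presumably in the truncated lines). A scalar reference of the math being vectorized (the helper below is ours, not part of the CMSIS-DSP API):

    /* Convert a unit quaternion q = (q0, q1, q2, q3) into a row-major
       3x3 rotation matrix.  Hedged scalar sketch of the code above. */
    static void quaternion_to_rotation(const float q[4], float r[9])
    {
        const float q0q0 = q[0] * q[0], q1q1 = q[1] * q[1];
        const float q2q2 = q[2] * q[2], q3q3 = q[3] * q[3];
        const float q0q1 = q[0] * q[1], q0q2 = q[0] * q[2], q0q3 = q[0] * q[3];
        const float q1q2 = q[1] * q[2], q1q3 = q[1] * q[3], q2q3 = q[2] * q[3];

        r[0] = q0q0 + q1q1 - q2q2 - q3q3;   /* lines 114-116 above */
        r[1] = 2.0f * (q1q2 - q0q3);        /* line 117, before doubling */
        r[2] = 2.0f * (q1q3 + q0q2);
        r[3] = 2.0f * (q1q2 + q0q3);
        r[4] = q0q0 - q1q1 + q2q2 - q3q3;
        r[5] = 2.0f * (q2q3 - q0q1);
        r[6] = 2.0f * (q1q3 - q0q2);
        r[7] = 2.0f * (q2q3 + q0q1);
        r[8] = q0q0 - q1q1 - q2q2 + q3q3;
    }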
/third_party/flutter/skia/third_party/externals/libpng/mips/
D | filter_msa_intrinsics.c |
  664  v8i16 vec0, vec1, vec2;   in png_read_filter_row_paeth4_msa() local
  696  ILVR_B2_SH(src2, src6, src1, src6, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  697  HSUB_UB2_SH(vec0, vec1, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  698  vec2 = vec0 + vec1;   in png_read_filter_row_paeth4_msa()
  699  ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);   in png_read_filter_row_paeth4_msa()
  700  CMP_AND_SELECT(vec0, vec1, vec2, src1, src2, src6, src10);   in png_read_filter_row_paeth4_msa()
  701  ILVR_B2_SH(src3, src7, src10, src7, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  702  HSUB_UB2_SH(vec0, vec1, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  703  vec2 = vec0 + vec1;   in png_read_filter_row_paeth4_msa()
  704  ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);   in png_read_filter_row_paeth4_msa()
  [all …]
/third_party/libpng/mips/
D | filter_msa_intrinsics.c |
  664  v8i16 vec0, vec1, vec2;   in png_read_filter_row_paeth4_msa() local
  696  ILVR_B2_SH(src2, src6, src1, src6, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  697  HSUB_UB2_SH(vec0, vec1, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  698  vec2 = vec0 + vec1;   in png_read_filter_row_paeth4_msa()
  699  ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);   in png_read_filter_row_paeth4_msa()
  700  CMP_AND_SELECT(vec0, vec1, vec2, src1, src2, src6, src10);   in png_read_filter_row_paeth4_msa()
  701  ILVR_B2_SH(src3, src7, src10, src7, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  702  HSUB_UB2_SH(vec0, vec1, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  703  vec2 = vec0 + vec1;   in png_read_filter_row_paeth4_msa()
  704  ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);   in png_read_filter_row_paeth4_msa()
  [all …]
/third_party/skia/third_party/externals/libpng/mips/
D | filter_msa_intrinsics.c |
  664  v8i16 vec0, vec1, vec2;   in png_read_filter_row_paeth4_msa() local
  696  ILVR_B2_SH(src2, src6, src1, src6, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  697  HSUB_UB2_SH(vec0, vec1, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  698  vec2 = vec0 + vec1;   in png_read_filter_row_paeth4_msa()
  699  ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);   in png_read_filter_row_paeth4_msa()
  700  CMP_AND_SELECT(vec0, vec1, vec2, src1, src2, src6, src10);   in png_read_filter_row_paeth4_msa()
  701  ILVR_B2_SH(src3, src7, src10, src7, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  702  HSUB_UB2_SH(vec0, vec1, vec0, vec1);   in png_read_filter_row_paeth4_msa()
  703  vec2 = vec0 + vec1;   in png_read_filter_row_paeth4_msa()
  704  ADD_ABS_H3_SH(vec0, vec1, vec2, vec0, vec1, vec2);   in png_read_filter_row_paeth4_msa()
  [all …]
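filter_msa_intrinsics.c is the same file vendored in all three libpng copies above, so its hits repeat verbatim. The MSA sequence (HSUB for the signed differences, ADD_ABS for the three distances, CMP_AND_SELECT for the choice) vectorizes the PNG Paeth predictor, which in scalar C is (a sketch, not libpng's exact code):

    /* Paeth: predict p = a + b - c from left (a), up (b), up-left (c),
       then return whichever neighbour is closest to p. */
    static unsigned char paeth_predict(unsigned char a, unsigned char b,
                                       unsigned char c)
    {
        int p  = a + b - c;
        int pa = p > a ? p - a : a - p;   /* |p - a| = |b - c|      */
        int pb = p > b ? p - b : b - p;   /* |p - b| = |a - c|      */
        int pc = p > c ? p - c : c - p;   /* |p - c| = |a + b - 2c| */

        if (pa <= pb && pa <= pc)
            return a;
        return (pb <= pc) ? b : c;
    }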
/third_party/ffmpeg/libavcodec/mips/
D | hevcpred_msa.c |
   33  #define HEVC_PRED_PLANAR_16x2(src0_r, src0_l, tmp0, tmp1, vec0, vec1, \   argument
   39  MUL4(mul_val_h0, vec0, mul_val_h2, vec0, mul_val_h0, vec1, \
   68  v8i16 vec0, vec1, vec2;   in hevc_intra_pred_vert_4x4_msa() local
   79  vec0 = __msa_fill_h(src_left[-1]);   in hevc_intra_pred_vert_4x4_msa()
   83  vec2 -= vec0;   in hevc_intra_pred_vert_4x4_msa()
  103  v8i16 vec0, vec1, vec2;   in hevc_intra_pred_vert_8x8_msa() local
  118  vec0 = __msa_fill_h(src_left[-1]);   in hevc_intra_pred_vert_8x8_msa()
  122  vec2 -= vec0;   in hevc_intra_pred_vert_8x8_msa()
  158  v8i16 vec0, vec1, vec2, vec3;   in hevc_intra_pred_vert_16x16_msa() local
  170  vec0 = __msa_fill_h(src_left[-1]);   in hevc_intra_pred_vert_16x16_msa()
  [all …]
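The hevc_intra_pred_vert_* hits broadcast src_left[-1] (the top-left neighbour) and subtract it, which matches the HEVC boundary smoothing of the first predicted column in vertical mode. A scalar sketch of our reading (the helper and its size gating are assumptions; in the real decoder the filter applies only to small luma blocks):

    #include <stdint.h>

    static uint8_t clip255(int v) { return v < 0 ? 0 : (v > 255 ? 255 : v); }

    /* Vertical intra prediction: copy the top row into every row; for
       small blocks, smooth column 0 with (left[y] - left[-1]) >> 1. */
    static void intra_pred_vert(uint8_t *dst, int stride, int size,
                                const uint8_t *top, const uint8_t *left)
    {
        for (int y = 0; y < size; y++) {
            for (int x = 0; x < size; x++)
                dst[y * stride + x] = top[x];
            if (size < 32)   /* boundary filter, luma only in practice */
                dst[y * stride] = clip255(top[0] + ((left[y] - left[-1]) >> 1));
        }
    }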
D | hevcdsp_msa.c |
  456  v16i8 vec0, vec1, vec2, vec3;   in hevc_hz_8t_4w_msa() local
  478  vec0, vec1, vec2, vec3);   in hevc_hz_8t_4w_msa()
  480  DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,   in hevc_hz_8t_4w_msa()
  483  vec0, vec1, vec2, vec3);   in hevc_hz_8t_4w_msa()
  485  DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,   in hevc_hz_8t_4w_msa()
  488  vec0, vec1, vec2, vec3);   in hevc_hz_8t_4w_msa()
  490  DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,   in hevc_hz_8t_4w_msa()
  493  vec0, vec1, vec2, vec3);   in hevc_hz_8t_4w_msa()
  495  DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2, filt3,   in hevc_hz_8t_4w_msa()
  511  v16i8 vec0, vec1, vec2, vec3;   in hevc_hz_8t_8w_msa() local
  [all …]
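hevc_hz_8t_*w_msa() are the horizontal 8-tap interpolation passes: VSHF_B builds the sliding 8-pixel windows and DPADD_SB4_SH accumulates signed-byte dot products into 16-bit sums. A minimal scalar equivalent (hypothetical helper; the caller is assumed to provide the 3-pixel left margin):

    #include <stdint.h>

    /* 8-tap horizontal FIR producing the 16-bit intermediate values
       HEVC keeps before any rounding or weighting. */
    static void hz_filter_8tap(int16_t *dst, const uint8_t *src, int width,
                               const int8_t filt[8])
    {
        for (int x = 0; x < width; x++) {
            int sum = 0;
            for (int k = 0; k < 8; k++)
                sum += src[x + k - 3] * filt[k];   /* what DPADD accumulates */
            dst[x] = (int16_t) sum;
        }
    }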
D | hevc_mc_biw_msa.c |
   31  #define HEVC_BIW_RND_CLIP2(in0, in1, vec0, vec1, wgt, rnd, offset, \   argument
   36  ILVR_H2_SW(in0, vec0, in1, vec1, out0_r, out1_r); \
   37  ILVL_H2_SW(in0, vec0, in1, vec1, out0_l, out1_l); \
   49  #define HEVC_BIW_RND_CLIP4(in0, in1, in2, in3, vec0, vec1, vec2, vec3, \   argument
   52  HEVC_BIW_RND_CLIP2(in0, in1, vec0, vec1, wgt, rnd, offset, out0, out1); \
   56  #define HEVC_BIW_RND_CLIP2_MAX_SATU(in0, in1, vec0, vec1, wgt, rnd, \   argument
   61  ILVR_H2_SW(in0, vec0, in1, vec1, out0_r, out1_r); \
   62  ILVL_H2_SW(in0, vec0, in1, vec1, out0_l, out1_l); \
   72  #define HEVC_BIW_RND_CLIP4_MAX_SATU(in0, in1, in2, in3, vec0, vec1, vec2, \   argument
   76  HEVC_BIW_RND_CLIP2_MAX_SATU(in0, in1, vec0, vec1, wgt, rnd, offset, \
  [all …]
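The HEVC_BIW_RND_CLIP* macros interleave two streams of 16-bit intermediate predictions (ILVR_H2_SW/ILVL_H2_SW), apply the bi-prediction weights, round, and clip. Per sample this reduces to roughly the following (a sketch of the arithmetic, not ffmpeg's exact macro expansion):

    #include <stdint.h>

    /* Weighted bi-prediction: combine two intermediate predictions with
       their weights, add the rounding offset, shift, and clip to 8-bit. */
    static uint8_t biweight_sample(int16_t in0, int16_t in1,
                                   int w0, int w1, int offset, int rnd)
    {
        int v = (in0 * w0 + in1 * w1 + offset) >> rnd;
        return (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
    }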
D | vp9_mc_msa.c |
   52  #define FILT_8TAP_DPADD_S_H(vec0, vec1, vec2, vec3, \   argument
   57  tmp0 = __msa_dotp_s_h((v16i8) vec0, (v16i8) filt0); \
  846  v16u8 mask0, mask1, mask2, mask3, vec0, vec1;   in common_hv_8ht_8vt_8w_msa() local
  920  vec0 = PCKEV_XORI128_UB(tmp0, tmp1);   in common_hv_8ht_8vt_8w_msa()
  922  ST_D4(vec0, vec1, 0, 1, 0, 1, dst, dst_stride);   in common_hv_8ht_8vt_8w_msa()
 1030  v8i16 filt, vec0, vec1, vec2, vec3;   in common_hz_8t_and_aver_dst_4x8_msa() local
 1051  mask3, filt0, filt1, filt2, filt3, vec0, vec1);   in common_hz_8t_and_aver_dst_4x8_msa()
 1056  SRARI_H4_SH(vec0, vec1, vec2, vec3, 7);   in common_hz_8t_and_aver_dst_4x8_msa()
 1057  SAT_SH4_SH(vec0, vec1, vec2, vec3, 7);   in common_hz_8t_and_aver_dst_4x8_msa()
 1058  PCKEV_B4_UB(vec0, vec0, vec1, vec1, vec2, vec2, vec3, vec3,   in common_hz_8t_and_aver_dst_4x8_msa()
  [all …]
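SRARI_H4_SH(..., 7) is a rounding right shift by VP9's filter scale of 7 bits, followed by signed saturation (SAT_SH4_SH) and a pack to bytes (PCKEV_B4_UB). A scalar view of that post-filter step (sketch only; the MSA path runs on XOR-0x80 sign-flipped pixels, so its exact saturation differs):

    #include <stdint.h>

    /* Round an 8-tap filter sum by +64 >> 7 and clamp to pixel range. */
    static uint8_t round_sat_pack(int sum)
    {
        int v = (sum + 64) >> 7;
        return (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
    }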
D | vp8_mc_msa.c |
  107  #define FILT_4TAP_DPADD_S_H(vec0, vec1, filt0, filt1) \   argument
  111  tmp0 = __msa_dotp_s_h((v16i8) vec0, (v16i8) filt0); \
  587  v16u8 mask0, mask1, mask2, vec0, vec1;   in ff_put_vp8_epel8_h6v6_msa() local
  651  vec0 = PCKEV_XORI128_UB(tmp0, tmp1);   in ff_put_vp8_epel8_h6v6_msa()
  653  ST_D4(vec0, vec1, 0, 1, 0, 1, dst, dst_stride);   in ff_put_vp8_epel8_h6v6_msa()
 1052  v8i16 filt, filt_vt0, filt_vt1, tmp0, tmp1, vec0, vec1, vec2;   in ff_put_vp8_epel4_h4v4_msa() local
 1070  vec0 = (v8i16) __msa_ilvev_b((v16i8) hz_out1, (v16i8) hz_out0);   in ff_put_vp8_epel4_h4v4_msa()
 1083  tmp0 = FILT_4TAP_DPADD_S_H(vec0, vec1, filt_vt0, filt_vt1);   in ff_put_vp8_epel4_h4v4_msa()
 1098  vec0 = vec2;   in ff_put_vp8_epel4_h4v4_msa()
 1113  v8i16 vec0, vec1, vec2, vec3, vec4;   in ff_put_vp8_epel8_h4v4_msa() local
  [all …]
D | h264idct_msa.c |
   45  v8i16 vec0, vec1, vec2, vec3;   in avc_deq_idct_luma_dc_msa() local
   56  BUTTERFLY_4(tmp0, tmp2, tmp3, tmp1, vec0, vec3, vec2, vec1);   in avc_deq_idct_luma_dc_msa()
   57  BUTTERFLY_4(vec0, vec1, vec2, vec3, hres0, hres3, hres2, hres1);   in avc_deq_idct_luma_dc_msa()
   59  BUTTERFLY_4(hres0, hres1, hres3, hres2, vec0, vec3, vec2, vec1);   in avc_deq_idct_luma_dc_msa()
   60  BUTTERFLY_4(vec0, vec1, vec2, vec3, vres0, vres1, vres2, vres3);   in avc_deq_idct_luma_dc_msa()
   72  PCKEV_H2_SH(vres1_r, vres0_r, vres3_r, vres2_r, vec0, vec1);   in avc_deq_idct_luma_dc_msa()
   74  out0 = __msa_copy_s_h(vec0, 0);   in avc_deq_idct_luma_dc_msa()
   75  out1 = __msa_copy_s_h(vec0, 1);   in avc_deq_idct_luma_dc_msa()
   76  out2 = __msa_copy_s_h(vec0, 2);   in avc_deq_idct_luma_dc_msa()
   77  out3 = __msa_copy_s_h(vec0, 3);   in avc_deq_idct_luma_dc_msa()
  [all …]
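avc_deq_idct_luma_dc_msa() applies BUTTERFLY_4 twice per direction, i.e. the two add/sub stages of the 4-point Hadamard transform that H.264 runs over the 4x4 luma DC coefficients, first across rows and then across columns. To the best of our reading, ffmpeg's BUTTERFLY_4 macro computes, per element:

    /* One 4-point butterfly stage (our scalar rendering of the macro). */
    static void butterfly4(int in0, int in1, int in2, int in3,
                           int *out0, int *out1, int *out2, int *out3)
    {
        *out0 = in0 + in3;
        *out1 = in1 + in2;
        *out2 = in1 - in2;
        *out3 = in0 - in3;
    }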
D | hevc_mc_bi_msa.c |
   31  #define HEVC_BI_RND_CLIP2(in0, in1, vec0, vec1, rnd_val, out0, out1) \   argument
   33  ADDS_SH2_SH(vec0, in0, vec1, in1, out0, out1); \
   39  vec0, vec1, vec2, vec3, rnd_val, \   argument
   42  HEVC_BI_RND_CLIP2(in0, in1, vec0, vec1, rnd_val, out0, out1); \
   46  #define HEVC_BI_RND_CLIP2_MAX_SATU(in0, in1, vec0, vec1, rnd_val, \   argument
   49  ADDS_SH2_SH(vec0, in0, vec1, in1, out0, out1); \
   54  #define HEVC_BI_RND_CLIP4_MAX_SATU(in0, in1, in2, in3, vec0, vec1, vec2, \   argument
   57  HEVC_BI_RND_CLIP2_MAX_SATU(in0, in1, vec0, vec1, rnd_val, out0, out1); \
  542  v16i8 vec0, vec1, vec2, vec3;   in hevc_hz_bi_8t_4w_msa() local
  576  VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0, vec1);   in hevc_hz_bi_8t_4w_msa()
  [all …]
D | hevc_mc_uni_msa.c |
  481  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in common_hz_8t_12w_msa() local
  511  VSHF_B2_SB(src0, src0, src1, src1, mask00, mask00, vec0, vec1);   in common_hz_8t_12w_msa()
  513  DOTP_SB4_SH(vec0, vec1, vec2, vec3, filt0, filt0, filt0, filt0, out0,   in common_hz_8t_12w_msa()
  515  VSHF_B2_SB(src0, src0, src1, src1, mask2, mask2, vec0, vec1);   in common_hz_8t_12w_msa()
  517  DPADD_SB4_SH(vec0, vec1, vec2, vec3, filt2, filt2, filt2, filt2, out0,   in common_hz_8t_12w_msa()
  529  VSHF_B2_SB(src4, src5, src6, src7, mask0, mask0, vec0, vec1);   in common_hz_8t_12w_msa()
  530  DOTP_SB2_SH(vec0, vec1, filt0, filt0, out4, out5);   in common_hz_8t_12w_msa()
  617  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10;   in common_hz_8t_24w_msa() local
  641  VSHF_B2_SB(src0, src0, src1, src1, mask0, mask0, vec0, vec8);   in common_hz_8t_24w_msa()
  644  DOTP_SB4_SH(vec0, vec8, vec2, vec9, filt0, filt0, filt0, filt0, out0,   in common_hz_8t_24w_msa()
  [all …]
D | vp9_lpf_msa.c |
 1205  v8i16 vec0, vec1, vec2, vec3;   in ff_loop_filter_h_4_8_msa() local
 1218  ILVR_B2_SH(p0, p1, q1, q0, vec0, vec1);   in ff_loop_filter_h_4_8_msa()
 1219  ILVRL_H2_SH(vec1, vec0, vec2, vec3);   in ff_loop_filter_h_4_8_msa()
 1283  v8i16 vec0, vec1, vec2, vec3, vec4;   in ff_loop_filter_h_8_8_msa() local
 1309  ILVR_B2_SH(p0_out, p1_out, q1_out, q0_out, vec0, vec1);   in ff_loop_filter_h_8_8_msa()
 1310  ILVRL_H2_SH(vec1, vec0, vec2, vec3);   in ff_loop_filter_h_8_8_msa()
 1336  ILVR_B2_SH(p1, p2, q0, p0, vec0, vec1);   in ff_loop_filter_h_8_8_msa()
 1337  ILVRL_H2_SH(vec1, vec0, vec2, vec3);   in ff_loop_filter_h_8_8_msa()
 1366  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in ff_loop_filter_h_88_16_msa() local
 1380  vec0 = (v8i16) __msa_fill_b(thresh_ptr >> 8);   in ff_loop_filter_h_88_16_msa()
  [all …]
D | hevc_mc_uniw_msa.c |
  602  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10;   in hevc_hz_uniwgt_8t_4w_msa() local
  638  vec0, vec1, vec2, vec3);   in hevc_hz_uniwgt_8t_4w_msa()
  645  dst01 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,   in hevc_hz_uniwgt_8t_4w_msa()
  680  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in hevc_hz_uniwgt_8t_8w_msa() local
  716  vec0, vec1, vec2, vec3);   in hevc_hz_uniwgt_8t_8w_msa()
  723  dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,   in hevc_hz_uniwgt_8t_8w_msa()
  757  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in hevc_hz_uniwgt_8t_12w_msa() local
  799  vec0, vec1, vec2, vec3);   in hevc_hz_uniwgt_8t_12w_msa()
  806  dst0 = HEVC_FILT_8TAP_SH(vec0, vec1, vec2, vec3, filt0, filt1, filt2,   in hevc_hz_uniwgt_8t_12w_msa()
  815  vec0, vec1, vec2, vec3);   in hevc_hz_uniwgt_8t_12w_msa()
  [all …]
D | vp9_idct_msa.c |
 1533  v8i16 vec0, vec1, vec2, vec3, loc0, loc1, loc2, loc3;   in vp9_idct_butterfly_transpose_store() local
 1537  vec0 = LD_SH(tmp_odd_buf);   in vp9_idct_butterfly_transpose_store()
 1546  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m0, m4, m2, m6);   in vp9_idct_butterfly_transpose_store()
 1551  ST_SH((loc3 - vec0), (tmp_buf + 19 * 8));   in vp9_idct_butterfly_transpose_store()
 1554  vec0 = LD_SH(tmp_odd_buf + 4 * 8);   in vp9_idct_butterfly_transpose_store()
 1563  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, m1, m5, m3, m7);   in vp9_idct_butterfly_transpose_store()
 1568  ST_SH((loc3 - vec0), (tmp_buf + 17 * 8));   in vp9_idct_butterfly_transpose_store()
 1571  vec0 = LD_SH(tmp_odd_buf + 2 * 8);   in vp9_idct_butterfly_transpose_store()
 1580  ADD4(loc0, vec3, loc1, vec2, loc2, vec1, loc3, vec0, n0, n4, n2, n6);   in vp9_idct_butterfly_transpose_store()
 1585  ST_SH((loc3 - vec0), (tmp_buf + 18 * 8));   in vp9_idct_butterfly_transpose_store()
  [all …]
D | h264pred_msa.c |
  147  v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8;   in intra_predict_plane_8x8_msa() local
  181  vec0 = vec5;   in intra_predict_plane_8x8_msa()
  182  vec0 += vec4;   in intra_predict_plane_8x8_msa()
  183  vec1 = vec0 + vec3;   in intra_predict_plane_8x8_msa()
  189  SRA_4V(vec0, vec1, vec6, vec7, 5);   in intra_predict_plane_8x8_msa()
  190  PCKEV_H2_SH(vec1, vec0, vec7, vec6, vec10, vec11);   in intra_predict_plane_8x8_msa()
  216  v4i32 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, res_add;   in intra_predict_plane_16x16_msa() local
  258  vec0 = vec7;   in intra_predict_plane_16x16_msa()
  260  vec0 += vec4;   in intra_predict_plane_16x16_msa()
  263  vec1 = vec0 + vec6;   in intra_predict_plane_16x16_msa()
  [all …]
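intra_predict_plane_*_msa() accumulate the plane-mode linear ramp a vector at a time; SRA_4V(..., 5) is the spec's final shift by 5. For the 16x16 luma case the scalar core being vectorized is (inner loop only; a, b, c come from the top/left neighbours as defined in the standard):

    #include <stdint.h>

    /* H.264 plane (Intra_16x16, mode 3) prediction inner loop:
       pred[y][x] = clip((a + b*(x - 7) + c*(y - 7) + 16) >> 5). */
    static void plane_pred_16x16(uint8_t *dst, int stride, int a, int b, int c)
    {
        for (int y = 0; y < 16; y++)
            for (int x = 0; x < 16; x++) {
                int v = (a + b * (x - 7) + c * (y - 7) + 16) >> 5;
                dst[y * stride + x] = (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
            }
    }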
D | vp9_intra_msa.c |
  364  v8u16 src_top_left, vec0, vec1, vec2, vec3;   in ff_tm_4x4_msa() local
  376  HADD_UB4_UH(src0, src1, src2, src3, vec0, vec1, vec2, vec3);   in ff_tm_4x4_msa()
  377  IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec0, vec1);   in ff_tm_4x4_msa()
  379  SAT_UH4_UH(vec0, vec1, vec2, vec3, 7);   in ff_tm_4x4_msa()
  380  PCKEV_B2_SB(vec1, vec0, vec3, vec2, tmp0, tmp1);   in ff_tm_4x4_msa()
  391  v8u16 src_top_left, vec0, vec1, vec2, vec3;   in ff_tm_8x8_msa() local
  408  HADD_UB4_UH(src0, src1, src2, src3, vec0, vec1, vec2, vec3);   in ff_tm_8x8_msa()
  409  IPRED_SUBS_UH2_UH(src_top_left, src_top_left, vec0, vec1);   in ff_tm_8x8_msa()
  411  SAT_UH4_UH(vec0, vec1, vec2, vec3, 7);   in ff_tm_8x8_msa()
  412  PCKEV_B2_SB(vec1, vec0, vec3, vec2, tmp0, tmp1);   in ff_tm_8x8_msa()
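ff_tm_*_msa() implement VP9's TM (TrueMotion) intra mode: widening adds of top and left samples (HADD_UB4_UH), a saturating subtract of the top-left sample (IPRED_SUBS_UH2_UH), then saturate and pack back to bytes. The scalar form:

    #include <stdint.h>

    /* TM prediction: pred[y][x] = clip(left[y] + top[x] - top_left). */
    static void tm_predict(uint8_t *dst, int stride, int size,
                           const uint8_t *top, const uint8_t *left,
                           uint8_t top_left)
    {
        for (int y = 0; y < size; y++)
            for (int x = 0; x < size; x++) {
                int v = left[y] + top[x] - top_left;
                dst[y * stride + x] = (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
            }
    }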
D | hevc_idct_msa.c |
   68  v4i32 vec0, vec1, vec2, vec3, vec4, vec5; \
   74  cnst83, cnst36, vec0, vec2, vec1, vec3); \
   77  sum0 = vec0 + vec2; \
   78  sum1 = vec0 - vec2; \
  280  v4i32 vec0, vec1, vec2, vec3; \
  285  vec0 = in_r0 + in_r1; \
  287  res0 = vec0 * cnst29; \
  293  res3 = vec0 * cnst55; \
  355  v8i16 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7;   in hevc_idct_16x16_msa() local
  410  vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7);   in hevc_idct_16x16_msa()
  [all …]
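The cnst83/cnst36 hits are the odd butterfly of HEVC's 4-point inverse DCT; cnst29/cnst55 belong to the separate 4x4 DST (taps 29, 55, 74, 84). A scalar sketch of one 4-point IDCT column (shift is 7 in the first pass and 12 in the second):

    #include <stdint.h>

    /* 4-point HEVC inverse DCT: even part from 64*(s0 +/- s2), odd part
       from the {83, 36} pair, then even +/- odd with rounding. */
    static void idct4_col(const int16_t s[4], int32_t d[4], int shift)
    {
        int32_t e0 = 64 * (s[0] + s[2]);
        int32_t e1 = 64 * (s[0] - s[2]);
        int32_t o0 = 83 * s[1] + 36 * s[3];
        int32_t o1 = 36 * s[1] - 83 * s[3];
        int32_t rnd = 1 << (shift - 1);

        d[0] = (e0 + o0 + rnd) >> shift;
        d[1] = (e1 + o1 + rnd) >> shift;
        d[2] = (e1 - o1 + rnd) >> shift;
        d[3] = (e0 - o0 + rnd) >> shift;
    }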
D | h264qpel_msa.c |
   36  #define AVC_CALC_DPADD_B_6PIX_2COEFF_SH(vec0, vec1, vec2, vec3, vec4, vec5, \   argument
   43  ILVRL_B2_SB(vec5, vec0, tmp0_m, tmp1_m); \
  749  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10;   in ff_put_h264_qpel16_mc10_msa() local
  771  VSHF_B2_SB(src0, src0, src0, src1, mask0, mask3, vec0, vec3);   in ff_put_h264_qpel16_mc10_msa()
  777  HADD_SB4_SH(vec0, vec3, vec6, vec9, res0, res1, res2, res3);   in ff_put_h264_qpel16_mc10_msa()
  782  VSHF_B2_SB(src4, src4, src4, src5, mask0, mask3, vec0, vec3);   in ff_put_h264_qpel16_mc10_msa()
  788  HADD_SB4_SH(vec0, vec3, vec6, vec9, res4, res5, res6, res7);   in ff_put_h264_qpel16_mc10_msa()
  817  v16i8 vec0, vec1, vec2, vec3, vec4, vec5, vec6, vec7, vec8, vec9, vec10;   in ff_put_h264_qpel16_mc30_msa() local
  839  VSHF_B2_SB(src0, src0, src0, src1, mask0, mask3, vec0, vec3);   in ff_put_h264_qpel16_mc30_msa()
  845  HADD_SB4_SH(vec0, vec3, vec6, vec9, res0, res1, res2, res3);   in ff_put_h264_qpel16_mc30_msa()
  [all …]
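The qpel hits gather 6-pixel windows with VSHF_B and sum them with HADD_SB4_SH: H.264's 6-tap (1, -5, 20, 20, -5, 1) luma half-pel filter. ff_put_h264_qpel16_mc10/mc30 then average the half-pel result with the nearest full pel to get the two horizontal quarter-pel positions. Scalar form of one half-pel sample (helper name is ours):

    #include <stdint.h>

    /* 6-tap half-pel interpolation at position b; src points at pixel G
       of the spec's E F G H I J window. */
    static uint8_t halfpel_h_sample(const uint8_t *src)
    {
        int b = src[-2] - 5 * src[-1] + 20 * src[0]
              + 20 * src[1] - 5 * src[2] + src[3];
        int v = (b + 16) >> 5;
        return (uint8_t) (v < 0 ? 0 : (v > 255 ? 255 : v));
    }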
/third_party/skia/third_party/externals/spirv-cross/shaders-no-opt/asm/vert/
D | constant-composite-extract.asm.vert |
  44  %vec0 = OpConstantComposite %v4float %float_1 %float_2 %float_3 %float_4
  46  %cmat = OpConstantComposite %m4float %vec0 %vec1 %vec0 %vec1
  51  %e0 = OpCompositeExtract %float %vec0 0
  52  %e1 = OpCompositeExtract %float %vec0 1
  53  %e2 = OpCompositeExtract %float %vec0 2
  54  %e3 = OpCompositeExtract %float %vec0 3
/third_party/skia/third_party/externals/spirv-cross/shaders-hlsl-no-opt/frag/
D | spec-constant.frag |
  72  float vec0[c + 3][8];
  74  vec0[0][0] = 10.0;
  79  FragColor = vec4(t0 + t1) + vec0[0][0] + vec1[0] + foo.elems[c];
/third_party/skia/third_party/externals/spirv-cross/reference/shaders-hlsl-no-opt/frag/
D | spec-constant.frag |
  127  float vec0[_111][8];
  128  vec0[0][0] = 10.0f;
  133  FragColor = (((t0 + t1).xxxx + vec0[0][0].xxxx) + vec1[0].xxxx) + foo.elems[c].xxxx;
/third_party/skia/third_party/externals/spirv-cross/shaders-reflection/frag/
D | spec-constant.vk.frag |
  73  float vec0[c + 3][8];
  77  FragColor = vec4(t0 + t1) + vec0[0][0] + vec1[0] + foo.elems[c];
/third_party/skia/third_party/externals/spirv-cross/shaders-no-opt/vulkan/frag/
D | spec-constant.vk.frag |
  72  float vec0[c + 3][8];
  76  FragColor = vec4(t0 + t1) + vec0[0][0] + vec1[0] + foo.elems[c];
/third_party/skia/third_party/externals/spirv-cross/reference/shaders-no-opt/vulkan/frag/
D | spec-constant.vk.frag.vk |
  102  float vec0[_118][8];
  105  FragColor = ((vec4(t0 + t1) + vec4(vec0[0][0])) + vec4(vec1[0])) + vec4(foo.elems[c]);