/external/llvm/lib/Target/X86/ |
D | X86InstrFMA.td |
    44   (ins VR128:$src1, VR128:$src2, VR128:$src3),
    46   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    48   VR128:$src1, VR128:$src3)))]>;
    52   (ins VR128:$src1, VR128:$src2, f128mem:$src3),
    54   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    56   (MemFrag128 addr:$src3))))]>;
    60   (ins VR256:$src1, VR256:$src2, VR256:$src3),
    62   "\t{$src3, $src2, $dst|$dst, $src2, $src3}"),
    64   VR256:$src3)))]>, VEX_L;
    68   (ins VR256:$src1, VR256:$src2, f256mem:$src3),
    [all …]
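The FMA patterns above all take three sources and fuse a multiply with an add in a single rounding step, over register, memory (f128mem/f256mem), and 256-bit (VEX_L) variants. As a point of reference only, a scalar C model of one lane; the 132/213/231 operand orderings that distinguish the real opcodes are not visible in this truncated listing, so the operand roles below are illustrative:

    #include <math.h>

    /* Scalar model of one fused multiply-add lane. The hardware forms differ
     * only in which of the three sources supplies each factor and the addend. */
    static double fma_lane(double src1, double src2, double src3) {
      return fma(src1, src2, src3); /* one rounding, unlike (src1*src2)+src3 */
    }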
|
D | X86InstrXOP.td |
    150  (ins VR128:$src1, VR128:$src2, VR128:$src3),
    152  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
    154  (Int VR128:$src1, VR128:$src2, VR128:$src3))]>, XOP_4V, VEX_I8IMM;
    156  (ins VR128:$src1, i128mem:$src2, VR128:$src3),
    158  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
    161  VR128:$src3))]>, XOP_4V, VEX_I8IMM;
    201  (ins VR128:$src1, VR128:$src2, u8imm:$src3),
    203  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
    207  (ins VR128:$src1, i128mem:$src2, u8imm:$src3),
    209  "\t{$src3, $src2, $src1, $dst|$dst, $src1, $src2, $src3}"),
    [all …]
|
/external/libvpx/libvpx/vpx_dsp/mips/ |
D | vpx_convolve_copy_msa.c |
    18   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_width8_msa() local
    22   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width8_msa()
    28   out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
    39   LD_UB4(src, src_stride, src0, src1, src2, src3);  in copy_width8_msa()
    45   out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
    51   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_width8_msa()
    57   out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
    70   LD_UB4(src, src_stride, src0, src1, src2, src3);  in copy_width8_msa()
    75   out3 = __msa_copy_u_d((v2i64)src3, 0);  in copy_width8_msa()
    101  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_16multx8mult_msa() local
    [all …]
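For width-8 blocks the kernel loads full 16-byte vectors but stores only the low doubleword of each (__msa_copy_u_d(..., 0)). A plain-C sketch of the same data movement, ignoring the vector batching; the function name is ours, not libvpx's:

    #include <stdint.h>
    #include <string.h>

    /* Scalar equivalent of the copy_width8 path: 8 bytes per row. */
    static void copy_width8(const uint8_t *src, int src_stride,
                            uint8_t *dst, int dst_stride, int height) {
      int row;
      for (row = 0; row < height; ++row) {
        memcpy(dst, src, 8);
        src += src_stride;
        dst += dst_stride;
      }
    }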
|
D | vpx_convolve8_horiz_msa.c |
    19   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;  in common_hz_8t_4x4_msa() local
    33   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x4_msa()
    34   XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_4x4_msa()
    35   HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_4x4_msa()
    47   v16i8 src0, src1, src2, src3;  in common_hz_8t_4x8_msa() local
    62   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
    63   XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
    65   HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_4x8_msa()
    67   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
    68   XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_4x8_msa()
    [all …]
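The LD_SB4 / XORI_B4_128_SB / HORIZ_8TAP_4WID_4VECS_FILT sequence is a vectorized 8-tap horizontal FIR; the XORI step biases unsigned bytes into signed range for the packed multiplies. A minimal scalar sketch of one output sample, assuming libvpx's usual 7-bit filter precision and byte clipping (helper names here are ours):

    #include <stdint.h>

    enum { FILTER_BITS = 7 }; /* assumed libvpx convolution precision */

    static uint8_t clip_pixel(int v) { return v < 0 ? 0 : v > 255 ? 255 : v; }

    /* One sample of the 8-tap horizontal filter; src points at the pixel
     * under the filter center, taps come from the caller's filter table. */
    static uint8_t horiz_8tap(const uint8_t *src, const int16_t *filter) {
      int k, sum = 0;
      for (k = 0; k < 8; ++k) sum += src[k - 3] * filter[k];
      return clip_pixel((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
    }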
|
D | vpx_convolve8_avg_horiz_msa.c |
    20   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;  in common_hz_8t_and_aver_dst_4x4_msa() local
    36   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x4_msa()
    37   XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x4_msa()
    38   HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_and_aver_dst_4x4_msa()
    55   v16i8 src0, src1, src2, src3, filt0, filt1, filt2, filt3;  in common_hz_8t_and_aver_dst_4x8_msa() local
    71   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
    72   XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
    75   HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, mask0, mask1, mask2, mask3,  in common_hz_8t_and_aver_dst_4x8_msa()
    77   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
    78   XORI_B4_128_SB(src0, src1, src2, src3);  in common_hz_8t_and_aver_dst_4x8_msa()
    [all …]
|
D | vpx_convolve8_vert_msa.c |
    19   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_4w_msa() local
    31   LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_4w_msa()
    34   ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_4w_msa()
    36   ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_4w_msa()
    70   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_8w_msa() local
    81   LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_8w_msa()
    82   XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_8w_msa()
    84   ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_8w_msa()
    86   ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_8w_msa()
    124  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_16w_msa() local
    [all …]
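The vertical kernels prime a seven-row window (LD_SB7) and interleave adjacent rows (ILVR_B4_SB / ILVR_B2_SB) so each column's 8-tap filter becomes a packed dot product. The scalar counterpart, reusing clip_pixel and FILTER_BITS from the horizontal sketch above (same caveats apply):

    /* One sample of the 8-tap vertical filter: the same taps as the
     * horizontal case, stepped down a column by the row stride. */
    static uint8_t vert_8tap(const uint8_t *src, int stride,
                             const int16_t *filter) {
      int k, sum = 0;
      for (k = 0; k < 8; ++k) sum += src[(k - 3) * stride] * filter[k];
      return clip_pixel((sum + (1 << (FILTER_BITS - 1))) >> FILTER_BITS);
    }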
|
D | vpx_convolve_msa.h |
    48   #define HORIZ_8TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \  argument
    55   VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
    57   VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
    59   VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
    61   VSHF_B2_SB(src0, src1, src2, src3, mask3, mask3, vec6_m, vec7_m); \
    66   #define HORIZ_8TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \  argument
    74   VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
    78   VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec2_m, vec3_m); \
    82   VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec6_m, vec7_m); \
    86   VSHF_B2_SB(src2, src2, src3, src3, mask3, mask3, vec6_m, vec7_m); \
|
D | vpx_convolve8_avg_vert_msa.c |
    22   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_4w_msa() local
    34   LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_and_aver_dst_4w_msa()
    37   ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_and_aver_dst_4w_msa()
    39   ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_and_aver_dst_4w_msa()
    82   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_8w_msa() local
    93   LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_and_aver_dst_8w_msa()
    96   XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_vt_8t_and_aver_dst_8w_msa()
    97   ILVR_B4_SB(src1, src0, src3, src2, src5, src4, src2, src1, src10_r, src32_r,  in common_vt_8t_and_aver_dst_8w_msa()
    99   ILVR_B2_SB(src4, src3, src6, src5, src43_r, src65_r);  in common_vt_8t_and_aver_dst_8w_msa()
    143  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_vt_8t_and_aver_dst_16w_mult_msa() local
    [all …]
|
D | sub_pixel_variance_msa.c |
    52   uint32_t src0, src1, src2, src3;  in avg_sse_diff_4width_msa() local
    62   LW4(src_ptr, src_stride, src0, src1, src2, src3);  in avg_sse_diff_4width_msa()
    67   INSERT_W4_UB(src0, src1, src2, src3, src);  in avg_sse_diff_4width_msa()
    88   v16u8 src0, src1, src2, src3;  in avg_sse_diff_8width_msa() local
    97   LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);  in avg_sse_diff_8width_msa()
    102  PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,  in avg_sse_diff_8width_msa()
    299  v16u8 src0, src1, src2, src3;  in avg_sse_diff_64x32_msa() local
    309  LD_UB4(src_ptr, 16, src0, src1, src2, src3);  in avg_sse_diff_64x32_msa()
    313  AVER_UB4_UB(src0, pred0, src1, pred1, src2, pred2, src3, pred3,  in avg_sse_diff_64x32_msa()
    314  src0, src1, src2, src3);  in avg_sse_diff_64x32_msa()
    [all …]
|
D | variance_msa.c |
    43   uint32_t src0, src1, src2, src3;  in sse_diff_4width_msa() local
    52   LW4(src_ptr, src_stride, src0, src1, src2, src3);  in sse_diff_4width_msa()
    57   INSERT_W4_UB(src0, src1, src2, src3, src);  in sse_diff_4width_msa()
    72   v16u8 src0, src1, src2, src3;  in sse_diff_8width_msa() local
    78   LD_UB4(src_ptr, src_stride, src0, src1, src2, src3);  in sse_diff_8width_msa()
    83   PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,  in sse_diff_8width_msa()
    229  v16u8 src0, src1, src2, src3;  in sse_diff_64x32_msa() local
    236  LD_UB4(src_ptr, 16, src0, src1, src2, src3);  in sse_diff_64x32_msa()
    243  CALC_MSE_AVG_B(src3, ref3, var, avg1);  in sse_diff_64x32_msa()
    245  LD_UB4(src_ptr, 16, src0, src1, src2, src3);  in sse_diff_64x32_msa()
    [all …]
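Each sse_diff helper accumulates two quantities per block in one pass (CALC_MSE_AVG_B): the sum of pixel differences and the sum of squared differences; the caller derives the variance from them. A scalar sketch of the whole computation, with a name of our choosing:

    #include <stdint.h>

    /* var = SSE - sum^2/N, computed from one pass over the block. */
    static uint32_t variance(const uint8_t *src, int src_stride,
                             const uint8_t *ref, int ref_stride,
                             int w, int h, uint32_t *sse) {
      int64_t sum = 0;
      uint32_t sq = 0;
      int x, y;
      for (y = 0; y < h; ++y)
        for (x = 0; x < w; ++x) {
          int d = src[y * src_stride + x] - ref[y * ref_stride + x];
          sum += d;
          sq += (uint32_t)(d * d);
        }
      *sse = sq;
      return sq - (uint32_t)((sum * sum) / (w * h));
    }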
|
D | vpx_convolve_avg_msa.c |
    17   v16u8 src0, src1, src2, src3;  in avg_width4_msa() local
    22   LD_UB4(src, src_stride, src0, src1, src2, src3);  in avg_width4_msa()
    27   AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,  in avg_width4_msa()
    60   v16u8 src0, src1, src2, src3;  in avg_width8_msa() local
    64   LD_UB4(src, src_stride, src0, src1, src2, src3);  in avg_width8_msa()
    68   AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,  in avg_width8_msa()
    83   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in avg_width16_msa() local
    87   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in avg_width16_msa()
    91   AVER_UB4_UB(src0, dst0, src1, dst1, src2, dst2, src3, dst3,  in avg_width16_msa()
    104  v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in avg_width32_msa() local
    [all …]
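AVER_UB4_UB performs a rounded unsigned-byte average lane by lane, blending the incoming source with what is already in the destination. The per-pixel scalar operation, as a sketch:

    #include <stdint.h>

    /* Rounded average of source and destination, as AVER_UB computes per lane. */
    static void avg_block(const uint8_t *src, int src_stride,
                          uint8_t *dst, int dst_stride, int w, int h) {
      int x, y;
      for (y = 0; y < h; ++y)
        for (x = 0; x < w; ++x)
          dst[y * dst_stride + x] = (uint8_t)(
              (src[y * src_stride + x] + dst[y * dst_stride + x] + 1) >> 1);
    }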
|
D | sad_msa.c |
    26   uint32_t src0, src1, src2, src3, ref0, ref1, ref2, ref3;  in sad_4width_msa() local
    33   LW4(src_ptr, src_stride, src0, src1, src2, src3);  in sad_4width_msa()
    38   INSERT_W4_UB(src0, src1, src2, src3, src);  in sad_4width_msa()
    52   v16u8 src0, src1, src2, src3, ref0, ref1, ref2, ref3;  in sad_8width_msa() local
    56   LD_UB4(src, src_stride, src0, src1, src2, src3);  in sad_8width_msa()
    61   PCKEV_D4_UB(src1, src0, src3, src2, ref1, ref0, ref3, ref2,  in sad_8width_msa()
    134  v16u8 src0, src1, src2, src3;  in sad_64width_msa() local
    140  LD_UB4(src, 16, src0, src1, src2, src3);  in sad_64width_msa()
    145  sad1 += SAD_UB2_UH(src2, src3, ref2, ref3);  in sad_64width_msa()
    147  LD_UB4(src, 16, src0, src1, src2, src3);  in sad_64width_msa()
    [all …]
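SAD_UB2_UH reduces pairs of vectors to per-lane absolute differences and accumulates the lane sums. Per pixel the operation is simply the following (a sketch, our naming):

    #include <stdint.h>
    #include <stdlib.h>

    /* Sum of absolute differences over a w x h block. */
    static uint32_t sad(const uint8_t *src, int src_stride,
                        const uint8_t *ref, int ref_stride, int w, int h) {
      uint32_t total = 0;
      int x, y;
      for (y = 0; y < h; ++y)
        for (x = 0; x < w; ++x)
          total += abs(src[y * src_stride + x] - ref[y * ref_stride + x]);
      return total;
    }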
|
D | vpx_convolve8_msa.c |
    29   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_4w_msa() local
    47   LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_4w_msa()
    48   XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_4w_msa()
    53   hz_out2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_4w_msa()
    103  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_8w_msa() local
    122  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_8w_msa()
    125  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_8w_msa()
    132  hz_out3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_8w_msa()
    237  v16i8 src0, src1, src2, src3, src4, mask;  in common_hv_2ht_2vt_4x4_msa() local
    250  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);  in common_hv_2ht_2vt_4x4_msa()
    [all …]
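common_hv_8ht_8vt_* is the fused form of a separable 2-D filter: an 8-tap horizontal pass (HORIZ_8TAP_FILT) feeding an 8-tap vertical pass over the intermediate rows. A structural sketch in terms of the per-sample helpers defined above; the real kernels keep the intermediates in halfword vectors and never spill to a byte buffer, so this only shows the data flow:

    /* Separable 2-D 8-tap filter for blocks up to 64x64: horizontal pass
     * into a temporary, then vertical pass, reusing horiz_8tap/vert_8tap.
     * src is assumed to point inside a frame with valid border pixels. */
    static void convolve8_2d(const uint8_t *src, int src_stride,
                             uint8_t *dst, int dst_stride, int w, int h,
                             const int16_t *filt_h, const int16_t *filt_v) {
      uint8_t tmp[64 * (64 + 7)];
      int x, y;
      for (y = 0; y < h + 7; ++y)  /* 7 extra rows feed the vertical taps */
        for (x = 0; x < w; ++x)
          tmp[y * w + x] = horiz_8tap(&src[(y - 3) * src_stride + x], filt_h);
      for (y = 0; y < h; ++y)
        for (x = 0; x < w; ++x)
          dst[y * dst_stride + x] = vert_8tap(&tmp[(y + 3) * w + x], w, filt_v);
    }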
|
D | subtract_msa.c |
    17   uint32_t src0, src1, src2, src3;  in sub_blk_4x4_msa() local
    24   LW4(src_ptr, src_stride, src0, src1, src2, src3);  in sub_blk_4x4_msa()
    26   INSERT_W4_SB(src0, src1, src2, src3, src);  in sub_blk_4x4_msa()
    62   v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_blk_16x16_msa() local
    68   LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in sub_blk_16x16_msa()
    90   ILVRL_B2_UB(src3, pred3, src_l0, src_l1);  in sub_blk_16x16_msa()
    121  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_blk_32x32_msa() local
    129  LD_SB2(src, 16, src2, src3);  in sub_blk_32x32_msa()
    156  ILVRL_B2_UB(src3, pred3, src_l0, src_l1);  in sub_blk_32x32_msa()
    183  v16i8 src0, src1, src2, src3, src4, src5, src6, src7;  in sub_blk_64x64_msa() local
    [all …]
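The sub_blk kernels produce the 16-bit residual block the encoder transforms: a straight per-pixel subtraction of the prediction from the source (ILVRL_B2_UB widens the bytes before the subtract). Scalar form, mirroring the usual subtract-block signature:

    #include <stdint.h>

    /* Residual: diff = src - pred, widened to 16 bits per sample. */
    static void subtract_block(int rows, int cols,
                               int16_t *diff, int diff_stride,
                               const uint8_t *src, int src_stride,
                               const uint8_t *pred, int pred_stride) {
      int r, c;
      for (r = 0; r < rows; ++r)
        for (c = 0; c < cols; ++c)
          diff[r * diff_stride + c] =
              (int16_t)(src[r * src_stride + c] - pred[r * pred_stride + c]);
    }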
|
D | vpx_convolve8_avg_msa.c |
    23   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_and_aver_dst_4w_msa() local
    41   LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    42   XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    47   hz_out2 = HORIZ_8TAP_FILT(src2, src3, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_and_aver_dst_4w_msa()
    105  v16i8 src0, src1, src2, src3, src4, src5, src6, src7, src8, src9, src10;  in common_hv_8ht_8vt_and_aver_dst_8w_msa() local
    124  LD_SB7(src, src_stride, src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    127  XORI_B7_128_SB(src0, src1, src2, src3, src4, src5, src6);  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    134  hz_out3 = HORIZ_8TAP_FILT(src3, src3, mask0, mask1, mask2, mask3, filt_hz0,  in common_hv_8ht_8vt_and_aver_dst_8w_msa()
    251  v16i8 src0, src1, src2, src3, src4, mask;  in common_hv_2ht_2vt_and_aver_dst_4x4_msa() local
    265  LD_SB5(src, src_stride, src0, src1, src2, src3, src4);  in common_hv_2ht_2vt_and_aver_dst_4x4_msa()
    [all …]
|
/external/libvpx/libvpx/vp8/common/mips/msa/ |
D | copymem_msa.c |
    17   uint64_t src0, src1, src2, src3;  in copy_8x4_msa() local
    19   LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x4_msa()
    20   SD4(src0, src1, src2, src3, dst, dst_stride);  in copy_8x4_msa()
    26   uint64_t src0, src1, src2, src3;  in copy_8x8_msa() local
    28   LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x8_msa()
    30   SD4(src0, src1, src2, src3, dst, dst_stride);  in copy_8x8_msa()
    33   LD4(src, src_stride, src0, src1, src2, src3);  in copy_8x8_msa()
    34   SD4(src0, src1, src2, src3, dst, dst_stride);  in copy_8x8_msa()
    40   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in copy_16x16_msa() local
    43   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in copy_16x16_msa()
    [all …]
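For 8-pixel-wide copies the VP8 code avoids vector registers entirely: LD4/SD4 move four rows through 64-bit general registers. A portable-C equivalent (the memcpy calls model the unaligned 64-bit loads/stores; the function name is ours):

    #include <stdint.h>
    #include <string.h>

    /* copy_8x4: four rows of 8 bytes via 64-bit loads and stores. */
    static void copy_8x4(const uint8_t *src, int src_stride,
                         uint8_t *dst, int dst_stride) {
      int row;
      for (row = 0; row < 4; ++row) {
        uint64_t v;
        memcpy(&v, src + row * src_stride, sizeof(v));
        memcpy(dst + row * dst_stride, &v, sizeof(v));
      }
    }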
|
D | bilinear_filter_msa.c |
    41   v16i8 src0, src1, src2, src3, mask;  in common_hz_2t_4x4_msa() local
    50   LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_2t_4x4_msa()
    51   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);  in common_hz_2t_4x4_msa()
    63   v16i8 src0, src1, src2, src3, src4, src5, src6, src7, mask;  in common_hz_2t_4x8_msa() local
    72   LD_SB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in common_hz_2t_4x8_msa()
    73   VSHF_B2_UB(src0, src1, src2, src3, mask, mask, vec0, vec1);  in common_hz_2t_4x8_msa()
    104  v16i8 src0, src1, src2, src3, mask;  in common_hz_2t_8x4_msa() local
    112  LD_SB4(src, src_stride, src0, src1, src2, src3);  in common_hz_2t_8x4_msa()
    114  VSHF_B2_UH(src2, src2, src3, src3, mask, mask, vec2, vec3);  in common_hz_2t_8x4_msa()
    127  v16i8 src0, src1, src2, src3, mask, out0, out1;  in common_hz_2t_8x8mult_msa() local
    [all …]
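The 2-tap (bilinear) kernels blend each pixel with its right neighbor using a pair of weights. Assuming VP8's 7-bit convention, where the two weights sum to 128, the rounded shift cannot overflow a byte and no clipping is needed. A sketch under that assumption:

    #include <stdint.h>

    enum { BIL_SHIFT = 7 }; /* assumed VP8 bilinear precision; weights sum to 128 */

    /* One sample of the horizontal 2-tap filter behind common_hz_2t_*. */
    static uint8_t bilinear_2t(const uint8_t *src, const int16_t *filter) {
      int sum = src[0] * filter[0] + src[1] * filter[1];
      return (uint8_t)((sum + (1 << (BIL_SHIFT - 1))) >> BIL_SHIFT);
    }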
|
D | sixtap_filter_msa.c |
    54   #define HORIZ_6TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \  argument
    61   VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
    63   VSHF_B2_SB(src0, src1, src2, src3, mask1, mask1, vec2_m, vec3_m); \
    65   VSHF_B2_SB(src0, src1, src2, src3, mask2, mask2, vec4_m, vec5_m); \
    69   #define HORIZ_6TAP_8WID_4VECS_FILT(src0, src1, src2, src3, \  argument
    77   VSHF_B2_SB(src2, src2, src3, src3, mask0, mask0, vec2_m, vec3_m); \
    81   VSHF_B2_SB(src2, src2, src3, src3, mask1, mask1, vec2_m, vec3_m); \
    83   VSHF_B2_SB(src2, src2, src3, src3, mask2, mask2, vec6_m, vec7_m); \
    114  #define HORIZ_4TAP_4WID_4VECS_FILT(src0, src1, src2, src3, \  argument
    120  VSHF_B2_SB(src0, src1, src2, src3, mask0, mask0, vec0_m, vec1_m); \
    [all …]
|
/external/bison/lib/ |
D | bitset.c |
    409  bitset_op4_cmp (bitset dst, bitset src1, bitset src2, bitset src3,  in bitset_op4_cmp() argument
    429  changed = bitset_and_cmp (dst, src3, tmp);  in bitset_op4_cmp()
    434  changed = bitset_or_cmp (dst, src3, tmp);  in bitset_op4_cmp()
    439  changed = bitset_or_cmp (dst, src3, tmp);  in bitset_op4_cmp()
    450  bitset_and_or_ (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_and_or_() argument
    452  bitset_and_or_cmp_ (dst, src1, src2, src3);  in bitset_and_or_()
    459  bitset_and_or_cmp_ (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_and_or_cmp_() argument
    461  return bitset_op4_cmp (dst, src1, src2, src3, BITSET_OP_AND_OR);  in bitset_and_or_cmp_()
    467  bitset_andn_or_ (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_andn_or_() argument
    469  bitset_andn_or_cmp_ (dst, src1, src2, src3);  in bitset_andn_or_()
    [all …]
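bitset_and_or_cmp_ routes through the generic four-operand helper with BITSET_OP_AND_OR, computing dst = (src1 & src2) | src3 via a temporary and reporting whether dst changed. A word-at-a-time sketch of that semantic; the real code operates on opaque bitset handles rather than raw word arrays, and the name below is ours:

    #include <stdbool.h>
    #include <stddef.h>

    typedef unsigned long bitset_word;

    /* dst = (src1 & src2) | src3 over nwords words; returns true if dst changed. */
    static bool and_or_cmp_words(bitset_word *dst,
                                 const bitset_word *src1,
                                 const bitset_word *src2,
                                 const bitset_word *src3, size_t nwords) {
      bool changed = false;
      size_t i;
      for (i = 0; i < nwords; ++i) {
        bitset_word w = (src1[i] & src2[i]) | src3[i];
        if (w != dst[i]) {
          dst[i] = w;
          changed = true;
        }
      }
      return changed;
    }

This word loop is essentially the fast path vbitset_and_or_cmp takes further below when all four operands have equal sizes; when the sizes differ it falls back to bitset_and_or_cmp_.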
|
D | bitset_stats.c |
    524  bitset_stats_and_or (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_stats_and_or() argument
    526  BITSET_CHECK4_ (dst, src1, src2, src3);  in bitset_stats_and_or()
    527  BITSET_AND_OR_ (dst->s.bset, src1->s.bset, src2->s.bset, src3->s.bset);  in bitset_stats_and_or()
    532  bitset_stats_and_or_cmp (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_stats_and_or_cmp() argument
    534  BITSET_CHECK4_ (dst, src1, src2, src3);  in bitset_stats_and_or_cmp()
    535  return BITSET_AND_OR_CMP_ (dst->s.bset, src1->s.bset, src2->s.bset, src3->s.bset);  in bitset_stats_and_or_cmp()
    540  bitset_stats_andn_or (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_stats_andn_or() argument
    542  BITSET_CHECK4_ (dst, src1, src2, src3);  in bitset_stats_andn_or()
    543  BITSET_ANDN_OR_ (dst->s.bset, src1->s.bset, src2->s.bset, src3->s.bset);  in bitset_stats_andn_or()
    548  bitset_stats_andn_or_cmp (bitset dst, bitset src1, bitset src2, bitset src3)  in bitset_stats_andn_or_cmp()
    [all …]
|
D | vbitset.c |
    872  vbitset_and_or (bitset dst, bitset src1, bitset src2, bitset src3)  in vbitset_and_or() argument
    882  || BITSET_NBITS_ (src1) != BITSET_NBITS_ (src3))  in vbitset_and_or()
    884  bitset_and_or_ (dst, src1, src2, src3);  in vbitset_and_or()
    892  src3p = VBITSET_WORDS (src3);  in vbitset_and_or()
    902  vbitset_and_or_cmp (bitset dst, bitset src1, bitset src2, bitset src3)  in vbitset_and_or_cmp() argument
    913  || BITSET_NBITS_ (src1) != BITSET_NBITS_ (src3))  in vbitset_and_or_cmp()
    914  return bitset_and_or_cmp_ (dst, src1, src2, src3);  in vbitset_and_or_cmp()
    920  src3p = VBITSET_WORDS (src3);  in vbitset_and_or_cmp()
    939  vbitset_andn_or (bitset dst, bitset src1, bitset src2, bitset src3)  in vbitset_andn_or() argument
    949  || BITSET_NBITS_ (src1) != BITSET_NBITS_ (src3))  in vbitset_andn_or()
    [all …]
|
/external/v8/test/js-perf-test/Object/ |
D | assign.js |
    18   var src3;  variable
    49   src3 = { obj2: obj2 };  variable in BasicAssign3Setup
    53   Object.assign(object, src1, src2, src3);
    59   object.obj2 === src3;
    70   src3 = { obj1: obj1, obj2: obj2 };  variable in BasicAssignNull3Setup
    74   Object.assign(object, src1, src2, src3);
    80   object.obj2 === src3;
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonIntrinsicsV60.td |
    296  def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3),
    297  (MI VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3)>,
    302  IntRegs:$src3),
    305  IntRegs:$src3)>,
    310  def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3),
    311  (MI VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>,
    316  IntRegs:$src3),
    319  IntRegs:$src3)>,
    324  def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, IntRegs:$src3),
    325  (MI VecDblRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>,
    [all …]
|
D | HexagonInstrEnc.td |
    342  bits<5> src3;
    346  let Inst{13-0} = { src2_vector{3}, 0b00, src2_vector{2-0}, opc{2-0}, src3{4-0} };
    353  bits<5> src3;
    357  let Inst{13-0} = { src2_vector{3}, 0b00, src2_vector{2-0}, opc{2-0}, src3{4-0} };
    372  bits<3> src3;
    376  let Inst{13-0} = { src2_vector{3}, 0b00, src2_vector{2-0}, 0b00100, src3{2-0} };
    386  bits<3> src3;
    390  let Inst{13-0} = { src2_vector{3}, 0b00, src2_vector{2-0}, 0b00100, src3{2-0} };
    399  bits<10> src3;
    403  let src3_vector = src3{9-6};
    [all …]
|
/external/libvpx/libvpx/vp9/encoder/mips/msa/ |
D | vp9_avg_msa.c |
    16   v16u8 src0, src1, src2, src3, src4, src5, src6, src7;  in vp9_avg_8x8_msa() local
    20   LD_UB8(src, src_stride, src0, src1, src2, src3, src4, src5, src6, src7);  in vp9_avg_8x8_msa()
    21   HADD_UB4_UH(src0, src1, src2, src3, sum0, sum1, sum2, sum3);  in vp9_avg_8x8_msa()
    38   uint32_t src0, src1, src2, src3;  in vp9_avg_4x4_msa() local
    44   LW4(src, src_stride, src0, src1, src2, src3);  in vp9_avg_4x4_msa()
    45   INSERT_W4_UB(src0, src1, src2, src3, vec);  in vp9_avg_4x4_msa()
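vp9_avg_8x8 reduces a block to its rounded mean: HADD_UB4_UH sums the byte lanes horizontally, then the lane sums are folded down. A scalar sketch, assuming the usual round-to-nearest mean over the 64 pixels:

    #include <stdint.h>

    /* Rounded mean of an 8x8 block: (sum + 32) >> 6 over 64 pixels
     * (rounding constant assumed; name is ours). */
    static int avg_8x8(const uint8_t *src, int stride) {
      int sum = 0;
      int r, c;
      for (r = 0; r < 8; ++r)
        for (c = 0; c < 8; ++c) sum += src[r * stride + c];
      return (sum + 32) >> 6;
    }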
|