
Searched refs: v2i64 (results 1 – 25 of 570), sorted by relevance
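Note: across these hits, v2i64 names a 128-bit vector of two signed 64-bit integers. In the MIPS MSA sources it is the msa.h vector type used by the __msa_*/__builtin_msa_* intrinsics; in the LLVM sources it is the machine value type MVT::v2i64, i.e. the IR type <2 x i64>.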


/external/gemmlowp/internal/
pack_msa.h:90 v2i64 tmp = __builtin_msa_ilvr_d( in Pack()
91 reinterpret_cast<v2i64>( in Pack()
93 reinterpret_cast<v2i64>( in Pack()
99 v2i64 tmp = __builtin_msa_ilvl_d( in Pack()
100 reinterpret_cast<v2i64>( in Pack()
102 reinterpret_cast<v2i64>( in Pack()
110 v2i64 tmp = __builtin_msa_ilvr_d( in Pack()
111 reinterpret_cast<v2i64>( in Pack()
113 reinterpret_cast<v2i64>( in Pack()
119 v2i64 tmp = reinterpret_cast<v2i64>( in Pack()
[all …]
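All of these Pack() hits use the same MSA idiom: cast two 128-bit vectors to v2i64 and interleave their 64-bit halves with ilvr_d/ilvl_d. A minimal sketch of that idiom, assuming a MIPS target compiled with -mmsa (the helper names here are hypothetical, not gemmlowp's):

#include <msa.h>

/* Combine the 64-bit halves of two 128-bit vectors.
 * ilvr_d takes the low (right) doubleword of each operand:
 * result lane 0 = low half of b, lane 1 = low half of a.
 * ilvl_d does the same with the high (left) doublewords. */
v2i64 combine_low_halves(v16i8 a, v16i8 b) {
  return __builtin_msa_ilvr_d((v2i64)a, (v2i64)b);
}

v2i64 combine_high_halves(v16i8 a, v16i8 b) {
  return __builtin_msa_ilvl_d((v2i64)a, (v2i64)b);
}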
output_msa.h:315 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
317 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
321 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
323 reinterpret_cast<v2i64>(tmp1), reinterpret_cast<v2i64>(tmp0)));
481 v2i64 u0 = reinterpret_cast<v2i64>(__builtin_msa_ilvr_w(t1, t0));
482 v2i64 u1 = reinterpret_cast<v2i64>(__builtin_msa_ilvl_w(t1, t0));
488 v2i64 u2 = reinterpret_cast<v2i64>(__builtin_msa_ilvr_w(t3, t2));
489 v2i64 u3 = reinterpret_cast<v2i64>(__builtin_msa_ilvl_w(t3, t2));
495 v2i64 u4 = reinterpret_cast<v2i64>(__builtin_msa_ilvr_w(t5, t4));
496 v2i64 u5 = reinterpret_cast<v2i64>(__builtin_msa_ilvl_w(t5, t4));
[all …]
/external/libvpx/libvpx/vpx_dsp/mips/
vpx_convolve_copy_msa.c:26 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
27 out1 = __msa_copy_u_d((v2i64)src1, 0); in copy_width8_msa()
28 out2 = __msa_copy_u_d((v2i64)src2, 0); in copy_width8_msa()
29 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
30 out4 = __msa_copy_u_d((v2i64)src4, 0); in copy_width8_msa()
31 out5 = __msa_copy_u_d((v2i64)src5, 0); in copy_width8_msa()
32 out6 = __msa_copy_u_d((v2i64)src6, 0); in copy_width8_msa()
33 out7 = __msa_copy_u_d((v2i64)src7, 0); in copy_width8_msa()
43 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
44 out1 = __msa_copy_u_d((v2i64)src1, 0); in copy_width8_msa()
[all …]
deblock_msa.c:40 out9 = (v16u8)__msa_ilvl_d((v2i64)out8, (v2i64)out8); \
41 out11 = (v16u8)__msa_ilvl_d((v2i64)out10, (v2i64)out10); \
42 out13 = (v16u8)__msa_ilvl_d((v2i64)out12, (v2i64)out12); \
43 out15 = (v16u8)__msa_ilvl_d((v2i64)out14, (v2i64)out14); \
44 out1 = (v16u8)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
45 out3 = (v16u8)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
46 out5 = (v16u8)__msa_ilvl_d((v2i64)out4, (v2i64)out4); \
47 out7 = (v16u8)__msa_ilvl_d((v2i64)out6, (v2i64)out6); \
94 in1 = (v16u8)__msa_ilvl_d((v2i64)temp4, (v2i64)temp0); \
95 in3 = (v16u8)__msa_ilvl_d((v2i64)temp5, (v2i64)temp1); \
[all …]
loopfilter_4_msa.c:33 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_4_msa()
34 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_4_msa()
35 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_4_msa()
36 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_4_msa()
55 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in vpx_lpf_horizontal_4_dual_msa()
59 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in vpx_lpf_horizontal_4_dual_msa()
63 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in vpx_lpf_horizontal_4_dual_msa()
124 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in vpx_lpf_vertical_4_dual_msa()
128 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in vpx_lpf_vertical_4_dual_msa()
132 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in vpx_lpf_vertical_4_dual_msa()
loopfilter_8_msa.c:38 flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat); in vpx_lpf_horizontal_8_msa()
41 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_8_msa()
42 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_8_msa()
43 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_8_msa()
44 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_8_msa()
65 p2_d = __msa_copy_u_d((v2i64)p2_out, 0); in vpx_lpf_horizontal_8_msa()
66 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in vpx_lpf_horizontal_8_msa()
67 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in vpx_lpf_horizontal_8_msa()
68 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in vpx_lpf_horizontal_8_msa()
69 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in vpx_lpf_horizontal_8_msa()
[all …]
macros_msa.h:305 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
306 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
425 out0_m = __msa_copy_u_d((v2i64)in, 0); \
441 out0_m = __msa_copy_u_d((v2i64)in, 0); \
442 out1_m = __msa_copy_u_d((v2i64)in, 1); \
465 out0_m = __msa_copy_u_d((v2i64)in0, 0); \
466 out1_m = __msa_copy_u_d((v2i64)in0, 1); \
467 out2_m = __msa_copy_u_d((v2i64)in1, 0); \
468 out3_m = __msa_copy_u_d((v2i64)in1, 1); \
659 #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
[all …]
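The macro hits above all reduce to __msa_copy_u_d: pull each 64-bit lane of a vector into a general-purpose register, then store it. A standalone sketch of that pattern, assuming MIPS64 with MSA (the helper name and memcpy-based store are illustrative, not the libvpx macro itself):

#include <msa.h>
#include <stdint.h>
#include <string.h>

/* Store both 64-bit lanes of a 128-bit vector to two rows of a
 * byte buffer. copy_u_d moves lane 0 or lane 1 into a GPR. */
static void store_two_doublewords(v16u8 in, uint8_t *dst, int stride) {
  uint64_t out0 = __msa_copy_u_d((v2i64)in, 0); /* lane 0 */
  uint64_t out1 = __msa_copy_u_d((v2i64)in, 1); /* lane 1 */
  memcpy(dst, &out0, sizeof(out0));
  memcpy(dst + stride, &out1, sizeof(out1));
}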
/external/libaom/libaom/aom_dsp/mips/
aom_convolve_copy_msa.c:26 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
27 out1 = __msa_copy_u_d((v2i64)src1, 0); in copy_width8_msa()
28 out2 = __msa_copy_u_d((v2i64)src2, 0); in copy_width8_msa()
29 out3 = __msa_copy_u_d((v2i64)src3, 0); in copy_width8_msa()
30 out4 = __msa_copy_u_d((v2i64)src4, 0); in copy_width8_msa()
31 out5 = __msa_copy_u_d((v2i64)src5, 0); in copy_width8_msa()
32 out6 = __msa_copy_u_d((v2i64)src6, 0); in copy_width8_msa()
33 out7 = __msa_copy_u_d((v2i64)src7, 0); in copy_width8_msa()
43 out0 = __msa_copy_u_d((v2i64)src0, 0); in copy_width8_msa()
44 out1 = __msa_copy_u_d((v2i64)src1, 0); in copy_width8_msa()
[all …]
loopfilter_4_msa.c:33 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in aom_lpf_horizontal_4_msa()
34 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in aom_lpf_horizontal_4_msa()
35 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in aom_lpf_horizontal_4_msa()
36 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in aom_lpf_horizontal_4_msa()
55 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in aom_lpf_horizontal_4_dual_msa()
59 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in aom_lpf_horizontal_4_dual_msa()
63 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in aom_lpf_horizontal_4_dual_msa()
124 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in aom_lpf_vertical_4_dual_msa()
128 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in aom_lpf_vertical_4_dual_msa()
132 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in aom_lpf_vertical_4_dual_msa()
loopfilter_8_msa.c:38 flat = (v16u8)__msa_ilvr_d((v2i64)zero, (v2i64)flat); in aom_lpf_horizontal_8_msa()
41 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in aom_lpf_horizontal_8_msa()
42 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in aom_lpf_horizontal_8_msa()
43 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in aom_lpf_horizontal_8_msa()
44 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in aom_lpf_horizontal_8_msa()
65 p2_d = __msa_copy_u_d((v2i64)p2_out, 0); in aom_lpf_horizontal_8_msa()
66 p1_d = __msa_copy_u_d((v2i64)p1_out, 0); in aom_lpf_horizontal_8_msa()
67 p0_d = __msa_copy_u_d((v2i64)p0_out, 0); in aom_lpf_horizontal_8_msa()
68 q0_d = __msa_copy_u_d((v2i64)q0_out, 0); in aom_lpf_horizontal_8_msa()
69 q1_d = __msa_copy_u_d((v2i64)q1_out, 0); in aom_lpf_horizontal_8_msa()
[all …]
macros_msa.h:389 out1 = (v8i16)__msa_ilvl_d((v2i64)out0, (v2i64)out0); \
390 out3 = (v8i16)__msa_ilvl_d((v2i64)out2, (v2i64)out2); \
553 out0_m = __msa_copy_u_d((v2i64)in, 0); \
569 out0_m = __msa_copy_u_d((v2i64)in, 0); \
570 out1_m = __msa_copy_u_d((v2i64)in, 1); \
593 out0_m = __msa_copy_u_d((v2i64)in0, 0); \
594 out1_m = __msa_copy_u_d((v2i64)in0, 1); \
595 out2_m = __msa_copy_u_d((v2i64)in1, 0); \
596 out3_m = __msa_copy_u_d((v2i64)in1, 1); \
786 #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
[all …]
/external/swiftshader/third_party/LLVM/lib/Target/CellSPU/
SPU64InstrInfo.td:21 // 4. v2i64 setcc results are v4i32, which can be converted to a FSM mask (TODO)
24 // 5. The code sequences for r64 and v2i64 are probably overly conservative,
67 // v2i64 seteq (equality): the setcc result is v4i32
71 def v2i64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQv2i64compare.Fragment, R32C))>;
83 def : Pat<(seteq (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)), I64EQv2i64.Fragment>;
120 def v2i64: CodeFrag<CLGTv2i64compare.Fragment>;
132 //def : Pat<(setugt (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)),
154 def v2i64: CodeFrag<CLGEv2i64compare.Fragment>;
166 def : Pat<(v2i64 (setuge (v2i64 VECREG:$rA), (v2i64 VECREG:$rB))),
205 def v2i64: CodeFrag<CGTv2i64compare.Fragment>;
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/
scalarize.ll:14 declare %i8 @llvm.bswap.v2i64(%i8)
17 declare %i8 @llvm.cttz.v2i64(%i8)
27 ; CHECK32: cost of 1 {{.*}}bswap.v2i64
28 ; CHECK64: cost of 1 {{.*}}bswap.v2i64
29 %r3 = call %i8 @llvm.bswap.v2i64(%i8 undef)
34 ; CHECK32: cost of 10 {{.*}}cttz.v2i64
35 ; CHECK64: cost of 10 {{.*}}cttz.v2i64
36 %r5 = call %i8 @llvm.cttz.v2i64(%i8 undef)
/external/llvm/test/Analysis/CostModel/X86/
scalarize.ll:14 declare %i8 @llvm.bswap.v2i64(%i8)
17 declare %i8 @llvm.cttz.v2i64(%i8)
27 ; CHECK32: cost of 1 {{.*}}bswap.v2i64
28 ; CHECK64: cost of 1 {{.*}}bswap.v2i64
29 %r3 = call %i8 @llvm.bswap.v2i64(%i8 undef)
34 ; CHECK32: cost of 10 {{.*}}cttz.v2i64
35 ; CHECK64: cost of 6 {{.*}}cttz.v2i64
36 %r5 = call %i8 @llvm.cttz.v2i64(%i8 undef)
/external/swiftshader/third_party/LLVM/test/CodeGen/X86/
vec_ctbits.ll:3 declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
4 declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
5 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
8 %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
12 %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
16 %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
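These tests cover the lane-wise bit-count intrinsics on <2 x i64>. As a plain-C model of what two of them compute (not LLVM code; note that cttz of zero is defined as the bit width, while C's __builtin_ctzll is undefined on zero, so that case is special-cased):

#include <stdint.h>

/* Scalar models of the <2 x i64> bit-count intrinsics above. */
static void cttz_v2i64(const uint64_t in[2], uint64_t out[2]) {
  for (int i = 0; i < 2; ++i)
    out[i] = in[i] ? (uint64_t)__builtin_ctzll(in[i]) : 64;
}

static void ctpop_v2i64(const uint64_t in[2], uint64_t out[2]) {
  for (int i = 0; i < 2; ++i)
    out[i] = (uint64_t)__builtin_popcountll(in[i]);
}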
/external/libvpx/libvpx/vp8/common/mips/msa/
vp8_macros_msa.h:457 out0_m = __msa_copy_u_d((v2i64)in, 0); \
473 out0_m = __msa_copy_u_d((v2i64)in, 0); \
474 out1_m = __msa_copy_u_d((v2i64)in, 1); \
497 out0_m = __msa_copy_u_d((v2i64)in0, 0); \
498 out1_m = __msa_copy_u_d((v2i64)in0, 1); \
499 out2_m = __msa_copy_u_d((v2i64)in1, 0); \
500 out3_m = __msa_copy_u_d((v2i64)in1, 1); \
669 #define DOTP_SW2_SD(...) DOTP_SW2(v2i64, __VA_ARGS__)
733 out0 = (RTYPE)__msa_dpadd_s_d((v2i64)out0, (v4i32)mult0, (v4i32)mult0); \
734 out1 = (RTYPE)__msa_dpadd_s_d((v2i64)out1, (v4i32)mult1, (v4i32)mult1); \
[all …]
loopfilter_filters_msa.c:223 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in loop_filter_horizontal_4_dual_msa()
227 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in loop_filter_horizontal_4_dual_msa()
231 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in loop_filter_horizontal_4_dual_msa()
263 thresh0 = (v16u8)__msa_ilvr_d((v2i64)thresh1, (v2i64)thresh0); in loop_filter_vertical_4_dual_msa()
267 b_limit0 = (v16u8)__msa_ilvr_d((v2i64)b_limit1, (v2i64)b_limit0); in loop_filter_vertical_4_dual_msa()
271 limit0 = (v16u8)__msa_ilvr_d((v2i64)limit1, (v2i64)limit0); in loop_filter_vertical_4_dual_msa()
336 p2_d = __msa_copy_u_d((v2i64)p2, 0); in mbloop_filter_horizontal_edge_uv_msa()
337 p1_d = __msa_copy_u_d((v2i64)p1, 0); in mbloop_filter_horizontal_edge_uv_msa()
338 p0_d = __msa_copy_u_d((v2i64)p0, 0); in mbloop_filter_horizontal_edge_uv_msa()
339 q0_d = __msa_copy_u_d((v2i64)q0, 0); in mbloop_filter_horizontal_edge_uv_msa()
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/PowerPC/
PPCInstrAltivec.td:869 def : Pat<(v16i8 (bitconvert (v2i64 VRRC:$src))), (v16i8 VRRC:$src)>;
875 def : Pat<(v8i16 (bitconvert (v2i64 VRRC:$src))), (v8i16 VRRC:$src)>;
881 def : Pat<(v4i32 (bitconvert (v2i64 VRRC:$src))), (v4i32 VRRC:$src)>;
887 def : Pat<(v4f32 (bitconvert (v2i64 VRRC:$src))), (v4f32 VRRC:$src)>;
890 def : Pat<(v2i64 (bitconvert (v16i8 VRRC:$src))), (v2i64 VRRC:$src)>;
891 def : Pat<(v2i64 (bitconvert (v8i16 VRRC:$src))), (v2i64 VRRC:$src)>;
892 def : Pat<(v2i64 (bitconvert (v4i32 VRRC:$src))), (v2i64 VRRC:$src)>;
893 def : Pat<(v2i64 (bitconvert (v4f32 VRRC:$src))), (v2i64 VRRC:$src)>;
894 def : Pat<(v2i64 (bitconvert (v1i128 VRRC:$src))), (v2i64 VRRC:$src)>;
900 def : Pat<(v1i128 (bitconvert (v2i64 VRRC:$src))), (v1i128 VRRC:$src)>;
[all …]
PPCInstrVSX.td:512 int_ppc_vsx_xvcmpeqdp, v2i64, v2f64>;
518 int_ppc_vsx_xvcmpgedp, v2i64, v2f64>;
524 int_ppc_vsx_xvcmpgtdp, v2i64, v2f64>;
643 [(set v2i64:$XT, (fp_to_sint v2f64:$XB))]>;
651 [(set v2i64:$XT, (fp_to_uint v2f64:$XB))]>;
678 [(set v2f64:$XT, (sint_to_fp v2i64:$XB))]>;
682 [(set v4f32:$XT, (int_ppc_vsx_xvcvsxdsp v2i64:$XB))]>;
694 [(set v2f64:$XT, (uint_to_fp v2i64:$XB))]>;
698 [(set v4f32:$XT, (int_ppc_vsx_xvcvuxdsp v2i64:$XB))]>;
866 [(set v2i64:$XT, (PPCxxpermdi v2i64:$XA, v2i64:$XB,
[all …]
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/Mips/
MipsGenDAGISel.inc:494 /* 796*/ OPC_CheckChild1Type, MVT::v2i64,
503 …// Src: (st MSA128DOpnd:{ *:[v2i64] }:$wd, addrimm10lsl3:{ *:[iPTR] }:$addr)<<P:Predicate_unindexe…
504 // Dst: (ST_D MSA128DOpnd:{ *:[v2i64] }:$wd, addrimm10lsl3:{ *:[iPTR] }:$addr)
1319 /* 2345*/ /*SwitchType*/ 14, MVT::v2i64,// ->2361
1324 MVT::v2i64, 2/*#Ops*/, 2, 3,
1325 …// Src: (ld:{ *:[v2i64] } addrimm10lsl3:{ *:[iPTR] }:$addr)<<P:Predicate_unindexedload>><<P:Predic…
1326 // Dst: (LD_D:{ *:[v2i64] } addrimm10lsl3:{ *:[iPTR] }:$addr)
8947 MVT::v2i64, 2/*#Ops*/, 0, 2,
8948 …// Src: (intrinsic_wo_chain:{ *:[v2i64] } 3473:{ *:[iPTR] }, MSA128DOpnd:{ *:[v2i64] }:$ws, (imm:{…
8949 // Dst: (SAT_S_D:{ *:[v2i64] } MSA128DOpnd:{ *:[v2i64] }:$ws, (imm:{ *:[i32] }):$m)
[all …]
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/AArch64/
AArch64GenDAGISel.inc:347 /* 626*/ OPC_CheckChild0Type, MVT::v2i64,
363 …// Src: (st (vector_extract:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, 0:{ *:[iPTR] }), (ro_Wind…
364 …// Dst: (STRDroW (EXTRACT_SUBREG:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, dsub:{ *:[i32] }), G…
373 …// Src: (st (vector_extract:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, 0:{ *:[iPTR] }), (ro_Xind…
374 …// Dst: (STRDroX (EXTRACT_SUBREG:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, dsub:{ *:[i32] }), G…
383 …// Src: (st (vector_extract:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, 0:{ *:[iPTR] }), (am_inde…
384 …// Dst: (STRDui (EXTRACT_SUBREG:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, dsub:{ *:[i32] }), GP…
393 …// Src: (st (vector_extract:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, 0:{ *:[iPTR] }), (am_unsc…
394 …// Dst: (STURDi (EXTRACT_SUBREG:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, dsub:{ *:[i32] }), GP…
413 …// Src: (st (vector_extract:{ *:[i64] } VecListOne128:{ *:[v2i64] }:$Vt, (imm:{ *:[i64] })<<P:Pred…
[all …]
/external/webp/src/dsp/
msa_macro.h:589 const v2i64 res0_m = __msa_hadd_s_d((v4i32)in, (v4i32)in); in func_hadd_sw_s32()
590 const v2i64 res1_m = __msa_splati_d(res0_m, 1); in func_hadd_sw_s32()
591 const v2i64 out = res0_m + res1_m; in func_hadd_sw_s32()
606 const v2i64 res0 = __msa_hadd_s_d(res, res); in func_hadd_sh_s32()
607 const v2i64 res1 = __msa_splati_d(res0, 1); in func_hadd_sh_s32()
608 const v2i64 res2 = res0 + res1; in func_hadd_sh_s32()
625 v2u64 res1_m = (v2u64)__msa_splati_d((v2i64)res0_m, 1); in func_hadd_uh_u32()
699 out = (RTYPE)__msa_insert_d((v2i64)out, 0, in0); \
700 out = (RTYPE)__msa_insert_d((v2i64)out, 1, in1); \
720 #define ILVEV_B2_SD(...) ILVEV_B2(v2i64, __VA_ARGS__)
[all …]
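The func_hadd_* hits show the standard MSA horizontal-reduction pattern: widen and pair-sum with hadd_s_d, then fold the two 64-bit lanes together via splati_d. A self-contained sketch, assuming MIPS64 with MSA (the function name is illustrative, not the libwebp macro):

#include <msa.h>
#include <stdint.h>

/* Sum all four 32-bit lanes of a vector into one 64-bit scalar. */
static int64_t hadd_sw_s64(v4i32 in) {
  v2i64 res0 = __msa_hadd_s_d(in, in);  /* pairwise 32->64-bit sums */
  v2i64 res1 = __msa_splati_d(res0, 1); /* broadcast the upper lane */
  v2i64 out = res0 + res1;              /* lane 0 now holds the total */
  return __msa_copy_s_d(out, 0);
}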
/external/swiftshader/third_party/LLVM/lib/Target/X86/
X86GenDAGISel.inc:95 /*172*/ OPC_CheckChild1Type, MVT::v2i64,
104 …// Src: (st VR128:v2i64:$src, addr:iPTR:$dst)<<P:Predicate_alignednontemporalstore>> - Complexity …
105 // Dst: (VMOVNTDQmr addr:iPTR:$dst, VR128:v2i64:$src)
112 …// Src: (st VR128:v2i64:$src, addr:iPTR:$dst)<<P:Predicate_alignednontemporalstore>> - Complexity …
113 // Dst: (MOVNTDQmr addr:iPTR:$dst, VR128:v2i64:$src)
3766 /*8348*/ OPC_CheckType, MVT::v2i64,
3784 …// Src: (st (vector_shuffle:v4i32 (bitconvert:v4i32 (ld:v2i64 addr:iPTR:$src1)<<P:Predicate_uninde…
3792 …// Src: (st (vector_shuffle:v4i32 (bitconvert:v4i32 (ld:v2i64 addr:iPTR:$src1)<<P:Predicate_uninde…
3853 /*SwitchType*/ 56, MVT::v2i64,// ->8604
3866 …// Src: (st (vector_shuffle:v2i64 (ld:v2i64 addr:iPTR:$src1)<<P:Predicate_unindexedload>><<P:Predi…
[all …]
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/PowerPC/
vec_popcnt.ll:2 ; In addition, check the conversions to/from the v2i64 VMX register that was also added in P8.
9 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
39 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
48 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
58 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
67 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
/external/llvm/test/CodeGen/PowerPC/
vec_popcnt.ll:2 ; In addition, check the conversions to/from the v2i64 VMX register that was also added in P8.
9 declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>) nounwind readnone
39 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %x)
48 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
58 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
67 %vcnt = tail call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %tmp)
