/external/llvm/lib/Target/CellSPU/
SPU64InstrInfo.td
     21: // 4. v2i64 setcc results are v4i32, which can be converted to a FSM mask (TODO)
     24: // 5. The code sequences for r64 and v2i64 are probably overly conservative,
     67: // v2i64 seteq (equality): the setcc result is v4i32
     71: def v2i64: CodeFrag<(i32 (COPY_TO_REGCLASS CEQv2i64compare.Fragment, R32C))>;
     83: def : Pat<(seteq (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)), I64EQv2i64.Fragment>;
    120: def v2i64: CodeFrag<CLGTv2i64compare.Fragment>;
    132: //def : Pat<(setugt (v2i64 VECREG:$rA), (v2i64 VECREG:$rB)),
    154: def v2i64: CodeFrag<CLGEv2i64compare.Fragment>;
    166: def : Pat<(v2i64 (setuge (v2i64 VECREG:$rA), (v2i64 VECREG:$rB))),
    205: def v2i64: CodeFrag<CGTv2i64compare.Fragment>;
    [all …]
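The comments at lines 21 and 67 explain the quirk these patterns work around: SPU compares of v2i64 produce a v4i32 result that has to be massaged back into a 64-bit-lane mask. A minimal IR sketch of code that would exercise the seteq path (the function name is ours, not from the tree):

    define <2 x i64> @v2i64_seteq(<2 x i64> %a, <2 x i64> %b) {
      %cmp = icmp eq <2 x i64> %a, %b          ; lowered through the CEQv2i64compare fragment
      %ext = sext <2 x i1> %cmp to <2 x i64>   ; widen the i1 lanes to a full mask
      ret <2 x i64> %ext
    }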
SPUInstrInfo.td
     63: def v2i64: LoadDFormVec<v2i64>;
     95: def v2i64: LoadAFormVec<v2i64>;
    127: def v2i64: LoadXFormVec<v2i64>;
    175: def v2i64: StoreDFormVec<v2i64>;
    205: def v2i64: StoreAFormVec<v2i64>;
    237: def v2i64: StoreXFormVec<v2i64>;
    299: [(set (v2i64 VECREG:$rT), (SPUshufmask dform2_addr:$src))]>;
    303: [(set (v2i64 VECREG:$rT), (SPUshufmask xform_addr:$src))]>;
    350: def v2i64: ILVecInst<v2i64, s16imm_i64, v2i64SExt16Imm>;
    377: def v2i64: ILHUVecInst<v2i64, u16imm_i64, immILHUvec_i64>;
    [all …]
SPUCallingConv.td
     24: CCIfType<[i8,i16,i32,i64,i128,f32,f64,v16i8,v8i16,v4i32,v2i64,v4f32,v2f64],
     41: v16i8, v8i16, v4i32, v4f32, v2i64, v2f64],
     55: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
SPUISelDAGToDAG.cpp
    192: ((vecVT == MVT::v2i64) &&  (in emitBuildVector())
    602: case MVT::v2i64:  (in getRC())
    716: } else if (Opc == ISD::ADD && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {  (in Select())
    728: } else if (Opc == ISD::SUB && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {  (in Select())
    740: } else if (Opc == ISD::MUL && (OpVT == MVT::i64 || OpVT == MVT::v2i64)) {  (in Select())
    824: MVT::v2i64,  (in Select())
    837: SDValue absVec = CurDAG->getNode(ISD::BUILD_VECTOR, dl, MVT::v2i64,  (in Select())
    924: Op0, getRC(MVT::v2i64) );  (in SelectSHLi64())
    991: Op0, getRC(MVT::v2i64) );  (in SelectSRLi64())
   1060: VecVT, N->getOperand(0), getRC(MVT::v2i64));  (in SelectSRAi64())
   [all …]
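The ADD/SUB/MUL branches in Select() intercept 64-bit arithmetic so it can be emitted as custom SPU instruction sequences instead of generic expansion. IR as plain as the following (a sketch; the function name is invented) is what routes through those branches:

    define <2 x i64> @v2i64_add(<2 x i64> %a, <2 x i64> %b) {
      %sum = add <2 x i64> %a, %b   ; hits the Opc == ISD::ADD, OpVT == MVT::v2i64 case
      ret <2 x i64> %sum
    }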
/external/llvm/test/CodeGen/X86/
vec_ctbits.ll
      3: declare <2 x i64> @llvm.cttz.v2i64(<2 x i64>)
      4: declare <2 x i64> @llvm.ctlz.v2i64(<2 x i64>)
      5: declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)
      8: %c = call <2 x i64> @llvm.cttz.v2i64(<2 x i64> %a)
     12: %c = call <2 x i64> @llvm.ctlz.v2i64(<2 x i64> %a)
     16: %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
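The calls at lines 8, 12, and 16 sit in separate test functions whose definitions this listing elides. Assembled into one self-contained unit (the function name @popcount is ours, not the test's):

    declare <2 x i64> @llvm.ctpop.v2i64(<2 x i64>)

    define <2 x i64> @popcount(<2 x i64> %a) {
      %c = call <2 x i64> @llvm.ctpop.v2i64(<2 x i64> %a)
      ret <2 x i64> %c
    }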
widen_conv-1.ll
      5: ; truncate v2i64 to v2i32
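A minimal sketch of the conversion that comment describes (names are ours); the illegal v2i32 result is what forces the widening this test checks:

    define <2 x i32> @trunc_v2i64(<2 x i64> %a) {
      %t = trunc <2 x i64> %a to <2 x i32>   ; v2i32 is not legal on x86 and gets widened
      ret <2 x i32> %t
    }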
legalizedag_vec.ll
      6: ; v2i64 is a legal type but with mmx disabled, i64 is an illegal type. When
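The comment is cut off in this listing, but the setup it describes is clear: on 32-bit x86 the vector type <2 x i64> lives in XMM registers while scalar i64 is illegal, so a v2i64 operation the legalizer has to scalarize lands on illegal i64 ops. A hedged sketch of such a case (sdiv has no SSE2 form, so each half becomes an i64 libcall):

    define <2 x i64> @scalarized(<2 x i64> %a, <2 x i64> %b) {
      %r = sdiv <2 x i64> %a, %b   ; no vector divide: split into scalar i64 divisions
      ret <2 x i64> %r
    }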
/external/llvm/test/CodeGen/CellSPU/useful-harnesses/
vecoperations.c
      7: typedef long long v2i64 __attribute__((ext_vector_type(2)));  (typedef)
     58: void print_v2i64(const char *str, v2i64 v) {  (in print_v2i64())
    126: v2i64 v2i64_shuffle(v2i64 a) {  (in v2i64_shuffle())
    127: v2i64 c2 = a.yx;  (in v2i64_shuffle())
    147: v2i64 v3 = { 691043ll, 910301513ll };  (in main())
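The .yx swizzle in v2i64_shuffle() swaps the two lanes. In IR terms that is a two-element shufflevector; a sketch of roughly what clang emits for it (not the harness's actual output):

    define <2 x i64> @v2i64_shuffle(<2 x i64> %a) {
      %c2 = shufflevector <2 x i64> %a, <2 x i64> undef, <2 x i32> <i32 1, i32 0>
      ret <2 x i64> %c2
    }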
/external/llvm/lib/Target/ARM/
ARMCallingConv.td
     32: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     50: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     64: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
     76: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    120: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    130: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    145: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
    157: CCIfType<[v2i64, v4i32, v8i16, v16i8, v4f32], CCBitConvertToType<v2f64>>,
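Every one of these rules says the same thing for a different convention: 128-bit vector values, v2i64 included, are bit-converted to v2f64 before being assigned to registers or stack. So in IR like the following, the <2 x i64> argument and return value both travel as v2f64 (sketch; assumes a NEON-capable ARM target):

    define <2 x i64> @passthrough(<2 x i64> %v) {
      ret <2 x i64> %v   ; argument and result both go through CCBitConvertToType<v2f64>
    }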
/external/llvm/test/CodeGen/ARM/
vshll.ll
     23: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
     47: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 31, i32 31 >)
     73: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %tmp1, <2 x i32> < i32 32, i32 32 >)
     79: declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
     83: declare <2 x i64> @llvm.arm.neon.vshiftlu.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
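The shift amounts are the interesting part: 31 is the largest ordinary long-shift for 32-bit lanes, while 32 (line 73) equals the element width and, as we read it, exercises vshll's separate maximum-shift encoding. A standalone caller built on the declaration at line 79 (function name ours):

    declare <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32>, <2 x i32>) nounwind readnone

    define <2 x i64> @widen_shl31(<2 x i32> %a) {
      %w = call <2 x i64> @llvm.arm.neon.vshiftls.v2i64(<2 x i32> %a, <2 x i32> <i32 31, i32 31>)
      ret <2 x i64> %w
    }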
2010-06-29-PartialRedefFastAlloc.ll
     19: %0 = call <2 x i64> @llvm.arm.neon.vld1.v2i64(i8* %arg, i32 1)
     25: declare <2 x i64> @llvm.arm.neon.vld1.v2i64(i8*, i32) nounwind readonly
vqdmul.ll
    175: %tmp3 = call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %tmp1, <2 x i32> %tmp2)
    193: …%1 = tail call <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32> %arg0_int32x2_t, <2 x i32> %0) ; <…
    198: declare <2 x i64> @llvm.arm.neon.vqdmull.v2i64(<2 x i32>, <2 x i32>) nounwind readnone
    216: …%tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %t…
    234: …%1 = tail call <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_i…
    239: declare <2 x i64> @llvm.arm.neon.vqdmlal.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
    257: …%tmp4 = call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %tmp1, <2 x i32> %tmp2, <2 x i32> %t…
    275: …%1 = tail call <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64> %arg0_int64x2_t, <2 x i32> %arg1_i…
    280: declare <2 x i64> @llvm.arm.neon.vqdmlsl.v2i64(<2 x i64>, <2 x i32>, <2 x i32>) nounwind readnone
vpadal.ll
     80: %tmp3 = call <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64> %tmp1, <4 x i32> %tmp2)
    121: declare <2 x i64> @llvm.arm.neon.vpadals.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
    125: declare <2 x i64> @llvm.arm.neon.vpadalu.v2i64.v4i32(<2 x i64>, <4 x i32>) nounwind readnone
vqshl.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    271: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    303: %tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    335: …%tmp2 = call <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 …
    357: declare <2 x i64> @llvm.arm.neon.vqshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    362: declare <2 x i64> @llvm.arm.neon.vqshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    367: declare <2 x i64> @llvm.arm.neon.vqshiftsu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    473: %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    509: %tmp3 = call <2 x i64> @llvm.arm.neon.vqrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    [all …]
vpadd.ll
    113: %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32> %tmp1)
    137: %tmp2 = call <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32> %tmp1)
    165: declare <2 x i64> @llvm.arm.neon.vpaddls.v2i64.v4i32(<4 x i32>) nounwind readnone
    169: declare <2 x i64> @llvm.arm.neon.vpaddlu.v2i64.v4i32(<4 x i32>) nounwind readnone
vshl.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    210: %tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 63, i64 63 >)
    308: …%tmp2 = call <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 …
    340: …%tmp2 = call <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64 …
    357: declare <2 x i64> @llvm.arm.neon.vshifts.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    362: declare <2 x i64> @llvm.arm.neon.vshiftu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    468: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    504: %tmp3 = call <2 x i64> @llvm.arm.neon.vrshiftu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    600: …%tmp2 = call <2 x i64> @llvm.arm.neon.vrshifts.v2i64(<2 x i64> %tmp1, <2 x i64> < i64 -64, i64 -64…
    [all …]
vqsub.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    160: declare <2 x i64> @llvm.arm.neon.vqsubs.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    165: declare <2 x i64> @llvm.arm.neon.vqsubu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
vqadd.ll
    107: %tmp3 = call <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    143: %tmp3 = call <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64> %tmp1, <2 x i64> %tmp2)
    160: declare <2 x i64> @llvm.arm.neon.vqadds.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
    165: declare <2 x i64> @llvm.arm.neon.vqaddu.v2i64(<2 x i64>, <2 x i64>) nounwind readnone
/external/llvm/include/llvm/CodeGen/
ValueTypes.h
     67: v2i64 = 25,   // 2 x i64  (enumerator)
    203: case v2i64:  (in getVectorElementType())
    235: case v2i64:  (in getVectorNumElements())
    275: case v2i64:  (in getSizeInBits())
    358: if (NumElements == 2) return MVT::v2i64;  (in getVectorVT())
    482: V==MVT::v2i64 || V==MVT::v4f32 || V==MVT::v2f64);  (in is128BitVector())
/external/llvm/lib/Target/X86/
X86InstrMMX.td
    182: (i64 (vector_extract (v2i64 VR128:$src),
    188: (v2i64 (scalar_to_vector
    412: [SDTCisVT<0, v2i64>, SDTCisVT<1, x86mmx>]>>;
    414: def : Pat<(v2i64 (MMX_X86movq2dq VR64:$src)),
    415: (v2i64 (MMX_MOVQ2DQrr VR64:$src))>;
    417: def : Pat<(v2i64 (MMX_X86movq2dq (load_mmx addr:$src))),
    418: (v2i64 (MOVQI2PQIrm addr:$src))>;
    420: def : Pat<(v2i64 (MMX_X86movq2dq
    422: (v2i64 (MOVDI2PDIrm addr:$src))>;
    426: [SDTCisVT<0, x86mmx>, SDTCisVT<1, v2i64>]>>;
X86InstrSSE.td
    441: def : Pat<(movlhps VR128:$src1, (bc_v4i32 (v2i64 (X86vzload addr:$src2)))),
    446: def : Pat<(v2i64 (movddup VR128:$src, (undef))),
    447: (MOVLHPSrr (v2i64 VR128:$src), (v2i64 VR128:$src))>;
   1256: def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), VR128:$src2, imm:$cc)),
   1258: def : Pat<(v2i64 (X86cmppd (v2f64 VR128:$src1), (memop addr:$src2), imm:$cc)),
   1507: [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
   1522: [(set VR128:$dst, (v2i64 (OpNode VR128:$src1, VR128:$src2)))],
   1962: def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
   2012: def : Pat<(alignednontemporalstore (v2i64 VR128:$src), addr:$dst),
   2085: def : Pat<(v2i64 immAllZerosV), (V_SET0PI)>;
   [all …]
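Worth noting among these: the pattern at lines 446-447 lowers a low-quadword splat (a movddup shuffle of a v2i64) to MOVLHPS with the same register on both sides. IR of this shape (our own example, not from the tree) is what such a pattern targets:

    define <2 x i64> @splat_low(<2 x i64> %v) {
      %d = shufflevector <2 x i64> %v, <2 x i64> undef, <2 x i32> zeroinitializer
      ret <2 x i64> %d   ; both lanes now hold element 0
    }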
X86CallingConv.td
     40: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    154: CCPromoteToType<v2i64>>>>,
    157: CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    175: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
    194: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,
    213: CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    235: CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    273: CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
    282: CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,
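The rule at line 194 is the odd one out: there, 16-byte vectors such as v2i64 are not passed by value in XMM registers but indirectly (CCPassIndirect<i64>), meaning the caller spills the vector to a 16-byte slot and passes its address. A function whose <2 x i64> argument is handled that way under such a convention (sketch):

    define <2 x i64> @consume(<2 x i64> %v) {
      ret <2 x i64> %v   ; under the indirect rule, %v arrives via a caller-materialized pointer
    }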
X86InstrFragmentsSIMD.td
    188: def loadv2i64 : PatFrag<(ops node:$ptr), (v2i64 (load node:$ptr))>;
    220: (v2i64 (alignedload node:$ptr))>;
    250: def memopv2i64 : PatFrag<(ops node:$ptr), (v2i64 (memop node:$ptr))>;
    302: def bc_v2i64 : PatFrag<(ops node:$in), (v2i64 (bitconvert node:$in))>;
    309: (bitconvert (v2i64 (X86vzmovl
    310: (v2i64 (scalar_to_vector (loadi64 node:$src))))))>;
    316: (bitconvert (v2i64 (X86vzload node:$src)))>;
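memopv2i64 (line 250) is the fragment that lets instruction selection fold a suitably aligned v2i64 load directly into an SSE instruction's memory operand. A load/add pair such fragments can match (sketch; written in this tree's pre-3.7 load syntax):

    define <2 x i64> @add_from_mem(<2 x i64>* %p, <2 x i64> %b) {
      %a = load <2 x i64>* %p, align 16   ; aligned 128-bit load, foldable via memopv2i64
      %r = add <2 x i64> %a, %b           ; can select PADDQ with a memory operand
      ret <2 x i64> %r
    }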
/external/llvm/test/TableGen/
cast.td
      9: def v2i64 : ValueType<128, 22>;   // 2 x i64 vector value
     58: def VR128 : RegisterClass<[v2i64, v2f64],
TargetInstrSpec.td
     10: def v2i64 : ValueType<128, 22>;   // 2 x i64 vector value
     59: def VR128 : RegisterClass<[v2i64, v2f64],