/external/llvm/include/llvm/CodeGen/

MachineValueType.h
     98: v8i64 = 48, // 8 x i64 enumerator
    260: SimpleTy == MVT::v8i64);  in is512BitVector()
    345: case v8i64:  in getVectorElementType()
    392: case v8i64:  in getVectorNumElements()
    493: case v8i64:  in getSizeInBits()
    629: if (NumElements == 8) return MVT::v8i64;  in getVectorVT()

ValueTypes.td
     75: def v8i64 : ValueType<512, 48>; // 8 x i64 vector value

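The two hits above are the definition of the type itself: the v8i64 enumerator (simple-type value 48) in MachineValueType.h and the matching ValueType<512, 48> record, i.e. a 512-bit vector of eight i64 elements. Below is a minimal sketch, not code from this tree, of how the listed accessors behave; it assumes an LLVM checkout laid out like this one, where MVT lives in llvm/CodeGen/MachineValueType.h.

    // Sketch only: exercises the MachineValueType.h accessors listed above.
    #include "llvm/CodeGen/MachineValueType.h"
    #include <cassert>

    int main() {
      llvm::MVT VT = llvm::MVT::v8i64;

      assert(VT.is512BitVector());                          // 8 x 64 bits = 512 bits
      assert(VT.getVectorNumElements() == 8);
      assert(VT.getVectorElementType() == llvm::MVT::i64);
      assert(VT.getSizeInBits() == 512);

      // getVectorVT() maps (i64, 8 elements) back onto the same enumerator.
      assert(llvm::MVT::getVectorVT(llvm::MVT::i64, 8) == VT);
      return 0;
    }
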
/external/llvm/lib/Target/X86/

X86TargetTransformInfo.cpp
    138: { ISD::SHL, MVT::v8i64, 1 },  in getArithmeticInstrCost()
    139: { ISD::SRL, MVT::v8i64, 1 },  in getArithmeticInstrCost()
    140: { ISD::SRA, MVT::v8i64, 1 },  in getArithmeticInstrCost()
    537: { ISD::UINT_TO_FP, MVT::v8f64, MVT::v8i64, 1 },  in getCastInstrCost()
    540: { ISD::UINT_TO_FP, MVT::v8f32, MVT::v8i64, 1 },  in getCastInstrCost()
    544: { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f64, 1 },  in getCastInstrCost()
    547: { ISD::FP_TO_UINT, MVT::v8i64, MVT::v8f32, 1 },  in getCastInstrCost()
    557: { ISD::TRUNCATE, MVT::v8i16, MVT::v8i64, 1 },  in getCastInstrCost()
    558: { ISD::TRUNCATE, MVT::v8i32, MVT::v8i64, 1 },  in getCastInstrCost()
    568: { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i32, 1 },  in getCastInstrCost()
    [all …]

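These unit-cost rows come from the AVX-512 sections of the X86 cost tables: a 512-bit shift or int/fp conversion on v8i64 is priced as a single instruction. The sketch below, which is not part of this tree, simply builds the corresponding IR operations with IRBuilder so the table rows have something concrete to point at; it does not query the cost model. The function name "demo" and module name are invented, and the pre-scalable-vector VectorType::get(ElemTy, NumElts) signature of this snapshot is assumed.

    // Sketch only: builds the <8 x i64> operations whose X86 costs are tabulated
    // above (SHL, UINT_TO_FP, TRUNCATE, SIGN_EXTEND). It does not query TTI.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("v8i64-ops", Ctx);

      VectorType *V8I64 = VectorType::get(Type::getInt64Ty(Ctx), 8);
      VectorType *V8F64 = VectorType::get(Type::getDoubleTy(Ctx), 8);
      VectorType *V8I32 = VectorType::get(Type::getInt32Ty(Ctx), 8);

      FunctionType *FT =
          FunctionType::get(Type::getVoidTy(Ctx), {V8I64, V8I64}, false);
      Function *F = Function::Create(FT, Function::ExternalLinkage, "demo", &M);
      IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

      auto AI = F->arg_begin();
      Value *A = &*AI++;
      Value *Amt = &*AI;

      Value *Shl = B.CreateShl(A, Amt);       // ISD::SHL          v8i64           (cost 1)
      Value *Fp  = B.CreateUIToFP(A, V8F64);  // ISD::UINT_TO_FP   v8i64 -> v8f64  (cost 1)
      Value *Tr  = B.CreateTrunc(A, V8I32);   // ISD::TRUNCATE     v8i64 -> v8i32  (cost 1)
      Value *Sx  = B.CreateSExt(Tr, V8I64);   // ISD::SIGN_EXTEND  v8i32 -> v8i64  (cost 1)
      (void)Shl; (void)Fp; (void)Sx;
      B.CreateRetVoid();

      M.print(outs(), nullptr);               // dump the module as textual IR
      return 0;
    }
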
X86CallingConv.td
     68: CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    122: CCIfType<[v16f32, v8f64, v16i32, v8i64],
    149: CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    312: CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    332: CCIfType<[v16i32, v8i64, v16f32, v8f64],
    372: CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,
    415: CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    490: CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    507: CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    526: CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
    [all …]

X86InstrFragmentsSIMD.td
    633: def loadv8i64 : PatFrag<(ops node:$ptr), (v8i64 (load node:$ptr))>;
    718: (v8i64 (alignedload512 node:$ptr))>;
    792: return (Mgt->getIndex().getValueType() == MVT::v8i64 ||
    793: Mgt->getBasePtr().getValueType() == MVT::v8i64);
    839: return (Sc->getIndex().getValueType() == MVT::v8i64 ||
    840: Sc->getBasePtr().getValueType() == MVT::v8i64);
    868: def bc_v8i64 : PatFrag<(ops node:$in), (v8i64 (bitconvert node:$in))>;

X86InstrAVX512.td
     83: !if (!eq (EltSize, 64), "v8i64", "v16i32"),
    376: def : Pat<(v8f64 (bitconvert (v8i64 VR512:$src))), (v8f64 VR512:$src)>;
    381: def : Pat<(v16f32 (bitconvert (v8i64 VR512:$src))), (v16f32 VR512:$src)>;
    386: def : Pat<(v8i64 (bitconvert (v16i32 VR512:$src))), (v8i64 VR512:$src)>;
    387: def : Pat<(v8i64 (bitconvert (v32i16 VR512:$src))), (v8i64 VR512:$src)>;
    388: def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
    389: def : Pat<(v8i64 (bitconvert (v8f64 VR512:$src))), (v8i64 VR512:$src)>;
    390: def : Pat<(v8i64 (bitconvert (v16f32 VR512:$src))), (v8i64 VR512:$src)>;
    391: def : Pat<(v16i32 (bitconvert (v8i64 VR512:$src))), (v16i32 VR512:$src)>;
    396: def : Pat<(v32i16 (bitconvert (v8i64 VR512:$src))), (v32i16 VR512:$src)>;
    [all …]

X86ISelLowering.cpp
   1311: addRegisterClass(MVT::v8i64, &X86::VR512RegClass);  in X86TargetLowering()
   1327: setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);  in X86TargetLowering()
   1328: setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i8, Legal);  in X86TargetLowering()
   1329: setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);  in X86TargetLowering()
   1330: setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i16, Legal);  in X86TargetLowering()
   1331: setLoadExtAction(ISD::ZEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);  in X86TargetLowering()
   1332: setLoadExtAction(ISD::SEXTLOAD, MVT::v8i64, MVT::v8i32, Legal);  in X86TargetLowering()
   1345: setOperationAction(ISD::LOAD, MVT::v8i64, Legal);  in X86TargetLowering()
   1384: setTruncStoreAction(MVT::v8i64, MVT::v8i8, Legal);  in X86TargetLowering()
   1385: setTruncStoreAction(MVT::v8i64, MVT::v8i16, Legal);  in X86TargetLowering()
    [all …]

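The X86ISelLowering hits register v8i64 in the VR512 class and mark extending loads from v8i8/v8i16/v8i32 and truncating stores back to them as Legal, so the legalizer keeps those memory shapes for the AVX-512 selector. Below is a hedged sketch of the IR shapes those hooks refer to, not taken from this tree; the names are invented and the typed-pointer IRBuilder API of this snapshot is assumed.

    // Sketch only: the IR shapes behind the Legal extending-load / truncating-store
    // entries above. With AVX-512, the widening load can select to a single
    // vpmovzx-style load and the trunc + store to a single truncating store.
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("v8i64-mem", Ctx);
      VectorType *V8I8  = VectorType::get(Type::getInt8Ty(Ctx), 8);
      VectorType *V8I64 = VectorType::get(Type::getInt64Ty(Ctx), 8);

      // void @demo(<8 x i8>* %src, <8 x i64>* %dst)
      FunctionType *FT = FunctionType::get(
          Type::getVoidTy(Ctx),
          {PointerType::getUnqual(V8I8), PointerType::getUnqual(V8I64)}, false);
      Function *F = Function::Create(FT, Function::ExternalLinkage, "demo", &M);
      IRBuilder<> B(BasicBlock::Create(Ctx, "entry", F));

      auto AI = F->arg_begin();
      Value *Src = &*AI++;
      Value *Dst = &*AI;

      // load <8 x i8> + zext to <8 x i64>  ==  ZEXTLOAD v8i64 <- v8i8 (Legal above).
      Value *Narrow = B.CreateLoad(V8I8, Src);
      Value *Wide   = B.CreateZExt(Narrow, V8I64);
      B.CreateStore(Wide, Dst);

      // trunc <8 x i64> to <8 x i8> + store  ==  trunc-store v8i64 -> v8i8 (Legal above).
      Value *Back = B.CreateTrunc(B.CreateLoad(V8I64, Dst), V8I8);
      B.CreateStore(Back, Src);
      B.CreateRetVoid();
      return 0;
    }
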
X86RegisterInfo.td
    465: def VR512 : RegisterClass<"X86", [v16f32, v8f64, v64i8, v32i16, v16i32, v8i64],

/external/llvm/lib/Target/AArch64/

AArch64TargetTransformInfo.cpp
    205: { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },  in getCastInstrCost()
    206: { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },  in getCastInstrCost()
    207: { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },  in getCastInstrCost()
    208: { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },  in getCastInstrCost()
    395: { ISD::SELECT, MVT::v8i1, MVT::v8i64, 8 * AmortizationCost },  in getCmpSelInstrCost()

/external/llvm/lib/Target/ARM/

ARMTargetTransformInfo.cpp
     91: { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i8, 7 },  in getCastInstrCost()
     92: { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i8, 7 },  in getCastInstrCost()
     93: { ISD::SIGN_EXTEND, MVT::v8i64, MVT::v8i16, 6 },  in getCastInstrCost()
     94: { ISD::ZERO_EXTEND, MVT::v8i64, MVT::v8i16, 6 },  in getCastInstrCost()
    278: { ISD::SELECT, MVT::v8i1, MVT::v8i64, 50 },  in getCmpSelInstrCost()

ARMRegisterInfo.td
    398: def QQQQPR : RegisterClass<"ARM", [v8i64], 256, (add Tuples2QQ)> {

ARMISelDAGToDAG.cpp
   2053: SDValue RegSeq = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);  in SelectVST()
   2174: SuperReg = SDValue(createQuadQRegsNode(MVT::v8i64, V0, V1, V2, V3), 0);  in SelectVLDSTLane()

/external/llvm/test/CodeGen/X86/

vector-lzcnt-512.ll
      9: %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 0)
     18: %out = call <8 x i64> @llvm.ctlz.v8i64(<8 x i64> %in, i1 -1)
    216: declare <8 x i64> @llvm.ctlz.v8i64(<8 x i64>, i1)

vector-popcnt-512.ll
     42: %out = call <8 x i64> @llvm.ctpop.v8i64(<8 x i64> %in)
    158: declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>)

vector-tzcnt-512.ll
     42: %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 0)
     56: %out = call <8 x i64> @llvm.cttz.v8i64(<8 x i64> %in, i1 -1)
    268: declare <8 x i64> @llvm.cttz.v8i64(<8 x i64>, i1)

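The three 512-bit bit-count tests call the overloaded ctlz/ctpop/cttz intrinsics, whose .v8i64 suffix is derived from the operand type. Below is a small sketch, not from this tree, of obtaining that mangled declaration from C++; it assumes the Intrinsic::getDeclaration(Module*, ID, ArrayRef<Type*>) overload present in this snapshot, and the module name is made up.

    // Sketch only: retrieves the llvm.ctpop.v8i64 declaration used by the tests above.
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/Intrinsics.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Module.h"
    #include "llvm/Support/raw_ostream.h"
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      Module M("ctpop-demo", Ctx);
      VectorType *V8I64 = VectorType::get(Type::getInt64Ty(Ctx), 8);

      // ctpop is overloaded on its data type; passing <8 x i64> selects the
      // .v8i64 variant:  declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>)
      Function *Ctpop = Intrinsic::getDeclaration(&M, Intrinsic::ctpop, {V8I64});
      outs() << Ctpop->getName() << "\n";   // prints "llvm.ctpop.v8i64"
      return 0;
    }
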
/external/llvm/lib/IR/

ValueTypes.cpp
    175: case MVT::v8i64: return "v8i64";  in getEVTString()
    253: case MVT::v8i64: return VectorType::get(Type::getInt64Ty(Context), 8);  in getTypeForEVT()

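These two switch cases are the bridge between the codegen-level MVT::v8i64 and the IR type <8 x i64>. A minimal sketch of that round trip follows, assuming this tree's headers (EVT in llvm/CodeGen/ValueTypes.h) and the two-argument VectorType::get of this snapshot; it is not code from ValueTypes.cpp itself.

    // Sketch only: MVT::v8i64 <-> IR <8 x i64> via the functions listed above.
    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/DerivedTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include <cassert>
    using namespace llvm;

    int main() {
      LLVMContext Ctx;
      EVT VT = MVT::v8i64;

      assert(VT.getEVTString() == "v8i64");                     // getEVTString()
      Type *Ty = VT.getTypeForEVT(Ctx);                         // getTypeForEVT()
      assert(Ty == VectorType::get(Type::getInt64Ty(Ctx), 8));  // the uniqued <8 x i64>

      // And back: the IR type maps onto the same simple value type.
      assert(EVT::getEVT(Ty) == VT);
      return 0;
    }
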
/external/llvm/lib/Target/Hexagon/

HexagonIntrinsicsV60.td
     97: def : Pat <(v512i1 (bitconvert (v8i64 VectorRegs:$src1))),
     98: (v512i1 (V6_vandvrt(v8i64 VectorRegs:$src1),
    117: def : Pat <(v8i64 (bitconvert (v512i1 VecPredRegs:$src1))),
    118: (v8i64 (V6_vandqrt(v512i1 VecPredRegs:$src1),

HexagonISelLowering.cpp
    197: if (LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||  in CC_Hexagon_VarArg()
    337: (LocVT == MVT::v8i64 || LocVT == MVT::v16i32 || LocVT == MVT::v32i16 ||  in CC_HexagonVector()
    410: LocVT == MVT::v16i32 || LocVT == MVT::v8i64 ||  in RetCC_Hexagon()
    544: return (ty == MVT::v8i64 || ty == MVT::v16i32 || ty == MVT::v32i16 ||  in IsHvxVectorType()
    888: UseHVX && !UseHVXDbl && (VT == MVT::v16i32 || VT == MVT::v8i64 ||  in getIndexedAddressParts()
   1082: } else if ((RegVT == MVT::v8i64 || RegVT == MVT::v16i32 ||  in LowerFormalArguments()
   1569: addRegisterClass(MVT::v8i64, &Hexagon::VectorRegsRegClass);  in HexagonTargetLowering()
   2673: case MVT::v8i64:  in getRegForInlineAsmConstraint()
   2683: case MVT::v8i64:  in getRegForInlineAsmConstraint()
   2829: case MVT::v8i64:  in findRepresentativeClass()

HexagonRegisterInfo.td
    217: def VectorRegs : RegisterClass<"Hexagon", [v64i8, v32i16, v16i32, v8i64], 512,

HexagonISelDAGToDAG.cpp
    405: } else if (LoadedVT == MVT::v16i32 || LoadedVT == MVT::v8i64 ||  in SelectIndexedLoad()
    526: else if (StoredVT == MVT::v16i32 || StoredVT == MVT::v8i64 ||  in SelectIndexedStore()
    568: else if (StoredVT == MVT::v16i32 || StoredVT == MVT::v8i64 ||  in SelectIndexedStore()

HexagonInstrInfoVector.td
     79: defm : bitconvert_vec<v8i64 , v16i32>;

HexagonInstrInfoV60.td
    817: defm : vS32b_ai_pats <v8i64, v16i64>;
    870: defm : vL32b_ai_pats <v8i64, v16i64>;

/external/llvm/test/CodeGen/AMDGPU/

ctpop64.ll
      7: declare <8 x i64> @llvm.ctpop.v8i64(<8 x i64>) nounwind readnone

/external/llvm/utils/TableGen/

CodeGenTarget.cpp
    108: case MVT::v8i64: return "MVT::v8i64";  in getEnumName()

/external/llvm/include/llvm/IR/

Intrinsics.td
    197: def llvm_v8i64_ty : LLVMType<v8i64>; // 8 x i64