/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
      99  v16i64 = 49, // 16 x i64  (enumerator)
     274  SimpleTy == MVT::v16i64);  in is1024BitVector()
     353  case v16i64:  in getVectorElementType()
     393  case v16i64:  in getVectorNumElements()
     507  case v16i64: return 1024;  in getSizeInBits()
     637  if (NumElements == 16) return MVT::v16i64;  in getVectorVT()
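All three copies of MachineValueType.h in this listing define the same type; only the enumerator value moves between LLVM versions (49 here, 61 in the LLVM 10 swiftshader copy, 62 in llvm-project) as new types are inserted into the list. A minimal sketch of how the matched helpers relate, written against the LLVM 10-era C++ API; the function itself is invented for illustration, and note the header lives under llvm/CodeGen/ in this tree but llvm/Support/ in the newer ones:

    #include "llvm/Support/MachineValueType.h"
    #include <cassert>

    using namespace llvm;

    void checkV16i64() {
      // getVectorVT() maps (i64, 16 elements) back to the enumerator.
      MVT VT = MVT::getVectorVT(MVT::i64, 16);
      assert(VT == MVT::v16i64);

      assert(VT.getVectorElementType() == MVT::i64); // case v16i64 in getVectorElementType()
      assert(VT.getVectorNumElements() == 16);       // case v16i64 in getVectorNumElements()
      assert(VT.getSizeInBits() == 1024);            // case v16i64 in getSizeInBits()
      assert(VT.is1024BitVector());                  // SimpleTy == MVT::v16i64
    }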
|
D | ValueTypes.td |
      76  def v16i64 : ValueType<1024,49>; // 16 x i64 vector value
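The two ValueType arguments are the size in bits and the enumerator index, which must stay in sync with v16i64 = 49 above (the newer trees renumber it to 61 and 62). A hypothetical compile-time check of that invariant, valid only for this tree's numbering:

    #include "llvm/CodeGen/MachineValueType.h"

    // Assumption: this tree's numbering; the other copies use 61 and 62.
    static_assert(llvm::MVT::v16i64 == 49,
                  "enumerator must match the index in ValueTypes.td");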
|
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/Support/ |
D | MachineValueType.h |
     111  v16i64 = 61, // 16 x i64  (enumerator)
     377  SimpleTy == MVT::v16i64);  in is1024BitVector()
     499  case v16i64:  in getVectorElementType()
     589  case v16i64:  in getVectorNumElements()
     807  case v16i64:  in getSizeInBits()
     978  if (NumElements == 16) return MVT::v16i64;  in getVectorVT()
|
/external/llvm-project/llvm/test/Analysis/CostModel/ARM/ |
D | reduce-add.ll |
      13  …estimated cost of 92 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      21  …estimated cost of 107 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      29  …estimated cost of 92 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      37  …estimated cost of 103 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      44  %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
     103  declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
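The CHECK lines above all exercise one intrinsic at different cost-model settings. For reference, a minimal sketch of building that call from C++ rather than writing the IR by hand; the wrapper is invented, but CreateAddReduce is the stock IRBuilder helper (recent trees emit @llvm.vector.reduce.add.*, older ones the @llvm.experimental.vector.reduce.* spelling):

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Emits: %r = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %Vec)
    Value *emitReduceAdd(IRBuilder<> &B, Value *Vec /* <16 x i64> */) {
      return B.CreateAddReduce(Vec);
    }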
|
D | mve-vecreduce-add.ll |
     239  …estimated cost of 730 for instruction: %a4z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a4za)
     241  …estimated cost of 730 for instruction: %a4s = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a4sa)
     259  …estimated cost of 730 for instruction: %a9z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a9za)
     261  …estimated cost of 730 for instruction: %a9s = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a9sa)
     279  …estimated cost of 730 for instruction: %a14z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a14za)
     281  …estimated cost of 730 for instruction: %a14s = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a14sa)
     286  …estimated cost of 730 for instruction: %a19 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
     314  %a4z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a4za)
     317  %a4s = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a4sa)
     344  %a9z = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %a9za)
     [all …]
|
D | reduce-umin.ll |
      12  …estimated cost of 167 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      20  …estimated cost of 178 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      28  …estimated cost of 970 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      35  %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
     154  declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)
|
D | reduce-smin.ll |
      12  …estimated cost of 167 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      20  …estimated cost of 178 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      28  …estimated cost of 970 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      35  %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
     154  declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)
|
D | reduce-umax.ll |
      12  …estimated cost of 167 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      20  …estimated cost of 178 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      28  …estimated cost of 970 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      35  %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
     154  declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)
|
D | reduce-smax.ll |
      12  …estimated cost of 167 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      20  …estimated cost of 178 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      28  …estimated cost of 970 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      35  %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
     154  declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)
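The four ARM entries above (and the matching X86 files further down) cover the signed and unsigned integer min/max reductions. A sketch of the two IRBuilder helpers that emit all four intrinsics; only the IsSigned flag selects between them, and the wrapper is invented for illustration:

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    void emitMinMaxReductions(IRBuilder<> &B, Value *V /* <16 x i64> */) {
      Value *UMin = B.CreateIntMinReduce(V, /*IsSigned=*/false); // @llvm.vector.reduce.umin.v16i64
      Value *SMin = B.CreateIntMinReduce(V, /*IsSigned=*/true);  // @llvm.vector.reduce.smin.v16i64
      Value *UMax = B.CreateIntMaxReduce(V, /*IsSigned=*/false); // @llvm.vector.reduce.umax.v16i64
      Value *SMax = B.CreateIntMaxReduce(V, /*IsSigned=*/true);  // @llvm.vector.reduce.smax.v16i64
      (void)UMin; (void)SMin; (void)UMax; (void)SMax;
    }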
|
/external/llvm-project/llvm/include/llvm/Support/ |
D | MachineValueType.h |
     112  v16i64 = 62, // 16 x i64  (enumerator)
     410  SimpleTy == MVT::v16i64 || SimpleTy == MVT::v64f16 ||  in is1024BitVector()
     568  case v16i64:  in getVectorElementType()
     697  case v16i64:  in getVectorNumElements()
     946  case v16i64:  in getSizeInBits()
    1165  if (NumElements == 16) return MVT::v16i64;  in getVectorVT()
|
/external/llvm-project/llvm/test/CodeGen/X86/ |
D | avx512-masked-memop-64-32.ll |
     155  …call void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %src0, <16 x i64>* %ptrs, i32 4, <16 x i1>…
     158  declare void @llvm.masked.store.v16i64.p0v16i64(<16 x i64> %src0, <16 x i64>* %ptrs, i32, <16 x i1>…
     205  %res = call <16 x i64> @llvm.masked.load.v16i64.p0v16i64(<16 x i64>* %ptrs, i32 4, <16 x i1> %mask…
     208  declare <16 x i64> @llvm.masked.load.v16i64.p0v16i64(<16 x i64>* %ptrs, i32, <16 x i1> %mask, <16 x…
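This test stores and reloads a full <16 x i64> under a lane mask with alignment 4. A sketch of emitting the same pair through IRBuilder; the signatures shown are the recent ones taking an Align, while older trees pass a plain unsigned alignment and CreateMaskedLoad did not take the explicit result type:

    #include "llvm/IR/Constants.h"
    #include "llvm/IR/IRBuilder.h"
    #include "llvm/Support/Alignment.h"

    using namespace llvm;

    void emitMaskedRoundTrip(IRBuilder<> &B, Value *Src /* <16 x i64> */,
                             Value *Ptr, Value *Mask /* <16 x i1> */) {
      // call void @llvm.masked.store.v16i64.p0v16i64(..., i32 4, ...)
      B.CreateMaskedStore(Src, Ptr, Align(4), Mask);
      // %res = call <16 x i64> @llvm.masked.load.v16i64.p0v16i64(...)
      Value *Res = B.CreateMaskedLoad(Src->getType(), Ptr, Align(4), Mask,
                                      /*PassThru=*/UndefValue::get(Src->getType()));
      (void)Res;
    }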
|
/external/llvm-project/llvm/test/Analysis/CostModel/X86/ |
D | reduce-mul.ll |
      17  …estimated cost of 66 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
      25  …estimated cost of 73 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
      33  …estimated cost of 43 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
      41  …estimated cost of 36 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
      49  …estimated cost of 36 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
      57  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
      64  %V16 = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> undef)
     264  declare i64 @llvm.vector.reduce.mul.v16i64(<16 x i64>)
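Multiply reductions have a dedicated IRBuilder helper as well; a one-line sketch with an invented wrapper:

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    // Emits: %r = call i64 @llvm.vector.reduce.mul.v16i64(<16 x i64> %Vec)
    Value *emitReduceMul(IRBuilder<> &B, Value *Vec /* <16 x i64> */) {
      return B.CreateMulReduce(Vec);
    }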
|
D | reduce-umin.ll |
      18  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      26  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      34  …estimated cost of 74 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      42  …estimated cost of 26 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      50  …estimated cost of 24 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      58  …estimated cost of 15 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      66  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
      73  %V16 = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> undef)
     311  declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)
|
D | reduce-smax.ll |
      18  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      26  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      34  …estimated cost of 74 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      42  …estimated cost of 18 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      50  …estimated cost of 22 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      58  …estimated cost of 13 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      66  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
      73  %V16 = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> undef)
     311  declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)
|
D | reduce-umax.ll |
      18  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      26  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      34  …estimated cost of 74 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      42  …estimated cost of 26 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      50  …estimated cost of 24 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      58  …estimated cost of 15 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      66  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
      73  %V16 = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> undef)
     311  declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)
|
D | reduce-smin.ll |
      18  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      26  …estimated cost of 90 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      34  …estimated cost of 74 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      42  …estimated cost of 18 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      50  …estimated cost of 22 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      58  …estimated cost of 13 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      66  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
      73  %V16 = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> undef)
     311  declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)
|
D | reduce-add.ll |
      19  …estimated cost of 9 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      27  …estimated cost of 15 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      35  …estimated cost of 6 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      43  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      51  …estimated cost of 33 for instruction: %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
      58  %V16 = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> undef)
     269  declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
|
D | reduce-and.ll |
      17  …estimated cost of 10 for instruction: %V16 = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> undef)
      25  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> undef)
      33  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> undef)
      40  %V16 = call i64 @llvm.vector.reduce.and.v16i64(<16 x i64> undef)
     237  declare i64 @llvm.vector.reduce.and.v16i64(<16 x i64>)
|
D | reduce-or.ll |
      17  …estimated cost of 10 for instruction: %V16 = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> undef)
      25  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> undef)
      33  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> undef)
      40  %V16 = call i64 @llvm.vector.reduce.or.v16i64(<16 x i64> undef)
     237  declare i64 @llvm.vector.reduce.or.v16i64(<16 x i64>)
|
D | reduce-xor.ll |
      17  …estimated cost of 10 for instruction: %V16 = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> undef)
      25  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> undef)
      33  …estimated cost of 8 for instruction: %V16 = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> undef)
      40  %V16 = call i64 @llvm.vector.reduce.xor.v16i64(<16 x i64> undef)
     259  declare i64 @llvm.vector.reduce.xor.v16i64(<16 x i64>)
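The and/or/xor entries above round out the bitwise reductions, and each also has a dedicated IRBuilder helper. A combined sketch with an invented wrapper:

    #include "llvm/IR/IRBuilder.h"

    using namespace llvm;

    void emitBitwiseReductions(IRBuilder<> &B, Value *V /* <16 x i64> */) {
      Value *And = B.CreateAndReduce(V); // @llvm.vector.reduce.and.v16i64
      Value *Or  = B.CreateOrReduce(V);  // @llvm.vector.reduce.or.v16i64
      Value *Xor = B.CreateXorReduce(V); // @llvm.vector.reduce.xor.v16i64
      (void)And; (void)Or; (void)Xor;
    }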
|
/external/llvm/lib/IR/ |
D | ValueTypes.cpp |
     181  case MVT::v16i64: return "v16i64";  in getEVTString()
     259  case MVT::v16i64: return VectorType::get(Type::getInt64Ty(Context), 16);  in getTypeForEVT()
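These two switches give v16i64 its printable name and its IR-level type. A sketch of the round trip through the public EVT API; the function itself is illustrative only:

    #include "llvm/CodeGen/ValueTypes.h"
    #include "llvm/IR/LLVMContext.h"
    #include "llvm/IR/Type.h"

    using namespace llvm;

    void describeV16i64(LLVMContext &Ctx) {
      EVT VT = MVT::v16i64;
      std::string Name = VT.getEVTString();  // "v16i64"
      Type *Ty = VT.getTypeForEVT(Ctx);      // the IR type <16 x i64>
      (void)Name; (void)Ty;
    }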
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonISelLowering.cpp |
     203  if (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||  in CC_Hexagon_VarArg()
     348  (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||  in CC_HexagonVector()
     371  (LocVT == MVT::v16i64 || LocVT == MVT::v32i32 || LocVT == MVT::v64i16 ||  in CC_HexagonVector()
     419  LocVT == MVT::v32i32 || LocVT == MVT::v16i64 ||  in RetCC_Hexagon()
     545  ty == MVT::v16i64 || ty == MVT::v32i32 || ty == MVT::v64i16 ||  in IsHvxVectorType()
     898  (UseHVX && UseHVXDbl) && (VT == MVT::v32i32 || VT == MVT::v16i64 ||  in getIndexedAddressParts()
    1125  ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||  in LowerFormalArguments()
    1133  } else if ((RegVT == MVT::v16i64 || RegVT == MVT::v32i32 ||  in LowerFormalArguments()
    1762  addRegisterClass(MVT::v16i64, &Hexagon::VecDblRegsRegClass);  in HexagonTargetLowering()
    1768  addRegisterClass(MVT::v16i64, &Hexagon::VectorRegs128BRegClass);  in HexagonTargetLowering()
     [all …]
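Every match here is the Hexagon backend asking the same question: is this one of the 1024-bit HVX double-vector shapes? Those shapes (v128i8, v64i16, v32i32, v16i64) are exactly the types the register classes in the next file list. A condensed sketch of that recurring predicate; the helper name is invented, since the real code inlines the comparisons as shown above:

    #include "llvm/CodeGen/MachineValueType.h"

    using namespace llvm;

    // Mirrors the LocVT/RegVT comparisons in CC_HexagonVector(),
    // IsHvxVectorType(), LowerFormalArguments(), etc.
    static bool isHvx128ByteVectorType(MVT VT) {
      return VT == MVT::v16i64 || VT == MVT::v32i32 ||
             VT == MVT::v64i16 || VT == MVT::v128i8;
    }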
|
D | HexagonRegisterInfo.td |
     230  [v128i8, v64i16, v32i32, v16i64], 1024,
     234  [v128i8, v64i16, v32i32, v16i64], 1024,
|
D | HexagonIntrinsicsV60.td |
     139  def : Pat <(v1024i1 (bitconvert (v16i64 VectorRegs128B:$src1))),
     140  (v1024i1 (V6_vandvrt_128B(v16i64 VectorRegs128B:$src1),
     159  def : Pat <(v16i64 (bitconvert (v1024i1 VecPredRegs128B:$src1))),
     160  (v16i64 (V6_vandqrt_128B(v1024i1 VecPredRegs128B:$src1),
|
/external/llvm-project/llvm/test/CodeGen/AArch64/ |
D | sve-fixed-length-int-reduce.ll |
     315  %res = call i64 @llvm.vector.reduce.add.v16i64(<16 x i64> %op)
     625  %res = call i64 @llvm.vector.reduce.smax.v16i64(<16 x i64> %op)
     935  %res = call i64 @llvm.vector.reduce.smin.v16i64(<16 x i64> %op)
    1245  %res = call i64 @llvm.vector.reduce.umax.v16i64(<16 x i64> %op)
    1555  %res = call i64 @llvm.vector.reduce.umin.v16i64(<16 x i64> %op)
    1598  declare i64 @llvm.vector.reduce.add.v16i64(<16 x i64>)
    1626  declare i64 @llvm.vector.reduce.smax.v16i64(<16 x i64>)
    1654  declare i64 @llvm.vector.reduce.smin.v16i64(<16 x i64>)
    1682  declare i64 @llvm.vector.reduce.umax.v16i64(<16 x i64>)
    1710  declare i64 @llvm.vector.reduce.umin.v16i64(<16 x i64>)
|