/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/X86/ |
D | X86InstrVecCompiler.td |
      90 def : Pat<(v8f64 (bitconvert (v64i8 VR512:$src))), (v8f64 VR512:$src)>;
      95 def : Pat<(v16f32 (bitconvert (v64i8 VR512:$src))), (v16f32 VR512:$src)>;
      99 def : Pat<(v8i64 (bitconvert (v64i8 VR512:$src))), (v8i64 VR512:$src)>;
      105 def : Pat<(v16i32 (bitconvert (v64i8 VR512:$src))), (v16i32 VR512:$src)>;
      109 def : Pat<(v32i16 (bitconvert (v64i8 VR512:$src))), (v32i16 VR512:$src)>;
      112 def : Pat<(v64i8 (bitconvert (v8i64 VR512:$src))), (v64i8 VR512:$src)>;
      113 def : Pat<(v64i8 (bitconvert (v16i32 VR512:$src))), (v64i8 VR512:$src)>;
      114 def : Pat<(v64i8 (bitconvert (v32i16 VR512:$src))), (v64i8 VR512:$src)>;
      115 def : Pat<(v64i8 (bitconvert (v8f64 VR512:$src))), (v64i8 VR512:$src)>;
      116 def : Pat<(v64i8 (bitconvert (v16f32 VR512:$src))), (v64i8 VR512:$src)>;
      [all …]
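These patterns declare every 512-bit bitconvert involving v64i8 to be a no-op, so an IR bitcast between <64 x i8> and any other 512-bit vector type costs no instructions; the same VR512 (ZMM) register is simply reinterpreted. A minimal IR sketch (the function name and target type are chosen here for illustration):

      define <8 x i64> @reinterpret_bytes(<64 x i8> %v) {
        ; expected to select to a plain register copy / reinterpretation
        %r = bitcast <64 x i8> %v to <8 x i64>
        ret <8 x i64> %r
      }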
|
D | X86TargetTransformInfo.cpp |
      290 { ISD::SHL, MVT::v64i8, 2 }, // psllw + pand. in getArithmeticInstrCost()
      291 { ISD::SRL, MVT::v64i8, 2 }, // psrlw + pand. in getArithmeticInstrCost()
      292 { ISD::SRA, MVT::v64i8, 4 }, // psrlw, pand, pxor, psubb. in getArithmeticInstrCost()
      457 { ISD::SHL, MVT::v64i8, 11 }, // vpblendvb sequence. in getArithmeticInstrCost()
      458 { ISD::SRL, MVT::v64i8, 11 }, // vpblendvb sequence. in getArithmeticInstrCost()
      459 { ISD::SRA, MVT::v64i8, 24 }, // vpblendvb sequence. in getArithmeticInstrCost()
      461 { ISD::MUL, MVT::v64i8, 11 }, // extend/pmullw/trunc sequence. in getArithmeticInstrCost()
      856 { TTI::SK_Reverse, MVT::v64i8, 1 }, // vpermb in getShuffleCost()
      859 { TTI::SK_PermuteSingleSrc, MVT::v64i8, 1 }, // vpermb in getShuffleCost()
      862 { TTI::SK_PermuteTwoSrc, MVT::v64i8, 1 }, // vpermt2b in getShuffleCost()
      [all …]
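These cost-table rows reflect that x86 has no byte-granularity vector shift: a uniform v64i8 shift is charged as a 16-bit shift plus a mask (psllw + pand), while a fully variable shift is charged as a longer blend-based sequence, and v64i8 multiply as a widen/pmullw/truncate sequence. A hedged IR sketch of the variable-shift case these rows price (function and value names are illustrative):

      define <64 x i8> @var_shift_bytes(<64 x i8> %a, <64 x i8> %amt) {
        ; no per-byte shift instruction exists, so this is expected to
        ; expand into the vpblendvb-style sequence costed above
        %r = shl <64 x i8> %a, %amt
        ret <64 x i8> %r
      }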
|
D | X86CallingConv.td |
      121 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      195 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      231 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
      248 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      532 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
      550 CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      673 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      731 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      748 CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      767 CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      [all …]
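These entries handle v64i8 together with the other 512-bit vector types in argument and return lowering, and promote a v64i1 mask to v64i8 first. As a rough illustration (assuming an AVX-512 capable x86-64 target; the function itself is hypothetical), both the argument and the return value below would be assigned to a ZMM register by rules of this form:

      define <64 x i8> @passthrough(<64 x i8> %x) {
        ret <64 x i8> %x
      }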
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/Hexagon/autohvx/ |
D | bitcount-64b.ll |
      9 %t0 = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a0)
      38 %t0 = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a0)
      74 %t0 = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a0)
      113 declare <64 x i8> @llvm.ctpop.v64i8(<64 x i8>) #0
      117 declare <64 x i8> @llvm.ctlz.v64i8(<64 x i8>) #0
      121 declare <64 x i8> @llvm.cttz.v64i8(<64 x i8>) #0
|
/external/llvm/include/llvm/CodeGen/ |
D | MachineValueType.h |
      74 v64i8 = 27, // 64 x i8 enumerator
      265 SimpleTy == MVT::v512i1 || SimpleTy == MVT::v64i8 || in is512BitVector()
      331 case v64i8: in getVectorElementType()
      381 case v64i8: in getVectorNumElements()
      497 case v64i8: in getSizeInBits()
      609 if (NumElements == 64) return MVT::v64i8; in getVectorVT()
|
D | ValueTypes.td | 51 def v64i8 : ValueType<512, 27>; // 64 x i8 vector value
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/Hexagon/ |
D | HexagonCallingConv.td |
      86 CCIfType<[v16i32,v32i16,v64i8],
      92 CCIfType<[v16i32,v32i16,v64i8],
      118 CCIfType<[v16i32,v32i16,v64i8],
|
D | HexagonIntrinsicsV60.td |
      35 def : Pat <(v512i1 (bitconvert (v64i8 HvxVR:$src1))),
      36 (v512i1 (V6_vandvrt(v64i8 HvxVR:$src1), (A2_tfrsi 0x01010101)))>;
      44 def : Pat <(v64i8 (bitconvert (v512i1 HvxQR:$src1))),
      45 (v64i8 (V6_vandqrt(v512i1 HvxQR:$src1), (A2_tfrsi 0x01010101)))>;
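On Hexagon HVX in 64-byte mode, v64i8 lives in a vector register (HvxVR) and v512i1 in a predicate register (HvxQR), so a bitconvert between them is not a free reinterpretation; these patterns lower it through V6_vandvrt / V6_vandqrt with a 0x01010101 splat. A sketch of the IR shape the first pattern matches (the function itself is illustrative, not from the source):

      define <512 x i1> @vec_to_pred(<64 x i8> %v) {
        ; both sides are 512 bits wide, so the bitcast is legal IR;
        ; the pattern above selects it to V6_vandvrt
        %q = bitcast <64 x i8> %v to <512 x i1>
        ret <512 x i1> %q
      }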
|
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Support/ |
D | MachineValueType.h |
      77 v64i8 = 30, // 64 x i8 enumerator
      369 SimpleTy == MVT::v512i1 || SimpleTy == MVT::v64i8 || in is512BitVector()
      442 case v64i8: in getVectorElementType()
      529 case v64i8: in getVectorNumElements()
      728 case v64i8: in getSizeInBits()
      850 if (NumElements == 64) return MVT::v64i8; in getVectorVT()
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/CodeGen/X86/ |
D | avx512-masked_memop-16-8.ll |
      37 …%res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1>%mask, <64…
      40 declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)
      111 …call void @llvm.masked.store.v64i8.p0v64i8(<64 x i8> %val, <64 x i8>* %addr, i32 4, <64 x i1>%mask)
      114 declare void @llvm.masked.store.v64i8.p0v64i8(<64 x i8>, <64 x i8>*, i32, <64 x i1>)
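For reference, this is the overall shape of the masked-load call that is truncated above, written as a self-contained sketch rather than the literal test body (pointer-typed signature as in LLVM 7; the alignment of 4 and the value names are placeholders):

      define <64 x i8> @masked_load_bytes(<64 x i8>* %addr, <64 x i1> %mask, <64 x i8> %passthru) {
        ; lanes whose mask bit is 0 take their value from %passthru
        %res = call <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>* %addr, i32 4, <64 x i1> %mask, <64 x i8> %passthru)
        ret <64 x i8> %res
      }
      declare <64 x i8> @llvm.masked.load.v64i8.p0v64i8(<64 x i8>*, i32, <64 x i1>, <64 x i8>)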
|
D | bitcast-setcc-512.ll |
      204 define i64 @v64i8(<64 x i8> %a, <64 x i8> %b) {
      205 ; SSE-LABEL: v64i8:
      223 ; AVX1-LABEL: v64i8:
      246 ; AVX2-LABEL: v64i8:
      257 ; AVX512F-LABEL: v64i8:
      284 ; AVX512BW-LABEL: v64i8:
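The @v64i8 function in this test exercises turning a byte-vector comparison into a 64-bit scalar mask; with AVX-512BW the compare can produce a k-register directly, which a single move then extracts. A hedged sketch of that kind of pattern (not the literal test body; the comparison predicate is illustrative):

      define i64 @cmp_mask(<64 x i8> %a, <64 x i8> %b) {
        %cmp = icmp sgt <64 x i8> %a, %b      ; yields <64 x i1>
        %mask = bitcast <64 x i1> %cmp to i64
        ret i64 %mask
      }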
|
D | bitcast-and-setcc-512.ll |
      533 define i64 @v64i8(<64 x i8> %a, <64 x i8> %b, <64 x i8> %c, <64 x i8> %d) {
      534 ; SSE-LABEL: v64i8:
      564 ; AVX1-LABEL: v64i8:
      599 ; AVX2-LABEL: v64i8:
      614 ; AVX512F-LABEL: v64i8:
      653 ; AVX512BW-LABEL: v64i8:
|
D | vector-popcnt-512.ll |
      282 %out = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %in)
      289 declare <64 x i8> @llvm.ctpop.v64i8(<64 x i8>)
|
/external/llvm/lib/Target/X86/ |
D | X86CallingConv.td |
      51 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
      68 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      149 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      328 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
      346 CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      449 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      524 CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      541 CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      560 CCIfNotVarArg<CCIfType<[v64i8, v32i16, v16i32, v8i64, v16f32, v8f64],
      601 CCIfType<[v64i1], CCPromoteToType<v64i8>>,
      [all …]
|
/external/swiftshader/third_party/llvm-7.0/llvm/test/Analysis/CostModel/X86/ |
D | ctlz.ll |
      104 declare <64 x i8> @llvm.ctlz.v64i8(<64 x i8>, i1)
      738 …n estimated cost of 68 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      742 …n estimated cost of 36 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      746 …n estimated cost of 40 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      750 …n estimated cost of 18 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      754 …n estimated cost of 18 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      758 …n estimated cost of 17 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      762 …n estimated cost of 20 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 f…
      765 %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 0)
      771 …n estimated cost of 68 for instruction: %ctlz = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %a, i1 t…
      [all …]
|
D | ctpop.ll |
      84 declare <64 x i8> @llvm.ctpop.v64i8(<64 x i8>)
      359 …nd an estimated cost of 40 for instruction: %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
      363 …nd an estimated cost of 24 for instruction: %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
      367 …nd an estimated cost of 28 for instruction: %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
      371 …nd an estimated cost of 12 for instruction: %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
      375 …nd an estimated cost of 12 for instruction: %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
      379 …und an estimated cost of 6 for instruction: %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
      382 %ctpop = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %a)
|
D | cttz.ll |
      103 declare <64 x i8> @llvm.cttz.v64i8(<64 x i8>, i1)
      649 …n estimated cost of 52 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 f…
      653 …n estimated cost of 36 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 f…
      657 …n estimated cost of 40 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 f…
      661 …n estimated cost of 18 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 f…
      665 …n estimated cost of 18 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 f…
      669 …an estimated cost of 9 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 f…
      672 %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 0)
      678 …n estimated cost of 52 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 t…
      682 …n estimated cost of 36 for instruction: %cttz = call <64 x i8> @llvm.cttz.v64i8(<64 x i8> %a, i1 t…
      [all …]
|
D | bitreverse.ll |
      107 declare <64 x i8> @llvm.bitreverse.v64i8(<64 x i8>)
      426 …mated cost of 80 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      430 …mated cost of 20 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      434 …mated cost of 24 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      438 …mated cost of 10 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      442 …mated cost of 10 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      446 …imated cost of 5 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      450 …imated cost of 8 for instruction: %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
      453 %bitreverse = call <64 x i8> @llvm.bitreverse.v64i8(<64 x i8> %a)
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/ |
D | ValueTypes.cpp |
      156 case MVT::v64i8: return "v64i8"; in getEVTString()
      237 case MVT::v64i8: return VectorType::get(Type::getInt8Ty(Context), 64); in getTypeForEVT()
|
/external/llvm/lib/IR/ |
D | ValueTypes.cpp |
      159 case MVT::v64i8: return "v64i8"; in getEVTString()
      237 case MVT::v64i8: return VectorType::get(Type::getInt8Ty(Context), 64); in getTypeForEVT()
|
/external/llvm/test/CodeGen/X86/ |
D | vector-popcnt-512.ll |
      172 %out = call <64 x i8> @llvm.ctpop.v64i8(<64 x i8> %in)
      179 declare <64 x i8> @llvm.ctpop.v64i8(<64 x i8>)
|
D | vector-lzcnt-512.ll |
      157 %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 0)
      213 %out = call <64 x i8> @llvm.ctlz.v64i8(<64 x i8> %in, i1 -1)
      220 declare <64 x i8> @llvm.ctlz.v64i8(<64 x i8>, i1)
|
/external/llvm/lib/Target/Hexagon/ |
D | HexagonISelLowering.cpp |
      198 LocVT == MVT::v64i8 || LocVT == MVT::v512i1) { in CC_Hexagon_VarArg()
      338 LocVT == MVT::v64i8 || LocVT == MVT::v512i1)) { in CC_HexagonVector()
      412 } else if (LocVT == MVT::v64i8 || LocVT == MVT::v32i16 || in RetCC_Hexagon()
      544 ty == MVT::v64i8 || in IsHvxVectorType()
      902 VT == MVT::v32i16 || VT == MVT::v64i8); in getIndexedAddressParts()
      1119 RegVT == MVT::v32i16 || RegVT == MVT::v64i8)) { in LowerFormalArguments()
      1755 addRegisterClass(MVT::v64i8, &Hexagon::VectorRegsRegClass); in HexagonTargetLowering()
      2875 case MVT::v64i8: in getRegForInlineAsmConstraint()
      2885 case MVT::v64i8: in getRegForInlineAsmConstraint()
      3028 case MVT::v64i8: in allowsMisalignedMemoryAccesses()
      [all …]
|
D | HexagonIntrinsicsV60.td |
      94 def : Pat <(v512i1 (bitconvert (v64i8 VectorRegs:$src1))),
      95 (v512i1 (V6_vandvrt(v64i8 VectorRegs:$src1),
      114 def : Pat <(v64i8 (bitconvert (v512i1 VecPredRegs:$src1))),
      115 (v64i8 (V6_vandqrt(v512i1 VecPredRegs:$src1),
|
/external/swiftshader/third_party/llvm-7.0/configs/common/lib/Target/X86/ |
D | X86GenCallingConv.inc |
      525 LocVT = MVT::v64i8;
      935 if (LocVT == MVT::v64i8 ||
      1149 if (LocVT == MVT::v64i8 ||
      1205 if (LocVT == MVT::v64i8 ||
      1269 if (LocVT == MVT::v64i8 ||
      1533 LocVT = MVT::v64i8;
      1582 if (LocVT == MVT::v64i8 ||
      1715 if (LocVT == MVT::v64i8 ||
      2015 if (LocVT == MVT::v64i8 ||
      2447 if (LocVT == MVT::v64i8 ||
      [all …]
|