/external/llvm/include/llvm/CodeGen/
D | ValueTypes.h | 212 bool bitsLT(EVT VT) const { in bitsLT() function
|
D | MachineValueType.h | 542 bool bitsLT(MVT VT) const { in bitsLT() function
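Note: both definitions above implement the same comparison — bitsLT() returns true when the receiver's bit width is strictly smaller than the argument's. Below is a minimal illustrative sketch (not part of the listing; it assumes an LLVM build tree on the include path, with the header layout of this /external/llvm checkout):

    // Sketch: what EVT::bitsLT / MVT::bitsLT compute.
    #include "llvm/CodeGen/ValueTypes.h" // EVT; also pulls in MVT
    #include <cassert>
    using namespace llvm;

    int main() {
      MVT Narrow = MVT::i16;
      MVT Wide = MVT::i32;
      assert(Narrow.bitsLT(Wide));            // 16 bits < 32 bits
      assert(!Wide.bitsLT(Narrow));           // strict comparison, not <=
      assert(EVT(Narrow).bitsLT(EVT(Wide)));  // EVT exposes the same check
      return 0;
    }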
|
/external/llvm/lib/Target/ARM/ |
D | ARMSelectionDAGInfo.cpp | 93 else if (Src.getValueType().bitsLT(MVT::i32)) in EmitSpecializedLibcall()
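The ARM hit above guards the path where the source value of a specialized memory libcall is narrower than i32. A hedged, illustrative sketch of that "widen if narrower than i32" pattern follows; the helper name and the choice of ZERO_EXTEND are illustrative, not taken from the listing:

    // Illustrative only: extend a narrow SDValue to i32 before a libcall.
    #include "llvm/CodeGen/SelectionDAG.h"
    using namespace llvm;

    static SDValue widenToI32(SelectionDAG &DAG, const SDLoc &dl, SDValue Src) {
      if (Src.getValueType().bitsLT(MVT::i32)) // same check as line 93 above
        Src = DAG.getNode(ISD::ZERO_EXTEND, dl, MVT::i32, Src);
      return Src;
    }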
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/ARM/ |
D | ARMSelectionDAGInfo.cpp | 93 else if (Src.getValueType().bitsLT(MVT::i32)) in EmitSpecializedLibcall()
|
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/CodeGen/ |
D | ValueTypes.h | 241 bool bitsLT(EVT VT) const { in bitsLT() function
|
/external/swiftshader/third_party/LLVM/lib/CodeGen/SelectionDAG/ |
D | FastISel.cpp | 273 if (IdxVT.bitsLT(PtrVT)) { in getRegForGEPIndex()
  |              | 627 else if (SrcVT.bitsLT(MVT::i32)) in SelectCall()
  |              | 975 if (DstVT.bitsLT(SrcVT)) in SelectOperator()
|
D | LegalizeTypesGeneric.cpp | 189 if (Idx.getValueType().bitsLT(TLI.getPointerTy())) in ExpandRes_EXTRACT_VECTOR_ELT()
|
D | TargetLowering.cpp | 693 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. in getVectorTypeBreakdownMVT()
  |                    | 990 if (DestVT.bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. in getVectorTypeBreakdown()
  |                    | 1027 if (VT.bitsLT(MinVT)) in GetReturnInfo()
  |                    | 2216 else if (Op0.getValueType().bitsLT(VT)) in SimplifySetCC()
|
D | SelectionDAG.cpp | 2498 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && in getNode()
  |                  | 2514 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && in getNode()
  |                  | 2531 assert(Operand.getValueType().getScalarType().bitsLT(VT.getScalarType()) && in getNode()
  |                  | 2568 .bitsLT(VT.getScalarType())) in getNode()
  |                  | 3644 if (VT.bitsLT(LargestVT)) { in getMemsetStores()
  |                  | 4177 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && in getLoad()
  |                  | 4335 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && in getTruncStore()
|
/external/swiftshader/third_party/LLVM/include/llvm/CodeGen/ |
D | ValueTypes.h | 548 bool bitsLT(EVT VT) const { in bitsLT() function
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/ |
D | TargetLoweringBase.cpp | 895 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. in getVectorTypeBreakdownMVT()
  |                        | 1328 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. in getVectorTypeBreakdown()
  |                        | 1364 if (VT.bitsLT(MinVT)) in GetReturnInfo()
|
/external/llvm/lib/CodeGen/ |
D | TargetLoweringBase.cpp | 1154 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. in getVectorTypeBreakdownMVT()
  |                        | 1557 if (EVT(DestVT).bitsLT(NewVT)) // Value is expanded, e.g. i64 -> i16. in getVectorTypeBreakdown()
  |                        | 1592 if (VT.bitsLT(MinVT)) in GetReturnInfo()
|
/external/swiftshader/third_party/llvm-7.0/llvm/include/llvm/Support/ |
D | MachineValueType.h | 782 bool bitsLT(MVT VT) const { in bitsLT() function
|
/external/llvm/lib/CodeGen/SelectionDAG/ |
D | SelectionDAG.cpp | 2872 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); in FoldCONCAT_VECTORS()
  |                  | 3063 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 3076 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 3092 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 3109 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 3142 .bitsLT(VT.getScalarType())) in getNode()
  |                  | 3412 if (LegalSVT.bitsLT(VT.getScalarType())) in FoldConstantVectorArithmetic()
  |                  | 4572 if (VT.bitsLT(LargestVT)) { in getMemsetStores()
  |                  | 5094 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && in getLoad()
  |                  | 5272 assert(SVT.getScalarType().bitsLT(VT.getScalarType()) && in getTruncStore()
|
D | LegalizeTypesGeneric.cpp | 231 assert(OldEltVT.bitsLT(OldVT) && "Result type smaller then element type!"); in ExpandRes_EXTRACT_VECTOR_ELT()
|
D | LegalizeDAG.cpp | 1324 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { in ExpandVectorBuildThroughStack()
  |                 | 2321 } else if (DestVT.bitsLT(MVT::f64)) { in ExpandLegalINT_TO_FP()
  |                 | 3008 if (NewEltVT.bitsLT(EltVT)) { in ExpandNode()
  |                 | 4255 assert(NewEltVT.bitsLT(EltVT) && "not handled"); in PromoteNode()
  |                 | 4288 assert(NewEltVT.bitsLT(EltVT) && "not handled"); in PromoteNode()
  |                 | 4335 assert(NewEltVT.bitsLT(EltVT) && "not handled"); in PromoteNode()
|
D | FastISel.cpp | 328 if (IdxVT.bitsLT(PtrVT)) { in getRegForGEPIndex()
  |              | 1657 if (DstVT.bitsLT(SrcVT)) in selectOperator()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/CodeGen/SelectionDAG/ |
D | SelectionDAG.cpp | 3737 SVT = (SVT.bitsLT(Op.getValueType()) ? Op.getValueType() : SVT); in FoldCONCAT_VECTORS()
  |                  | 3959 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 3972 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 3988 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 4004 assert(Operand.getValueType().bitsLT(VT) && in getNode()
  |                  | 4039 .bitsLT(VT.getScalarType())) in getNode()
  |                  | 4274 if (LegalSVT.bitsLT(SVT)) in FoldConstantArithmetic()
  |                  | 4361 if (LegalSVT.bitsLT(VT.getScalarType())) in FoldConstantVectorArithmetic()
  |                  | 5644 if (VT.bitsLT(LargestVT)) { in getMemsetStores()
  |                  | 6261 assert(MemVT.getScalarType().bitsLT(VT.getScalarType()) && in getLoad()
  |                  | [all …]
|
D | LegalizeTypesGeneric.cpp | 228 assert(OldEltVT.bitsLT(OldVT) && "Result type smaller then element type!"); in ExpandRes_EXTRACT_VECTOR_ELT()
|
D | LegalizeDAG.cpp | 1370 if (EltVT.bitsLT(Node->getOperand(i).getValueType().getScalarType())) { in ExpandVectorBuildThroughStack()
  |                 | 2354 } else if (DestVT.bitsLT(MVT::f64)) { in ExpandLegalINT_TO_FP()
  |                 | 3116 if (NewEltVT.bitsLT(EltVT)) { in ExpandNode()
  |                 | 4580 assert(NewEltVT.bitsLT(EltVT) && "not handled"); in PromoteNode()
  |                 | 4613 assert(NewEltVT.bitsLT(EltVT) && "not handled"); in PromoteNode()
  |                 | 4659 assert(NewEltVT.bitsLT(EltVT) && "not handled"); in PromoteNode()
|
D | FastISel.cpp | 517 if (IdxVT.bitsLT(PtrVT)) { in getRegForGEPIndex()
  |              | 1862 if (DstVT.bitsLT(SrcVT)) in selectOperator()
|
/external/swiftshader/third_party/llvm-7.0/llvm/lib/Target/AMDGPU/ |
D | R600ISelLowering.cpp | 1326 if (MemVT.bitsLT(MVT::i32)) in LowerSTORE()
  |                      | 1445 ExtType != ISD::NON_EXTLOAD && MemVT.bitsLT(MVT::i32)) { in LowerLOAD()
  |                      | 1675 if (VT.bitsLT(MVT::i32)) in allowsMisalignedMemoryAccesses()
|
/external/swiftshader/third_party/LLVM/lib/Transforms/Scalar/ |
D | CodeGenPrepare.cpp | 376 if (SrcVT.bitsLT(DstVT)) return false; in OptimizeNoopCopyExpression()
|
/external/llvm/lib/Target/AMDGPU/ |
D | R600ISelLowering.cpp | 1426 if (MemVT.bitsLT(MVT::i32)) in LowerSTORE()
  |                      | 1572 ExtType != ISD::NON_EXTLOAD && MemVT.bitsLT(MVT::i32)) { in LowerLOAD()
  |                      | 1827 if (VT.bitsLT(MVT::i32)) in allowsMisalignedMemoryAccesses()
|
/external/swiftshader/third_party/LLVM/include/llvm/Target/ |
D | TargetLowering.h | 1304 return VT.bitsLT(MinVT) ? MinVT : VT; in getTypeForExtArgOrReturn()
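The TargetLowering.h hit above shows the clamp-to-minimum idiom: if an argument or return type is narrower than the ABI's minimum width, the minimum type is used instead. A small hedged sketch of that idiom (free-standing helper, not the actual TargetLowering member):

    // Illustrative clamp: promote VT to MinVT when VT is narrower.
    #include "llvm/CodeGen/ValueTypes.h"

    static llvm::EVT clampToMinWidth(llvm::EVT VT, llvm::EVT MinVT) {
      return VT.bitsLT(MinVT) ? MinVT : VT; // same expression as line 1304 above
    }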
|