/external/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/X86/ |
D | X86GenFastISel.inc |
      346  if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
      359  if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
      389  if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
      402  if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
      412  if ((Subtarget->hasMMX()) && (Subtarget->hasSSE2())) {
      434  if ((Subtarget->hasMMX()) && (Subtarget->hasSSE2())) {
      856  if ((!Subtarget->hasSSE2())) {
      891  if ((!Subtarget->hasSSE2())) {
      917  if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
      958  if ((Subtarget->hasSSE2() && !Subtarget->hasAVX())) {
      [all …]
|
D | X86GenCallingConv.inc |
      436   if (static_cast<const X86Subtarget&>(State.getMachineFunction().getSubtarget()).hasSSE2()) {
      601   if (static_cast<const X86Subtarget&>(State.getMachineFunction().getSubtarget()).hasSSE2()) {
      1467  if (static_cast<const X86Subtarget&>(State.getMachineFunction().getSubtarget()).hasSSE2()) {
      2895  if (static_cast<const X86Subtarget&>(State.getMachineFunction().getSubtarget()).hasSSE2()) {
      2932  if (static_cast<const X86Subtarget&>(State.getMachineFunction().getSubtarget()).hasSSE2()) {
      2944  if (static_cast<const X86Subtarget&>(State.getMachineFunction().getSubtarget()).hasSSE2()) {
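The generated code above is what CCIfSubtarget<"hasSSE2()", ...> in
X86CallingConv.td expands to: TableGen splices the predicate string into a
subtarget lookup on the CCState's MachineFunction. Below is a hedged sketch
of a hand-written CCAssignFn using the same query; CC_SketchF64 is a
hypothetical name, not an LLVM function, and the sketch only compiles inside
the X86 backend tree.

    #include "X86Subtarget.h"                   // backend-local X86Subtarget
    #include "llvm/CodeGen/CallingConvLower.h"  // CCState, CCValAssign

    using namespace llvm;

    // Like the generated .inc functions, a CCAssignFn returns false once the
    // value has been assigned, and true if no rule matched.
    static bool CC_SketchF64(unsigned ValNo, MVT ValVT, MVT LocVT,
                             CCValAssign::LocInfo LocInfo,
                             ISD::ArgFlagsTy ArgFlags, CCState &State) {
      const X86Subtarget &Subtarget = static_cast<const X86Subtarget &>(
          State.getMachineFunction().getSubtarget());
      // With SSE2, place an f64 in XMM0 rather than on the x87 stack.
      if (LocVT == MVT::f64 && Subtarget.hasSSE2())
        if (unsigned Reg = State.AllocateReg(X86::XMM0)) {
          State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
          return false;  // assigned
        }
      return true;  // not handled; the caller tries further rules
    }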
|
/external/llvm/lib/Target/X86/ |
D | X86Subtarget.h |
      383  bool hasSSE2() const { return X86SSELevel >= SSE2; }  in hasSSE2() function
      460  bool hasMFence() const { return hasSSE2() || is64Bit(); }  in hasMFence()
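Both definitions lean on the same idiom: the SSE feature levels form an
ordered enum, so "has at least SSE2" is a single comparison, and hasMFence()
can piggyback on it because MFENCE arrived with SSE2 and every x86-64 CPU has
SSE2. A minimal self-contained sketch of the idiom (the enum values here are
illustrative, not LLVM's actual X86SSEEnum):

    #include <cassert>

    enum SSELevelSketch { NoSSE, SSE1, SSE2, SSE3, SSSE3, SSE41, SSE42 };

    struct SubtargetSketch {
      SSELevelSketch Level = NoSSE;
      bool Is64Bit = false;
      // One comparison per query: a later level implies all earlier ones.
      bool hasSSE1() const { return Level >= SSE1; }
      bool hasSSE2() const { return Level >= SSE2; }
      bool hasMFence() const { return hasSSE2() || Is64Bit; }
    };

    int main() {
      SubtargetSketch ST;
      ST.Level = SSE42;       // an SSE4.2 CPU...
      assert(ST.hasSSE2());   // ...also reports SSE2
      assert(ST.hasMFence()); // MFENCE is an SSE2 instruction
      return 0;
    }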
|
D | X86TargetTransformInfo.cpp |
      280   ST->hasSSE2()) {  in getArithmeticInstrCost()
      295   if ((VT == MVT::v8i16 && ST->hasSSE2()) ||  in getArithmeticInstrCost()
      308   if (VT == MVT::v4i32 && ST->hasSSE2())  in getArithmeticInstrCost()
      364   if (ST->hasSSE2()) {  in getArithmeticInstrCost()
      407   if (ISD == ISD::MUL && LT.second == MVT::v4i32 && ST->hasSSE2() &&  in getArithmeticInstrCost()
      821   if (ST->hasSSE2() && !ST->hasAVX()) {  in getCastInstrCost()
      867   if (ST->hasSSE2()) {  in getCastInstrCost()
      939   if (ST->hasSSE2())  in getCmpSelInstrCost()
      1028  if (ST->hasSSE2())  in getIntrinsicInstrCost()
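These cost-model hits all follow one pattern: check the most capable feature
set first and fall through to older baselines, returning a per-feature cost
for the same operation. A hedged, self-contained sketch of that pattern (the
cost numbers are made up, not LLVM's tables):

    #include <cstdio>

    struct FeatureSketch { bool SSE2 = false, AVX = false; };

    // Cheapest lowering wins: newer ISAs get lower costs.
    int mulV4I32Cost(const FeatureSketch &ST) {
      if (ST.AVX)  return 1;   // single wide multiply
      if (ST.SSE2) return 6;   // multiply via PMULUDQ plus shuffles
      return 20;               // scalarized fallback
    }

    int main() {
      FeatureSketch ST;
      ST.SSE2 = true;
      std::printf("v4i32 mul cost: %d\n", mulV4I32Cost(ST));
      return 0;
    }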
|
D | X86CallingConv.td |
      86   CCIfInReg<CCIfSubtarget<"hasSSE2()",
      98   CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      99   CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      317  CCIfSubtarget<"hasSSE2()",
      575  CCIfSubtarget<"hasSSE2()",
      732  CCIfSubtarget<"hasSSE2()",
|
D | X86FastISel.cpp |
      65    X86ScalarSSEf64 = Subtarget->hasSSE2();  in X86FastISel()
      504   bool HasSSE2 = Subtarget->hasSSE2();  in X86FastEmitStore()
      1331  bool X86ScalarSSEf64 = Subtarget->hasSSE2();  in X86ChooseCmpOpcode()
      2066  (Subtarget->hasSSE2() && RetVT == MVT::f64)))  in X86FastEmitSSESelect()
      2836  if (!Subtarget->hasSSE2())  in fastLowerIntrinsicCall()
      3452  if (!Subtarget->hasSSE2())  in fastSelectInstruction()
|
D | X86ISelLowering.cpp |
      76     X86ScalarSSEf64 = Subtarget.hasSSE2();  in X86TargetLowering()
      732    if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {  in X86TargetLowering()
      1823   if (Subtarget.hasSSE2())  in getOptimalMemOpType()
      1829   !Subtarget.is64Bit() && Subtarget.hasSSE2()) {  in getOptimalMemOpType()
      2115   (Subtarget.is64Bit() && !Subtarget.hasSSE2()))  in LowerReturn()
      2141   if (!Subtarget.hasSSE2())  in LowerReturn()
      2817   else if (Subtarget.hasSSE2())  in LowerFormalArguments()
      4401   if (!Subtarget.hasSSE2() && VT.is128BitVector()) {  in getZeroVector()
      6565   if (Subtarget.hasSSE2() && ISD::isBuildVectorAllOnes(Op.getNode())) {  in materializeVectorConstant()
      15427  || (Subtarget.hasSSE2() && (VET == MVT::i8));  in LowerVSETCC()
      [all …]
|
D | X86InstrInfo.td |
      780  def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
      781  def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
      854  def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
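HasSSE2, UseSSE2, and FPStackf64 are the predicate records behind the
generated checks at the top of this listing: TableGen pastes each Predicate
string verbatim into the .inc matchers, and UseSSE2 adds !hasAVX() because
the VEX-encoded AVX forms supersede the legacy SSE2 patterns whenever AVX is
available. A hedged, self-contained sketch of the dispatch those three
predicates produce (SubtargetSketch stands in for the real X86Subtarget):

    struct SubtargetSketch {
      bool SSE2 = true, AVX = false;
      bool hasSSE2() const { return SSE2; }
      bool hasAVX() const { return AVX; }
    };

    // Scalar f64 add: each branch mirrors one predicate string above.
    const char *pickFAdd64(const SubtargetSketch &ST) {
      if (ST.hasAVX())                  return "VADDSDrr"; // HasAVX pattern
      if (ST.hasSSE2() && !ST.hasAVX()) return "ADDSDrr";  // UseSSE2
      return "ADD_Fp64";                                   // FPStackf64
    }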
|
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86TargetTransformInfo.cpp |
      343   ST->hasSSE2() && !ST->hasXOP()) {  in getArithmeticInstrCost()
      434   ST->hasSSE2()) {  in getArithmeticInstrCost()
      478   if (ST->hasSSE2() &&  in getArithmeticInstrCost()
      645   if (ST->hasSSE2() &&  in getArithmeticInstrCost()
      663   if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||  in getArithmeticInstrCost()
      844   if (ST->hasSSE2())  in getArithmeticInstrCost()
      1254  if (ST->hasSSE2())  in getShuffleCost()
      1639  if (ST->hasSSE2() && !ST->hasAVX()) {  in getCastInstrCost()
      1694  if (ST->hasSSE2()) {  in getCastInstrCost()
      1739  (ST->hasSSE2() && MTy.getScalarSizeInBits() < 32)) {  in getCmpSelInstrCost()
      [all …]
|
D | X86Subtarget.h |
      585  bool hasSSE2() const { return X86SSELevel >= SSE2; }  in hasSSE2() function
      750  bool hasMFence() const { return hasSSE2() || is64Bit(); }  in hasMFence()
|
D | X86CallingConv.td |
      267  CCIfInReg<CCIfSubtarget<"hasSSE2()",
      279  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      280  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      528  CCIfSubtarget<"hasSSE2()",
      798  CCIfSubtarget<"hasSSE2()",
      941  CCIfSubtarget<"hasSSE2()",
|
D | X86ISelLowering.cpp |
      113   X86ScalarSSEf64 = Subtarget.hasSSE2();  in X86TargetLowering()
      861   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {  in X86TargetLowering()
      2276  if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))  in getOptimalMemOpType()
      2284  !Subtarget.is64Bit() && Subtarget.hasSSE2()) {  in getOptimalMemOpType()
      2701  } else if (!Subtarget.hasSSE2() &&  in LowerReturn()
      2733  if (!Subtarget.hasSSE2())  in LowerReturn()
      3026  } else if (!Subtarget.hasSSE2() &&  in LowerCallResult()
      3630  else if (Subtarget.hasSSE2())  in LowerFormalArguments()
      5241  return Subtarget.hasSSE2();  in hasAndNot()
      5604  if (!Subtarget.hasSSE2() && VT.is128BitVector()) {  in getZeroVector()
      [all …]
|
D | X86LegalizerInfo.cpp |
      314  if (!Subtarget.hasSSE2())  in setLegalizerInfoSSE2()
|
D | X86FastISel.cpp |
      65    X86ScalarSSEf64 = Subtarget->hasSSE2();  in X86FastISel()
      487   bool HasSSE2 = Subtarget->hasSSE2();  in X86FastEmitStore()
      1343  bool X86ScalarSSEf64 = Subtarget->hasSSE2();  in X86ChooseCmpOpcode()
      2157  (Subtarget->hasSSE2() && RetVT == MVT::f64)))  in X86FastEmitSSESelect()
      3000  if (!Subtarget->hasSSE2())  in fastLowerIntrinsicCall()
      3657  if (!Subtarget->hasSSE2())  in fastSelectInstruction()
|
D | X86InstrInfo.td |
      851  def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
      852  def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
      939  def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
|
D | X86InstrInfo.cpp |
      1789  assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");  in commuteInstructionImpl()
      2052  if (Subtarget.hasSSE2())  in findCommutedOpIndices()
|
/external/llvm-project/llvm/lib/Target/X86/ |
D | X86Subtarget.h |
      621  bool hasSSE2() const { return X86SSELevel >= SSE2; }  in hasSSE2() function
      815  bool hasMFence() const { return hasSSE2() || is64Bit(); }  in hasMFence()
|
D | X86PartialReduction.cpp |
      67   if (!ST->hasSSE2())  in tryMAddReplacement()
      171  if (!ST->hasSSE2())  in trySADReplacement()
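Both entry points gate on SSE2 up front because the rewrites they perform
only pay off when the backend can form the SSE2 instructions PMADDWD
(tryMAddReplacement) and PSADBW (trySADReplacement). A hedged sketch of the
guard-clause shape; tryMAddSketch is a hypothetical stand-in, not the real
pass entry point:

    // Bail out before any pattern matching when the target instruction
    // cannot exist; hasSSE2 mirrors ST->hasSSE2() in the real pass.
    bool tryMAddSketch(bool hasSSE2) {
      if (!hasSSE2)
        return false;  // no PMADDWD below SSE2; nothing to do
      // ... match an add-of-mul reduction and narrow it here ...
      return true;
    }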
|
D | X86TargetTransformInfo.cpp |
      379   ST->hasSSE2() && !ST->hasXOP()) {  in getArithmeticInstrCost()
      478   ST->hasSSE2()) {  in getArithmeticInstrCost()
      543   if (ST->hasSSE2() &&  in getArithmeticInstrCost()
      712   if (ST->hasSSE2() &&  in getArithmeticInstrCost()
      730   if (((VT == MVT::v8i16 || VT == MVT::v4i32) && ST->hasSSE2()) ||  in getArithmeticInstrCost()
      917   if (ST->hasSSE2())  in getArithmeticInstrCost()
      1055  if (ST->hasSSE2())  in getShuffleCost()
      1379  if (ST->hasSSE2())  in getShuffleCost()
      2010  if (ST->hasSSE2() && !ST->hasAVX()) {  in getCastInstrCost()
      2076  if (ST->hasSSE2()) {  in getCastInstrCost()
      [all …]
|
D | X86CallingConv.td |
      267  CCIfInReg<CCIfSubtarget<"hasSSE2()",
      279  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      280  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
      534  CCIfSubtarget<"hasSSE2()",
      805  CCIfSubtarget<"hasSSE2()",
      948  CCIfSubtarget<"hasSSE2()",
|
D | X86LegalizerInfo.cpp |
      309  if (!Subtarget.hasSSE2())  in setLegalizerInfoSSE2()
|
D | X86ISelLowering.cpp |
      107   X86ScalarSSEf64 = Subtarget.hasSSE2();  in X86TargetLowering()
      870   if (!Subtarget.useSoftFloat() && Subtarget.hasSSE2()) {  in X86TargetLowering()
      2279  if (Subtarget.hasSSE2() && (Subtarget.getPreferVectorWidth() >= 128))  in getOptimalMemOpType()
      2287  Op.size() >= 8 && !Subtarget.is64Bit() && Subtarget.hasSSE2()) {  in getOptimalMemOpType()
      2701  } else if (!Subtarget.hasSSE2() &&  in LowerReturn()
      2733  if (!Subtarget.hasSSE2())  in LowerReturn()
      3034  } else if (!Subtarget.hasSSE2() &&  in LowerCallResult()
      3528  else if (Subtarget.hasSSE2())  in forwardMustTailParameters()
      5381  return Subtarget.hasSSE2();  in hasAndNot()
      5795  if (!Subtarget.hasSSE2() && VT.is128BitVector()) {  in getZeroVector()
      [all …]
|
D | X86FastISel.cpp |
      64    X86ScalarSSEf64 = Subtarget->hasSSE2();  in X86FastISel()
      486   bool HasSSE2 = Subtarget->hasSSE2();  in X86FastEmitStore()
      1359  bool X86ScalarSSEf64 = Subtarget->hasSSE2();  in X86ChooseCmpOpcode()
      2173  (Subtarget->hasSSE2() && RetVT == MVT::f64)))  in X86FastEmitSSESelect()
      3019  if (!Subtarget->hasSSE2())  in fastLowerIntrinsicCall()
      3674  if (!Subtarget->hasSSE2())  in fastSelectInstruction()
|
D | X86InstrInfo.td |
      877  def HasSSE2 : Predicate<"Subtarget->hasSSE2()">;
      878  def UseSSE2 : Predicate<"Subtarget->hasSSE2() && !Subtarget->hasAVX()">;
      966  def FPStackf64 : Predicate<"!Subtarget->hasSSE2()">;
|
D | X86InstrInfo.cpp |
      2272  assert(Subtarget.hasSSE2() && "Commuting MOVHLP/UNPCKHPD requires SSE2!");  in commuteInstructionImpl()
      2535  if (Subtarget.hasSSE2())  in findCommutedOpIndices()
|