Lines Matching refs:hasSSE1 (each entry: source line number, matching code, enclosing function; a sketch of the recurring gating pattern follows the listing)
108 X86ScalarSSEf32 = Subtarget.hasSSE1(); in X86TargetLowering()
691 if (!Subtarget.useSoftFloat() && Subtarget.is64Bit() && Subtarget.hasSSE1()) { in X86TargetLowering()
847 if (!Subtarget.useSoftFloat() && Subtarget.hasSSE1()) { in X86TargetLowering()
2250 if (Subtarget.hasSSE1()) in getByValTypeAlignment()
2283 if (Subtarget.hasSSE1() && (Subtarget.is64Bit() || Subtarget.hasX87()) && in getOptimalMemOpType()
2698 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) { in LowerReturn()
3028 if (!Subtarget.hasSSE1() && X86::FR32XRegClass.contains(VA.getLocReg())) { in LowerCallResult()
3348 if (isSoftFloat || NoImplicitFloatOps || !Subtarget.hasSSE1()) in get64BitArgumentXMMs()
3435 assert(!(NumXMMRegs && !Subtarget.hasSSE1()) && in createVarArgAreaAndStoreRegisters()
4172 assert((Subtarget.hasSSE1() || !NumXMMRegs) in LowerCall()
5375 if (!Subtarget.hasSSE1() || VT.getSizeInBits() < 128) in hasAndNot()
22183 if ((VT == MVT::f32 && Subtarget.hasSSE1()) || in getSqrtEstimate()
22184 (VT == MVT::v4f32 && Subtarget.hasSSE1() && Reciprocal) || in getSqrtEstimate()
22213 if ((VT == MVT::f32 && Subtarget.hasSSE1()) || in getRecipEstimate()
22214 (VT == MVT::v4f32 && Subtarget.hasSSE1()) || in getRecipEstimate()
23972 assert(Subtarget.hasSSE1() && "Expected SSE"); in LowerStore()
24344 Subtarget.hasSSE1()); in LowerVAARG()
28406 (Subtarget.hasSSE1() || Subtarget.hasX87())) in shouldExpandAtomicStoreInIR()
28425 (Subtarget.hasSSE1() || Subtarget.hasX87())) in shouldExpandAtomicLoadInIR()
29219 if (Subtarget.hasSSE1()) { in LowerATOMIC_STORE()
30547 if (Subtarget.hasSSE1()) { in ReplaceNodeResults()
30716 assert(Subtarget.hasSSE1() && "Expected SSE"); in ReplaceNodeResults()
34807 if ((MaskVT == MVT::v4f32 && Subtarget.hasSSE1()) || in matchBinaryShuffle()
34969 ((MaskVT.is128BitVector() && Subtarget.hasSSE1()) || in matchBinaryPermuteShuffle()
38766 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2()) { in combineBitcastvxi1()
38970 if (Subtarget.hasSSE1() && !isa<ConstantFPSDNode>(V)) { in createMMXBuildVector()
38994 if (Subtarget.hasSSE1()) { in createMMXBuildVector()
39359 if (!((Subtarget.hasSSE1() && VT == MVT::f32) || in combineBitcast()
40692 (Subtarget.hasSSE1() && VT.getScalarType() == MVT::f32))) { in combineSelect()
41710 (FalseOp.getValueType() == MVT::f32 && !Subtarget.hasSSE1())) || in combineCMov()
43287 !((Subtarget.hasSSE1() && N00Type == MVT::f32) || in convertIntLogicToFPLogic()
43555 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) { in combineAnd()
43936 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32) { in combineOr()
46114 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && in combineXor()
46189 if (!((VT == MVT::f32 && Subtarget.hasSSE1()) || in combineFAndFNotToFAndn()
46191 (VT == MVT::v4f32 && Subtarget.hasSSE1() && !Subtarget.hasSSE2()))) in combineFAndFNotToFAndn()
46293 if (!((Subtarget.hasSSE1() && VT == MVT::f32) || in combineFMinNumFMaxNum()
47348 if (Subtarget.hasSSE1() && !Subtarget.hasSSE2() && VT == MVT::v4i32 && in combineSetCC()
50303 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) || in getSingleConstraintMatchWeight()
50332 if (((type->getPrimitiveSizeInBits() == 128) && Subtarget.hasSSE1()) || in getSingleConstraintMatchWeight()
50408 if (Subtarget.hasSSE1()) in LowerXConstraint()
50714 if (!Subtarget.hasSSE1()) break; in getRegForInlineAsmConstraint()
50785 if (!Subtarget.hasSSE1()) break; in getRegForInlineAsmConstraint()
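
All of the hits above share one shape: a lowering or DAG-combine decision is gated on X86Subtarget::hasSSE1(), usually alongside a type check (MVT::f32 or a 128-bit vector type), with hasSSE2() guarding the f64/v2f64 analogues and hasX87() covering the pre-SSE fallback. What follows is a minimal, self-contained sketch of that gating pattern, not LLVM code: Subtarget and chooseFPLowering below are hypothetical stand-ins for the real X86Subtarget API.

    #include <cstdio>

    // Hypothetical stand-in for llvm::X86Subtarget, which exposes the real
    // hasSSE1()/hasSSE2() feature predicates queried in the listing above.
    struct Subtarget {
      bool SSE1 = false, SSE2 = false;
      bool hasSSE1() const { return SSE1; }
      bool hasSSE2() const { return SSE2; }
    };

    // Mirrors the shape of checks such as those in getSqrtEstimate() and
    // combineFMinNumFMaxNum() above: f32 lowering needs SSE1, f64 needs
    // SSE2, and anything else falls back to x87 or a library call.
    const char *chooseFPLowering(const Subtarget &ST, int Bits) {
      if (Bits == 32 && ST.hasSSE1()) return "scalar SSE in xmm registers";
      if (Bits == 64 && ST.hasSSE2()) return "scalar SSE2 in xmm registers";
      return "x87 stack / libcall fallback";
    }

    int main() {
      Subtarget ST;
      ST.SSE1 = true; // model an SSE1-only target, no SSE2
      std::printf("f32 -> %s\n", chooseFPLowering(ST, 32));
      std::printf("f64 -> %s\n", chooseFPLowering(ST, 64));
    }

Note also the hasSSE1() && !hasSSE2() hits (combineBitcastvxi1, combineAnd, combineOr, combineXor, combineSetCC): these cover the narrow SSE1-only window, where v4i32 logic is presumably expressed through the v4f32 FP logic instructions (ANDPS and friends) because the integer SSE2 forms are unavailable.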