/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/AArch64/ |
D | AArch64GenFastISel.inc |
    121 unsigned fastEmit_AArch64ISD_CALL_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    124 return fastEmitInst_r(AArch64::BLR, &AArch64::GPR64RegClass, Op0, Op0IsKill);
    127 unsigned fastEmit_AArch64ISD_CALL_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
    129 case MVT::i64: return fastEmit_AArch64ISD_CALL_MVT_i64_r(RetVT, Op0, Op0IsKill);
    136 unsigned fastEmit_AArch64ISD_CMEQz_MVT_v8i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    140 return fastEmitInst_r(AArch64::CMEQv8i8rz, &AArch64::FPR64RegClass, Op0, Op0IsKill);
    145 unsigned fastEmit_AArch64ISD_CMEQz_MVT_v16i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    149 return fastEmitInst_r(AArch64::CMEQv16i8rz, &AArch64::FPR128RegClass, Op0, Op0IsKill);
    154 unsigned fastEmit_AArch64ISD_CMEQz_MVT_v4i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    158 return fastEmitInst_r(AArch64::CMEQv4i16rz, &AArch64::FPR64RegClass, Op0, Op0IsKill);
    [all …]
|
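The *GenFastISel.inc files above and below (X86, PowerPC, ARM, Mips) are TableGen-generated: for every ISD or target-specific opcode there is one emitter per legal value type, plus a dispatcher that switches on the MVT and returns 0 when the type is not handled. A minimal standalone sketch of that shape, with invented instruction names rather than the real generated code:

  // Hypothetical, simplified mirror of the generated fastEmit_* dispatch:
  // one emitter per (opcode, MVT) pair, and a switch that routes by type.
  #include <cstdio>

  enum class MVT { i32, i64, Other };

  // Stand-in for FastISel::fastEmitInst_r: would create a MachineInstr and
  // return the virtual register holding its result (0 means "not handled").
  static unsigned fastEmitInst_r(const char *Inst, unsigned Op0, bool Op0IsKill) {
    std::printf("emit %s vreg%u%s\n", Inst, Op0, Op0IsKill ? " (kill)" : "");
    return Op0 + 1; // pretend this is the new result register
  }

  static unsigned fastEmit_CALL_MVT_i64_r(unsigned Op0, bool Op0IsKill) {
    return fastEmitInst_r("BLR", Op0, Op0IsKill);
  }

  // Dispatcher: pick the per-type emitter, or report "not handled".
  static unsigned fastEmit_CALL_r(MVT VT, unsigned Op0, bool Op0IsKill) {
    switch (VT) {
    case MVT::i64: return fastEmit_CALL_MVT_i64_r(Op0, Op0IsKill);
    default: return 0;
    }
  }

  int main() {
    unsigned R = fastEmit_CALL_r(MVT::i64, 5, /*Op0IsKill=*/true);
    std::printf("result vreg%u\n", R);
  }

Returning 0 is the signal for FastISel to give up on the instruction and fall back to the full SelectionDAG path.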
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/X86/ |
D | X86GenFastISel.inc |
    42 unsigned fastEmit_ISD_ABS_MVT_v16i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    46 return fastEmitInst_r(X86::VPABSBZ128rr, &X86::VR128XRegClass, Op0, Op0IsKill);
    49 return fastEmitInst_r(X86::PABSBrr, &X86::VR128RegClass, Op0, Op0IsKill);
    52 return fastEmitInst_r(X86::VPABSBrr, &X86::VR128RegClass, Op0, Op0IsKill);
    57 unsigned fastEmit_ISD_ABS_MVT_v32i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    61 return fastEmitInst_r(X86::VPABSBZ256rr, &X86::VR256XRegClass, Op0, Op0IsKill);
    64 return fastEmitInst_r(X86::VPABSBYrr, &X86::VR256RegClass, Op0, Op0IsKill);
    69 unsigned fastEmit_ISD_ABS_MVT_v64i8_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    73 return fastEmitInst_r(X86::VPABSBZrr, &X86::VR512RegClass, Op0, Op0IsKill);
    78 unsigned fastEmit_ISD_ABS_MVT_v8i16_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/PowerPC/ |
D | PPCGenFastISel.inc |
    91 unsigned fastEmit_ISD_BITCAST_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    95 return fastEmitInst_r(PPC::MTVSRD, &PPC::VSFRCRegClass, Op0, Op0IsKill);
    100 unsigned fastEmit_ISD_BITCAST_MVT_f64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    104 return fastEmitInst_r(PPC::MFVSRD, &PPC::G8RCRegClass, Op0, Op0IsKill);
    109 unsigned fastEmit_ISD_BITCAST_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
    111 case MVT::i64: return fastEmit_ISD_BITCAST_MVT_i64_r(RetVT, Op0, Op0IsKill);
    112 case MVT::f64: return fastEmit_ISD_BITCAST_MVT_f64_r(RetVT, Op0, Op0IsKill);
    119 unsigned fastEmit_ISD_BSWAP_MVT_v4i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    123 return fastEmitInst_r(PPC::XXBRW, &PPC::VSRCRegClass, Op0, Op0IsKill);
    128 unsigned fastEmit_ISD_BSWAP_MVT_v2i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/ARM/ |
D | ARMGenFastISel.inc |
    80 unsigned fastEmit_ARMISD_CALL_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    84 return fastEmitInst_r(ARM::BLX, &ARM::GPRRegClass, Op0, Op0IsKill);
    89 unsigned fastEmit_ARMISD_CALL_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
    91 case MVT::i32: return fastEmit_ARMISD_CALL_MVT_i32_r(RetVT, Op0, Op0IsKill);
    98 unsigned fastEmit_ARMISD_CALL_NOLINK_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    102 return fastEmitInst_r(ARM::tBX_CALL, &ARM::tGPRRegClass, Op0, Op0IsKill);
    105 return fastEmitInst_r(ARM::BMOVPCRX_CALL, &ARM::tGPRRegClass, Op0, Op0IsKill);
    108 return fastEmitInst_r(ARM::BX_CALL, &ARM::tGPRRegClass, Op0, Op0IsKill);
    113 unsigned fastEmit_ARMISD_CALL_NOLINK_r(MVT VT, MVT RetVT, unsigned Op0, bool Op0IsKill) {
    115 case MVT::i32: return fastEmit_ARMISD_CALL_NOLINK_MVT_i32_r(RetVT, Op0, Op0IsKill);
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/configs/common/lib/Target/Mips/ |
D | MipsGenFastISel.inc |
    55 unsigned fastEmit_ISD_BITCAST_MVT_i32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    59 return fastEmitInst_r(Mips::MTC1_MMR6, &Mips::FGR32RegClass, Op0, Op0IsKill);
    62 return fastEmitInst_r(Mips::MTC1_MM, &Mips::FGR32RegClass, Op0, Op0IsKill);
    65 return fastEmitInst_r(Mips::MTC1, &Mips::FGR32RegClass, Op0, Op0IsKill);
    70 unsigned fastEmit_ISD_BITCAST_MVT_i64_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    74 return fastEmitInst_r(Mips::DMTC1, &Mips::FGR64RegClass, Op0, Op0IsKill);
    79 unsigned fastEmit_ISD_BITCAST_MVT_f32_r(MVT RetVT, unsigned Op0, bool Op0IsKill) {
    83 return fastEmitInst_r(Mips::MFC1_MMR6, &Mips::GPR32RegClass, Op0, Op0IsKill);
    86 return fastEmitInst_r(Mips::MFC1_MM, &Mips::GPR32RegClass, Op0, Op0IsKill);
    89 return fastEmitInst_r(Mips::MFC1, &Mips::GPR32RegClass, Op0, Op0IsKill);
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Analysis/ |
D | InstructionSimplify.cpp |
    244 if (BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS)) in ExpandBinOp() local
    245 if (Op0->getOpcode() == OpcodeToExpand) { in ExpandBinOp()
    247 Value *A = Op0->getOperand(0), *B = Op0->getOperand(1), *C = RHS; in ExpandBinOp()
    304 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS); in SimplifyAssociativeBinOp() local
    308 if (Op0 && Op0->getOpcode() == Opcode) { in SimplifyAssociativeBinOp()
    309 Value *A = Op0->getOperand(0); in SimplifyAssociativeBinOp()
    310 Value *B = Op0->getOperand(1); in SimplifyAssociativeBinOp()
    350 if (Op0 && Op0->getOpcode() == Opcode) { in SimplifyAssociativeBinOp()
    351 Value *A = Op0->getOperand(0); in SimplifyAssociativeBinOp()
    352 Value *B = Op0->getOperand(1); in SimplifyAssociativeBinOp()
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/InstCombine/ |
D | InstCombineMulDivRem.cpp |
    186 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); in visitMul() local
    188 BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName()); in visitMul()
    241 if (Op0->hasOneUse()) { in visitMul()
    244 if (match(Op0, m_Sub(m_Value(Y), m_Value(X)))) in visitMul()
    246 else if (match(Op0, m_Add(m_Value(Y), m_ConstantInt(C1)))) in visitMul()
    268 if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) { in visitMul()
    280 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C))) in visitMul()
    284 if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) { in visitMul()
    287 cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() && in visitMul()
    302 BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0); in visitMul()
    [all …]
|
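The InstCombine matches in this directory all use the llvm/IR/PatternMatch.h matcher DSL: Op0 and Op1 are the instruction's operands, and match() with the m_* matchers both tests an operand's shape and binds its sub-values. A small compilable sketch of the idiom against the LLVM 10 headers; foldMulOfNegs is an invented helper used only for illustration, not an InstCombine entry point:

  #include "llvm/IR/InstrTypes.h"
  #include "llvm/IR/PatternMatch.h"

  using namespace llvm;
  using namespace llvm::PatternMatch;

  // Recognise (-X) * (-Y) so a caller could rewrite it as X * Y.
  // Mirrors the kind of check seen at InstCombineMulDivRem.cpp:284 above.
  static bool foldMulOfNegs(BinaryOperator &I, Value *&X, Value *&Y) {
    Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
    // m_Neg(m_Value(X)) matches "sub 0, X" and binds X; likewise for Y.
    return match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)));
  }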
D | InstCombineAddSub.cpp |
    828 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1); in foldNoWrapAdd() local
    839 match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C2))))) && in foldNoWrapAdd()
    849 if (match(Op0, m_OneUse(m_SExt(m_NSWAdd(m_Value(X), m_Constant(NarrowC)))))) { in foldNoWrapAdd()
    856 if (match(Op0, m_OneUse(m_ZExt(m_NUWAdd(m_Value(X), m_Constant(NarrowC)))))) { in foldNoWrapAdd()
    867 Value *Op0 = Add.getOperand(0), *Op1 = Add.getOperand(1); in foldAddWithConstant() local
    879 if (match(Op0, m_Sub(m_Constant(Op00C), m_Value(X)))) in foldAddWithConstant()
    885 if (match(Op0, m_OneUse(m_Sub(m_Value(X), m_Value(Y)))) && in foldAddWithConstant()
    890 if (match(Op0, m_ZExt(m_Value(X))) && in foldAddWithConstant()
    894 if (match(Op0, m_SExt(m_Value(X))) && in foldAddWithConstant()
    899 if (match(Op0, m_Not(m_Value(X)))) in foldAddWithConstant()
    [all …]
|
D | InstCombineShifts.cpp |
    364 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); in commonShiftTransforms() local
    365 assert(Op0->getType() == Op1->getType()); in commonShiftTransforms()
    371 return BinaryOperator::Create(I.getOpcode(), Op0, NewExt); in commonShiftTransforms()
    379 if (isa<Constant>(Op0)) in commonShiftTransforms()
    385 if (Instruction *Res = FoldShiftByConstant(Op0, CUI, I)) in commonShiftTransforms()
    396 if (match(Op0, m_Constant()) && match(Op1, m_Add(m_Value(A), m_Constant(C)))) in commonShiftTransforms()
    400 I.getOpcode(), Builder.CreateBinOp(I.getOpcode(), Op0, C), A); in commonShiftTransforms()
    682 Instruction *InstCombiner::FoldShiftByConstant(Value *Op0, Constant *Op1, in FoldShiftByConstant() argument
    693 canEvaluateShifted(Op0, Op1C->getZExtValue(), isLeftShift, *this, &I)) { in FoldShiftByConstant()
    697 << *Op0 << "\n SH: " << I << "\n"); in FoldShiftByConstant()
    [all …]
|
D | InstCombineAndOrXor.cpp |
    1163 Value *Op0 = LHS->getOperand(0), *Op1 = LHS->getOperand(1); in foldAndOfICmps() local
    1166 return getNewICmpValue(Code, IsSigned, Op0, Op1, Builder); in foldAndOfICmps()
    1421 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X; in reassociateFCmps() local
    1424 std::swap(Op0, Op1); in reassociateFCmps()
    1430 if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred || in reassociateFCmps()
    1449 NewFCmpInst->copyIRFlags(Op0); in reassociateFCmps()
    1539 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); in foldCastedBitwiseLogic() local
    1540 CastInst *Cast0 = dyn_cast<CastInst>(Op0); in foldCastedBitwiseLogic()
    1604 Value *Op0 = I.getOperand(0); in foldAndToXor() local
    1619 if (Op0->hasOneUse() || Op1->hasOneUse()) in foldAndToXor()
    [all …]
|
D | InstCombineCompares.cpp |
    1446 Value *Op0 = Cmp.getOperand(0), *Op1 = Cmp.getOperand(1); in foldICmpWithConstant() local
    1450 match(Op0, m_Add(m_Add(m_Value(A), m_Value(B)), m_ConstantInt(CI2)))) in foldICmpWithConstant()
    3169 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); in foldICmpInstWithConstantNotInt() local
    3171 Instruction *LHSI = dyn_cast<Instruction>(Op0); in foldICmpInstWithConstantNotInt()
    3704 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1); in foldICmpBinOp() local
    3707 BinaryOperator *BO0 = dyn_cast<BinaryOperator>(Op0); in foldICmpBinOp()
    3717 if (match(Op0, m_OneUse(m_c_Add(m_Specific(Op1), m_Value(X)))) && in foldICmpBinOp()
    3721 if (match(Op1, m_OneUse(m_c_Add(m_Specific(Op0), m_Value(X)))) && in foldICmpBinOp()
    3723 return new ICmpInst(Pred, X, Builder.CreateNot(Op0)); in foldICmpBinOp()
    3757 if ((C == Op0 || D == Op0) && NoOp1WrapProblem) in foldICmpBinOp()
    [all …]
|
D | InstructionCombining.cpp |
    340 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(I.getOperand(0)); in SimplifyAssociativeOrCommutative() local
    345 if (Op0 && Op0->getOpcode() == Opcode) { in SimplifyAssociativeOrCommutative()
    346 Value *A = Op0->getOperand(0); in SimplifyAssociativeOrCommutative()
    347 Value *B = Op0->getOperand(1); in SimplifyAssociativeOrCommutative()
    355 bool IsNUW = hasNoUnsignedWrap(I) && hasNoUnsignedWrap(*Op0); in SimplifyAssociativeOrCommutative()
    356 bool IsNSW = maintainNoSignedWrap(I, B, C) && hasNoSignedWrap(*Op0); in SimplifyAssociativeOrCommutative()
    406 if (Op0 && Op0->getOpcode() == Opcode) { in SimplifyAssociativeOrCommutative()
    407 Value *A = Op0->getOperand(0); in SimplifyAssociativeOrCommutative()
    408 Value *B = Op0->getOperand(1); in SimplifyAssociativeOrCommutative()
    449 if (Op0 && Op1 && in SimplifyAssociativeOrCommutative()
    [all …]
|
D | InstCombineCalls.cpp |
    732 static Value *simplifyX86extrq(IntrinsicInst &II, Value *Op0, in simplifyX86extrq() argument
    743 Constant *C0 = dyn_cast<Constant>(Op0); in simplifyX86extrq()
    794 Builder.CreateBitCast(Op0, ShufTy), in simplifyX86extrq()
    810 Value *Args[] = {Op0, CILength, CIIndex}; in simplifyX86extrq()
    826 static Value *simplifyX86insertq(IntrinsicInst &II, Value *Op0, Value *Op1, in simplifyX86insertq() argument
    874 Value *SV = Builder.CreateShuffleVector(Builder.CreateBitCast(Op0, ShufTy), in simplifyX86insertq()
    881 Constant *C0 = dyn_cast<Constant>(Op0); in simplifyX86insertq()
    911 Value *Args[] = {Op0, Op1, CILength, CIIndex}; in simplifyX86insertq()
    1193 Value *Op0 = II.getArgOperand(0); in foldCttzCtlz() local
    1197 if (match(Op0, m_BitReverse(m_Value(X)))) { in foldCttzCtlz()
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/SystemZ/ |
D | SystemZTDC.cpp |
    123 Value *Op0 = I.getOperand(0); in convertFCmp() local
    130 auto &Sem = Op0->getType()->getFltSemantics(); in convertFCmp()
    216 if (CallInst *CI = dyn_cast<CallInst>(Op0)) { in convertFCmp()
    222 Op0 = CI->getArgOperand(0); in convertFCmp()
    229 converted(&I, Op0, Mask, Worthy); in convertFCmp()
    233 Value *Op0 = I.getOperand(0); in convertICmp() local
    239 if (auto *Cast = dyn_cast<BitCastInst>(Op0)) { in convertICmp()
    259 } else if (auto *CI = dyn_cast<CallInst>(Op0)) { in convertICmp()
    288 Value *Op0, *Op1; in convertLogicOp() local
    291 std::tie(Op0, Mask0, Worthy0) = ConvertedInsts[cast<Instruction>(I.getOperand(0))]; in convertLogicOp()
    [all …]
|
D | SystemZISelLowering.cpp |
    37 : Op0(Op0In), Op1(Op1In), Chain(ChainIn), in Comparison()
    41 SDValue Op0, Op1; member
    2016 if (!C.Op0.hasOneUse() || in adjustSubwordCmp()
    2017 C.Op0.getOpcode() != ISD::LOAD || in adjustSubwordCmp()
    2022 auto *Load = cast<LoadSDNode>(C.Op0); in adjustSubwordCmp()
    2067 if (C.Op0.getValueType() != MVT::i32 || in adjustSubwordCmp()
    2069 C.Op0 = DAG.getExtLoad(ExtType, SDLoc(Load), MVT::i32, Load->getChain(), in adjustSubwordCmp()
    2074 DAG.ReplaceAllUsesOfValueWith(SDValue(Load, 1), C.Op0.getValue(1)); in adjustSubwordCmp()
    2109 if (C.Op0.getValueType() == MVT::f128) in shouldSwapCmpOperands()
    2131 if (isNaturalMemoryOperand(C.Op0, C.ICmpType) && C.Op0.hasOneUse()) { in shouldSwapCmpOperands()
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/Scalar/ |
D | Scalarizer.cpp |
    108 Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, in operator ()()
    110 return Builder.CreateFCmp(FCI.getPredicate(), Op0, Op1, Name); in operator ()()
    121 Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, in operator ()()
    123 return Builder.CreateICmp(ICI.getPredicate(), Op0, Op1, Name); in operator ()()
    146 Value *operator()(IRBuilder<> &Builder, Value *Op0, Value *Op1, in operator ()()
    148 return Builder.CreateBinOp(BO.getOpcode(), Op0, Op1, Name); in operator ()()
    488 Scatterer Op0 = scatter(&I, I.getOperand(0)); in splitBinary() local
    490 assert(Op0.size() == NumElems && "Mismatched binary operation"); in splitBinary()
    495 Res[Elem] = Split(Builder, Op0[Elem], Op1[Elem], in splitBinary()
    587 Scatterer Op0 = scatter(&SI, SI.getOperand(0)); in visitSelectInst() local
    [all …]
|
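Scalarizer::splitBinary (line 488 above) scatters each vector operand into per-lane values and calls one of the operator() functors once per lane; the per-lane results are then gathered back into a vector. A plain C++ sketch of that lane-by-lane splitting on fixed-size arrays, with no LLVM types involved:

  #include <array>
  #include <cstdio>
  #include <functional>

  // Lane-wise "split" of a binary operation: what Scalarizer::splitBinary does
  // with IR values, done here on plain arrays.
  template <typename T, size_t N>
  std::array<T, N> splitBinary(const std::array<T, N> &Op0,
                               const std::array<T, N> &Op1,
                               const std::function<T(T, T)> &Split) {
    std::array<T, N> Res{};
    for (size_t Elem = 0; Elem != N; ++Elem)
      Res[Elem] = Split(Op0[Elem], Op1[Elem]); // one scalar op per lane
    return Res;
  }

  int main() {
    std::array<int, 4> A{1, 2, 3, 4}, B{10, 20, 30, 40};
    auto Sum = splitBinary<int, 4>(A, B, [](int X, int Y) { return X + Y; });
    for (int V : Sum) std::printf("%d ", V); // 11 22 33 44
    std::printf("\n");
  }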
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/Utils/ |
D | AArch64BaseInfo.cpp |
    135 uint32_t Op0 = 0, Op1 = 0, CRn = 0, CRm = 0, Op2 = 0; in parseGenericRegister() local
    137 Ops[1].getAsInteger(10, Op0); in parseGenericRegister()
    142 Bits = (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2; in parseGenericRegister()
    149 uint32_t Op0 = (Bits >> 14) & 0x3; in genericRegisterString() local
    155 return "S" + utostr(Op0) + "_" + utostr(Op1) + "_C" + utostr(CRn) + "_C" + in genericRegisterString()
|
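parseGenericRegister and genericRegisterString pack the five AArch64 system-register fields into one integer: Op0 in bits 15:14, Op1 in bits 13:11, CRn in bits 10:7, CRm in bits 6:3, Op2 in bits 2:0. A standalone round-trip of that encoding, with the shift amounts and field widths taken from the snippet above:

  #include <cstdint>
  #include <cstdio>
  #include <string>

  // Pack S<op0>_<op1>_C<CRn>_C<CRm>_<op2> the same way the snippet does:
  // Bits = (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2.
  static uint32_t encodeSysReg(uint32_t Op0, uint32_t Op1, uint32_t CRn,
                               uint32_t CRm, uint32_t Op2) {
    return (Op0 << 14) | (Op1 << 11) | (CRn << 7) | (CRm << 3) | Op2;
  }

  // Reverse of the above, mirroring genericRegisterString().
  static std::string decodeSysReg(uint32_t Bits) {
    uint32_t Op0 = (Bits >> 14) & 0x3;  // 2 bits
    uint32_t Op1 = (Bits >> 11) & 0x7;  // 3 bits
    uint32_t CRn = (Bits >> 7) & 0xf;   // 4 bits
    uint32_t CRm = (Bits >> 3) & 0xf;   // 4 bits
    uint32_t Op2 = Bits & 0x7;          // 3 bits
    return "S" + std::to_string(Op0) + "_" + std::to_string(Op1) + "_C" +
           std::to_string(CRn) + "_C" + std::to_string(CRm) + "_" +
           std::to_string(Op2);
  }

  int main() {
    uint32_t Bits = encodeSysReg(3, 3, 4, 2, 1);
    std::printf("0x%04x -> %s\n", Bits, decodeSysReg(Bits).c_str()); // S3_3_C4_C2_1
  }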
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/ |
D | FastISel.h |
    358 virtual unsigned fastEmit_r(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
    363 virtual unsigned fastEmit_rr(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
    369 virtual unsigned fastEmit_ri(MVT VT, MVT RetVT, unsigned Opcode, unsigned Op0,
    377 unsigned fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0, bool Op0IsKill,
    398 const TargetRegisterClass *RC, unsigned Op0,
    404 const TargetRegisterClass *RC, unsigned Op0,
    410 const TargetRegisterClass *RC, unsigned Op0,
    417 const TargetRegisterClass *RC, unsigned Op0,
    423 const TargetRegisterClass *RC, unsigned Op0,
    435 const TargetRegisterClass *RC, unsigned Op0,
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/SelectionDAG/ |
D | FastISel.cpp |
    624 unsigned Op0 = getRegForValue(I->getOperand(0)); in selectBinaryOp() local
    625 if (!Op0) // Unhandled operand. Halt "fast" selection and bail. in selectBinaryOp()
    647 unsigned ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, in selectBinaryOp()
    664 ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill); in selectBinaryOp()
    1544 unsigned Op0 = getRegForValue(I->getOperand(0)); in selectBitCast() local
    1545 if (!Op0) // Unhandled operand. Halt "fast" selection and bail. in selectBitCast()
    1558 TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0); in selectBitCast()
    1564 ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill); in selectBitCast()
    1767 const Value *Op0 = EVI->getOperand(0); in selectExtractValue() local
    1768 Type *AggTy = Op0->getType(); in selectExtractValue()
    [all …]
|
D | TargetLowering.cpp |
    723 SDValue Op0 = Op.getOperand(0); in SimplifyMultipleUseDemandedBits() local
    730 Op0.getScalarValueSizeInBits() == DemandedBits.getBitWidth() && in SimplifyMultipleUseDemandedBits()
    731 getBooleanContents(Op0.getValueType()) == in SimplifyMultipleUseDemandedBits()
    739 return Op0; in SimplifyMultipleUseDemandedBits()
    1020 SDValue Op0 = Op.getOperand(0); in SimplifyDemandedBits() local
    1026 if (SimplifyDemandedBits(Op0, DemandedBits, DemandedLHS, Known2, TLO, in SimplifyDemandedBits()
    1042 Op0, DemandedBits, DemandedLHS, TLO.DAG, Depth + 1); in SimplifyDemandedBits()
    1046 Op0 = DemandedOp0 ? DemandedOp0 : Op0; in SimplifyDemandedBits()
    1048 SDValue NewOp = TLO.DAG.getVectorShuffle(VT, dl, Op0, Op1, ShuffleMask); in SimplifyDemandedBits()
    1055 SDValue Op0 = Op.getOperand(0); in SimplifyDemandedBits() local
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Transforms/AggressiveInstCombine/ |
D | AggressiveInstCombine.cpp |
    177 Value *Op0, *Op1; in matchAndOrChain() local
    182 if (match(V, m_And(m_Value(Op0), m_One()))) { in matchAndOrChain()
    184 return matchAndOrChain(Op0, MOps); in matchAndOrChain()
    186 if (match(V, m_And(m_Value(Op0), m_Value(Op1)))) in matchAndOrChain()
    187 return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps); in matchAndOrChain()
    190 if (match(V, m_Or(m_Value(Op0), m_Value(Op1)))) in matchAndOrChain()
    191 return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps); in matchAndOrChain()
    284 Value *Op0 = I.getOperand(0); in tryToRecognizePopCount() local
    288 if ((match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01)))) && in tryToRecognizePopCount()
|
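tryToRecognizePopCount (line 284 above) looks for the classic SWAR popcount sequence, whose final step is the multiply by the Mask01 constant matched at line 288, and rewrites the whole chain as a ctpop intrinsic. The 64-bit idiom being recognized, written out as ordinary C++ for reference (this is the input pattern, not the InstCombine code itself):

  #include <cstdint>
  #include <cstdio>

  // Branch-free SWAR popcount; the trailing multiply by 0x0101...01 ("Mask01")
  // sums the per-byte counts into the top byte, which the final shift extracts.
  static uint64_t popcount64(uint64_t V) {
    V = V - ((V >> 1) & 0x5555555555555555ULL);                         // 2-bit sums
    V = (V & 0x3333333333333333ULL) + ((V >> 2) & 0x3333333333333333ULL); // 4-bit sums
    V = (V + (V >> 4)) & 0x0F0F0F0F0F0F0F0FULL;                         // 8-bit sums
    return (V * 0x0101010101010101ULL) >> 56;                           // add the bytes
  }

  int main() {
    std::printf("%llu\n", (unsigned long long)popcount64(0xF0F0F0F0ULL)); // 16
  }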
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/ |
D | MachineIRBuilder.cpp |
    202 void MachineIRBuilder::validateBinaryOp(const LLT &Res, const LLT &Op0, in validateBinaryOp() argument
    205 assert((Res == Op0 && Res == Op1) && "type mismatch"); in validateBinaryOp()
    208 void MachineIRBuilder::validateShiftOp(const LLT &Res, const LLT &Op0, in validateShiftOp() argument
    211 assert((Res == Op0) && "type mismatch"); in validateShiftOp()
    215 const SrcOp &Op0, in buildPtrAdd() argument
    218 Res.getLLTTy(*getMRI()) == Op0.getLLTTy(*getMRI()) && "type mismatch"); in buildPtrAdd()
    221 return buildInstr(TargetOpcode::G_PTR_ADD, {Res}, {Op0, Op1}); in buildPtrAdd()
    225 MachineIRBuilder::materializePtrAdd(Register &Res, Register Op0, in materializePtrAdd() argument
    231 Res = Op0; in materializePtrAdd()
    235 Res = getMRI()->createGenericVirtualRegister(getMRI()->getType(Op0)); in materializePtrAdd()
    [all …]
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/Target/X86/ |
D | X86FloatingPoint.cpp |
    1296 unsigned Op0 = getFPReg(MI.getOperand(NumOperands - 2)); in handleTwoArgFP() local
    1298 bool KillsOp0 = MI.killsRegister(X86::FP0 + Op0); in handleTwoArgFP()
    1306 if (Op0 != TOS && Op1 != TOS) { // No operand at TOS? in handleTwoArgFP()
    1311 moveToTop(Op0, I); // Move dead operand to TOS. in handleTwoArgFP()
    1312 TOS = Op0; in handleTwoArgFP()
    1322 duplicateToTop(Op0, Dest, I); in handleTwoArgFP()
    1323 Op0 = TOS = Dest; in handleTwoArgFP()
    1330 duplicateToTop(Op0, Dest, I); in handleTwoArgFP()
    1331 Op0 = TOS = Dest; in handleTwoArgFP()
    1337 assert((TOS == Op0 || TOS == Op1) && (KillsOp0 || KillsOp1) && in handleTwoArgFP()
    [all …]
|
D | X86InstrBuilder.h |
    95 const MachineOperand &Op0 = MI->getOperand(Operand); in getAddressFromInstr() local
    96 if (Op0.isReg()) { in getAddressFromInstr()
    98 AM.Base.Reg = Op0.getReg(); in getAddressFromInstr()
    101 AM.Base.FrameIndex = Op0.getIndex(); in getAddressFromInstr()
|
/third_party/skia/third_party/externals/swiftshader/third_party/llvm-10.0/llvm/lib/IR/ |
D | AutoUpgrade.cpp |
    979 Value *Op0, Value *Op1) { in EmitX86Select() argument
    983 return Op0; in EmitX86Select()
    985 Mask = getX86MaskVec(Builder, Mask, Op0->getType()->getVectorNumElements()); in EmitX86Select()
    986 return Builder.CreateSelect(Mask, Op0, Op1); in EmitX86Select()
    990 Value *Op0, Value *Op1) { in EmitX86ScalarSelect() argument
    994 return Op0; in EmitX86ScalarSelect()
    1001 return Builder.CreateSelect(Mask, Op0, Op1); in EmitX86ScalarSelect()
    1007 static Value *UpgradeX86ALIGNIntrinsics(IRBuilder<> &Builder, Value *Op0, in UpgradeX86ALIGNIntrinsics() argument
    1013 unsigned NumElts = Op0->getType()->getVectorNumElements(); in UpgradeX86ALIGNIntrinsics()
    1025 return llvm::Constant::getNullValue(Op0->getType()); in UpgradeX86ALIGNIntrinsics()
    [all …]
|
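EmitX86Select and EmitX86ScalarSelect are used when AutoUpgrade rewrites legacy masked x86 intrinsics: an all-ones mask makes the select a no-op so Op0 is returned directly (lines 983 and 994), otherwise the integer mask is expanded to a vector of i1 and fed to a select. A scalar, non-LLVM sketch of that per-lane masking semantics (maskedSelect is an invented name for illustration):

  #include <array>
  #include <cstdint>
  #include <cstdio>

  // Per-lane masked select: lane i takes Op0[i] where mask bit i is set,
  // otherwise the passthrough Op1[i] - the semantics EmitX86Select builds in IR.
  template <size_t N>
  std::array<int, N> maskedSelect(uint32_t Mask, const std::array<int, N> &Op0,
                                  const std::array<int, N> &Op1) {
    const uint32_t AllOnes = (1u << N) - 1; // assumes fewer than 32 lanes
    if (Mask == AllOnes)
      return Op0; // all-ones mask: the select is a no-op, just forward Op0
    std::array<int, N> Res{};
    for (size_t I = 0; I != N; ++I)
      Res[I] = (Mask >> I) & 1 ? Op0[I] : Op1[I];
    return Res;
  }

  int main() {
    std::array<int, 4> A{1, 2, 3, 4}, B{9, 9, 9, 9};
    auto R = maskedSelect<4>(0b0101, A, B); // lanes 0 and 2 from A
    for (int V : R) std::printf("%d ", V);  // 1 9 3 9
    std::printf("\n");
  }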