/external/llvm-project/llvm/unittests/CodeGen/GlobalISel/

PatternMatchTest.cpp
   39  bool match = mi_match(MIBCst.getReg(0), *MRI, m_ICst(Cst));  in TEST_F()
   53  mi_match(MIBAdd.getReg(0), *MRI, m_GAdd(m_Reg(), m_Reg()));  in TEST_F()
   56  match = mi_match(MIBAdd.getReg(0), *MRI, …  in TEST_F()
   66  match = mi_match(MIBMul.getReg(0), *MRI, …  in TEST_F()
   73  match = mi_match(MIBMul.getReg(0), *MRI, …  in TEST_F()
   85  match = mi_match(MIBMul2.getReg(0), *MRI, …  in TEST_F()
   93  match = mi_match(MIBSub.getReg(0), *MRI, …  in TEST_F()
  100  match = mi_match(MIBFMul.getReg(0), *MRI, …  in TEST_F()
  109  match = mi_match(MIBFSub.getReg(0), *MRI, …  in TEST_F()
  117  match = mi_match(MIBAnd.getReg(0), *MRI, …  in TEST_F()
  [all …]
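The unit-test hits above build small MIR snippets and run the matchers on them directly. A minimal sketch of that usage pattern, assuming a MachineIRBuilder B and a MachineRegisterInfo MRI provided by a test fixture, with two illustrative value registers Src0/Src1 (not the fixture's actual members):

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"

using namespace llvm;
using namespace MIPatternMatch;

// Build a G_CONSTANT and a G_ADD, then match them back with mi_match.
static void patternMatchSketch(MachineIRBuilder &B, MachineRegisterInfo &MRI,
                               Register Src0, Register Src1) {
  LLT S64 = LLT::scalar(64);
  auto MIBCst = B.buildConstant(S64, 42);
  auto MIBAdd = B.buildAdd(S64, Src0, Src1);

  int64_t Cst;
  // Binds the constant's value into Cst when the def is a G_CONSTANT.
  bool MatchCst = mi_match(MIBCst.getReg(0), MRI, m_ICst(Cst));

  // Purely structural: the def is a G_ADD of two registers (wildcards here).
  bool MatchAdd = mi_match(MIBAdd.getReg(0), MRI, m_GAdd(m_Reg(), m_Reg()));
  (void)MatchCst;
  (void)MatchAdd;
}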
ConstantFoldingTest.cpp
   31  bool match = mi_match(MIBCAdd.getReg(0), *MRI, m_ICst(Cst));  in TEST_F()
   38  match = mi_match(MIBCAdd1.getReg(0), *MRI, m_ICst(Cst));  in TEST_F()
   50  match = mi_match(MIBCSub.getReg(0), *MRI, m_ICst(Cst));  in TEST_F()
   58  match = mi_match(MIBCSext1.getReg(0), *MRI, m_ICst(Cst));  in TEST_F()
   66  match = mi_match(MIBCSext2.getReg(0), *MRI, m_ICst(Cst));  in TEST_F()
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/

AMDGPUGlobalISelUtils.cpp
   36  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))  in getBaseWithConstantOffset()
   40  if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))  in getBaseWithConstantOffset()
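Both getBaseWithConstantOffset hits probe whether an add's second operand is an immediate, optionally reached through a COPY. A condensed sketch of that decomposition using only the matchers shown above, not the helper's actual control flow:

#include <utility>

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Split Reg into (Base, Offset) when it is defined by a G_ADD with a
// constant right-hand side; otherwise treat the whole register as the base.
static std::pair<Register, int64_t>
baseWithConstantOffsetSketch(Register Reg, const MachineRegisterInfo &MRI) {
  Register Base;
  int64_t Offset;
  if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Offset))))
    return {Base, Offset};
  return {Reg, 0};
}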
AMDGPUInstructionSelector.cpp
  802  if (mi_match(Reg, MRI, m_ICst(C)) && C == 0)  in isZero()
  806  return mi_match(Reg, MRI, m_Copy(m_ICst(C))) && C == 0;  in isZero()
 2102  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset))) {  in selectMUBUFScratchOffen()
 2208  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||  in selectMUBUFScratchOffset()
 2260  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {  in selectDS1Addr1Offset()
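The isZero hits in this llvm-10 tree spell a zero check as bind-then-compare, with a COPY-tolerant fallback. A sketch of that check (newer trees can express the same thing with m_ZeroInt(), as later entries show):

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// True if Reg is a G_CONSTANT 0, either directly or behind a single COPY.
static bool isZeroSketch(Register Reg, const MachineRegisterInfo &MRI) {
  int64_t C;
  if (mi_match(Reg, MRI, m_ICst(C)) && C == 0)
    return true;
  return mi_match(Reg, MRI, m_Copy(m_ICst(C))) && C == 0;
}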
AMDGPURegisterBankInfo.cpp
 1285  if (mi_match(Reg, MRI, m_ICst(Const)))  in getBaseWithConstantOffset()
 1289  if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Const))))  in getBaseWithConstantOffset()
 1342  return mi_match(Reg, MRI, m_ICst(C)) && C == 0;  in isZero()
/external/llvm-project/llvm/lib/Target/AMDGPU/

AMDGPUGlobalISelUtils.cpp
   36  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ICst(Offset)))  in getBaseWithConstantOffset()
   40  if (mi_match(Def->getOperand(2).getReg(), MRI, m_Copy(m_ICst(Offset))))  in getBaseWithConstantOffset()
AMDGPUPostLegalizerCombiner.cpp
   84  !mi_match(Cond, MRI, …  in matchFMinFMaxLegacy()
  207  mi_match(SrcReg, MRI, m_GZExt(m_Reg(SrcReg)));  in matchCvtF32UByteN()
  211  bool IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt)));  in matchCvtF32UByteN()
  212  if (IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)))) {  in matchCvtF32UByteN()
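matchCvtF32UByteN first peels an optional G_ZEXT, then looks for a shift of the source by a constant amount. A sketch of just that matching step; the byte-offset arithmetic and the rewrite done by the real combine are omitted:

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Peel an optional zext, then detect a shift of the source by a constant.
static bool matchShiftedSourceSketch(Register SrcReg,
                                     const MachineRegisterInfo &MRI,
                                     Register &Src0, int64_t &ShiftAmt,
                                     bool &IsShr) {
  // If the def is a G_ZEXT this rebinds SrcReg to its source; otherwise
  // SrcReg is left untouched and the shift is matched on the original value.
  mi_match(SrcReg, MRI, m_GZExt(m_Reg(SrcReg)));

  IsShr = mi_match(SrcReg, MRI, m_GLShr(m_Reg(Src0), m_ICst(ShiftAmt)));
  return IsShr || mi_match(SrcReg, MRI, m_GShl(m_Reg(Src0), m_ICst(ShiftAmt)));
}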
AMDGPUInstructionSelector.cpp
  647  bool Shift0 = mi_match( …  in selectG_BUILD_VECTOR_TRUNC()
  650  bool Shift1 = mi_match( …  in selectG_BUILD_VECTOR_TRUNC()
 3469  if (mi_match(Reg, MRI, m_GZExt(m_Reg(ZExtSrc))))  in matchZeroExtendFromS32()
 3477  if (mi_match(Def->getOperand(2).getReg(), MRI, m_ZeroInt())) {  in matchZeroExtendFromS32()
 3605  if (mi_match(Root.getReg(), *MRI, m_ICst(Offset)) &&  in selectMUBUFScratchOffen()
 3726  if (!mi_match(Root.getReg(), *MRI, m_ICst(Offset)) ||  in selectMUBUFScratchOffset()
 3771  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {  in selectDS1Addr1OffsetImpl()
 3837  } else if (mi_match(Root.getReg(), *MRI, m_ICst(ConstAddr))) {  in selectDSReadWrite2Impl()
AMDGPURegisterBankInfo.cpp
 1706  if (mi_match(Reg, MRI, m_ICst(Const)))  in getBaseWithConstantOffset()
 1710  if (mi_match(Reg, MRI, m_GAdd(m_Reg(Base), m_ICst(Const))))  in getBaseWithConstantOffset()
 1763  return mi_match(Reg, MRI, m_ICst(C)) && C == 0;  in isZero()
AMDGPULegalizerInfo.cpp
 4134  if (mi_match(MI.getOperand(ArgOffset + Intr->LodIndex).getReg(), *MRI, …  in legalizeImageIntrinsic()
 4156  if (mi_match(MI.getOperand(ArgOffset + Intr->MipIndex).getReg(), *MRI, …  in legalizeImageIntrinsic()
/external/llvm-project/llvm/include/llvm/CodeGen/GlobalISel/

LegalizationArtifactCombiner.h
   59  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {  in tryCombineAnyExt()
   70  if (mi_match(SrcReg, MRI, …  in tryCombineAnyExt()
  111  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))) ||  in tryCombineZExt()
  112  mi_match(SrcReg, MRI, m_GSExt(m_Reg(SextSrc)))) {  in tryCombineZExt()
  131  if (mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZextSrc)))) {  in tryCombineZExt()
  169  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {  in tryCombineSExt()
  187  if (mi_match(SrcReg, MRI, …  in tryCombineSExt()
  287  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {  in tryCombineTrunc()
 1001  while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {  in lookThroughCopyInstrs()
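Two idioms recur across the artifact-combiner hits: peeling a G_TRUNC that feeds an extend, and walking through a chain of COPYs. A compact sketch of both, using only the matchers visible above:

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Follow COPY chains down to the underlying definition.
static Register lookThroughCopiesSketch(Register Reg,
                                        const MachineRegisterInfo &MRI) {
  Register TmpReg;
  while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg))))
    Reg = TmpReg;
  return Reg;
}

// anyext(trunc(x)) can often reuse x directly; this is only the match step.
static bool matchAnyExtOfTruncSketch(Register SrcReg,
                                     const MachineRegisterInfo &MRI,
                                     Register &TruncSrc) {
  return mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)));
}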
MIPatternMatch.h
   23  bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P) {  in mi_match()  [function definition]
   62  return mi_match(Reg, MRI, m_ICst(MatchedVal)) && MatchedVal == RequestedVal;  in match()
  205  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
  297  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
  385  if (!mi_match(Op, MRI, m_MInstr(TmpMI)) || TmpMI->getOpcode() != Opcode)
  433  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
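The hit at line 62 shows that the specific-constant matcher is itself written on top of mi_match. User code can define matchers the same way: any object exposing a match(MRI, Reg) member composes with mi_match. A hedged sketch of such a custom matcher (the struct and the m_Pow2ICst helper are illustrative, not part of the header):

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/Support/MathExtras.h"

using namespace llvm;
using namespace MIPatternMatch;

// Custom matcher: succeeds when Reg is a positive power-of-two G_CONSTANT,
// binding its log2. Composes with mi_match like the built-in matchers.
struct Pow2ConstantMatch {
  unsigned &Log2;
  Pow2ConstantMatch(unsigned &Log2) : Log2(Log2) {}
  bool match(const MachineRegisterInfo &MRI, Register Reg) {
    int64_t Cst;
    if (!mi_match(Reg, MRI, m_ICst(Cst)) || Cst <= 0 || !isPowerOf2_64(Cst))
      return false;
    Log2 = Log2_64(Cst);
    return true;
  }
};

inline Pow2ConstantMatch m_Pow2ICst(unsigned &Log2) { return {Log2}; }

// Usage: unsigned L; if (mi_match(Reg, MRI, m_Pow2ICst(L))) { ... }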
/external/swiftshader/third_party/llvm-10.0/llvm/include/llvm/CodeGen/GlobalISel/

LegalizationArtifactCombiner.h
   59  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {  in tryCombineAnyExt()
   70  if (mi_match(SrcReg, MRI, …  in tryCombineAnyExt()
  108  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {  in tryCombineZExt()
  152  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc)))) {  in tryCombineSExt()
  650  while (mi_match(Reg, MRI, m_Copy(m_Reg(TmpReg)))) {  in lookThroughCopyInstrs()
MIPatternMatch.h
   24  bool mi_match(Reg R, const MachineRegisterInfo &MRI, Pattern &&P) {  in mi_match()  [function definition]
  181  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
  249  if (mi_match(Op, MRI, m_MInstr(TmpMI))) {
/external/llvm-project/llvm/lib/CodeGen/GlobalISel/

CombinerHelper.cpp
  597  if (mi_match(SrcReg, MRI, m_GTrunc(m_Reg(TruncSrc))))  in matchSextTruncSextLoad()
 1768  if (!mi_match(LHS, MRI, m_GAnyExt(m_Reg(ExtSrc))) &&  in matchCombineShlOfExtend()
 1769  !mi_match(LHS, MRI, m_GZExt(m_Reg(ExtSrc))) &&  in matchCombineShlOfExtend()
 1770  !mi_match(LHS, MRI, m_GSExt(m_Reg(ExtSrc))))  in matchCombineShlOfExtend()
 1815  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))  in peekThroughBitcast()
 1963  if (!mi_match(SrcReg, MRI, m_GZExt(m_Reg(ZExtSrcReg))))  in matchCombineUnmergeZExtToZExt()
 2123  return mi_match(SrcReg, MRI, …  in matchCombineI2PToP2I()
 2139  return mi_match(SrcReg, MRI, m_GIntToPtr(m_Reg(Reg)));  in matchCombineP2IToI2P()
 2162  if (mi_match(SrcReg, MRI, m_GPtrToInt(m_Reg(PtrReg.first)))) {  in matchCombineAddP2IToPtrAdd()
 2205  if (mi_match(LHS, MRI, m_GIntToPtr(m_ICst(Cst)))) {  in matchCombineConstPtrAddToI2P()
  [all …]
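Several of these combines reduce to a single structural query. A sketch of two shapes visible above: skipping a chain of G_BITCASTs by rebinding the walked register, and recognising a G_INTTOPTR of a constant for the const-ptradd fold (matching only, no rewrite):

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Skip over any chain of G_BITCASTs; binding into Reg itself advances the walk.
static Register peekThroughBitcastSketch(Register Reg,
                                         const MachineRegisterInfo &MRI) {
  while (mi_match(Reg, MRI, m_GBitcast(m_Reg(Reg))))
    ;
  return Reg;
}

// G_PTR_ADD whose base is a G_INTTOPTR of a constant: bind the constant base.
static bool matchIntToPtrOfConstSketch(Register BaseReg,
                                       const MachineRegisterInfo &MRI,
                                       int64_t &Cst) {
  return mi_match(BaseReg, MRI, m_GIntToPtr(m_ICst(Cst)));
}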
Utils.cpp
  685  if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))  in isBuildVectorConstantSplat()
  703  if (!mi_match(Element, MRI, m_ICst(ElementValue)))  in getBuildVectorConstantSplat()
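The splat helpers walk the G_BUILD_VECTOR sources and apply a per-element constant match. A trimmed sketch of the first form, assuming MI is already known to be a G_BUILD_VECTOR; the undef-element handling of the real helper is skipped:

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Every source operand of the G_BUILD_VECTOR must be the same G_CONSTANT.
static bool isConstantSplatSketch(const MachineInstr &MI,
                                  const MachineRegisterInfo &MRI,
                                  int64_t SplatValue) {
  // Operand 0 is the def; operands 1..N are the vector elements.
  for (unsigned I = 1, E = MI.getNumOperands(); I != E; ++I) {
    Register Element = MI.getOperand(I).getReg();
    if (!mi_match(Element, MRI, m_SpecificICst(SplatValue)))
      return false;
  }
  return true;
}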
LegalizerHelper.cpp
 3171  if (mi_match(IdxReg, *B.getMRI(), m_ICst(IdxVal)))  in clampDynamicVectorIndex()
 3672  if (mi_match(Idx, MRI, m_ICst(IdxVal))) {  in fewerElementsVectorExtractInsertVectorElt()
 5666  if (mi_match(Idx, MRI, m_ICst(IdxVal))) {  in lowerExtractInsertVectorElt()
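In the legalizer the same constant probe picks between a static element access and the general dynamic-index lowering. A sketch of that decision point only; both lowerings are elided:

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

// Decide how to lower a vector element access: a constant index can be
// resolved statically, anything else needs the clamped dynamic path.
static bool useStaticElementAccessSketch(Register Idx,
                                         const MachineRegisterInfo &MRI,
                                         int64_t &IdxVal) {
  return mi_match(Idx, MRI, m_ICst(IdxVal));
}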
/external/llvm-project/llvm/lib/Target/AArch64/GISel/

AArch64PostLegalizerLowering.cpp
  294  if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ZeroInt()))  in matchDupFromInsertVectorElt()
AArch64InstructionSelector.cpp
 1050  if (mi_match(Reg, MRI, m_Neg(m_Reg(MatchReg)))) {  in emitSelect()
 1067  if (mi_match(Reg, MRI, m_Not(m_Reg(MatchReg)))) {  in emitSelect()
 1084  if (mi_match(Reg, MRI, m_GAdd(m_Reg(MatchReg), m_SpecificICst(1)))) {  in emitSelect()
 1985  if (!mi_match(I.getOperand(2).getReg(), MRI, m_Neg(m_Reg(NegatedReg))))  in convertPtrAddToAdd()
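The emitSelect hits classify the select's value operand as negated, inverted, or incremented relative to another register, which is what lets the selector pick CSNEG, CSINV, or CSINC. A sketch of that classification alone; the enum and function are illustrative, and the actual instruction emission happens elsewhere:

#include "llvm/CodeGen/GlobalISel/MIPatternMatch.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using namespace llvm;
using namespace MIPatternMatch;

enum class SelectFoldKind { None, Neg, Not, PlusOne };

// Classify how a select operand relates to some other register MatchReg.
static SelectFoldKind
classifySelectOperandSketch(Register Reg, const MachineRegisterInfo &MRI,
                            Register &MatchReg) {
  if (mi_match(Reg, MRI, m_Neg(m_Reg(MatchReg))))
    return SelectFoldKind::Neg;      // candidate for CSNEG
  if (mi_match(Reg, MRI, m_Not(m_Reg(MatchReg))))
    return SelectFoldKind::Not;      // candidate for CSINV
  if (mi_match(Reg, MRI, m_GAdd(m_Reg(MatchReg), m_SpecificICst(1))))
    return SelectFoldKind::PlusOne;  // candidate for CSINC
  return SelectFoldKind::None;
}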
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AArch64/

AArch64InstructionSelector.cpp
 3698  if (!mi_match(InsMI->getOperand(3).getReg(), MRI, m_ICst(Index)) || Index)  in tryOptVectorDup()