/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/AMDGPU/
D | SIFoldOperands.cpp |
      31    MachineInstr *UseMI;    member
      45    UseMI(MI), OpToFold(nullptr), ShrinkOpcode(ShrinkOp), UseOpNo(OpNo),    in FoldCandidate()
      95    MachineInstr *UseMI,
     135    const MachineInstr &UseMI,    in isInlineConstantIfFolded() argument
     138    if (TII->isInlineConstant(UseMI, OpNo, OpToFold))    in isInlineConstantIfFolded()
     141    unsigned Opc = UseMI.getOpcode();    in isInlineConstantIfFolded()
     172    const MachineInstr &UseMI,    in frameIndexMayFold() argument
     176    (TII->isMUBUF(UseMI) || TII->isFLATScratch(UseMI)) &&    in frameIndexMayFold()
     177    OpNo == AMDGPU::getNamedOperandIdx(UseMI.getOpcode(), AMDGPU::OpName::vaddr);    in frameIndexMayFold()
     188    MachineInstr *MI = Fold.UseMI;    in updateOperand()
     [all …]

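The SIFoldOperands matches above revolve around FoldCandidate, which records a use site as a (UseMI, UseOpNo) pair plus the operand to substitute. Below is a minimal sketch of that bookkeeping using only generic CodeGen APIs; the struct layout and the helper name are illustrative stand-ins, not the pass's real definitions.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

namespace {
// Illustrative analogue of the pass's FoldCandidate (lines 31-45 above).
struct FoldCandidateSketch {
  llvm::MachineInstr *UseMI;       // instruction consuming the value
  unsigned UseOpNo;                // operand index of the use in UseMI
  llvm::MachineOperand *OpToFold;  // immediate/frame-index to substitute
};

// Record every non-debug use of Reg as a fold candidate.
void collectFoldCandidates(llvm::MachineOperand &OpToFold, llvm::Register Reg,
                           llvm::MachineRegisterInfo &MRI,
                           llvm::SmallVectorImpl<FoldCandidateSketch> &Out) {
  for (llvm::MachineOperand &MO : MRI.use_nodbg_operands(Reg)) {
    llvm::MachineInstr *UseMI = MO.getParent();
    Out.push_back({UseMI, UseMI->getOperandNo(&MO), &OpToFold});
  }
}
} // namespace
```
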
D | SIFixSGPRCopies.cpp |
     214    const auto *UseMI = MO.getParent();    in tryChangeVGPRtoSGPRinCopy() local
     215    if (UseMI == &MI)    in tryChangeVGPRtoSGPRinCopy()
     217    if (MO.isDef() || UseMI->getParent() != MI.getParent() ||    in tryChangeVGPRtoSGPRinCopy()
     218    UseMI->getOpcode() <= TargetOpcode::GENERIC_OP_END ||    in tryChangeVGPRtoSGPRinCopy()
     219    !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))    in tryChangeVGPRtoSGPRinCopy()
     770    const MachineInstr *UseMI = Use.getParent();    in processPHINode() local
     771    AllAGPRUses &= (UseMI->isCopy() &&    in processPHINode()
     772    TRI->isAGPR(*MRI, UseMI->getOperand(0).getReg())) ||    in processPHINode()
     774    if (UseMI->isCopy() || UseMI->isRegSequence()) {    in processPHINode()
     775    if (UseMI->isCopy() &&    in processPHINode()
     [all …]

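Lines 214-219 above gate tryChangeVGPRtoSGPRinCopy: the copy's destination may only switch register class if every user remains legal afterwards. A hedged sketch of that scan follows; it relies on SIInstrInfo::isOperandLegal, so it only compiles inside the AMDGPU target directory, and the helper name is made up.

```cpp
// Requires the AMDGPU target-internal header for SIInstrInfo.
#include "SIInstrInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

static bool allUsersStayLegal(llvm::MachineInstr &Copy,
                              llvm::MachineOperand &Src,
                              llvm::MachineRegisterInfo &MRI,
                              const llvm::SIInstrInfo *TII) {
  llvm::Register DstReg = Copy.getOperand(0).getReg();
  for (llvm::MachineOperand &MO : MRI.use_nodbg_operands(DstReg)) {
    const llvm::MachineInstr *UseMI = MO.getParent();
    if (UseMI == &Copy)
      continue;                                    // skip the copy itself
    if (MO.isDef() ||                              // defensive: plain uses only
        UseMI->getParent() != Copy.getParent() ||  // stay within the block
        UseMI->getOpcode() <= llvm::TargetOpcode::GENERIC_OP_END ||
        !TII->isOperandLegal(*UseMI, UseMI->getOperandNo(&MO), &Src))
      return false;
  }
  return true;
}
```
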
/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/Hexagon/ |
D | HexagonOptAddrMode.cpp |
      92    bool xformUseMI(MachineInstr *TfrMI, MachineInstr *UseMI,
      96    bool updateAddUses(MachineInstr *AddMI, MachineInstr *UseMI);
     187    MachineInstr &UseMI = *NodeAddr<StmtNode *>(IA).Addr->getCode();    in canRemoveAddasl() local
     191    MI.getParent() != UseMI.getParent())    in canRemoveAddasl()
     194    const MCInstrDesc &UseMID = UseMI.getDesc();    in canRemoveAddasl()
     196    HII->getAddrMode(UseMI) != HexagonII::BaseImmOffset ||    in canRemoveAddasl()
     197    getBaseWithLongOffset(UseMI) < 0)    in canRemoveAddasl()
     201    if (UseMID.mayStore() && UseMI.getOperand(2).isReg() &&    in canRemoveAddasl()
     202    UseMI.getOperand(2).getReg() == MI.getOperand(0).getReg())    in canRemoveAddasl()
     205    for (auto &Mo : UseMI.operands())    in canRemoveAddasl()
     [all …]

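One guard in canRemoveAddasl (lines 201-202, present in both trees below) is easy to miss: if the use is a store whose *stored value* is the register being re-addressed, the rewrite is unsafe. Here is a generic sketch of that check; the operand-2 position is an assumption mirrored from the snippet's Hexagon base+offset store forms, not a target-independent invariant.

```cpp
#include "llvm/CodeGen/MachineInstr.h"

// True when UseMI stores the very register DefMI defines, i.e. the value
// operand (operand 2 in the Hexagon store forms matched above) aliases
// the addressing computation being rewritten.
static bool storesRewrittenReg(const llvm::MachineInstr &UseMI,
                               const llvm::MachineInstr &DefMI) {
  return UseMI.mayStore() && UseMI.getNumOperands() > 2 &&
         UseMI.getOperand(2).isReg() &&
         UseMI.getOperand(2).getReg() == DefMI.getOperand(0).getReg();
}
```
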
/external/llvm/lib/Target/Hexagon/ |
D | HexagonOptAddrMode.cpp |
      78    bool xformUseMI(MachineInstr *TfrMI, MachineInstr *UseMI,
     165    MachineInstr *UseMI = NodeAddr<StmtNode *>(IA).Addr->getCode();    in canRemoveAddasl() local
     169    MI->getParent() != UseMI->getParent())    in canRemoveAddasl()
     172    const MCInstrDesc &UseMID = UseMI->getDesc();    in canRemoveAddasl()
     174    HII->getAddrMode(UseMI) != HexagonII::BaseImmOffset ||    in canRemoveAddasl()
     175    getBaseWithLongOffset(UseMI) < 0)    in canRemoveAddasl()
     179    if (UseMID.mayStore() && UseMI->getOperand(2).isReg() &&    in canRemoveAddasl()
     180    UseMI->getOperand(2).getReg() == MI->getOperand(0).getReg())    in canRemoveAddasl()
     183    for (auto &Mo : UseMI->operands())    in canRemoveAddasl()
     441    MachineInstr *UseMI = UseIA.Addr->getCode();    in changeAddAsl() local
     [all …]

/external/llvm/lib/Target/AMDGPU/ |
D | SIFoldOperands.cpp |
      50    MachineInstr *UseMI;    member
      56    UseMI(MI), UseOpNo(OpNo) {    in FoldCandidate()
     101    MachineInstr *MI = Fold.UseMI;    in updateOperand()
     125    if (Candidate.UseMI == MI)    in isUseMIInFoldList()
     191    static void foldOperand(MachineOperand &OpToFold, MachineInstr *UseMI,    in foldOperand() argument
     197    const MachineOperand &UseOp = UseMI->getOperand(UseOpIdx);    in foldOperand()
     236    if (UseMI->getOpcode() == AMDGPU::COPY) {    in foldOperand()
     237    unsigned DestReg = UseMI->getOperand(0).getReg();    in foldOperand()
     247    UseMI->setDesc(TII->get(MovOp));    in foldOperand()
     248    CopiesToReplace.push_back(UseMI);    in foldOperand()
     [all …]

/external/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/ |
D | MachineTraceMetrics.cpp |
     651    static bool getDataDeps(const MachineInstr &UseMI,    in getDataDeps() argument
     655    if (UseMI.isDebugInstr())    in getDataDeps()
     659    for (MachineInstr::const_mop_iterator I = UseMI.operands_begin(),    in getDataDeps()
     660    E = UseMI.operands_end(); I != E; ++I) {    in getDataDeps()
     673    Deps.push_back(DataDep(MRI, Reg, UseMI.getOperandNo(I)));    in getDataDeps()
     681    static void getPHIDeps(const MachineInstr &UseMI,    in getPHIDeps() argument
     688    assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI");    in getPHIDeps()
     689    for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) {    in getPHIDeps()
     690    if (UseMI.getOperand(i + 1).getMBB() == Pred) {    in getPHIDeps()
     691    Register Reg = UseMI.getOperand(i).getReg();    in getPHIDeps()
     [all …]

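getPHIDeps (lines 681-691) leans on the fixed PHI operand layout: operand 0 is the def, followed by (value, predecessor-block) pairs. A small self-contained sketch of that walk, extracting the value flowing in from one predecessor; the helper name is illustrative.

```cpp
#include <cassert>
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineInstr.h"

// Return the register PHI receives along the edge from Pred, or an
// invalid Register if Pred is not among its incoming blocks.
static llvm::Register incomingValueFor(const llvm::MachineInstr &PHI,
                                       const llvm::MachineBasicBlock *Pred) {
  assert(PHI.isPHI() && PHI.getNumOperands() % 2 && "Bad PHI");
  for (unsigned I = 1, E = PHI.getNumOperands(); I != E; I += 2)
    if (PHI.getOperand(I + 1).getMBB() == Pred)
      return PHI.getOperand(I).getReg();
  return llvm::Register();
}
```
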
D | LiveRangeEdit.cpp |
     187    MachineInstr *DefMI = nullptr, *UseMI = nullptr;    in foldAsLoad() local
     199    if (UseMI && UseMI != MI)    in foldAsLoad()
     204    UseMI = MI;    in foldAsLoad()
     207    if (!DefMI || !UseMI)    in foldAsLoad()
     213    LIS.getInstructionIndex(*UseMI))    in foldAsLoad()
     223    << " into single use: " << *UseMI);    in foldAsLoad()
     226    if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)    in foldAsLoad()
     229    MachineInstr *FoldMI = TII.foldMemoryOperand(*UseMI, Ops, *DefMI, &LIS);    in foldAsLoad()
     233    LIS.ReplaceMachineInstrInMaps(*UseMI, *FoldMI);    in foldAsLoad()
     234    if (UseMI->isCall())    in foldAsLoad()
     [all …]

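foldAsLoad above pairs one defining load with its single using instruction and asks the target to fold the memory operand into the user. A condensed, hedged sketch of that flow; the real pass's rematerialization checks, the call-site special case at line 234, and removal of the now-dead load are elided.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/LiveIntervals.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetInstrInfo.h"

static bool foldLoadIntoSoleUse(llvm::Register Reg, llvm::MachineInstr &DefMI,
                                llvm::MachineRegisterInfo &MRI,
                                const llvm::TargetInstrInfo &TII,
                                llvm::LiveIntervals &LIS) {
  if (!MRI.hasOneNonDBGUse(Reg))
    return false;                                 // need a single consumer
  llvm::MachineInstr &UseMI = *MRI.use_instr_nodbg_begin(Reg);
  llvm::SmallVector<unsigned, 8> Ops;
  if (UseMI.readsWritesVirtualRegister(Reg, &Ops).second)
    return false;                                 // value also written: bail
  llvm::MachineInstr *FoldMI = TII.foldMemoryOperand(UseMI, Ops, DefMI, &LIS);
  if (!FoldMI)
    return false;                                 // target declined the fold
  LIS.ReplaceMachineInstrInMaps(UseMI, *FoldMI);
  UseMI.eraseFromParent();
  return true;
}
```
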
D | TargetSchedule.cpp |
     186    const MachineInstr *UseMI, unsigned UseOperIdx) const {    in computeOperandLatency() argument
     193    if (UseMI) {    in computeOperandLatency()
     195    *UseMI, UseOperIdx);    in computeOperandLatency()
     225    if (!UseMI)    in computeOperandLatency()
     229    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);    in computeOperandLatency()
     232    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);    in computeOperandLatency()

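The branchy body above resolves to a simple client contract: computeOperandLatency takes a def (instruction, operand index) and an optional use; a null UseMI (the line 225 early-out) yields the def's default latency. A minimal usage sketch:

```cpp
#include "llvm/CodeGen/TargetSchedule.h"

// Latency of the edge DefMI[DefOpIdx] -> UseMI[UseOpIdx]. UseMI may be
// nullptr when the consumer is unknown (e.g. a live-out value), in which
// case the model falls back to the def's worst-case latency.
static unsigned edgeLatency(const llvm::TargetSchedModel &Model,
                            const llvm::MachineInstr *DefMI, unsigned DefOpIdx,
                            const llvm::MachineInstr *UseMI, unsigned UseOpIdx) {
  return Model.computeOperandLatency(DefMI, DefOpIdx, UseMI, UseOpIdx);
}
```
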
D | RegisterScavenging.cpp |
     310    MachineBasicBlock::iterator &UseMI) {    in findSurvivorReg() argument
     367    UseMI = RestorePointMI;    in findSurvivorReg()
     463    MachineBasicBlock::iterator &UseMI) {    in spill() argument
     508    if (!TRI->saveScavengerRegister(*MBB, Before, UseMI, &RC, Reg)) {    in spill()
     525    TII->loadRegFromStackSlot(*MBB, UseMI, Reg, Scavenged[SI].FrameIndex,    in spill()
     527    II = std::prev(UseMI);    in spill()
     559    MachineBasicBlock::iterator UseMI;    in scavengeRegister() local
     560    Register SReg = findSurvivorReg(I, Candidates, 25, UseMI);    in scavengeRegister()
     571    ScavengedInfo &Scavenged = spill(SReg, *RC, SPAdj, I, UseMI);    in scavengeRegister()
     572    Scavenged.Restore = &*std::prev(UseMI);    in scavengeRegister()
     [all …]

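findSurvivorReg and spill above are internals; a pass that needs a temporary register only touches the public entry point, which spills around the use window itself when nothing is free. A hedged usage sketch, with an illustrative wrapper name:

```cpp
#include "llvm/CodeGen/RegisterScavenging.h"

// Ask the scavenger for a register of class RC usable at I. If none
// survives, it spills one before I and schedules the restore point (the
// UseMI iterator threaded through findSurvivorReg/spill above).
static llvm::Register scavengeTemp(llvm::RegScavenger &RS,
                                   const llvm::TargetRegisterClass &RC,
                                   llvm::MachineBasicBlock::iterator I,
                                   int SPAdj) {
  return RS.scavengeRegister(&RC, I, SPAdj);
}
```
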
D | MachineLICM.cpp |
    1005    for (MachineInstr &UseMI : MRI->use_instructions(CopyDstReg)) {    in isCopyFeedingInvariantStore()
    1006    if (UseMI.mayStore() && isInvariantStore(UseMI, TRI, MRI))    in isCopyFeedingInvariantStore()
    1102    for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {    in HasLoopPHIUse()
    1104    if (UseMI.isPHI()) {    in HasLoopPHIUse()
    1107    if (CurLoop->contains(&UseMI))    in HasLoopPHIUse()
    1112    if (isExitBlock(UseMI.getParent()))    in HasLoopPHIUse()
    1117    if (UseMI.isCopy() && CurLoop->contains(&UseMI))    in HasLoopPHIUse()
    1118    Work.push_back(&UseMI);    in HasLoopPHIUse()
    1133    for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {    in HasHighOperandLatency()
    1134    if (UseMI.isCopyLike())    in HasHighOperandLatency()
    [all …]

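HasLoopPHIUse (lines 1102-1118) is a worklist walk: starting from an instruction's defs, it follows in-loop copies transitively and reports whether the value reaches a PHI inside the loop, which would force a copy if the instruction were hoisted. A condensed sketch under those assumptions; the exit-block case at line 1112 is elided.

```cpp
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/MachineLoopInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

static bool feedsLoopPHI(const llvm::MachineInstr *MI,
                         const llvm::MachineLoop *CurLoop,
                         const llvm::MachineRegisterInfo *MRI) {
  llvm::SmallVector<const llvm::MachineInstr *, 8> Work(1, MI);
  do {
    const llvm::MachineInstr *Cur = Work.pop_back_val();
    for (const llvm::MachineOperand &MO : Cur->operands()) {
      if (!MO.isReg() || !MO.isDef() || !MO.getReg().isVirtual())
        continue;
      for (llvm::MachineInstr &UseMI : MRI->use_instructions(MO.getReg())) {
        if (UseMI.isPHI() && CurLoop->contains(&UseMI))
          return true;               // hoisting MI would require a copy
        if (UseMI.isCopy() && CurLoop->contains(&UseMI))
          Work.push_back(&UseMI);    // chase the value through copies
      }
    }
  } while (!Work.empty());
  return false;
}
```
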
D | DetectDeadLanes.cpp |
     423    const MachineInstr &UseMI = *MO.getParent();    in determineInitialUsedLanes() local
     424    if (UseMI.isKill())    in determineInitialUsedLanes()
     428    if (lowersToCopies(UseMI)) {    in determineInitialUsedLanes()
     429    assert(UseMI.getDesc().getNumDefs() == 1);    in determineInitialUsedLanes()
     430    const MachineOperand &Def = *UseMI.defs().begin();    in determineInitialUsedLanes()
     437    if (lowersToCopies(UseMI)) {    in determineInitialUsedLanes()
     439    CrossCopy = isCrossCopy(*MRI, UseMI, DstRC, MO);    in determineInitialUsedLanes()
     441    LLVM_DEBUG(dbgs() << "Copy across incompatible classes: " << UseMI);    in determineInitialUsedLanes()

D | OptimizePHIs.cpp |
     157    for (MachineInstr &UseMI : MRI->use_nodbg_instructions(DstReg)) {    in IsDeadPHICycle()
     158    if (!UseMI.isPHI() || !IsDeadPHICycle(&UseMI, PHIsInCycle))    in IsDeadPHICycle()

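IsDeadPHICycle above recurses through the use lists: a PHI is dead if every consumer of its result is itself a PHI already proven (or being proven) dead. A sketch, with PHIsInCycle doubling as the visited set that breaks the recursion on cycles; the real pass additionally caps the set's size.

```cpp
#include <cassert>
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

using PHISet = llvm::SmallPtrSet<llvm::MachineInstr *, 16>;

static bool isDeadPHICycle(llvm::MachineInstr *MI, PHISet &PHIsInCycle,
                           llvm::MachineRegisterInfo *MRI) {
  assert(MI->isPHI() && "expected a PHI");
  // Re-visiting a PHI on the current path means we closed a cycle.
  if (!PHIsInCycle.insert(MI).second)
    return true;
  llvm::Register DstReg = MI->getOperand(0).getReg();
  for (llvm::MachineInstr &UseMI : MRI->use_nodbg_instructions(DstReg))
    if (!UseMI.isPHI() || !isDeadPHICycle(&UseMI, PHIsInCycle, MRI))
      return false;
  return true;
}
```
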
/external/llvm/lib/CodeGen/ |
D | MachineTraceMetrics.cpp |
     628    static bool getDataDeps(const MachineInstr &UseMI,    in getDataDeps() argument
     632    if (UseMI.isDebugValue())    in getDataDeps()
     636    for (MachineInstr::const_mop_iterator I = UseMI.operands_begin(),    in getDataDeps()
     637    E = UseMI.operands_end(); I != E; ++I) {    in getDataDeps()
     650    Deps.push_back(DataDep(MRI, Reg, UseMI.getOperandNo(I)));    in getDataDeps()
     658    static void getPHIDeps(const MachineInstr &UseMI,    in getPHIDeps() argument
     665    assert(UseMI.isPHI() && UseMI.getNumOperands() % 2 && "Bad PHI");    in getPHIDeps()
     666    for (unsigned i = 1; i != UseMI.getNumOperands(); i += 2) {    in getPHIDeps()
     667    if (UseMI.getOperand(i + 1).getMBB() == Pred) {    in getPHIDeps()
     668    unsigned Reg = UseMI.getOperand(i).getReg();    in getPHIDeps()
     [all …]

D | LiveRangeEdit.cpp |
     166    MachineInstr *DefMI = nullptr, *UseMI = nullptr;    in foldAsLoad() local
     178    if (UseMI && UseMI != MI)    in foldAsLoad()
     183    UseMI = MI;    in foldAsLoad()
     186    if (!DefMI || !UseMI)    in foldAsLoad()
     192    LIS.getInstructionIndex(*UseMI))    in foldAsLoad()
     202    << " into single use: " << *UseMI);    in foldAsLoad()
     205    if (UseMI->readsWritesVirtualRegister(LI->reg, &Ops).second)    in foldAsLoad()
     208    MachineInstr *FoldMI = TII.foldMemoryOperand(*UseMI, Ops, *DefMI, &LIS);    in foldAsLoad()
     212    LIS.ReplaceMachineInstrInMaps(*UseMI, *FoldMI);    in foldAsLoad()
     213    UseMI->eraseFromParent();    in foldAsLoad()

D | TargetSchedule.cpp |
     156    const MachineInstr *UseMI, unsigned UseOperIdx) const {    in computeOperandLatency() argument
     163    if (UseMI) {    in computeOperandLatency()
     165    *UseMI, UseOperIdx);    in computeOperandLatency()
     195    if (!UseMI)    in computeOperandLatency()
     199    const MCSchedClassDesc *UseDesc = resolveSchedClass(UseMI);    in computeOperandLatency()
     202    unsigned UseIdx = findUseIdx(UseMI, UseOperIdx);    in computeOperandLatency()

D | RegisterScavenging.cpp |
     279    MachineBasicBlock::iterator &UseMI) {    in findSurvivorReg() argument
     336    UseMI = RestorePointMI;    in findSurvivorReg()
     372    MachineBasicBlock::iterator UseMI;    in scavengeRegister() local
     373    unsigned SReg = findSurvivorReg(I, Candidates, 25, UseMI);    in scavengeRegister()
     424    if (!TRI->saveScavengerRegister(*MBB, I, UseMI, RC, SReg)) {    in scavengeRegister()
     441    TII->loadRegFromStackSlot(*MBB, UseMI, SReg, Scavenged[SI].FrameIndex,    in scavengeRegister()
     443    II = std::prev(UseMI);    in scavengeRegister()
     449    Scavenged[SI].Restore = &*std::prev(UseMI);    in scavengeRegister()

D | DetectDeadLanes.cpp |
     426    const MachineInstr &UseMI = *MO.getParent();    in determineInitialUsedLanes() local
     427    if (UseMI.isKill())    in determineInitialUsedLanes()
     431    if (lowersToCopies(UseMI)) {    in determineInitialUsedLanes()
     432    assert(UseMI.getDesc().getNumDefs() == 1);    in determineInitialUsedLanes()
     433    const MachineOperand &Def = *UseMI.defs().begin();    in determineInitialUsedLanes()
     440    if (lowersToCopies(UseMI)) {    in determineInitialUsedLanes()
     442    CrossCopy = isCrossCopy(*MRI, UseMI, DstRC, MO);    in determineInitialUsedLanes()
     444    DEBUG(dbgs() << "Copy accross incompatible classes: " << UseMI);    in determineInitialUsedLanes()

D | RegisterCoalescer.cpp |
     703    MachineInstr *UseMI = MO.getParent();    in removeCopyByCommutingDef() local
     704    unsigned OpNo = &MO - &UseMI->getOperand(0);    in removeCopyByCommutingDef()
     705    SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI);    in removeCopyByCommutingDef()
     710    if (UseMI->isRegTiedToDefOperand(OpNo))    in removeCopyByCommutingDef()
     752    MachineInstr *UseMI = UseMO.getParent();    in removeCopyByCommutingDef() local
     753    if (UseMI->isDebugValue()) {    in removeCopyByCommutingDef()
     759    SlotIndex UseIdx = LIS->getInstructionIndex(*UseMI).getRegSlot(true);    in removeCopyByCommutingDef()
     770    if (UseMI == CopyMI)    in removeCopyByCommutingDef()
     772    if (!UseMI->isCopy())    in removeCopyByCommutingDef()
     774    if (UseMI->getOperand(0).getReg() != IntB.reg ||    in removeCopyByCommutingDef()
     [all …]

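Line 704 above uses a terse idiom worth spelling out: because an instruction's operands are stored contiguously, an operand's index can be recovered from its address. A standalone sketch; newer trees expose the same computation as MachineInstr::getOperandNo.

```cpp
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineOperand.h"

// Index of MO within its parent instruction's operand array.
static unsigned operandIndex(const llvm::MachineOperand &MO) {
  const llvm::MachineInstr *UseMI = MO.getParent();
  return &MO - &UseMI->getOperand(0);
}
```
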
D | MachineLICM.cpp |
     942    for (MachineInstr &UseMI : MRI->use_instructions(Reg)) {    in HasLoopPHIUse()
     944    if (UseMI.isPHI()) {    in HasLoopPHIUse()
     947    if (CurLoop->contains(&UseMI))    in HasLoopPHIUse()
     952    if (isExitBlock(UseMI.getParent()))    in HasLoopPHIUse()
     957    if (UseMI.isCopy() && CurLoop->contains(&UseMI))    in HasLoopPHIUse()
     958    Work.push_back(&UseMI);    in HasLoopPHIUse()
     972    for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {    in HasHighOperandLatency()
     973    if (UseMI.isCopyLike())    in HasHighOperandLatency()
     975    if (!CurLoop->contains(UseMI.getParent()))    in HasHighOperandLatency()
     977    for (unsigned i = 0, e = UseMI.getNumOperands(); i != e; ++i) {    in HasHighOperandLatency()
     [all …]

D | OptimizePHIs.cpp |
     147    for (MachineInstr &UseMI : MRI->use_instructions(DstReg)) {    in IsDeadPHICycle()
     148    if (!UseMI.isPHI() || !IsDeadPHICycle(&UseMI, PHIsInCycle))    in IsDeadPHICycle()

/external/swiftshader/third_party/llvm-10.0/llvm/lib/CodeGen/GlobalISel/ |
D | CombinerHelper.cpp |
     354    MachineInstr &UseMI = *UseMO.getParent();    in InsertInsnsWithoutSideEffectsBeforeUse() local
     356    MachineBasicBlock *InsertBB = UseMI.getParent();    in InsertInsnsWithoutSideEffectsBeforeUse()
     359    if (UseMI.isPHI()) {    in InsertInsnsWithoutSideEffectsBeforeUse()
     431    for (auto &UseMI : MRI.use_instructions(LoadValue.getReg())) {    in matchCombineExtendingLoads() local
     432    if (UseMI.getOpcode() == TargetOpcode::G_SEXT ||    in matchCombineExtendingLoads()
     433    UseMI.getOpcode() == TargetOpcode::G_ZEXT ||    in matchCombineExtendingLoads()
     434    UseMI.getOpcode() == TargetOpcode::G_ANYEXT) {    in matchCombineExtendingLoads()
     436    MRI.getType(UseMI.getOperand(0).getReg()),    in matchCombineExtendingLoads()
     437    UseMI.getOpcode(), &UseMI);    in matchCombineExtendingLoads()
     493    MachineInstr *UseMI = UseMO->getParent();    in applyCombineExtendingLoads() local
     [all …]

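matchCombineExtendingLoads (lines 431-437) scans every user of the loaded value for an extension it could fold into the load. A reduced sketch that only answers whether such a user exists; the real combiner also ranks the G_SEXT/G_ZEXT/G_ANYEXT candidates by preference.

```cpp
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetOpcodes.h"

static bool hasExtendingUse(llvm::Register LoadedReg,
                            const llvm::MachineRegisterInfo &MRI) {
  for (llvm::MachineInstr &UseMI : MRI.use_instructions(LoadedReg)) {
    unsigned Opc = UseMI.getOpcode();
    if (Opc == llvm::TargetOpcode::G_SEXT ||
        Opc == llvm::TargetOpcode::G_ZEXT ||
        Opc == llvm::TargetOpcode::G_ANYEXT)
      return true;                   // an extension the load could absorb
  }
  return false;
}
```
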
D | Localizer.cpp |
     151    MachineInstr &UseMI = *MOUse.getParent();    in localizeInterBlock() local
     152    if (MRI->hasOneUse(Reg) && !UseMI.isPHI())    in localizeInterBlock()
     153    InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(UseMI), LocalizedMI);    in localizeInterBlock()
     189    for (MachineInstr &UseMI : MRI->use_nodbg_instructions(Reg)) {    in localizeIntraBlock()
     190    if (!UseMI.isPHI())    in localizeIntraBlock()
     191    Users.insert(&UseMI);    in localizeIntraBlock()

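Lines 151-153 decide where a localized definition lands: directly before its sole non-PHI user, otherwise past the PHIs and labels at the head of the user's block. A sketch of that placement under those assumptions; the helper name is illustrative.

```cpp
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"

static void placeLocalizedDef(llvm::MachineInstr *LocalizedMI,
                              llvm::MachineInstr &UseMI,
                              llvm::MachineBasicBlock *InsertMBB,
                              llvm::MachineRegisterInfo &MRI,
                              llvm::Register Reg) {
  if (MRI.hasOneUse(Reg) && !UseMI.isPHI())
    // Single non-PHI user: sink the def right in front of it.
    InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(UseMI), LocalizedMI);
  else
    // Otherwise keep it at the top of the block, after PHIs and labels.
    InsertMBB->insert(InsertMBB->SkipPHIsAndLabels(InsertMBB->begin()),
                      LocalizedMI);
}
```
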
/external/llvm/lib/Target/ARM/ |
D | MLxExpansionPass.cpp |
     124    MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);    in getDefReg() local
     125    if (UseMI->getParent() != MBB)    in getDefReg()
     128    while (UseMI->isCopy() || UseMI->isInsertSubreg()) {    in getDefReg()
     129    Reg = UseMI->getOperand(0).getReg();    in getDefReg()
     133    UseMI = &*MRI->use_instr_nodbg_begin(Reg);    in getDefReg()
     134    if (UseMI->getParent() != MBB)    in getDefReg()

/external/swiftshader/third_party/llvm-10.0/llvm/lib/Target/ARM/ |
D | MLxExpansionPass.cpp |
     122    MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);    in getDefReg() local
     123    if (UseMI->getParent() != MBB)    in getDefReg()
     126    while (UseMI->isCopy() || UseMI->isInsertSubreg()) {    in getDefReg()
     127    Reg = UseMI->getOperand(0).getReg();    in getDefReg()
     130    UseMI = &*MRI->use_instr_nodbg_begin(Reg);    in getDefReg()
     131    if (UseMI->getParent() != MBB)    in getDefReg()

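Both MLxExpansionPass copies above implement the same chase: starting from a def, follow single-block chains of COPY and INSERT_SUBREG users to find the register the value finally lands in. A condensed sketch; the guard against leaving virtual registers mid-chain is an assumption mirrored from the pass, and the function name is made up.

```cpp
#include "llvm/CodeGen/MachineRegisterInfo.h"

static unsigned chaseCopiesInBlock(unsigned Reg,
                                   const llvm::MachineBasicBlock *MBB,
                                   llvm::MachineRegisterInfo *MRI) {
  if (MRI->use_nodbg_empty(Reg))
    return Reg;
  llvm::MachineInstr *UseMI = &*MRI->use_instr_nodbg_begin(Reg);
  if (UseMI->getParent() != MBB)
    return Reg;                      // chain leaves the block: stop
  while (UseMI->isCopy() || UseMI->isInsertSubreg()) {
    Reg = UseMI->getOperand(0).getReg();
    if (!llvm::Register::isVirtualRegister(Reg) || MRI->use_nodbg_empty(Reg))
      break;
    UseMI = &*MRI->use_instr_nodbg_begin(Reg);
    if (UseMI->getParent() != MBB)
      break;
  }
  return Reg;
}
```
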
/external/llvm/lib/Target/PowerPC/ |
D | PPCInstrInfo.cpp |
     143    const MachineInstr &UseMI,    in getOperandLatency() argument
     146    UseMI, UseIdx);    in getOperandLatency()
     165    if (UseMI.isBranch() && IsRegCR) {    in getOperandLatency()
    1213    bool PPCInstrInfo::FoldImmediate(MachineInstr &UseMI, MachineInstr &DefMI,    in FoldImmediate() argument
    1229    const MCInstrDesc &UseMCID = UseMI.getDesc();    in FoldImmediate()
    1236    for (UseIdx = 0; UseIdx < UseMI.getNumOperands(); ++UseIdx)    in FoldImmediate()
    1237    if (UseMI.getOperand(UseIdx).isReg() &&    in FoldImmediate()
    1238    UseMI.getOperand(UseIdx).getReg() == Reg)    in FoldImmediate()
    1241    assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");    in FoldImmediate()
    1273    UseMI.getOperand(UseIdx).setReg(ZeroReg);    in FoldImmediate()
    [all …]

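PPCInstrInfo::FoldImmediate (lines 1236-1241) first locates which operand of UseMI reads the defined register before rewriting it (to the zero register at line 1273). That scan is target-independent and worth isolating as a sketch:

```cpp
#include <cassert>
#include "llvm/CodeGen/MachineInstr.h"

// Index of the first operand of UseMI that reads Reg; asserts if absent,
// mirroring the pass's "Cannot find Reg in UseMI" invariant.
static unsigned findUseOperandIdx(const llvm::MachineInstr &UseMI,
                                  llvm::Register Reg) {
  unsigned UseIdx = 0;
  for (unsigned E = UseMI.getNumOperands(); UseIdx != E; ++UseIdx)
    if (UseMI.getOperand(UseIdx).isReg() &&
        UseMI.getOperand(UseIdx).getReg() == Reg)
      break;
  assert(UseIdx < UseMI.getNumOperands() && "Cannot find Reg in UseMI");
  return UseIdx;
}
```
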