Lines Matching +full:robust +full:- +full:predicates
1 //===-- ARMLoadStoreOptimizer.cpp - ARM load / store opt. pass ------------===//
8 //===----------------------------------------------------------------------===//
13 //===----------------------------------------------------------------------===//
49 #define DEBUG_TYPE "arm-ldst-opt"
65 /// disabled. This can be used to create libraries that are robust even when
69 AssumeMisalignedLoadStores("arm-assume-misaligned-load-store", cl::Hidden,
79 /// Post-register allocation pass that combines load / store instructions to
175 INITIALIZE_PASS(ARMLoadStoreOpt, "arm-load-store-opt", ARM_LOAD_STORE_OPT_NAME, false, false)
194 unsigned OffField = MI.getOperand(NumOperands - 3).getImm(); in getMemoryOpOffset()
213 return -Offset; in getMemoryOpOffset()
249 // tLDMIA is writeback-only - unless the base register is in the input in getLoadStoreMultipleOpcode()
258 // There is no non-writeback tSTMIA either. in getLoadStoreMultipleOpcode()
411 switch (MI->getOpcode()) { in getLSMultipleTransferSize()
446 return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 4; in getLSMultipleTransferSize()
449 return (MI->getNumOperands() - MI->getDesc().getNumOperands() + 1) * 8; in getLSMultipleTransferSize()
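
The two return expressions above compute the number of bytes moved by a load/store-multiple instruction. A minimal standalone sketch of that arithmetic, with hypothetical names (lsMultipleTransferSize) and no LLVM dependencies:

  #include <cassert>

  // The register list is the variadic tail of the operand list; the "+ 1" and
  // the scale (4 bytes for core/S registers, 8 for D registers) mirror the
  // expressions in getLSMultipleTransferSize() above.
  static unsigned lsMultipleTransferSize(unsigned NumOperands,
                                         unsigned NumFixedOperands,
                                         unsigned BytesPerReg) {
    return (NumOperands - NumFixedOperands + 1) * BytesPerReg;
  }

  int main() {
    // Mirrors "(getNumOperands() - getDesc().getNumOperands() + 1) * 4".
    assert(lsMultipleTransferSize(7, 4, 4) == 16);
    return 0;
  }
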
463 // the first non-updateable instruction (if any). in UpdateBaseRegUses()
466 unsigned Opc = MBBI->getOpcode(); in UpdateBaseRegUses()
468 if (MBBI->readsRegister(Base)) { in UpdateBaseRegUses()
479 // before predicates. in UpdateBaseRegUses()
481 MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3); in UpdateBaseRegUses()
483 Offset = MO.getImm() - WordOffset * getImmScale(Opc); in UpdateBaseRegUses()
499 MBBI->getOperand(MBBI->getDesc().getNumOperands() - 3); in UpdateBaseRegUses()
502 MO.getImm() - WordOffset * 4; in UpdateBaseRegUses()
503 if (Offset >= 0 && TL->isLegalAddImmediate(Offset)) { in UpdateBaseRegUses()
504 // FIXME: Swap ADDS<->SUBS if Offset < 0, erase instruction if in UpdateBaseRegUses()
518 } else if (definesCPSR(*MBBI) || MBBI->isCall() || MBBI->isBranch()) { in UpdateBaseRegUses()
527 AddDefaultT1CC(BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base), true) in UpdateBaseRegUses()
532 if (MBBI->killsRegister(Base) || MBBI->definesRegister(Base)) in UpdateBaseRegUses()
540 // the successor blocks' live-in sets. This means we can't trust that in UpdateBaseRegUses()
543 if (MBBI != MBB.end()) --MBBI; in UpdateBaseRegUses()
545 BuildMI(MBB, MBBI, DL, TII->get(ARM::tSUBi8), Base), true) in UpdateBaseRegUses()
577 --LiveRegPos; in moveLiveRegsBefore()
610 // non-writeback. in CreateLoadStoreMulti()
628 } else if (Offset == -4 * (int)NumRegs + 4 && haveIBAndDA) { in CreateLoadStoreMulti()
630 } else if (Offset == -4 * (int)NumRegs && isNotVFP && !isThumb1) { in CreateLoadStoreMulti()
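
The offset comparisons above select the LDM/STM addressing submode from where the base register points within the run of accesses. A simplified standalone model of that selection (pickSubMode and SubMode are hypothetical names; the availability checks haveIBAndDA, isNotVFP and the Thumb restrictions are assumed to have passed):

  #include <cassert>

  enum class SubMode { IA, IB, DA, DB, None };

  // Offset is the byte offset of the lowest-addressed transferred word
  // relative to the base register; NumRegs is the number of registers merged.
  static SubMode pickSubMode(int Offset, int NumRegs) {
    if (Offset == 0)
      return SubMode::IA;              // base points at the first word
    if (Offset == 4)
      return SubMode::IB;              // base points one word below it
    if (Offset == -4 * NumRegs + 4)
      return SubMode::DA;              // base points at the last word
    if (Offset == -4 * NumRegs)
      return SubMode::DB;              // base points one word above the last
    return SubMode::None;              // otherwise a new base is materialized
  }

  int main() {
    assert(pickSubMode(0, 3) == SubMode::IA);
    assert(pickSubMode(-12, 3) == SubMode::DB);
    return 0;
  }
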
653 NewBase = Regs[NumRegs-1].first; in CreateLoadStoreMulti()
676 Offset = -Offset; in CreateLoadStoreMulti()
683 if (!TL->isLegalAddImmediate(Offset)) in CreateLoadStoreMulti()
702 !STI->hasV6Ops()) { in CreateLoadStoreMulti()
703 // thumbv4t doesn't have lo->lo copies, and we can't predicate tMOVSr in CreateLoadStoreMulti()
706 BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVSr), NewBase) in CreateLoadStoreMulti()
709 BuildMI(MBB, InsertBefore, DL, TII->get(ARM::tMOVr), NewBase) in CreateLoadStoreMulti()
719 BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase) in CreateLoadStoreMulti()
724 BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase), true) in CreateLoadStoreMulti()
728 BuildMI(MBB, InsertBefore, DL, TII->get(BaseOpc), NewBase) in CreateLoadStoreMulti()
745 // - There is no writeback (LDM of base register), in CreateLoadStoreMulti()
746 // - the base register is killed by the merged instruction, in CreateLoadStoreMulti()
747 // - or it's safe to overwrite the condition flags, i.e. to insert a SUBS in CreateLoadStoreMulti()
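
As a minimal sketch of the disjunction those three bullet points describe (safeToMergeThumb1Writeback is a hypothetical name; the three booleans stand for values computed elsewhere in CreateLoadStoreMulti):

  // Thumb1 LDM/STM are effectively writeback-only (see the tLDMIA comment
  // above), so undoing an unwanted base update takes a flag-setting SUBS.
  // Merging is therefore only done when one of the bulleted conditions holds.
  static bool safeToMergeThumb1Writeback(bool HasWriteback, bool BaseKill,
                                         bool SafeToClobberCPSR) {
    return !HasWriteback || BaseKill || SafeToClobberCPSR;
  }
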
765 MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode)); in CreateLoadStoreMulti()
778 MIB = BuildMI(MBB, InsertBefore, DL, TII->get(Opcode)); in CreateLoadStoreMulti()
801 TII->get(LoadStoreOpcode)); in CreateLoadStoreDouble()
816 unsigned Opcode = First->getOpcode(); in MergeOpsUpdate()
822 // Determine list of registers and list of implicit super-register defs. in MergeOpsUpdate()
833 // Collect any implicit defs of super-registers, after merging we can't in MergeOpsUpdate()
836 for (const MachineOperand &MO : MI->implicit_operands()) { in MergeOpsUpdate()
844 // We can ignore cases where the super-reg is read and written. in MergeOpsUpdate()
845 if (MI->readsRegister(DefReg)) in MergeOpsUpdate()
856 MachineBasicBlock &MBB = *LatestMI->getParent(); in MergeOpsUpdate()
859 bool BaseKill = LatestMI->killsRegister(Base); in MergeOpsUpdate()
862 DebugLoc DL = First->getDebugLoc(); in MergeOpsUpdate()
895 // If the previous loads defined a super-reg, then we have to mark earlier in MergeOpsUpdate()
896 // operands undef; Replicate the super-reg def on the merged instruction. in MergeOpsUpdate()
910 MachineInstrBuilder MIB(*Merged->getParent()->getParent(), Merged); in MergeOpsUpdate()
948 // SP-relative loads/stores. in mayCombineMisaligned()
950 STI.getFrameLowering()->getTransientStackAlignment() >= 4) in mayCombineMisaligned()
958 unsigned Opcode = FirstMI->getOpcode(); in FormCandidates()
970 unsigned PRegNum = PMO.isUndef() ? UINT_MAX : TRI->getEncodingValue(PReg); in FormCandidates()
975 STI->isThumb2() && isNotVFP && isValidLSDoubleOffset(Offset); in FormCandidates()
978 if (STI->isCortexM3() && isi32Load(Opcode) && in FormCandidates()
985 if (STI->hasSlowOddRegister() && !isNotVFP && (PRegNum % 2) == 1) in FormCandidates()
1008 unsigned RegNum = MO.isUndef() ? UINT_MAX : TRI->getEncodingValue(Reg); in FormCandidates()
1042 Candidate->Instrs.push_back(MemOps[C].MI); in FormCandidates()
1043 Candidate->LatestMIIdx = Latest - SIndex; in FormCandidates()
1044 Candidate->EarliestMIIdx = Earliest - SIndex; in FormCandidates()
1045 Candidate->InsertPos = MemOps[Latest].Position; in FormCandidates()
1048 Candidate->CanMergeToLSMulti = CanMergeToLSMulti; in FormCandidates()
1049 Candidate->CanMergeToLSDouble = CanMergeToLSDouble; in FormCandidates()
1132 case ARM::tSUBi8: Scale = -4; CheckCPSRDef = true; break; in isIncrementOrDecrement()
1134 case ARM::SUBri: Scale = -1; CheckCPSRDef = true; break; in isIncrementOrDecrement()
1138 case ARM::tSUBspi: Scale = -4; CheckCPSRDef = false; break; in isIncrementOrDecrement()
1159 MachineBasicBlock &MBB = *MBBI->getParent(); in findIncDecBefore()
1167 while (PrevMBBI->isDebugValue() && PrevMBBI != BeginMBBI) in findIncDecBefore()
1168 --PrevMBBI; in findIncDecBefore()
1179 MachineBasicBlock &MBB = *MBBI->getParent(); in findIncDecAfter()
1183 while (NextMBBI != EndMBBI && NextMBBI->isDebugValue()) in findIncDecAfter()
1200 /// rn := rn - 4 * 3;
1208 const MachineOperand &BaseOP = MI->getOperand(0); in MergeBaseUpdateLSMultiple()
1213 unsigned Opcode = MI->getOpcode(); in MergeBaseUpdateLSMultiple()
1214 DebugLoc DL = MI->getDebugLoc(); in MergeBaseUpdateLSMultiple()
1218 for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) in MergeBaseUpdateLSMultiple()
1219 if (MI->getOperand(i).getReg() == Base) in MergeBaseUpdateLSMultiple()
1223 MachineBasicBlock &MBB = *MI->getParent(); in MergeBaseUpdateLSMultiple()
1229 if (Mode == ARM_AM::ia && Offset == -Bytes) { in MergeBaseUpdateLSMultiple()
1231 } else if (Mode == ARM_AM::ib && Offset == -Bytes) { in MergeBaseUpdateLSMultiple()
1236 ((Mode != ARM_AM::da && Mode != ARM_AM::db) || Offset != -Bytes)) { in MergeBaseUpdateLSMultiple()
1242 if (!MBB.getParent()->getFunction()->optForMinSize() || !BaseKill) in MergeBaseUpdateLSMultiple()
1246 for (unsigned i = 2, e = MI->getNumOperands(); i != e; ++i) in MergeBaseUpdateLSMultiple()
1247 if (MI->getOperand(i).getReg() >= ARM::R8) { in MergeBaseUpdateLSMultiple()
1262 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)) in MergeBaseUpdateLSMultiple()
1268 for (unsigned OpNum = 3, e = MI->getNumOperands(); OpNum != e; ++OpNum) in MergeBaseUpdateLSMultiple()
1269 MIB.addOperand(MI->getOperand(OpNum)); in MergeBaseUpdateLSMultiple()
1272 MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end()); in MergeBaseUpdateLSMultiple()
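
The Mode/Offset comparisons above decide when a separate add/sub of the base register can be folded into the LDM/STM as writeback, in the spirit of the "rn := rn - 4 * 3" example in the function's comment. A simplified standalone model (foldBaseUpdate, AM and Folded are hypothetical names; the Thumb1 and minsize special cases are ignored), where Bytes is the transfer size and Offset is the increment found immediately before or after the instruction:

  enum class AM { IA, IB, DA, DB };

  struct Folded {
    bool CanFold;
    AM NewMode;    // submode of the resulting writeback LDM/STM
  };

  // An inc/dec found *before* the LDM/STM flips the submode (e.g.
  // "sub rn, #12; ldmia rn, {r0-r2}" becomes "ldmdb rn!, {r0-r2}"); one found
  // *after* keeps the submode and merely enables writeback.
  static Folded foldBaseUpdate(AM Mode, int Offset, int Bytes, bool FoundBefore) {
    if (FoundBefore) {
      if (Mode == AM::IA && Offset == -Bytes)
        return {true, AM::DB};
      if (Mode == AM::IB && Offset == -Bytes)
        return {true, AM::DA};
      return {false, Mode};
    }
    if ((Mode == AM::IA || Mode == AM::IB) && Offset == Bytes)
      return {true, Mode};
    if ((Mode == AM::DA || Mode == AM::DB) && Offset == -Bytes)
      return {true, Mode};
    return {false, Mode};
  }
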
1337 unsigned Opcode = MI->getOpcode(); in MergeBaseUpdateLoadStore()
1338 DebugLoc DL = MI->getDebugLoc(); in MergeBaseUpdateLoadStore()
1343 if (MI->getOperand(2).getImm() != 0) in MergeBaseUpdateLoadStore()
1345 if (isAM5 && ARM_AM::getAM5Offset(MI->getOperand(2).getImm()) != 0) in MergeBaseUpdateLoadStore()
1348 // Can't do the merge if the destination register is the same as the would-be in MergeBaseUpdateLoadStore()
1350 if (MI->getOperand(0).getReg() == Base) in MergeBaseUpdateLoadStore()
1356 MachineBasicBlock &MBB = *MI->getParent(); in MergeBaseUpdateLoadStore()
1364 } else if (Offset == -Bytes) { in MergeBaseUpdateLoadStore()
1370 } else if (!isAM5 && Offset == -Bytes) { in MergeBaseUpdateLoadStore()
1382 // (There are no base-updating versions of VLDR/VSTR instructions, but the in MergeBaseUpdateLoadStore()
1383 // updating load/store-multiple instructions can be used with only one in MergeBaseUpdateLoadStore()
1385 MachineOperand &MO = MI->getOperand(0); in MergeBaseUpdateLoadStore()
1386 BuildMI(MBB, MBBI, DL, TII->get(NewOpc)) in MergeBaseUpdateLoadStore()
1396 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg()) in MergeBaseUpdateLoadStore()
1401 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg()) in MergeBaseUpdateLoadStore()
1407 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), MI->getOperand(0).getReg()) in MergeBaseUpdateLoadStore()
1412 MachineOperand &MO = MI->getOperand(0); in MergeBaseUpdateLoadStore()
1413 // FIXME: post-indexed stores use am2offset_imm, which still encodes in MergeBaseUpdateLoadStore()
1414 // the vestigial zero-reg offset register. When that's fixed, this clause in MergeBaseUpdateLoadStore()
1419 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base) in MergeBaseUpdateLoadStore()
1424 BuildMI(MBB, MBBI, DL, TII->get(NewOpc), Base) in MergeBaseUpdateLoadStore()
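
For a single LDR/STR the same idea applies with stricter preconditions, visible above: the instruction's own immediate must be zero, the transferred register must not be the base, and the adjacent update must be exactly plus or minus the access size. A simplified standalone model (foldIntoIndexedForm and Form are hypothetical names; the VLDR/VSTR (AM5) restrictions noted in the comments are ignored):

  enum class Form { None, PreIndexed, PostIndexed };

  static Form foldIntoIndexedForm(int ImmOffset, int UpdateOffset, int Bytes,
                                  bool UpdateIsBefore, bool RegEqualsBase) {
    if (ImmOffset != 0 || RegEqualsBase)
      return Form::None;
    if (UpdateOffset != Bytes && UpdateOffset != -Bytes)
      return Form::None;
    // "add rn, #Bytes; ldr r0, [rn]"  ->  "ldr r0, [rn, #Bytes]!"
    // "ldr r0, [rn]; add rn, #Bytes"  ->  "ldr r0, [rn], #Bytes"
    return UpdateIsBefore ? Form::PreIndexed : Form::PostIndexed;
  }
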
1458 if (Offset == 8 || Offset == -8) { in MergeBaseUpdateLSDouble()
1462 if (Offset == 8 || Offset == -8) { in MergeBaseUpdateLSDouble()
1470 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc)); in MergeBaseUpdateLSDouble()
1481 assert(TII->get(Opcode).getNumOperands() == 6 && in MergeBaseUpdateLSDouble()
1482 TII->get(NewOpc).getNumOperands() == 7 && in MergeBaseUpdateLSDouble()
1488 MIB->setMemRefs(MI.memoperands_begin(), MI.memoperands_end()); in MergeBaseUpdateLSDouble()
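
The two "Offset == 8 || Offset == -8" checks above are the LDRD/STRD variant of the same folding: the pair transfers 8 bytes, so only an adjustment of exactly that size can be absorbed, into a pre-indexed form when it precedes the instruction and a post-indexed form when it follows. A simplified standalone sketch (foldDoubleBaseUpdate and DForm are hypothetical names):

  enum class DForm { None, PreIndexed, PostIndexed };

  // A t2LDRD/t2STRD moves an 8-byte pair, so only a +/-8 base adjustment can
  // be absorbed as writeback.
  static DForm foldDoubleBaseUpdate(int Offset, bool FoundBefore) {
    if (Offset != 8 && Offset != -8)
      return DForm::None;
    return FoundBefore ? DForm::PreIndexed : DForm::PostIndexed;
  }
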
1527 // Don't touch volatile memory accesses - we may be changing their order. in isMemoryOp()
1558 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(), in InsertLDR_STR()
1559 TII->get(NewOpc)) in InsertLDR_STR()
1564 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, MBBI->getDebugLoc(), in InsertLDR_STR()
1565 TII->get(NewOpc)) in InsertLDR_STR()
1575 unsigned Opcode = MI->getOpcode(); in FixInvalidRegPairOp()
1579 const MachineOperand &BaseOp = MI->getOperand(2); in FixInvalidRegPairOp()
1581 unsigned EvenReg = MI->getOperand(0).getReg(); in FixInvalidRegPairOp()
1582 unsigned OddReg = MI->getOperand(1).getReg(); in FixInvalidRegPairOp()
1583 unsigned EvenRegNum = TRI->getDwarfRegNum(EvenReg, false); in FixInvalidRegPairOp()
1584 unsigned OddRegNum = TRI->getDwarfRegNum(OddReg, false); in FixInvalidRegPairOp()
1589 (Opcode == ARM::LDRD || Opcode == ARM::t2LDRDi8) && STI->isCortexM3(); in FixInvalidRegPairOp()
1600 MI->getOperand(0).isDead() : MI->getOperand(0).isKill(); in FixInvalidRegPairOp()
1601 bool EvenUndef = MI->getOperand(0).isUndef(); in FixInvalidRegPairOp()
1603 MI->getOperand(1).isDead() : MI->getOperand(1).isKill(); in FixInvalidRegPairOp()
1604 bool OddUndef = MI->getOperand(1).isUndef(); in FixInvalidRegPairOp()
1607 bool OffKill = isT2 ? false : MI->getOperand(3).isKill(); in FixInvalidRegPairOp()
1608 bool OffUndef = isT2 ? false : MI->getOperand(3).isUndef(); in FixInvalidRegPairOp()
1620 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc)) in FixInvalidRegPairOp()
1627 BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(NewOpc)) in FixInvalidRegPairOp()
1646 DebugLoc dl = MBBI->getDebugLoc(); in FixInvalidRegPairOp()
1648 // re-defed by the load, make sure the first load does not clobber it. in FixInvalidRegPairOp()
1651 (TRI->regsOverlap(EvenReg, BaseReg))) { in FixInvalidRegPairOp()
1652 assert(!TRI->regsOverlap(OddReg, BaseReg)); in FixInvalidRegPairOp()
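
The regsOverlap checks above guard the order in which a split LDRD is re-emitted as two single loads. A standalone model of that ordering decision (splitLoadPair, SingleLoad and EvenOverlapsBase are hypothetical names; EvenOverlapsBase stands for TRI->regsOverlap(EvenReg, BaseReg)):

  #include <array>

  struct SingleLoad {
    unsigned DestReg;
    int Offset;
  };

  // Splitting "ldrd <even>, <odd>, [base, #imm]" into two loads: if the even
  // destination overlaps the base register, the odd half (at #imm + 4) goes
  // first so the base is still intact when the second load reads it.
  static std::array<SingleLoad, 2> splitLoadPair(unsigned EvenReg, unsigned OddReg,
                                                 int OffImm, bool EvenOverlapsBase) {
    SingleLoad EvenHalf{EvenReg, OffImm};
    SingleLoad OddHalf{OddReg, OffImm + 4};
    if (EvenOverlapsBase)
      return {OddHalf, EvenHalf};
    return {EvenHalf, OddHalf};
  }
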
1712 unsigned Opcode = MBBI->getOpcode(); in LoadStoreMultipleOpti()
1713 const MachineOperand &MO = MBBI->getOperand(0); in LoadStoreMultipleOpti()
1741 if (TRI->regsOverlap(Reg, E.MI->getOperand(0).getReg())) { in LoadStoreMultipleOpti()
1757 if (Offset < MI->Offset) { in LoadStoreMultipleOpti()
1761 if (Offset == MI->Offset) { in LoadStoreMultipleOpti()
1777 --Position; in LoadStoreMultipleOpti()
1779 } else if (MBBI->isDebugValue()) { in LoadStoreMultipleOpti()
1781 } else if (MBBI->getOpcode() == ARM::t2LDRDi8 || in LoadStoreMultipleOpti()
1782 MBBI->getOpcode() == ARM::t2STRDi8) { in LoadStoreMultipleOpti()
1805 return M0->InsertPos < M1->InsertPos; in LoadStoreMultipleOpti()
1812 if (Candidate->CanMergeToLSMulti || Candidate->CanMergeToLSDouble) { in LoadStoreMultipleOpti()
1817 unsigned Opcode = Merged->getOpcode(); in LoadStoreMultipleOpti()
1823 for (MachineInstr *MI : Candidate->Instrs) { in LoadStoreMultipleOpti()
1829 assert(Candidate->Instrs.size() == 1); in LoadStoreMultipleOpti()
1830 if (MergeBaseUpdateLoadStore(Candidate->Instrs.front())) in LoadStoreMultipleOpti()
1860 (MBBI->getOpcode() == ARM::BX_RET || in MergeReturnIntoLDM()
1861 MBBI->getOpcode() == ARM::tBX_RET || in MergeReturnIntoLDM()
1862 MBBI->getOpcode() == ARM::MOVPCLR)) { in MergeReturnIntoLDM()
1865 while (PrevI->isDebugValue() && PrevI != MBB.begin()) in MergeReturnIntoLDM()
1866 --PrevI; in MergeReturnIntoLDM()
1872 MachineOperand &MO = PrevMI.getOperand(PrevMI.getNumOperands() - 1); in MergeReturnIntoLDM()
1877 Opcode == ARM::LDMIA_UPD) && "Unsupported multiple load-return!"); in MergeReturnIntoLDM()
1878 PrevMI.setDesc(TII->get(NewOpc)); in MergeReturnIntoLDM()
1891 MBBI->getOpcode() != ARM::tBX_RET) in CombineMovBx()
1895 --Prev; in CombineMovBx()
1896 if (Prev->getOpcode() != ARM::tMOVr || !Prev->definesRegister(ARM::LR)) in CombineMovBx()
1899 for (auto Use : Prev->uses()) in CombineMovBx()
1901 AddDefaultPred(BuildMI(MBB, MBBI, MBBI->getDebugLoc(), TII->get(ARM::tBX)) in CombineMovBx()
1918 TL = STI->getTargetLowering(); in runOnMachineFunction()
1920 TII = STI->getInstrInfo(); in runOnMachineFunction()
1921 TRI = STI->getRegisterInfo(); in runOnMachineFunction()
1924 isThumb2 = AFI->isThumb2Function(); in runOnMachineFunction()
1925 isThumb1 = AFI->isThumbFunction() && !isThumb2; in runOnMachineFunction()
1932 if (STI->hasV5TOps()) in runOnMachineFunction()
1947 "ARM pre- register allocation load / store optimization pass"
1950 /// Pre-register allocation pass that moves load / stores from consecutive
1987 INITIALIZE_PASS(ARMPreAllocLoadStoreOpt, "arm-prera-load-store-opt",
1996 TII = STI->getInstrInfo(); in runOnMachineFunction()
1997 TRI = STI->getRegisterInfo(); in runOnMachineFunction()
2019 if (I->isDebugValue() || MemOps.count(&*I)) in IsSafeAndProfitableToMove()
2021 if (I->isCall() || I->isTerminator() || I->hasUnmodeledSideEffects()) in IsSafeAndProfitableToMove()
2023 if (isLd && I->mayStore()) in IsSafeAndProfitableToMove()
2026 if (I->mayLoad()) in IsSafeAndProfitableToMove()
2032 if (I->mayStore()) in IsSafeAndProfitableToMove()
2035 for (unsigned j = 0, NumOps = I->getNumOperands(); j != NumOps; ++j) { in IsSafeAndProfitableToMove()
2036 MachineOperand &MO = I->getOperand(j); in IsSafeAndProfitableToMove()
2040 if (MO.isDef() && TRI->regsOverlap(Reg, Base)) in IsSafeAndProfitableToMove()
2064 if (!STI->hasV5TEOps()) in CanFormLdStDWord()
2067 // FIXME: VLDRS / VSTRS -> VLDRD / VSTRD in CanFormLdStDWord()
2069 unsigned Opcode = Op0->getOpcode(); in CanFormLdStDWord()
2089 if (!Op0->hasOneMemOperand() || in CanFormLdStDWord()
2090 (*Op0->memoperands_begin())->isVolatile()) in CanFormLdStDWord()
2093 unsigned Align = (*Op0->memoperands_begin())->getAlignment(); in CanFormLdStDWord()
2094 const Function *Func = MF->getFunction(); in CanFormLdStDWord()
2095 unsigned ReqAlign = STI->hasV6Ops() in CanFormLdStDWord()
2096 ? TD->getABITypeAlignment(Type::getInt64Ty(Func->getContext())) in CanFormLdStDWord()
2097 : 8; // Pre-v6 needs 8-byte alignment in CanFormLdStDWord()
2105 if (OffImm >= Limit || (OffImm <= -Limit) || (OffImm & (Scale-1))) in CanFormLdStDWord()
2112 OffImm = -OffImm; in CanFormLdStDWord()
2115 if (OffImm >= Limit || (OffImm & (Scale-1))) in CanFormLdStDWord()
2119 FirstReg = Op0->getOperand(0).getReg(); in CanFormLdStDWord()
2120 SecondReg = Op1->getOperand(0).getReg(); in CanFormLdStDWord()
2123 BaseReg = Op0->getOperand(1).getReg(); in CanFormLdStDWord()
2125 dl = Op0->getDebugLoc(); in CanFormLdStDWord()
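
The offset checks above determine whether a shared immediate fits an LDRD/STRD encoding: magnitude strictly below the limit and a multiple of the scale. Restated as a small standalone predicate (isValidDWordOffset is a hypothetical name; Scale is assumed to be a power of two, as the (Scale-1) mask above requires):

  #include <cassert>

  static bool isValidDWordOffset(int OffImm, int Limit, int Scale) {
    if (OffImm >= Limit || OffImm <= -Limit)
      return false;
    return (OffImm & (Scale - 1)) == 0;   // must be Scale-aligned
  }

  int main() {
    assert(isValidDWordOffset(8, 1024, 4));
    assert(!isValidDWordOffset(6, 1024, 4));      // not a multiple of 4
    assert(!isValidDWordOffset(-1024, 1024, 4));  // magnitude at the limit
    return 0;
  }
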
2157 for (int i = Ops.size() - 1; i >= 0; --i) { in RescheduleOps()
2170 = getLoadStoreMultipleOpcode(Op->getOpcode(), ARM_AM::ia); in RescheduleOps()
2192 for (int i = NumMove-1; i >= 0; --i) { in RescheduleOps()
2194 MemRegs.insert(Ops[i]->getOperand(0).getReg()); in RescheduleOps()
2199 bool DoMove = (LastLoc - FirstLoc) <= NumMove*4; // FIXME: Tune this. in RescheduleOps()
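
The DoMove test above is the profitability heuristic: only reschedule when the candidate memory operations already sit close together. Restated standalone (worthRescheduling is a hypothetical name; FirstLoc/LastLoc are the position indices used by the pass):

  // The FIXME in the original applies here too: the "4 instructions per moved
  // memory op" budget is an untuned guess.
  static bool worthRescheduling(unsigned FirstLoc, unsigned LastLoc,
                                unsigned NumMove) {
    return (LastLoc - FirstLoc) <= NumMove * 4;
  }
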
2209 while (InsertPos != MBB->end() && in RescheduleOps()
2210 (MemOps.count(&*InsertPos) || InsertPos->isDebugValue())) in RescheduleOps()
2216 MachineInstr *Op1 = Ops[Ops.size()-2]; in RescheduleOps()
2230 const MCInstrDesc &MCID = TII->get(NewOpc); in RescheduleOps()
2231 const TargetRegisterClass *TRC = TII->getRegClass(MCID, 0, TRI, *MF); in RescheduleOps()
2232 MRI->constrainRegClass(FirstReg, TRC); in RescheduleOps()
2233 MRI->constrainRegClass(SecondReg, TRC); in RescheduleOps()
2247 MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1)); in RescheduleOps()
2261 MIB.setMemRefs(Op0->mergeMemRefsWith(*Op1)); in RescheduleOps()
2265 MBB->erase(Op0); in RescheduleOps()
2266 MBB->erase(Op1); in RescheduleOps()
2270 MRI->setRegAllocationHint(FirstReg, ARMRI::RegPairEven, SecondReg); in RescheduleOps()
2271 MRI->setRegAllocationHint(SecondReg, ARMRI::RegPairOdd, FirstReg); in RescheduleOps()
2277 MBB->splice(InsertPos, MBB, Op); in RescheduleOps()
2301 MachineBasicBlock::iterator MBBI = MBB->begin(); in RescheduleLoadStoreInstrs()
2302 MachineBasicBlock::iterator E = MBB->end(); in RescheduleLoadStoreInstrs()
2331 for (unsigned i = 0, e = BI->second.size(); i != e; ++i) { in RescheduleLoadStoreInstrs()
2332 if (Offset == getMemoryOpOffset(*BI->second[i])) { in RescheduleLoadStoreInstrs()
2338 BI->second.push_back(&MI); in RescheduleLoadStoreInstrs()
2347 for (unsigned i = 0, e = BI->second.size(); i != e; ++i) { in RescheduleLoadStoreInstrs()
2348 if (Offset == getMemoryOpOffset(*BI->second[i])) { in RescheduleLoadStoreInstrs()
2354 BI->second.push_back(&MI); in RescheduleLoadStoreInstrs()
2364 --Loc; in RescheduleLoadStoreInstrs()
2369 // Re-schedule loads. in RescheduleLoadStoreInstrs()
2377 // Re-schedule stores. in RescheduleLoadStoreInstrs()