//===-- ARMConstantIslandPass.cpp - ARM constant islands ------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains a pass that splits the constant pool up into 'islands'
// which are scattered throughout the function. This is required due to the
// limited pc-relative displacements that ARM has.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMMachineFunctionInfo.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineJumpTableInfo.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Format.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
using namespace llvm;

#define DEBUG_TYPE "arm-cp-islands"

STATISTIC(NumCPEs,       "Number of constpool entries");
STATISTIC(NumSplit,      "Number of uncond branches inserted");
STATISTIC(NumCBrFixed,   "Number of cond branches fixed");
STATISTIC(NumUBrFixed,   "Number of uncond branches fixed");
STATISTIC(NumTBs,        "Number of table branches generated");
STATISTIC(NumT2CPShrunk, "Number of Thumb2 constantpool instructions shrunk");
STATISTIC(NumT2BrShrunk, "Number of Thumb2 immediate branches shrunk");
STATISTIC(NumCBZ,        "Number of CBZ / CBNZ formed");
STATISTIC(NumJTMoved,    "Number of jump table destination blocks moved");
STATISTIC(NumJTInserted, "Number of jump table intermediate blocks inserted");

static cl::opt<bool>
AdjustJumpTableBlocks("arm-adjust-jump-tables", cl::Hidden, cl::init(true),
          cl::desc("Adjust basic block layout to better use TB[BH]"));

static cl::opt<unsigned>
CPMaxIteration("arm-constant-island-max-iteration", cl::Hidden, cl::init(30),
          cl::desc("The maximum number of iterations for convergence"));

/// UnknownPadding - Return the worst case padding that could result from
/// unknown offset bits. This does not include alignment padding caused by
/// known offset bits.
///
/// @param LogAlign log2(alignment)
/// @param KnownBits Number of known low offset bits.
static inline unsigned UnknownPadding(unsigned LogAlign, unsigned KnownBits) {
  if (KnownBits < LogAlign)
    return (1u << LogAlign) - (1u << KnownBits);
  return 0;
}
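
// For example, with LogAlign = 2 (4-byte alignment) and KnownBits = 0, the
// offset could be anything mod 4, so the worst case is (1 << 2) - (1 << 0)
// = 3 bytes of padding; once KnownBits >= LogAlign, no padding can be needed.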

namespace {
  /// ARMConstantIslands - Due to limited PC-relative displacements, ARM
  /// requires constant pool entries to be scattered among the instructions
  /// inside a function. To do this, it completely ignores the normal LLVM
  /// constant pool; instead, it places constants wherever it feels like with
  /// special instructions.
  ///
  /// The terminology used in this pass includes:
  ///   Islands - Clumps of constants placed in the function.
  ///   Water   - Potential places where an island could be formed.
  ///   CPE     - A constant pool entry that has been placed somewhere, which
  ///             tracks a list of users.
  class ARMConstantIslands : public MachineFunctionPass {
    /// BasicBlockInfo - Information about the offset and size of a single
    /// basic block.
    struct BasicBlockInfo {
      /// Offset - Distance from the beginning of the function to the beginning
      /// of this basic block.
      ///
      /// Offsets are computed assuming worst case padding before an aligned
      /// block. This means that subtracting basic block offsets always gives a
      /// conservative estimate of the real distance which may be smaller.
      ///
      /// Because worst case padding is used, the computed offset of an aligned
      /// block may not actually be aligned.
      unsigned Offset;

      /// Size - Size of the basic block in bytes. If the block contains
      /// inline assembly, this is a worst case estimate.
      ///
      /// The size does not include any alignment padding whether from the
      /// beginning of the block, or from an aligned jump table at the end.
      unsigned Size;

      /// KnownBits - The number of low bits in Offset that are known to be
      /// exact. The remaining bits of Offset are an upper bound.
      uint8_t KnownBits;

      /// Unalign - When non-zero, the block contains instructions (inline asm)
      /// of unknown size. The real size may be smaller than Size bytes by a
      /// multiple of 1 << Unalign.
      uint8_t Unalign;

      /// PostAlign - When non-zero, the block terminator contains a .align
      /// directive, so the end of the block is aligned to 1 << PostAlign
      /// bytes.
      uint8_t PostAlign;

      BasicBlockInfo() : Offset(0), Size(0), KnownBits(0), Unalign(0),
        PostAlign(0) {}

      /// Compute the number of known offset bits internally to this block.
      /// This number should be used to predict worst case padding when
      /// splitting the block.
      unsigned internalKnownBits() const {
        unsigned Bits = Unalign ? Unalign : KnownBits;
        // If the block size isn't a multiple of the known bits, assume the
        // worst case padding.
        if (Size & ((1u << Bits) - 1))
          Bits = countTrailingZeros(Size);
        return Bits;
      }
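      // E.g. a block with Unalign = 0, KnownBits = 4, and Size = 0x106 starts
      // 16-byte aligned at best, but its size is only a multiple of 2, so just
      // countTrailingZeros(0x106) = 1 low bit of the end offset stays known.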

      /// Compute the offset immediately following this block. If LogAlign is
      /// specified, return the offset the successor block will get if it has
      /// this alignment.
      unsigned postOffset(unsigned LogAlign = 0) const {
        unsigned PO = Offset + Size;
        unsigned LA = std::max(unsigned(PostAlign), LogAlign);
        if (!LA)
          return PO;
        // Add alignment padding from the terminator.
        return PO + UnknownPadding(LA, internalKnownBits());
      }
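      // E.g. with Offset = 0x100, Size = 0x34, internalKnownBits() = 2, and a
      // requested LogAlign of 3, the block ends at 0x134 and the worst-case
      // padding is (1 << 3) - (1 << 2) = 4, so postOffset(3) returns 0x138.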

      /// Compute the number of known low bits of postOffset. If this block
      /// contains inline asm, the number of known bits drops to the
      /// instruction alignment. An aligned terminator may increase the number
      /// of known bits.
      /// If LogAlign is given, also consider the alignment of the next block.
      unsigned postKnownBits(unsigned LogAlign = 0) const {
        return std::max(std::max(unsigned(PostAlign), LogAlign),
                        internalKnownBits());
      }
    };

    std::vector<BasicBlockInfo> BBInfo;

    /// WaterList - A sorted list of basic blocks where islands could be placed
    /// (i.e. blocks that don't fall through to the following block, due
    /// to a return, unreachable, or unconditional branch).
    std::vector<MachineBasicBlock*> WaterList;

    /// NewWaterList - The subset of WaterList that was created since the
    /// previous iteration by inserting unconditional branches.
    SmallSet<MachineBasicBlock*, 4> NewWaterList;

    typedef std::vector<MachineBasicBlock*>::iterator water_iterator;

    /// CPUser - One user of a constant pool, keeping the machine instruction
    /// pointer, the constant pool being referenced, and the max displacement
    /// allowed from the instruction to the CP. The HighWaterMark records the
    /// highest basic block where a new CPEntry can be placed. To ensure this
    /// pass terminates, the CP entries are initially placed at the end of the
    /// function and then move monotonically to lower addresses. The
    /// exception to this rule is when the current CP entry for a particular
    /// CPUser is out of range, but there is another CP entry for the same
    /// constant value in range. We want to use the existing in-range CP
    /// entry, but if it later moves out of range, the search for new water
    /// should resume where it left off. The HighWaterMark is used to record
    /// that point.
    struct CPUser {
      MachineInstr *MI;
      MachineInstr *CPEMI;
      MachineBasicBlock *HighWaterMark;
      unsigned MaxDisp;
      bool NegOk;
      bool IsSoImm;
      bool KnownAlignment;
      CPUser(MachineInstr *mi, MachineInstr *cpemi, unsigned maxdisp,
             bool neg, bool soimm)
        : MI(mi), CPEMI(cpemi), MaxDisp(maxdisp), NegOk(neg), IsSoImm(soimm),
          KnownAlignment(false) {
        HighWaterMark = CPEMI->getParent();
      }
      /// getMaxDisp - Returns the maximum displacement supported by MI.
      /// Correct for unknown alignment.
      /// Conservatively subtract 2 bytes to handle weird alignment effects.
      unsigned getMaxDisp() const {
        return (KnownAlignment ? MaxDisp : MaxDisp - 2) - 2;
      }
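      // E.g. for a user with MaxDisp = 4095 (t2LDRpci), the usable range is
      // 4093 bytes when the alignment is known and 4091 when it is not,
      // absorbing a possible 2-byte rounding of the unknown offset.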
    };

    /// CPUsers - Keep track of all of the machine instructions that use various
    /// constant pools and their max displacement.
    std::vector<CPUser> CPUsers;

    /// CPEntry - One per constant pool entry, keeping the machine instruction
    /// pointer, the constpool index, and the number of CPUser's which
    /// reference this entry.
    struct CPEntry {
      MachineInstr *CPEMI;
      unsigned CPI;
      unsigned RefCount;
      CPEntry(MachineInstr *cpemi, unsigned cpi, unsigned rc = 0)
        : CPEMI(cpemi), CPI(cpi), RefCount(rc) {}
    };

    /// CPEntries - Keep track of all of the constant pool entry machine
    /// instructions. For each original constpool index (i.e. those that existed
    /// upon entry to this pass), it keeps a vector of entries. Original
    /// elements are cloned as we go along; the clones are put in the vector of
    /// the original element, but have distinct CPIs.
    ///
    /// The first half of CPEntries contains generic constants, the second half
    /// contains jump tables. Use getCombinedIndex on a generic CPEMI to look up
    /// which vector it will be in here.
    std::vector<std::vector<CPEntry> > CPEntries;

    /// Maps a JT index to the offset in CPEntries containing copies of that
    /// table. The equivalent map for a CONSTPOOL_ENTRY is the identity.
    DenseMap<int, int> JumpTableEntryIndices;

    /// Maps a JT index to the LEA that actually uses the index to calculate its
    /// base address.
    DenseMap<int, int> JumpTableUserIndices;

    /// ImmBranch - One per immediate branch, keeping the machine instruction
    /// pointer, conditional or unconditional, the max displacement,
    /// and (if isCond is true) the corresponding unconditional branch
    /// opcode.
    struct ImmBranch {
      MachineInstr *MI;
      unsigned MaxDisp : 31;
      bool isCond : 1;
      unsigned UncondBr;
      ImmBranch(MachineInstr *mi, unsigned maxdisp, bool cond, unsigned ubr)
        : MI(mi), MaxDisp(maxdisp), isCond(cond), UncondBr(ubr) {}
    };

    /// ImmBranches - Keep track of all the immediate branch instructions.
    ///
    std::vector<ImmBranch> ImmBranches;

    /// PushPopMIs - Keep track of all the Thumb push / pop instructions.
    ///
    SmallVector<MachineInstr*, 4> PushPopMIs;

    /// T2JumpTables - Keep track of all the Thumb2 jumptable instructions.
    SmallVector<MachineInstr*, 4> T2JumpTables;

    /// HasFarJump - True if any far jump instruction has been emitted during
    /// the branch fix up pass.
    bool HasFarJump;

    MachineFunction *MF;
    MachineConstantPool *MCP;
    const ARMBaseInstrInfo *TII;
    const ARMSubtarget *STI;
    ARMFunctionInfo *AFI;
    bool isThumb;
    bool isThumb1;
    bool isThumb2;
  public:
    static char ID;
    ARMConstantIslands() : MachineFunctionPass(ID) {}

    bool runOnMachineFunction(MachineFunction &MF) override;

    MachineFunctionProperties getRequiredProperties() const override {
      return MachineFunctionProperties().set(
          MachineFunctionProperties::Property::AllVRegsAllocated);
    }

    const char *getPassName() const override {
      return "ARM constant island placement and branch shortening pass";
    }

  private:
    void doInitialConstPlacement(std::vector<MachineInstr *> &CPEMIs);
    void doInitialJumpTablePlacement(std::vector<MachineInstr *> &CPEMIs);
    bool BBHasFallthrough(MachineBasicBlock *MBB);
    CPEntry *findConstPoolEntry(unsigned CPI, const MachineInstr *CPEMI);
    unsigned getCPELogAlign(const MachineInstr *CPEMI);
    void scanFunctionJumpTables();
    void initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs);
    MachineBasicBlock *splitBlockBeforeInstr(MachineInstr *MI);
    void updateForInsertedWaterBlock(MachineBasicBlock *NewBB);
    void adjustBBOffsetsAfter(MachineBasicBlock *BB);
    bool decrementCPEReferenceCount(unsigned CPI, MachineInstr* CPEMI);
    unsigned getCombinedIndex(const MachineInstr *CPEMI);
    int findInRangeCPEntry(CPUser& U, unsigned UserOffset);
    bool findAvailableWater(CPUser&U, unsigned UserOffset,
                            water_iterator &WaterIter, bool CloserWater);
    void createNewWater(unsigned CPUserIndex, unsigned UserOffset,
                        MachineBasicBlock *&NewMBB);
    bool handleConstantPoolUser(unsigned CPUserIndex, bool CloserWater);
    void removeDeadCPEMI(MachineInstr *CPEMI);
    bool removeUnusedCPEntries();
    bool isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                          MachineInstr *CPEMI, unsigned Disp, bool NegOk,
                          bool DoDump = false);
    bool isWaterInRange(unsigned UserOffset, MachineBasicBlock *Water,
                        CPUser &U, unsigned &Growth);
    bool isBBInRange(MachineInstr *MI, MachineBasicBlock *BB, unsigned Disp);
    bool fixupImmediateBr(ImmBranch &Br);
    bool fixupConditionalBr(ImmBranch &Br);
    bool fixupUnconditionalBr(ImmBranch &Br);
    bool undoLRSpillRestore();
    bool mayOptimizeThumb2Instruction(const MachineInstr *MI) const;
    bool optimizeThumb2Instructions();
    bool optimizeThumb2Branches();
    bool reorderThumb2JumpTables();
    bool preserveBaseRegister(MachineInstr *JumpMI, MachineInstr *LEAMI,
                              unsigned &DeadSize, bool &CanDeleteLEA,
                              bool &BaseRegKill);
    bool optimizeThumb2JumpTables();
    MachineBasicBlock *adjustJTTargetBlockForward(MachineBasicBlock *BB,
                                                  MachineBasicBlock *JTBB);

    void computeBlockSize(MachineBasicBlock *MBB);
    unsigned getOffsetOf(MachineInstr *MI) const;
    unsigned getUserOffset(CPUser&) const;
    void dumpBBs();
    void verify();

    bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                         unsigned Disp, bool NegativeOK, bool IsSoImm = false);
    bool isOffsetInRange(unsigned UserOffset, unsigned TrialOffset,
                         const CPUser &U) {
      return isOffsetInRange(UserOffset, TrialOffset,
                             U.getMaxDisp(), U.NegOk, U.IsSoImm);
    }
  };
  char ARMConstantIslands::ID = 0;
}

/// verify - check BBOffsets, BBSizes, alignment of islands
void ARMConstantIslands::verify() {
#ifndef NDEBUG
  assert(std::is_sorted(MF->begin(), MF->end(),
                        [this](const MachineBasicBlock &LHS,
                               const MachineBasicBlock &RHS) {
                          return BBInfo[LHS.getNumber()].postOffset() <
                                 BBInfo[RHS.getNumber()].postOffset();
                        }));
  DEBUG(dbgs() << "Verifying " << CPUsers.size() << " CP users.\n");
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned UserOffset = getUserOffset(U);
    // Verify offset using the real max displacement without the safety
    // adjustment.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, U.getMaxDisp()+2, U.NegOk,
                         /* DoDump = */ true)) {
      DEBUG(dbgs() << "OK\n");
      continue;
    }
    DEBUG(dbgs() << "Out of range.\n");
    dumpBBs();
    DEBUG(MF->dump());
    llvm_unreachable("Constant pool entry out of range!");
  }
#endif
}

/// print block size and offset information - debugging
void ARMConstantIslands::dumpBBs() {
  DEBUG({
    for (unsigned J = 0, E = BBInfo.size(); J != E; ++J) {
      const BasicBlockInfo &BBI = BBInfo[J];
      dbgs() << format("%08x BB#%u\t", BBI.Offset, J)
             << " kb=" << unsigned(BBI.KnownBits)
             << " ua=" << unsigned(BBI.Unalign)
             << " pa=" << unsigned(BBI.PostAlign)
             << format(" size=%#x\n", BBInfo[J].Size);
    }
  });
}

/// createARMConstantIslandPass - returns an instance of the constpool
/// island pass.
FunctionPass *llvm::createARMConstantIslandPass() {
  return new ARMConstantIslands();
}

bool ARMConstantIslands::runOnMachineFunction(MachineFunction &mf) {
  MF = &mf;
  MCP = mf.getConstantPool();

  DEBUG(dbgs() << "***** ARMConstantIslands: "
               << MCP->getConstants().size() << " CP entries, aligned to "
               << MCP->getConstantPoolAlignment() << " bytes *****\n");

  STI = &static_cast<const ARMSubtarget &>(MF->getSubtarget());
  TII = STI->getInstrInfo();
  AFI = MF->getInfo<ARMFunctionInfo>();

  isThumb = AFI->isThumbFunction();
  isThumb1 = AFI->isThumb1OnlyFunction();
  isThumb2 = AFI->isThumb2Function();

  HasFarJump = false;

  // This pass invalidates liveness information when it splits basic blocks.
  MF->getRegInfo().invalidateLiveness();

  // Renumber all of the machine basic blocks in the function, guaranteeing
  // that the numbers agree with the position of the block in the function.
  MF->RenumberBlocks();

  // Try to reorder and otherwise adjust the block layout to make good use
  // of the TB[BH] instructions.
  bool MadeChange = false;
  if (isThumb2 && AdjustJumpTableBlocks) {
    scanFunctionJumpTables();
    MadeChange |= reorderThumb2JumpTables();
    // Data is out of date, so clear it. It'll be re-computed later.
    T2JumpTables.clear();
    // Blocks may have shifted around. Keep the numbering up to date.
    MF->RenumberBlocks();
  }

  // Perform the initial placement of the constant pool entries. To start with,
  // we put them all at the end of the function.
  std::vector<MachineInstr*> CPEMIs;
  if (!MCP->isEmpty())
    doInitialConstPlacement(CPEMIs);

  if (MF->getJumpTableInfo())
    doInitialJumpTablePlacement(CPEMIs);

  /// The next UID to take is the first unused one.
  AFI->initPICLabelUId(CPEMIs.size());

  // Do the initial scan of the function, building up information about the
  // sizes of each block, the location of all the water, and finding all of the
  // constant pool users.
  initializeFunctionInfo(CPEMIs);
  CPEMIs.clear();
  DEBUG(dumpBBs());

  // Functions with jump tables need an alignment of 4 because they use the ADR
  // instruction, which aligns the PC to 4 bytes before adding an offset.
  if (!T2JumpTables.empty())
    MF->ensureAlignment(2);

  /// Remove dead constant pool entries.
  MadeChange |= removeUnusedCPEntries();

  // Iteratively place constant pool entries and fix up branches until there
  // is no change.
  unsigned NoCPIters = 0, NoBRIters = 0;
  while (true) {
    DEBUG(dbgs() << "Beginning CP iteration #" << NoCPIters << '\n');
    bool CPChange = false;
    for (unsigned i = 0, e = CPUsers.size(); i != e; ++i)
      // For most inputs, this converges in no more than 5 iterations.
      // If it hasn't converged after 10 iterations, the input may have huge
      // basic blocks or many CPEs; in that case, try different heuristics.
      CPChange |= handleConstantPoolUser(i, NoCPIters >= CPMaxIteration / 2);
    if (CPChange && ++NoCPIters > CPMaxIteration)
      report_fatal_error("Constant Island pass failed to converge!");
    DEBUG(dumpBBs());

    // Clear NewWaterList now. If we split a block for branches, it should
    // appear as "new water" for the next iteration of constant pool placement.
    NewWaterList.clear();

    DEBUG(dbgs() << "Beginning BR iteration #" << NoBRIters << '\n');
    bool BRChange = false;
    for (unsigned i = 0, e = ImmBranches.size(); i != e; ++i)
      BRChange |= fixupImmediateBr(ImmBranches[i]);
    if (BRChange && ++NoBRIters > 30)
      report_fatal_error("Branch Fix Up pass failed to converge!");
    DEBUG(dumpBBs());

    if (!CPChange && !BRChange)
      break;
    MadeChange = true;
  }

  // Shrink 32-bit Thumb2 load and store instructions.
  if (isThumb2 && !STI->prefers32BitThumb())
    MadeChange |= optimizeThumb2Instructions();

  // Shrink 32-bit branch instructions.
  if (isThumb && STI->hasV8MBaselineOps())
    MadeChange |= optimizeThumb2Branches();

  // Optimize jump tables using TBB / TBH.
  if (isThumb2)
    MadeChange |= optimizeThumb2JumpTables();

  // After a while, this might be made debug-only, but it is not expensive.
  verify();

  // If LR has been force-spilled and no far jump (i.e. BL) has been issued,
  // undo the spill / restore of LR if possible.
  if (isThumb && !HasFarJump && AFI->isLRSpilledForFarJump())
    MadeChange |= undoLRSpillRestore();

  // Save the mapping between original and cloned constpool entries.
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    for (unsigned j = 0, je = CPEntries[i].size(); j != je; ++j) {
      const CPEntry & CPE = CPEntries[i][j];
      if (CPE.CPEMI && CPE.CPEMI->getOperand(1).isCPI())
        AFI->recordCPEClone(i, CPE.CPI);
    }
  }

  DEBUG(dbgs() << '\n'; dumpBBs());

  BBInfo.clear();
  WaterList.clear();
  CPUsers.clear();
  CPEntries.clear();
  JumpTableEntryIndices.clear();
  JumpTableUserIndices.clear();
  ImmBranches.clear();
  PushPopMIs.clear();
  T2JumpTables.clear();

  return MadeChange;
}

/// \brief Perform the initial placement of the regular constant pool entries.
/// To start with, we put them all at the end of the function.
void
ARMConstantIslands::doInitialConstPlacement(std::vector<MachineInstr*> &CPEMIs) {
  // Create the basic block to hold the CPE's.
  MachineBasicBlock *BB = MF->CreateMachineBasicBlock();
  MF->push_back(BB);

  // MachineConstantPool measures alignment in bytes. We measure in log2(bytes).
  unsigned MaxAlign = Log2_32(MCP->getConstantPoolAlignment());

  // Mark the basic block as required by the const-pool.
  BB->setAlignment(MaxAlign);

  // The function needs to be as aligned as the basic blocks. The linker may
  // move functions around based on their alignment.
  MF->ensureAlignment(BB->getAlignment());

  // Order the entries in BB by descending alignment. That ensures correct
  // alignment of all entries as long as BB is sufficiently aligned. Keep
  // track of the insertion point for each alignment. We are going to bucket
  // sort the entries as they are created.
  SmallVector<MachineBasicBlock::iterator, 8> InsPoint(MaxAlign + 1, BB->end());

  // Add all of the constants from the constant pool to the end block, use an
  // identity mapping of CPI's to CPE's.
  const std::vector<MachineConstantPoolEntry> &CPs = MCP->getConstants();

  const DataLayout &TD = MF->getDataLayout();
  for (unsigned i = 0, e = CPs.size(); i != e; ++i) {
    unsigned Size = TD.getTypeAllocSize(CPs[i].getType());
    assert(Size >= 4 && "Too small constant pool entry");
    unsigned Align = CPs[i].getAlignment();
    assert(isPowerOf2_32(Align) && "Invalid alignment");
    // Verify that all constant pool entries are a multiple of their alignment.
    // If not, we would have to pad them out so that instructions stay aligned.
    assert((Size % Align) == 0 && "CP Entry not multiple of 4 bytes!");

    // Insert CONSTPOOL_ENTRY before entries with a smaller alignment.
    unsigned LogAlign = Log2_32(Align);
    MachineBasicBlock::iterator InsAt = InsPoint[LogAlign];
    MachineInstr *CPEMI =
      BuildMI(*BB, InsAt, DebugLoc(), TII->get(ARM::CONSTPOOL_ENTRY))
        .addImm(i).addConstantPoolIndex(i).addImm(Size);
    CPEMIs.push_back(CPEMI);

    // Ensure that future entries with higher alignment get inserted before
    // CPEMI. This is bucket sort with iterators.
    for (unsigned a = LogAlign + 1; a <= MaxAlign; ++a)
      if (InsPoint[a] == InsAt)
        InsPoint[a] = CPEMI;

    // Add a new CPEntry, but no corresponding CPUser yet.
    CPEntries.emplace_back(1, CPEntry(CPEMI, i));
    ++NumCPEs;
    DEBUG(dbgs() << "Moved CPI#" << i << " to end of function, size = "
                 << Size << ", align = " << Align <<'\n');
  }
  DEBUG(BB->dump());
}

/// \brief Do initial placement of the jump tables. Because Thumb2's TBB and TBH
/// instructions can be made more efficient if the jump table immediately
/// follows the instruction, it's best to place them immediately next to their
/// jumps to begin with. In almost all cases they'll never be moved from that
/// position.
void ARMConstantIslands::doInitialJumpTablePlacement(
    std::vector<MachineInstr *> &CPEMIs) {
  unsigned i = CPEntries.size();
  auto MJTI = MF->getJumpTableInfo();
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();

  MachineBasicBlock *LastCorrectlyNumberedBB = nullptr;
  for (MachineBasicBlock &MBB : *MF) {
    auto MI = MBB.getLastNonDebugInstr();
    if (MI == MBB.end())
      continue;

    unsigned JTOpcode;
    switch (MI->getOpcode()) {
    default:
      continue;
    case ARM::BR_JTadd:
    case ARM::BR_JTr:
    case ARM::tBR_JTr:
    case ARM::BR_JTm:
      JTOpcode = ARM::JUMPTABLE_ADDRS;
      break;
    case ARM::t2BR_JT:
      JTOpcode = ARM::JUMPTABLE_INSTS;
      break;
    case ARM::t2TBB_JT:
      JTOpcode = ARM::JUMPTABLE_TBB;
      break;
    case ARM::t2TBH_JT:
      JTOpcode = ARM::JUMPTABLE_TBH;
      break;
    }

    unsigned NumOps = MI->getDesc().getNumOperands();
    MachineOperand JTOp =
      MI->getOperand(NumOps - (MI->isPredicable() ? 2 : 1));
    unsigned JTI = JTOp.getIndex();
    unsigned Size = JT[JTI].MBBs.size() * sizeof(uint32_t);
    MachineBasicBlock *JumpTableBB = MF->CreateMachineBasicBlock();
    MF->insert(std::next(MachineFunction::iterator(MBB)), JumpTableBB);
    MachineInstr *CPEMI = BuildMI(*JumpTableBB, JumpTableBB->begin(),
                                  DebugLoc(), TII->get(JTOpcode))
                              .addImm(i++)
                              .addJumpTableIndex(JTI)
                              .addImm(Size);
    CPEMIs.push_back(CPEMI);
    CPEntries.emplace_back(1, CPEntry(CPEMI, JTI));
    JumpTableEntryIndices.insert(std::make_pair(JTI, CPEntries.size() - 1));
    if (!LastCorrectlyNumberedBB)
      LastCorrectlyNumberedBB = &MBB;
  }

  // If we did anything then we need to renumber the subsequent blocks.
  if (LastCorrectlyNumberedBB)
    MF->RenumberBlocks(LastCorrectlyNumberedBB);
}

/// BBHasFallthrough - Return true if the specified basic block can fall
/// through into the block immediately after it.
bool ARMConstantIslands::BBHasFallthrough(MachineBasicBlock *MBB) {
  // Get the next machine basic block in the function.
  MachineFunction::iterator MBBI = MBB->getIterator();
  // Can't fall off end of function.
  if (std::next(MBBI) == MBB->getParent()->end())
    return false;

  MachineBasicBlock *NextBB = &*std::next(MBBI);
  if (std::find(MBB->succ_begin(), MBB->succ_end(), NextBB) == MBB->succ_end())
    return false;

  // Try to analyze the end of the block. A potential fallthrough may already
  // have an unconditional branch for whatever reason.
  MachineBasicBlock *TBB, *FBB;
  SmallVector<MachineOperand, 4> Cond;
  bool TooDifficult = TII->analyzeBranch(*MBB, TBB, FBB, Cond);
  return TooDifficult || FBB == nullptr;
}

/// findConstPoolEntry - Given the constpool index and CONSTPOOL_ENTRY MI,
/// look up the corresponding CPEntry.
ARMConstantIslands::CPEntry
*ARMConstantIslands::findConstPoolEntry(unsigned CPI,
                                        const MachineInstr *CPEMI) {
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  // Number of entries per constpool index should be small, just do a
  // linear search.
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    if (CPEs[i].CPEMI == CPEMI)
      return &CPEs[i];
  }
  return nullptr;
}

/// getCPELogAlign - Returns the required alignment of the constant pool entry
/// represented by CPEMI. Alignment is measured in log2(bytes) units.
unsigned ARMConstantIslands::getCPELogAlign(const MachineInstr *CPEMI) {
  switch (CPEMI->getOpcode()) {
  case ARM::CONSTPOOL_ENTRY:
    break;
  case ARM::JUMPTABLE_TBB:
    return 0;
  case ARM::JUMPTABLE_TBH:
  case ARM::JUMPTABLE_INSTS:
    return 1;
  case ARM::JUMPTABLE_ADDRS:
    return 2;
  default:
    llvm_unreachable("unknown constpool entry kind");
  }

  unsigned CPI = getCombinedIndex(CPEMI);
  assert(CPI < MCP->getConstants().size() && "Invalid constant pool index.");
  unsigned Align = MCP->getConstants()[CPI].getAlignment();
  assert(isPowerOf2_32(Align) && "Invalid CPE alignment");
  return Log2_32(Align);
}
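
// E.g. a CONSTPOOL_ENTRY holding an 8-byte-aligned double yields
// Log2_32(8) = 3, while a TBB jump table of bytes needs no alignment at all.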

/// scanFunctionJumpTables - Do a scan of the function, building up
/// information about the sizes of each block and the locations of all
/// the jump tables.
void ARMConstantIslands::scanFunctionJumpTables() {
  for (MachineBasicBlock &MBB : *MF) {
    for (MachineInstr &I : MBB)
      if (I.isBranch() && I.getOpcode() == ARM::t2BR_JT)
        T2JumpTables.push_back(&I);
  }
}

/// initializeFunctionInfo - Do the initial scan of the function, building up
/// information about the sizes of each block, the location of all the water,
/// and finding all of the constant pool users.
void ARMConstantIslands::
initializeFunctionInfo(const std::vector<MachineInstr*> &CPEMIs) {
  BBInfo.clear();
  BBInfo.resize(MF->getNumBlockIDs());

  // First thing, compute the size of all basic blocks, and see if the function
  // has any inline assembly in it. If so, we have to be conservative about
  // alignment assumptions, as we don't know for sure the size of any
  // instructions in the inline assembly.
  for (MachineBasicBlock &MBB : *MF)
    computeBlockSize(&MBB);

  // The known bits of the entry block offset are determined by the function
  // alignment.
  BBInfo.front().KnownBits = MF->getAlignment();

  // Compute block offsets and known bits.
  adjustBBOffsetsAfter(&MF->front());

  // Now go back through the instructions and build up our data structures.
  for (MachineBasicBlock &MBB : *MF) {
    // If this block doesn't fall through into the next MBB, then this is
    // 'water' where a constant pool island could be placed.
    if (!BBHasFallthrough(&MBB))
      WaterList.push_back(&MBB);

    for (MachineInstr &I : MBB) {
      if (I.isDebugValue())
        continue;

      unsigned Opc = I.getOpcode();
      if (I.isBranch()) {
        bool isCond = false;
        unsigned Bits = 0;
        unsigned Scale = 1;
        int UOpc = Opc;
        switch (Opc) {
        default:
          continue;  // Ignore other JT branches
        case ARM::t2BR_JT:
          T2JumpTables.push_back(&I);
          continue;   // Does not get an entry in ImmBranches
        case ARM::Bcc:
          isCond = true;
          UOpc = ARM::B;
          // Fallthrough
        case ARM::B:
          Bits = 24;
          Scale = 4;
          break;
        case ARM::tBcc:
          isCond = true;
          UOpc = ARM::tB;
          Bits = 8;
          Scale = 2;
          break;
        case ARM::tB:
          Bits = 11;
          Scale = 2;
          break;
        case ARM::t2Bcc:
          isCond = true;
          UOpc = ARM::t2B;
          Bits = 20;
          Scale = 2;
          break;
        case ARM::t2B:
          Bits = 24;
          Scale = 2;
          break;
        }

        // Record this immediate branch.
        unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
        ImmBranches.push_back(ImmBranch(&I, MaxOffs, isCond, UOpc));
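        // E.g. tBcc has Bits = 8 and Scale = 2, so MaxOffs =
        // ((1 << 7) - 1) * 2 = 254 bytes; one bit is reserved for the sign
        // since branches can also go backwards.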
      }

      if (Opc == ARM::tPUSH || Opc == ARM::tPOP_RET)
        PushPopMIs.push_back(&I);

      if (Opc == ARM::CONSTPOOL_ENTRY || Opc == ARM::JUMPTABLE_ADDRS ||
          Opc == ARM::JUMPTABLE_INSTS || Opc == ARM::JUMPTABLE_TBB ||
          Opc == ARM::JUMPTABLE_TBH)
        continue;

      // Scan the instructions for constant pool operands.
      for (unsigned op = 0, e = I.getNumOperands(); op != e; ++op)
        if (I.getOperand(op).isCPI() || I.getOperand(op).isJTI()) {
          // We found one. The addressing mode tells us the max displacement
          // from the PC that this instruction permits.

          // Basic size info comes from the TSFlags field.
          unsigned Bits = 0;
          unsigned Scale = 1;
          bool NegOk = false;
          bool IsSoImm = false;

          switch (Opc) {
          default:
            llvm_unreachable("Unknown addressing mode for CP reference!");

          // Taking the address of a CP entry.
          case ARM::LEApcrel:
          case ARM::LEApcrelJT:
            // This takes a SoImm, which is an 8-bit rotated immediate. We'll
            // pretend the maximum offset is 255 * 4. Since each instruction
            // is 4 bytes wide, this is always correct. We'll check for other
            // displacements that fit in a SoImm as well.
            Bits = 8;
            Scale = 4;
            NegOk = true;
            IsSoImm = true;
            break;
          case ARM::t2LEApcrel:
          case ARM::t2LEApcrelJT:
            Bits = 12;
            NegOk = true;
            break;
          case ARM::tLEApcrel:
          case ARM::tLEApcrelJT:
            Bits = 8;
            Scale = 4;
            break;

          case ARM::LDRBi12:
          case ARM::LDRi12:
          case ARM::LDRcp:
          case ARM::t2LDRpci:
            Bits = 12;  // +-offset_12
            NegOk = true;
            break;

          case ARM::tLDRpci:
            Bits = 8;
            Scale = 4;  // +(offset_8*4)
            break;

          case ARM::VLDRD:
          case ARM::VLDRS:
            Bits = 8;
            Scale = 4;  // +-(offset_8*4)
            NegOk = true;
            break;
          }

          // Remember that this is a user of a CP entry.
          unsigned CPI = I.getOperand(op).getIndex();
          if (I.getOperand(op).isJTI()) {
            JumpTableUserIndices.insert(std::make_pair(CPI, CPUsers.size()));
            CPI = JumpTableEntryIndices[CPI];
          }

          MachineInstr *CPEMI = CPEMIs[CPI];
          unsigned MaxOffs = ((1 << Bits)-1) * Scale;
          CPUsers.push_back(CPUser(&I, CPEMI, MaxOffs, NegOk, IsSoImm));
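          // Unlike the branch case above, the displacement is a magnitude:
          // all Bits are used and NegOk carries the sign separately. E.g.
          // tLDRpci: Bits = 8, Scale = 4 gives MaxOffs = 255 * 4 = 1020.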

          // Increment corresponding CPEntry reference count.
          CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
          assert(CPE && "Cannot find a corresponding CPEntry!");
          CPE->RefCount++;

          // Instructions can only use one CP entry, don't bother scanning the
          // rest of the operands.
          break;
        }
    }
  }
}

/// computeBlockSize - Compute the size and some alignment information for MBB.
/// This function updates BBInfo directly.
void ARMConstantIslands::computeBlockSize(MachineBasicBlock *MBB) {
  BasicBlockInfo &BBI = BBInfo[MBB->getNumber()];
  BBI.Size = 0;
  BBI.Unalign = 0;
  BBI.PostAlign = 0;

  for (MachineInstr &I : *MBB) {
    BBI.Size += TII->GetInstSizeInBytes(I);
    // For inline asm, GetInstSizeInBytes returns a conservative estimate.
    // The actual size may be smaller, but still a multiple of the instr size.
    if (I.isInlineAsm())
      BBI.Unalign = isThumb ? 1 : 2;
    // Also consider instructions that may be shrunk later.
    else if (isThumb && mayOptimizeThumb2Instruction(&I))
      BBI.Unalign = 1;
  }

  // tBR_JTr contains a .align 2 directive.
  if (!MBB->empty() && MBB->back().getOpcode() == ARM::tBR_JTr) {
    BBI.PostAlign = 2;
    MBB->getParent()->ensureAlignment(2);
  }
}

/// getOffsetOf - Return the current offset of the specified machine instruction
/// from the start of the function. This offset changes as stuff is moved
/// around inside the function.
unsigned ARMConstantIslands::getOffsetOf(MachineInstr *MI) const {
  MachineBasicBlock *MBB = MI->getParent();

  // The offset is composed of two things: the sum of the sizes of all MBB's
  // before this instruction's block, and the offset from the start of the block
  // it is in.
  unsigned Offset = BBInfo[MBB->getNumber()].Offset;

  // Sum instructions before MI in MBB.
  for (MachineBasicBlock::iterator I = MBB->begin(); &*I != MI; ++I) {
    assert(I != MBB->end() && "Didn't find MI in its own basic block?");
    Offset += TII->GetInstSizeInBytes(*I);
  }
  return Offset;
}
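
// E.g. if MI's block has Offset 0x200 and MI is preceded in the block by a
// 4-byte and a 2-byte instruction, getOffsetOf(MI) returns 0x206.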

/// CompareMBBNumbers - Little predicate function to sort the WaterList by MBB
/// ID.
static bool CompareMBBNumbers(const MachineBasicBlock *LHS,
                              const MachineBasicBlock *RHS) {
  return LHS->getNumber() < RHS->getNumber();
}

/// updateForInsertedWaterBlock - When a block is newly inserted into the
/// machine function, it upsets all of the block numbers. Renumber the blocks
/// and update the arrays that parallel this numbering.
void ARMConstantIslands::updateForInsertedWaterBlock(MachineBasicBlock *NewBB) {
  // Renumber the MBB's to keep them consecutive.
  NewBB->getParent()->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add NewMBB as having
  // available water after it.
  water_iterator IP =
    std::lower_bound(WaterList.begin(), WaterList.end(), NewBB,
                     CompareMBBNumbers);
  WaterList.insert(IP, NewBB);
}


/// Split the basic block containing MI into two blocks, which are joined by
/// an unconditional branch. Update data structures and renumber blocks to
/// account for this change and return the newly created block.
MachineBasicBlock *ARMConstantIslands::splitBlockBeforeInstr(MachineInstr *MI) {
  MachineBasicBlock *OrigBB = MI->getParent();

  // Create a new MBB for the code after the OrigBB.
  MachineBasicBlock *NewBB =
    MF->CreateMachineBasicBlock(OrigBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++OrigBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Splice the instructions starting with MI over to NewBB.
  NewBB->splice(NewBB->end(), OrigBB, MI, OrigBB->end());

  // Add an unconditional branch from OrigBB to NewBB.
  // Note the new unconditional branch is not being recorded.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond to anything in the source.
  unsigned Opc = isThumb ? (isThumb2 ? ARM::t2B : ARM::tB) : ARM::B;
  if (!isThumb)
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB);
  else
    BuildMI(OrigBB, DebugLoc(), TII->get(Opc)).addMBB(NewBB)
            .addImm(ARMCC::AL).addReg(0);
  ++NumSplit;

  // Update the CFG. All succs of OrigBB are now succs of NewBB.
  NewBB->transferSuccessors(OrigBB);

  // OrigBB branches to NewBB.
  OrigBB->addSuccessor(NewBB);

  // Update internal data structures to account for the newly inserted MBB.
  // This is almost the same as updateForInsertedWaterBlock, except that
  // the Water goes after OrigBB, not NewBB.
  MF->RenumberBlocks(NewBB);

  // Insert an entry into BBInfo to align it properly with the (newly
  // renumbered) block numbers.
  BBInfo.insert(BBInfo.begin() + NewBB->getNumber(), BasicBlockInfo());

  // Next, update WaterList. Specifically, we need to add OrigMBB as having
  // available water after it (but not if it's already there, which happens
  // when splitting before a conditional branch that is followed by an
  // unconditional branch - in that case we want to insert NewBB).
  water_iterator IP =
    std::lower_bound(WaterList.begin(), WaterList.end(), OrigBB,
                     CompareMBBNumbers);
  MachineBasicBlock* WaterBB = *IP;
  if (WaterBB == OrigBB)
    WaterList.insert(std::next(IP), NewBB);
  else
    WaterList.insert(IP, OrigBB);
  NewWaterList.insert(OrigBB);

  // Figure out how large the OrigBB is. As the first half of the original
  // block, it cannot contain a tablejump. The size includes
  // the new jump we added. (It should be possible to do this without
  // recounting everything, but it's very confusing, and this is rarely
  // executed.)
  computeBlockSize(OrigBB);

  // Figure out how large the NewMBB is. As the second half of the original
  // block, it may contain a tablejump.
  computeBlockSize(NewBB);

  // All BBOffsets following these blocks must be modified.
  adjustBBOffsetsAfter(OrigBB);

  return NewBB;
}
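
// In pictures, splitting before MI turns
//   OrigBB: [ ... | MI | rest ] --> succs
// into
//   OrigBB: [ ... | B NewBB ]
//   NewBB:  [ MI | rest ]       --> succs (transferred from OrigBB)
// where OrigBB (not NewBB) is recorded as the new water.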

/// getUserOffset - Compute the offset of U.MI as seen by the hardware
/// displacement computation. Update U.KnownAlignment to match its current
/// basic block location.
unsigned ARMConstantIslands::getUserOffset(CPUser &U) const {
  unsigned UserOffset = getOffsetOf(U.MI);
  const BasicBlockInfo &BBI = BBInfo[U.MI->getParent()->getNumber()];
  unsigned KnownBits = BBI.internalKnownBits();

  // The value read from PC is offset from the actual instruction address.
  UserOffset += (isThumb ? 4 : 8);

  // Because of inline assembly, we may not know the alignment (mod 4) of U.MI.
  // Make sure U.getMaxDisp() returns a constrained range.
  U.KnownAlignment = (KnownBits >= 2);

  // On Thumb, offsets == 2 mod 4 are rounded down by the hardware for
  // purposes of the displacement computation; compensate for that here.
  // For unknown alignments, getMaxDisp() constrains the range instead.
  if (isThumb && U.KnownAlignment)
    UserOffset &= ~3u;

  return UserOffset;
}
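
// E.g. a Thumb2 user at raw offset 0x102 in a block with 4 known low offset
// bits reads PC as 0x102 + 4 = 0x106, which rounds down to 0x104 for the
// displacement computation.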

/// isOffsetInRange - Checks whether UserOffset (the location of a constant pool
/// reference) is within MaxDisp of TrialOffset (a proposed location of a
/// constant pool entry).
/// UserOffset is computed by getUserOffset above to include PC adjustments. If
/// the mod 4 alignment of UserOffset is not known, the uncertainty must be
/// subtracted from MaxDisp instead. CPUser::getMaxDisp() does that.
bool ARMConstantIslands::isOffsetInRange(unsigned UserOffset,
                                         unsigned TrialOffset, unsigned MaxDisp,
                                         bool NegativeOK, bool IsSoImm) {
  if (UserOffset <= TrialOffset) {
    // User before the Trial.
    if (TrialOffset - UserOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  } else if (NegativeOK) {
    if (UserOffset - TrialOffset <= MaxDisp)
      return true;
    // FIXME: Make use of the full range of soimm values.
  }
  return false;
}
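
// E.g. with UserOffset = 0x104, TrialOffset = 0x1100, and MaxDisp = 4092,
// the forward distance is 0xFFC = 4092 bytes, which is just in range; the
// same distance backwards would additionally require NegativeOK.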

/// isWaterInRange - Returns true if a CPE placed after the specified
/// Water (a basic block) will be in range for the specific MI.
///
/// Compute how much the function will grow by inserting a CPE after Water.
bool ARMConstantIslands::isWaterInRange(unsigned UserOffset,
                                        MachineBasicBlock* Water, CPUser &U,
                                        unsigned &Growth) {
  unsigned CPELogAlign = getCPELogAlign(U.CPEMI);
  unsigned CPEOffset = BBInfo[Water->getNumber()].postOffset(CPELogAlign);
  unsigned NextBlockOffset, NextBlockAlignment;
  MachineFunction::const_iterator NextBlock = Water->getIterator();
  if (++NextBlock == MF->end()) {
    NextBlockOffset = BBInfo[Water->getNumber()].postOffset();
    NextBlockAlignment = 0;
  } else {
    NextBlockOffset = BBInfo[NextBlock->getNumber()].Offset;
    NextBlockAlignment = NextBlock->getAlignment();
  }
  unsigned Size = U.CPEMI->getOperand(2).getImm();
  unsigned CPEEnd = CPEOffset + Size;

  // The CPE may be able to hide in the alignment padding before the next
  // block. It may also cause more padding to be required if it is more aligned
  // than the next block.
  if (CPEEnd > NextBlockOffset) {
    Growth = CPEEnd - NextBlockOffset;
    // Compute the padding that would go at the end of the CPE to align the next
    // block.
    Growth += OffsetToAlignment(CPEEnd, 1ULL << NextBlockAlignment);

    // If the CPE is to be inserted before the instruction, that will raise
    // the offset of the instruction. Also account for unknown alignment padding
    // in blocks between CPE and the user.
    if (CPEOffset < UserOffset)
      UserOffset += Growth + UnknownPadding(MF->getAlignment(), CPELogAlign);
  } else
    // CPE fits in existing padding.
    Growth = 0;

  return isOffsetInRange(UserOffset, CPEOffset, U);
}
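
// E.g. if the CPE would end 6 bytes past a 4-byte-aligned NextBlockOffset,
// Growth is those 6 bytes plus 2 more bytes of OffsetToAlignment padding to
// restore the next block's alignment, 8 bytes in total.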

/// isCPEntryInRange - Returns true if the distance between specific MI and
/// specific ConstPool entry instruction can fit in MI's displacement field.
bool ARMConstantIslands::isCPEntryInRange(MachineInstr *MI, unsigned UserOffset,
                                          MachineInstr *CPEMI, unsigned MaxDisp,
                                          bool NegOk, bool DoDump) {
  unsigned CPEOffset = getOffsetOf(CPEMI);

  if (DoDump) {
    DEBUG({
      unsigned Block = MI->getParent()->getNumber();
      const BasicBlockInfo &BBI = BBInfo[Block];
      dbgs() << "User of CPE#" << CPEMI->getOperand(0).getImm()
             << " max delta=" << MaxDisp
             << format(" insn address=%#x", UserOffset)
             << " in BB#" << Block << ": "
             << format("%#x-%x\t", BBI.Offset, BBI.postOffset()) << *MI
             << format("CPE address=%#x offset=%+d: ", CPEOffset,
                       int(CPEOffset-UserOffset));
    });
  }

  return isOffsetInRange(UserOffset, CPEOffset, MaxDisp, NegOk);
}

#ifndef NDEBUG
/// BBIsJumpedOver - Return true if the specified basic block's only predecessor
/// unconditionally branches to its only successor.
static bool BBIsJumpedOver(MachineBasicBlock *MBB) {
  if (MBB->pred_size() != 1 || MBB->succ_size() != 1)
    return false;

  MachineBasicBlock *Succ = *MBB->succ_begin();
  MachineBasicBlock *Pred = *MBB->pred_begin();
  MachineInstr *PredMI = &Pred->back();
  if (PredMI->getOpcode() == ARM::B || PredMI->getOpcode() == ARM::tB
      || PredMI->getOpcode() == ARM::t2B)
    return PredMI->getOperand(0).getMBB() == Succ;
  return false;
}
#endif // NDEBUG

void ARMConstantIslands::adjustBBOffsetsAfter(MachineBasicBlock *BB) {
  unsigned BBNum = BB->getNumber();
  for (unsigned i = BBNum + 1, e = MF->getNumBlockIDs(); i < e; ++i) {
    // Get the offset and known bits at the end of the layout predecessor.
    // Include the alignment of the current block.
    unsigned LogAlign = MF->getBlockNumbered(i)->getAlignment();
    unsigned Offset = BBInfo[i - 1].postOffset(LogAlign);
    unsigned KnownBits = BBInfo[i - 1].postKnownBits(LogAlign);

    // This is where block i begins. Stop if the offset is already correct,
    // and we have updated 2 blocks. This is the maximum number of blocks
    // changed before calling this function.
    if (i > BBNum + 2 &&
        BBInfo[i].Offset == Offset &&
        BBInfo[i].KnownBits == KnownBits)
      break;

    BBInfo[i].Offset = Offset;
    BBInfo[i].KnownBits = KnownBits;
  }
}

/// decrementCPEReferenceCount - find the constant pool entry with index CPI
/// and instruction CPEMI, and decrement its refcount. If the refcount
/// becomes 0 remove the entry and instruction. Returns true if we removed
/// the entry, false if we didn't.

bool ARMConstantIslands::decrementCPEReferenceCount(unsigned CPI,
                                                    MachineInstr *CPEMI) {
  // Find the old entry. Eliminate it if it is no longer used.
  CPEntry *CPE = findConstPoolEntry(CPI, CPEMI);
  assert(CPE && "Unexpected!");
  if (--CPE->RefCount == 0) {
    removeDeadCPEMI(CPEMI);
    CPE->CPEMI = nullptr;
    --NumCPEs;
    return true;
  }
  return false;
}

unsigned ARMConstantIslands::getCombinedIndex(const MachineInstr *CPEMI) {
  if (CPEMI->getOperand(1).isCPI())
    return CPEMI->getOperand(1).getIndex();

  return JumpTableEntryIndices[CPEMI->getOperand(1).getIndex()];
}

/// LookForCPEntryInRange - see if the currently referenced CPE is in range;
/// if not, see if an in-range clone of the CPE is in range, and if so,
/// change the data structures so the user references the clone. Returns:
/// 0 = no existing entry found
/// 1 = entry found, and there were no code insertions or deletions
/// 2 = entry found, and there were code insertions or deletions
int ARMConstantIslands::findInRangeCPEntry(CPUser& U, unsigned UserOffset)
{
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;

  // Check to see if the CPE is already in-range.
  if (isCPEntryInRange(UserMI, UserOffset, CPEMI, U.getMaxDisp(), U.NegOk,
                       true)) {
    DEBUG(dbgs() << "In range\n");
    return 1;
  }

  // No. Look for previously created clones of the CPE that are in range.
  unsigned CPI = getCombinedIndex(CPEMI);
  std::vector<CPEntry> &CPEs = CPEntries[CPI];
  for (unsigned i = 0, e = CPEs.size(); i != e; ++i) {
    // We already tried this one
    if (CPEs[i].CPEMI == CPEMI)
      continue;
    // Removing CPEs can leave empty entries, skip
    if (CPEs[i].CPEMI == nullptr)
      continue;
    if (isCPEntryInRange(UserMI, UserOffset, CPEs[i].CPEMI, U.getMaxDisp(),
                         U.NegOk)) {
      DEBUG(dbgs() << "Replacing CPE#" << CPI << " with CPE#"
                   << CPEs[i].CPI << "\n");
      // Point the CPUser node to the replacement
      U.CPEMI = CPEs[i].CPEMI;
      // Change the CPI in the instruction operand to refer to the clone.
      for (unsigned j = 0, e = UserMI->getNumOperands(); j != e; ++j)
        if (UserMI->getOperand(j).isCPI()) {
          UserMI->getOperand(j).setIndex(CPEs[i].CPI);
          break;
        }
      // Adjust the refcount of the clone...
      CPEs[i].RefCount++;
      // ...and the original. If we didn't remove the old entry, none of the
      // addresses changed, so we don't need another pass.
      return decrementCPEReferenceCount(CPI, CPEMI) ? 2 : 1;
    }
  }
  return 0;
}

/// getUnconditionalBrDisp - Returns the maximum displacement that can fit in
/// the specific unconditional branch instruction.
static inline unsigned getUnconditionalBrDisp(int Opc) {
  switch (Opc) {
  case ARM::tB:
    return ((1<<10)-1)*2;
  case ARM::t2B:
    return ((1<<23)-1)*2;
  default:
    break;
  }

  return ((1<<23)-1)*4;
}
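
// E.g. tB reaches ((1 << 10) - 1) * 2 = 2046 bytes, t2B reaches
// ((1 << 23) - 1) * 2 = 16777214 bytes, and ARM B (the default case) reaches
// ((1 << 23) - 1) * 4 = 33554428 bytes.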

/// findAvailableWater - Look for an existing entry in the WaterList in which
/// we can place the CPE referenced from U so it's within range of U's MI.
/// Returns true if found, false if not. If it returns true, WaterIter
/// is set to the WaterList entry. For Thumb, prefer water that will not
/// introduce padding to water that will. To ensure that this pass
/// terminates, the CPE location for a particular CPUser is only allowed to
/// move to a lower address, so search backward from the end of the list and
/// prefer the first water that is in range.
bool ARMConstantIslands::findAvailableWater(CPUser &U, unsigned UserOffset,
                                            water_iterator &WaterIter,
                                            bool CloserWater) {
  if (WaterList.empty())
    return false;

  unsigned BestGrowth = ~0u;
  // The nearest water without splitting the UserBB is right after it.
  // If the distance is still large (we have a big BB), then we need to split it
  // if we don't converge after certain iterations. This helps the following
  // situation to converge:
  //   BB0:
  //     Big BB
  //   BB1:
  //     Constant Pool
  // When a CP access is out of range, BB0 may be used as water. However,
  // inserting islands between BB0 and BB1 makes other accesses out of range.
  MachineBasicBlock *UserBB = U.MI->getParent();
  unsigned MinNoSplitDisp =
      BBInfo[UserBB->getNumber()].postOffset(getCPELogAlign(U.CPEMI));
  if (CloserWater && MinNoSplitDisp > U.getMaxDisp() / 2)
    return false;
  for (water_iterator IP = std::prev(WaterList.end()), B = WaterList.begin();;
       --IP) {
    MachineBasicBlock* WaterBB = *IP;
    // Check if water is in range and is either at a lower address than the
    // current "high water mark" or a new water block that was created since
    // the previous iteration by inserting an unconditional branch. In the
    // latter case, we want to allow resetting the high water mark back to
    // this new water since we haven't seen it before. Inserting branches
    // should be relatively uncommon and when it does happen, we want to be
    // sure to take advantage of it for all the CPEs near that block, so that
    // we don't insert more branches than necessary.
    // When CloserWater is true, we try to find the lowest address after (or
    // equal to) user MI's BB, regardless of padding growth.
    unsigned Growth;
    if (isWaterInRange(UserOffset, WaterBB, U, Growth) &&
        (WaterBB->getNumber() < U.HighWaterMark->getNumber() ||
         NewWaterList.count(WaterBB) || WaterBB == U.MI->getParent()) &&
        Growth < BestGrowth) {
      // This is the least amount of required padding seen so far.
      BestGrowth = Growth;
      WaterIter = IP;
      DEBUG(dbgs() << "Found water after BB#" << WaterBB->getNumber()
                   << " Growth=" << Growth << '\n');

      if (CloserWater && WaterBB == U.MI->getParent())
        return true;
      // Keep looking unless it is perfect and we're not looking for the lowest
      // possible address.
      if (!CloserWater && BestGrowth == 0)
        return true;
    }
    if (IP == B)
      break;
  }
  return BestGrowth != ~0u;
}
1357
1358 /// createNewWater - No existing WaterList entry will work for
1359 /// CPUsers[CPUserIndex], so create a place to put the CPE. The end of the
1360 /// block is used if in range, and the conditional branch munged so control
1361 /// flow is correct. Otherwise the block is split to create a hole with an
1362 /// unconditional branch around it. In either case NewMBB is set to a
1363 /// block following which the new island can be inserted (the WaterList
1364 /// is not adjusted).
createNewWater(unsigned CPUserIndex,unsigned UserOffset,MachineBasicBlock * & NewMBB)1365 void ARMConstantIslands::createNewWater(unsigned CPUserIndex,
1366 unsigned UserOffset,
1367 MachineBasicBlock *&NewMBB) {
1368 CPUser &U = CPUsers[CPUserIndex];
1369 MachineInstr *UserMI = U.MI;
1370 MachineInstr *CPEMI = U.CPEMI;
1371 unsigned CPELogAlign = getCPELogAlign(CPEMI);
1372 MachineBasicBlock *UserMBB = UserMI->getParent();
1373 const BasicBlockInfo &UserBBI = BBInfo[UserMBB->getNumber()];
1374
1375 // If the block does not end in an unconditional branch already, and if the
1376 // end of the block is within range, make new water there. (The addition
1377 // below is for the unconditional branch we will be adding: 4 bytes on ARM +
1378 // Thumb2, 2 on Thumb1.
  if (BBHasFallthrough(UserMBB)) {
    // Size of branch to insert.
    unsigned Delta = isThumb1 ? 2 : 4;
    // Compute the offset where the CPE will begin.
    unsigned CPEOffset = UserBBI.postOffset(CPELogAlign) + Delta;

    if (isOffsetInRange(UserOffset, CPEOffset, U)) {
      DEBUG(dbgs() << "Split at end of BB#" << UserMBB->getNumber()
                   << format(", expected CPE offset %#x\n", CPEOffset));
      NewMBB = &*++UserMBB->getIterator();
      // Add an unconditional branch from UserMBB to fallthrough block. Record
      // it for branch lengthening; this new branch will not get out of range,
      // but if the preceding conditional branch is out of range, the targets
      // will be exchanged, and the altered branch may be out of range, so the
      // machinery has to know about it.
      int UncondBr = isThumb ? ((isThumb2) ? ARM::t2B : ARM::tB) : ARM::B;
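      // Thumb branches are predicable, so they carry two trailing predicate
      // operands (a condition code and an optional CC register); plain
      // ARM::B takes none, hence the two BuildMI forms below.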
      if (!isThumb)
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB);
      else
        BuildMI(UserMBB, DebugLoc(), TII->get(UncondBr)).addMBB(NewMBB)
            .addImm(ARMCC::AL).addReg(0);
      unsigned MaxDisp = getUnconditionalBrDisp(UncondBr);
      ImmBranches.push_back(ImmBranch(&UserMBB->back(),
                                      MaxDisp, false, UncondBr));
      computeBlockSize(UserMBB);
      adjustBBOffsetsAfter(UserMBB);
      return;
    }
  }

  // What a big block. Find a place within the block to split it. This is a
  // little tricky on Thumb1 since instructions are 2 bytes and constant pool
  // entries are 4 bytes: if instruction I references island CPE, and
  // instruction I+1 references CPE', it will not work well to put CPE as far
  // forward as possible, since then CPE' cannot immediately follow it (that
  // location is 2 bytes farther away from I+1 than CPE was from I) and we'd
  // need to create a new island. So, we make a first guess, then walk through
  // the instructions between the one currently being looked at and the
  // possible insertion point, and make sure any other instructions that
  // reference CPEs will be able to use the same island area; if not, we back
  // up the insertion point.

  // Try to split the block so it's fully aligned. Compute the latest split
  // point where we can add a 4-byte branch instruction, and then align to
  // LogAlign which is the largest possible alignment in the function.
  unsigned LogAlign = MF->getAlignment();
  assert(LogAlign >= CPELogAlign && "Over-aligned constant pool entry");
  unsigned KnownBits = UserBBI.internalKnownBits();
  unsigned UPad = UnknownPadding(LogAlign, KnownBits);
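  // For example, with LogAlign == 3 (8-byte alignment) and only one known
  // low offset bit, UnknownPadding returns (1 << 3) - (1 << 1) == 6 bytes of
  // worst-case padding that must be budgeted for below.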
  unsigned BaseInsertOffset = UserOffset + U.getMaxDisp() - UPad;
  DEBUG(dbgs() << format("Split in middle of big block before %#x",
                         BaseInsertOffset));

  // The 4 in the following is for the unconditional branch we'll be inserting
  // (allows for long branch on Thumb1). Alignment of the island is handled
  // inside isOffsetInRange.
  BaseInsertOffset -= 4;

  DEBUG(dbgs() << format(", adjusted to %#x", BaseInsertOffset)
               << " la=" << LogAlign
               << " kb=" << KnownBits
               << " up=" << UPad << '\n');

  // This could point off the end of the block if we've already got constant
  // pool entries following this block; only the last one is in the water list.
  // Back past any possible branches (allow for a conditional and a maximally
  // long unconditional).
  if (BaseInsertOffset + 8 >= UserBBI.postOffset()) {
    // Ensure BaseInsertOffset is larger than the offset of the instruction
    // following UserMI so that the loop which searches for the split point
    // iterates at least once.
    BaseInsertOffset =
        std::max(UserBBI.postOffset() - UPad - 8,
                 UserOffset + TII->GetInstSizeInBytes(*UserMI) + 1);
    DEBUG(dbgs() << format("Move inside block: %#x\n", BaseInsertOffset));
  }
  unsigned EndInsertOffset = BaseInsertOffset + 4 + UPad +
                             CPEMI->getOperand(2).getImm();
  MachineBasicBlock::iterator MI = UserMI;
  ++MI;
  unsigned CPUIndex = CPUserIndex+1;
  unsigned NumCPUsers = CPUsers.size();
  MachineInstr *LastIT = nullptr;
  for (unsigned Offset = UserOffset + TII->GetInstSizeInBytes(*UserMI);
       Offset < BaseInsertOffset;
       Offset += TII->GetInstSizeInBytes(*MI), MI = std::next(MI)) {
    assert(MI != UserMBB->end() && "Fell off end of block");
    if (CPUIndex < NumCPUsers && CPUsers[CPUIndex].MI == &*MI) {
      CPUser &U = CPUsers[CPUIndex];
      if (!isOffsetInRange(Offset, EndInsertOffset, U)) {
        // Shift the insertion point by one unit of alignment so it is within
        // reach.
        BaseInsertOffset -= 1u << LogAlign;
        EndInsertOffset -= 1u << LogAlign;
      }
      // This is overly conservative, as we don't account for CPEMIs being
      // reused within the block, but it doesn't matter much. Also assume CPEs
      // are added in order with alignment padding. We may eventually be able
      // to pack the aligned CPEs better.
      EndInsertOffset += U.CPEMI->getOperand(2).getImm();
      CPUIndex++;
    }

    // Remember the last IT instruction.
    if (MI->getOpcode() == ARM::t2IT)
      LastIT = &*MI;
  }

  --MI;

  // Avoid splitting an IT block.
  if (LastIT) {
    unsigned PredReg = 0;
    ARMCC::CondCodes CC = getITInstrPredicate(*MI, PredReg);
    if (CC != ARMCC::AL)
      MI = LastIT;
  }

  // We really must not split an IT block.
  DEBUG(unsigned PredReg;
        assert(!isThumb || getITInstrPredicate(*MI, PredReg) == ARMCC::AL));

  NewMBB = splitBlockBeforeInstr(&*MI);
}

/// handleConstantPoolUser - Analyze the specified user, checking to see if it
/// is out-of-range. If so, pick up the constant pool value and move it some
/// place in-range. Return true if we changed any addresses (thus must run
/// another pass of branch lengthening), false otherwise.
bool ARMConstantIslands::handleConstantPoolUser(unsigned CPUserIndex,
                                                bool CloserWater) {
  CPUser &U = CPUsers[CPUserIndex];
  MachineInstr *UserMI = U.MI;
  MachineInstr *CPEMI = U.CPEMI;
  unsigned CPI = getCombinedIndex(CPEMI);
  unsigned Size = CPEMI->getOperand(2).getImm();
  // Compute this only once, it's expensive.
  unsigned UserOffset = getUserOffset(U);

  // See if the current entry is within range, or there is a clone of it
  // in range.
  int result = findInRangeCPEntry(U, UserOffset);
  if (result == 1) return false;
  else if (result == 2) return true;

  // No existing clone of this CPE is within range.
  // We will be generating a new clone. Get a UID for it.
  unsigned ID = AFI->createPICLabelUId();

  // Look for water where we can place this CPE.
  MachineBasicBlock *NewIsland = MF->CreateMachineBasicBlock();
  MachineBasicBlock *NewMBB;
  water_iterator IP;
  if (findAvailableWater(U, UserOffset, IP, CloserWater)) {
    DEBUG(dbgs() << "Found water in range\n");
    MachineBasicBlock *WaterBB = *IP;

    // If the original WaterList entry was "new water" on this iteration,
    // propagate that to the new island. This is just keeping NewWaterList
    // updated to match the WaterList, which will be updated below.
    if (NewWaterList.erase(WaterBB))
      NewWaterList.insert(NewIsland);

    // The new CPE goes before the following block (NewMBB).
    NewMBB = &*++WaterBB->getIterator();
  } else {
    // No water found.
    DEBUG(dbgs() << "No water found\n");
    createNewWater(CPUserIndex, UserOffset, NewMBB);

    // splitBlockBeforeInstr adds to WaterList, which is important when it is
    // called while handling branches so that the water will be seen on the
    // next iteration for constant pools, but in this context, we don't want
    // it. Check for this so it will be removed from the WaterList.
    // Also remove any entry from NewWaterList.
    MachineBasicBlock *WaterBB = &*--NewMBB->getIterator();
    IP = std::find(WaterList.begin(), WaterList.end(), WaterBB);
    if (IP != WaterList.end())
      NewWaterList.erase(WaterBB);

    // We are adding new water. Update NewWaterList.
    NewWaterList.insert(NewIsland);
  }

  // Remove the original WaterList entry; we want subsequent insertions in
  // this vicinity to go after the one we're about to insert. This
  // considerably reduces the number of times we have to move the same CPE
  // more than once and is also important to ensure the algorithm terminates.
  if (IP != WaterList.end())
    WaterList.erase(IP);

  // Okay, we know we can put an island before NewMBB now, do it!
  MF->insert(NewMBB->getIterator(), NewIsland);

  // Update internal data structures to account for the newly inserted MBB.
  updateForInsertedWaterBlock(NewIsland);

  // Now that we have an island to add the CPE to, clone the original CPE and
  // add it to the island.
  U.HighWaterMark = NewIsland;
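  // A constant pool entry instruction carries three operands, as mirrored by
  // the BuildMI call below: the island's label ID, the constant-pool index
  // being referenced, and the entry's size in bytes (which is why
  // getOperand(2).getImm() is read as the size throughout this file).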
  U.CPEMI = BuildMI(NewIsland, DebugLoc(), CPEMI->getDesc())
                .addImm(ID).addOperand(CPEMI->getOperand(1)).addImm(Size);
  CPEntries[CPI].push_back(CPEntry(U.CPEMI, ID, 1));
  ++NumCPEs;

  // Decrement the old entry, and remove it if refcount becomes 0.
  decrementCPEReferenceCount(CPI, CPEMI);

  // Mark the basic block as aligned as required by the const-pool entry.
  NewIsland->setAlignment(getCPELogAlign(U.CPEMI));

  // Increase the size of the island block to account for the new entry.
  BBInfo[NewIsland->getNumber()].Size += Size;
  adjustBBOffsetsAfter(&*--NewIsland->getIterator());

  // Finally, change the CPI in the instruction operand to be ID.
  for (unsigned i = 0, e = UserMI->getNumOperands(); i != e; ++i)
    if (UserMI->getOperand(i).isCPI()) {
      UserMI->getOperand(i).setIndex(ID);
      break;
    }

  DEBUG(dbgs() << " Moved CPE to #" << ID << " CPI=" << CPI
               << format(" offset=%#x\n",
                         BBInfo[NewIsland->getNumber()].Offset));

  return true;
}

/// removeDeadCPEMI - Remove a dead constant pool entry instruction. Update
/// sizes and offsets of impacted basic blocks.
void ARMConstantIslands::removeDeadCPEMI(MachineInstr *CPEMI) {
  MachineBasicBlock *CPEBB = CPEMI->getParent();
  unsigned Size = CPEMI->getOperand(2).getImm();
  CPEMI->eraseFromParent();
  BBInfo[CPEBB->getNumber()].Size -= Size;
  // All succeeding offsets have the current size value added in, fix this.
  if (CPEBB->empty()) {
    BBInfo[CPEBB->getNumber()].Size = 0;

    // This block no longer needs to be aligned.
    CPEBB->setAlignment(0);
  } else
    // Entries are sorted by descending alignment, so realign from the front.
    CPEBB->setAlignment(getCPELogAlign(&*CPEBB->begin()));

  adjustBBOffsetsAfter(CPEBB);
  // An island has only one predecessor BB and one successor BB. Check if
  // this BB's predecessor jumps directly to this BB's successor. This
  // shouldn't happen currently.
  assert(!BBIsJumpedOver(CPEBB) && "How did this happen?");
  // FIXME: remove the empty blocks after all the work is done?
}

/// removeUnusedCPEntries - Remove constant pool entries whose refcounts
/// are zero.
bool ARMConstantIslands::removeUnusedCPEntries() {
  bool MadeChange = false;
  for (unsigned i = 0, e = CPEntries.size(); i != e; ++i) {
    std::vector<CPEntry> &CPEs = CPEntries[i];
    for (unsigned j = 0, ee = CPEs.size(); j != ee; ++j) {
      if (CPEs[j].RefCount == 0 && CPEs[j].CPEMI) {
        removeDeadCPEMI(CPEs[j].CPEMI);
        CPEs[j].CPEMI = nullptr;
        MadeChange = true;
      }
    }
  }
  return MadeChange;
}

/// isBBInRange - Returns true if the distance between the specified MI and
/// the specified BB can fit in MI's displacement field.
bool ARMConstantIslands::isBBInRange(MachineInstr *MI,
                                     MachineBasicBlock *DestBB,
                                     unsigned MaxDisp) {
  unsigned PCAdj = isThumb ? 4 : 8;
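  // A pc-relative branch sees the PC two instructions ahead of itself: the
  // architectural PC reads as the branch's own address plus 8 in ARM state
  // and plus 4 in Thumb state. That pipeline offset is folded in here.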
  unsigned BrOffset = getOffsetOf(MI) + PCAdj;
  unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;

  DEBUG(dbgs() << "Branch to destination BB#" << DestBB->getNumber()
               << " from BB#" << MI->getParent()->getNumber()
               << " max delta=" << MaxDisp
               << " from " << getOffsetOf(MI) << " to " << DestOffset
               << " offset " << int(DestOffset-BrOffset) << "\t" << *MI);

  if (BrOffset <= DestOffset) {
    // Branch before the Dest.
    if (DestOffset-BrOffset <= MaxDisp)
      return true;
  } else {
    if (BrOffset-DestOffset <= MaxDisp)
      return true;
  }
  return false;
}

/// fixupImmediateBr - Fix up an immediate branch whose destination is too far
/// away to fit in its displacement field.
bool ARMConstantIslands::fixupImmediateBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Check to see if the DestBB is already in-range.
  if (isBBInRange(MI, DestBB, Br.MaxDisp))
    return false;

  if (!Br.isCond)
    return fixupUnconditionalBr(Br);
  return fixupConditionalBr(Br);
}

/// fixupUnconditionalBr - Fix up an unconditional branch whose destination is
/// too far away to fit in its displacement field. If the LR register has been
/// spilled in the epilogue, then we can use BL to implement a far jump.
/// Otherwise, add an intermediate branch instruction to reach the destination.
bool
ARMConstantIslands::fixupUnconditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *MBB = MI->getParent();
  if (!isThumb1)
    llvm_unreachable("fixupUnconditionalBr is Thumb1 only!");

  // Use BL to implement far jump.
  Br.MaxDisp = (1 << 21) * 2;
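  // tBfar is lowered to a 32-bit Thumb BL, whose signed halfword offset
  // gives a +/-4MB branch range, i.e. (1 << 21) * 2 bytes.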
  MI->setDesc(TII->get(ARM::tBfar));
  BBInfo[MBB->getNumber()].Size += 2;
  adjustBBOffsetsAfter(MBB);
  HasFarJump = true;
  ++NumUBrFixed;

  DEBUG(dbgs() << " Changed B to long jump " << *MI);

  return true;
}

/// fixupConditionalBr - Fix up a conditional branch whose destination is too
/// far away to fit in its displacement field. It is converted to an inverse
/// conditional branch + an unconditional branch to the destination.
bool
ARMConstantIslands::fixupConditionalBr(ImmBranch &Br) {
  MachineInstr *MI = Br.MI;
  MachineBasicBlock *DestBB = MI->getOperand(0).getMBB();

  // Add an unconditional branch to the destination and invert the branch
  // condition to jump over it:
  //   blt L1
  // =>
  //   bge L2
  //   b   L1
  // L2:
  ARMCC::CondCodes CC = (ARMCC::CondCodes)MI->getOperand(1).getImm();
  CC = ARMCC::getOppositeCondition(CC);
  unsigned CCReg = MI->getOperand(2).getReg();

  // If the branch is at the end of its MBB and that has a fall-through block,
  // direct the updated conditional branch to the fall-through block. Otherwise,
  // split the MBB before the next instruction.
  MachineBasicBlock *MBB = MI->getParent();
  MachineInstr *BMI = &MBB->back();
  bool NeedSplit = (BMI != MI) || !BBHasFallthrough(MBB);

  ++NumCBrFixed;
  if (BMI != MI) {
    if (std::next(MachineBasicBlock::iterator(MI)) == std::prev(MBB->end()) &&
        BMI->getOpcode() == Br.UncondBr) {
      // Last MI in the BB is an unconditional branch. Can we simply invert the
      // condition and swap destinations:
      //   beq L1
      //   b   L2
      // =>
      //   bne L2
      //   b   L1
      MachineBasicBlock *NewDest = BMI->getOperand(0).getMBB();
      if (isBBInRange(MI, NewDest, Br.MaxDisp)) {
        DEBUG(dbgs() << " Invert Bcc condition and swap its destination with "
                     << *BMI);
        BMI->getOperand(0).setMBB(DestBB);
        MI->getOperand(0).setMBB(NewDest);
        MI->getOperand(1).setImm(CC);
        return true;
      }
    }
  }

  if (NeedSplit) {
    splitBlockBeforeInstr(MI);
    // No need for the branch to the next block. We're adding an unconditional
    // branch to the destination.
    int delta = TII->GetInstSizeInBytes(MBB->back());
    BBInfo[MBB->getNumber()].Size -= delta;
    MBB->back().eraseFromParent();
    // BBInfo[SplitBB].Offset is wrong temporarily, fixed below.
  }
  MachineBasicBlock *NextBB = &*++MBB->getIterator();

  DEBUG(dbgs() << " Insert B to BB#" << DestBB->getNumber()
               << " also invert condition and change dest. to BB#"
               << NextBB->getNumber() << "\n");

  // Insert a new conditional branch and a new unconditional branch.
  // Also update the ImmBranch as well as adding a new entry for the new branch.
  BuildMI(MBB, DebugLoc(), TII->get(MI->getOpcode()))
      .addMBB(NextBB).addImm(CC).addReg(CCReg);
  Br.MI = &MBB->back();
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
  if (isThumb)
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB)
        .addImm(ARMCC::AL).addReg(0);
  else
    BuildMI(MBB, DebugLoc(), TII->get(Br.UncondBr)).addMBB(DestBB);
  BBInfo[MBB->getNumber()].Size += TII->GetInstSizeInBytes(MBB->back());
  unsigned MaxDisp = getUnconditionalBrDisp(Br.UncondBr);
  ImmBranches.push_back(ImmBranch(&MBB->back(), MaxDisp, false, Br.UncondBr));

  // Remove the old conditional branch. It may or may not still be in MBB.
  BBInfo[MI->getParent()->getNumber()].Size -= TII->GetInstSizeInBytes(*MI);
  MI->eraseFromParent();
  adjustBBOffsetsAfter(MBB);
  return true;
}

/// undoLRSpillRestore - Remove Thumb push / pop instructions that only spill
/// LR / restore LR to pc. FIXME: This is done here because it's only possible
/// to do this if tBfar is not used.
bool ARMConstantIslands::undoLRSpillRestore() {
  bool MadeChange = false;
  for (unsigned i = 0, e = PushPopMIs.size(); i != e; ++i) {
    MachineInstr *MI = PushPopMIs[i];
    // First two operands are predicates.
    if (MI->getOpcode() == ARM::tPOP_RET &&
        MI->getOperand(2).getReg() == ARM::PC &&
        MI->getNumExplicitOperands() == 3) {
      // Create the new insn and copy the predicate from the old.
      BuildMI(MI->getParent(), MI->getDebugLoc(), TII->get(ARM::tBX_RET))
          .addOperand(MI->getOperand(0))
          .addOperand(MI->getOperand(1));
      MI->eraseFromParent();
      MadeChange = true;
    }
  }
  return MadeChange;
}

// mayOptimizeThumb2Instruction - Returns true if optimizeThumb2Instructions
// below may shrink MI.
bool
ARMConstantIslands::mayOptimizeThumb2Instruction(const MachineInstr *MI) const {
  switch (MI->getOpcode()) {
  // optimizeThumb2Instructions.
  case ARM::t2LEApcrel:
  case ARM::t2LDRpci:
  // optimizeThumb2Branches.
  case ARM::t2B:
  case ARM::t2Bcc:
  case ARM::tBcc:
  // optimizeThumb2JumpTables.
  case ARM::t2BR_JT:
    return true;
  }
  return false;
}

bool ARMConstantIslands::optimizeThumb2Instructions() {
  bool MadeChange = false;

  // Shrink ADR and LDR from constantpool.
  for (unsigned i = 0, e = CPUsers.size(); i != e; ++i) {
    CPUser &U = CPUsers[i];
    unsigned Opcode = U.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2LEApcrel:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLEApcrel;
        Bits = 8;
        Scale = 4;
      }
      break;
    case ARM::t2LDRpci:
      if (isARMLowRegister(U.MI->getOperand(0).getReg())) {
        NewOpc = ARM::tLDRpci;
        Bits = 8;
        Scale = 4;
      }
      break;
    }

    if (!NewOpc)
      continue;

    unsigned UserOffset = getUserOffset(U);
    unsigned MaxOffs = ((1 << Bits) - 1) * Scale;
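    // For tLDRpci / tLEApcrel that is an 8-bit immediate scaled by 4, i.e.
    // 255 * 4 == 1020 bytes of forward reach from the aligned PC.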

    // Be conservative with inline asm.
    if (!U.KnownAlignment)
      MaxOffs -= 2;

    // FIXME: Check if offset is multiple of scale if scale is not 4.
    if (isCPEntryInRange(U.MI, UserOffset, U.CPEMI, MaxOffs, false, true)) {
      DEBUG(dbgs() << "Shrink: " << *U.MI);
      U.MI->setDesc(TII->get(NewOpc));
      MachineBasicBlock *MBB = U.MI->getParent();
      BBInfo[MBB->getNumber()].Size -= 2;
      adjustBBOffsetsAfter(MBB);
      ++NumT2CPShrunk;
      MadeChange = true;
    }
  }

  return MadeChange;
}

bool ARMConstantIslands::optimizeThumb2Branches() {
  bool MadeChange = false;

  // The order in which branches appear in ImmBranches is approximately their
  // order within the function body. By visiting later branches first, we reduce
  // the distance between earlier forward branches and their targets, making it
  // more likely that the cbn?z optimization, which can only apply to forward
  // branches, will succeed.
  for (unsigned i = ImmBranches.size(); i != 0; --i) {
    ImmBranch &Br = ImmBranches[i-1];
    unsigned Opcode = Br.MI->getOpcode();
    unsigned NewOpc = 0;
    unsigned Scale = 1;
    unsigned Bits = 0;
    switch (Opcode) {
    default: break;
    case ARM::t2B:
      NewOpc = ARM::tB;
      Bits = 11;
      Scale = 2;
      break;
    case ARM::t2Bcc:
      NewOpc = ARM::tBcc;
      Bits = 8;
      Scale = 2;
      break;
    }
    if (NewOpc) {
      unsigned MaxOffs = ((1 << (Bits-1))-1) * Scale;
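      // The displacement is signed, so one bit goes to the sign: tB
      // (Bits=11, Scale=2) reaches at most ((1 << 10) - 1) * 2 == 2046 bytes
      // forward, and tBcc (Bits=8, Scale=2) at most 254 bytes.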
      MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
      if (isBBInRange(Br.MI, DestBB, MaxOffs)) {
        DEBUG(dbgs() << "Shrink branch: " << *Br.MI);
        Br.MI->setDesc(TII->get(NewOpc));
        MachineBasicBlock *MBB = Br.MI->getParent();
        BBInfo[MBB->getNumber()].Size -= 2;
        adjustBBOffsetsAfter(MBB);
        ++NumT2BrShrunk;
        MadeChange = true;
      }
    }

    Opcode = Br.MI->getOpcode();
    if (Opcode != ARM::tBcc)
      continue;

    // If the conditional branch doesn't kill CPSR, then CPSR can be liveout
    // so this transformation is not safe.
    if (!Br.MI->killsRegister(ARM::CPSR))
      continue;

    NewOpc = 0;
    unsigned PredReg = 0;
    ARMCC::CondCodes Pred = getInstrPredicate(*Br.MI, PredReg);
    if (Pred == ARMCC::EQ)
      NewOpc = ARM::tCBZ;
    else if (Pred == ARMCC::NE)
      NewOpc = ARM::tCBNZ;
    if (!NewOpc)
      continue;
    MachineBasicBlock *DestBB = Br.MI->getOperand(0).getMBB();
    // Check if the distance is within 126. Reduce the starting offset by 2
    // because the cmp will be eliminated.
    unsigned BrOffset = getOffsetOf(Br.MI) + 4 - 2;
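    // CBZ / CBNZ encode a 6-bit unsigned halfword offset, so they can only
    // branch forward, at most 126 bytes past the aligned PC.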
    unsigned DestOffset = BBInfo[DestBB->getNumber()].Offset;
    if (BrOffset < DestOffset && (DestOffset - BrOffset) <= 126) {
      MachineBasicBlock::iterator CmpMI = Br.MI;
      if (CmpMI != Br.MI->getParent()->begin()) {
        --CmpMI;
        if (CmpMI->getOpcode() == ARM::tCMPi8) {
          unsigned Reg = CmpMI->getOperand(0).getReg();
          Pred = getInstrPredicate(*CmpMI, PredReg);
          if (Pred == ARMCC::AL &&
              CmpMI->getOperand(1).getImm() == 0 &&
              isARMLowRegister(Reg)) {
            MachineBasicBlock *MBB = Br.MI->getParent();
            DEBUG(dbgs() << "Fold: " << *CmpMI << " and: " << *Br.MI);
            MachineInstr *NewBR =
                BuildMI(*MBB, CmpMI, Br.MI->getDebugLoc(), TII->get(NewOpc))
                    .addReg(Reg)
                    .addMBB(DestBB, Br.MI->getOperand(0).getTargetFlags());
            CmpMI->eraseFromParent();
            Br.MI->eraseFromParent();
            Br.MI = NewBR;
            BBInfo[MBB->getNumber()].Size -= 2;
            adjustBBOffsetsAfter(MBB);
            ++NumCBZ;
            MadeChange = true;
          }
        }
      }
    }
  }

  return MadeChange;
}

static bool isSimpleIndexCalc(MachineInstr &I, unsigned EntryReg,
                              unsigned BaseReg) {
  if (I.getOpcode() != ARM::t2ADDrs)
    return false;

  if (I.getOperand(0).getReg() != EntryReg)
    return false;

  if (I.getOperand(1).getReg() != BaseReg)
    return false;

  // FIXME: what about CC and IdxReg?
  return true;
}

/// \brief While trying to form a TBB/TBH instruction, we may (if the table
/// doesn't immediately follow the BR_JT) need access to the start of the
/// jump-table. We know one instruction that produces such a register; this
/// function works out whether that definition can be preserved to the BR_JT,
/// possibly by removing an intervening addition (which is usually needed to
/// calculate the actual entry to jump to).
bool ARMConstantIslands::preserveBaseRegister(MachineInstr *JumpMI,
                                              MachineInstr *LEAMI,
                                              unsigned &DeadSize,
                                              bool &CanDeleteLEA,
                                              bool &BaseRegKill) {
  if (JumpMI->getParent() != LEAMI->getParent())
    return false;

  // Now we hope that we have at least these instructions in the basic block:
  //   BaseReg = t2LEA ...
  //   [...]
  //   EntryReg = t2ADDrs BaseReg, ...
  //   [...]
  //   t2BR_JT EntryReg
  //
  // We have to be very conservative about what we recognise here though. The
  // main perturbing factors to watch out for are:
  //   + Spills at any point in the chain: not direct problems but we would
  //     expect a blocking Def of the spilled register so in practice what we
  //     can do is limited.
  //   + EntryReg == BaseReg: this is the one situation we should allow a Def
  //     of BaseReg, but only if the t2ADDrs can be removed.
  //   + Some instruction other than t2ADDrs computing the entry. Not seen in
  //     the wild, but we should be careful.
  unsigned EntryReg = JumpMI->getOperand(0).getReg();
  unsigned BaseReg = LEAMI->getOperand(0).getReg();

  CanDeleteLEA = true;
  BaseRegKill = false;
  MachineInstr *RemovableAdd = nullptr;
  MachineBasicBlock::iterator I(LEAMI);
  for (++I; &*I != JumpMI; ++I) {
    if (isSimpleIndexCalc(*I, EntryReg, BaseReg)) {
      RemovableAdd = &*I;
      break;
    }

    for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = I->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == BaseReg)
        return false;
      if (MO.isUse() && MO.getReg() == BaseReg) {
        BaseRegKill = BaseRegKill || MO.isKill();
        CanDeleteLEA = false;
      }
    }
  }

  if (!RemovableAdd)
    return true;

  // Check the add really is removable, and that nothing else in the block
  // clobbers BaseReg.
  for (++I; &*I != JumpMI; ++I) {
    for (unsigned K = 0, E = I->getNumOperands(); K != E; ++K) {
      const MachineOperand &MO = I->getOperand(K);
      if (!MO.isReg() || !MO.getReg())
        continue;
      if (MO.isDef() && MO.getReg() == BaseReg)
        return false;
      if (MO.isUse() && MO.getReg() == EntryReg)
        RemovableAdd = nullptr;
    }
  }

  if (RemovableAdd) {
    RemovableAdd->eraseFromParent();
    DeadSize += 4;
  } else if (BaseReg == EntryReg) {
    // The add wasn't removable, but clobbered the base for the TBB. So we
    // can't preserve it.
    return false;
  }

  // We reached the end of the block without seeing another definition of
  // BaseReg (except, possibly the t2ADDrs, which was removed). BaseReg can be
  // used in the TBB/TBH if necessary.
  return true;
}

/// \brief Returns whether CPEMI is the first instruction in the block
/// immediately following JTMI (assumed to be a TBB or TBH terminator). If so,
/// we can switch the first register to PC and usually remove the address
/// calculation that preceded it.
static bool jumpTableFollowsTB(MachineInstr *JTMI, MachineInstr *CPEMI) {
  MachineFunction::iterator MBB = JTMI->getParent()->getIterator();
  MachineFunction *MF = MBB->getParent();
  ++MBB;

  return MBB != MF->end() && MBB->begin() != MBB->end() &&
         &*MBB->begin() == CPEMI;
}

/// optimizeThumb2JumpTables - Use tbb / tbh instructions to generate smaller
/// jumptables when it's possible.
bool ARMConstantIslands::optimizeThumb2JumpTables() {
  bool MadeChange = false;

  // FIXME: After the tables are shrunk, can we get rid of some of the
  // constantpool tables?
  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
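    // A predicable instruction carries two trailing predicate operands, so
    // the jump-table index operand sits two slots from the end; otherwise it
    // is the last operand.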
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    bool ByteOk = true;
    bool HalfWordOk = true;
    unsigned JTOffset = getOffsetOf(MI) + 4;
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      unsigned DstOffset = BBInfo[MBB->getNumber()].Offset;
      // Negative offset is not ok. FIXME: We should change BB layout to make
      // sure all the branches are forward.
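      // A TBB entry is an unsigned byte scaled by 2, reaching at most
      // ((1 << 8) - 1) * 2 == 510 bytes forward; a TBH entry is an unsigned
      // halfword scaled by 2, reaching at most 131070 bytes.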
      if (ByteOk && (DstOffset - JTOffset) > ((1<<8)-1)*2)
        ByteOk = false;
      unsigned TBHLimit = ((1<<16)-1)*2;
      if (HalfWordOk && (DstOffset - JTOffset) > TBHLimit)
        HalfWordOk = false;
      if (!ByteOk && !HalfWordOk)
        break;
    }

    if (!ByteOk && !HalfWordOk)
      continue;

    MachineBasicBlock *MBB = MI->getParent();
    if (!MI->getOperand(0).isKill()) // FIXME: needed now?
      continue;
    unsigned IdxReg = MI->getOperand(1).getReg();
    bool IdxRegKill = MI->getOperand(1).isKill();

    CPUser &User = CPUsers[JumpTableUserIndices[JTI]];
    unsigned DeadSize = 0;
    bool CanDeleteLEA = false;
    bool BaseRegKill = false;
    bool PreservedBaseReg =
        preserveBaseRegister(MI, User.MI, DeadSize, CanDeleteLEA, BaseRegKill);

    if (!jumpTableFollowsTB(MI, User.CPEMI) && !PreservedBaseReg)
      continue;

    DEBUG(dbgs() << "Shrink JT: " << *MI);
    MachineInstr *CPEMI = User.CPEMI;
    unsigned Opc = ByteOk ? ARM::t2TBB_JT : ARM::t2TBH_JT;
    MachineBasicBlock::iterator MI_JT = MI;
    MachineInstr *NewJTMI =
        BuildMI(*MBB, MI_JT, MI->getDebugLoc(), TII->get(Opc))
            .addReg(User.MI->getOperand(0).getReg(),
                    getKillRegState(BaseRegKill))
            .addReg(IdxReg, getKillRegState(IdxRegKill))
            .addJumpTableIndex(JTI, JTOP.getTargetFlags())
            .addImm(CPEMI->getOperand(0).getImm());
    DEBUG(dbgs() << "BB#" << MBB->getNumber() << ": " << *NewJTMI);

    unsigned JTOpc = ByteOk ? ARM::JUMPTABLE_TBB : ARM::JUMPTABLE_TBH;
    CPEMI->setDesc(TII->get(JTOpc));

    if (jumpTableFollowsTB(MI, User.CPEMI)) {
      NewJTMI->getOperand(0).setReg(ARM::PC);
      NewJTMI->getOperand(0).setIsKill(false);

      if (CanDeleteLEA) {
        User.MI->eraseFromParent();
        DeadSize += 4;

        // The LEA was eliminated, the TBB instruction becomes the only new
        // user of the jump table.
        User.MI = NewJTMI;
        User.MaxDisp = 4;
        User.NegOk = false;
        User.IsSoImm = false;
        User.KnownAlignment = false;
      } else {
        // The LEA couldn't be eliminated, so we must add another CPUser to
        // record the TBB or TBH use.
        int CPEntryIdx = JumpTableEntryIndices[JTI];
        auto &CPEs = CPEntries[CPEntryIdx];
        auto Entry = std::find_if(CPEs.begin(), CPEs.end(), [&](CPEntry &E) {
          return E.CPEMI == User.CPEMI;
        });
        ++Entry->RefCount;
        CPUsers.emplace_back(CPUser(NewJTMI, User.CPEMI, 4, false, false));
      }
    }

    unsigned NewSize = TII->GetInstSizeInBytes(*NewJTMI);
    unsigned OrigSize = TII->GetInstSizeInBytes(*MI);
    MI->eraseFromParent();

    int Delta = OrigSize - NewSize + DeadSize;
    BBInfo[MBB->getNumber()].Size -= Delta;
    adjustBBOffsetsAfter(MBB);

    ++NumTBs;
    MadeChange = true;
  }

  return MadeChange;
}

/// reorderThumb2JumpTables - Adjust the function's block layout to ensure that
/// jump tables always branch forwards, since that's what tbb and tbh need.
bool ARMConstantIslands::reorderThumb2JumpTables() {
  bool MadeChange = false;

  MachineJumpTableInfo *MJTI = MF->getJumpTableInfo();
  if (!MJTI) return false;

  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  for (unsigned i = 0, e = T2JumpTables.size(); i != e; ++i) {
    MachineInstr *MI = T2JumpTables[i];
    const MCInstrDesc &MCID = MI->getDesc();
    unsigned NumOps = MCID.getNumOperands();
    unsigned JTOpIdx = NumOps - (MI->isPredicable() ? 2 : 1);
    MachineOperand JTOP = MI->getOperand(JTOpIdx);
    unsigned JTI = JTOP.getIndex();
    assert(JTI < JT.size());

    // We prefer it if target blocks for the jump table come after the jump
    // instruction so we can use TB[BH]. Loop through the target blocks
    // and try to adjust them so that this holds.
    int JTNumber = MI->getParent()->getNumber();
    const std::vector<MachineBasicBlock*> &JTBBs = JT[JTI].MBBs;
    for (unsigned j = 0, ee = JTBBs.size(); j != ee; ++j) {
      MachineBasicBlock *MBB = JTBBs[j];
      int DTNumber = MBB->getNumber();

      if (DTNumber < JTNumber) {
        // The destination precedes the switch. Try to move the block forward
        // so we have a positive offset.
        MachineBasicBlock *NewBB =
            adjustJTTargetBlockForward(MBB, MI->getParent());
        if (NewBB)
          MJTI->ReplaceMBBInJumpTable(JTI, JTBBs[j], NewBB);
        MadeChange = true;
      }
    }
  }

  return MadeChange;
}

MachineBasicBlock *ARMConstantIslands::
adjustJTTargetBlockForward(MachineBasicBlock *BB, MachineBasicBlock *JTBB) {
  // If the destination block is terminated by an unconditional branch,
  // try to move it; otherwise, create a new block following the jump
  // table that branches back to the actual target. This is a very simple
  // heuristic. FIXME: We can definitely improve it.
  MachineBasicBlock *TBB = nullptr, *FBB = nullptr;
  SmallVector<MachineOperand, 4> Cond;
  SmallVector<MachineOperand, 4> CondPrior;
  MachineFunction::iterator BBi = BB->getIterator();
  MachineFunction::iterator OldPrior = std::prev(BBi);

  // If the block terminator isn't analyzable, don't try to move the block.
  bool B = TII->analyzeBranch(*BB, TBB, FBB, Cond);

  // If the block ends in an unconditional branch, move it. The prior block
  // has to have an analyzable terminator for us to move this one. Be paranoid
  // and make sure we're not trying to move the entry block of the function.
  if (!B && Cond.empty() && BB != &MF->front() &&
      !TII->analyzeBranch(*OldPrior, TBB, FBB, CondPrior)) {
    BB->moveAfter(JTBB);
    OldPrior->updateTerminator();
    BB->updateTerminator();
    // Update numbering to account for the block being moved.
    MF->RenumberBlocks();
    ++NumJTMoved;
    return nullptr;
  }

  // Create a new MBB for the code after the jump BB.
  MachineBasicBlock *NewBB =
      MF->CreateMachineBasicBlock(JTBB->getBasicBlock());
  MachineFunction::iterator MBBI = ++JTBB->getIterator();
  MF->insert(MBBI, NewBB);

  // Add an unconditional branch from NewBB to BB.
  // There doesn't seem to be meaningful DebugInfo available; this doesn't
  // correspond directly to anything in the source.
  assert(isThumb2 && "Adjusting for TB[BH] but not in Thumb2?");
  BuildMI(NewBB, DebugLoc(), TII->get(ARM::t2B)).addMBB(BB)
      .addImm(ARMCC::AL).addReg(0);

  // Update internal data structures to account for the newly inserted MBB.
  MF->RenumberBlocks(NewBB);

  // Update the CFG.
  NewBB->addSuccessor(BB);
  JTBB->replaceSuccessor(BB, NewBB);

  ++NumJTInserted;
  return NewBB;
}