//===-- Thumb2SizeReduction.cpp - Thumb2 code size reduction pass -*- C++ -*-=//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "ARM.h"
#include "ARMBaseInstrInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "Thumb2InstrInfo.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SmallSet.h"       // For SmallSet in canAddPseudoFlagDep
#include "llvm/ADT/SmallVector.h"    // For the BlockInfo vector
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/MachineFunctionPass.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/IR/Function.h"        // To access Function attributes
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
using namespace llvm;

#define DEBUG_TYPE "t2-reduce-size"

STATISTIC(NumNarrows,  "Number of 32-bit instrs reduced to 16-bit ones");
STATISTIC(Num2Addrs,   "Number of 32-bit instrs reduced to 2addr 16-bit ones");
STATISTIC(NumLdSts,    "Number of 32-bit load / store reduced to 16-bit ones");

static cl::opt<int> ReduceLimit("t2-reduce-limit",
                                cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimit2Addr("t2-reduce-limit2",
                                     cl::init(-1), cl::Hidden);
static cl::opt<int> ReduceLimitLdSt("t2-reduce-limit3",
                                    cl::init(-1), cl::Hidden);
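
// Note: these hidden flags are debugging aids. For example, passing
// -t2-reduce-limit=N to llc (or via -mllvm when driving clang) caps the
// number of plain narrowings performed, which makes it possible to bisect
// a miscompile down to a single reduction. The -1 default means "no limit".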

namespace {
  /// ReduceTable - A static table with information on mapping from wide
  /// opcodes to narrow ones.
  struct ReduceEntry {
    uint16_t WideOpc;      // Wide opcode
    uint16_t NarrowOpc1;   // Narrow opcode to transform to
    uint16_t NarrowOpc2;   // Narrow opcode when it's two-address
    uint8_t  Imm1Limit;    // Limit of immediate field (bits)
    uint8_t  Imm2Limit;    // Limit of immediate field when it's two-address
    unsigned LowRegs1 : 1; // Only possible if low-registers are used
    unsigned LowRegs2 : 1; // Only possible if low-registers are used (2addr)
    unsigned PredCC1  : 2; // 0 - If predicated, cc is on and vice versa.
                           // 1 - No cc field.
                           // 2 - Always set CPSR.
    unsigned PredCC2  : 2;
    unsigned PartFlag : 1; // 16-bit instruction does partial flag update
    unsigned Special  : 1; // Needs to be dealt with specially
    unsigned AvoidMovs: 1; // Avoid movs with shifter operand (for Swift)
  };
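
  // Reading an entry (illustrative, not an exhaustive description): the
  // t2ADDri row below says the 32-bit "add.w rd, rn, #imm" can become the
  // 16-bit tADDi3 (3-bit immediate) or, when the destination is tied to the
  // source, tADDi8 (8-bit immediate), provided only low registers are used.
  // So, assuming the predicate/CPSR checks later in this file pass:
  //
  //   add.w r0, r1, #7    @ 4 bytes
  //   adds  r0, r1, #7    @ 2 bytes (tADDi3)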

  static const ReduceEntry ReduceTable[] = {
  // Wide,        Narrow1,      Narrow2,     imm1,imm2, lo1, lo2, P/C,PF,S,AM
  { ARM::t2ADCrr, 0,            ARM::tADC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2ADDri, ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  0,0, 0,1,0 },
  { ARM::t2ADDrr, ARM::tADDrr,  ARM::tADDhirr, 0,   0,   1,   0,  0,1, 0,0,0 },
  { ARM::t2ADDSri,ARM::tADDi3,  ARM::tADDi8,   3,   8,   1,   1,  2,2, 0,1,0 },
  { ARM::t2ADDSrr,ARM::tADDrr,  0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2ANDrr, 0,            ARM::tAND,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2ASRri, ARM::tASRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2ASRrr, 0,            ARM::tASRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2BICrr, 0,            ARM::tBIC,     0,   0,   0,   1,  0,0, 1,0,0 },
  // FIXME: Disable CMN, as CCodes are backwards from compare expectations.
  //{ ARM::t2CMNrr, ARM::tCMN,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMNzrr, ARM::tCMNz,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPri, ARM::tCMPi8,  0,             8,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2CMPrr, ARM::tCMPhir, 0,             0,   0,   0,   0,  2,0, 0,1,0 },
  { ARM::t2EORrr, 0,            ARM::tEOR,     0,   0,   0,   1,  0,0, 1,0,0 },
  // FIXME: adr.n immediate offset must be multiple of 4.
  //{ ARM::t2LEApcrelJT,ARM::tLEApcrelJT, 0,   0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2LSLri, ARM::tLSLri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSLrr, 0,            ARM::tLSLrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2LSRri, ARM::tLSRri,  0,             5,   0,   1,   0,  0,0, 1,0,1 },
  { ARM::t2LSRrr, 0,            ARM::tLSRrr,   0,   0,   0,   1,  0,0, 1,0,1 },
  { ARM::t2MOVi,  ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,0,0 },
  { ARM::t2MOVi16,ARM::tMOVi8,  0,             8,   0,   1,   0,  0,0, 1,1,0 },
  // FIXME: Do we need the 16-bit 'S' variant?
  { ARM::t2MOVr,  ARM::tMOVr,   0,             0,   0,   0,   0,  1,0, 0,0,0 },
  { ARM::t2MUL,   0,            ARM::tMUL,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2MVNr,  ARM::tMVN,    0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2ORRrr, 0,            ARM::tORR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2REV,   ARM::tREV,    0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REV16, ARM::tREV16,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2REVSH, ARM::tREVSH,  0,             0,   0,   1,   0,  1,0, 0,0,0 },
  { ARM::t2RORrr, 0,            ARM::tROR,     0,   0,   0,   1,  0,0, 1,0,0 },
  { ARM::t2RSBri, ARM::tRSB,    0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2RSBSri,ARM::tRSB,    0,             0,   0,   1,   0,  2,0, 0,1,0 },
  { ARM::t2SBCrr, 0,            ARM::tSBC,     0,   0,   0,   1,  0,0, 0,0,0 },
  { ARM::t2SUBri, ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  0,0, 0,0,0 },
  { ARM::t2SUBrr, ARM::tSUBrr,  0,             0,   0,   1,   0,  0,0, 0,0,0 },
  { ARM::t2SUBSri,ARM::tSUBi3,  ARM::tSUBi8,   3,   8,   1,   1,  2,2, 0,0,0 },
  { ARM::t2SUBSrr,ARM::tSUBrr,  0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2SXTB,  ARM::tSXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2SXTH,  ARM::tSXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2TSTrr, ARM::tTST,    0,             0,   0,   1,   0,  2,0, 0,0,0 },
  { ARM::t2UXTB,  ARM::tUXTB,   0,             0,   0,   1,   0,  1,0, 0,1,0 },
  { ARM::t2UXTH,  ARM::tUXTH,   0,             0,   0,   1,   0,  1,0, 0,1,0 },

  // FIXME: Clean this up after splitting each Thumb load / store opcode
  // into multiple ones.
  { ARM::t2LDRi12,ARM::tLDRi,   ARM::tLDRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRs,  ARM::tLDRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBi12,ARM::tLDRBi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRBs, ARM::tLDRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHi12,ARM::tLDRHi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRHs, ARM::tLDRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSBs,ARM::tLDRSB,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2LDRSHs,ARM::tLDRSH,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRi12,ARM::tSTRi,   ARM::tSTRspi,  5,   8,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRs,  ARM::tSTRr,   0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBi12,ARM::tSTRBi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRBs, ARM::tSTRBr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHi12,ARM::tSTRHi, 0,             5,   0,   1,   0,  0,0, 0,1,0 },
  { ARM::t2STRHs, ARM::tSTRHr,  0,             0,   0,   1,   0,  0,0, 0,1,0 },

  { ARM::t2LDMIA, ARM::tLDMIA,  0,             0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2LDMIA_RET,0,         ARM::tPOP_RET, 0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2LDMIA_UPD,ARM::tLDMIA_UPD,ARM::tPOP,0,   0,   1,   1,  1,1, 0,1,0 },
  // ARM::t2STMIA (with no basereg writeback) has no Thumb1 equivalent.
  // tSTMIA_UPD is a change in semantics which can only be used if the base
  // register is killed. This difference is correctly handled elsewhere.
  { ARM::t2STMIA, ARM::tSTMIA_UPD, 0,          0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2STMIA_UPD,ARM::tSTMIA_UPD, 0,       0,   0,   1,   1,  1,1, 0,1,0 },
  { ARM::t2STMDB_UPD, 0,        ARM::tPUSH,    0,   0,   1,   1,  1,1, 0,1,0 }
  };

  class Thumb2SizeReduce : public MachineFunctionPass {
  public:
    static char ID;
    Thumb2SizeReduce(std::function<bool(const Function &)> Ftor);

    const Thumb2InstrInfo *TII;
    const ARMSubtarget *STI;

    bool runOnMachineFunction(MachineFunction &MF) override;

    const char *getPassName() const override {
      return "Thumb2 instruction size reduction pass";
    }

  private:
    /// ReduceOpcodeMap - Maps wide opcode to index of entry in ReduceTable.
    DenseMap<unsigned, unsigned> ReduceOpcodeMap;

    bool canAddPseudoFlagDep(MachineInstr *Use, bool IsSelfLoop);

    bool VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                         bool is2Addr, ARMCC::CondCodes Pred,
                         bool LiveCPSR, bool &HasCC, bool &CCDead);

    bool ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                         const ReduceEntry &Entry);

    bool ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR, bool IsSelfLoop);

    /// ReduceTo2Addr - Reduce a 32-bit instruction to a 16-bit two-address
    /// instruction.
    bool ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                       const ReduceEntry &Entry, bool LiveCPSR,
                       bool IsSelfLoop);

    /// ReduceToNarrow - Reduce a 32-bit instruction to a 16-bit
    /// non-two-address instruction.
    bool ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                        const ReduceEntry &Entry, bool LiveCPSR,
                        bool IsSelfLoop);

    /// ReduceMI - Attempt to reduce MI, return true on success.
    bool ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                  bool LiveCPSR, bool IsSelfLoop);

    /// ReduceMBB - Reduce width of instructions in the specified basic block.
    bool ReduceMBB(MachineBasicBlock &MBB);

    bool OptimizeSize;
    bool MinimizeSize;

    // Last instruction to define CPSR in the current block.
    MachineInstr *CPSRDef;
    // Was CPSR last defined by a high latency instruction?
    // When CPSRDef is null, this refers to CPSR defs in predecessors.
    bool HighLatencyCPSR;

    struct MBBInfo {
      // The flags leaving this block have high latency.
      bool HighLatencyCPSR;
      // Has this block been visited yet?
      bool Visited;

      MBBInfo() : HighLatencyCPSR(false), Visited(false) {}
    };

    SmallVector<MBBInfo, 8> BlockInfo;

    std::function<bool(const Function &)> PredicateFtor;
  };
  char Thumb2SizeReduce::ID = 0;
}

Thumb2SizeReduce::Thumb2SizeReduce(std::function<bool(const Function &)> Ftor)
    : MachineFunctionPass(ID), PredicateFtor(Ftor) {
  OptimizeSize = MinimizeSize = false;
  for (unsigned i = 0, e = array_lengthof(ReduceTable); i != e; ++i) {
    unsigned FromOpc = ReduceTable[i].WideOpc;
    if (!ReduceOpcodeMap.insert(std::make_pair(FromOpc, i)).second)
      llvm_unreachable("Duplicated entries?");
  }
}

static bool HasImplicitCPSRDef(const MCInstrDesc &MCID) {
  for (const MCPhysReg *Regs = MCID.getImplicitDefs(); *Regs; ++Regs)
    if (*Regs == ARM::CPSR)
      return true;
  return false;
}

// Check for a likely high-latency flag def.
static bool isHighLatencyCPSR(MachineInstr *Def) {
  switch (Def->getOpcode()) {
  case ARM::FMSTAT:
  case ARM::tMUL:
    return true;
  }
  return false;
}

/// canAddPseudoFlagDep - On A9 (and other out-of-order) implementations,
/// 16-bit instructions with the 's' bit set only partially update CPSR. Abort
/// the transformation to avoid adding a false dependency on the last
/// CPSR-setting instruction, which hurts the out-of-order engine's ability to
/// do register renaming.
/// This function checks whether there is a read-after-write dependency between
/// the last instruction that defines CPSR and the current instruction. If
/// there is, then there is no harm done since the instruction cannot be
/// retired before the CPSR-setting instruction anyway.
/// Note, we are not doing full dependency analysis here for the sake of
/// compile time. We're not looking for cases like:
/// r0 = muls ...
/// r1 = add.w r0, ...
/// ...
///    = mul.w r1
/// In this case it would have been ok to narrow the mul.w to muls since there
/// is an indirect RAW dependency between the muls and the mul.w.
bool
Thumb2SizeReduce::canAddPseudoFlagDep(MachineInstr *Use, bool FirstInSelfLoop) {
  // Disable the check for -Oz (aka OptimizeForSizeHarder).
  if (MinimizeSize || !STI->avoidCPSRPartialUpdate())
    return false;

  if (!CPSRDef)
    // If this BB loops back to itself, conservatively avoid narrowing the
    // first instruction that does partial flag update.
    return HighLatencyCPSR || FirstInSelfLoop;

  SmallSet<unsigned, 2> Defs;
  for (const MachineOperand &MO : CPSRDef->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    Defs.insert(Reg);
  }

  for (const MachineOperand &MO : Use->operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    unsigned Reg = MO.getReg();
    if (Defs.count(Reg))
      return false;
  }

  // If the current CPSR has high latency, try to avoid the false dependency.
  if (HighLatencyCPSR)
    return true;

  // tMOVi8 usually doesn't start long dependency chains, and there are a lot
  // of them, so always shrink them when CPSR doesn't have high latency.
  if (Use->getOpcode() == ARM::t2MOVi ||
      Use->getOpcode() == ARM::t2MOVi16)
    return false;

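  // For example (illustrative), given
  //   subs  r0, r1, r2   @ CPSRDef
  //   add.w r3, r4, r5   @ reads none of the subs' outputs
  // narrowing the add.w to the flag-setting "adds" would serialize it behind
  // the subs purely through CPSR.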
  // No read-after-write dependency. The narrowing will add false dependency.
  return true;
}

bool
Thumb2SizeReduce::VerifyPredAndCC(MachineInstr *MI, const ReduceEntry &Entry,
                                  bool is2Addr, ARMCC::CondCodes Pred,
                                  bool LiveCPSR, bool &HasCC, bool &CCDead) {
  if ((is2Addr  && Entry.PredCC2 == 0) ||
      (!is2Addr && Entry.PredCC1 == 0)) {
    if (Pred == ARMCC::AL) {
      // Not predicated, must set CPSR.
      if (!HasCC) {
        // Original instruction was not setting CPSR, but CPSR is not
        // currently live anyway. It's ok to set it. The CPSR def is
        // dead though.
        if (!LiveCPSR) {
          HasCC = true;
          CCDead = true;
          return true;
        }
        return false;
      }
    } else {
      // Predicated, must not set CPSR.
      if (HasCC)
        return false;
    }
  } else if ((is2Addr  && Entry.PredCC2 == 2) ||
             (!is2Addr && Entry.PredCC1 == 2)) {
    // Old opcode has an optional def of CPSR.
    if (HasCC)
      return true;
    // If old opcode does not implicitly define CPSR, then it's not ok since
    // these new opcodes' CPSR def is not meant to be thrown away. e.g. CMP.
    if (!HasImplicitCPSRDef(MI->getDesc()))
      return false;
    HasCC = true;
  } else {
    // 16-bit instruction does not set CPSR.
    if (HasCC)
      return false;
  }

  return true;
}

static bool VerifyLowRegs(MachineInstr *MI) {
  unsigned Opc = MI->getOpcode();
  bool isPCOk = (Opc == ARM::t2LDMIA_RET || Opc == ARM::t2LDMIA_UPD);
  bool isLROk = (Opc == ARM::t2STMDB_UPD);
  bool isSPOk = isPCOk || isLROk;
  for (unsigned i = 0, e = MI->getNumOperands(); i != e; ++i) {
    const MachineOperand &MO = MI->getOperand(i);
    if (!MO.isReg() || MO.isImplicit())
      continue;
    unsigned Reg = MO.getReg();
    if (Reg == 0 || Reg == ARM::CPSR)
      continue;
    if (isPCOk && Reg == ARM::PC)
      continue;
    if (isLROk && Reg == ARM::LR)
      continue;
    if (Reg == ARM::SP) {
      if (isSPOk)
        continue;
      if (i == 1 && (Opc == ARM::t2LDRi12 || Opc == ARM::t2STRi12))
        // Special case for these ldr / str with sp as base register.
        continue;
    }
    if (!isARMLowRegister(Reg))
      return false;
  }
  return true;
}
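
// For example (illustrative), "ldr.w r8, [r0, #4]" fails the check above
// because r8 is a high register: most 16-bit Thumb encodings only have
// 3-bit register fields (r0-r7), with SP, LR and PC allowed only in the
// special cases handled explicitly.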

bool
Thumb2SizeReduce::ReduceLoadStore(MachineBasicBlock &MBB, MachineInstr *MI,
                                  const ReduceEntry &Entry) {
  if (ReduceLimitLdSt != -1 && ((int)NumLdSts >= ReduceLimitLdSt))
    return false;

  unsigned Scale = 1;
  bool HasImmOffset = false;
  bool HasShift = false;
  bool HasOffReg = true;
  bool isLdStMul = false;
  unsigned Opc = Entry.NarrowOpc1;
  unsigned OpNum = 3; // First 'rest' of operands.
  uint8_t  ImmLimit = Entry.Imm1Limit;

  switch (Entry.WideOpc) {
  default:
    llvm_unreachable("Unexpected Thumb2 load / store opcode!");
  case ARM::t2LDRi12:
  case ARM::t2STRi12:
    if (MI->getOperand(1).getReg() == ARM::SP) {
      Opc = Entry.NarrowOpc2;
      ImmLimit = Entry.Imm2Limit;
    }

    Scale = 4;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRBi12:
  case ARM::t2STRBi12:
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRHi12:
  case ARM::t2STRHi12:
    Scale = 2;
    HasImmOffset = true;
    HasOffReg = false;
    break;
  case ARM::t2LDRs:
  case ARM::t2LDRBs:
  case ARM::t2LDRHs:
  case ARM::t2LDRSBs:
  case ARM::t2LDRSHs:
  case ARM::t2STRs:
  case ARM::t2STRBs:
  case ARM::t2STRHs:
    HasShift = true;
    OpNum = 4;
    break;
  case ARM::t2LDMIA: {
    unsigned BaseReg = MI->getOperand(0).getReg();
    assert(isARMLowRegister(BaseReg));

    // For the non-writeback version (this one), the base register must be
    // one of the registers being loaded.
    bool isOK = false;
    for (unsigned i = 3; i < MI->getNumOperands(); ++i) {
      if (MI->getOperand(i).getReg() == BaseReg) {
        isOK = true;
        break;
      }
    }

    if (!isOK)
      return false;

    OpNum = 0;
    isLdStMul = true;
    break;
  }
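  // For example (illustrative): "ldm.w r0, {r0, r3}" can become the 16-bit
  // "ldm r0, {r0, r3}" because r0 is overwritten by the load, making the
  // absence of writeback unobservable; "ldm.w r0, {r1, r3}" is rejected,
  // since the 16-bit form would write back the base and change semantics.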
  case ARM::t2STMIA: {
    // If the base register is killed, we don't care what its value is after
    // the instruction, so we can use an updating STMIA.
    if (!MI->getOperand(0).isKill())
      return false;

    break;
  }
  case ARM::t2LDMIA_RET: {
    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg != ARM::SP)
      return false;
    Opc = Entry.NarrowOpc2; // tPOP_RET
    OpNum = 2;
    isLdStMul = true;
    break;
  }
  case ARM::t2LDMIA_UPD:
  case ARM::t2STMIA_UPD:
  case ARM::t2STMDB_UPD: {
    OpNum = 0;

    unsigned BaseReg = MI->getOperand(1).getReg();
    if (BaseReg == ARM::SP &&
        (Entry.WideOpc == ARM::t2LDMIA_UPD ||
         Entry.WideOpc == ARM::t2STMDB_UPD)) {
      Opc = Entry.NarrowOpc2; // tPOP or tPUSH
      OpNum = 2;
    } else if (!isARMLowRegister(BaseReg) ||
               (Entry.WideOpc != ARM::t2LDMIA_UPD &&
                Entry.WideOpc != ARM::t2STMIA_UPD)) {
      return false;
    }

    isLdStMul = true;
    break;
  }
  }

  unsigned OffsetReg = 0;
  bool OffsetKill = false;
  bool OffsetInternal = false;
  if (HasShift) {
    OffsetReg  = MI->getOperand(2).getReg();
    OffsetKill = MI->getOperand(2).isKill();
    OffsetInternal = MI->getOperand(2).isInternalRead();

    if (MI->getOperand(3).getImm())
      // Thumb1 addressing mode doesn't support shift.
      return false;
  }

  unsigned OffsetImm = 0;
  if (HasImmOffset) {
    OffsetImm = MI->getOperand(2).getImm();
    unsigned MaxOffset = ((1 << ImmLimit) - 1) * Scale;

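    // For example (illustrative): tLDRi has ImmLimit = 5 and Scale = 4, so
    // the encodable offsets are 0, 4, ..., 124. "ldr.w r0, [r1, #126]" fails
    // the alignment test below and "ldr.w r0, [r1, #128]" the range test,
    // so both stay 32-bit.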
    if ((OffsetImm & (Scale - 1)) || OffsetImm > MaxOffset)
      // Make sure the immediate field fits.
      return false;
  }

  // Add the 16-bit load / store instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, TII->get(Opc));

  // tSTMIA_UPD takes a defining register operand. We've already checked that
  // the register is killed, so mark it as dead here.
  if (Entry.WideOpc == ARM::t2STMIA)
    MIB.addReg(MI->getOperand(0).getReg(), RegState::Define | RegState::Dead);

  if (!isLdStMul) {
    MIB.addOperand(MI->getOperand(0));
    MIB.addOperand(MI->getOperand(1));

    if (HasImmOffset)
      MIB.addImm(OffsetImm / Scale);

    assert((!HasShift || OffsetReg) && "Invalid so_reg load / store address!");

    if (HasOffReg)
      MIB.addReg(OffsetReg, getKillRegState(OffsetKill) |
                            getInternalReadRegState(OffsetInternal));
  }

  // Transfer the rest of operands.
  for (unsigned e = MI->getNumOperands(); OpNum != e; ++OpNum)
    MIB.addOperand(MI->getOperand(OpNum));

  // Transfer memoperands.
  MIB->setMemRefs(MI->memoperands_begin(), MI->memoperands_end());

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << "       to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumLdSts;
  return true;
}

bool
Thumb2SizeReduce::ReduceSpecial(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opc = MI->getOpcode();
  if (Opc == ARM::t2ADDri) {
    // If the source register is SP, try to reduce to tADDrSPi, otherwise
    // it's a normal reduce.
    if (MI->getOperand(1).getReg() != ARM::SP) {
      if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
        return true;
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    }
    // Try to reduce to tADDrSPi.
    unsigned Imm = MI->getOperand(2).getImm();
    // The immediate must be an in-range multiple of 4, the destination
    // register must be a low reg, the predicate must be "always", and the
    // instruction must not set the condition flags.
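    // For example (illustrative): "add.w r0, sp, #508" can become the 16-bit
    // "add r0, sp, #508" (tADDrSPi encodes 508/4 = 127), whereas immediates
    // like 1022 (not a multiple of 4) or 1024 (> 1020) cannot be encoded.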
    if (Imm & 3 || Imm > 1020)
      return false;
    if (!isARMLowRegister(MI->getOperand(0).getReg()))
      return false;
    if (MI->getOperand(3).getImm() != ARMCC::AL)
      return false;
    const MCInstrDesc &MCID = MI->getDesc();
    if (MCID.hasOptionalDef() &&
        MI->getOperand(MCID.getNumOperands()-1).getReg() == ARM::CPSR)
      return false;

    MachineInstrBuilder MIB = BuildMI(MBB, MI, MI->getDebugLoc(),
                                      TII->get(ARM::tADDrSPi))
      .addOperand(MI->getOperand(0))
      .addOperand(MI->getOperand(1))
      .addImm(Imm / 4); // The tADDrSPi has an implied scale by four.
    AddDefaultPred(MIB);

    // Transfer MI flags.
    MIB.setMIFlags(MI->getFlags());

    DEBUG(errs() << "Converted 32-bit: " << *MI << "       to 16-bit: " <<*MIB);

    MBB.erase_instr(MI);
    ++NumNarrows;
    return true;
  }

  if (Entry.LowRegs1 && !VerifyLowRegs(MI))
    return false;

  if (MI->mayLoadOrStore())
    return ReduceLoadStore(MBB, MI, Entry);

  switch (Opc) {
  default: break;
  case ARM::t2ADDSri:
  case ARM::t2ADDSrr: {
    unsigned PredReg = 0;
    if (getInstrPredicate(MI, PredReg) == ARMCC::AL) {
      switch (Opc) {
      default: break;
      case ARM::t2ADDSri: {
        if (ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
          return true;
        // fallthrough
      }
      case ARM::t2ADDSrr:
        return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
      }
    }
    break;
  }
  case ARM::t2RSBri:
  case ARM::t2RSBSri:
  case ARM::t2SXTB:
  case ARM::t2SXTH:
  case ARM::t2UXTB:
  case ARM::t2UXTH:
    if (MI->getOperand(2).getImm() == 0)
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2MOVi16:
    // Can convert only 'pure' immediate operands, not immediates obtained as
    // globals' addresses.
    if (MI->getOperand(1).isImm())
      return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
    break;
  case ARM::t2CMPrr: {
    // Try to reduce to the lo-reg only version first. Why there are two
    // versions of the instruction is a mystery.
    // It would be nice to just have two entries in the master table that
    // are prioritized, but the table assumes a unique entry for each
    // source insn opcode. So for now, we hack a local entry record to use.
    static const ReduceEntry NarrowEntry =
      { ARM::t2CMPrr, ARM::tCMPr, 0, 0, 0, 1, 1, 2, 0, 0, 1, 0 };
    if (ReduceToNarrow(MBB, MI, NarrowEntry, LiveCPSR, IsSelfLoop))
      return true;
    return ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop);
  }
  }
  return false;
}

bool
Thumb2SizeReduce::ReduceTo2Addr(MachineBasicBlock &MBB, MachineInstr *MI,
                                const ReduceEntry &Entry,
                                bool LiveCPSR, bool IsSelfLoop) {

  if (ReduceLimit2Addr != -1 && ((int)Num2Addrs >= ReduceLimit2Addr))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;

  unsigned Reg0 = MI->getOperand(0).getReg();
  unsigned Reg1 = MI->getOperand(1).getReg();
  // t2MUL is "special". The tied source operand is second, not first.
  if (MI->getOpcode() == ARM::t2MUL) {
    unsigned Reg2 = MI->getOperand(2).getReg();
    // Early exit if the regs aren't all low regs.
    if (!isARMLowRegister(Reg0) || !isARMLowRegister(Reg1)
        || !isARMLowRegister(Reg2))
      return false;
    if (Reg0 != Reg2) {
      // If the other operand also isn't the same as the destination, we
      // can't reduce.
      if (Reg1 != Reg0)
        return false;
      // Try to commute the operands to make it a 2-address instruction.
      MachineInstr *CommutedMI = TII->commuteInstruction(MI);
      if (!CommutedMI)
        return false;
    }
  } else if (Reg0 != Reg1) {
    // Try to commute the operands to make it a 2-address instruction.
    unsigned CommOpIdx1 = 1;
    unsigned CommOpIdx2 = TargetInstrInfo::CommuteAnyOperandIndex;
    if (!TII->findCommutedOpIndices(MI, CommOpIdx1, CommOpIdx2) ||
        MI->getOperand(CommOpIdx2).getReg() != Reg0)
      return false;
    MachineInstr *CommutedMI =
        TII->commuteInstruction(MI, false, CommOpIdx1, CommOpIdx2);
    if (!CommutedMI)
      return false;
  }
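
  // At this point the destination is tied to one of the sources, commuting
  // operands if necessary. For example (illustrative): "mul.w r0, r1, r0"
  // already has its destination tied to tMUL's second (tied) source, while
  // "and.w r2, r3, r2" is commuted to "and.w r2, r2, r3" so it can become
  // the two-address "ands r2, r3" (tAND).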
  if (Entry.LowRegs2 && !isARMLowRegister(Reg0))
    return false;
  if (Entry.Imm2Limit) {
    unsigned Imm = MI->getOperand(2).getImm();
    unsigned Limit = (1 << Entry.Imm2Limit) - 1;
    if (Imm > Limit)
      return false;
  } else {
    unsigned Reg2 = MI->getOperand(2).getReg();
    if (Entry.LowRegs2 && !isARMLowRegister(Reg2))
      return false;
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc2);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  const MCInstrDesc &MCID = MI->getDesc();
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, true, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag updates by 16-bit
  // instructions that have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if (SkipPred && MCID.OpInfo[i].isPredicate())
      continue;
    MIB.addOperand(MI->getOperand(i));
  }

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << "       to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++Num2Addrs;
  return true;
}

bool
Thumb2SizeReduce::ReduceToNarrow(MachineBasicBlock &MBB, MachineInstr *MI,
                                 const ReduceEntry &Entry,
                                 bool LiveCPSR, bool IsSelfLoop) {
  if (ReduceLimit != -1 && ((int)NumNarrows >= ReduceLimit))
    return false;

  if (!OptimizeSize && Entry.AvoidMovs && STI->avoidMOVsShifterOperand())
    // Don't issue movs with shifter operand for some CPUs unless we
    // are optimizing for size.
    return false;

  unsigned Limit = ~0U;
  if (Entry.Imm1Limit)
    Limit = (1 << Entry.Imm1Limit) - 1;

  const MCInstrDesc &MCID = MI->getDesc();
  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i) {
    if (MCID.OpInfo[i].isPredicate())
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg()) {
      unsigned Reg = MO.getReg();
      if (!Reg || Reg == ARM::CPSR)
        continue;
      if (Entry.LowRegs1 && !isARMLowRegister(Reg))
        return false;
    } else if (MO.isImm() &&
               !MCID.OpInfo[i].isPredicate()) {
      if (((unsigned)MO.getImm()) > Limit)
        return false;
    }
  }

  // Check if it's possible / necessary to transfer the predicate.
  const MCInstrDesc &NewMCID = TII->get(Entry.NarrowOpc1);
  unsigned PredReg = 0;
  ARMCC::CondCodes Pred = getInstrPredicate(MI, PredReg);
  bool SkipPred = false;
  if (Pred != ARMCC::AL) {
    if (!NewMCID.isPredicable())
      // Can't transfer predicate, fail.
      return false;
  } else {
    SkipPred = !NewMCID.isPredicable();
  }

  bool HasCC = false;
  bool CCDead = false;
  if (MCID.hasOptionalDef()) {
    unsigned NumOps = MCID.getNumOperands();
    HasCC = (MI->getOperand(NumOps-1).getReg() == ARM::CPSR);
    if (HasCC && MI->getOperand(NumOps-1).isDead())
      CCDead = true;
  }
  if (!VerifyPredAndCC(MI, Entry, false, Pred, LiveCPSR, HasCC, CCDead))
    return false;

  // Avoid adding a false dependency on partial flag updates by 16-bit
  // instructions that have the 's' bit set.
  if (Entry.PartFlag && NewMCID.hasOptionalDef() && HasCC &&
      canAddPseudoFlagDep(MI, IsSelfLoop))
    return false;

  // Add the 16-bit instruction.
  DebugLoc dl = MI->getDebugLoc();
  MachineInstrBuilder MIB = BuildMI(MBB, MI, dl, NewMCID);
  MIB.addOperand(MI->getOperand(0));
  if (NewMCID.hasOptionalDef()) {
    if (HasCC)
      AddDefaultT1CC(MIB, CCDead);
    else
      AddNoT1CC(MIB);
  }

  // Transfer the rest of operands.
  unsigned NumOps = MCID.getNumOperands();
  for (unsigned i = 1, e = MI->getNumOperands(); i != e; ++i) {
    if (i < NumOps && MCID.OpInfo[i].isOptionalDef())
      continue;
    if ((MCID.getOpcode() == ARM::t2RSBSri ||
         MCID.getOpcode() == ARM::t2RSBri ||
         MCID.getOpcode() == ARM::t2SXTB ||
         MCID.getOpcode() == ARM::t2SXTH ||
         MCID.getOpcode() == ARM::t2UXTB ||
         MCID.getOpcode() == ARM::t2UXTH) && i == 2)
      // Skip the zero immediate operand, it's now implicit.
      continue;
    bool isPred = (i < NumOps && MCID.OpInfo[i].isPredicate());
    if (SkipPred && isPred)
      continue;
    const MachineOperand &MO = MI->getOperand(i);
    if (MO.isReg() && MO.isImplicit() && MO.getReg() == ARM::CPSR)
      // Skip implicit def of CPSR. Either it's modeled as an optional
      // def now or it's already an implicit def on the new instruction.
      continue;
    MIB.addOperand(MO);
  }
  if (!MCID.isPredicable() && NewMCID.isPredicable())
    AddDefaultPred(MIB);

  // Transfer MI flags.
  MIB.setMIFlags(MI->getFlags());

  DEBUG(errs() << "Converted 32-bit: " << *MI << "       to 16-bit: " << *MIB);

  MBB.erase_instr(MI);
  ++NumNarrows;
  return true;
}

static bool UpdateCPSRDef(MachineInstr &MI, bool LiveCPSR, bool &DefCPSR) {
  bool HasDef = false;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isUse())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;

    DefCPSR = true;
    if (!MO.isDead())
      HasDef = true;
  }

  return HasDef || LiveCPSR;
}

static bool UpdateCPSRUse(MachineInstr &MI, bool LiveCPSR) {
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg() || MO.isUndef() || MO.isDef())
      continue;
    if (MO.getReg() != ARM::CPSR)
      continue;
    assert(LiveCPSR && "CPSR liveness tracking is wrong!");
    if (MO.isKill()) {
      LiveCPSR = false;
      break;
    }
  }

  return LiveCPSR;
}

bool Thumb2SizeReduce::ReduceMI(MachineBasicBlock &MBB, MachineInstr *MI,
                                bool LiveCPSR, bool IsSelfLoop) {
  unsigned Opcode = MI->getOpcode();
  DenseMap<unsigned, unsigned>::iterator OPI = ReduceOpcodeMap.find(Opcode);
  if (OPI == ReduceOpcodeMap.end())
    return false;
  const ReduceEntry &Entry = ReduceTable[OPI->second];

  // Don't attempt normal reductions on "special" cases for now.
  if (Entry.Special)
    return ReduceSpecial(MBB, MI, Entry, LiveCPSR, IsSelfLoop);

  // Try to transform to a 16-bit two-address instruction.
  if (Entry.NarrowOpc2 &&
      ReduceTo2Addr(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  // Try to transform to a 16-bit non-two-address instruction.
  if (Entry.NarrowOpc1 &&
      ReduceToNarrow(MBB, MI, Entry, LiveCPSR, IsSelfLoop))
    return true;

  return false;
}

bool Thumb2SizeReduce::ReduceMBB(MachineBasicBlock &MBB) {
  bool Modified = false;

  // Yes, CPSR could be livein.
  bool LiveCPSR = MBB.isLiveIn(ARM::CPSR);
  MachineInstr *BundleMI = nullptr;

  CPSRDef = nullptr;
  HighLatencyCPSR = false;

  // Check predecessors for the latest CPSRDef.
  for (auto *Pred : MBB.predecessors()) {
    const MBBInfo &PInfo = BlockInfo[Pred->getNumber()];
    if (!PInfo.Visited) {
      // Since blocks are visited in RPO, this must be a back-edge.
      continue;
    }
    if (PInfo.HighLatencyCPSR) {
      HighLatencyCPSR = true;
      break;
    }
  }

  // If this BB loops back to itself, conservatively avoid narrowing the
  // first instruction that does partial flag update.
  bool IsSelfLoop = MBB.isSuccessor(&MBB);
  MachineBasicBlock::instr_iterator MII = MBB.instr_begin(),
                                    E = MBB.instr_end();
  MachineBasicBlock::instr_iterator NextMII;
  for (; MII != E; MII = NextMII) {
    NextMII = std::next(MII);

    MachineInstr *MI = &*MII;
    if (MI->isBundle()) {
      BundleMI = MI;
      continue;
    }
    if (MI->isDebugValue())
      continue;

    LiveCPSR = UpdateCPSRUse(*MI, LiveCPSR);

    // Does NextMII belong to the same bundle as MI?
    bool NextInSameBundle = NextMII != E && NextMII->isBundledWithPred();

    if (ReduceMI(MBB, MI, LiveCPSR, IsSelfLoop)) {
      Modified = true;
      MachineBasicBlock::instr_iterator I = std::prev(NextMII);
      MI = &*I;
      // Removing and reinserting the first instruction in a bundle will break
      // up the bundle. Fix the bundling if it was broken.
      if (NextInSameBundle && !NextMII->isBundledWithPred())
        NextMII->bundleWithPred();
    }

    if (!NextInSameBundle && MI->isInsideBundle()) {
      // FIXME: Since post-ra scheduler operates on bundles, the CPSR kill
      // marker is only on the BUNDLE instruction. Process the BUNDLE
      // instruction as we finish with the bundled instruction to work around
      // the inconsistency.
      if (BundleMI->killsRegister(ARM::CPSR))
        LiveCPSR = false;
      MachineOperand *MO = BundleMI->findRegisterDefOperand(ARM::CPSR);
      if (MO && !MO->isDead())
        LiveCPSR = true;
      MO = BundleMI->findRegisterUseOperand(ARM::CPSR);
      if (MO && !MO->isKill())
        LiveCPSR = true;
    }

    bool DefCPSR = false;
    LiveCPSR = UpdateCPSRDef(*MI, LiveCPSR, DefCPSR);
    if (MI->isCall()) {
      // Calls don't really set CPSR.
      CPSRDef = nullptr;
      HighLatencyCPSR = false;
      IsSelfLoop = false;
    } else if (DefCPSR) {
      // This is the last CPSR defining instruction.
      CPSRDef = MI;
      HighLatencyCPSR = isHighLatencyCPSR(CPSRDef);
      IsSelfLoop = false;
    }
  }

  MBBInfo &Info = BlockInfo[MBB.getNumber()];
  Info.HighLatencyCPSR = HighLatencyCPSR;
  Info.Visited = true;
  return Modified;
}

bool Thumb2SizeReduce::runOnMachineFunction(MachineFunction &MF) {
  if (PredicateFtor && !PredicateFtor(*MF.getFunction()))
    return false;

  STI = &static_cast<const ARMSubtarget &>(MF.getSubtarget());
  if (STI->isThumb1Only() || STI->prefers32BitThumb())
    return false;

  TII = static_cast<const Thumb2InstrInfo *>(STI->getInstrInfo());

  // Optimizing / minimizing size? Minimizing size implies optimizing for size.
  OptimizeSize = MF.getFunction()->optForSize();
  MinimizeSize = MF.getFunction()->optForMinSize();

  BlockInfo.clear();
  BlockInfo.resize(MF.getNumBlockIDs());

  // Visit blocks in reverse post-order so LastCPSRDef is known for all
  // predecessors.
  ReversePostOrderTraversal<MachineFunction*> RPOT(&MF);
  bool Modified = false;
  for (ReversePostOrderTraversal<MachineFunction*>::rpo_iterator
       I = RPOT.begin(), E = RPOT.end(); I != E; ++I)
    Modified |= ReduceMBB(**I);
  return Modified;
}

/// createThumb2SizeReductionPass - Returns an instance of the Thumb2 size
/// reduction pass.
FunctionPass *llvm::createThumb2SizeReductionPass(
    std::function<bool(const Function &)> Ftor) {
  return new Thumb2SizeReduce(Ftor);
}