//===- Thumb2InstrInfo.cpp - Thumb-2 Instruction Information --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-2 implementation of the TargetInstrInfo class.
//
//===----------------------------------------------------------------------===//

#include "Thumb2InstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Target/TargetMachine.h"
#include <cassert>

using namespace llvm;

static cl::opt<bool>
OldT2IfCvt("old-thumb2-ifcvt", cl::Hidden,
           cl::desc("Use old-style Thumb2 if-conversion heuristics"),
           cl::init(false));

static cl::opt<bool>
PreferNoCSEL("prefer-no-csel", cl::Hidden,
             cl::desc("Prefer predicated Move to CSEL"),
             cl::init(false));

Thumb2InstrInfo::Thumb2InstrInfo(const ARMSubtarget &STI)
    : ARMBaseInstrInfo(STI) {}

/// Return the noop instruction to use for a noop.
void Thumb2InstrInfo::getNoop(MCInst &NopInst) const {
  NopInst.setOpcode(ARM::tHINT);
  NopInst.addOperand(MCOperand::createImm(0));
  NopInst.addOperand(MCOperand::createImm(ARMCC::AL));
  NopInst.addOperand(MCOperand::createReg(0));
}

unsigned Thumb2InstrInfo::getUnindexedOpcode(unsigned Opc) const {
  // FIXME
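  // Returning 0 indicates that no equivalent unindexed opcode is provided
  // here.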
  return 0;
}

void
Thumb2InstrInfo::ReplaceTailWithBranchTo(MachineBasicBlock::iterator Tail,
                                         MachineBasicBlock *NewDest) const {
  MachineBasicBlock *MBB = Tail->getParent();
  ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
  if (!AFI->hasITBlocks() || Tail->isBranch()) {
    TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);
    return;
  }

  // If the first instruction of Tail is predicated, we may have to update
  // the IT instruction.
  Register PredReg;
  ARMCC::CondCodes CC = getInstrPredicate(*Tail, PredReg);
  MachineBasicBlock::iterator MBBI = Tail;
  if (CC != ARMCC::AL)
    // Expecting at least the t2IT instruction before it.
    --MBBI;

  // Actually replace the tail.
  TargetInstrInfo::ReplaceTailWithBranchTo(Tail, NewDest);

  // Fix up IT.
  if (CC != ARMCC::AL) {
    MachineBasicBlock::iterator E = MBB->begin();
    unsigned Count = 4; // At most 4 instructions in an IT block.
    while (Count && MBBI != E) {
      if (MBBI->isDebugInstr()) {
        --MBBI;
        continue;
      }
      if (MBBI->getOpcode() == ARM::t2IT) {
        unsigned Mask = MBBI->getOperand(1).getImm();
        if (Count == 4)
          MBBI->eraseFromParent();
        else {
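          // 4 - Count predicated instructions remain between the IT and the
          // replaced tail; keep their condition bits and re-terminate the
          // mask (its lowest set bit marks the end of the IT block).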
          unsigned MaskOn = 1 << Count;
          unsigned MaskOff = ~(MaskOn - 1);
          MBBI->getOperand(1).setImm((Mask & MaskOff) | MaskOn);
        }
        return;
      }
      --MBBI;
      --Count;
    }

    // Control flow can reach here if the branch folding pass is run before
    // the IT block formation pass.
  }
}

bool
Thumb2InstrInfo::isLegalToSplitMBBAt(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator MBBI) const {
  while (MBBI->isDebugInstr()) {
    ++MBBI;
    if (MBBI == MBB.end())
      return false;
  }

  Register PredReg;
  return getITInstrPredicate(*MBBI, PredReg) == ARMCC::AL;
}

MachineInstr *
Thumb2InstrInfo::optimizeSelect(MachineInstr &MI,
                                SmallPtrSetImpl<MachineInstr *> &SeenMIs,
                                bool PreferFalse) const {
  // Try to use the base optimizeSelect, which uses canFoldIntoMOVCC to fold
  // the MOVCC into another instruction. If that fails on 8.1-M, fall back to
  // using a CSEL.
  MachineInstr *RV = ARMBaseInstrInfo::optimizeSelect(MI, SeenMIs, PreferFalse);
  if (!RV && getSubtarget().hasV8_1MMainlineOps() && !PreferNoCSEL) {
    Register DestReg = MI.getOperand(0).getReg();

    if (!DestReg.isVirtual())
      return nullptr;

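    // Build a CSEL from the MOVCC. CSEL selects its first source operand when
    // the condition holds, so the MOVCC true value (operand 2) is added before
    // the false value (operand 1).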
    MachineInstrBuilder NewMI = BuildMI(*MI.getParent(), MI, MI.getDebugLoc(),
                                        get(ARM::t2CSEL), DestReg)
                                    .add(MI.getOperand(2))
                                    .add(MI.getOperand(1))
                                    .add(MI.getOperand(3));
    SeenMIs.insert(NewMI);
    return NewMI;
  }
  return RV;
}

void Thumb2InstrInfo::copyPhysReg(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator I,
                                  const DebugLoc &DL, MCRegister DestReg,
                                  MCRegister SrcReg, bool KillSrc) const {
  // Handle SPR, DPR, and QPR copies.
  if (!ARM::GPRRegClass.contains(DestReg, SrcReg))
    return ARMBaseInstrInfo::copyPhysReg(MBB, I, DL, DestReg, SrcReg, KillSrc);

  BuildMI(MBB, I, DL, get(ARM::tMOVr), DestReg)
      .addReg(SrcReg, getKillRegState(KillSrc))
      .add(predOps(ARMCC::AL));
}

void Thumb2InstrInfo::
storeRegToStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                    Register SrcReg, bool isKill, int FI,
                    const TargetRegisterClass *RC,
                    const TargetRegisterInfo *TRI) const {
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOStore,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2STRi12))
        .addReg(SrcReg, getKillRegState(isKill))
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 STRD expects its source registers to be in rGPR. That is not a
    // problem for gsub_0, but gsub_1 needs an extra constraint (it could
    // otherwise be sp).
    if (Register::isVirtualRegister(SrcReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(SrcReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2STRDi8));
    AddDReg(MIB, SrcReg, ARM::gsub_0, getKillRegState(isKill), TRI);
    AddDReg(MIB, SrcReg, ARM::gsub_1, 0, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));
    return;
  }

  ARMBaseInstrInfo::storeRegToStackSlot(MBB, I, SrcReg, isKill, FI, RC, TRI);
}

void Thumb2InstrInfo::
loadRegFromStackSlot(MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
                     Register DestReg, int FI,
                     const TargetRegisterClass *RC,
                     const TargetRegisterInfo *TRI) const {
  MachineFunction &MF = *MBB.getParent();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MachineMemOperand *MMO = MF.getMachineMemOperand(
      MachinePointerInfo::getFixedStack(MF, FI), MachineMemOperand::MOLoad,
      MFI.getObjectSize(FI), MFI.getObjectAlign(FI));
  DebugLoc DL;
  if (I != MBB.end()) DL = I->getDebugLoc();

  if (ARM::GPRRegClass.hasSubClassEq(RC)) {
    BuildMI(MBB, I, DL, get(ARM::t2LDRi12), DestReg)
        .addFrameIndex(FI)
        .addImm(0)
        .addMemOperand(MMO)
        .add(predOps(ARMCC::AL));
    return;
  }

  if (ARM::GPRPairRegClass.hasSubClassEq(RC)) {
    // Thumb2 LDRD expects its dest-registers to be in rGPR. Not a problem for
    // gsub_0, but needs an extra constraint for gsub_1 (which could be sp
    // otherwise).
    if (Register::isVirtualRegister(DestReg)) {
      MachineRegisterInfo *MRI = &MF.getRegInfo();
      MRI->constrainRegClass(DestReg, &ARM::GPRPairnospRegClass);
    }

    MachineInstrBuilder MIB = BuildMI(MBB, I, DL, get(ARM::t2LDRDi8));
    AddDReg(MIB, DestReg, ARM::gsub_0, RegState::DefineNoRead, TRI);
    AddDReg(MIB, DestReg, ARM::gsub_1, RegState::DefineNoRead, TRI);
    MIB.addFrameIndex(FI).addImm(0).addMemOperand(MMO).add(predOps(ARMCC::AL));

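    // When loading into a physical pair, also mark the full pair register as
    // implicitly defined so the two sub-register defs keep the whole pair
    // live.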
    if (Register::isPhysicalRegister(DestReg))
      MIB.addReg(DestReg, RegState::ImplicitDefine);
    return;
  }

  ARMBaseInstrInfo::loadRegFromStackSlot(MBB, I, DestReg, FI, RC, TRI);
}

void Thumb2InstrInfo::expandLoadStackGuard(
    MachineBasicBlock::iterator MI) const {
  MachineFunction &MF = *MI->getParent()->getParent();
  if (MF.getTarget().isPositionIndependent())
    expandLoadStackGuardBase(MI, ARM::t2MOV_ga_pcrel, ARM::t2LDRi12);
  else
    expandLoadStackGuardBase(MI, ARM::t2MOVi32imm, ARM::t2LDRi12);
}

MachineInstr *Thumb2InstrInfo::commuteInstructionImpl(MachineInstr &MI,
                                                      bool NewMI,
                                                      unsigned OpIdx1,
                                                      unsigned OpIdx2) const {
  switch (MI.getOpcode()) {
  case ARM::MVE_VMAXNMAf16:
  case ARM::MVE_VMAXNMAf32:
  case ARM::MVE_VMINNMAf16:
  case ARM::MVE_VMINNMAf32:
    // Don't allow predicated instructions to be commuted.
    if (getVPTInstrPredicate(MI) != ARMVCC::None)
      return nullptr;
  }
  return ARMBaseInstrInfo::commuteInstructionImpl(MI, NewMI, OpIdx1, OpIdx2);
}

void llvm::emitT2RegPlusImmediate(MachineBasicBlock &MBB,
                                  MachineBasicBlock::iterator &MBBI,
                                  const DebugLoc &dl, Register DestReg,
                                  Register BaseReg, int NumBytes,
                                  ARMCC::CondCodes Pred, Register PredReg,
                                  const ARMBaseInstrInfo &TII,
                                  unsigned MIFlags) {
  if (NumBytes == 0 && DestReg != BaseReg) {
    BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
      .addReg(BaseReg, RegState::Kill)
      .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
    return;
  }

  bool isSub = NumBytes < 0;
  if (isSub) NumBytes = -NumBytes;

  // If profitable, use a movw or movt to materialize the offset.
  // FIXME: Use the scavenger to grab a scratch register.
  if (DestReg != ARM::SP && DestReg != BaseReg &&
      NumBytes >= 4096 &&
      ARM_AM::getT2SOImmVal(NumBytes) == -1) {
    bool Fits = false;
    if (NumBytes < 65536) {
      // Use a movw to materialize the 16-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVi16), DestReg)
        .addImm(NumBytes)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    } else if ((NumBytes & 0xffff) == 0) {
      // Use a movt to materialize the 32-bit constant.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::t2MOVTi16), DestReg)
        .addReg(DestReg)
        .addImm(NumBytes >> 16)
        .addImm((unsigned)Pred).addReg(PredReg).setMIFlags(MIFlags);
      Fits = true;
    }

    if (Fits) {
      if (isSub) {
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2SUBrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      } else {
        // Here we know that DestReg is not SP, but we do not know anything
        // about BaseReg. t2ADDrr is an invalid instruction if SP is used as
        // the second argument, but is fine if SP is the first argument. To be
        // sure we do not generate an invalid encoding, put BaseReg first.
        BuildMI(MBB, MBBI, dl, TII.get(ARM::t2ADDrr), DestReg)
            .addReg(BaseReg)
            .addReg(DestReg, RegState::Kill)
            .add(predOps(Pred, PredReg))
            .add(condCodeOp())
            .setMIFlags(MIFlags);
      }
      return;
    }
  }

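  // Otherwise, emit a sequence of adds/subs, peeling off as large a chunk of
  // the remaining offset as a single instruction can encode on each iteration.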
  while (NumBytes) {
    unsigned ThisVal = NumBytes;
    unsigned Opc = 0;
    if (DestReg == ARM::SP && BaseReg != ARM::SP) {
      // mov sp, rn. Note t2MOVr cannot be used.
      BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVr), DestReg)
          .addReg(BaseReg)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      BaseReg = ARM::SP;
      continue;
    }

    assert((DestReg != ARM::SP || BaseReg == ARM::SP) &&
           "Writing to SP, from other register.");

    // Try to use T1, as it is smaller.
    if ((DestReg == ARM::SP) && (ThisVal < ((1 << 7) - 1) * 4)) {
      assert((ThisVal & 3) == 0 && "Stack update is not multiple of 4?");
      Opc = isSub ? ARM::tSUBspi : ARM::tADDspi;
      BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
          .addReg(BaseReg)
          .addImm(ThisVal / 4)
          .setMIFlags(MIFlags)
          .add(predOps(ARMCC::AL));
      break;
    }
    bool HasCCOut = true;
    int ImmIsT2SO = ARM_AM::getT2SOImmVal(ThisVal);
    bool ToSP = DestReg == ARM::SP;
    unsigned t2SUB = ToSP ? ARM::t2SUBspImm : ARM::t2SUBri;
    unsigned t2ADD = ToSP ? ARM::t2ADDspImm : ARM::t2ADDri;
    unsigned t2SUBi12 = ToSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12;
    unsigned t2ADDi12 = ToSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
    Opc = isSub ? t2SUB : t2ADD;
    // Prefer T2: sub rd, rn, so_imm | sub sp, sp, so_imm
    if (ImmIsT2SO != -1) {
      NumBytes = 0;
    } else if (ThisVal < 4096) {
      // Prefer T3 if we can make it in a single go: subw rd, rn, imm12 |
      // subw sp, sp, imm12
      Opc = isSub ? t2SUBi12 : t2ADDi12;
      HasCCOut = false;
      NumBytes = 0;
    } else {
      // Use one T2 instruction to reduce NumBytes
      // FIXME: Move this to ARMAddressingModes.h?
      unsigned RotAmt = countLeadingZeros(ThisVal);
      ThisVal = ThisVal & ARM_AM::rotr32(0xff000000U, RotAmt);
      NumBytes &= ~ThisVal;
      assert(ARM_AM::getT2SOImmVal(ThisVal) != -1 &&
             "Bit extraction didn't work?");
    }

    // Build the new ADD / SUB.
    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg)
                                  .addReg(BaseReg, RegState::Kill)
                                  .addImm(ThisVal)
                                  .add(predOps(ARMCC::AL))
                                  .setMIFlags(MIFlags);
    if (HasCCOut)
      MIB.add(condCodeOp());

    BaseReg = DestReg;
  }
}

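// Map a Thumb-2 load/store opcode with a 12-bit (positive-only) offset to its
// 8-bit form, which can encode negative offsets; opcodes already in the 8-bit
// form are returned unchanged.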
static unsigned
negativeOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi12:   return ARM::t2LDRi8;
  case ARM::t2LDRHi12:  return ARM::t2LDRHi8;
  case ARM::t2LDRBi12:  return ARM::t2LDRBi8;
  case ARM::t2LDRSHi12: return ARM::t2LDRSHi8;
  case ARM::t2LDRSBi12: return ARM::t2LDRSBi8;
  case ARM::t2STRi12:   return ARM::t2STRi8;
  case ARM::t2STRBi12:  return ARM::t2STRBi8;
  case ARM::t2STRHi12:  return ARM::t2STRHi8;
  case ARM::t2PLDi12:   return ARM::t2PLDi8;
  case ARM::t2PLDWi12:  return ARM::t2PLDWi8;
  case ARM::t2PLIi12:   return ARM::t2PLIi8;

  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

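// Inverse of the above: map an 8-bit-offset opcode back to its 12-bit form,
// leaving 12-bit forms unchanged.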
static unsigned
positiveOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRi8:   return ARM::t2LDRi12;
  case ARM::t2LDRHi8:  return ARM::t2LDRHi12;
  case ARM::t2LDRBi8:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHi8: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBi8: return ARM::t2LDRSBi12;
  case ARM::t2STRi8:   return ARM::t2STRi12;
  case ARM::t2STRBi8:  return ARM::t2STRBi12;
  case ARM::t2STRHi8:  return ARM::t2STRHi12;
  case ARM::t2PLDi8:   return ARM::t2PLDi12;
  case ARM::t2PLDWi8:  return ARM::t2PLDWi12;
  case ARM::t2PLIi8:   return ARM::t2PLIi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

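// Map a register-offset (shifted-register) load/store to the equivalent
// 12-bit immediate-offset form; immediate forms pass through unchanged.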
static unsigned
immediateOffsetOpcode(unsigned opcode)
{
  switch (opcode) {
  case ARM::t2LDRs:   return ARM::t2LDRi12;
  case ARM::t2LDRHs:  return ARM::t2LDRHi12;
  case ARM::t2LDRBs:  return ARM::t2LDRBi12;
  case ARM::t2LDRSHs: return ARM::t2LDRSHi12;
  case ARM::t2LDRSBs: return ARM::t2LDRSBi12;
  case ARM::t2STRs:   return ARM::t2STRi12;
  case ARM::t2STRBs:  return ARM::t2STRBi12;
  case ARM::t2STRHs:  return ARM::t2STRHi12;
  case ARM::t2PLDs:   return ARM::t2PLDi12;
  case ARM::t2PLDWs:  return ARM::t2PLDWi12;
  case ARM::t2PLIs:   return ARM::t2PLIi12;

  case ARM::t2LDRi12:
  case ARM::t2LDRHi12:
  case ARM::t2LDRBi12:
  case ARM::t2LDRSHi12:
  case ARM::t2LDRSBi12:
  case ARM::t2STRi12:
  case ARM::t2STRBi12:
  case ARM::t2STRHi12:
  case ARM::t2PLDi12:
  case ARM::t2PLDWi12:
  case ARM::t2PLIi12:
  case ARM::t2LDRi8:
  case ARM::t2LDRHi8:
  case ARM::t2LDRBi8:
  case ARM::t2LDRSHi8:
  case ARM::t2LDRSBi8:
  case ARM::t2STRi8:
  case ARM::t2STRBi8:
  case ARM::t2STRHi8:
  case ARM::t2PLDi8:
  case ARM::t2PLDWi8:
  case ARM::t2PLIi8:
    return opcode;

  default:
    llvm_unreachable("unknown thumb2 opcode.");
  }
}

bool llvm::rewriteT2FrameIndex(MachineInstr &MI, unsigned FrameRegIdx,
                               Register FrameReg, int &Offset,
                               const ARMBaseInstrInfo &TII,
                               const TargetRegisterInfo *TRI) {
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
  bool isSub = false;

  MachineFunction &MF = *MI.getParent()->getParent();
  const TargetRegisterClass *RegClass =
      TII.getRegClass(Desc, FrameRegIdx, TRI, MF);

  // Memory operands in inline assembly always use AddrModeT2_i12.
  if (Opcode == ARM::INLINEASM || Opcode == ARM::INLINEASM_BR)
    AddrMode = ARMII::AddrModeT2_i12; // FIXME. mode for thumb2?

  const bool IsSP = Opcode == ARM::t2ADDspImm12 || Opcode == ARM::t2ADDspImm;
  if (IsSP || Opcode == ARM::t2ADDri || Opcode == ARM::t2ADDri12) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();

    Register PredReg;
    if (Offset == 0 && getInstrPredicate(MI, PredReg) == ARMCC::AL &&
        !MI.definesRegister(ARM::CPSR)) {
      // Turn it into a move.
      MI.setDesc(TII.get(ARM::tMOVr));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      // Remove offset and remaining explicit predicate operands.
      do MI.RemoveOperand(FrameRegIdx+1);
      while (MI.getNumOperands() > FrameRegIdx+1);
      MachineInstrBuilder MIB(*MI.getParent()->getParent(), &MI);
      MIB.add(predOps(ARMCC::AL));
      return true;
    }

    bool HasCCOut = (Opcode != ARM::t2ADDspImm12 && Opcode != ARM::t2ADDri12);

    if (Offset < 0) {
      Offset = -Offset;
      isSub = true;
      MI.setDesc(IsSP ? TII.get(ARM::t2SUBspImm) : TII.get(ARM::t2SUBri));
    } else {
      MI.setDesc(IsSP ? TII.get(ARM::t2ADDspImm) : TII.get(ARM::t2ADDri));
    }

    // Common case: small offset, fits into instruction.
    if (ARM_AM::getT2SOImmVal(Offset) != -1) {
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Add cc_out operand if the original instruction did not have one.
      if (!HasCCOut)
        MI.addOperand(MachineOperand::CreateReg(0, false));
      Offset = 0;
      return true;
    }
    // Another common case: imm12.
    if (Offset < 4096 &&
        (!HasCCOut || MI.getOperand(MI.getNumOperands()-1).getReg() == 0)) {
      unsigned NewOpc = isSub ? IsSP ? ARM::t2SUBspImm12 : ARM::t2SUBri12
                              : IsSP ? ARM::t2ADDspImm12 : ARM::t2ADDri12;
      MI.setDesc(TII.get(NewOpc));
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(Offset);
      // Remove the cc_out operand.
      if (HasCCOut)
        MI.RemoveOperand(MI.getNumOperands()-1);
      Offset = 0;
      return true;
    }

    // Otherwise, extract 8 adjacent bits from the immediate into this
    // t2ADDri/t2SUBri.
    unsigned RotAmt = countLeadingZeros<unsigned>(Offset);
    unsigned ThisImmVal = Offset & ARM_AM::rotr32(0xff000000U, RotAmt);

    // We will handle these bits of the offset here, so clear them from Offset.
    Offset &= ~ThisImmVal;

    assert(ARM_AM::getT2SOImmVal(ThisImmVal) != -1 &&
           "Bit extraction didn't work?");
    MI.getOperand(FrameRegIdx+1).ChangeToImmediate(ThisImmVal);
    // Add cc_out operand if the original instruction did not have one.
    if (!HasCCOut)
      MI.addOperand(MachineOperand::CreateReg(0, false));
  } else {
    // AddrMode4 and AddrMode6 cannot handle any offset.
    if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
      return false;

    // AddrModeT2_so cannot handle any offset. If there is no offset
    // register then we change to an immediate version.
    unsigned NewOpc = Opcode;
    if (AddrMode == ARMII::AddrModeT2_so) {
      Register OffsetReg = MI.getOperand(FrameRegIdx + 1).getReg();
      if (OffsetReg != 0) {
        MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
        return Offset == 0;
      }

      MI.RemoveOperand(FrameRegIdx+1);
      MI.getOperand(FrameRegIdx+1).ChangeToImmediate(0);
      NewOpc = immediateOffsetOpcode(Opcode);
      AddrMode = ARMII::AddrModeT2_i12;
    }

    unsigned NumBits = 0;
    unsigned Scale = 1;
    if (AddrMode == ARMII::AddrModeT2_i8 || AddrMode == ARMII::AddrModeT2_i12) {
      // i8 supports only negative offsets and i12 only positive ones, so
      // convert the opcode to the appropriate form based on the sign of
      // Offset.
      Offset += MI.getOperand(FrameRegIdx+1).getImm();
      if (Offset < 0) {
        NewOpc = negativeOffsetOpcode(Opcode);
        NumBits = 8;
        isSub = true;
        Offset = -Offset;
      } else {
        NewOpc = positiveOffsetOpcode(Opcode);
        NumBits = 12;
      }
    } else if (AddrMode == ARMII::AddrMode5) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
      if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 4;
      Offset += InstrOffs * 4;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrMode5FP16) {
      // VFP address mode.
      const MachineOperand &OffOp = MI.getOperand(FrameRegIdx+1);
      int InstrOffs = ARM_AM::getAM5FP16Offset(OffOp.getImm());
      if (ARM_AM::getAM5FP16Op(OffOp.getImm()) == ARM_AM::sub)
        InstrOffs *= -1;
      NumBits = 8;
      Scale = 2;
      Offset += InstrOffs * 2;
      assert((Offset & (Scale-1)) == 0 && "Can't encode this offset!");
      if (Offset < 0) {
        Offset = -Offset;
        isSub = true;
      }
    } else if (AddrMode == ARMII::AddrModeT2_i7s4 ||
               AddrMode == ARMII::AddrModeT2_i7s2 ||
               AddrMode == ARMII::AddrModeT2_i7) {
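      // A 7-bit immediate scaled by 4, 2, or 1 bytes: NumBits below counts
      // bits of the byte offset, and OffsetMask enforces the required
      // alignment.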
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      unsigned OffsetMask;
      switch (AddrMode) {
      case ARMII::AddrModeT2_i7s4: NumBits = 9; OffsetMask = 0x3; break;
      case ARMII::AddrModeT2_i7s2: NumBits = 8; OffsetMask = 0x1; break;
      default:                     NumBits = 7; OffsetMask = 0x0; break;
      }
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & OffsetMask) == 0 && "Can't encode this offset!");
      (void)OffsetMask; // squash unused-variable warning in NDEBUG builds
    } else if (AddrMode == ARMII::AddrModeT2_i8s4) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm();
      NumBits = 8 + 2;
      // MCInst operand expects already scaled value.
      Scale = 1;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else if (AddrMode == ARMII::AddrModeT2_ldrex) {
      Offset += MI.getOperand(FrameRegIdx + 1).getImm() * 4;
      NumBits = 8; // 8 bits scaled by 4
      Scale = 4;
      assert((Offset & 3) == 0 && "Can't encode this offset!");
    } else {
      llvm_unreachable("Unsupported addressing mode!");
    }

    if (NewOpc != Opcode)
      MI.setDesc(TII.get(NewOpc));

    MachineOperand &ImmOp = MI.getOperand(FrameRegIdx+1);

    // Attempt to fold address computation
    // Common case: small offset, fits into instruction. We need to make sure
    // the register class is correct too, for instructions like the MVE
    // VLDRH.32, which only accepts low tGPR registers.
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;
    if ((unsigned)Offset <= Mask * Scale &&
        (Register::isVirtualRegister(FrameReg) ||
         RegClass->contains(FrameReg))) {
      if (Register::isVirtualRegister(FrameReg)) {
        // Make sure the register class for the virtual register is correct
        MachineRegisterInfo *MRI = &MF.getRegInfo();
        if (!MRI->constrainRegClass(FrameReg, RegClass))
          llvm_unreachable("Unable to constrain virtual register class.");
      }

      // Replace the FrameIndex with fp/sp
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      if (isSub) {
        if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
          // FIXME: Not consistent.
          ImmedOffset |= 1 << NumBits;
        else
          ImmedOffset = -ImmedOffset;
      }
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset = 0;
      return true;
    }

    // Otherwise, offset doesn't fit. Pull in what we can to simplify
    ImmedOffset = ImmedOffset & Mask;
    if (isSub) {
      if (AddrMode == ARMII::AddrMode5 || AddrMode == ARMII::AddrMode5FP16)
        // FIXME: Not consistent.
        ImmedOffset |= 1 << NumBits;
      else {
        ImmedOffset = -ImmedOffset;
        if (ImmedOffset == 0)
          // Change the opcode back if the encoded offset is zero.
          MI.setDesc(TII.get(positiveOffsetOpcode(NewOpc)));
      }
    }
    ImmOp.ChangeToImmediate(ImmedOffset);
    Offset &= ~(Mask*Scale);
  }

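  // Report any remaining (unencoded) offset back through the in/out Offset
  // parameter; the rewrite is complete only if nothing is left and the frame
  // register is legal for this instruction.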
  Offset = (isSub) ? -Offset : Offset;
  return Offset == 0 && (Register::isVirtualRegister(FrameReg) ||
                         RegClass->contains(FrameReg));
}

ARMCC::CondCodes llvm::getITInstrPredicate(const MachineInstr &MI,
                                           Register &PredReg) {
  unsigned Opc = MI.getOpcode();
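  // Conditional branches carry a condition code but are never part of an IT
  // block, so report them as unpredicated (AL) for IT-block purposes.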
  if (Opc == ARM::tBcc || Opc == ARM::t2Bcc)
    return ARMCC::AL;
  return getInstrPredicate(MI, PredReg);
}

int llvm::findFirstVPTPredOperandIdx(const MachineInstr &MI) {
  const MCInstrDesc &MCID = MI.getDesc();

  if (!MCID.OpInfo)
    return -1;

  for (unsigned i = 0, e = MCID.getNumOperands(); i != e; ++i)
    if (ARM::isVpred(MCID.OpInfo[i].OperandType))
      return i;

  return -1;
}

ARMVCC::VPTCodes llvm::getVPTInstrPredicate(const MachineInstr &MI,
                                            Register &PredReg) {
  int PIdx = findFirstVPTPredOperandIdx(MI);
  if (PIdx == -1) {
    PredReg = 0;
    return ARMVCC::None;
  }

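  // A vpred operand is an (immediate predicate code, VPR register) pair;
  // return the register through PredReg and the code as the result.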
  PredReg = MI.getOperand(PIdx+1).getReg();
  return (ARMVCC::VPTCodes)MI.getOperand(PIdx).getImm();
}

void llvm::recomputeVPTBlockMask(MachineInstr &Instr) {
  assert(isVPTOpcode(Instr.getOpcode()) && "Not a VPST or VPT Instruction!");

  MachineOperand &MaskOp = Instr.getOperand(0);
  assert(MaskOp.isImm() && "Operand 0 is not the block mask of the VPT/VPST?!");

  MachineBasicBlock::iterator Iter = ++Instr.getIterator(),
                              End = Instr.getParent()->end();

  // Verify that the instruction after the VPT/VPST is predicated (it should
  // be), and skip it.
  assert(
      getVPTInstrPredicate(*Iter) == ARMVCC::Then &&
      "VPT/VPST should be followed by an instruction with a 'then' predicate!");
  ++Iter;

  // Iterate over the predicated instructions, updating the BlockMask as we go.
  ARM::PredBlockMask BlockMask = ARM::PredBlockMask::T;
  while (Iter != End) {
    ARMVCC::VPTCodes Pred = getVPTInstrPredicate(*Iter);
    if (Pred == ARMVCC::None)
      break;
    BlockMask = expandPredBlockMask(BlockMask, Pred);
    ++Iter;
  }

  // Rewrite the BlockMask.
  MaskOp.setImm((int64_t)(BlockMask));
}