1 //===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the base ARM implementation of TargetRegisterInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "ARM.h"
15 #include "ARMBaseInstrInfo.h"
16 #include "ARMBaseRegisterInfo.h"
17 #include "ARMFrameLowering.h"
18 #include "ARMInstrInfo.h"
19 #include "ARMMachineFunctionInfo.h"
20 #include "ARMSubtarget.h"
21 #include "MCTargetDesc/ARMAddressingModes.h"
22 #include "llvm/Constants.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Function.h"
25 #include "llvm/LLVMContext.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstrBuilder.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/RegisterScavenging.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetFrameLowering.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include "llvm/ADT/BitVector.h"
39 #include "llvm/ADT/SmallVector.h"
40 #include "llvm/Support/CommandLine.h"
41 
42 #define GET_REGINFO_TARGET_DESC
43 #include "ARMGenRegisterInfo.inc"
44 
45 using namespace llvm;
46 
47 static cl::opt<bool>
48 ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
49           cl::desc("Force use of virtual base registers for stack load/store"));
50 static cl::opt<bool>
51 EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
52           cl::desc("Enable pre-regalloc stack frame index allocation"));
53 static cl::opt<bool>
54 EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
55           cl::desc("Enable use of a base pointer for complex stack frames"));
56 
57 ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
58                                          const ARMSubtarget &sti)
59   : ARMGenRegisterInfo(ARM::LR), TII(tii), STI(sti),
60     FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
61     BasePtr(ARM::R6) {
62 }
63 
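/// getCalleeSavedRegs - Return a null-terminated list of callee-saved
/// registers: none for GHC calls, the Darwin-specific order (R9 is not
/// callee-saved there) on Darwin, and the AAPCS list otherwise.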
64 const unsigned*
65 ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
66   bool ghcCall = false;
67 
68   if (MF) {
69     const Function *F = MF->getFunction();
70     ghcCall = (F ? F->getCallingConv() == CallingConv::GHC : false);
71   }
72 
73   static const unsigned CalleeSavedRegs[] = {
74     ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
75     ARM::R7, ARM::R6,  ARM::R5,  ARM::R4,
76 
77     ARM::D15, ARM::D14, ARM::D13, ARM::D12,
78     ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
79     0
80   };
81 
82   static const unsigned DarwinCalleeSavedRegs[] = {
83     // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
84     // register.
85     ARM::LR,  ARM::R7,  ARM::R6, ARM::R5, ARM::R4,
86     ARM::R11, ARM::R10, ARM::R8,
87 
88     ARM::D15, ARM::D14, ARM::D13, ARM::D12,
89     ARM::D11, ARM::D10, ARM::D9,  ARM::D8,
90     0
91   };
92 
93   static const unsigned GhcCalleeSavedRegs[] = {
94     0
95   };
96 
97   return ghcCall ? GhcCalleeSavedRegs :
98          STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
99 }
100 
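/// getReservedRegs - Return the set of registers the allocator must not use:
/// SP, PC, FPSCR, the frame and base pointers when needed, R9 on targets
/// that reserve it, and D16-D31 when the subtarget lacks them.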
101 BitVector ARMBaseRegisterInfo::
102 getReservedRegs(const MachineFunction &MF) const {
103   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
104 
105   // FIXME: avoid re-calculating this every time.
106   BitVector Reserved(getNumRegs());
107   Reserved.set(ARM::SP);
108   Reserved.set(ARM::PC);
109   Reserved.set(ARM::FPSCR);
110   if (TFI->hasFP(MF))
111     Reserved.set(FramePtr);
112   if (hasBasePointer(MF))
113     Reserved.set(BasePtr);
114   // Some targets reserve R9.
115   if (STI.isR9Reserved())
116     Reserved.set(ARM::R9);
117   // Reserve D16-D31 if the subtarget doesn't support them.
118   if (!STI.hasVFP3() || STI.hasD16()) {
119     assert(ARM::D31 == ARM::D16 + 15);
120     for (unsigned i = 0; i != 16; ++i)
121       Reserved.set(ARM::D16 + i);
122   }
123   return Reserved;
124 }
125 
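/// isReservedReg - Return true if the given physical register is reserved
/// for this function (SP, PC, the base or frame pointer when in use, or R9
/// on targets that reserve it).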
126 bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
127                                         unsigned Reg) const {
128   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
129 
130   switch (Reg) {
131   default: break;
132   case ARM::SP:
133   case ARM::PC:
134     return true;
135   case ARM::R6:
136     if (hasBasePointer(MF))
137       return true;
138     break;
139   case ARM::R7:
140   case ARM::R11:
141     if (FramePtr == Reg && TFI->hasFP(MF))
142       return true;
143     break;
144   case ARM::R9:
145     return STI.isR9Reserved();
146   }
147 
148   return false;
149 }
150 
151 const TargetRegisterClass *
152 ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
153                                               const TargetRegisterClass *B,
154                                               unsigned SubIdx) const {
155   switch (SubIdx) {
156   default: return 0;
157   case ARM::ssub_0:
158   case ARM::ssub_1:
159   case ARM::ssub_2:
160   case ARM::ssub_3: {
161     // S sub-registers.
162     if (A->getSize() == 8) {
163       if (B == &ARM::SPR_8RegClass)
164         return &ARM::DPR_8RegClass;
165       assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
166       if (A == &ARM::DPR_8RegClass)
167         return A;
168       return &ARM::DPR_VFP2RegClass;
169     }
170 
171     if (A->getSize() == 16) {
172       if (B == &ARM::SPR_8RegClass)
173         return &ARM::QPR_8RegClass;
174       return &ARM::QPR_VFP2RegClass;
175     }
176 
177     if (A->getSize() == 32) {
178       if (B == &ARM::SPR_8RegClass)
179         return 0;  // Do not allow coalescing!
180       return &ARM::QQPR_VFP2RegClass;
181     }
182 
183     assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
184     return 0;  // Do not allow coalescing!
185   }
186   case ARM::dsub_0:
187   case ARM::dsub_1:
188   case ARM::dsub_2:
189   case ARM::dsub_3: {
190     // D sub-registers.
191     if (A->getSize() == 16) {
192       if (B == &ARM::DPR_VFP2RegClass)
193         return &ARM::QPR_VFP2RegClass;
194       if (B == &ARM::DPR_8RegClass)
195         return 0;  // Do not allow coalescing!
196       return A;
197     }
198 
199     if (A->getSize() == 32) {
200       if (B == &ARM::DPR_VFP2RegClass)
201         return &ARM::QQPR_VFP2RegClass;
202       if (B == &ARM::DPR_8RegClass)
203         return 0;  // Do not allow coalescing!
204       return A;
205     }
206 
207     assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
208     if (B != &ARM::DPRRegClass)
209       return 0;  // Do not allow coalescing!
210     return A;
211   }
212   case ARM::dsub_4:
213   case ARM::dsub_5:
214   case ARM::dsub_6:
215   case ARM::dsub_7: {
216     // D sub-registers of QQQQ registers.
217     if (A->getSize() == 64 && B == &ARM::DPRRegClass)
218       return A;
219     return 0;  // Do not allow coalescing!
220   }
221 
222   case ARM::qsub_0:
223   case ARM::qsub_1: {
224     // Q sub-registers.
225     if (A->getSize() == 32) {
226       if (B == &ARM::QPR_VFP2RegClass)
227         return &ARM::QQPR_VFP2RegClass;
228       if (B == &ARM::QPR_8RegClass)
229         return 0;  // Do not allow coalescing!
230       return A;
231     }
232 
233     assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
234     if (B == &ARM::QPRRegClass)
235       return A;
236     return 0;  // Do not allow coalescing!
237   }
238   case ARM::qsub_2:
239   case ARM::qsub_3: {
240     // Q sub-registers of QQQQ registers.
241     if (A->getSize() == 64 && B == &ARM::QPRRegClass)
242       return A;
243     return 0;  // Do not allow coalescing!
244   }
245   }
246   return 0;
247 }
248 
249 bool
250 ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
251                                           SmallVectorImpl<unsigned> &SubIndices,
252                                           unsigned &NewSubIdx) const {
253 
254   unsigned Size = RC->getSize() * 8;
255   if (Size < 6)
256     return 0;
257 
258   NewSubIdx = 0;  // Whole register.
259   unsigned NumRegs = SubIndices.size();
260   if (NumRegs == 8) {
261     // 8 D registers -> 1 QQQQ register.
262     return (Size == 512 &&
263             SubIndices[0] == ARM::dsub_0 &&
264             SubIndices[1] == ARM::dsub_1 &&
265             SubIndices[2] == ARM::dsub_2 &&
266             SubIndices[3] == ARM::dsub_3 &&
267             SubIndices[4] == ARM::dsub_4 &&
268             SubIndices[5] == ARM::dsub_5 &&
269             SubIndices[6] == ARM::dsub_6 &&
270             SubIndices[7] == ARM::dsub_7);
271   } else if (NumRegs == 4) {
272     if (SubIndices[0] == ARM::qsub_0) {
273       // 4 Q registers -> 1 QQQQ register.
274       return (Size == 512 &&
275               SubIndices[1] == ARM::qsub_1 &&
276               SubIndices[2] == ARM::qsub_2 &&
277               SubIndices[3] == ARM::qsub_3);
278     } else if (SubIndices[0] == ARM::dsub_0) {
279       // 4 D registers -> 1 QQ register.
280       if (Size >= 256 &&
281           SubIndices[1] == ARM::dsub_1 &&
282           SubIndices[2] == ARM::dsub_2 &&
283           SubIndices[3] == ARM::dsub_3) {
284         if (Size == 512)
285           NewSubIdx = ARM::qqsub_0;
286         return true;
287       }
288     } else if (SubIndices[0] == ARM::dsub_4) {
289       // 4 D registers -> 1 QQ register (2nd).
290       if (Size == 512 &&
291           SubIndices[1] == ARM::dsub_5 &&
292           SubIndices[2] == ARM::dsub_6 &&
293           SubIndices[3] == ARM::dsub_7) {
294         NewSubIdx = ARM::qqsub_1;
295         return true;
296       }
297     } else if (SubIndices[0] == ARM::ssub_0) {
298       // 4 S registers -> 1 Q register.
299       if (Size >= 128 &&
300           SubIndices[1] == ARM::ssub_1 &&
301           SubIndices[2] == ARM::ssub_2 &&
302           SubIndices[3] == ARM::ssub_3) {
303         if (Size >= 256)
304           NewSubIdx = ARM::qsub_0;
305         return true;
306       }
307     }
308   } else if (NumRegs == 2) {
309     if (SubIndices[0] == ARM::qsub_0) {
310       // 2 Q registers -> 1 QQ register.
311       if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
312         if (Size == 512)
313           NewSubIdx = ARM::qqsub_0;
314         return true;
315       }
316     } else if (SubIndices[0] == ARM::qsub_2) {
317       // 2 Q registers -> 1 QQ register (2nd).
318       if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
319         NewSubIdx = ARM::qqsub_1;
320         return true;
321       }
322     } else if (SubIndices[0] == ARM::dsub_0) {
323       // 2 D registers -> 1 Q register.
324       if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
325         if (Size >= 256)
326           NewSubIdx = ARM::qsub_0;
327         return true;
328       }
329     } else if (SubIndices[0] == ARM::dsub_2) {
330       // 2 D registers -> 1 Q register (2nd).
331       if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
332         NewSubIdx = ARM::qsub_1;
333         return true;
334       }
335     } else if (SubIndices[0] == ARM::dsub_4) {
336       // 2 D registers -> 1 Q register (3rd).
337       if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
338         NewSubIdx = ARM::qsub_2;
339         return true;
340       }
341     } else if (SubIndices[0] == ARM::dsub_6) {
342       // 2 D registers -> 1 Q register (4th).
343       if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
344         NewSubIdx = ARM::qsub_3;
345         return true;
346       }
347     } else if (SubIndices[0] == ARM::ssub_0) {
348       // 2 S registers -> 1 D register.
349       if (SubIndices[1] == ARM::ssub_1) {
350         if (Size >= 128)
351           NewSubIdx = ARM::dsub_0;
352         return true;
353       }
354     } else if (SubIndices[0] == ARM::ssub_2) {
355       // 2 S registers -> 1 D register (2nd).
356       if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
357         NewSubIdx = ARM::dsub_1;
358         return true;
359       }
360     }
361   }
362   return false;
363 }
364 
365 const TargetRegisterClass*
366 ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
367                                                                          const {
368   const TargetRegisterClass *Super = RC;
369   TargetRegisterClass::sc_iterator I = RC->getSuperClasses();
370   do {
371     switch (Super->getID()) {
372     case ARM::GPRRegClassID:
373     case ARM::SPRRegClassID:
374     case ARM::DPRRegClassID:
375     case ARM::QPRRegClassID:
376     case ARM::QQPRRegClassID:
377     case ARM::QQQQPRRegClassID:
378       return Super;
379     }
380     Super = *I++;
381   } while (Super);
382   return RC;
383 }
384 
385 const TargetRegisterClass *
386 ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
387   return ARM::GPRRegisterClass;
388 }
389 
390 const TargetRegisterClass *
391 ARMBaseRegisterInfo::getCrossCopyRegClass(const TargetRegisterClass *RC) const {
392   if (RC == &ARM::CCRRegClass)
393     return 0;  // Can't copy CCR registers.
394   return RC;
395 }
396 
397 unsigned
398 ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
399                                          MachineFunction &MF) const {
400   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
401 
402   switch (RC->getID()) {
403   default:
404     return 0;
405   case ARM::tGPRRegClassID:
406     return TFI->hasFP(MF) ? 4 : 5;
407   case ARM::GPRRegClassID: {
408     unsigned FP = TFI->hasFP(MF) ? 1 : 0;
409     return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
410   }
411   case ARM::SPRRegClassID:  // Currently not used as 'rep' register class.
412   case ARM::DPRRegClassID:
413     return 32 - 10;
414   }
415 }
416 
417 /// getRawAllocationOrder - Returns the register allocation order for a
418 /// specified register class with a target-dependent hint.
419 ArrayRef<unsigned>
420 ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
421                                            unsigned HintType, unsigned HintReg,
422                                            const MachineFunction &MF) const {
423   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
424   // Alternative register allocation orders when favoring even / odd registers
425   // of register pairs.
426 
427   // No FP, R9 is available.
428   static const unsigned GPREven1[] = {
429     ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
430     ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
431     ARM::R9, ARM::R11
432   };
433   static const unsigned GPROdd1[] = {
434     ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
435     ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
436     ARM::R8, ARM::R10
437   };
438 
439   // FP is R7, R9 is available.
440   static const unsigned GPREven2[] = {
441     ARM::R0, ARM::R2, ARM::R4,          ARM::R8, ARM::R10,
442     ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
443     ARM::R9, ARM::R11
444   };
445   static const unsigned GPROdd2[] = {
446     ARM::R1, ARM::R3, ARM::R5,          ARM::R9, ARM::R11,
447     ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
448     ARM::R8, ARM::R10
449   };
450 
451   // FP is R11, R9 is available.
452   static const unsigned GPREven3[] = {
453     ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
454     ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
455     ARM::R9
456   };
457   static const unsigned GPROdd3[] = {
458     ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
459     ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
460     ARM::R8
461   };
462 
463   // No FP, R9 is not available.
464   static const unsigned GPREven4[] = {
465     ARM::R0, ARM::R2, ARM::R4, ARM::R6,          ARM::R10,
466     ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
467     ARM::R11
468   };
469   static const unsigned GPROdd4[] = {
470     ARM::R1, ARM::R3, ARM::R5, ARM::R7,          ARM::R11,
471     ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
472     ARM::R10
473   };
474 
475   // FP is R7, R9 is not available.
476   static const unsigned GPREven5[] = {
477     ARM::R0, ARM::R2, ARM::R4,                   ARM::R10,
478     ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
479     ARM::R11
480   };
481   static const unsigned GPROdd5[] = {
482     ARM::R1, ARM::R3, ARM::R5,                   ARM::R11,
483     ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
484     ARM::R10
485   };
486 
487   // FP is R11, R9 is not available.
488   static const unsigned GPREven6[] = {
489     ARM::R0, ARM::R2, ARM::R4, ARM::R6,
490     ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
491   };
492   static const unsigned GPROdd6[] = {
493     ARM::R1, ARM::R3, ARM::R5, ARM::R7,
494     ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
495   };
496 
497   // We only support even/odd hints for GPR and rGPR.
498   if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
499     return RC->getRawAllocationOrder(MF);
500 
501   if (HintType == ARMRI::RegPairEven) {
502     if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
503       // It's no longer possible to fulfill this hint. Return the default
504       // allocation order.
505       return RC->getRawAllocationOrder(MF);
506 
507     if (!TFI->hasFP(MF)) {
508       if (!STI.isR9Reserved())
509         return makeArrayRef(GPREven1);
510       else
511         return makeArrayRef(GPREven4);
512     } else if (FramePtr == ARM::R7) {
513       if (!STI.isR9Reserved())
514         return makeArrayRef(GPREven2);
515       else
516         return makeArrayRef(GPREven5);
517     } else { // FramePtr == ARM::R11
518       if (!STI.isR9Reserved())
519         return makeArrayRef(GPREven3);
520       else
521         return makeArrayRef(GPREven6);
522     }
523   } else if (HintType == ARMRI::RegPairOdd) {
524     if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
525       // It's no longer possible to fulfill this hint. Return the default
526       // allocation order.
527       return RC->getRawAllocationOrder(MF);
528 
529     if (!TFI->hasFP(MF)) {
530       if (!STI.isR9Reserved())
531         return makeArrayRef(GPROdd1);
532       else
533         return makeArrayRef(GPROdd4);
534     } else if (FramePtr == ARM::R7) {
535       if (!STI.isR9Reserved())
536         return makeArrayRef(GPROdd2);
537       else
538         return makeArrayRef(GPROdd5);
539     } else { // FramePtr == ARM::R11
540       if (!STI.isR9Reserved())
541         return makeArrayRef(GPROdd3);
542       else
543         return makeArrayRef(GPROdd6);
544     }
545   }
546   return RC->getRawAllocationOrder(MF);
547 }
548 
549 /// ResolveRegAllocHint - Resolves the specified register allocation hint
550 /// to a physical register. Returns the physical register if it is successful.
551 unsigned
552 ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
553                                          const MachineFunction &MF) const {
554   if (Reg == 0 || !isPhysicalRegister(Reg))
555     return 0;
556   if (Type == 0)
557     return Reg;
558   else if (Type == (unsigned)ARMRI::RegPairOdd)
559     // Odd register.
560     return getRegisterPairOdd(Reg, MF);
561   else if (Type == (unsigned)ARMRI::RegPairEven)
562     // Even register.
563     return getRegisterPairEven(Reg, MF);
564   return 0;
565 }
566 
567 void
568 ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
569                                         MachineFunction &MF) const {
570   MachineRegisterInfo *MRI = &MF.getRegInfo();
571   std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
572   if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
573        Hint.first == (unsigned)ARMRI::RegPairEven) &&
574       TargetRegisterInfo::isVirtualRegister(Hint.second)) {
575     // If 'Reg' is one of an even / odd register pair and it has now been
576     // changed (e.g. coalesced) into a different register, the allocation
577     // hint on the other register of the pair must be updated to reflect
578     // the relationship change.
579     unsigned OtherReg = Hint.second;
580     Hint = MRI->getRegAllocationHint(OtherReg);
581     if (Hint.second == Reg)
582       // Make sure the pair has not already divorced.
583       MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
584   }
585 }
586 
587 bool
588 ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
589   // CortexA9 has a Write-after-write hazard for NEON registers.
590   if (!STI.isCortexA9())
591     return false;
592 
593   switch (RC->getID()) {
594   case ARM::DPRRegClassID:
595   case ARM::DPR_8RegClassID:
596   case ARM::DPR_VFP2RegClassID:
597   case ARM::QPRRegClassID:
598   case ARM::QPR_8RegClassID:
599   case ARM::QPR_VFP2RegClassID:
600   case ARM::SPRRegClassID:
601   case ARM::SPR_8RegClassID:
602     // Avoid reusing S, D, and Q registers.
603     // Don't increase register pressure for QQ and QQQQ.
604     return true;
605   default:
606     return false;
607   }
608 }
609 
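/// hasBasePointer - Return true if this function needs a dedicated base
/// pointer register (R6), e.g. for realigned frames with variable sized
/// objects, or Thumb functions whose locals may be out of FP range.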
610 bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
611   const MachineFrameInfo *MFI = MF.getFrameInfo();
612   const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
613 
614   if (!EnableBasePointer)
615     return false;
616 
617   if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
618     return true;
619 
620   // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
621   // negative range for ldr/str (255), and thumb1 is positive offsets only.
622   // It's going to be better to use the SP or Base Pointer instead. When there
623   // are variable sized objects, we can't reference off of the SP, so we
624   // reserve a Base Pointer.
625   if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
626     // Conservatively estimate whether the negative offset from the frame
627     // pointer will be sufficient to reach. If a function has a smallish
628     // frame, it's less likely to have lots of spills and callee saved
629     // space, so it's all more likely to be within range of the frame pointer.
630     // If the estimate is wrong, the register scavenger will still make the
631     // access work; it just won't be optimal.
632     if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
633       return false;
634     return true;
635   }
636 
637   return false;
638 }
639 
640 bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
641   const MachineFrameInfo *MFI = MF.getFrameInfo();
642   const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
643   // We can't realign the stack if:
644   // 1. Dynamic stack realignment is explicitly disabled,
645   // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
646   // 3. There are VLAs in the function and the base pointer is disabled.
647   return (RealignStack && !AFI->isThumb1OnlyFunction() &&
648           (!MFI->hasVarSizedObjects() || EnableBasePointer));
649 }
650 
651 bool ARMBaseRegisterInfo::
652 needsStackRealignment(const MachineFunction &MF) const {
653   const MachineFrameInfo *MFI = MF.getFrameInfo();
654   const Function *F = MF.getFunction();
655   unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
656   bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
657                                F->hasFnAttr(Attribute::StackAlignment));
658 
659   return requiresRealignment && canRealignStack(MF);
660 }
661 
662 bool ARMBaseRegisterInfo::
663 cannotEliminateFrame(const MachineFunction &MF) const {
664   const MachineFrameInfo *MFI = MF.getFrameInfo();
665   if (DisableFramePointerElim(MF) && MFI->adjustsStack())
666     return true;
667   return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
668     || needsStackRealignment(MF);
669 }
670 
671 unsigned
672 ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
673   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
674 
675   if (TFI->hasFP(MF))
676     return FramePtr;
677   return ARM::SP;
678 }
679 
680 unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
681   llvm_unreachable("What is the exception register");
682   return 0;
683 }
684 
685 unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
686   llvm_unreachable("What is the exception handler register");
687   return 0;
688 }
689 
690 unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
691                                               const MachineFunction &MF) const {
692   switch (Reg) {
693   default: break;
694   // Return 0 if either register of the pair is a special register.
695   // So no R12, etc.
696   case ARM::R1: return ARM::R0;
697   case ARM::R3: return ARM::R2;
698   case ARM::R5: return ARM::R4;
699   case ARM::R7:
700     return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
701       ? 0 : ARM::R6;
702   case ARM::R9: return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R8;
703   case ARM::R11: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
704 
705   case ARM::S1: return ARM::S0;
706   case ARM::S3: return ARM::S2;
707   case ARM::S5: return ARM::S4;
708   case ARM::S7: return ARM::S6;
709   case ARM::S9: return ARM::S8;
710   case ARM::S11: return ARM::S10;
711   case ARM::S13: return ARM::S12;
712   case ARM::S15: return ARM::S14;
713   case ARM::S17: return ARM::S16;
714   case ARM::S19: return ARM::S18;
715   case ARM::S21: return ARM::S20;
716   case ARM::S23: return ARM::S22;
717   case ARM::S25: return ARM::S24;
718   case ARM::S27: return ARM::S26;
719   case ARM::S29: return ARM::S28;
720   case ARM::S31: return ARM::S30;
721 
722   case ARM::D1: return ARM::D0;
723   case ARM::D3: return ARM::D2;
724   case ARM::D5: return ARM::D4;
725   case ARM::D7: return ARM::D6;
726   case ARM::D9: return ARM::D8;
727   case ARM::D11: return ARM::D10;
728   case ARM::D13: return ARM::D12;
729   case ARM::D15: return ARM::D14;
730   case ARM::D17: return ARM::D16;
731   case ARM::D19: return ARM::D18;
732   case ARM::D21: return ARM::D20;
733   case ARM::D23: return ARM::D22;
734   case ARM::D25: return ARM::D24;
735   case ARM::D27: return ARM::D26;
736   case ARM::D29: return ARM::D28;
737   case ARM::D31: return ARM::D30;
738   }
739 
740   return 0;
741 }
742 
743 unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
744                                              const MachineFunction &MF) const {
745   switch (Reg) {
746   default: break;
747   // Return 0 if either register of the pair is a special register.
748   // So no R12, etc.
749   case ARM::R0: return ARM::R1;
750   case ARM::R2: return ARM::R3;
751   case ARM::R4: return ARM::R5;
752   case ARM::R6:
753     return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
754       ? 0 : ARM::R7;
755   case ARM::R8: return isReservedReg(MF, ARM::R9)  ? 0 :ARM::R9;
756   case ARM::R10: return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
757 
758   case ARM::S0: return ARM::S1;
759   case ARM::S2: return ARM::S3;
760   case ARM::S4: return ARM::S5;
761   case ARM::S6: return ARM::S7;
762   case ARM::S8: return ARM::S9;
763   case ARM::S10: return ARM::S11;
764   case ARM::S12: return ARM::S13;
765   case ARM::S14: return ARM::S15;
766   case ARM::S16: return ARM::S17;
767   case ARM::S18: return ARM::S19;
768   case ARM::S20: return ARM::S21;
769   case ARM::S22: return ARM::S23;
770   case ARM::S24: return ARM::S25;
771   case ARM::S26: return ARM::S27;
772   case ARM::S28: return ARM::S29;
773   case ARM::S30: return ARM::S31;
774 
775   case ARM::D0: return ARM::D1;
776   case ARM::D2: return ARM::D3;
777   case ARM::D4: return ARM::D5;
778   case ARM::D6: return ARM::D7;
779   case ARM::D8: return ARM::D9;
780   case ARM::D10: return ARM::D11;
781   case ARM::D12: return ARM::D13;
782   case ARM::D14: return ARM::D15;
783   case ARM::D16: return ARM::D17;
784   case ARM::D18: return ARM::D19;
785   case ARM::D20: return ARM::D21;
786   case ARM::D22: return ARM::D23;
787   case ARM::D24: return ARM::D25;
788   case ARM::D26: return ARM::D27;
789   case ARM::D28: return ARM::D29;
790   case ARM::D30: return ARM::D31;
791   }
792 
793   return 0;
794 }
795 
796 /// emitLoadConstPool - Emits a load from constpool to materialize the
797 /// specified immediate.
798 void ARMBaseRegisterInfo::
799 emitLoadConstPool(MachineBasicBlock &MBB,
800                   MachineBasicBlock::iterator &MBBI,
801                   DebugLoc dl,
802                   unsigned DestReg, unsigned SubIdx, int Val,
803                   ARMCC::CondCodes Pred,
804                   unsigned PredReg, unsigned MIFlags) const {
805   MachineFunction &MF = *MBB.getParent();
806   MachineConstantPool *ConstantPool = MF.getConstantPool();
807   const Constant *C =
808         ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
809   unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
810 
811   BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
812     .addReg(DestReg, getDefRegState(true), SubIdx)
813     .addConstantPoolIndex(Idx)
814     .addImm(0).addImm(Pred).addReg(PredReg)
815     .setMIFlags(MIFlags);
816 }
817 
818 bool ARMBaseRegisterInfo::
819 requiresRegisterScavenging(const MachineFunction &MF) const {
820   return true;
821 }
822 
823 bool ARMBaseRegisterInfo::
824 requiresFrameIndexScavenging(const MachineFunction &MF) const {
825   return true;
826 }
827 
828 bool ARMBaseRegisterInfo::
829 requiresVirtualBaseRegisters(const MachineFunction &MF) const {
830   return EnableLocalStackAlloc;
831 }
832 
833 static void
834 emitSPUpdate(bool isARM,
835              MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
836              DebugLoc dl, const ARMBaseInstrInfo &TII,
837              int NumBytes,
838              ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
839   if (isARM)
840     emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
841                             Pred, PredReg, TII);
842   else
843     emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
844                            Pred, PredReg, TII);
845 }
846 
847 
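/// eliminateCallFramePseudoInstr - Lower ADJCALLSTACKDOWN / ADJCALLSTACKUP
/// pseudos into explicit SP adjustments when the call frame is not reserved,
/// then erase the pseudo instruction.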
848 void ARMBaseRegisterInfo::
849 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
850                               MachineBasicBlock::iterator I) const {
851   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
852   if (!TFI->hasReservedCallFrame(MF)) {
853     // If we have alloca, convert as follows:
854     // ADJCALLSTACKDOWN -> sub, sp, sp, amount
855     // ADJCALLSTACKUP   -> add, sp, sp, amount
856     MachineInstr *Old = I;
857     DebugLoc dl = Old->getDebugLoc();
858     unsigned Amount = Old->getOperand(0).getImm();
859     if (Amount != 0) {
860       // We need to keep the stack aligned properly.  To do this, we round the
861       // amount of space needed for the outgoing arguments up to the next
862       // alignment boundary.
863       unsigned Align = TFI->getStackAlignment();
864       Amount = (Amount+Align-1)/Align*Align;
865 
866       ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
867       assert(!AFI->isThumb1OnlyFunction() &&
868              "This eliminateCallFramePseudoInstr does not support Thumb1!");
869       bool isARM = !AFI->isThumbFunction();
870 
871       // Replace the pseudo instruction with a new instruction...
872       unsigned Opc = Old->getOpcode();
873       int PIdx = Old->findFirstPredOperandIdx();
874       ARMCC::CondCodes Pred = (PIdx == -1)
875         ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
876       if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
877         // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
878         unsigned PredReg = Old->getOperand(2).getReg();
879         emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
880       } else {
881         // Note: PredReg is operand 3 for ADJCALLSTACKUP.
882         unsigned PredReg = Old->getOperand(3).getReg();
883         assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
884         emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
885       }
886     }
887   }
888   MBB.erase(I);
889 }
890 
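/// getFrameIndexInstrOffset - Return the byte offset already encoded in the
/// addressing-mode operands of the given frame-index instruction.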
891 int64_t ARMBaseRegisterInfo::
892 getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
893   const MCInstrDesc &Desc = MI->getDesc();
894   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
895   int64_t InstrOffs = 0;
896   int Scale = 1;
897   unsigned ImmIdx = 0;
898   switch (AddrMode) {
899   case ARMII::AddrModeT2_i8:
900   case ARMII::AddrModeT2_i12:
901   case ARMII::AddrMode_i12:
902     InstrOffs = MI->getOperand(Idx+1).getImm();
903     Scale = 1;
904     break;
905   case ARMII::AddrMode5: {
906     // VFP address mode.
907     const MachineOperand &OffOp = MI->getOperand(Idx+1);
908     InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
909     if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
910       InstrOffs = -InstrOffs;
911     Scale = 4;
912     break;
913   }
914   case ARMII::AddrMode2: {
915     ImmIdx = Idx+2;
916     InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
917     if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
918       InstrOffs = -InstrOffs;
919     break;
920   }
921   case ARMII::AddrMode3: {
922     ImmIdx = Idx+2;
923     InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
924     if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
925       InstrOffs = -InstrOffs;
926     break;
927   }
928   case ARMII::AddrModeT1_s: {
929     ImmIdx = Idx+1;
930     InstrOffs = MI->getOperand(ImmIdx).getImm();
931     Scale = 4;
932     break;
933   }
934   default:
935     llvm_unreachable("Unsupported addressing mode!");
936     break;
937   }
938 
939   return InstrOffs * Scale;
940 }
941 
942 /// needsFrameBaseReg - Returns true if the instruction's frame index
943 /// reference would be better served by a base register other than FP
944 /// or SP. Used by LocalStackFrameAllocation to determine which frame index
945 /// references it should create new base registers for.
946 bool ARMBaseRegisterInfo::
947 needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
948   for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
949     assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
950   }
951 
952   // It's the load/store FI references that cause issues, as it can be difficult
953   // to materialize the offset if it won't fit in the literal field. Estimate
954   // based on the size of the local frame and some conservative assumptions
955   // about the rest of the stack frame (note, this is pre-regalloc, so
956   // we don't know everything for certain yet) whether this offset is likely
957   // to be out of range of the immediate. Return true if so.
958 
959   // We only generate virtual base registers for loads and stores, so
960   // return false for everything else.
961   unsigned Opc = MI->getOpcode();
962   switch (Opc) {
963   case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
964   case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
965   case ARM::t2LDRi12: case ARM::t2LDRi8:
966   case ARM::t2STRi12: case ARM::t2STRi8:
967   case ARM::VLDRS: case ARM::VLDRD:
968   case ARM::VSTRS: case ARM::VSTRD:
969   case ARM::tSTRspi: case ARM::tLDRspi:
970     if (ForceAllBaseRegAlloc)
971       return true;
972     break;
973   default:
974     return false;
975   }
976 
977   // Without a virtual base register, if the function has variable sized
978   // objects, all fixed-size local references will be via the frame pointer.
979   // Approximate the offset and see if it's legal for the instruction.
980   // Note that the incoming offset is based on the SP value at function entry,
981   // so it'll be negative.
982   MachineFunction &MF = *MI->getParent()->getParent();
983   const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
984   MachineFrameInfo *MFI = MF.getFrameInfo();
985   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
986 
987   // Estimate an offset from the frame pointer.
988   // Conservatively assume all callee-saved registers get pushed. R4-R6
989   // will be earlier than the FP, so we ignore those.
990   // R7, LR
991   int64_t FPOffset = Offset - 8;
992   // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
993   if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
994     FPOffset -= 80;
995   // Estimate an offset from the stack pointer.
996   // The incoming offset is relative to the SP at the start of the function,
997   // but when we access the local it'll be relative to the SP after local
998   // allocation, so adjust our SP-relative offset by that allocation size.
999   Offset = -Offset;
1000   Offset += MFI->getLocalFrameSize();
1001   // Assume that we'll have at least some spill slots allocated.
1002   // FIXME: This is a total SWAG number. We should run some statistics
1003   //        and pick a real one.
1004   Offset += 128; // 128 bytes of spill slots
1005 
1006   // If there is a frame pointer, try using it.
1007   // The FP is only available if there is no dynamic realignment. We
1008   // don't know for sure yet whether we'll need that, so we guess based
1009   // on whether there are any local variables that would trigger it.
1010   unsigned StackAlign = TFI->getStackAlignment();
1011   if (TFI->hasFP(MF) &&
1012       !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
1013     if (isFrameOffsetLegal(MI, FPOffset))
1014       return false;
1015   }
1016   // If we can reference via the stack pointer, try that.
1017   // FIXME: This (and the code that resolves the references) can be improved
1018   //        to only disallow SP relative references in the live range of
1019   //        the VLA(s). In practice, it's unclear how much difference that
1020   //        would make, but it may be worth doing.
1021   if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
1022     return false;
1023 
1024   // The offset likely isn't legal; we want to allocate a virtual base register.
1025   return true;
1026 }
1027 
1028 /// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
1029 /// be a pointer to FrameIdx at the beginning of the basic block.
1030 void ARMBaseRegisterInfo::
1031 materializeFrameBaseRegister(MachineBasicBlock *MBB,
1032                              unsigned BaseReg, int FrameIdx,
1033                              int64_t Offset) const {
1034   ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
1035   unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
1036     (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
1037 
1038   MachineBasicBlock::iterator Ins = MBB->begin();
1039   DebugLoc DL;                  // Defaults to "unknown"
1040   if (Ins != MBB->end())
1041     DL = Ins->getDebugLoc();
1042 
1043   const MCInstrDesc &MCID = TII.get(ADDriOpc);
1044   MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1045   MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
1046 
1047   MachineInstrBuilder MIB = AddDefaultPred(BuildMI(*MBB, Ins, DL, MCID, BaseReg)
1048     .addFrameIndex(FrameIdx).addImm(Offset));
1049 
1050   if (!AFI->isThumb1OnlyFunction())
1051     AddDefaultCC(MIB);
1052 }
1053 
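/// resolveFrameIndex - Rewrite the instruction's frame-index operand to use
/// BaseReg plus Offset, folding the offset into the addressing mode.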
1054 void
1055 ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
1056                                        unsigned BaseReg, int64_t Offset) const {
1057   MachineInstr &MI = *I;
1058   MachineBasicBlock &MBB = *MI.getParent();
1059   MachineFunction &MF = *MBB.getParent();
1060   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1061   int Off = Offset; // ARM doesn't need the general 64-bit offsets
1062   unsigned i = 0;
1063 
1064   assert(!AFI->isThumb1OnlyFunction() &&
1065          "This resolveFrameIndex does not support Thumb1!");
1066 
1067   while (!MI.getOperand(i).isFI()) {
1068     ++i;
1069     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1070   }
1071   bool Done = false;
1072   if (!AFI->isThumbFunction())
1073     Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
1074   else {
1075     assert(AFI->isThumb2Function());
1076     Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
1077   }
1078   assert (Done && "Unable to resolve frame index!");
1079   (void)Done;
1080 }
1081 
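/// isFrameOffsetLegal - Return true if the given offset, combined with the
/// offset already encoded in the instruction, fits the immediate field of
/// the instruction's addressing mode.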
1082 bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
1083                                              int64_t Offset) const {
1084   const MCInstrDesc &Desc = MI->getDesc();
1085   unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1086   unsigned i = 0;
1087 
1088   while (!MI->getOperand(i).isFI()) {
1089     ++i;
1090     assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
1091   }
1092 
1093   // AddrMode4 and AddrMode6 cannot handle any offset.
1094   if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
1095     return Offset == 0;
1096 
1097   unsigned NumBits = 0;
1098   unsigned Scale = 1;
1099   bool isSigned = true;
1100   switch (AddrMode) {
1101   case ARMII::AddrModeT2_i8:
1102   case ARMII::AddrModeT2_i12:
1103     // i8 supports only negative, and i12 supports only positive, so
1104     // based on Offset sign, consider the appropriate instruction
1105     Scale = 1;
1106     if (Offset < 0) {
1107       NumBits = 8;
1108       Offset = -Offset;
1109     } else {
1110       NumBits = 12;
1111     }
1112     break;
1113   case ARMII::AddrMode5:
1114     // VFP address mode.
1115     NumBits = 8;
1116     Scale = 4;
1117     break;
1118   case ARMII::AddrMode_i12:
1119   case ARMII::AddrMode2:
1120     NumBits = 12;
1121     break;
1122   case ARMII::AddrMode3:
1123     NumBits = 8;
1124     break;
1125   case ARMII::AddrModeT1_s:
1126     NumBits = 5;
1127     Scale = 4;
1128     isSigned = false;
1129     break;
1130   default:
1131     llvm_unreachable("Unsupported addressing mode!");
1132     break;
1133   }
1134 
1135   Offset += getFrameIndexInstrOffset(MI, i);
1136   // Make sure the offset is encodable for instructions that scale the
1137   // immediate.
1138   if ((Offset & (Scale-1)) != 0)
1139     return false;
1140 
1141   if (isSigned && Offset < 0)
1142     Offset = -Offset;
1143 
1144   unsigned Mask = (1 << NumBits) - 1;
1145   if ((unsigned)Offset <= Mask * Scale)
1146     return true;
1147 
1148   return false;
1149 }
1150 
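/// eliminateFrameIndex - Replace an abstract frame-index operand with the
/// actual frame register, folding as much of the offset as possible and
/// materializing any remainder into a scratch register.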
1151 void
1152 ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
1153                                          int SPAdj, RegScavenger *RS) const {
1154   unsigned i = 0;
1155   MachineInstr &MI = *II;
1156   MachineBasicBlock &MBB = *MI.getParent();
1157   MachineFunction &MF = *MBB.getParent();
1158   const ARMFrameLowering *TFI =
1159     static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
1160   ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1161   assert(!AFI->isThumb1OnlyFunction() &&
1162          "This eliminateFrameIndex does not support Thumb1!");
1163 
1164   while (!MI.getOperand(i).isFI()) {
1165     ++i;
1166     assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1167   }
1168 
1169   int FrameIndex = MI.getOperand(i).getIndex();
1170   unsigned FrameReg;
1171 
1172   int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
1173 
1174   // Special handling of dbg_value instructions.
1175   if (MI.isDebugValue()) {
1176     MI.getOperand(i).  ChangeToRegister(FrameReg, false /*isDef*/);
1177     MI.getOperand(i+1).ChangeToImmediate(Offset);
1178     return;
1179   }
1180 
1181   // Modify MI as necessary to handle as much of 'Offset' as possible
1182   bool Done = false;
1183   if (!AFI->isThumbFunction())
1184     Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
1185   else {
1186     assert(AFI->isThumb2Function());
1187     Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
1188   }
1189   if (Done)
1190     return;
1191 
1192   // If we get here, the immediate doesn't fit into the instruction.  We folded
1193   // as much as possible above, handle the rest, providing a register that is
1194   // SP+LargeImm.
1195   assert((Offset ||
1196           (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
1197           (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
1198          "This code isn't needed if offset already handled!");
1199 
1200   unsigned ScratchReg = 0;
1201   int PIdx = MI.findFirstPredOperandIdx();
1202   ARMCC::CondCodes Pred = (PIdx == -1)
1203     ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
1204   unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
1205   if (Offset == 0)
1206     // Must be addrmode4/6.
1207     MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
1208   else {
1209     ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
1210     if (!AFI->isThumbFunction())
1211       emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1212                               Offset, Pred, PredReg, TII);
1213     else {
1214       assert(AFI->isThumb2Function());
1215       emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1216                              Offset, Pred, PredReg, TII);
1217     }
1218     // Update the original instruction to use the scratch register.
1219     MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
1220   }
1221 }
1222