1 //===- ARMBaseRegisterInfo.cpp - ARM Register Information -------*- C++ -*-===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the base ARM implementation of TargetRegisterInfo class.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "ARM.h"
15 #include "ARMAddressingModes.h"
16 #include "ARMBaseInstrInfo.h"
17 #include "ARMBaseRegisterInfo.h"
18 #include "ARMFrameLowering.h"
19 #include "ARMInstrInfo.h"
20 #include "ARMMachineFunctionInfo.h"
21 #include "ARMSubtarget.h"
22 #include "llvm/Constants.h"
23 #include "llvm/DerivedTypes.h"
24 #include "llvm/Function.h"
25 #include "llvm/LLVMContext.h"
26 #include "llvm/CodeGen/MachineConstantPool.h"
27 #include "llvm/CodeGen/MachineFrameInfo.h"
28 #include "llvm/CodeGen/MachineFunction.h"
29 #include "llvm/CodeGen/MachineInstrBuilder.h"
30 #include "llvm/CodeGen/MachineRegisterInfo.h"
31 #include "llvm/CodeGen/RegisterScavenging.h"
32 #include "llvm/Support/Debug.h"
33 #include "llvm/Support/ErrorHandling.h"
34 #include "llvm/Support/raw_ostream.h"
35 #include "llvm/Target/TargetFrameLowering.h"
36 #include "llvm/Target/TargetMachine.h"
37 #include "llvm/Target/TargetOptions.h"
38 #include "llvm/ADT/BitVector.h"
39 #include "llvm/ADT/SmallVector.h"
40 #include "llvm/Support/CommandLine.h"
41
42 #define GET_REGINFO_TARGET_DESC
43 #include "ARMGenRegisterInfo.inc"
44
45 using namespace llvm;
46
47 static cl::opt<bool>
48 ForceAllBaseRegAlloc("arm-force-base-reg-alloc", cl::Hidden, cl::init(false),
49 cl::desc("Force use of virtual base registers for stack load/store"));
50 static cl::opt<bool>
51 EnableLocalStackAlloc("enable-local-stack-alloc", cl::init(true), cl::Hidden,
52 cl::desc("Enable pre-regalloc stack frame index allocation"));
53 static cl::opt<bool>
54 EnableBasePointer("arm-use-base-pointer", cl::Hidden, cl::init(true),
55 cl::desc("Enable use of a base pointer for complex stack frames"));
56
57 ARMBaseRegisterInfo::ARMBaseRegisterInfo(const ARMBaseInstrInfo &tii,
58 const ARMSubtarget &sti)
59 : ARMGenRegisterInfo(ARM::LR), TII(tii), STI(sti),
60 FramePtr((STI.isTargetDarwin() || STI.isThumb()) ? ARM::R7 : ARM::R11),
61 BasePtr(ARM::R6) {
62 }
63
64 const unsigned*
65 ARMBaseRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const {
66 static const unsigned CalleeSavedRegs[] = {
67 ARM::LR, ARM::R11, ARM::R10, ARM::R9, ARM::R8,
68 ARM::R7, ARM::R6, ARM::R5, ARM::R4,
69
70 ARM::D15, ARM::D14, ARM::D13, ARM::D12,
71 ARM::D11, ARM::D10, ARM::D9, ARM::D8,
72 0
73 };
74
75 static const unsigned DarwinCalleeSavedRegs[] = {
76 // Darwin ABI deviates from ARM standard ABI. R9 is not a callee-saved
77 // register.
78 ARM::LR, ARM::R7, ARM::R6, ARM::R5, ARM::R4,
79 ARM::R11, ARM::R10, ARM::R8,
80
81 ARM::D15, ARM::D14, ARM::D13, ARM::D12,
82 ARM::D11, ARM::D10, ARM::D9, ARM::D8,
83 0
84 };
85 return STI.isTargetDarwin() ? DarwinCalleeSavedRegs : CalleeSavedRegs;
86 }
87
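/// getReservedRegs - Mark the registers the allocator may never use: SP, PC
/// and FPSCR, the frame pointer and base pointer when they are in use, R9 on
/// subtargets that reserve it, and D16-D31 when the subtarget lacks them.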
88 BitVector ARMBaseRegisterInfo::
89 getReservedRegs(const MachineFunction &MF) const {
90 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
91
92 // FIXME: avoid re-calculating this every time.
93 BitVector Reserved(getNumRegs());
94 Reserved.set(ARM::SP);
95 Reserved.set(ARM::PC);
96 Reserved.set(ARM::FPSCR);
97 if (TFI->hasFP(MF))
98 Reserved.set(FramePtr);
99 if (hasBasePointer(MF))
100 Reserved.set(BasePtr);
101 // Some targets reserve R9.
102 if (STI.isR9Reserved())
103 Reserved.set(ARM::R9);
104 // Reserve D16-D31 if the subtarget doesn't support them.
105 if (!STI.hasVFP3() || STI.hasD16()) {
106 assert(ARM::D31 == ARM::D16 + 15);
107 for (unsigned i = 0; i != 16; ++i)
108 Reserved.set(ARM::D16 + i);
109 }
110 return Reserved;
111 }
112
113 bool ARMBaseRegisterInfo::isReservedReg(const MachineFunction &MF,
114 unsigned Reg) const {
115 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
116
117 switch (Reg) {
118 default: break;
119 case ARM::SP:
120 case ARM::PC:
121 return true;
122 case ARM::R6:
123 if (hasBasePointer(MF))
124 return true;
125 break;
126 case ARM::R7:
127 case ARM::R11:
128 if (FramePtr == Reg && TFI->hasFP(MF))
129 return true;
130 break;
131 case ARM::R9:
132 return STI.isR9Reserved();
133 }
134
135 return false;
136 }
137
138 const TargetRegisterClass *
139 ARMBaseRegisterInfo::getMatchingSuperRegClass(const TargetRegisterClass *A,
140 const TargetRegisterClass *B,
141 unsigned SubIdx) const {
142 switch (SubIdx) {
143 default: return 0;
144 case ARM::ssub_0:
145 case ARM::ssub_1:
146 case ARM::ssub_2:
147 case ARM::ssub_3: {
148 // S sub-registers.
149 if (A->getSize() == 8) {
150 if (B == &ARM::SPR_8RegClass)
151 return &ARM::DPR_8RegClass;
152 assert(B == &ARM::SPRRegClass && "Expecting SPR register class!");
153 if (A == &ARM::DPR_8RegClass)
154 return A;
155 return &ARM::DPR_VFP2RegClass;
156 }
157
158 if (A->getSize() == 16) {
159 if (B == &ARM::SPR_8RegClass)
160 return &ARM::QPR_8RegClass;
161 return &ARM::QPR_VFP2RegClass;
162 }
163
164 if (A->getSize() == 32) {
165 if (B == &ARM::SPR_8RegClass)
166 return 0; // Do not allow coalescing!
167 return &ARM::QQPR_VFP2RegClass;
168 }
169
170 assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
171 return 0; // Do not allow coalescing!
172 }
173 case ARM::dsub_0:
174 case ARM::dsub_1:
175 case ARM::dsub_2:
176 case ARM::dsub_3: {
177 // D sub-registers.
178 if (A->getSize() == 16) {
179 if (B == &ARM::DPR_VFP2RegClass)
180 return &ARM::QPR_VFP2RegClass;
181 if (B == &ARM::DPR_8RegClass)
182 return 0; // Do not allow coalescing!
183 return A;
184 }
185
186 if (A->getSize() == 32) {
187 if (B == &ARM::DPR_VFP2RegClass)
188 return &ARM::QQPR_VFP2RegClass;
189 if (B == &ARM::DPR_8RegClass)
190 return 0; // Do not allow coalescing!
191 return A;
192 }
193
194 assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
195 if (B != &ARM::DPRRegClass)
196 return 0; // Do not allow coalescing!
197 return A;
198 }
199 case ARM::dsub_4:
200 case ARM::dsub_5:
201 case ARM::dsub_6:
202 case ARM::dsub_7: {
203 // D sub-registers of QQQQ registers.
204 if (A->getSize() == 64 && B == &ARM::DPRRegClass)
205 return A;
206 return 0; // Do not allow coalescing!
207 }
208
209 case ARM::qsub_0:
210 case ARM::qsub_1: {
211 // Q sub-registers.
212 if (A->getSize() == 32) {
213 if (B == &ARM::QPR_VFP2RegClass)
214 return &ARM::QQPR_VFP2RegClass;
215 if (B == &ARM::QPR_8RegClass)
216 return 0; // Do not allow coalescing!
217 return A;
218 }
219
220 assert(A->getSize() == 64 && "Expecting a QQQQ register class!");
221 if (B == &ARM::QPRRegClass)
222 return A;
223 return 0; // Do not allow coalescing!
224 }
225 case ARM::qsub_2:
226 case ARM::qsub_3: {
227 // Q sub-registers of QQQQ registers.
228 if (A->getSize() == 64 && B == &ARM::QPRRegClass)
229 return A;
230 return 0; // Do not allow coalescing!
231 }
232 }
233 return 0;
234 }
235
236 bool
237 ARMBaseRegisterInfo::canCombineSubRegIndices(const TargetRegisterClass *RC,
238 SmallVectorImpl<unsigned> &SubIndices,
239 unsigned &NewSubIdx) const {
240
241 unsigned Size = RC->getSize() * 8;
242 if (Size < 6)
243 return 0;
244
245 NewSubIdx = 0; // Whole register.
246 unsigned NumRegs = SubIndices.size();
247 if (NumRegs == 8) {
248 // 8 D registers -> 1 QQQQ register.
249 return (Size == 512 &&
250 SubIndices[0] == ARM::dsub_0 &&
251 SubIndices[1] == ARM::dsub_1 &&
252 SubIndices[2] == ARM::dsub_2 &&
253 SubIndices[3] == ARM::dsub_3 &&
254 SubIndices[4] == ARM::dsub_4 &&
255 SubIndices[5] == ARM::dsub_5 &&
256 SubIndices[6] == ARM::dsub_6 &&
257 SubIndices[7] == ARM::dsub_7);
258 } else if (NumRegs == 4) {
259 if (SubIndices[0] == ARM::qsub_0) {
260 // 4 Q registers -> 1 QQQQ register.
261 return (Size == 512 &&
262 SubIndices[1] == ARM::qsub_1 &&
263 SubIndices[2] == ARM::qsub_2 &&
264 SubIndices[3] == ARM::qsub_3);
265 } else if (SubIndices[0] == ARM::dsub_0) {
266 // 4 D registers -> 1 QQ register.
267 if (Size >= 256 &&
268 SubIndices[1] == ARM::dsub_1 &&
269 SubIndices[2] == ARM::dsub_2 &&
270 SubIndices[3] == ARM::dsub_3) {
271 if (Size == 512)
272 NewSubIdx = ARM::qqsub_0;
273 return true;
274 }
275 } else if (SubIndices[0] == ARM::dsub_4) {
276 // 4 D registers -> 1 QQ register (2nd).
277 if (Size == 512 &&
278 SubIndices[1] == ARM::dsub_5 &&
279 SubIndices[2] == ARM::dsub_6 &&
280 SubIndices[3] == ARM::dsub_7) {
281 NewSubIdx = ARM::qqsub_1;
282 return true;
283 }
284 } else if (SubIndices[0] == ARM::ssub_0) {
285 // 4 S registers -> 1 Q register.
286 if (Size >= 128 &&
287 SubIndices[1] == ARM::ssub_1 &&
288 SubIndices[2] == ARM::ssub_2 &&
289 SubIndices[3] == ARM::ssub_3) {
290 if (Size >= 256)
291 NewSubIdx = ARM::qsub_0;
292 return true;
293 }
294 }
295 } else if (NumRegs == 2) {
296 if (SubIndices[0] == ARM::qsub_0) {
297 // 2 Q registers -> 1 QQ register.
298 if (Size >= 256 && SubIndices[1] == ARM::qsub_1) {
299 if (Size == 512)
300 NewSubIdx = ARM::qqsub_0;
301 return true;
302 }
303 } else if (SubIndices[0] == ARM::qsub_2) {
304 // 2 Q registers -> 1 QQ register (2nd).
305 if (Size == 512 && SubIndices[1] == ARM::qsub_3) {
306 NewSubIdx = ARM::qqsub_1;
307 return true;
308 }
309 } else if (SubIndices[0] == ARM::dsub_0) {
310 // 2 D registers -> 1 Q register.
311 if (Size >= 128 && SubIndices[1] == ARM::dsub_1) {
312 if (Size >= 256)
313 NewSubIdx = ARM::qsub_0;
314 return true;
315 }
316 } else if (SubIndices[0] == ARM::dsub_2) {
317 // 2 D registers -> 1 Q register (2nd).
318 if (Size >= 256 && SubIndices[1] == ARM::dsub_3) {
319 NewSubIdx = ARM::qsub_1;
320 return true;
321 }
322 } else if (SubIndices[0] == ARM::dsub_4) {
323 // 2 D registers -> 1 Q register (3rd).
324 if (Size == 512 && SubIndices[1] == ARM::dsub_5) {
325 NewSubIdx = ARM::qsub_2;
326 return true;
327 }
328 } else if (SubIndices[0] == ARM::dsub_6) {
329 // 2 D registers -> 1 Q register (4th).
330 if (Size == 512 && SubIndices[1] == ARM::dsub_7) {
331 NewSubIdx = ARM::qsub_3;
332 return true;
333 }
334 } else if (SubIndices[0] == ARM::ssub_0) {
335 // 2 S registers -> 1 D register.
336 if (SubIndices[1] == ARM::ssub_1) {
337 if (Size >= 128)
338 NewSubIdx = ARM::dsub_0;
339 return true;
340 }
341 } else if (SubIndices[0] == ARM::ssub_2) {
342 // 2 S registers -> 1 D register (2nd).
343 if (Size >= 128 && SubIndices[1] == ARM::ssub_3) {
344 NewSubIdx = ARM::dsub_1;
345 return true;
346 }
347 }
348 }
349 return false;
350 }
351
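/// getLargestLegalSuperClass - Return RC, or the first of its super-classes,
/// that is one of the "representative" classes (GPR, SPR, DPR, QPR, QQPR or
/// QQQQPR); fall back to RC itself if no such class is found.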
352 const TargetRegisterClass*
353 ARMBaseRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC)
354 const {
355 const TargetRegisterClass *Super = RC;
356 TargetRegisterClass::sc_iterator I = RC->superclasses_begin();
357 do {
358 switch (Super->getID()) {
359 case ARM::GPRRegClassID:
360 case ARM::SPRRegClassID:
361 case ARM::DPRRegClassID:
362 case ARM::QPRRegClassID:
363 case ARM::QQPRRegClassID:
364 case ARM::QQQQPRRegClassID:
365 return Super;
366 }
367 Super = *I++;
368 } while (Super);
369 return RC;
370 }
371
372 const TargetRegisterClass *
373 ARMBaseRegisterInfo::getPointerRegClass(unsigned Kind) const {
374 return ARM::GPRRegisterClass;
375 }
376
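/// getRegPressureLimit - Return a rough upper bound on the number of
/// registers from RC that are available for allocation, accounting for a
/// reserved frame pointer and a reserved R9.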
377 unsigned
378 ARMBaseRegisterInfo::getRegPressureLimit(const TargetRegisterClass *RC,
379 MachineFunction &MF) const {
380 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
381
382 switch (RC->getID()) {
383 default:
384 return 0;
385 case ARM::tGPRRegClassID:
386 return TFI->hasFP(MF) ? 4 : 5;
387 case ARM::GPRRegClassID: {
388 unsigned FP = TFI->hasFP(MF) ? 1 : 0;
389 return 10 - FP - (STI.isR9Reserved() ? 1 : 0);
390 }
391 case ARM::SPRRegClassID: // Currently not used as 'rep' register class.
392 case ARM::DPRRegClassID:
393 return 32 - 10;
394 }
395 }
396
397 /// getRawAllocationOrder - Returns the register allocation order for a
398 /// specified register class with a target-dependent hint.
399 ArrayRef<unsigned>
400 ARMBaseRegisterInfo::getRawAllocationOrder(const TargetRegisterClass *RC,
401 unsigned HintType, unsigned HintReg,
402 const MachineFunction &MF) const {
403 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
404 // Alternative register allocation orders when favoring even / odd registers
405 // of register pairs.
406
407 // No FP, R9 is available.
408 static const unsigned GPREven1[] = {
409 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8, ARM::R10,
410 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7,
411 ARM::R9, ARM::R11
412 };
413 static const unsigned GPROdd1[] = {
414 ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R9, ARM::R11,
415 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
416 ARM::R8, ARM::R10
417 };
418
419 // FP is R7, R9 is available.
420 static const unsigned GPREven2[] = {
421 ARM::R0, ARM::R2, ARM::R4, ARM::R8, ARM::R10,
422 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6,
423 ARM::R9, ARM::R11
424 };
425 static const unsigned GPROdd2[] = {
426 ARM::R1, ARM::R3, ARM::R5, ARM::R9, ARM::R11,
427 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6,
428 ARM::R8, ARM::R10
429 };
430
431 // FP is R11, R9 is available.
432 static const unsigned GPREven3[] = {
433 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R8,
434 ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7,
435 ARM::R9
436 };
437 static const unsigned GPROdd3[] = {
438 ARM::R1, ARM::R3, ARM::R5, ARM::R6, ARM::R9,
439 ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R7,
440 ARM::R8
441 };
442
443 // No FP, R9 is not available.
444 static const unsigned GPREven4[] = {
445 ARM::R0, ARM::R2, ARM::R4, ARM::R6, ARM::R10,
446 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8,
447 ARM::R11
448 };
449 static const unsigned GPROdd4[] = {
450 ARM::R1, ARM::R3, ARM::R5, ARM::R7, ARM::R11,
451 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
452 ARM::R10
453 };
454
455 // FP is R7, R9 is not available.
456 static const unsigned GPREven5[] = {
457 ARM::R0, ARM::R2, ARM::R4, ARM::R10,
458 ARM::R1, ARM::R3, ARM::R12,ARM::LR, ARM::R5, ARM::R6, ARM::R8,
459 ARM::R11
460 };
461 static const unsigned GPROdd5[] = {
462 ARM::R1, ARM::R3, ARM::R5, ARM::R11,
463 ARM::R0, ARM::R2, ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8,
464 ARM::R10
465 };
466
467 // FP is R11, R9 is not available.
468 static const unsigned GPREven6[] = {
469 ARM::R0, ARM::R2, ARM::R4, ARM::R6,
470 ARM::R1, ARM::R3, ARM::R10,ARM::R12,ARM::LR, ARM::R5, ARM::R7, ARM::R8
471 };
472 static const unsigned GPROdd6[] = {
473 ARM::R1, ARM::R3, ARM::R5, ARM::R7,
474 ARM::R0, ARM::R2, ARM::R10,ARM::R12,ARM::LR, ARM::R4, ARM::R6, ARM::R8
475 };
476
477 // We only support even/odd hints for GPR and rGPR.
478 if (RC != ARM::GPRRegisterClass && RC != ARM::rGPRRegisterClass)
479 return RC->getRawAllocationOrder(MF);
480
481 if (HintType == ARMRI::RegPairEven) {
482 if (isPhysicalRegister(HintReg) && getRegisterPairEven(HintReg, MF) == 0)
483 // It's no longer possible to fulfill this hint. Return the default
484 // allocation order.
485 return RC->getRawAllocationOrder(MF);
486
487 if (!TFI->hasFP(MF)) {
488 if (!STI.isR9Reserved())
489 return makeArrayRef(GPREven1);
490 else
491 return makeArrayRef(GPREven4);
492 } else if (FramePtr == ARM::R7) {
493 if (!STI.isR9Reserved())
494 return makeArrayRef(GPREven2);
495 else
496 return makeArrayRef(GPREven5);
497 } else { // FramePtr == ARM::R11
498 if (!STI.isR9Reserved())
499 return makeArrayRef(GPREven3);
500 else
501 return makeArrayRef(GPREven6);
502 }
503 } else if (HintType == ARMRI::RegPairOdd) {
504 if (isPhysicalRegister(HintReg) && getRegisterPairOdd(HintReg, MF) == 0)
505 // It's no longer possible to fulfill this hint. Return the default
506 // allocation order.
507 return RC->getRawAllocationOrder(MF);
508
509 if (!TFI->hasFP(MF)) {
510 if (!STI.isR9Reserved())
511 return makeArrayRef(GPROdd1);
512 else
513 return makeArrayRef(GPROdd4);
514 } else if (FramePtr == ARM::R7) {
515 if (!STI.isR9Reserved())
516 return makeArrayRef(GPROdd2);
517 else
518 return makeArrayRef(GPROdd5);
519 } else { // FramePtr == ARM::R11
520 if (!STI.isR9Reserved())
521 return makeArrayRef(GPROdd3);
522 else
523 return makeArrayRef(GPROdd6);
524 }
525 }
526 return RC->getRawAllocationOrder(MF);
527 }
528
529 /// ResolveRegAllocHint - Resolves the specified register allocation hint
530 /// to a physical register. Returns the physical register if it is successful.
531 unsigned
532 ARMBaseRegisterInfo::ResolveRegAllocHint(unsigned Type, unsigned Reg,
533 const MachineFunction &MF) const {
534 if (Reg == 0 || !isPhysicalRegister(Reg))
535 return 0;
536 if (Type == 0)
537 return Reg;
538 else if (Type == (unsigned)ARMRI::RegPairOdd)
539 // Odd register.
540 return getRegisterPairOdd(Reg, MF);
541 else if (Type == (unsigned)ARMRI::RegPairEven)
542 // Even register.
543 return getRegisterPairEven(Reg, MF);
544 return 0;
545 }
546
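/// UpdateRegAllocHint - Reg has been replaced (e.g. coalesced) by NewReg. If
/// another virtual register carries an even/odd pairing hint that still
/// points at Reg, redirect that hint to NewReg.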
547 void
548 ARMBaseRegisterInfo::UpdateRegAllocHint(unsigned Reg, unsigned NewReg,
549 MachineFunction &MF) const {
550 MachineRegisterInfo *MRI = &MF.getRegInfo();
551 std::pair<unsigned, unsigned> Hint = MRI->getRegAllocationHint(Reg);
552 if ((Hint.first == (unsigned)ARMRI::RegPairOdd ||
553 Hint.first == (unsigned)ARMRI::RegPairEven) &&
554 TargetRegisterInfo::isVirtualRegister(Hint.second)) {
555 // If 'Reg' is one half of an even / odd register pair and it has now been
556 // changed (e.g. coalesced) into a different register, the allocation hint
557 // on the other register of the pair must be updated to reflect the
558 // change.
559 unsigned OtherReg = Hint.second;
560 Hint = MRI->getRegAllocationHint(OtherReg);
561 if (Hint.second == Reg)
562 // Make sure the pair has not already divorced.
563 MRI->setRegAllocationHint(OtherReg, Hint.first, NewReg);
564 }
565 }
566
567 bool
568 ARMBaseRegisterInfo::avoidWriteAfterWrite(const TargetRegisterClass *RC) const {
569 // CortexA9 has a Write-after-write hazard for NEON registers.
570 if (!STI.isCortexA9())
571 return false;
572
573 switch (RC->getID()) {
574 case ARM::DPRRegClassID:
575 case ARM::DPR_8RegClassID:
576 case ARM::DPR_VFP2RegClassID:
577 case ARM::QPRRegClassID:
578 case ARM::QPR_8RegClassID:
579 case ARM::QPR_VFP2RegClassID:
580 case ARM::SPRRegClassID:
581 case ARM::SPR_8RegClassID:
582 // Avoid reusing S, D, and Q registers.
583 // Don't increase register pressure for QQ and QQQQ.
584 return true;
585 default:
586 return false;
587 }
588 }
589
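/// hasBasePointer - Return true if this function needs a dedicated base
/// pointer register (R6): either the stack must be realigned while variable
/// sized objects are present, or a Thumb function with variable sized objects
/// may not be able to reach its locals through negative FP offsets.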
590 bool ARMBaseRegisterInfo::hasBasePointer(const MachineFunction &MF) const {
591 const MachineFrameInfo *MFI = MF.getFrameInfo();
592 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
593
594 if (!EnableBasePointer)
595 return false;
596
597 if (needsStackRealignment(MF) && MFI->hasVarSizedObjects())
598 return true;
599
600 // Thumb has trouble with negative offsets from the FP. Thumb2 has a limited
601 // negative range for ldr/str (255), and thumb1 is positive offsets only.
602 // It's going to be better to use the SP or Base Pointer instead. When there
603 // are variable sized objects, we can't reference off of the SP, so we
604 // reserve a Base Pointer.
605 if (AFI->isThumbFunction() && MFI->hasVarSizedObjects()) {
606 // Conservatively estimate whether the negative offset from the frame
607 // pointer will be sufficient to reach. If a function has a smallish
608 // frame, it's less likely to have lots of spills and callee saved
609 // space, so it's all more likely to be within range of the frame pointer.
610 // If the estimate is wrong, the register scavenger will still make the
611 // access work; it just won't be optimal.
612 if (AFI->isThumb2Function() && MFI->getLocalFrameSize() < 128)
613 return false;
614 return true;
615 }
616
617 return false;
618 }
619
620 bool ARMBaseRegisterInfo::canRealignStack(const MachineFunction &MF) const {
621 const MachineFrameInfo *MFI = MF.getFrameInfo();
622 const ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
623 // We can't realign the stack if:
624 // 1. Dynamic stack realignment is explicitly disabled,
625 // 2. This is a Thumb1 function (it's not useful, so we don't bother), or
626 // 3. There are VLAs in the function and the base pointer is disabled.
627 return (RealignStack && !AFI->isThumb1OnlyFunction() &&
628 (!MFI->hasVarSizedObjects() || EnableBasePointer));
629 }
630
631 bool ARMBaseRegisterInfo::
632 needsStackRealignment(const MachineFunction &MF) const {
633 const MachineFrameInfo *MFI = MF.getFrameInfo();
634 const Function *F = MF.getFunction();
635 unsigned StackAlign = MF.getTarget().getFrameLowering()->getStackAlignment();
636 bool requiresRealignment = ((MFI->getLocalFrameMaxAlign() > StackAlign) ||
637 F->hasFnAttr(Attribute::StackAlignment));
638
639 return requiresRealignment && canRealignStack(MF);
640 }
641
642 bool ARMBaseRegisterInfo::
643 cannotEliminateFrame(const MachineFunction &MF) const {
644 const MachineFrameInfo *MFI = MF.getFrameInfo();
645 if (DisableFramePointerElim(MF) && MFI->adjustsStack())
646 return true;
647 return MFI->hasVarSizedObjects() || MFI->isFrameAddressTaken()
648 || needsStackRealignment(MF);
649 }
650
651 unsigned
652 ARMBaseRegisterInfo::getFrameRegister(const MachineFunction &MF) const {
653 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
654
655 if (TFI->hasFP(MF))
656 return FramePtr;
657 return ARM::SP;
658 }
659
660 unsigned ARMBaseRegisterInfo::getEHExceptionRegister() const {
661 llvm_unreachable("What is the exception register");
662 return 0;
663 }
664
665 unsigned ARMBaseRegisterInfo::getEHHandlerRegister() const {
666 llvm_unreachable("What is the exception handler register");
667 return 0;
668 }
669
670 unsigned ARMBaseRegisterInfo::getRegisterPairEven(unsigned Reg,
671 const MachineFunction &MF) const {
672 switch (Reg) {
673 default: break;
674 // Return 0 if either register of the pair is a special register.
675 // So no R12, etc.
676 case ARM::R1:
677 return ARM::R0;
678 case ARM::R3:
679 return ARM::R2;
680 case ARM::R5:
681 return ARM::R4;
682 case ARM::R7:
683 return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
684 ? 0 : ARM::R6;
685 case ARM::R9:
686 return isReservedReg(MF, ARM::R9) ? 0 : ARM::R8;
687 case ARM::R11:
688 return isReservedReg(MF, ARM::R11) ? 0 : ARM::R10;
689
690 case ARM::S1:
691 return ARM::S0;
692 case ARM::S3:
693 return ARM::S2;
694 case ARM::S5:
695 return ARM::S4;
696 case ARM::S7:
697 return ARM::S6;
698 case ARM::S9:
699 return ARM::S8;
700 case ARM::S11:
701 return ARM::S10;
702 case ARM::S13:
703 return ARM::S12;
704 case ARM::S15:
705 return ARM::S14;
706 case ARM::S17:
707 return ARM::S16;
708 case ARM::S19:
709 return ARM::S18;
710 case ARM::S21:
711 return ARM::S20;
712 case ARM::S23:
713 return ARM::S22;
714 case ARM::S25:
715 return ARM::S24;
716 case ARM::S27:
717 return ARM::S26;
718 case ARM::S29:
719 return ARM::S28;
720 case ARM::S31:
721 return ARM::S30;
722
723 case ARM::D1:
724 return ARM::D0;
725 case ARM::D3:
726 return ARM::D2;
727 case ARM::D5:
728 return ARM::D4;
729 case ARM::D7:
730 return ARM::D6;
731 case ARM::D9:
732 return ARM::D8;
733 case ARM::D11:
734 return ARM::D10;
735 case ARM::D13:
736 return ARM::D12;
737 case ARM::D15:
738 return ARM::D14;
739 case ARM::D17:
740 return ARM::D16;
741 case ARM::D19:
742 return ARM::D18;
743 case ARM::D21:
744 return ARM::D20;
745 case ARM::D23:
746 return ARM::D22;
747 case ARM::D25:
748 return ARM::D24;
749 case ARM::D27:
750 return ARM::D26;
751 case ARM::D29:
752 return ARM::D28;
753 case ARM::D31:
754 return ARM::D30;
755 }
756
757 return 0;
758 }
759
760 unsigned ARMBaseRegisterInfo::getRegisterPairOdd(unsigned Reg,
761 const MachineFunction &MF) const {
762 switch (Reg) {
763 default: break;
764 // Return 0 if either register of the pair is a special register.
765 // So no R12, etc.
766 case ARM::R0:
767 return ARM::R1;
768 case ARM::R2:
769 return ARM::R3;
770 case ARM::R4:
771 return ARM::R5;
772 case ARM::R6:
773 return (isReservedReg(MF, ARM::R7) || isReservedReg(MF, ARM::R6))
774 ? 0 : ARM::R7;
775 case ARM::R8:
776 return isReservedReg(MF, ARM::R9) ? 0 : ARM::R9;
777 case ARM::R10:
778 return isReservedReg(MF, ARM::R11) ? 0 : ARM::R11;
779
780 case ARM::S0:
781 return ARM::S1;
782 case ARM::S2:
783 return ARM::S3;
784 case ARM::S4:
785 return ARM::S5;
786 case ARM::S6:
787 return ARM::S7;
788 case ARM::S8:
789 return ARM::S9;
790 case ARM::S10:
791 return ARM::S11;
792 case ARM::S12:
793 return ARM::S13;
794 case ARM::S14:
795 return ARM::S15;
796 case ARM::S16:
797 return ARM::S17;
798 case ARM::S18:
799 return ARM::S19;
800 case ARM::S20:
801 return ARM::S21;
802 case ARM::S22:
803 return ARM::S23;
804 case ARM::S24:
805 return ARM::S25;
806 case ARM::S26:
807 return ARM::S27;
808 case ARM::S28:
809 return ARM::S29;
810 case ARM::S30:
811 return ARM::S31;
812
813 case ARM::D0:
814 return ARM::D1;
815 case ARM::D2:
816 return ARM::D3;
817 case ARM::D4:
818 return ARM::D5;
819 case ARM::D6:
820 return ARM::D7;
821 case ARM::D8:
822 return ARM::D9;
823 case ARM::D10:
824 return ARM::D11;
825 case ARM::D12:
826 return ARM::D13;
827 case ARM::D14:
828 return ARM::D15;
829 case ARM::D16:
830 return ARM::D17;
831 case ARM::D18:
832 return ARM::D19;
833 case ARM::D20:
834 return ARM::D21;
835 case ARM::D22:
836 return ARM::D23;
837 case ARM::D24:
838 return ARM::D25;
839 case ARM::D26:
840 return ARM::D27;
841 case ARM::D28:
842 return ARM::D29;
843 case ARM::D30:
844 return ARM::D31;
845 }
846
847 return 0;
848 }
849
850 /// emitLoadConstPool - Emits a load from constpool to materialize the
851 /// specified immediate.
852 void ARMBaseRegisterInfo::
853 emitLoadConstPool(MachineBasicBlock &MBB,
854 MachineBasicBlock::iterator &MBBI,
855 DebugLoc dl,
856 unsigned DestReg, unsigned SubIdx, int Val,
857 ARMCC::CondCodes Pred,
858 unsigned PredReg, unsigned MIFlags) const {
859 MachineFunction &MF = *MBB.getParent();
860 MachineConstantPool *ConstantPool = MF.getConstantPool();
861 const Constant *C =
862 ConstantInt::get(Type::getInt32Ty(MF.getFunction()->getContext()), Val);
863 unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);
864
865 BuildMI(MBB, MBBI, dl, TII.get(ARM::LDRcp))
866 .addReg(DestReg, getDefRegState(true), SubIdx)
867 .addConstantPoolIndex(Idx)
868 .addImm(0).addImm(Pred).addReg(PredReg)
869 .setMIFlags(MIFlags);
870 }
871
872 bool ARMBaseRegisterInfo::
873 requiresRegisterScavenging(const MachineFunction &MF) const {
874 return true;
875 }
876
877 bool ARMBaseRegisterInfo::
878 requiresFrameIndexScavenging(const MachineFunction &MF) const {
879 return true;
880 }
881
882 bool ARMBaseRegisterInfo::
883 requiresVirtualBaseRegisters(const MachineFunction &MF) const {
884 return EnableLocalStackAlloc;
885 }
886
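/// emitSPUpdate - Adjust SP by NumBytes, using either an ARM or a Thumb2
/// register-plus-immediate sequence as appropriate.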
887 static void
888 emitSPUpdate(bool isARM,
889 MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
890 DebugLoc dl, const ARMBaseInstrInfo &TII,
891 int NumBytes,
892 ARMCC::CondCodes Pred = ARMCC::AL, unsigned PredReg = 0) {
893 if (isARM)
894 emitARMRegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
895 Pred, PredReg, TII);
896 else
897 emitT2RegPlusImmediate(MBB, MBBI, dl, ARM::SP, ARM::SP, NumBytes,
898 Pred, PredReg, TII);
899 }
900
901
902 void ARMBaseRegisterInfo::
903 eliminateCallFramePseudoInstr(MachineFunction &MF, MachineBasicBlock &MBB,
904 MachineBasicBlock::iterator I) const {
905 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
906 if (!TFI->hasReservedCallFrame(MF)) {
907 // If we have alloca, convert as follows:
908 // ADJCALLSTACKDOWN -> sub, sp, sp, amount
909 // ADJCALLSTACKUP -> add, sp, sp, amount
910 MachineInstr *Old = I;
911 DebugLoc dl = Old->getDebugLoc();
912 unsigned Amount = Old->getOperand(0).getImm();
913 if (Amount != 0) {
914 // We need to keep the stack aligned properly. To do this, we round the
915 // amount of space needed for the outgoing arguments up to the next
916 // alignment boundary.
917 unsigned Align = TFI->getStackAlignment();
918 Amount = (Amount+Align-1)/Align*Align;
919
920 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
921 assert(!AFI->isThumb1OnlyFunction() &&
922 "This eliminateCallFramePseudoInstr does not support Thumb1!");
923 bool isARM = !AFI->isThumbFunction();
924
925 // Replace the pseudo instruction with a new instruction...
926 unsigned Opc = Old->getOpcode();
927 int PIdx = Old->findFirstPredOperandIdx();
928 ARMCC::CondCodes Pred = (PIdx == -1)
929 ? ARMCC::AL : (ARMCC::CondCodes)Old->getOperand(PIdx).getImm();
930 if (Opc == ARM::ADJCALLSTACKDOWN || Opc == ARM::tADJCALLSTACKDOWN) {
931 // Note: PredReg is operand 2 for ADJCALLSTACKDOWN.
932 unsigned PredReg = Old->getOperand(2).getReg();
933 emitSPUpdate(isARM, MBB, I, dl, TII, -Amount, Pred, PredReg);
934 } else {
935 // Note: PredReg is operand 3 for ADJCALLSTACKUP.
936 unsigned PredReg = Old->getOperand(3).getReg();
937 assert(Opc == ARM::ADJCALLSTACKUP || Opc == ARM::tADJCALLSTACKUP);
938 emitSPUpdate(isARM, MBB, I, dl, TII, Amount, Pred, PredReg);
939 }
940 }
941 }
942 MBB.erase(I);
943 }
944
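/// getFrameIndexInstrOffset - Return the byte offset already encoded in MI's
/// frame-index addressing operands, applying the addressing mode's scale and
/// add/sub flag.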
945 int64_t ARMBaseRegisterInfo::
946 getFrameIndexInstrOffset(const MachineInstr *MI, int Idx) const {
947 const MCInstrDesc &Desc = MI->getDesc();
948 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
949 int64_t InstrOffs = 0;
950 int Scale = 1;
951 unsigned ImmIdx = 0;
952 switch (AddrMode) {
953 case ARMII::AddrModeT2_i8:
954 case ARMII::AddrModeT2_i12:
955 case ARMII::AddrMode_i12:
956 InstrOffs = MI->getOperand(Idx+1).getImm();
957 Scale = 1;
958 break;
959 case ARMII::AddrMode5: {
960 // VFP address mode.
961 const MachineOperand &OffOp = MI->getOperand(Idx+1);
962 InstrOffs = ARM_AM::getAM5Offset(OffOp.getImm());
963 if (ARM_AM::getAM5Op(OffOp.getImm()) == ARM_AM::sub)
964 InstrOffs = -InstrOffs;
965 Scale = 4;
966 break;
967 }
968 case ARMII::AddrMode2: {
969 ImmIdx = Idx+2;
970 InstrOffs = ARM_AM::getAM2Offset(MI->getOperand(ImmIdx).getImm());
971 if (ARM_AM::getAM2Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
972 InstrOffs = -InstrOffs;
973 break;
974 }
975 case ARMII::AddrMode3: {
976 ImmIdx = Idx+2;
977 InstrOffs = ARM_AM::getAM3Offset(MI->getOperand(ImmIdx).getImm());
978 if (ARM_AM::getAM3Op(MI->getOperand(ImmIdx).getImm()) == ARM_AM::sub)
979 InstrOffs = -InstrOffs;
980 break;
981 }
982 case ARMII::AddrModeT1_s: {
983 ImmIdx = Idx+1;
984 InstrOffs = MI->getOperand(ImmIdx).getImm();
985 Scale = 4;
986 break;
987 }
988 default:
989 llvm_unreachable("Unsupported addressing mode!");
990 break;
991 }
992
993 return InstrOffs * Scale;
994 }
995
996 /// needsFrameBaseReg - Returns true if the instruction's frame index
997 /// reference would be better served by a base register other than FP
998 /// or SP. Used by LocalStackFrameAllocation to determine which frame index
999 /// references it should create new base registers for.
1000 bool ARMBaseRegisterInfo::
1001 needsFrameBaseReg(MachineInstr *MI, int64_t Offset) const {
1002 for (unsigned i = 0; !MI->getOperand(i).isFI(); ++i) {
1003 assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
1004 }
1005
1006 // It's the load/store FI references that cause issues, as it can be difficult
1007 // to materialize the offset if it won't fit in the literal field. Estimate
1008 // based on the size of the local frame and some conservative assumptions
1009 // about the rest of the stack frame (note, this is pre-regalloc, so
1010 // we don't know everything for certain yet) whether this offset is likely
1011 // to be out of range of the immediate. Return true if so.
1012
1013 // We only generate virtual base registers for loads and stores, so
1014 // return false for everything else.
1015 unsigned Opc = MI->getOpcode();
1016 switch (Opc) {
1017 case ARM::LDRi12: case ARM::LDRH: case ARM::LDRBi12:
1018 case ARM::STRi12: case ARM::STRH: case ARM::STRBi12:
1019 case ARM::t2LDRi12: case ARM::t2LDRi8:
1020 case ARM::t2STRi12: case ARM::t2STRi8:
1021 case ARM::VLDRS: case ARM::VLDRD:
1022 case ARM::VSTRS: case ARM::VSTRD:
1023 case ARM::tSTRspi: case ARM::tLDRspi:
1024 if (ForceAllBaseRegAlloc)
1025 return true;
1026 break;
1027 default:
1028 return false;
1029 }
1030
1031 // Without a virtual base register, if the function has variable sized
1032 // objects, all fixed-size local references will be via the frame pointer.
1033 // Approximate the offset and see if it's legal for the instruction.
1034 // Note that the incoming offset is based on the SP value at function entry,
1035 // so it'll be negative.
1036 MachineFunction &MF = *MI->getParent()->getParent();
1037 const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering();
1038 MachineFrameInfo *MFI = MF.getFrameInfo();
1039 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1040
1041 // Estimate an offset from the frame pointer.
1042 // Conservatively assume all callee-saved registers get pushed. R4-R6
1043 // will be earlier than the FP, so we ignore those.
1044 // R7, LR
1045 int64_t FPOffset = Offset - 8;
1046 // ARM and Thumb2 functions also need to consider R8-R11 and D8-D15
1047 if (!AFI->isThumbFunction() || !AFI->isThumb1OnlyFunction())
1048 FPOffset -= 80;
1049 // Estimate an offset from the stack pointer.
1050 // The incoming offset is relative to the SP at the start of the function,
1051 // but when we access the local it'll be relative to the SP after local
1052 // allocation, so adjust our SP-relative offset by that allocation size.
1053 Offset = -Offset;
1054 Offset += MFI->getLocalFrameSize();
1055 // Assume that we'll have at least some spill slots allocated.
1056 // FIXME: This is a total SWAG number. We should run some statistics
1057 // and pick a real one.
1058 Offset += 128; // 128 bytes of spill slots
1059
1060 // If there is a frame pointer, try using it.
1061 // The FP is only available if there is no dynamic realignment. We
1062 // don't know for sure yet whether we'll need that, so we guess based
1063 // on whether there are any local variables that would trigger it.
1064 unsigned StackAlign = TFI->getStackAlignment();
1065 if (TFI->hasFP(MF) &&
1066 !((MFI->getLocalFrameMaxAlign() > StackAlign) && canRealignStack(MF))) {
1067 if (isFrameOffsetLegal(MI, FPOffset))
1068 return false;
1069 }
1070 // If we can reference via the stack pointer, try that.
1071 // FIXME: This (and the code that resolves the references) can be improved
1072 // to only disallow SP relative references in the live range of
1073 // the VLA(s). In practice, it's unclear how much difference that
1074 // would make, but it may be worth doing.
1075 if (!MFI->hasVarSizedObjects() && isFrameOffsetLegal(MI, Offset))
1076 return false;
1077
1078 // The offset likely isn't legal, we want to allocate a virtual base register.
1079 return true;
1080 }
1081
1082 /// materializeFrameBaseRegister - Insert defining instruction(s) for BaseReg to
1083 /// be a pointer to FrameIdx at the beginning of the basic block.
1084 void ARMBaseRegisterInfo::
1085 materializeFrameBaseRegister(MachineBasicBlock *MBB,
1086 unsigned BaseReg, int FrameIdx,
1087 int64_t Offset) const {
1088 ARMFunctionInfo *AFI = MBB->getParent()->getInfo<ARMFunctionInfo>();
1089 unsigned ADDriOpc = !AFI->isThumbFunction() ? ARM::ADDri :
1090 (AFI->isThumb1OnlyFunction() ? ARM::tADDrSPi : ARM::t2ADDri);
1091
1092 MachineBasicBlock::iterator Ins = MBB->begin();
1093 DebugLoc DL; // Defaults to "unknown"
1094 if (Ins != MBB->end())
1095 DL = Ins->getDebugLoc();
1096
1097 const MCInstrDesc &MCID = TII.get(ADDriOpc);
1098 MachineRegisterInfo &MRI = MBB->getParent()->getRegInfo();
1099 MRI.constrainRegClass(BaseReg, TII.getRegClass(MCID, 0, this));
1100
1101 MachineInstrBuilder MIB = BuildMI(*MBB, Ins, DL, MCID, BaseReg)
1102 .addFrameIndex(FrameIdx).addImm(Offset);
1103
1104 if (!AFI->isThumb1OnlyFunction())
1105 AddDefaultCC(AddDefaultPred(MIB));
1106 }
1107
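/// resolveFrameIndex - Rewrite the frame-index operand of the instruction at
/// I to use BaseReg plus Offset; the rewrite is expected to always succeed
/// because the base register was chosen with this access in mind.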
1108 void
1109 ARMBaseRegisterInfo::resolveFrameIndex(MachineBasicBlock::iterator I,
1110 unsigned BaseReg, int64_t Offset) const {
1111 MachineInstr &MI = *I;
1112 MachineBasicBlock &MBB = *MI.getParent();
1113 MachineFunction &MF = *MBB.getParent();
1114 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1115 int Off = Offset; // ARM doesn't need the general 64-bit offsets
1116 unsigned i = 0;
1117
1118 assert(!AFI->isThumb1OnlyFunction() &&
1119 "This resolveFrameIndex does not support Thumb1!");
1120
1121 while (!MI.getOperand(i).isFI()) {
1122 ++i;
1123 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1124 }
1125 bool Done = false;
1126 if (!AFI->isThumbFunction())
1127 Done = rewriteARMFrameIndex(MI, i, BaseReg, Off, TII);
1128 else {
1129 assert(AFI->isThumb2Function());
1130 Done = rewriteT2FrameIndex(MI, i, BaseReg, Off, TII);
1131 }
1132 assert (Done && "Unable to resolve frame index!");
1133 }
1134
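/// isFrameOffsetLegal - Return true if Offset, combined with the offset
/// already encoded in MI, fits in the immediate field of MI's addressing
/// mode.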
1135 bool ARMBaseRegisterInfo::isFrameOffsetLegal(const MachineInstr *MI,
1136 int64_t Offset) const {
1137 const MCInstrDesc &Desc = MI->getDesc();
1138 unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);
1139 unsigned i = 0;
1140
1141 while (!MI->getOperand(i).isFI()) {
1142 ++i;
1143 assert(i < MI->getNumOperands() &&"Instr doesn't have FrameIndex operand!");
1144 }
1145
1146 // AddrMode4 and AddrMode6 cannot handle any offset.
1147 if (AddrMode == ARMII::AddrMode4 || AddrMode == ARMII::AddrMode6)
1148 return Offset == 0;
1149
1150 unsigned NumBits = 0;
1151 unsigned Scale = 1;
1152 bool isSigned = true;
1153 switch (AddrMode) {
1154 case ARMII::AddrModeT2_i8:
1155 case ARMII::AddrModeT2_i12:
1156 // i8 supports only negative, and i12 supports only positive, so
1157 // based on Offset sign, consider the appropriate instruction
1158 Scale = 1;
1159 if (Offset < 0) {
1160 NumBits = 8;
1161 Offset = -Offset;
1162 } else {
1163 NumBits = 12;
1164 }
1165 break;
1166 case ARMII::AddrMode5:
1167 // VFP address mode.
1168 NumBits = 8;
1169 Scale = 4;
1170 break;
1171 case ARMII::AddrMode_i12:
1172 case ARMII::AddrMode2:
1173 NumBits = 12;
1174 break;
1175 case ARMII::AddrMode3:
1176 NumBits = 8;
1177 break;
1178 case ARMII::AddrModeT1_s:
1179 NumBits = 5;
1180 Scale = 4;
1181 isSigned = false;
1182 break;
1183 default:
1184 llvm_unreachable("Unsupported addressing mode!");
1185 break;
1186 }
1187
1188 Offset += getFrameIndexInstrOffset(MI, i);
1189 // Make sure the offset is encodable for instructions that scale the
1190 // immediate.
1191 if ((Offset & (Scale-1)) != 0)
1192 return false;
1193
1194 if (isSigned && Offset < 0)
1195 Offset = -Offset;
1196
1197 unsigned Mask = (1 << NumBits) - 1;
1198 if ((unsigned)Offset <= Mask * Scale)
1199 return true;
1200
1201 return false;
1202 }
1203
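/// eliminateFrameIndex - Replace the abstract frame-index operand of MI with
/// a real base register and offset. If the resulting offset does not fit the
/// instruction, materialize it into a scratch register first.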
1204 void
1205 ARMBaseRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
1206 int SPAdj, RegScavenger *RS) const {
1207 unsigned i = 0;
1208 MachineInstr &MI = *II;
1209 MachineBasicBlock &MBB = *MI.getParent();
1210 MachineFunction &MF = *MBB.getParent();
1211 const ARMFrameLowering *TFI =
1212 static_cast<const ARMFrameLowering*>(MF.getTarget().getFrameLowering());
1213 ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
1214 assert(!AFI->isThumb1OnlyFunction() &&
1215 "This eliminateFrameIndex does not support Thumb1!");
1216
1217 while (!MI.getOperand(i).isFI()) {
1218 ++i;
1219 assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
1220 }
1221
1222 int FrameIndex = MI.getOperand(i).getIndex();
1223 unsigned FrameReg;
1224
1225 int Offset = TFI->ResolveFrameIndexReference(MF, FrameIndex, FrameReg, SPAdj);
1226
1227 // Special handling of dbg_value instructions.
1228 if (MI.isDebugValue()) {
1229 MI.getOperand(i).ChangeToRegister(FrameReg, false /*isDef*/);
1230 MI.getOperand(i+1).ChangeToImmediate(Offset);
1231 return;
1232 }
1233
1234 // Modify MI as necessary to handle as much of 'Offset' as possible
1235 bool Done = false;
1236 if (!AFI->isThumbFunction())
1237 Done = rewriteARMFrameIndex(MI, i, FrameReg, Offset, TII);
1238 else {
1239 assert(AFI->isThumb2Function());
1240 Done = rewriteT2FrameIndex(MI, i, FrameReg, Offset, TII);
1241 }
1242 if (Done)
1243 return;
1244
1245 // If we get here, the immediate doesn't fit into the instruction. We folded
1246 // as much as possible above, handle the rest, providing a register that is
1247 // SP+LargeImm.
1248 assert((Offset ||
1249 (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode4 ||
1250 (MI.getDesc().TSFlags & ARMII::AddrModeMask) == ARMII::AddrMode6) &&
1251 "This code isn't needed if offset already handled!");
1252
1253 unsigned ScratchReg = 0;
1254 int PIdx = MI.findFirstPredOperandIdx();
1255 ARMCC::CondCodes Pred = (PIdx == -1)
1256 ? ARMCC::AL : (ARMCC::CondCodes)MI.getOperand(PIdx).getImm();
1257 unsigned PredReg = (PIdx == -1) ? 0 : MI.getOperand(PIdx+1).getReg();
1258 if (Offset == 0)
1259 // Must be addrmode4/6.
1260 MI.getOperand(i).ChangeToRegister(FrameReg, false, false, false);
1261 else {
1262 ScratchReg = MF.getRegInfo().createVirtualRegister(ARM::GPRRegisterClass);
1263 if (!AFI->isThumbFunction())
1264 emitARMRegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1265 Offset, Pred, PredReg, TII);
1266 else {
1267 assert(AFI->isThumb2Function());
1268 emitT2RegPlusImmediate(MBB, II, MI.getDebugLoc(), ScratchReg, FrameReg,
1269 Offset, Pred, PredReg, TII);
1270 }
1271 // Update the original instruction to use the scratch register.
1272 MI.getOperand(i).ChangeToRegister(ScratchReg, false, false, true);
1273 }
1274 }
1275