//===-- ThumbRegisterInfo.cpp - Thumb-1 Register Information -------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file contains the Thumb-1 implementation of the TargetRegisterInfo
// class.
//
//===----------------------------------------------------------------------===//

#include "ThumbRegisterInfo.h"
#include "ARMBaseInstrInfo.h"
#include "ARMMachineFunctionInfo.h"
#include "ARMSubtarget.h"
#include "MCTargetDesc/ARMAddressingModes.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/RegisterScavenging.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Target/TargetFrameLowering.h"
#include "llvm/Target/TargetMachine.h"

namespace llvm {
extern cl::opt<bool> ReuseFrameIndexVals;
}

using namespace llvm;

ThumbRegisterInfo::ThumbRegisterInfo() : ARMBaseRegisterInfo() {}

const TargetRegisterClass *
ThumbRegisterInfo::getLargestLegalSuperClass(const TargetRegisterClass *RC,
                                             const MachineFunction &MF) const {
  if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
    return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);

  if (ARM::tGPRRegClass.hasSubClassEq(RC))
    return &ARM::tGPRRegClass;
  return ARMBaseRegisterInfo::getLargestLegalSuperClass(RC, MF);
}

const TargetRegisterClass *
ThumbRegisterInfo::getPointerRegClass(const MachineFunction &MF,
                                      unsigned Kind) const {
  if (!MF.getSubtarget<ARMSubtarget>().isThumb1Only())
    return ARMBaseRegisterInfo::getPointerRegClass(MF, Kind);
  return &ARM::tGPRRegClass;
}

static void emitThumb1LoadConstPool(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &dl, unsigned DestReg,
                                    unsigned SubIdx, int Val,
                                    ARMCC::CondCodes Pred, unsigned PredReg,
                                    unsigned MIFlags) {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::tLDRpci))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx).addImm(Pred).addReg(PredReg)
      .setMIFlags(MIFlags);
}

static void emitThumb2LoadConstPool(MachineBasicBlock &MBB,
                                    MachineBasicBlock::iterator &MBBI,
                                    const DebugLoc &dl, unsigned DestReg,
                                    unsigned SubIdx, int Val,
                                    ARMCC::CondCodes Pred, unsigned PredReg,
                                    unsigned MIFlags) {
  MachineFunction &MF = *MBB.getParent();
  const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
  MachineConstantPool *ConstantPool = MF.getConstantPool();
  const Constant *C = ConstantInt::get(
      Type::getInt32Ty(MBB.getParent()->getFunction()->getContext()), Val);
  unsigned Idx = ConstantPool->getConstantPoolIndex(C, 4);

  BuildMI(MBB, MBBI, dl, TII.get(ARM::t2LDRpci))
      .addReg(DestReg, getDefRegState(true), SubIdx)
      .addConstantPoolIndex(Idx).addImm((int64_t)ARMCC::AL).addReg(0)
      .setMIFlags(MIFlags);
}

/// emitLoadConstPool - Emits a load from constpool to materialize the
/// specified immediate.
void ThumbRegisterInfo::emitLoadConstPool(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned SubIdx, int Val,
    ARMCC::CondCodes Pred, unsigned PredReg, unsigned MIFlags) const {
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (STI.isThumb1Only()) {
    assert((isARMLowRegister(DestReg) || isVirtualRegister(DestReg)) &&
           "Thumb1 does not have ldr to high register");
    return emitThumb1LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
                                   PredReg, MIFlags);
  }
  return emitThumb2LoadConstPool(MBB, MBBI, dl, DestReg, SubIdx, Val, Pred,
                                 PredReg, MIFlags);
}

/// emitThumbRegPlusImmInReg - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Materialize the immediate
/// in a register using mov / mvn sequences or load the immediate from a
/// constpool entry.
static void emitThumbRegPlusImmInReg(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
    const DebugLoc &dl, unsigned DestReg, unsigned BaseReg, int NumBytes,
    bool CanChangeCC, const TargetInstrInfo &TII,
    const ARMBaseRegisterInfo &MRI, unsigned MIFlags = MachineInstr::NoFlags) {
  MachineFunction &MF = *MBB.getParent();
  bool isHigh = !isARMLowRegister(DestReg) ||
                (BaseReg != 0 && !isARMLowRegister(BaseReg));
  bool isSub = false;
  // Subtract doesn't have a high register version. Load the negative value
  // if either the base or the destination register is a high register. Also,
  // do not issue the sub as part of the sequence if the condition flags must
  // be preserved.
  if (NumBytes < 0 && !isHigh && CanChangeCC) {
    isSub = true;
    NumBytes = -NumBytes;
  }
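  // The scratch register holding the immediate must be a low register
  // (tMOVi8 / tLDRpci can only target low registers), so if DestReg is a
  // high physical register such as SP, materialize the constant into a
  // fresh virtual tGPR instead.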
  unsigned LdReg = DestReg;
  if (DestReg == ARM::SP)
    assert(BaseReg == ARM::SP && "Unexpected!");
  if (!isARMLowRegister(DestReg) && !MRI.isVirtualRegister(DestReg))
    LdReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);

  if (NumBytes <= 255 && NumBytes >= 0 && CanChangeCC) {
    AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
  } else if (NumBytes < 0 && NumBytes >= -255 && CanChangeCC) {
    AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tMOVi8), LdReg))
        .addImm(NumBytes)
        .setMIFlags(MIFlags);
    AddDefaultT1CC(BuildMI(MBB, MBBI, dl, TII.get(ARM::tRSB), LdReg))
        .addReg(LdReg, RegState::Kill)
        .setMIFlags(MIFlags);
  } else
    MRI.emitLoadConstPool(MBB, MBBI, dl, LdReg, 0, NumBytes, ARMCC::AL, 0,
                          MIFlags);

  // Emit add / sub.
  int Opc = (isSub) ? ARM::tSUBrr
                    : ((isHigh || !CanChangeCC) ? ARM::tADDhirr : ARM::tADDrr);
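  // tADDhirr has no S bit and therefore takes no CPSR def operand; only the
  // low-register add/sub forms (tADDrr / tSUBrr) get the default T1 CC
  // operand added below.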
  MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(Opc), DestReg);
  if (Opc != ARM::tADDhirr)
    MIB = AddDefaultT1CC(MIB);
  if (DestReg == ARM::SP || isSub)
    MIB.addReg(BaseReg).addReg(LdReg, RegState::Kill);
  else
    MIB.addReg(LdReg).addReg(BaseReg, RegState::Kill);
  AddDefaultPred(MIB);
}

/// emitThumbRegPlusImmediate - Emits a series of instructions to materialize
/// a destreg = basereg + immediate in Thumb code. Tries a series of ADDs or
/// SUBs first, and uses a constant pool value if the instruction sequence would
/// be too long. This is allowed to modify the condition flags.
void llvm::emitThumbRegPlusImmediate(MachineBasicBlock &MBB,
                                     MachineBasicBlock::iterator &MBBI,
                                     const DebugLoc &dl, unsigned DestReg,
                                     unsigned BaseReg, int NumBytes,
                                     const TargetInstrInfo &TII,
                                     const ARMBaseRegisterInfo &MRI,
                                     unsigned MIFlags) {
  bool isSub = NumBytes < 0;
  unsigned Bytes = (unsigned)NumBytes;
  if (isSub) Bytes = -NumBytes;

  int CopyOpc = 0;
  unsigned CopyBits = 0;
  unsigned CopyScale = 1;
  bool CopyNeedsCC = false;
  int ExtraOpc = 0;
  unsigned ExtraBits = 0;
  unsigned ExtraScale = 1;
  bool ExtraNeedsCC = false;

  // Strategy:
  // We need to select two types of instruction, maximizing the available
  // immediate range of each. The instructions we use will depend on whether
  // DestReg and BaseReg are low, high or the stack pointer.
  // * CopyOpc  - DestReg = BaseReg + imm
  //              This will be emitted once if DestReg != BaseReg, and never if
  //              DestReg == BaseReg.
  // * ExtraOpc - DestReg = DestReg + imm
  //              This will be emitted as many times as necessary to add the
  //              full immediate.
  // If the immediate ranges of these instructions are not large enough to
  // cover NumBytes with a reasonable number of instructions, we fall back to
  // using a value loaded from a constant pool.
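  //
  // For example, with a low DestReg, BaseReg == SP and NumBytes == 1024,
  // CopyOpc is tADDrSPi (covering up to 255*4 = 1020 bytes) and the remaining
  // 4 bytes fit in a single tADDi8, so the two-instruction inline sequence is
  // used. With NumBytes == 2048 the remainder would need more tADDi8s than the
  // threshold below allows, and the value is loaded from a constant pool
  // instead.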
  if (DestReg == ARM::SP) {
    if (BaseReg == ARM::SP) {
      // sp -> sp
      // Already in right reg, no copy needed
    } else {
      // low -> sp or high -> sp
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBspi : ARM::tADDspi;
    ExtraBits = 7;
    ExtraScale = 4;
  } else if (isARMLowRegister(DestReg)) {
    if (BaseReg == ARM::SP) {
      // sp -> low
      assert(!isSub && "Thumb1 does not have tSUBrSPi");
      CopyOpc = ARM::tADDrSPi;
      CopyBits = 8;
      CopyScale = 4;
    } else if (DestReg == BaseReg) {
      // low -> same low
      // Already in right reg, no copy needed
    } else if (isARMLowRegister(BaseReg)) {
      // low -> different low
      CopyOpc = isSub ? ARM::tSUBi3 : ARM::tADDi3;
      CopyBits = 3;
      CopyNeedsCC = true;
    } else {
      // high -> low
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = isSub ? ARM::tSUBi8 : ARM::tADDi8;
    ExtraBits = 8;
    ExtraNeedsCC = true;
  } else /* DestReg is high */ {
    if (DestReg == BaseReg) {
      // high -> same high
      // Already in right reg, no copy needed
    } else {
      // {low,high,sp} -> high
      CopyOpc = ARM::tMOVr;
      CopyBits = 0;
    }
    ExtraOpc = 0;
  }

  // We could handle an unaligned immediate with an unaligned copy instruction
  // and an aligned extra instruction, but this case is not currently needed.
  assert(((Bytes & 3) == 0 || ExtraScale == 1) &&
         "Unaligned offset, but all instructions require alignment");

  unsigned CopyRange = ((1 << CopyBits) - 1) * CopyScale;
  // If we would emit the copy with an immediate of 0, just use tMOVr.
  if (CopyOpc && Bytes < CopyScale) {
    CopyOpc = ARM::tMOVr;
    CopyScale = 1;
    CopyNeedsCC = false;
    CopyRange = 0;
  }
  unsigned ExtraRange = ((1 << ExtraBits) - 1) * ExtraScale; // per instruction
  unsigned RequiredCopyInstrs = CopyOpc ? 1 : 0;
  unsigned RangeAfterCopy = (CopyRange > Bytes) ? 0 : (Bytes - CopyRange);

  // We could handle this case when the copy instruction does not require an
  // aligned immediate, but we do not currently do this.
  assert(RangeAfterCopy % ExtraScale == 0 &&
         "Extra instruction requires immediate to be aligned");

  unsigned RequiredExtraInstrs;
  if (ExtraRange)
    RequiredExtraInstrs = alignTo(RangeAfterCopy, ExtraRange) / ExtraRange;
  else if (RangeAfterCopy > 0)
    // We need an extra instruction but none is available
    RequiredExtraInstrs = 1000000;
  else
    RequiredExtraInstrs = 0;
  unsigned RequiredInstrs = RequiredCopyInstrs + RequiredExtraInstrs;
  unsigned Threshold = (DestReg == ARM::SP) ? 3 : 2;

  // Use a constant pool if the sequence of ADDs/SUBs is too expensive.
  if (RequiredInstrs > Threshold) {
    emitThumbRegPlusImmInReg(MBB, MBBI, dl,
                             DestReg, BaseReg, NumBytes, true,
                             TII, MRI, MIFlags);
    return;
  }

  // Emit zero or one copy instructions
  if (CopyOpc) {
    unsigned CopyImm = std::min(Bytes, CopyRange) / CopyScale;
    Bytes -= CopyImm * CopyScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(CopyOpc), DestReg);
    if (CopyNeedsCC)
      MIB = AddDefaultT1CC(MIB);
    MIB.addReg(BaseReg, RegState::Kill);
    if (CopyOpc != ARM::tMOVr) {
      MIB.addImm(CopyImm);
    }
    AddDefaultPred(MIB.setMIFlags(MIFlags));

    BaseReg = DestReg;
  }

  // Emit zero or more in-place add/sub instructions
  while (Bytes) {
    unsigned ExtraImm = std::min(Bytes, ExtraRange) / ExtraScale;
    Bytes -= ExtraImm * ExtraScale;

    MachineInstrBuilder MIB = BuildMI(MBB, MBBI, dl, TII.get(ExtraOpc), DestReg);
    if (ExtraNeedsCC)
      MIB = AddDefaultT1CC(MIB);
    MIB.addReg(BaseReg).addImm(ExtraImm);
    MIB = AddDefaultPred(MIB);
    MIB.setMIFlags(MIFlags);
  }
}

static void removeOperands(MachineInstr &MI, unsigned i) {
  unsigned Op = i;
  for (unsigned e = MI.getNumOperands(); i != e; ++i)
    MI.RemoveOperand(Op);
}

/// convertToNonSPOpcode - Change the opcode to the non-SP version, because
/// we're replacing the frame index with a non-SP register.
static unsigned convertToNonSPOpcode(unsigned Opcode) {
  switch (Opcode) {
  case ARM::tLDRspi:
    return ARM::tLDRi;

  case ARM::tSTRspi:
    return ARM::tSTRi;
  }

  return Opcode;
}

bool ThumbRegisterInfo::rewriteFrameIndex(MachineBasicBlock::iterator II,
                                          unsigned FrameRegIdx,
                                          unsigned FrameReg, int &Offset,
                                          const ARMBaseInstrInfo &TII) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  assert(MBB.getParent()->getSubtarget<ARMSubtarget>().isThumb1Only() &&
         "This isn't needed for thumb2!");
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);
  unsigned Opcode = MI.getOpcode();
  const MCInstrDesc &Desc = MI.getDesc();
  unsigned AddrMode = (Desc.TSFlags & ARMII::AddrModeMask);

  if (Opcode == ARM::tADDframe) {
    Offset += MI.getOperand(FrameRegIdx+1).getImm();
    unsigned DestReg = MI.getOperand(0).getReg();

    emitThumbRegPlusImmediate(MBB, II, dl, DestReg, FrameReg, Offset, TII,
                              *this);
    MBB.erase(II);
    return true;
  } else {
    if (AddrMode != ARMII::AddrModeT1_s)
      llvm_unreachable("Unsupported addressing mode!");

    unsigned ImmIdx = FrameRegIdx + 1;
    int InstrOffs = MI.getOperand(ImmIdx).getImm();
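    // AddrModeT1_s immediates are word-scaled: the SP-relative forms
    // (tLDRspi / tSTRspi) have an 8-bit immediate field, while the
    // register-base forms (tLDRi / tSTRi) only have 5 bits.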
    unsigned NumBits = (FrameReg == ARM::SP) ? 8 : 5;
    unsigned Scale = 4;

    Offset += InstrOffs * Scale;
    assert((Offset & (Scale - 1)) == 0 && "Can't encode this offset!");

    // Common case: small offset, fits into instruction.
    MachineOperand &ImmOp = MI.getOperand(ImmIdx);
    int ImmedOffset = Offset / Scale;
    unsigned Mask = (1 << NumBits) - 1;

    if ((unsigned)Offset <= Mask * Scale) {
      // Replace the FrameIndex with the frame register (e.g., sp).
      MI.getOperand(FrameRegIdx).ChangeToRegister(FrameReg, false);
      ImmOp.ChangeToImmediate(ImmedOffset);

      // If we're accessing the frame through a register other than SP,
      // convert the instruction to the non-SP version.
      unsigned NewOpc = convertToNonSPOpcode(Opcode);
      if (NewOpc != Opcode && FrameReg != ARM::SP)
        MI.setDesc(TII.get(NewOpc));

      return true;
    }

    NumBits = 5;
    Mask = (1 << NumBits) - 1;

    // If this is a thumb spill / restore, we will be using a constpool load to
    // materialize the offset.
    if (Opcode == ARM::tLDRspi || Opcode == ARM::tSTRspi) {
      ImmOp.ChangeToImmediate(0);
    } else {
      // Otherwise, it didn't fit. Pull in what we can to simplify the immed.
      ImmedOffset = ImmedOffset & Mask;
      ImmOp.ChangeToImmediate(ImmedOffset);
      Offset &= ~(Mask * Scale);
    }
  }

  return Offset == 0;
}

void ThumbRegisterInfo::resolveFrameIndex(MachineInstr &MI, unsigned BaseReg,
                                          int64_t Offset) const {
  const MachineFunction &MF = *MI.getParent()->getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::resolveFrameIndex(MI, BaseReg, Offset);

  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  int Off = Offset; // ARM doesn't need the general 64-bit offsets
  unsigned i = 0;

  while (!MI.getOperand(i).isFI()) {
    ++i;
    assert(i < MI.getNumOperands() && "Instr doesn't have FrameIndex operand!");
  }
  bool Done = rewriteFrameIndex(MI, i, BaseReg, Off, TII);
  assert(Done && "Unable to resolve frame index!");
  (void)Done;
}

/// saveScavengerRegister - Spill the register so it can be used by the
/// register scavenger. Return true.
bool ThumbRegisterInfo::saveScavengerRegister(
    MachineBasicBlock &MBB, MachineBasicBlock::iterator I,
    MachineBasicBlock::iterator &UseMI, const TargetRegisterClass *RC,
    unsigned Reg) const {

  const ARMSubtarget &STI = MBB.getParent()->getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::saveScavengerRegister(MBB, I, UseMI, RC, Reg);

  // Thumb1 can't use the emergency spill slot on the stack because
  // ldr/str immediate offsets must be positive, and if we're referencing
  // off the frame pointer (if, for example, there are alloca() calls in
  // the function), the offset will be negative. Use R12 instead, since
  // that's a call-clobbered register that we know won't be used in Thumb1
  // mode.
  const TargetInstrInfo &TII = *STI.getInstrInfo();
  DebugLoc DL;
  AddDefaultPred(BuildMI(MBB, I, DL, TII.get(ARM::tMOVr))
                     .addReg(ARM::R12, RegState::Define)
                     .addReg(Reg, RegState::Kill));

  // The UseMI is where we would like to restore the register. If there's
  // interference with R12 before then, however, we'll need to restore it
  // before that instead and adjust the UseMI.
  bool done = false;
  for (MachineBasicBlock::iterator II = I; !done && II != UseMI; ++II) {
    if (II->isDebugValue())
      continue;
    // If this instruction affects R12, adjust our restore point.
    for (unsigned i = 0, e = II->getNumOperands(); i != e; ++i) {
      const MachineOperand &MO = II->getOperand(i);
      if (MO.isRegMask() && MO.clobbersPhysReg(ARM::R12)) {
        UseMI = II;
        done = true;
        break;
      }
      if (!MO.isReg() || MO.isUndef() || !MO.getReg() ||
          TargetRegisterInfo::isVirtualRegister(MO.getReg()))
        continue;
      if (MO.getReg() == ARM::R12) {
        UseMI = II;
        done = true;
        break;
      }
    }
  }
  // Restore the register from R12
  AddDefaultPred(BuildMI(MBB, UseMI, DL, TII.get(ARM::tMOVr)).
    addReg(Reg, RegState::Define).addReg(ARM::R12, RegState::Kill));

  return true;
}

void ThumbRegisterInfo::eliminateFrameIndex(MachineBasicBlock::iterator II,
                                            int SPAdj, unsigned FIOperandNum,
                                            RegScavenger *RS) const {
  MachineInstr &MI = *II;
  MachineBasicBlock &MBB = *MI.getParent();
  MachineFunction &MF = *MBB.getParent();
  const ARMSubtarget &STI = MF.getSubtarget<ARMSubtarget>();
  if (!STI.isThumb1Only())
    return ARMBaseRegisterInfo::eliminateFrameIndex(II, SPAdj, FIOperandNum,
                                                    RS);

  unsigned VReg = 0;
  const ARMBaseInstrInfo &TII = *STI.getInstrInfo();
  ARMFunctionInfo *AFI = MF.getInfo<ARMFunctionInfo>();
  DebugLoc dl = MI.getDebugLoc();
  MachineInstrBuilder MIB(*MBB.getParent(), &MI);

  unsigned FrameReg = ARM::SP;
  int FrameIndex = MI.getOperand(FIOperandNum).getIndex();
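  // Compute the offset relative to the current SP: the object's offset within
  // the frame plus the frame size, plus any SP adjustment pending at this
  // point.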
  int Offset = MF.getFrameInfo()->getObjectOffset(FrameIndex) +
               MF.getFrameInfo()->getStackSize() + SPAdj;

  if (MF.getFrameInfo()->hasVarSizedObjects()) {
    assert(SPAdj == 0 && STI.getFrameLowering()->hasFP(MF) && "Unexpected");
    // There are alloca()'s in this function, must reference off the frame
    // pointer or base pointer instead.
    if (!hasBasePointer(MF)) {
      FrameReg = getFrameRegister(MF);
      Offset -= AFI->getFramePtrSpillOffset();
    } else
      FrameReg = BasePtr;
  }

  // PEI::scavengeFrameVirtualRegs() cannot accurately track SPAdj because the
  // call frame setup/destroy instructions have already been eliminated. That
  // means the stack pointer cannot be used to access the emergency spill slot
  // when !hasReservedCallFrame().
#ifndef NDEBUG
  if (RS && FrameReg == ARM::SP && RS->isScavengingFrameIndex(FrameIndex)) {
    assert(STI.getFrameLowering()->hasReservedCallFrame(MF) &&
           "Cannot use SP to access the emergency spill slot in "
           "functions without a reserved call frame");
    assert(!MF.getFrameInfo()->hasVarSizedObjects() &&
           "Cannot use SP to access the emergency spill slot in "
           "functions with variable sized frame objects");
  }
#endif // NDEBUG

  // Special handling of dbg_value instructions.
  if (MI.isDebugValue()) {
    MI.getOperand(FIOperandNum).ChangeToRegister(FrameReg, false /*isDef*/);
    MI.getOperand(FIOperandNum+1).ChangeToImmediate(Offset);
    return;
  }

  // Modify MI as necessary to handle as much of 'Offset' as possible
  assert(AFI->isThumbFunction() &&
         "This eliminateFrameIndex only supports Thumb1!");
  if (rewriteFrameIndex(MI, FIOperandNum, FrameReg, Offset, TII))
    return;

  // If we get here, the immediate doesn't fit into the instruction. We folded
  // as much as possible above, handle the rest, providing a register that is
  // SP+LargeImm.
  assert(Offset && "This code isn't needed if offset already handled!");

  unsigned Opcode = MI.getOpcode();

  // Remove predicate first.
  int PIdx = MI.findFirstPredOperandIdx();
  if (PIdx != -1)
    removeOperands(MI, PIdx);

  if (MI.mayLoad()) {
    // Use the destination register to materialize sp + offset.
    unsigned TmpReg = MI.getOperand(0).getReg();
    bool UseRR = false;
    if (Opcode == ARM::tLDRspi) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, dl, TmpReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        emitLoadConstPool(MBB, II, dl, TmpReg, 0, Offset);
        UseRR = true;
      }
    } else {
      emitThumbRegPlusImmediate(MBB, II, dl, TmpReg, FrameReg, Offset, TII,
                                *this);
    }

    MI.setDesc(TII.get(UseRR ? ARM::tLDRr : ARM::tLDRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(TmpReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else if (MI.mayStore()) {
    VReg = MF.getRegInfo().createVirtualRegister(&ARM::tGPRRegClass);
    bool UseRR = false;

    if (Opcode == ARM::tSTRspi) {
      if (FrameReg == ARM::SP)
        emitThumbRegPlusImmInReg(MBB, II, dl, VReg, FrameReg,
                                 Offset, false, TII, *this);
      else {
        emitLoadConstPool(MBB, II, dl, VReg, 0, Offset);
        UseRR = true;
      }
    } else
      emitThumbRegPlusImmediate(MBB, II, dl, VReg, FrameReg, Offset, TII,
                                *this);
    MI.setDesc(TII.get(UseRR ? ARM::tSTRr : ARM::tSTRi));
    MI.getOperand(FIOperandNum).ChangeToRegister(VReg, false, false, true);
    if (UseRR)
      // Use [reg, reg] addrmode. Replace the immediate operand w/ the frame
      // register. The offset is already handled in the vreg value.
      MI.getOperand(FIOperandNum+1).ChangeToRegister(FrameReg, false, false,
                                                     false);
  } else {
    llvm_unreachable("Unexpected opcode!");
  }

  // Add predicate back if it's needed.
  if (MI.isPredicable())
    AddDefaultPred(MIB);
}