1 //===-- X86FrameLowering.cpp - X86 Frame Information ----------------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file contains the X86 implementation of TargetFrameLowering class.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "X86FrameLowering.h"
15 #include "X86InstrBuilder.h"
16 #include "X86InstrInfo.h"
17 #include "X86MachineFunctionInfo.h"
18 #include "X86Subtarget.h"
19 #include "X86TargetMachine.h"
20 #include "llvm/Function.h"
21 #include "llvm/CodeGen/MachineFrameInfo.h"
22 #include "llvm/CodeGen/MachineFunction.h"
23 #include "llvm/CodeGen/MachineInstrBuilder.h"
24 #include "llvm/CodeGen/MachineModuleInfo.h"
25 #include "llvm/CodeGen/MachineRegisterInfo.h"
26 #include "llvm/MC/MCAsmInfo.h"
27 #include "llvm/MC/MCSymbol.h"
28 #include "llvm/Target/TargetData.h"
29 #include "llvm/Target/TargetOptions.h"
30 #include "llvm/Support/CommandLine.h"
31 #include "llvm/ADT/SmallSet.h"
32
33 using namespace llvm;
34
35 // FIXME: completely move here.
36 extern cl::opt<bool> ForceStackAlign;
37
38 bool X86FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
39 return !MF.getFrameInfo()->hasVarSizedObjects();
40 }
41
42 /// hasFP - Return true if the specified function should have a dedicated frame
43 /// pointer register. This is true if the function has variable sized allocas
44 /// or if frame pointer elimination is disabled.
45 bool X86FrameLowering::hasFP(const MachineFunction &MF) const {
46 const MachineFrameInfo *MFI = MF.getFrameInfo();
47 const MachineModuleInfo &MMI = MF.getMMI();
48 const TargetRegisterInfo *RI = TM.getRegisterInfo();
49
50 return (MF.getTarget().Options.DisableFramePointerElim(MF) ||
51 RI->needsStackRealignment(MF) ||
52 MFI->hasVarSizedObjects() ||
53 MFI->isFrameAddressTaken() ||
54 MF.getInfo<X86MachineFunctionInfo>()->getForceFramePointer() ||
55 MMI.callsUnwindInit());
56 }
57
58 static unsigned getSUBriOpcode(unsigned is64Bit, int64_t Imm) {
59 if (is64Bit) {
60 if (isInt<8>(Imm))
61 return X86::SUB64ri8;
62 return X86::SUB64ri32;
63 } else {
64 if (isInt<8>(Imm))
65 return X86::SUB32ri8;
66 return X86::SUB32ri;
67 }
68 }
69
70 static unsigned getADDriOpcode(unsigned is64Bit, int64_t Imm) {
71 if (is64Bit) {
72 if (isInt<8>(Imm))
73 return X86::ADD64ri8;
74 return X86::ADD64ri32;
75 } else {
76 if (isInt<8>(Imm))
77 return X86::ADD32ri8;
78 return X86::ADD32ri;
79 }
80 }
81
82 static unsigned getLEArOpcode(unsigned is64Bit) {
83 return is64Bit ? X86::LEA64r : X86::LEA32r;
84 }
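
// Illustrative only: these helpers pick the 8-bit-immediate form whenever the
// adjustment fits in a sign-extended imm8, which typically saves three bytes
// per instruction. A minimal sketch of the selection:
//   getSUBriOpcode(true, 8)    -> X86::SUB64ri8
//   getSUBriOpcode(true, 4096) -> X86::SUB64ri32
//   getADDriOpcode(false, 16)  -> X86::ADD32ri8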
85
86 /// findDeadCallerSavedReg - Return a caller-saved register that isn't live
87 /// when it reaches the "return" instruction. We can then pop a stack object
88 /// to this register without worrying about clobbering it.
89 static unsigned findDeadCallerSavedReg(MachineBasicBlock &MBB,
90 MachineBasicBlock::iterator &MBBI,
91 const TargetRegisterInfo &TRI,
92 bool Is64Bit) {
93 const MachineFunction *MF = MBB.getParent();
94 const Function *F = MF->getFunction();
95 if (!F || MF->getMMI().callsEHReturn())
96 return 0;
97
98 static const uint16_t CallerSavedRegs32Bit[] = {
99 X86::EAX, X86::EDX, X86::ECX, 0
100 };
101
102 static const uint16_t CallerSavedRegs64Bit[] = {
103 X86::RAX, X86::RDX, X86::RCX, X86::RSI, X86::RDI,
104 X86::R8, X86::R9, X86::R10, X86::R11, 0
105 };
106
107 unsigned Opc = MBBI->getOpcode();
108 switch (Opc) {
109 default: return 0;
110 case X86::RET:
111 case X86::RETI:
112 case X86::TCRETURNdi:
113 case X86::TCRETURNri:
114 case X86::TCRETURNmi:
115 case X86::TCRETURNdi64:
116 case X86::TCRETURNri64:
117 case X86::TCRETURNmi64:
118 case X86::EH_RETURN:
119 case X86::EH_RETURN64: {
120 SmallSet<uint16_t, 8> Uses;
121 for (unsigned i = 0, e = MBBI->getNumOperands(); i != e; ++i) {
122 MachineOperand &MO = MBBI->getOperand(i);
123 if (!MO.isReg() || MO.isDef())
124 continue;
125 unsigned Reg = MO.getReg();
126 if (!Reg)
127 continue;
128 for (const uint16_t *AsI = TRI.getOverlaps(Reg); *AsI; ++AsI)
129 Uses.insert(*AsI);
130 }
131
132 const uint16_t *CS = Is64Bit ? CallerSavedRegs64Bit : CallerSavedRegs32Bit;
133 for (; *CS; ++CS)
134 if (!Uses.count(*CS))
135 return *CS;
136 }
137 }
138
139 return 0;
140 }
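
// A sketch of the intended use: on a plain 32-bit RET that carries a return
// value, EAX shows up as an implicit use of the return instruction, so the
// scan above skips it and hands back EDX instead; for a void return EAX
// itself is normally free. (Illustrative assumption about the RET's implicit
// operands.)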
141
142
143 /// emitSPUpdate - Emit a series of instructions to increment / decrement the
144 /// stack pointer by a constant value.
145 static
146 void emitSPUpdate(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
147 unsigned StackPtr, int64_t NumBytes,
148 bool Is64Bit, bool UseLEA,
149 const TargetInstrInfo &TII, const TargetRegisterInfo &TRI) {
150 bool isSub = NumBytes < 0;
151 uint64_t Offset = isSub ? -NumBytes : NumBytes;
152 unsigned Opc;
153 if (UseLEA)
154 Opc = getLEArOpcode(Is64Bit);
155 else
156 Opc = isSub
157 ? getSUBriOpcode(Is64Bit, Offset)
158 : getADDriOpcode(Is64Bit, Offset);
159
160 uint64_t Chunk = (1LL << 31) - 1;
161 DebugLoc DL = MBB.findDebugLoc(MBBI);
162
163 while (Offset) {
164 uint64_t ThisVal = (Offset > Chunk) ? Chunk : Offset;
165 if (ThisVal == (Is64Bit ? 8 : 4)) {
166 // Use push / pop instead.
167 unsigned Reg = isSub
168 ? (unsigned)(Is64Bit ? X86::RAX : X86::EAX)
169 : findDeadCallerSavedReg(MBB, MBBI, TRI, Is64Bit);
170 if (Reg) {
171 Opc = isSub
172 ? (Is64Bit ? X86::PUSH64r : X86::PUSH32r)
173 : (Is64Bit ? X86::POP64r : X86::POP32r);
174 MachineInstr *MI = BuildMI(MBB, MBBI, DL, TII.get(Opc))
175 .addReg(Reg, getDefRegState(!isSub) | getUndefRegState(isSub));
176 if (isSub)
177 MI->setFlag(MachineInstr::FrameSetup);
178 Offset -= ThisVal;
179 continue;
180 }
181 }
182
183 MachineInstr *MI = NULL;
184
185 if (UseLEA) {
186 MI = addRegOffset(BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr),
187 StackPtr, false, isSub ? -ThisVal : ThisVal);
188 } else {
189 MI = BuildMI(MBB, MBBI, DL, TII.get(Opc), StackPtr)
190 .addReg(StackPtr)
191 .addImm(ThisVal);
192 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
193 }
194
195 if (isSub)
196 MI->setFlag(MachineInstr::FrameSetup);
197
198 Offset -= ThisVal;
199 }
200 }
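
// For illustration, a sketch of what the loop above emits on x86-64 with
// UseLEA disabled:
//   NumBytes = -8   ->  pushq %rax          (the push/pop special case)
//   NumBytes = -40  ->  subq  $40, %rsp
//   NumBytes = 16   ->  addq  $16, %rsp
// Adjustments larger than 2^31-1 bytes are split into multiple chunks.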
201
202 /// mergeSPUpdatesUp - Fold a stack adjustment just above MBBI into *NumBytes.
203 static
204 void mergeSPUpdatesUp(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MBBI,
205 unsigned StackPtr, uint64_t *NumBytes = NULL) {
206 if (MBBI == MBB.begin()) return;
207
208 MachineBasicBlock::iterator PI = prior(MBBI);
209 unsigned Opc = PI->getOpcode();
210 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
211 Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
212 Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
213 PI->getOperand(0).getReg() == StackPtr) {
214 if (NumBytes)
215 *NumBytes += PI->getOperand(2).getImm();
216 MBB.erase(PI);
217 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
218 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
219 PI->getOperand(0).getReg() == StackPtr) {
220 if (NumBytes)
221 *NumBytes -= PI->getOperand(2).getImm();
222 MBB.erase(PI);
223 }
224 }
225
226 /// mergeSPUpdatesDown - Fold a stack adjustment just below MBBI into *NumBytes.
227 static
228 void mergeSPUpdatesDown(MachineBasicBlock &MBB,
229 MachineBasicBlock::iterator &MBBI,
230 unsigned StackPtr, uint64_t *NumBytes = NULL) {
231 // FIXME: THIS ISN'T RUN!!!
232 return;
233
234 if (MBBI == MBB.end()) return;
235
236 MachineBasicBlock::iterator NI = llvm::next(MBBI);
237 if (NI == MBB.end()) return;
238
239 unsigned Opc = NI->getOpcode();
240 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
241 Opc == X86::ADD32ri || Opc == X86::ADD32ri8) &&
242 NI->getOperand(0).getReg() == StackPtr) {
243 if (NumBytes)
244 *NumBytes -= NI->getOperand(2).getImm();
245 MBB.erase(NI);
246 MBBI = NI;
247 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
248 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
249 NI->getOperand(0).getReg() == StackPtr) {
250 if (NumBytes)
251 *NumBytes += NI->getOperand(2).getImm();
252 MBB.erase(NI);
253 MBBI = NI;
254 }
255 }
256
257 /// mergeSPUpdates - Check the instruction before/after the passed
258 /// instruction. If it is an ADD/SUB/LEA instruction it is deleted, and the
259 /// stack adjustment is returned as a positive value for ADD/LEA and a
260 /// negative one for SUB.
261 static int mergeSPUpdates(MachineBasicBlock &MBB,
262 MachineBasicBlock::iterator &MBBI,
263 unsigned StackPtr,
264 bool doMergeWithPrevious) {
265 if ((doMergeWithPrevious && MBBI == MBB.begin()) ||
266 (!doMergeWithPrevious && MBBI == MBB.end()))
267 return 0;
268
269 MachineBasicBlock::iterator PI = doMergeWithPrevious ? prior(MBBI) : MBBI;
270 MachineBasicBlock::iterator NI = doMergeWithPrevious ? 0 : llvm::next(MBBI);
271 unsigned Opc = PI->getOpcode();
272 int Offset = 0;
273
274 if ((Opc == X86::ADD64ri32 || Opc == X86::ADD64ri8 ||
275 Opc == X86::ADD32ri || Opc == X86::ADD32ri8 ||
276 Opc == X86::LEA32r || Opc == X86::LEA64_32r) &&
277 PI->getOperand(0).getReg() == StackPtr){
278 Offset += PI->getOperand(2).getImm();
279 MBB.erase(PI);
280 if (!doMergeWithPrevious) MBBI = NI;
281 } else if ((Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
282 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) &&
283 PI->getOperand(0).getReg() == StackPtr) {
284 Offset -= PI->getOperand(2).getImm();
285 MBB.erase(PI);
286 if (!doMergeWithPrevious) MBBI = NI;
287 }
288
289 return Offset;
290 }
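
// A minimal sketch: if the instruction just above MBBI is "subl $16, %esp"
// and doMergeWithPrevious is true, the SUB is erased and -16 is returned, so
// the caller can fold those 16 bytes into its own stack adjustment.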
291
292 static bool isEAXLiveIn(MachineFunction &MF) {
293 for (MachineRegisterInfo::livein_iterator II = MF.getRegInfo().livein_begin(),
294 EE = MF.getRegInfo().livein_end(); II != EE; ++II) {
295 unsigned Reg = II->first;
296
297 if (Reg == X86::EAX || Reg == X86::AX ||
298 Reg == X86::AH || Reg == X86::AL)
299 return true;
300 }
301
302 return false;
303 }
304
305 void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF,
306 MCSymbol *Label,
307 unsigned FramePtr) const {
308 MachineFrameInfo *MFI = MF.getFrameInfo();
309 MachineModuleInfo &MMI = MF.getMMI();
310
311 // Add callee saved registers to move list.
312 const std::vector<CalleeSavedInfo> &CSI = MFI->getCalleeSavedInfo();
313 if (CSI.empty()) return;
314
315 std::vector<MachineMove> &Moves = MMI.getFrameMoves();
316 const TargetData *TD = TM.getTargetData();
317 bool HasFP = hasFP(MF);
318
319 // Calculate the number of bytes used to store the return address.
320 int stackGrowth = -TD->getPointerSize();
321
322 // FIXME: This is a dirty hack. The code itself is a mess right now.
323 // It should be rewritten from scratch and generalized someday.
324
325 // Determine maximum offset (minimum due to stack growth).
326 int64_t MaxOffset = 0;
327 for (std::vector<CalleeSavedInfo>::const_iterator
328 I = CSI.begin(), E = CSI.end(); I != E; ++I)
329 MaxOffset = std::min(MaxOffset,
330 MFI->getObjectOffset(I->getFrameIdx()));
331
332 // Calculate offsets.
333 int64_t saveAreaOffset = (HasFP ? 3 : 2) * stackGrowth;
334 for (std::vector<CalleeSavedInfo>::const_iterator
335 I = CSI.begin(), E = CSI.end(); I != E; ++I) {
336 int64_t Offset = MFI->getObjectOffset(I->getFrameIdx());
337 unsigned Reg = I->getReg();
338 Offset = MaxOffset - Offset + saveAreaOffset;
339
340 // Don't output a new machine move if we're re-saving the frame
341 // pointer. This happens when the PrologEpilogInserter has inserted an extra
342 // "PUSH" of the frame pointer -- the "emitPrologue" method automatically
343 // generates one when frame pointers are used. If we generate a "machine
344 // move" for this extra "PUSH", the linker will lose track of the fact that
345 // the frame pointer should have the value of the first "PUSH" when it's
346 // trying to unwind.
347 //
348 // FIXME: This looks inelegant. It's possibly correct, but it's covering up
349 // another bug. I.e., one where we generate a prolog like this:
350 //
351 // pushl %ebp
352 // movl %esp, %ebp
353 // pushl %ebp
354 // pushl %esi
355 // ...
356 //
357 // The immediate re-push of EBP is unnecessary. At the least, it's an
358 // optimization bug. EBP can be used as a scratch register in certain
359 // cases, but probably not when we have a frame pointer.
360 if (HasFP && FramePtr == Reg)
361 continue;
362
363 MachineLocation CSDst(MachineLocation::VirtualFP, Offset);
364 MachineLocation CSSrc(Reg);
365 Moves.push_back(MachineMove(Label, CSDst, CSSrc));
366 }
367 }
368
369 /// getCompactUnwindRegNum - Get the compact unwind number for a given
370 /// register. The number corresponds to the enum lists in
371 /// compact_unwind_encoding.h.
372 static int getCompactUnwindRegNum(const unsigned *CURegs, unsigned Reg) {
373 for (int Idx = 1; *CURegs; ++CURegs, ++Idx)
374 if (*CURegs == Reg)
375 return Idx;
376
377 return -1;
378 }
379
380 // Number of registers that can be saved in a compact unwind encoding.
381 #define CU_NUM_SAVED_REGS 6
382
383 /// encodeCompactUnwindRegistersWithoutFrame - Create the permutation encoding
384 /// used with frameless stacks. It is passed the number of registers to be saved
385 /// and an array of the registers saved.
386 static uint32_t
387 encodeCompactUnwindRegistersWithoutFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
388 unsigned RegCount, bool Is64Bit) {
389 // The saved registers are numbered from 1 to 6. In order to encode the order
390 // in which they were saved, we re-number them according to their place in the
391 // register order. The re-numbering is relative to the last re-numbered
392 // register. E.g., if we have registers {6, 2, 4, 5} saved in that order:
393 //
394 // Orig Re-Num
395 // ---- ------
396 // 6 6
397 // 2 2
398 // 4 3
399 // 5 3
400 //
401 static const unsigned CU32BitRegs[] = {
402 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
403 };
404 static const unsigned CU64BitRegs[] = {
405 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
406 };
407 const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
408
409 for (unsigned i = 0; i != CU_NUM_SAVED_REGS; ++i) {
410 int CUReg = getCompactUnwindRegNum(CURegs, SavedRegs[i]);
411 if (CUReg == -1) return ~0U;
412 SavedRegs[i] = CUReg;
413 }
414
415 // Reverse the list.
416 std::swap(SavedRegs[0], SavedRegs[5]);
417 std::swap(SavedRegs[1], SavedRegs[4]);
418 std::swap(SavedRegs[2], SavedRegs[3]);
419
420 uint32_t RenumRegs[CU_NUM_SAVED_REGS];
421 for (unsigned i = CU_NUM_SAVED_REGS - RegCount; i < CU_NUM_SAVED_REGS; ++i) {
422 unsigned Countless = 0;
423 for (unsigned j = CU_NUM_SAVED_REGS - RegCount; j < i; ++j)
424 if (SavedRegs[j] < SavedRegs[i])
425 ++Countless;
426
427 RenumRegs[i] = SavedRegs[i] - Countless - 1;
428 }
429
430 // Take the renumbered values and encode them into a 10-bit number.
431 uint32_t permutationEncoding = 0;
432 switch (RegCount) {
433 case 6:
434 permutationEncoding |= 120 * RenumRegs[0] + 24 * RenumRegs[1]
435 + 6 * RenumRegs[2] + 2 * RenumRegs[3]
436 + RenumRegs[4];
437 break;
438 case 5:
439 permutationEncoding |= 120 * RenumRegs[1] + 24 * RenumRegs[2]
440 + 6 * RenumRegs[3] + 2 * RenumRegs[4]
441 + RenumRegs[5];
442 break;
443 case 4:
444 permutationEncoding |= 60 * RenumRegs[2] + 12 * RenumRegs[3]
445 + 3 * RenumRegs[4] + RenumRegs[5];
446 break;
447 case 3:
448 permutationEncoding |= 20 * RenumRegs[3] + 4 * RenumRegs[4]
449 + RenumRegs[5];
450 break;
451 case 2:
452 permutationEncoding |= 5 * RenumRegs[4] + RenumRegs[5];
453 break;
454 case 1:
455 permutationEncoding |= RenumRegs[5];
456 break;
457 }
458
459 assert((permutationEncoding & 0x3FF) == permutationEncoding &&
460 "Invalid compact register encoding!");
461 return permutationEncoding;
462 }
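
// The switch above is a mixed-radix encoding of the saved-register
// permutation: with k registers drawn from the 6 encodable ones there are
// 6!/(6-k)! orderings (e.g. 6*5*4 = 120 for k == 3), and the digit weights
// (20, 4, 1 in that case) keep every value inside the 10-bit field checked by
// the assert. (Explanatory note; compact_unwind_encoding.h is authoritative.)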
463
464 /// encodeCompactUnwindRegistersWithFrame - Return the registers encoded for a
465 /// compact encoding with a frame pointer.
466 static uint32_t
467 encodeCompactUnwindRegistersWithFrame(unsigned SavedRegs[CU_NUM_SAVED_REGS],
468 bool Is64Bit) {
469 static const unsigned CU32BitRegs[] = {
470 X86::EBX, X86::ECX, X86::EDX, X86::EDI, X86::ESI, X86::EBP, 0
471 };
472 static const unsigned CU64BitRegs[] = {
473 X86::RBX, X86::R12, X86::R13, X86::R14, X86::R15, X86::RBP, 0
474 };
475 const unsigned *CURegs = (Is64Bit ? CU64BitRegs : CU32BitRegs);
476
477 // Encode the registers in the order they were saved, 3 bits per register. The
478 // registers are numbered from 1 to CU_NUM_SAVED_REGS.
479 uint32_t RegEnc = 0;
480 for (int I = CU_NUM_SAVED_REGS - 1, Idx = 0; I != -1; --I) {
481 unsigned Reg = SavedRegs[I];
482 if (Reg == 0) continue;
483
484 int CURegNum = getCompactUnwindRegNum(CURegs, Reg);
485 if (CURegNum == -1) return ~0U;
486
487 // Encode the 3-bit register number in order, skipping over 3 bits for each
488 // register.
489 RegEnc |= (CURegNum & 0x7) << (Idx++ * 3);
490 }
491
492 assert((RegEnc & 0x3FFFF) == RegEnc && "Invalid compact register encoding!");
493 return RegEnc;
494 }
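
// A worked example (illustrative, using the 32-bit numbering EBX=1 .. EBP=6):
// pushes of EBX then ESI leave SavedRegs = {EBX, ESI, 0, ...}, and the loop
// above produces RegEnc = (1 << 3) | 5 = 0xD, i.e. the first-pushed register
// lands in the higher 3-bit group.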
495
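// A sketch of the 32-bit descriptor layout produced below, derived from the
// constants used in this function (compact_unwind_encoding.h is the
// authoritative reference):
//   0x01vvvvvv  frame-based: stack adjust in bits 16-23, registers in
//               bits 0-14
//   0x02vvvvvv  frameless, small stack: size in bits 16-23, register count
//               in bits 10-12, permutation in bits 0-9
//   0x03vvvvvv  frameless, large stack: offset of the SUB's immediate in
//               bits 16-23, extra adjust in bits 13-15, then as above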
496 uint32_t X86FrameLowering::getCompactUnwindEncoding(MachineFunction &MF) const {
497 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
498 unsigned FramePtr = RegInfo->getFrameRegister(MF);
499 unsigned StackPtr = RegInfo->getStackRegister();
500
501 bool Is64Bit = STI.is64Bit();
502 bool HasFP = hasFP(MF);
503
504 unsigned SavedRegs[CU_NUM_SAVED_REGS] = { 0, 0, 0, 0, 0, 0 };
505 unsigned SavedRegIdx = 0;
506
507 unsigned OffsetSize = (Is64Bit ? 8 : 4);
508
509 unsigned PushInstr = (Is64Bit ? X86::PUSH64r : X86::PUSH32r);
510 unsigned PushInstrSize = 1;
511 unsigned MoveInstr = (Is64Bit ? X86::MOV64rr : X86::MOV32rr);
512 unsigned MoveInstrSize = (Is64Bit ? 3 : 2);
513 unsigned SubtractInstrIdx = (Is64Bit ? 3 : 2);
514
515 unsigned StackDivide = (Is64Bit ? 8 : 4);
516
517 unsigned InstrOffset = 0;
518 unsigned StackAdjust = 0;
519 unsigned StackSize = 0;
520
521 MachineBasicBlock &MBB = MF.front(); // Prologue is in entry BB.
522 bool ExpectEnd = false;
523 for (MachineBasicBlock::iterator
524 MBBI = MBB.begin(), MBBE = MBB.end(); MBBI != MBBE; ++MBBI) {
525 MachineInstr &MI = *MBBI;
526 unsigned Opc = MI.getOpcode();
527 if (Opc == X86::PROLOG_LABEL) continue;
528 if (!MI.getFlag(MachineInstr::FrameSetup)) break;
529
530 // We don't expect any more prologue instructions.
531 if (ExpectEnd) return 0;
532
533 if (Opc == PushInstr) {
534 // If there are too many saved registers, we cannot use compact encoding.
535 if (SavedRegIdx >= CU_NUM_SAVED_REGS) return 0;
536
537 SavedRegs[SavedRegIdx++] = MI.getOperand(0).getReg();
538 StackAdjust += OffsetSize;
539 InstrOffset += PushInstrSize;
540 } else if (Opc == MoveInstr) {
541 unsigned SrcReg = MI.getOperand(1).getReg();
542 unsigned DstReg = MI.getOperand(0).getReg();
543
544 if (DstReg != FramePtr || SrcReg != StackPtr)
545 return 0;
546
547 StackAdjust = 0;
548 memset(SavedRegs, 0, sizeof(SavedRegs));
549 SavedRegIdx = 0;
550 InstrOffset += MoveInstrSize;
551 } else if (Opc == X86::SUB64ri32 || Opc == X86::SUB64ri8 ||
552 Opc == X86::SUB32ri || Opc == X86::SUB32ri8) {
553 if (StackSize)
554 // We already have a stack size.
555 return 0;
556
557 if (!MI.getOperand(0).isReg() ||
558 MI.getOperand(0).getReg() != MI.getOperand(1).getReg() ||
559 MI.getOperand(0).getReg() != StackPtr || !MI.getOperand(2).isImm())
560 // We need this to be a stack adjustment pointer. Something like:
561 //
562 // %RSP<def> = SUB64ri8 %RSP, 48
563 return 0;
564
565 StackSize = MI.getOperand(2).getImm() / StackDivide;
566 SubtractInstrIdx += InstrOffset;
567 ExpectEnd = true;
568 }
569 }
570
571 // Encode that we are using EBP/RBP as the frame pointer.
572 uint32_t CompactUnwindEncoding = 0;
573 StackAdjust /= StackDivide;
574 if (HasFP) {
575 if ((StackAdjust & 0xFF) != StackAdjust)
576 // Offset was too big for compact encoding.
577 return 0;
578
579 // Get the encoding of the saved registers when we have a frame pointer.
580 uint32_t RegEnc = encodeCompactUnwindRegistersWithFrame(SavedRegs, Is64Bit);
581 if (RegEnc == ~0U) return 0;
582
583 CompactUnwindEncoding |= 0x01000000;
584 CompactUnwindEncoding |= (StackAdjust & 0xFF) << 16;
585 CompactUnwindEncoding |= RegEnc & 0x7FFF;
586 } else {
587 ++StackAdjust;
588 uint32_t TotalStackSize = StackAdjust + StackSize;
589 if ((TotalStackSize & 0xFF) == TotalStackSize) {
590 // Frameless stack with a small stack size.
591 CompactUnwindEncoding |= 0x02000000;
592
593 // Encode the stack size.
594 CompactUnwindEncoding |= (TotalStackSize & 0xFF) << 16;
595 } else {
596 if ((StackAdjust & 0x7) != StackAdjust)
597 // The extra stack adjustments are too big for us to handle.
598 return 0;
599
600 // Frameless stack with an offset too large for us to encode compactly.
601 CompactUnwindEncoding |= 0x03000000;
602
603 // Encode the offset to the nnnnnn value in the 'subl $nnnnnn, ESP'
604 // instruction.
605 CompactUnwindEncoding |= (SubtractInstrIdx & 0xFF) << 16;
606
607 // Encode any extra stack adjustments (done via push instructions).
608 CompactUnwindEncoding |= (StackAdjust & 0x7) << 13;
609 }
610
611 // Encode the number of registers saved.
612 CompactUnwindEncoding |= (SavedRegIdx & 0x7) << 10;
613
614 // Get the encoding of the saved registers when we don't have a frame
615 // pointer.
616 uint32_t RegEnc =
617 encodeCompactUnwindRegistersWithoutFrame(SavedRegs, SavedRegIdx,
618 Is64Bit);
619 if (RegEnc == ~0U) return 0;
620
621 // Encode the register encoding.
622 CompactUnwindEncoding |= RegEnc & 0x3FF;
623 }
624
625 return CompactUnwindEncoding;
626 }
627
628 /// emitPrologue - Push callee-saved registers onto the stack, which
629 /// automatically adjusts the stack pointer. Adjust the stack pointer to
630 /// allocate space for local variables. Also emit labels used by the
631 /// exception handler to generate the exception handling frames.
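///
/// For a frame-pointer-based function this typically produces a sequence like
/// the following (an illustrative x86-64 sketch):
///
///   pushq %rbp
///   movq  %rsp, %rbp
///   pushq %rbx            ; callee-saved spills
///   subq  $NN, %rsp       ; local area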
632 void X86FrameLowering::emitPrologue(MachineFunction &MF) const {
633 MachineBasicBlock &MBB = MF.front(); // Prologue goes in entry BB.
634 MachineBasicBlock::iterator MBBI = MBB.begin();
635 MachineFrameInfo *MFI = MF.getFrameInfo();
636 const Function *Fn = MF.getFunction();
637 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
638 const X86InstrInfo &TII = *TM.getInstrInfo();
639 MachineModuleInfo &MMI = MF.getMMI();
640 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
641 bool needsFrameMoves = MMI.hasDebugInfo() ||
642 Fn->needsUnwindTableEntry();
643 uint64_t MaxAlign = MFI->getMaxAlignment(); // Desired stack alignment.
644 uint64_t StackSize = MFI->getStackSize(); // Number of bytes to allocate.
645 bool HasFP = hasFP(MF);
646 bool Is64Bit = STI.is64Bit();
647 bool IsWin64 = STI.isTargetWin64();
648 bool UseLEA = STI.useLeaForSP();
649 unsigned StackAlign = getStackAlignment();
650 unsigned SlotSize = RegInfo->getSlotSize();
651 unsigned FramePtr = RegInfo->getFrameRegister(MF);
652 unsigned StackPtr = RegInfo->getStackRegister();
653 DebugLoc DL;
654
655 // If we're forcing a stack realignment we can't rely on just the frame
656 // info; we need to know the ABI stack alignment as well in case we
657 // have a call out. Otherwise just make sure we have some alignment - we'll
658 // go with the minimum SlotSize.
659 if (ForceStackAlign) {
660 if (MFI->hasCalls())
661 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
662 else if (MaxAlign < SlotSize)
663 MaxAlign = SlotSize;
664 }
665
666 // Add RETADDR move area to callee saved frame size.
667 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
668 if (TailCallReturnAddrDelta < 0)
669 X86FI->setCalleeSavedFrameSize(
670 X86FI->getCalleeSavedFrameSize() - TailCallReturnAddrDelta);
671
672 // If this is x86-64, the Red Zone is not disabled, and we are a leaf
673 // function that uses at most 128 bytes of stack space with no frame
674 // pointer, calls, or dynamic allocas, then we do not need to adjust the
675 // stack pointer (we fit in the Red Zone).
676 if (Is64Bit && !Fn->hasFnAttr(Attribute::NoRedZone) &&
677 !RegInfo->needsStackRealignment(MF) &&
678 !MFI->hasVarSizedObjects() && // No dynamic alloca.
679 !MFI->adjustsStack() && // No calls.
680 !IsWin64 && // Win64 has no Red Zone
681 !MF.getTarget().Options.EnableSegmentedStacks) { // Regular stack
682 uint64_t MinSize = X86FI->getCalleeSavedFrameSize();
683 if (HasFP) MinSize += SlotSize;
684 StackSize = std::max(MinSize, StackSize > 128 ? StackSize - 128 : 0);
685 MFI->setStackSize(StackSize);
686 }
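
// For example (a sketch): a 64-bit leaf function with 120 bytes of locals, no
// frame pointer, and no callee-saved spills has MinSize == 0, the 128-byte
// allowance absorbs the locals, and StackSize becomes 0, so the prologue
// emits no stack adjustment at all.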
687
688 // Insert stack pointer adjustment for later moving of return addr. Only
689 // applies to tail call optimized functions where the callee argument stack
690 // size is bigger than the caller's.
691 if (TailCallReturnAddrDelta < 0) {
692 MachineInstr *MI =
693 BuildMI(MBB, MBBI, DL,
694 TII.get(getSUBriOpcode(Is64Bit, -TailCallReturnAddrDelta)),
695 StackPtr)
696 .addReg(StackPtr)
697 .addImm(-TailCallReturnAddrDelta)
698 .setMIFlag(MachineInstr::FrameSetup);
699 MI->getOperand(3).setIsDead(); // The EFLAGS implicit def is dead.
700 }
701
702 // Mapping for machine moves:
703 //
704 // DST: VirtualFP AND
705 // SRC: VirtualFP => DW_CFA_def_cfa_offset
706 // ELSE => DW_CFA_def_cfa
707 //
708 // SRC: VirtualFP AND
709 // DST: Register => DW_CFA_def_cfa_register
710 //
711 // ELSE
712 // OFFSET < 0 => DW_CFA_offset_extended_sf
713 // REG < 64 => DW_CFA_offset + Reg
714 // ELSE => DW_CFA_offset_extended
715
716 std::vector<MachineMove> &Moves = MMI.getFrameMoves();
717 const TargetData *TD = MF.getTarget().getTargetData();
718 uint64_t NumBytes = 0;
719 int stackGrowth = -TD->getPointerSize();
720
721 if (HasFP) {
722 // Calculate required stack adjustment.
723 uint64_t FrameSize = StackSize - SlotSize;
724 if (RegInfo->needsStackRealignment(MF))
725 FrameSize = (FrameSize + MaxAlign - 1) / MaxAlign * MaxAlign;
726
727 NumBytes = FrameSize - X86FI->getCalleeSavedFrameSize();
728
729 // Get the offset of the stack slot for the EBP register, which is
730 // guaranteed to be the last slot by processFunctionBeforeFrameFinalized.
731 // Update the frame offset adjustment.
732 MFI->setOffsetAdjustment(-NumBytes);
733
734 // Save EBP/RBP into the appropriate stack slot.
735 BuildMI(MBB, MBBI, DL, TII.get(Is64Bit ? X86::PUSH64r : X86::PUSH32r))
736 .addReg(FramePtr, RegState::Kill)
737 .setMIFlag(MachineInstr::FrameSetup);
738
739 if (needsFrameMoves) {
740 // Mark the place where EBP/RBP was saved.
741 MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
742 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
743 .addSym(FrameLabel);
744
745 // Define the current CFA rule to use the provided offset.
746 if (StackSize) {
747 MachineLocation SPDst(MachineLocation::VirtualFP);
748 MachineLocation SPSrc(MachineLocation::VirtualFP, 2 * stackGrowth);
749 Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
750 } else {
751 MachineLocation SPDst(StackPtr);
752 MachineLocation SPSrc(StackPtr, stackGrowth);
753 Moves.push_back(MachineMove(FrameLabel, SPDst, SPSrc));
754 }
755
756 // Change the rule for the FramePtr to be an "offset" rule.
757 MachineLocation FPDst(MachineLocation::VirtualFP, 2 * stackGrowth);
758 MachineLocation FPSrc(FramePtr);
759 Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
760 }
761
762 // Update EBP with the new base value.
763 BuildMI(MBB, MBBI, DL,
764 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), FramePtr)
765 .addReg(StackPtr)
766 .setMIFlag(MachineInstr::FrameSetup);
767
768 if (needsFrameMoves) {
769 // Mark effective beginning of when frame pointer becomes valid.
770 MCSymbol *FrameLabel = MMI.getContext().CreateTempSymbol();
771 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
772 .addSym(FrameLabel);
773
774 // Define the current CFA to use the EBP/RBP register.
775 MachineLocation FPDst(FramePtr);
776 MachineLocation FPSrc(MachineLocation::VirtualFP);
777 Moves.push_back(MachineMove(FrameLabel, FPDst, FPSrc));
778 }
779
780 // Mark the FramePtr as live-in in every block except the entry.
781 for (MachineFunction::iterator I = llvm::next(MF.begin()), E = MF.end();
782 I != E; ++I)
783 I->addLiveIn(FramePtr);
784
785 // Realign stack
786 if (RegInfo->needsStackRealignment(MF)) {
787 MachineInstr *MI =
788 BuildMI(MBB, MBBI, DL,
789 TII.get(Is64Bit ? X86::AND64ri32 : X86::AND32ri), StackPtr)
790 .addReg(StackPtr)
791 .addImm(-MaxAlign)
792 .setMIFlag(MachineInstr::FrameSetup);
793
794 // The EFLAGS implicit def is dead.
795 MI->getOperand(3).setIsDead();
796 }
797 } else {
798 NumBytes = StackSize - X86FI->getCalleeSavedFrameSize();
799 }
800
801 // Skip the callee-saved push instructions.
802 bool PushedRegs = false;
803 int StackOffset = 2 * stackGrowth;
804
805 while (MBBI != MBB.end() &&
806 (MBBI->getOpcode() == X86::PUSH32r ||
807 MBBI->getOpcode() == X86::PUSH64r)) {
808 PushedRegs = true;
809 MBBI->setFlag(MachineInstr::FrameSetup);
810 ++MBBI;
811
812 if (!HasFP && needsFrameMoves) {
813 // Mark callee-saved push instruction.
814 MCSymbol *Label = MMI.getContext().CreateTempSymbol();
815 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL)).addSym(Label);
816
817 // Define the current CFA rule to use the provided offset.
818 unsigned Ptr = StackSize ? MachineLocation::VirtualFP : StackPtr;
819 MachineLocation SPDst(Ptr);
820 MachineLocation SPSrc(Ptr, StackOffset);
821 Moves.push_back(MachineMove(Label, SPDst, SPSrc));
822 StackOffset += stackGrowth;
823 }
824 }
825
826 DL = MBB.findDebugLoc(MBBI);
827
828 // If there is a SUB32ri of ESP immediately before this instruction, merge
829 // the two. This can be the case when tail call elimination is enabled and
830 // the callee has more arguments than the caller.
831 NumBytes -= mergeSPUpdates(MBB, MBBI, StackPtr, true);
832
833 // If there is an ADD32ri or SUB32ri of ESP immediately after this
834 // instruction, merge the two instructions.
835 mergeSPUpdatesDown(MBB, MBBI, StackPtr, &NumBytes);
836
837 // Adjust stack pointer: ESP -= numbytes.
838
839 // Windows and cygwin/mingw require a prologue helper routine when allocating
840 // more than 4K bytes on the stack. Windows uses __chkstk and cygwin/mingw
841 // uses __alloca. __alloca and the 32-bit version of __chkstk will probe the
842 // stack and adjust the stack pointer in one go. The 64-bit version of
843 // __chkstk is only responsible for probing the stack. The 64-bit prologue is
844 // responsible for adjusting the stack pointer. Touching the stack at 4K
845 // increments is necessary to ensure that the guard pages used by the OS
846 // virtual memory manager are allocated in correct sequence.
847 if (NumBytes >= 4096 && STI.isTargetCOFF() && !STI.isTargetEnvMacho()) {
848 const char *StackProbeSymbol;
849 bool isSPUpdateNeeded = false;
850
851 if (Is64Bit) {
852 if (STI.isTargetCygMing())
853 StackProbeSymbol = "___chkstk";
854 else {
855 StackProbeSymbol = "__chkstk";
856 isSPUpdateNeeded = true;
857 }
858 } else if (STI.isTargetCygMing())
859 StackProbeSymbol = "_alloca";
860 else
861 StackProbeSymbol = "_chkstk";
862
863 // Check whether EAX is live-in for this function.
864 bool isEAXAlive = isEAXLiveIn(MF);
865
866 if (isEAXAlive) {
867 // Sanity check: EAX must not be live-in in the 64-bit case, since the
868 // 64-bit path below clobbers RAX unconditionally.
869 assert(!Is64Bit && "EAX is livein in x64 case!");
870
871 // Save EAX
872 BuildMI(MBB, MBBI, DL, TII.get(X86::PUSH32r))
873 .addReg(X86::EAX, RegState::Kill)
874 .setMIFlag(MachineInstr::FrameSetup);
875 }
876
877 if (Is64Bit) {
878 // Handle the 64-bit Windows ABI case where we need to call __chkstk.
879 // Function prologue is responsible for adjusting the stack pointer.
880 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV64ri), X86::RAX)
881 .addImm(NumBytes)
882 .setMIFlag(MachineInstr::FrameSetup);
883 } else {
884 // Allocate NumBytes-4 bytes on the stack when EAX is live; the 4 bytes
885 // already pushed to save EAX make up the difference.
886 BuildMI(MBB, MBBI, DL, TII.get(X86::MOV32ri), X86::EAX)
887 .addImm(isEAXAlive ? NumBytes - 4 : NumBytes)
888 .setMIFlag(MachineInstr::FrameSetup);
889 }
890
891 BuildMI(MBB, MBBI, DL,
892 TII.get(Is64Bit ? X86::W64ALLOCA : X86::CALLpcrel32))
893 .addExternalSymbol(StackProbeSymbol)
894 .addReg(StackPtr, RegState::Define | RegState::Implicit)
895 .addReg(X86::EFLAGS, RegState::Define | RegState::Implicit)
896 .setMIFlag(MachineInstr::FrameSetup);
897
898 // MSVC x64's __chkstk needs to adjust %rsp.
899 // FIXME: %rax preserves the offset and should be available.
900 if (isSPUpdateNeeded)
901 emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
902 UseLEA, TII, *RegInfo);
903
904 if (isEAXAlive) {
905 // Restore EAX
906 MachineInstr *MI = addRegOffset(BuildMI(MF, DL, TII.get(X86::MOV32rm),
907 X86::EAX),
908 StackPtr, false, NumBytes - 4);
909 MI->setFlag(MachineInstr::FrameSetup);
910 MBB.insert(MBBI, MI);
911 }
912 } else if (NumBytes)
913 emitSPUpdate(MBB, MBBI, StackPtr, -(int64_t)NumBytes, Is64Bit,
914 UseLEA, TII, *RegInfo);
915
916 if (((!HasFP && NumBytes) || PushedRegs) && needsFrameMoves) {
917 // Mark end of stack pointer adjustment.
918 MCSymbol *Label = MMI.getContext().CreateTempSymbol();
919 BuildMI(MBB, MBBI, DL, TII.get(X86::PROLOG_LABEL))
920 .addSym(Label);
921
922 if (!HasFP && NumBytes) {
923 // Define the current CFA rule to use the provided offset.
924 if (StackSize) {
925 MachineLocation SPDst(MachineLocation::VirtualFP);
926 MachineLocation SPSrc(MachineLocation::VirtualFP,
927 -StackSize + stackGrowth);
928 Moves.push_back(MachineMove(Label, SPDst, SPSrc));
929 } else {
930 MachineLocation SPDst(StackPtr);
931 MachineLocation SPSrc(StackPtr, stackGrowth);
932 Moves.push_back(MachineMove(Label, SPDst, SPSrc));
933 }
934 }
935
936 // Emit DWARF info specifying the offsets of the callee-saved registers.
937 if (PushedRegs)
938 emitCalleeSavedFrameMoves(MF, Label, HasFP ? FramePtr : StackPtr);
939 }
940
941 // Darwin 10.7 and later supports compact unwind encoding.
942 if (STI.getTargetTriple().isMacOSX() &&
943 !STI.getTargetTriple().isMacOSXVersionLT(10, 7))
944 MMI.setCompactUnwindEncoding(getCompactUnwindEncoding(MF));
945 }
946
947 void X86FrameLowering::emitEpilogue(MachineFunction &MF,
948 MachineBasicBlock &MBB) const {
949 const MachineFrameInfo *MFI = MF.getFrameInfo();
950 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
951 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
952 const X86InstrInfo &TII = *TM.getInstrInfo();
953 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
954 assert(MBBI != MBB.end() && "Returning block has no instructions");
955 unsigned RetOpcode = MBBI->getOpcode();
956 DebugLoc DL = MBBI->getDebugLoc();
957 bool Is64Bit = STI.is64Bit();
958 bool UseLEA = STI.useLeaForSP();
959 unsigned StackAlign = getStackAlignment();
960 unsigned SlotSize = RegInfo->getSlotSize();
961 unsigned FramePtr = RegInfo->getFrameRegister(MF);
962 unsigned StackPtr = RegInfo->getStackRegister();
963
964 switch (RetOpcode) {
965 default:
966 llvm_unreachable("Can only insert epilog into returning blocks");
967 case X86::RET:
968 case X86::RETI:
969 case X86::TCRETURNdi:
970 case X86::TCRETURNri:
971 case X86::TCRETURNmi:
972 case X86::TCRETURNdi64:
973 case X86::TCRETURNri64:
974 case X86::TCRETURNmi64:
975 case X86::EH_RETURN:
976 case X86::EH_RETURN64:
977 break; // These are ok
978 }
979
980 // Get the number of bytes to allocate from the FrameInfo.
981 uint64_t StackSize = MFI->getStackSize();
982 uint64_t MaxAlign = MFI->getMaxAlignment();
983 unsigned CSSize = X86FI->getCalleeSavedFrameSize();
984 uint64_t NumBytes = 0;
985
986 // If we're forcing a stack realignment we can't rely on just the frame
987 // info; we need to know the ABI stack alignment as well in case we
988 // have a call out. Otherwise just make sure we have some alignment - we'll
989 // go with the minimum.
990 if (ForceStackAlign) {
991 if (MFI->hasCalls())
992 MaxAlign = (StackAlign > MaxAlign) ? StackAlign : MaxAlign;
993 else
994 MaxAlign = MaxAlign ? MaxAlign : 4;
995 }
996
997 if (hasFP(MF)) {
998 // Calculate required stack adjustment.
999 uint64_t FrameSize = StackSize - SlotSize;
1000 if (RegInfo->needsStackRealignment(MF))
1001 FrameSize = (FrameSize + MaxAlign - 1)/MaxAlign*MaxAlign;
1002
1003 NumBytes = FrameSize - CSSize;
1004
1005 // Pop EBP.
1006 BuildMI(MBB, MBBI, DL,
1007 TII.get(Is64Bit ? X86::POP64r : X86::POP32r), FramePtr);
1008 } else {
1009 NumBytes = StackSize - CSSize;
1010 }
1011
1012 // Skip the callee-saved pop instructions.
1013 MachineBasicBlock::iterator LastCSPop = MBBI;
1014 while (MBBI != MBB.begin()) {
1015 MachineBasicBlock::iterator PI = prior(MBBI);
1016 unsigned Opc = PI->getOpcode();
1017
1018 if (Opc != X86::POP32r && Opc != X86::POP64r && Opc != X86::DBG_VALUE &&
1019 !PI->isTerminator())
1020 break;
1021
1022 --MBBI;
1023 }
1024
1025 DL = MBBI->getDebugLoc();
1026
1027 // If there is an ADD32ri or SUB32ri of ESP immediately before this
1028 // instruction, merge the two instructions.
1029 if (NumBytes || MFI->hasVarSizedObjects())
1030 mergeSPUpdatesUp(MBB, MBBI, StackPtr, &NumBytes);
1031
1032 // If dynamic allocas are used, reset ESP to point to the last callee-saved
1033 // slot before popping the saved registers off. The same applies when the
1034 // stack was realigned.
1035 if (RegInfo->needsStackRealignment(MF)) {
1036 // We cannot use LEA here because the stack pointer was realigned. We need
1037 // to deallocate the local frame first.
1038 if (CSSize) {
1039 emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, UseLEA, TII,
1040 *RegInfo);
1041 MBBI = prior(LastCSPop);
1042 }
1043
1044 BuildMI(MBB, MBBI, DL,
1045 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
1046 StackPtr).addReg(FramePtr);
1047 } else if (MFI->hasVarSizedObjects()) {
1048 if (CSSize) {
1049 unsigned Opc = Is64Bit ? X86::LEA64r : X86::LEA32r;
1050 MachineInstr *MI =
1051 addRegOffset(BuildMI(MF, DL, TII.get(Opc), StackPtr),
1052 FramePtr, false, -CSSize);
1053 MBB.insert(MBBI, MI);
1054 } else {
1055 BuildMI(MBB, MBBI, DL,
1056 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr), StackPtr)
1057 .addReg(FramePtr);
1058 }
1059 } else if (NumBytes) {
1060 // Adjust stack pointer back: ESP += numbytes.
1061 emitSPUpdate(MBB, MBBI, StackPtr, NumBytes, Is64Bit, UseLEA, TII, *RegInfo);
1062 }
1063
1064 // We're returning from the function via eh_return.
1065 if (RetOpcode == X86::EH_RETURN || RetOpcode == X86::EH_RETURN64) {
1066 MBBI = MBB.getLastNonDebugInstr();
1067 MachineOperand &DestAddr = MBBI->getOperand(0);
1068 assert(DestAddr.isReg() && "Offset should be in register!");
1069 BuildMI(MBB, MBBI, DL,
1070 TII.get(Is64Bit ? X86::MOV64rr : X86::MOV32rr),
1071 StackPtr).addReg(DestAddr.getReg());
1072 } else if (RetOpcode == X86::TCRETURNri || RetOpcode == X86::TCRETURNdi ||
1073 RetOpcode == X86::TCRETURNmi ||
1074 RetOpcode == X86::TCRETURNri64 || RetOpcode == X86::TCRETURNdi64 ||
1075 RetOpcode == X86::TCRETURNmi64) {
1076 bool isMem = RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64;
1077 // Tail call return: adjust the stack pointer and jump to callee.
1078 MBBI = MBB.getLastNonDebugInstr();
1079 MachineOperand &JumpTarget = MBBI->getOperand(0);
1080 MachineOperand &StackAdjust = MBBI->getOperand(isMem ? 5 : 1);
1081 assert(StackAdjust.isImm() && "Expecting immediate value.");
1082
1083 // Adjust stack pointer.
1084 int StackAdj = StackAdjust.getImm();
1085 int MaxTCDelta = X86FI->getTCReturnAddrDelta();
1086 int Offset = 0;
1087 assert(MaxTCDelta <= 0 && "MaxTCDelta should never be positive");
1088
1089 // Incorporate the retaddr area.
1090 Offset = StackAdj-MaxTCDelta;
1091 assert(Offset >= 0 && "Offset should never be negative");
1092
1093 if (Offset) {
1094 // Check for possible merge with preceding ADD instruction.
1095 Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true);
1096 emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, UseLEA, TII, *RegInfo);
1097 }
1098
1099 // Jump to label or value in register.
1100 if (RetOpcode == X86::TCRETURNdi || RetOpcode == X86::TCRETURNdi64) {
1101 MachineInstrBuilder MIB =
1102 BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNdi)
1103 ? X86::TAILJMPd : X86::TAILJMPd64));
1104 if (JumpTarget.isGlobal())
1105 MIB.addGlobalAddress(JumpTarget.getGlobal(), JumpTarget.getOffset(),
1106 JumpTarget.getTargetFlags());
1107 else {
1108 assert(JumpTarget.isSymbol());
1109 MIB.addExternalSymbol(JumpTarget.getSymbolName(),
1110 JumpTarget.getTargetFlags());
1111 }
1112 } else if (RetOpcode == X86::TCRETURNmi || RetOpcode == X86::TCRETURNmi64) {
1113 MachineInstrBuilder MIB =
1114 BuildMI(MBB, MBBI, DL, TII.get((RetOpcode == X86::TCRETURNmi)
1115 ? X86::TAILJMPm : X86::TAILJMPm64));
1116 for (unsigned i = 0; i != 5; ++i)
1117 MIB.addOperand(MBBI->getOperand(i));
1118 } else if (RetOpcode == X86::TCRETURNri64) {
1119 BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr64)).
1120 addReg(JumpTarget.getReg(), RegState::Kill);
1121 } else {
1122 BuildMI(MBB, MBBI, DL, TII.get(X86::TAILJMPr)).
1123 addReg(JumpTarget.getReg(), RegState::Kill);
1124 }
1125
1126 MachineInstr *NewMI = prior(MBBI);
1127 for (unsigned i = 2, e = MBBI->getNumOperands(); i != e; ++i)
1128 NewMI->addOperand(MBBI->getOperand(i));
1129
1130 // Delete the pseudo instruction TCRETURN.
1131 MBB.erase(MBBI);
1132 } else if ((RetOpcode == X86::RET || RetOpcode == X86::RETI) &&
1133 (X86FI->getTCReturnAddrDelta() < 0)) {
1134 // Add the return addr area delta back since we are not tail calling.
1135 int delta = -1*X86FI->getTCReturnAddrDelta();
1136 MBBI = MBB.getLastNonDebugInstr();
1137
1138 // Check for possible merge with preceding ADD instruction.
1139 delta += mergeSPUpdates(MBB, MBBI, StackPtr, true);
1140 emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, UseLEA, TII, *RegInfo);
1141 }
1142 }
1143
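/// getFrameIndexOffset - Return the offset of the given frame index relative
/// to the frame register chosen for this function. A worked example
/// (illustrative, assuming the usual -8 local-area offset on x86-64 with a
/// frame pointer and no tail-call delta): an object at MFI offset -24 yields
/// -24 - (-8) + 8 = -8, i.e. -8(%rbp), where the final +8 skips the saved
/// RBP slot.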
1144 int X86FrameLowering::getFrameIndexOffset(const MachineFunction &MF, int FI) const {
1145 const X86RegisterInfo *RI =
1146 static_cast<const X86RegisterInfo*>(MF.getTarget().getRegisterInfo());
1147 const MachineFrameInfo *MFI = MF.getFrameInfo();
1148 int Offset = MFI->getObjectOffset(FI) - getOffsetOfLocalArea();
1149 uint64_t StackSize = MFI->getStackSize();
1150
1151 if (RI->needsStackRealignment(MF)) {
1152 if (FI < 0) {
1153 // Skip the saved EBP.
1154 Offset += RI->getSlotSize();
1155 } else {
1156 assert((-(Offset + StackSize)) % MFI->getObjectAlignment(FI) == 0);
1157 return Offset + StackSize;
1158 }
1159 // FIXME: Support tail calls
1160 } else {
1161 if (!hasFP(MF))
1162 return Offset + StackSize;
1163
1164 // Skip the saved EBP.
1165 Offset += RI->getSlotSize();
1166
1167 // Skip the RETADDR move area
1168 const X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1169 int TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1170 if (TailCallReturnAddrDelta < 0)
1171 Offset -= TailCallReturnAddrDelta;
1172 }
1173
1174 return Offset;
1175 }
1176
1177 bool X86FrameLowering::spillCalleeSavedRegisters(MachineBasicBlock &MBB,
1178 MachineBasicBlock::iterator MI,
1179 const std::vector<CalleeSavedInfo> &CSI,
1180 const TargetRegisterInfo *TRI) const {
1181 if (CSI.empty())
1182 return false;
1183
1184 DebugLoc DL = MBB.findDebugLoc(MI);
1185
1186 MachineFunction &MF = *MBB.getParent();
1187
1188 unsigned SlotSize = STI.is64Bit() ? 8 : 4;
1189 unsigned FPReg = TRI->getFrameRegister(MF);
1190 unsigned CalleeFrameSize = 0;
1191
1192 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
1193 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1194
1195 // Push GPRs. This increases the frame size.
1196 unsigned Opc = STI.is64Bit() ? X86::PUSH64r : X86::PUSH32r;
1197 for (unsigned i = CSI.size(); i != 0; --i) {
1198 unsigned Reg = CSI[i-1].getReg();
1199 if (!X86::GR64RegClass.contains(Reg) &&
1200 !X86::GR32RegClass.contains(Reg))
1201 continue;
1202 // Add the callee-saved register as live-in. It's killed at the spill.
1203 MBB.addLiveIn(Reg);
1204 if (Reg == FPReg)
1205 // X86RegisterInfo::emitPrologue will handle spilling of frame register.
1206 continue;
1207 CalleeFrameSize += SlotSize;
1208 BuildMI(MBB, MI, DL, TII.get(Opc)).addReg(Reg, RegState::Kill)
1209 .setMIFlag(MachineInstr::FrameSetup);
1210 }
1211
1212 X86FI->setCalleeSavedFrameSize(CalleeFrameSize);
1213
1214 // Spill the XMM registers. X86 has no push/pop instructions for XMMs, so
1215 // they are stored to the stack frame instead.
1216 // Note that only the Win64 ABI might spill XMMs.
1217 for (unsigned i = CSI.size(); i != 0; --i) {
1218 unsigned Reg = CSI[i-1].getReg();
1219 if (X86::GR64RegClass.contains(Reg) ||
1220 X86::GR32RegClass.contains(Reg))
1221 continue;
1222 // Add the callee-saved register as live-in. It's killed at the spill.
1223 MBB.addLiveIn(Reg);
1224 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1225 TII.storeRegToStackSlot(MBB, MI, Reg, true, CSI[i-1].getFrameIdx(),
1226 RC, TRI);
1227 }
1228
1229 return true;
1230 }
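
// A sketch of the result for a Win64 function saving RBX and XMM6
// (illustrative register choice): the first loop emits "pushq %rbx" and
// counts it in CalleeSavedFrameSize, while the second loop stores XMM6 to its
// frame slot via storeRegToStackSlot, since there is no push for vector
// registers.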
1231
1232 bool X86FrameLowering::restoreCalleeSavedRegisters(MachineBasicBlock &MBB,
1233 MachineBasicBlock::iterator MI,
1234 const std::vector<CalleeSavedInfo> &CSI,
1235 const TargetRegisterInfo *TRI) const {
1236 if (CSI.empty())
1237 return false;
1238
1239 DebugLoc DL = MBB.findDebugLoc(MI);
1240
1241 MachineFunction &MF = *MBB.getParent();
1242 const TargetInstrInfo &TII = *MF.getTarget().getInstrInfo();
1243
1244 // Reload XMMs from stack frame.
1245 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1246 unsigned Reg = CSI[i].getReg();
1247 if (X86::GR64RegClass.contains(Reg) ||
1248 X86::GR32RegClass.contains(Reg))
1249 continue;
1250 const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg);
1251 TII.loadRegFromStackSlot(MBB, MI, Reg, CSI[i].getFrameIdx(),
1252 RC, TRI);
1253 }
1254
1255 // POP GPRs.
1256 unsigned FPReg = TRI->getFrameRegister(MF);
1257 unsigned Opc = STI.is64Bit() ? X86::POP64r : X86::POP32r;
1258 for (unsigned i = 0, e = CSI.size(); i != e; ++i) {
1259 unsigned Reg = CSI[i].getReg();
1260 if (!X86::GR64RegClass.contains(Reg) &&
1261 !X86::GR32RegClass.contains(Reg))
1262 continue;
1263 if (Reg == FPReg)
1264 // X86RegisterInfo::emitEpilogue will handle restoring of frame register.
1265 continue;
1266 BuildMI(MBB, MI, DL, TII.get(Opc), Reg);
1267 }
1268 return true;
1269 }
1270
1271 void
1272 X86FrameLowering::processFunctionBeforeCalleeSavedScan(MachineFunction &MF,
1273 RegScavenger *RS) const {
1274 MachineFrameInfo *MFI = MF.getFrameInfo();
1275 const X86RegisterInfo *RegInfo = TM.getRegisterInfo();
1276 unsigned SlotSize = RegInfo->getSlotSize();
1277
1278 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1279 int32_t TailCallReturnAddrDelta = X86FI->getTCReturnAddrDelta();
1280
1281 if (TailCallReturnAddrDelta < 0) {
1282 // create RETURNADDR area
1283 // arg
1284 // arg
1285 // RETADDR
1286 // { ...
1287 // RETADDR area
1288 // ...
1289 // }
1290 // [EBP]
1291 MFI->CreateFixedObject(-TailCallReturnAddrDelta,
1292 (-1U*SlotSize)+TailCallReturnAddrDelta, true);
1293 }
1294
1295 if (hasFP(MF)) {
1296 assert((TailCallReturnAddrDelta <= 0) &&
1297 "The Delta should always be zero or negative");
1298 const TargetFrameLowering &TFI = *MF.getTarget().getFrameLowering();
1299
1300 // Create a frame entry for the EBP register that must be saved.
1301 int FrameIdx = MFI->CreateFixedObject(SlotSize,
1302 -(int)SlotSize +
1303 TFI.getOffsetOfLocalArea() +
1304 TailCallReturnAddrDelta,
1305 true);
1306 assert(FrameIdx == MFI->getObjectIndexBegin() &&
1307 "Slot for EBP register must be last in order to be found!");
1308 (void)FrameIdx;
1309 }
1310 }
1311
1312 static bool
1313 HasNestArgument(const MachineFunction *MF) {
1314 const Function *F = MF->getFunction();
1315 for (Function::const_arg_iterator I = F->arg_begin(), E = F->arg_end();
1316 I != E; I++) {
1317 if (I->hasNestAttr())
1318 return true;
1319 }
1320 return false;
1321 }
1322
1323
1324 /// GetScratchRegister - Get a register for performing work in the segmented
1325 /// stack prologue. Depending on platform and the properties of the function
1326 /// either one or two registers will be needed. Set primary to true for
1327 /// the first register, false for the second.
1328 static unsigned
1329 GetScratchRegister(bool Is64Bit, const MachineFunction &MF, bool Primary) {
1330 if (Is64Bit)
1331 return Primary ? X86::R11 : X86::R12;
1332
1333 CallingConv::ID CallingConvention = MF.getFunction()->getCallingConv();
1334 bool IsNested = HasNestArgument(&MF);
1335
1336 if (CallingConvention == CallingConv::X86_FastCall ||
1337 CallingConvention == CallingConv::Fast) {
1338 if (IsNested)
1339 report_fatal_error("Segmented stacks do not support fastcall with "
1340 "nested functions.");
1341 return Primary ? X86::EAX : X86::ECX;
1342 }
1343 if (IsNested)
1344 return Primary ? X86::EDX : X86::EAX;
1345 return Primary ? X86::ECX : X86::EAX;
1346 }
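
// Summary of the choices above: 64-bit code always gets R11/R12; 32-bit
// fastcall/fastcc gets EAX/ECX; 32-bit with a nest argument gets EDX/EAX
// (the static chain lives in ECX); otherwise ECX/EAX.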
1347
1348 // The stack limit in the TCB is set to this many bytes above the actual stack
1349 // limit.
1350 static const uint64_t kSplitStackAvailable = 256;
1351
1352 void
1353 X86FrameLowering::adjustForSegmentedStacks(MachineFunction &MF) const {
1354 MachineBasicBlock &prologueMBB = MF.front();
1355 MachineFrameInfo *MFI = MF.getFrameInfo();
1356 const X86InstrInfo &TII = *TM.getInstrInfo();
1357 uint64_t StackSize;
1358 bool Is64Bit = STI.is64Bit();
1359 unsigned TlsReg, TlsOffset;
1360 DebugLoc DL;
1361 const X86Subtarget *ST = &MF.getTarget().getSubtarget<X86Subtarget>();
1362
1363 unsigned ScratchReg = GetScratchRegister(Is64Bit, MF, true);
1364 assert(!MF.getRegInfo().isLiveIn(ScratchReg) &&
1365 "Scratch register is live-in");
1366
1367 if (MF.getFunction()->isVarArg())
1368 report_fatal_error("Segmented stacks do not support vararg functions.");
1369 if (!ST->isTargetLinux() && !ST->isTargetDarwin() &&
1370 !ST->isTargetWin32() && !ST->isTargetFreeBSD())
1371 report_fatal_error("Segmented stacks not supported on this platform.");
1372
1373 MachineBasicBlock *allocMBB = MF.CreateMachineBasicBlock();
1374 MachineBasicBlock *checkMBB = MF.CreateMachineBasicBlock();
1375 X86MachineFunctionInfo *X86FI = MF.getInfo<X86MachineFunctionInfo>();
1376 bool IsNested = false;
1377
1378 // We need to know whether the function has a nest argument only in 64-bit mode.
1379 if (Is64Bit)
1380 IsNested = HasNestArgument(&MF);
1381
1382 // The MOV R10, RAX needs to be in a different block, since the RET we emit
1383 // in allocMBB needs to be the last (terminating) instruction.
1384
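// A sketch of the emitted code for the 64-bit Linux case (illustrative; the
// TLS register and offset vary per platform, as selected below):
//
// checkMBB:
//   leaq -StackSize(%rsp), %r11   ; omitted when StackSize < 256
//   cmpq %fs:0x70, %r11
//   ja   <original prologue>
// allocMBB:
//   movabsq $StackSize, %r10
//   movabsq $ArgSize, %r11
//   callq __morestack
//   retq
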
1385 for (MachineBasicBlock::livein_iterator i = prologueMBB.livein_begin(),
1386 e = prologueMBB.livein_end(); i != e; i++) {
1387 allocMBB->addLiveIn(*i);
1388 checkMBB->addLiveIn(*i);
1389 }
1390
1391 if (IsNested)
1392 allocMBB->addLiveIn(X86::R10);
1393
1394 MF.push_front(allocMBB);
1395 MF.push_front(checkMBB);
1396
1397 // Eventually StackSize will be calculated by a link-time pass, which will
1398 // also decide whether checking code needs to be injected into this
1399 // particular prologue.
1400 StackSize = MFI->getStackSize();
1401
1402 // When the frame size is less than 256 we just compare the stack
1403 // boundary directly to the value of the stack pointer, per gcc.
1404 bool CompareStackPointer = StackSize < kSplitStackAvailable;
1405
1406 // Read the limit of the current stacklet off the stack_guard location.
1407 if (Is64Bit) {
1408 if (ST->isTargetLinux()) {
1409 TlsReg = X86::FS;
1410 TlsOffset = 0x70;
1411 } else if (ST->isTargetDarwin()) {
1412 TlsReg = X86::GS;
1413 TlsOffset = 0x60 + 90*8; // See pthread_machdep.h. Steal TLS slot 90.
1414 } else if (ST->isTargetFreeBSD()) {
1415 TlsReg = X86::FS;
1416 TlsOffset = 0x18;
1417 } else {
1418 report_fatal_error("Segmented stacks not supported on this platform.");
1419 }
1420
1421 if (CompareStackPointer)
1422 ScratchReg = X86::RSP;
1423 else
1424 BuildMI(checkMBB, DL, TII.get(X86::LEA64r), ScratchReg).addReg(X86::RSP)
1425 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1426
1427 BuildMI(checkMBB, DL, TII.get(X86::CMP64rm)).addReg(ScratchReg)
1428 .addReg(0).addImm(1).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1429 } else {
1430 if (ST->isTargetLinux()) {
1431 TlsReg = X86::GS;
1432 TlsOffset = 0x30;
1433 } else if (ST->isTargetDarwin()) {
1434 TlsReg = X86::GS;
1435 TlsOffset = 0x48 + 90*4;
1436 } else if (ST->isTargetWin32()) {
1437 TlsReg = X86::FS;
1438 TlsOffset = 0x14; // pvArbitrary, reserved for application use
1439 } else if (ST->isTargetFreeBSD()) {
1440 report_fatal_error("Segmented stacks not supported on FreeBSD i386.");
1441 } else {
1442 report_fatal_error("Segmented stacks not supported on this platform.");
1443 }
1444
1445 if (CompareStackPointer)
1446 ScratchReg = X86::ESP;
1447 else
1448 BuildMI(checkMBB, DL, TII.get(X86::LEA32r), ScratchReg).addReg(X86::ESP)
1449 .addImm(1).addReg(0).addImm(-StackSize).addReg(0);
1450
1451 if (ST->isTargetLinux() || ST->isTargetWin32()) {
1452 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm)).addReg(ScratchReg)
1453 .addReg(0).addImm(0).addReg(0).addImm(TlsOffset).addReg(TlsReg);
1454 } else if (ST->isTargetDarwin()) {
1455
1456 // TlsOffset doesn't fit into a mod r/m byte so we need an extra register.
1457 unsigned ScratchReg2;
1458 bool SaveScratch2;
1459 if (CompareStackPointer) {
1460 // The primary scratch register is available for holding the TLS offset
1461 ScratchReg2 = GetScratchRegister(Is64Bit, MF, true);
1462 SaveScratch2 = false;
1463 } else {
1464 // Need to use a second register to hold the TLS offset
1465 ScratchReg2 = GetScratchRegister(Is64Bit, MF, false);
1466
1467 // Unfortunately, with fastcc the second scratch register may hold an arg
1468 SaveScratch2 = MF.getRegInfo().isLiveIn(ScratchReg2);
1469 }
1470
1471 // If Scratch2 is live-in then it needs to be saved
1472 assert((!MF.getRegInfo().isLiveIn(ScratchReg2) || SaveScratch2) &&
1473 "Scratch register is live-in and not saved");
1474
1475 if (SaveScratch2)
1476 BuildMI(checkMBB, DL, TII.get(X86::PUSH32r))
1477 .addReg(ScratchReg2, RegState::Kill);
1478
1479 BuildMI(checkMBB, DL, TII.get(X86::MOV32ri), ScratchReg2)
1480 .addImm(TlsOffset);
1481 BuildMI(checkMBB, DL, TII.get(X86::CMP32rm))
1482 .addReg(ScratchReg)
1483 .addReg(ScratchReg2).addImm(1).addReg(0)
1484 .addImm(0)
1485 .addReg(TlsReg);
1486
1487 if (SaveScratch2)
1488 BuildMI(checkMBB, DL, TII.get(X86::POP32r), ScratchReg2);
1489 }
1490 }
1491
1492 // This jump is taken if SP >= (Stacklet Limit + Stack Space required).
1493 // It jumps to normal execution of the function body.
1494 BuildMI(checkMBB, DL, TII.get(X86::JA_4)).addMBB(&prologueMBB);
1495
1496 // On 32 bit we first push the arguments size and then the frame size. On 64
1497 // bit, we pass the stack frame size in r10 and the argument size in r11.
1498 if (Is64Bit) {
1499 // Functions with nested arguments use R10, so it needs to be saved across
1500 // the call to __morestack.
1501
1502 if (IsNested)
1503 BuildMI(allocMBB, DL, TII.get(X86::MOV64rr), X86::RAX).addReg(X86::R10);
1504
1505 BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R10)
1506 .addImm(StackSize);
1507 BuildMI(allocMBB, DL, TII.get(X86::MOV64ri), X86::R11)
1508 .addImm(X86FI->getArgumentStackSize());
1509 MF.getRegInfo().setPhysRegUsed(X86::R10);
1510 MF.getRegInfo().setPhysRegUsed(X86::R11);
1511 } else {
1512 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1513 .addImm(X86FI->getArgumentStackSize());
1514 BuildMI(allocMBB, DL, TII.get(X86::PUSHi32))
1515 .addImm(StackSize);
1516 }
1517
1518 // __morestack is in libgcc
1519 if (Is64Bit)
1520 BuildMI(allocMBB, DL, TII.get(X86::CALL64pcrel32))
1521 .addExternalSymbol("__morestack");
1522 else
1523 BuildMI(allocMBB, DL, TII.get(X86::CALLpcrel32))
1524 .addExternalSymbol("__morestack");
1525
1526 if (IsNested)
1527 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET_RESTORE_R10));
1528 else
1529 BuildMI(allocMBB, DL, TII.get(X86::MORESTACK_RET));
1530
1531 allocMBB->addSuccessor(&prologueMBB);
1532
1533 checkMBB->addSuccessor(allocMBB);
1534 checkMBB->addSuccessor(&prologueMBB);
1535
1536 #ifdef XDEBUG
1537 MF.verify();
1538 #endif
1539 }
1540