//===- AArch64FrameLowering.cpp - AArch64 Frame Lowering -------*- C++ -*-====//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the AArch64 implementation of TargetFrameLowering class.
//
// On AArch64, stack frames are structured as follows:
//
// The stack grows downward.
//
// All of the individual frame areas on the frame below are optional, i.e. it's
// possible to create a function such that a particular area isn't present
// in the frame.
//
// At function entry, the "frame" looks as follows:
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// After the prologue has run, the frame has the following general structure.
// Note that this doesn't depict the case where a red-zone is used. Also,
// technically the last frame area (VLAs) doesn't get created until later, in
// the main function body, after the prologue has run. However, it's depicted
// here for completeness.
//
// |                                   | Higher address
// |-----------------------------------|
// |                                   |
// | arguments passed on the stack     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | (Win64 only) varargs from reg     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | callee-saved gpr registers        | <--.
// |                                   |    | On Darwin platforms these
// |- - - - - - - - - - - - - - - - - -|    | callee saves are swapped,
// |                                   |    | (frame record first)
// | prev_fp, prev_lr                  | <--'
// | (a.k.a. "frame record")           |
// |-----------------------------------| <- fp(=x29)
// |                                   |
// | callee-saved fp/simd/SVE regs     |
// |                                   |
// |-----------------------------------|
// |                                   |
// | SVE stack objects                 |
// |                                   |
// |-----------------------------------|
// |.empty.space.to.make.part.below....|
// |.aligned.in.case.it.needs.more.than| (size of this area is unknown at
// |.the.standard.16-byte.alignment....|  compile time; if present)
// |-----------------------------------|
// |                                   |
// | local variables of fixed size     |
// | including spill slots             |
// |-----------------------------------| <- bp (not defined by ABI,
// |.variable-sized.local.variables....|        LLVM chooses X19)
// |.(VLAs)............................| (size of this area is unknown at
// |...................................|  compile time)
// |-----------------------------------| <- sp
// |                                   | Lower address
//
//
// To access data in a frame, a constant offset from one of the pointers (fp,
// bp, sp) to that data must be computable at compile time. The sizes of the
// areas with a dotted background cannot be computed at compile time if those
// areas are present, so all three of fp, bp and sp may need to be set up in
// order to access all contents of the frame, assuming all of the frame areas
// are non-empty.
//
// For most functions, some of the frame areas are empty. For those functions,
// it may not be necessary to set up fp or bp:
// * A base pointer is definitely needed when there are both VLAs and local
//   variables with more-than-default alignment requirements.
// * A frame pointer is definitely needed when there are local variables with
//   more-than-default alignment requirements.
//
// For Darwin platforms the frame-record (fp, lr) is stored at the top of the
// callee-saved area, since the unwind encoding does not allow for encoding
// this dynamically and existing tools depend on this layout. For other
// platforms, the frame-record is stored at the bottom of the (gpr) callee-saved
// area to allow SVE stack objects (allocated directly below the callee-saves,
// if available) to be accessed directly from the frame pointer.
// The SVE spill/fill instructions have VL-scaled addressing modes such
// as:
//   ldr z8, [fp, #-7 mul vl]
// For SVE the vector length (VL) is not known at compile time, so
// '#-7 mul vl' is an offset that can only be evaluated at runtime. With this
// layout, we don't need to add an unscaled offset to the frame pointer before
// accessing the SVE object in the frame.
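//
// For example (an illustrative sketch; the actual offsets are computed later
// in this file), with the frame record at the bottom of the GPR callee-saves
// an SVE callee-save or SVE local can be reached in one VL-scaled access:
//
//   ldr z8, [fp, #-7 mul vl]
//
// whereas with the frame record on top of the callee-saves (as on Darwin) an
// extra unscaled step would be needed first to skip the callee-save area,
// e.g. (assuming a 64-byte callee-save area):
//
//   sub x8, fp, #64
//   ldr z8, [x8, #-7 mul vl]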
//
// In some cases when a base pointer is not strictly needed, it is generated
// anyway when offsets from the frame pointer to access local variables become
// so large that the offset can't be encoded in the immediate fields of loads
// or stores.
//
// FIXME: also explain the redzone concept.
// FIXME: also explain the concept of reserved call frames.
//
//===----------------------------------------------------------------------===//
114
115 #include "AArch64FrameLowering.h"
116 #include "AArch64InstrInfo.h"
117 #include "AArch64MachineFunctionInfo.h"
118 #include "AArch64RegisterInfo.h"
119 #include "AArch64StackOffset.h"
120 #include "AArch64Subtarget.h"
121 #include "AArch64TargetMachine.h"
122 #include "MCTargetDesc/AArch64AddressingModes.h"
123 #include "llvm/ADT/ScopeExit.h"
124 #include "llvm/ADT/SmallVector.h"
125 #include "llvm/ADT/Statistic.h"
126 #include "llvm/CodeGen/LivePhysRegs.h"
127 #include "llvm/CodeGen/MachineBasicBlock.h"
128 #include "llvm/CodeGen/MachineFrameInfo.h"
129 #include "llvm/CodeGen/MachineFunction.h"
130 #include "llvm/CodeGen/MachineInstr.h"
131 #include "llvm/CodeGen/MachineInstrBuilder.h"
132 #include "llvm/CodeGen/MachineMemOperand.h"
133 #include "llvm/CodeGen/MachineModuleInfo.h"
134 #include "llvm/CodeGen/MachineOperand.h"
135 #include "llvm/CodeGen/MachineRegisterInfo.h"
136 #include "llvm/CodeGen/RegisterScavenging.h"
137 #include "llvm/CodeGen/TargetInstrInfo.h"
138 #include "llvm/CodeGen/TargetRegisterInfo.h"
139 #include "llvm/CodeGen/TargetSubtargetInfo.h"
140 #include "llvm/CodeGen/WinEHFuncInfo.h"
141 #include "llvm/IR/Attributes.h"
142 #include "llvm/IR/CallingConv.h"
143 #include "llvm/IR/DataLayout.h"
144 #include "llvm/IR/DebugLoc.h"
145 #include "llvm/IR/Function.h"
146 #include "llvm/MC/MCAsmInfo.h"
147 #include "llvm/MC/MCDwarf.h"
148 #include "llvm/Support/CommandLine.h"
149 #include "llvm/Support/Debug.h"
150 #include "llvm/Support/ErrorHandling.h"
151 #include "llvm/Support/MathExtras.h"
152 #include "llvm/Support/raw_ostream.h"
153 #include "llvm/Target/TargetMachine.h"
154 #include "llvm/Target/TargetOptions.h"
155 #include <cassert>
156 #include <cstdint>
157 #include <iterator>
158 #include <vector>
159
160 using namespace llvm;
161
162 #define DEBUG_TYPE "frame-info"
163
164 static cl::opt<bool> EnableRedZone("aarch64-redzone",
165 cl::desc("enable use of redzone on AArch64"),
166 cl::init(false), cl::Hidden);
167
168 static cl::opt<bool>
169 ReverseCSRRestoreSeq("reverse-csr-restore-seq",
170 cl::desc("reverse the CSR restore sequence"),
171 cl::init(false), cl::Hidden);
172
173 STATISTIC(NumRedZoneFunctions, "Number of functions using red zone");
174
/// This is the biggest offset to the stack pointer we can encode in aarch64
/// instructions (without using a separate calculation and a temp register).
/// Note that the exceptions here are vector stores/loads which cannot encode
/// any displacements (see estimateRSStackSizeLimit(), isAArch64FrameOffsetLegal()).
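///
/// For illustration (a rough sketch, not the precise rule used below): the
/// unscaled load/store forms take a signed 9-bit offset, so
///
///   ldur x0, [sp, #255]      // encodable (range is -256..255)
///   ldr  x0, [sp, #32760]    // scaled forms reach further, but only for
///                            // offsets that are multiples of the access size
///
/// which is why 255 is used as a conservative, commonly-safe displacement.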
179 static const unsigned DefaultSafeSPDisplacement = 255;
180
181 /// Look at each instruction that references stack frames and return the stack
182 /// size limit beyond which some of these instructions will require a scratch
183 /// register during their expansion later.
static unsigned estimateRSStackSizeLimit(MachineFunction &MF) {
// FIXME: For now, just conservatively guesstimate based on unscaled indexing
// range. We'll end up allocating an unnecessary spill slot a lot, but
// realistically that's not a big deal at this stage of the game.
188 for (MachineBasicBlock &MBB : MF) {
189 for (MachineInstr &MI : MBB) {
190 if (MI.isDebugInstr() || MI.isPseudo() ||
191 MI.getOpcode() == AArch64::ADDXri ||
192 MI.getOpcode() == AArch64::ADDSXri)
193 continue;
194
195 for (const MachineOperand &MO : MI.operands()) {
196 if (!MO.isFI())
197 continue;
198
199 StackOffset Offset;
200 if (isAArch64FrameOffsetLegal(MI, Offset, nullptr, nullptr, nullptr) ==
201 AArch64FrameOffsetCannotUpdate)
202 return 0;
203 }
204 }
205 }
206 return DefaultSafeSPDisplacement;
207 }
208
209 TargetStackID::Value
AArch64FrameLowering::getStackIDForScalableVectors() const {
211 return TargetStackID::SVEVector;
212 }
213
214 /// Returns the size of the entire SVE stackframe (calleesaves + spills).
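///
/// The result is expressed in "scalable bytes" (MVT::nxv1i8), i.e. bytes that
/// are multiplied by the runtime vector-length multiple. A hypothetical
/// example: a value of {32, MVT::nxv1i8} resolves to 32 bytes when the SVE
/// vector length is 128 bits, 64 bytes when it is 256 bits, and so on.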
static StackOffset getSVEStackSize(const MachineFunction &MF) {
216 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
217 return {(int64_t)AFI->getStackSizeSVE(), MVT::nxv1i8};
218 }
219
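// Note (an informal sketch; see also the FIXME about the red zone in the file
// header): when this returns true the prologue does not adjust SP at all, and
// small leaf-function locals are addressed at negative offsets below SP, e.g.
//
//   str x0, [sp, #-8]
//   ...
//   ldr x0, [sp, #-8]
//   ret
//
// instead of being wrapped in the usual "sub sp" / "add sp" pair.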
bool AArch64FrameLowering::canUseRedZone(const MachineFunction &MF) const {
221 if (!EnableRedZone)
222 return false;
223 // Don't use the red zone if the function explicitly asks us not to.
224 // This is typically used for kernel code.
225 if (MF.getFunction().hasFnAttribute(Attribute::NoRedZone))
226 return false;
227
228 const MachineFrameInfo &MFI = MF.getFrameInfo();
229 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
230 uint64_t NumBytes = AFI->getLocalStackSize();
231
232 return !(MFI.hasCalls() || hasFP(MF) || NumBytes > 128 ||
233 getSVEStackSize(MF));
234 }
235
236 /// hasFP - Return true if the specified function should have a dedicated frame
237 /// pointer register.
bool AArch64FrameLowering::hasFP(const MachineFunction &MF) const {
239 const MachineFrameInfo &MFI = MF.getFrameInfo();
240 const TargetRegisterInfo *RegInfo = MF.getSubtarget().getRegisterInfo();
241 // Win64 EH requires a frame pointer if funclets are present, as the locals
242 // are accessed off the frame pointer in both the parent function and the
243 // funclets.
244 if (MF.hasEHFunclets())
245 return true;
246 // Retain behavior of always omitting the FP for leaf functions when possible.
247 if (MF.getTarget().Options.DisableFramePointerElim(MF))
248 return true;
249 if (MFI.hasVarSizedObjects() || MFI.isFrameAddressTaken() ||
250 MFI.hasStackMap() || MFI.hasPatchPoint() ||
251 RegInfo->needsStackRealignment(MF))
252 return true;
// With large call frames around we may need to use FP to access the scavenging
// emergency spill slot.
255 //
256 // Unfortunately some calls to hasFP() like machine verifier ->
257 // getReservedReg() -> hasFP in the middle of global isel are too early
258 // to know the max call frame size. Hopefully conservatively returning "true"
259 // in those cases is fine.
260 // DefaultSafeSPDisplacement is fine as we only emergency spill GP regs.
261 if (!MFI.isMaxCallFrameSizeComputed() ||
262 MFI.getMaxCallFrameSize() > DefaultSafeSPDisplacement)
263 return true;
264
265 return false;
266 }
267
268 /// hasReservedCallFrame - Under normal circumstances, when a frame pointer is
269 /// not required, we reserve argument space for call sites in the function
270 /// immediately on entry to the current function. This eliminates the need for
271 /// add/sub sp brackets around call sites. Returns true if the call frame is
272 /// included as part of the stack frame.
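///
/// As a rough illustration (the exact sequences are decided elsewhere), with a
/// reserved call frame the prologue allocates the outgoing-argument area once:
///
///   sub sp, sp, #48        // locals plus the largest outgoing call frame
///   ...
///   str x0, [sp]           // outgoing stack argument stored directly
///   bl  callee
///
/// whereas otherwise each call site would be bracketed by its own
/// "sub sp, sp, #N" / "add sp, sp, #N" pair.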
273 bool
AArch64FrameLowering::hasReservedCallFrame(const MachineFunction &MF) const {
275 return !MF.getFrameInfo().hasVarSizedObjects();
276 }
277
MachineBasicBlock::iterator AArch64FrameLowering::eliminateCallFramePseudoInstr(
279 MachineFunction &MF, MachineBasicBlock &MBB,
280 MachineBasicBlock::iterator I) const {
281 const AArch64InstrInfo *TII =
282 static_cast<const AArch64InstrInfo *>(MF.getSubtarget().getInstrInfo());
283 DebugLoc DL = I->getDebugLoc();
284 unsigned Opc = I->getOpcode();
285 bool IsDestroy = Opc == TII->getCallFrameDestroyOpcode();
286 uint64_t CalleePopAmount = IsDestroy ? I->getOperand(1).getImm() : 0;
287
288 if (!hasReservedCallFrame(MF)) {
289 unsigned Align = getStackAlignment();
290
291 int64_t Amount = I->getOperand(0).getImm();
292 Amount = alignTo(Amount, Align);
293 if (!IsDestroy)
294 Amount = -Amount;
295
296 // N.b. if CalleePopAmount is valid but zero (i.e. callee would pop, but it
297 // doesn't have to pop anything), then the first operand will be zero too so
298 // this adjustment is a no-op.
299 if (CalleePopAmount == 0) {
300 // FIXME: in-function stack adjustment for calls is limited to 24-bits
301 // because there's no guaranteed temporary register available.
302 //
303 // ADD/SUB (immediate) has only LSL #0 and LSL #12 available.
304 // 1) For offset <= 12-bit, we use LSL #0
305 // 2) For 12-bit <= offset <= 24-bit, we use two instructions. One uses
306 // LSL #0, and the other uses LSL #12.
307 //
308 // Most call frames will be allocated at the start of a function so
309 // this is OK, but it is a limitation that needs dealing with.
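//
// As an illustration of the two-instruction case (emitFrameOffset performs
// the actual expansion), an adjustment of 20000 bytes could be split as:
//
//   sub sp, sp, #4, lsl #12    // 4 * 4096 = 16384
//   sub sp, sp, #3616          // 16384 + 3616 = 20000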
310 assert(Amount > -0xffffff && Amount < 0xffffff && "call frame too large");
311 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP, {Amount, MVT::i8},
312 TII);
313 }
314 } else if (CalleePopAmount != 0) {
315 // If the calling convention demands that the callee pops arguments from the
316 // stack, we want to add it back if we have a reserved call frame.
317 assert(CalleePopAmount < 0xffffff && "call frame too large");
318 emitFrameOffset(MBB, I, DL, AArch64::SP, AArch64::SP,
319 {-(int64_t)CalleePopAmount, MVT::i8}, TII);
320 }
321 return MBB.erase(I);
322 }
323
static bool ShouldSignReturnAddress(MachineFunction &MF) {
// The function should be signed in the following situations:
// - sign-return-address=all
// - sign-return-address=non-leaf and the function spills the LR
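//
// For reference, these values typically originate from IR function attributes
// such as (a hypothetical example):
//
//   define void @f() "sign-return-address"="non-leaf"
//                    "sign-return-address-key"="a_key" { ... }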
328
329 const Function &F = MF.getFunction();
330 if (!F.hasFnAttribute("sign-return-address"))
331 return false;
332
333 StringRef Scope = F.getFnAttribute("sign-return-address").getValueAsString();
334 if (Scope.equals("none"))
335 return false;
336
337 if (Scope.equals("all"))
338 return true;
339
340 assert(Scope.equals("non-leaf") && "Expected all, none or non-leaf");
341
342 for (const auto &Info : MF.getFrameInfo().getCalleeSavedInfo())
343 if (Info.getReg() == AArch64::LR)
344 return true;
345
346 return false;
347 }
348
void AArch64FrameLowering::emitCalleeSavedFrameMoves(
350 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI) const {
351 MachineFunction &MF = *MBB.getParent();
352 MachineFrameInfo &MFI = MF.getFrameInfo();
353 const TargetSubtargetInfo &STI = MF.getSubtarget();
354 const MCRegisterInfo *MRI = STI.getRegisterInfo();
355 const TargetInstrInfo *TII = STI.getInstrInfo();
356 DebugLoc DL = MBB.findDebugLoc(MBBI);
357
358 // Add callee saved registers to move list.
359 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
360 if (CSI.empty())
361 return;
362
363 for (const auto &Info : CSI) {
364 unsigned Reg = Info.getReg();
365 int64_t Offset =
366 MFI.getObjectOffset(Info.getFrameIdx()) - getOffsetOfLocalArea();
367 unsigned DwarfReg = MRI->getDwarfRegNum(Reg, true);
368 unsigned CFIIndex = MF.addFrameInst(
369 MCCFIInstruction::createOffset(nullptr, DwarfReg, Offset));
370 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
371 .addCFIIndex(CFIIndex)
372 .setMIFlags(MachineInstr::FrameSetup);
373 }
374 }
375
376 // Find a scratch register that we can use at the start of the prologue to
377 // re-align the stack pointer. We avoid using callee-save registers since they
378 // may appear to be free when this is called from canUseAsPrologue (during
379 // shrink wrapping), but then no longer be free when this is called from
380 // emitPrologue.
381 //
382 // FIXME: This is a bit conservative, since in the above case we could use one
383 // of the callee-save registers as a scratch temp to re-align the stack pointer,
384 // but we would then have to make sure that we were in fact saving at least one
385 // callee-save register in the prologue, which is additional complexity that
386 // doesn't seem worth the benefit.
static unsigned findScratchNonCalleeSaveRegister(MachineBasicBlock *MBB) {
388 MachineFunction *MF = MBB->getParent();
389
390 // If MBB is an entry block, use X9 as the scratch register
391 if (&MF->front() == MBB)
392 return AArch64::X9;
393
394 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
395 const AArch64RegisterInfo &TRI = *Subtarget.getRegisterInfo();
396 LivePhysRegs LiveRegs(TRI);
397 LiveRegs.addLiveIns(*MBB);
398
399 // Mark callee saved registers as used so we will not choose them.
400 const MCPhysReg *CSRegs = MF->getRegInfo().getCalleeSavedRegs();
401 for (unsigned i = 0; CSRegs[i]; ++i)
402 LiveRegs.addReg(CSRegs[i]);
403
404 // Prefer X9 since it was historically used for the prologue scratch reg.
405 const MachineRegisterInfo &MRI = MF->getRegInfo();
406 if (LiveRegs.available(MRI, AArch64::X9))
407 return AArch64::X9;
408
409 for (unsigned Reg : AArch64::GPR64RegClass) {
410 if (LiveRegs.available(MRI, Reg))
411 return Reg;
412 }
413 return AArch64::NoRegister;
414 }
415
bool AArch64FrameLowering::canUseAsPrologue(
417 const MachineBasicBlock &MBB) const {
418 const MachineFunction *MF = MBB.getParent();
419 MachineBasicBlock *TmpMBB = const_cast<MachineBasicBlock *>(&MBB);
420 const AArch64Subtarget &Subtarget = MF->getSubtarget<AArch64Subtarget>();
421 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
422
423 // Don't need a scratch register if we're not going to re-align the stack.
424 if (!RegInfo->needsStackRealignment(*MF))
425 return true;
426 // Otherwise, we can use any block as long as it has a scratch register
427 // available.
428 return findScratchNonCalleeSaveRegister(TmpMBB) != AArch64::NoRegister;
429 }
430
static bool windowsRequiresStackProbe(MachineFunction &MF,
432 uint64_t StackSizeInBytes) {
433 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
434 if (!Subtarget.isTargetWindows())
435 return false;
436 const Function &F = MF.getFunction();
437 // TODO: When implementing stack protectors, take that into account
438 // for the probe threshold.
439 unsigned StackProbeSize = 4096;
440 if (F.hasFnAttribute("stack-probe-size"))
441 F.getFnAttribute("stack-probe-size")
442 .getValueAsString()
443 .getAsInteger(0, StackProbeSize);
444 return (StackSizeInBytes >= StackProbeSize) &&
445 !F.hasFnAttribute("no-stack-arg-probe");
446 }
447
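// Decide whether the callee-save spill/restore SP adjustment and the local
// stack allocation can be folded into a single SP update. A rough sketch,
// assuming a 16-byte callee-save area and 32 bytes of locals; the combined
// form is:
//
//   sub sp, sp, #48             // one SP update for CSRs + locals
//   stp x29, x30, [sp, #32]     // CSR slots addressed above the locals
//
// while the separate form is:
//
//   stp x29, x30, [sp, #-16]!   // CSR area allocated by the store itself
//   sub sp, sp, #32             // locals allocated afterwards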
bool AArch64FrameLowering::shouldCombineCSRLocalStackBump(
449 MachineFunction &MF, uint64_t StackBumpBytes) const {
450 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
451 const MachineFrameInfo &MFI = MF.getFrameInfo();
452 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
453 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
454
455 if (AFI->getLocalStackSize() == 0)
456 return false;
457
458 // 512 is the maximum immediate for stp/ldp that will be used for
459 // callee-save save/restores
460 if (StackBumpBytes >= 512 || windowsRequiresStackProbe(MF, StackBumpBytes))
461 return false;
462
463 if (MFI.hasVarSizedObjects())
464 return false;
465
466 if (RegInfo->needsStackRealignment(MF))
467 return false;
468
469 // This isn't strictly necessary, but it simplifies things a bit since the
470 // current RedZone handling code assumes the SP is adjusted by the
471 // callee-save save/restore code.
472 if (canUseRedZone(MF))
473 return false;
474
475 // When there is an SVE area on the stack, always allocate the
476 // callee-saves and spills/locals separately.
477 if (getSVEStackSize(MF))
478 return false;
479
480 return true;
481 }
482
483 // Given a load or a store instruction, generate an appropriate unwinding SEH
484 // code on Windows.
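// As a sketch of the intended mapping (assuming x19/x20 are the registers
// being saved), a prologue store such as
//
//   stp x19, x20, [sp, #-32]!
//
// gets a SEH_SaveRegP_X pseudo inserted after it, which is later printed as
// the unwind directive
//
//   .seh_save_regp_x x19, 32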
static MachineBasicBlock::iterator InsertSEH(MachineBasicBlock::iterator MBBI,
486 const TargetInstrInfo &TII,
487 MachineInstr::MIFlag Flag) {
488 unsigned Opc = MBBI->getOpcode();
489 MachineBasicBlock *MBB = MBBI->getParent();
490 MachineFunction &MF = *MBB->getParent();
491 DebugLoc DL = MBBI->getDebugLoc();
492 unsigned ImmIdx = MBBI->getNumOperands() - 1;
493 int Imm = MBBI->getOperand(ImmIdx).getImm();
494 MachineInstrBuilder MIB;
495 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
496 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
497
498 switch (Opc) {
499 default:
500 llvm_unreachable("No SEH Opcode for this instruction");
501 case AArch64::LDPDpost:
502 Imm = -Imm;
503 LLVM_FALLTHROUGH;
504 case AArch64::STPDpre: {
505 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
506 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(2).getReg());
507 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP_X))
508 .addImm(Reg0)
509 .addImm(Reg1)
510 .addImm(Imm * 8)
511 .setMIFlag(Flag);
512 break;
513 }
514 case AArch64::LDPXpost:
515 Imm = -Imm;
516 LLVM_FALLTHROUGH;
517 case AArch64::STPXpre: {
518 Register Reg0 = MBBI->getOperand(1).getReg();
519 Register Reg1 = MBBI->getOperand(2).getReg();
520 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
521 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR_X))
522 .addImm(Imm * 8)
523 .setMIFlag(Flag);
524 else
525 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP_X))
526 .addImm(RegInfo->getSEHRegNum(Reg0))
527 .addImm(RegInfo->getSEHRegNum(Reg1))
528 .addImm(Imm * 8)
529 .setMIFlag(Flag);
530 break;
531 }
532 case AArch64::LDRDpost:
533 Imm = -Imm;
534 LLVM_FALLTHROUGH;
535 case AArch64::STRDpre: {
536 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
537 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg_X))
538 .addImm(Reg)
539 .addImm(Imm)
540 .setMIFlag(Flag);
541 break;
542 }
543 case AArch64::LDRXpost:
544 Imm = -Imm;
545 LLVM_FALLTHROUGH;
546 case AArch64::STRXpre: {
547 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
548 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg_X))
549 .addImm(Reg)
550 .addImm(Imm)
551 .setMIFlag(Flag);
552 break;
553 }
554 case AArch64::STPDi:
555 case AArch64::LDPDi: {
556 unsigned Reg0 = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
557 unsigned Reg1 = RegInfo->getSEHRegNum(MBBI->getOperand(1).getReg());
558 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFRegP))
559 .addImm(Reg0)
560 .addImm(Reg1)
561 .addImm(Imm * 8)
562 .setMIFlag(Flag);
563 break;
564 }
565 case AArch64::STPXi:
566 case AArch64::LDPXi: {
567 Register Reg0 = MBBI->getOperand(0).getReg();
568 Register Reg1 = MBBI->getOperand(1).getReg();
569 if (Reg0 == AArch64::FP && Reg1 == AArch64::LR)
570 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFPLR))
571 .addImm(Imm * 8)
572 .setMIFlag(Flag);
573 else
574 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveRegP))
575 .addImm(RegInfo->getSEHRegNum(Reg0))
576 .addImm(RegInfo->getSEHRegNum(Reg1))
577 .addImm(Imm * 8)
578 .setMIFlag(Flag);
579 break;
580 }
581 case AArch64::STRXui:
582 case AArch64::LDRXui: {
583 int Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
584 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveReg))
585 .addImm(Reg)
586 .addImm(Imm * 8)
587 .setMIFlag(Flag);
588 break;
589 }
590 case AArch64::STRDui:
591 case AArch64::LDRDui: {
592 unsigned Reg = RegInfo->getSEHRegNum(MBBI->getOperand(0).getReg());
593 MIB = BuildMI(MF, DL, TII.get(AArch64::SEH_SaveFReg))
594 .addImm(Reg)
595 .addImm(Imm * 8)
596 .setMIFlag(Flag);
597 break;
598 }
599 }
600 auto I = MBB->insertAfter(MBBI, MIB);
601 return I;
602 }
603
604 // Fix up the SEH opcode associated with the save/restore instruction.
static void fixupSEHOpcode(MachineBasicBlock::iterator MBBI,
606 unsigned LocalStackSize) {
607 MachineOperand *ImmOpnd = nullptr;
608 unsigned ImmIdx = MBBI->getNumOperands() - 1;
609 switch (MBBI->getOpcode()) {
610 default:
611 llvm_unreachable("Fix the offset in the SEH instruction");
612 case AArch64::SEH_SaveFPLR:
613 case AArch64::SEH_SaveRegP:
614 case AArch64::SEH_SaveReg:
615 case AArch64::SEH_SaveFRegP:
616 case AArch64::SEH_SaveFReg:
617 ImmOpnd = &MBBI->getOperand(ImmIdx);
618 break;
619 }
620 if (ImmOpnd)
621 ImmOpnd->setImm(ImmOpnd->getImm() + LocalStackSize);
622 }
623
624 // Convert callee-save register save/restore instruction to do stack pointer
625 // decrement/increment to allocate/deallocate the callee-save stack area by
626 // converting store/load to use pre/post increment version.
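// As a rough illustration (assuming a 16-byte callee-save area), the first
// callee-save store emitted by spillCalleeSavedRegisters,
//
//   stp x29, x30, [sp, #0]
//
// is rewritten so that it also performs the allocation,
//
//   stp x29, x30, [sp, #-16]!
//
// and symmetrically the last epilogue restore becomes a post-indexed
// "ldp x29, x30, [sp], #16".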
static MachineBasicBlock::iterator convertCalleeSaveRestoreToSPPrePostIncDec(
628 MachineBasicBlock &MBB, MachineBasicBlock::iterator MBBI,
629 const DebugLoc &DL, const TargetInstrInfo *TII, int CSStackSizeInc,
630 bool NeedsWinCFI, bool *HasWinCFI, bool InProlog = true) {
631 // Ignore instructions that do not operate on SP, i.e. shadow call stack
632 // instructions and associated CFI instruction.
633 while (MBBI->getOpcode() == AArch64::STRXpost ||
634 MBBI->getOpcode() == AArch64::LDRXpre ||
635 MBBI->getOpcode() == AArch64::CFI_INSTRUCTION) {
636 if (MBBI->getOpcode() != AArch64::CFI_INSTRUCTION)
637 assert(MBBI->getOperand(0).getReg() != AArch64::SP);
638 ++MBBI;
639 }
640 unsigned NewOpc;
641 int Scale = 1;
642 switch (MBBI->getOpcode()) {
643 default:
644 llvm_unreachable("Unexpected callee-save save/restore opcode!");
645 case AArch64::STPXi:
646 NewOpc = AArch64::STPXpre;
647 Scale = 8;
648 break;
649 case AArch64::STPDi:
650 NewOpc = AArch64::STPDpre;
651 Scale = 8;
652 break;
653 case AArch64::STPQi:
654 NewOpc = AArch64::STPQpre;
655 Scale = 16;
656 break;
657 case AArch64::STRXui:
658 NewOpc = AArch64::STRXpre;
659 break;
660 case AArch64::STRDui:
661 NewOpc = AArch64::STRDpre;
662 break;
663 case AArch64::STRQui:
664 NewOpc = AArch64::STRQpre;
665 break;
666 case AArch64::LDPXi:
667 NewOpc = AArch64::LDPXpost;
668 Scale = 8;
669 break;
670 case AArch64::LDPDi:
671 NewOpc = AArch64::LDPDpost;
672 Scale = 8;
673 break;
674 case AArch64::LDPQi:
675 NewOpc = AArch64::LDPQpost;
676 Scale = 16;
677 break;
678 case AArch64::LDRXui:
679 NewOpc = AArch64::LDRXpost;
680 break;
681 case AArch64::LDRDui:
682 NewOpc = AArch64::LDRDpost;
683 break;
684 case AArch64::LDRQui:
685 NewOpc = AArch64::LDRQpost;
686 break;
687 }
688 // Get rid of the SEH code associated with the old instruction.
689 if (NeedsWinCFI) {
690 auto SEH = std::next(MBBI);
691 if (AArch64InstrInfo::isSEHInstruction(*SEH))
692 SEH->eraseFromParent();
693 }
694
695 MachineInstrBuilder MIB = BuildMI(MBB, MBBI, DL, TII->get(NewOpc));
696 MIB.addReg(AArch64::SP, RegState::Define);
697
698 // Copy all operands other than the immediate offset.
699 unsigned OpndIdx = 0;
700 for (unsigned OpndEnd = MBBI->getNumOperands() - 1; OpndIdx < OpndEnd;
701 ++OpndIdx)
702 MIB.add(MBBI->getOperand(OpndIdx));
703
704 assert(MBBI->getOperand(OpndIdx).getImm() == 0 &&
705 "Unexpected immediate offset in first/last callee-save save/restore "
706 "instruction!");
707 assert(MBBI->getOperand(OpndIdx - 1).getReg() == AArch64::SP &&
708 "Unexpected base register in callee-save save/restore instruction!");
709 assert(CSStackSizeInc % Scale == 0);
710 MIB.addImm(CSStackSizeInc / Scale);
711
712 MIB.setMIFlags(MBBI->getFlags());
713 MIB.setMemRefs(MBBI->memoperands());
714
715 // Generate a new SEH code that corresponds to the new instruction.
716 if (NeedsWinCFI) {
717 *HasWinCFI = true;
718 InsertSEH(*MIB, *TII,
719 InProlog ? MachineInstr::FrameSetup : MachineInstr::FrameDestroy);
720 }
721
722 return std::prev(MBB.erase(MBBI));
723 }
724
725 // Fixup callee-save register save/restore instructions to take into account
726 // combined SP bump by adding the local stack size to the stack offsets.
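// As a sketch (assuming LocalStackSize is 64 and a combined SP bump), a
// callee-save store
//
//   stp x29, x30, [sp, #0]
//
// is rewritten to address its slots above the locals as
//
//   stp x29, x30, [sp, #64]    // scaled immediate adjusted by 64 / 8 = 8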
static void fixupCalleeSaveRestoreStackOffset(MachineInstr &MI,
728 uint64_t LocalStackSize,
729 bool NeedsWinCFI,
730 bool *HasWinCFI) {
731 if (AArch64InstrInfo::isSEHInstruction(MI))
732 return;
733
734 unsigned Opc = MI.getOpcode();
735
736 // Ignore instructions that do not operate on SP, i.e. shadow call stack
737 // instructions and associated CFI instruction.
738 if (Opc == AArch64::STRXpost || Opc == AArch64::LDRXpre ||
739 Opc == AArch64::CFI_INSTRUCTION) {
740 if (Opc != AArch64::CFI_INSTRUCTION)
741 assert(MI.getOperand(0).getReg() != AArch64::SP);
742 return;
743 }
744
745 unsigned Scale;
746 switch (Opc) {
747 case AArch64::STPXi:
748 case AArch64::STRXui:
749 case AArch64::STPDi:
750 case AArch64::STRDui:
751 case AArch64::LDPXi:
752 case AArch64::LDRXui:
753 case AArch64::LDPDi:
754 case AArch64::LDRDui:
755 Scale = 8;
756 break;
757 case AArch64::STPQi:
758 case AArch64::STRQui:
759 case AArch64::LDPQi:
760 case AArch64::LDRQui:
761 Scale = 16;
762 break;
763 default:
764 llvm_unreachable("Unexpected callee-save save/restore opcode!");
765 }
766
767 unsigned OffsetIdx = MI.getNumExplicitOperands() - 1;
768 assert(MI.getOperand(OffsetIdx - 1).getReg() == AArch64::SP &&
769 "Unexpected base register in callee-save save/restore instruction!");
770 // Last operand is immediate offset that needs fixing.
771 MachineOperand &OffsetOpnd = MI.getOperand(OffsetIdx);
772 // All generated opcodes have scaled offsets.
773 assert(LocalStackSize % Scale == 0);
774 OffsetOpnd.setImm(OffsetOpnd.getImm() + LocalStackSize / Scale);
775
776 if (NeedsWinCFI) {
777 *HasWinCFI = true;
778 auto MBBI = std::next(MachineBasicBlock::iterator(MI));
779 assert(MBBI != MI.getParent()->end() && "Expecting a valid instruction");
780 assert(AArch64InstrInfo::isSEHInstruction(*MBBI) &&
781 "Expecting a SEH instruction");
782 fixupSEHOpcode(MBBI, LocalStackSize);
783 }
784 }
785
static void adaptForLdStOpt(MachineBasicBlock &MBB,
787 MachineBasicBlock::iterator FirstSPPopI,
788 MachineBasicBlock::iterator LastPopI) {
789 // Sometimes (when we restore in the same order as we save), we can end up
790 // with code like this:
791 //
792 // ldp x26, x25, [sp]
793 // ldp x24, x23, [sp, #16]
794 // ldp x22, x21, [sp, #32]
795 // ldp x20, x19, [sp, #48]
796 // add sp, sp, #64
797 //
798 // In this case, it is always better to put the first ldp at the end, so
799 // that the load-store optimizer can run and merge the ldp and the add into
800 // a post-index ldp.
801 // If we managed to grab the first pop instruction, move it to the end.
802 if (ReverseCSRRestoreSeq)
803 MBB.splice(FirstSPPopI, &MBB, LastPopI);
804 // We should end up with something like this now:
805 //
806 // ldp x24, x23, [sp, #16]
807 // ldp x22, x21, [sp, #32]
808 // ldp x20, x19, [sp, #48]
809 // ldp x26, x25, [sp]
810 // add sp, sp, #64
811 //
812 // and the load-store optimizer can merge the last two instructions into:
813 //
814 // ldp x26, x25, [sp], #64
815 //
816 }
817
static bool ShouldSignWithAKey(MachineFunction &MF) {
819 const Function &F = MF.getFunction();
820 if (!F.hasFnAttribute("sign-return-address-key"))
821 return true;
822
823 const StringRef Key =
824 F.getFnAttribute("sign-return-address-key").getValueAsString();
825 assert(Key.equals_lower("a_key") || Key.equals_lower("b_key"));
826 return Key.equals_lower("a_key");
827 }
828
static bool needsWinCFI(const MachineFunction &MF) {
830 const Function &F = MF.getFunction();
831 return MF.getTarget().getMCAsmInfo()->usesWindowsCFI() &&
832 F.needsUnwindTableEntry();
833 }
834
static bool isTargetDarwin(const MachineFunction &MF) {
836 return MF.getSubtarget<AArch64Subtarget>().isTargetDarwin();
837 }
838
static bool isTargetWindows(const MachineFunction &MF) {
840 return MF.getSubtarget<AArch64Subtarget>().isTargetWindows();
841 }
842
843 // Convenience function to determine whether I is an SVE callee save.
static bool IsSVECalleeSave(MachineBasicBlock::iterator I) {
845 switch (I->getOpcode()) {
846 default:
847 return false;
848 case AArch64::STR_ZXI:
849 case AArch64::STR_PXI:
850 case AArch64::LDR_ZXI:
851 case AArch64::LDR_PXI:
852 return I->getFlag(MachineInstr::FrameSetup) ||
853 I->getFlag(MachineInstr::FrameDestroy);
854 }
855 }
856
void AArch64FrameLowering::emitPrologue(MachineFunction &MF,
858 MachineBasicBlock &MBB) const {
859 MachineBasicBlock::iterator MBBI = MBB.begin();
860 const MachineFrameInfo &MFI = MF.getFrameInfo();
861 const Function &F = MF.getFunction();
862 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
863 const AArch64RegisterInfo *RegInfo = Subtarget.getRegisterInfo();
864 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
865 MachineModuleInfo &MMI = MF.getMMI();
866 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
867 bool needsFrameMoves =
868 MF.needsFrameMoves() && !MF.getTarget().getMCAsmInfo()->usesWindowsCFI();
869 bool HasFP = hasFP(MF);
870 bool NeedsWinCFI = needsWinCFI(MF);
871 bool HasWinCFI = false;
872 auto Cleanup = make_scope_exit([&]() { MF.setHasWinCFI(HasWinCFI); });
873
874 bool IsFunclet = MBB.isEHFuncletEntry();
875
876 // At this point, we're going to decide whether or not the function uses a
877 // redzone. In most cases, the function doesn't have a redzone so let's
878 // assume that's false and set it to true in the case that there's a redzone.
879 AFI->setHasRedZone(false);
880
881 // Debug location must be unknown since the first debug location is used
882 // to determine the end of the prologue.
883 DebugLoc DL;
884
885 if (ShouldSignReturnAddress(MF)) {
886 if (ShouldSignWithAKey(MF))
887 BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIASP))
888 .setMIFlag(MachineInstr::FrameSetup);
889 else {
890 BuildMI(MBB, MBBI, DL, TII->get(AArch64::EMITBKEY))
891 .setMIFlag(MachineInstr::FrameSetup);
892 BuildMI(MBB, MBBI, DL, TII->get(AArch64::PACIBSP))
893 .setMIFlag(MachineInstr::FrameSetup);
894 }
895
896 unsigned CFIIndex =
897 MF.addFrameInst(MCCFIInstruction::createNegateRAState(nullptr));
898 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
899 .addCFIIndex(CFIIndex)
900 .setMIFlags(MachineInstr::FrameSetup);
901 }
902
903 // All calls are tail calls in GHC calling conv, and functions have no
904 // prologue/epilogue.
905 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
906 return;
907
908 // Set tagged base pointer to the bottom of the stack frame.
909 // Ideally it should match SP value after prologue.
910 AFI->setTaggedBasePointerOffset(MFI.getStackSize());
911
912 const StackOffset &SVEStackSize = getSVEStackSize(MF);
913
914 // getStackSize() includes all the locals in its size calculation. We don't
915 // include these locals when computing the stack size of a funclet, as they
916 // are allocated in the parent's stack frame and accessed via the frame
917 // pointer from the funclet. We only save the callee saved registers in the
918 // funclet, which are really the callee saved registers of the parent
919 // function, including the funclet.
920 int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
921 : MFI.getStackSize();
922 if (!AFI->hasStackFrame() && !windowsRequiresStackProbe(MF, NumBytes)) {
923 assert(!HasFP && "unexpected function without stack frame but with FP");
924 assert(!SVEStackSize &&
925 "unexpected function without stack frame but with SVE objects");
926 // All of the stack allocation is for locals.
927 AFI->setLocalStackSize(NumBytes);
928 if (!NumBytes)
929 return;
930 // REDZONE: If the stack size is less than 128 bytes, we don't need
931 // to actually allocate.
932 if (canUseRedZone(MF)) {
933 AFI->setHasRedZone(true);
934 ++NumRedZoneFunctions;
935 } else {
936 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
937 {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup,
938 false, NeedsWinCFI, &HasWinCFI);
939 if (!NeedsWinCFI && needsFrameMoves) {
940 // Label used to tie together the PROLOG_LABEL and the MachineMoves.
941 MCSymbol *FrameLabel = MMI.getContext().createTempSymbol();
942 // Encode the stack size of the leaf function.
943 unsigned CFIIndex = MF.addFrameInst(
944 MCCFIInstruction::createDefCfaOffset(FrameLabel, -NumBytes));
945 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
946 .addCFIIndex(CFIIndex)
947 .setMIFlags(MachineInstr::FrameSetup);
948 }
949 }
950
951 if (NeedsWinCFI) {
952 HasWinCFI = true;
953 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
954 .setMIFlag(MachineInstr::FrameSetup);
955 }
956
957 return;
958 }
959
960 bool IsWin64 =
961 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
962 // Var args are accounted for in the containing function, so don't
963 // include them for funclets.
964 unsigned FixedObject = (IsWin64 && !IsFunclet) ?
965 alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
966
967 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
968 // All of the remaining stack allocations are for locals.
969 AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
970 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
971 if (CombineSPBump) {
972 assert(!SVEStackSize && "Cannot combine SP bump with SVE");
973 emitFrameOffset(MBB, MBBI, DL, AArch64::SP, AArch64::SP,
974 {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup, false,
975 NeedsWinCFI, &HasWinCFI);
976 NumBytes = 0;
977 } else if (PrologueSaveSize != 0) {
978 MBBI = convertCalleeSaveRestoreToSPPrePostIncDec(
979 MBB, MBBI, DL, TII, -PrologueSaveSize, NeedsWinCFI, &HasWinCFI);
980 NumBytes -= PrologueSaveSize;
981 }
982 assert(NumBytes >= 0 && "Negative stack allocation size!?");
983
984 // Move past the saves of the callee-saved registers, fixing up the offsets
985 // and pre-inc if we decided to combine the callee-save and local stack
986 // pointer bump above.
987 MachineBasicBlock::iterator End = MBB.end();
988 while (MBBI != End && MBBI->getFlag(MachineInstr::FrameSetup) &&
989 !IsSVECalleeSave(MBBI)) {
990 if (CombineSPBump)
991 fixupCalleeSaveRestoreStackOffset(*MBBI, AFI->getLocalStackSize(),
992 NeedsWinCFI, &HasWinCFI);
993 ++MBBI;
994 }
995
996 // The code below is not applicable to funclets. We have emitted all the SEH
997 // opcodes that we needed to emit. The FP and BP belong to the containing
998 // function.
999 if (IsFunclet) {
1000 if (NeedsWinCFI) {
1001 HasWinCFI = true;
1002 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
1003 .setMIFlag(MachineInstr::FrameSetup);
1004 }
1005
1006 // SEH funclets are passed the frame pointer in X1. If the parent
1007 // function uses the base register, then the base register is used
1008 // directly, and is not retrieved from X1.
1009 if (F.hasPersonalityFn()) {
1010 EHPersonality Per = classifyEHPersonality(F.getPersonalityFn());
1011 if (isAsynchronousEHPersonality(Per)) {
1012 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::COPY), AArch64::FP)
1013 .addReg(AArch64::X1).setMIFlag(MachineInstr::FrameSetup);
1014 MBB.addLiveIn(AArch64::X1);
1015 }
1016 }
1017
1018 return;
1019 }
1020
1021 if (HasFP) {
1022 // Only set up FP if we actually need to.
1023 int64_t FPOffset = isTargetDarwin(MF) ? (AFI->getCalleeSavedStackSize() - 16) : 0;
1024
1025 if (CombineSPBump)
1026 FPOffset += AFI->getLocalStackSize();
1027
1028 // Issue sub fp, sp, FPOffset or
1029 // mov fp,sp when FPOffset is zero.
1030 // Note: All stores of callee-saved registers are marked as "FrameSetup".
1031 // This code marks the instruction(s) that set the FP also.
1032 emitFrameOffset(MBB, MBBI, DL, AArch64::FP, AArch64::SP,
1033 {FPOffset, MVT::i8}, TII, MachineInstr::FrameSetup, false,
1034 NeedsWinCFI, &HasWinCFI);
1035 }
1036
1037 if (windowsRequiresStackProbe(MF, NumBytes)) {
1038 uint64_t NumWords = NumBytes >> 4;
1039 if (NeedsWinCFI) {
1040 HasWinCFI = true;
1041 // alloc_l can hold at most 256MB, so assume that NumBytes doesn't
1042 // exceed this amount. We need to move at most 2^24 - 1 into x15.
// This is at most two instructions, MOVZ followed by MOVK.
1044 // TODO: Fix to use multiple stack alloc unwind codes for stacks
1045 // exceeding 256MB in size.
1046 if (NumBytes >= (1 << 28))
1047 report_fatal_error("Stack size cannot exceed 256MB for stack "
1048 "unwinding purposes");
1049
1050 uint32_t LowNumWords = NumWords & 0xFFFF;
1051 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVZXi), AArch64::X15)
1052 .addImm(LowNumWords)
1053 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 0))
1054 .setMIFlag(MachineInstr::FrameSetup);
1055 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1056 .setMIFlag(MachineInstr::FrameSetup);
1057 if ((NumWords & 0xFFFF0000) != 0) {
1058 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVKXi), AArch64::X15)
1059 .addReg(AArch64::X15)
1060 .addImm((NumWords & 0xFFFF0000) >> 16) // High half
1061 .addImm(AArch64_AM::getShifterImm(AArch64_AM::LSL, 16))
1062 .setMIFlag(MachineInstr::FrameSetup);
1063 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1064 .setMIFlag(MachineInstr::FrameSetup);
1065 }
1066 } else {
1067 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVi64imm), AArch64::X15)
1068 .addImm(NumWords)
1069 .setMIFlags(MachineInstr::FrameSetup);
1070 }
1071
1072 switch (MF.getTarget().getCodeModel()) {
1073 case CodeModel::Tiny:
1074 case CodeModel::Small:
1075 case CodeModel::Medium:
1076 case CodeModel::Kernel:
1077 BuildMI(MBB, MBBI, DL, TII->get(AArch64::BL))
1078 .addExternalSymbol("__chkstk")
1079 .addReg(AArch64::X15, RegState::Implicit)
1080 .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
1081 .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
1082 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
1083 .setMIFlags(MachineInstr::FrameSetup);
1084 if (NeedsWinCFI) {
1085 HasWinCFI = true;
1086 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1087 .setMIFlag(MachineInstr::FrameSetup);
1088 }
1089 break;
1090 case CodeModel::Large:
1091 BuildMI(MBB, MBBI, DL, TII->get(AArch64::MOVaddrEXT))
1092 .addReg(AArch64::X16, RegState::Define)
1093 .addExternalSymbol("__chkstk")
1094 .addExternalSymbol("__chkstk")
1095 .setMIFlags(MachineInstr::FrameSetup);
1096 if (NeedsWinCFI) {
1097 HasWinCFI = true;
1098 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1099 .setMIFlag(MachineInstr::FrameSetup);
1100 }
1101
1102 BuildMI(MBB, MBBI, DL, TII->get(AArch64::BLR))
1103 .addReg(AArch64::X16, RegState::Kill)
1104 .addReg(AArch64::X15, RegState::Implicit | RegState::Define)
1105 .addReg(AArch64::X16, RegState::Implicit | RegState::Define | RegState::Dead)
1106 .addReg(AArch64::X17, RegState::Implicit | RegState::Define | RegState::Dead)
1107 .addReg(AArch64::NZCV, RegState::Implicit | RegState::Define | RegState::Dead)
1108 .setMIFlags(MachineInstr::FrameSetup);
1109 if (NeedsWinCFI) {
1110 HasWinCFI = true;
1111 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1112 .setMIFlag(MachineInstr::FrameSetup);
1113 }
1114 break;
1115 }
1116
1117 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SUBXrx64), AArch64::SP)
1118 .addReg(AArch64::SP, RegState::Kill)
1119 .addReg(AArch64::X15, RegState::Kill)
1120 .addImm(AArch64_AM::getArithExtendImm(AArch64_AM::UXTX, 4))
1121 .setMIFlags(MachineInstr::FrameSetup);
1122 if (NeedsWinCFI) {
1123 HasWinCFI = true;
1124 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
1125 .addImm(NumBytes)
1126 .setMIFlag(MachineInstr::FrameSetup);
1127 }
1128 NumBytes = 0;
1129 }
1130
1131 StackOffset AllocateBefore = SVEStackSize, AllocateAfter = {};
1132 MachineBasicBlock::iterator CalleeSavesBegin = MBBI, CalleeSavesEnd = MBBI;
1133
1134 // Process the SVE callee-saves to determine what space needs to be
1135 // allocated.
1136 if (AFI->getSVECalleeSavedStackSize()) {
1137 // Find callee save instructions in frame.
1138 CalleeSavesBegin = MBBI;
1139 assert(IsSVECalleeSave(CalleeSavesBegin) && "Unexpected instruction");
1140 while (IsSVECalleeSave(MBBI) && MBBI != MBB.getFirstTerminator())
1141 ++MBBI;
1142 CalleeSavesEnd = MBBI;
1143
1144 int64_t OffsetToFirstCalleeSaveFromSP =
1145 MFI.getObjectOffset(AFI->getMaxSVECSFrameIndex());
1146 StackOffset OffsetToCalleeSavesFromSP =
1147 StackOffset(OffsetToFirstCalleeSaveFromSP, MVT::nxv1i8) + SVEStackSize;
1148 AllocateBefore -= OffsetToCalleeSavesFromSP;
1149 AllocateAfter = SVEStackSize - AllocateBefore;
1150 }
1151
1152 // Allocate space for the callee saves (if any).
1153 emitFrameOffset(MBB, CalleeSavesBegin, DL, AArch64::SP, AArch64::SP,
1154 -AllocateBefore, TII,
1155 MachineInstr::FrameSetup);
1156
1157 // Finally allocate remaining SVE stack space.
1158 emitFrameOffset(MBB, CalleeSavesEnd, DL, AArch64::SP, AArch64::SP,
1159 -AllocateAfter, TII,
1160 MachineInstr::FrameSetup);
1161
1162 // Allocate space for the rest of the frame.
1163 if (NumBytes) {
1164 const bool NeedsRealignment = RegInfo->needsStackRealignment(MF);
1165 unsigned scratchSPReg = AArch64::SP;
1166
1167 if (NeedsRealignment) {
1168 scratchSPReg = findScratchNonCalleeSaveRegister(&MBB);
1169 assert(scratchSPReg != AArch64::NoRegister);
1170 }
1171
1172 // If we're a leaf function, try using the red zone.
1173 if (!canUseRedZone(MF))
1174 // FIXME: in the case of dynamic re-alignment, NumBytes doesn't have
1175 // the correct value here, as NumBytes also includes padding bytes,
1176 // which shouldn't be counted here.
1177 emitFrameOffset(MBB, MBBI, DL, scratchSPReg, AArch64::SP,
1178 {-NumBytes, MVT::i8}, TII, MachineInstr::FrameSetup,
1179 false, NeedsWinCFI, &HasWinCFI);
1180
1181 if (NeedsRealignment) {
1182 const unsigned Alignment = MFI.getMaxAlignment();
1183 const unsigned NrBitsToZero = countTrailingZeros(Alignment);
1184 assert(NrBitsToZero > 1);
1185 assert(scratchSPReg != AArch64::SP);
1186
1187 // SUB X9, SP, NumBytes
// -- X9 is a temporary register, so it shouldn't contain any live data here;
// -- it is free to use. This is already produced by emitFrameOffset above.
1190 // AND SP, X9, 0b11111...0000
1191 // The logical immediates have a non-trivial encoding. The following
1192 // formula computes the encoded immediate with all ones but
1193 // NrBitsToZero zero bits as least significant bits.
1194 uint32_t andMaskEncoded = (1 << 12) // = N
1195 | ((64 - NrBitsToZero) << 6) // immr
1196 | ((64 - NrBitsToZero - 1) << 0); // imms
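// Worked example (illustrative): for a 32-byte alignment NrBitsToZero is 5,
// so andMaskEncoded = 0x1000 | (59 << 6) | 58 = 0x1efa, which the AND
// decodes as the 64-bit mask 0xffffffffffffffe0, i.e. the copy into SP
// clears the low 5 bits of the scratch register.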
1197
1198 BuildMI(MBB, MBBI, DL, TII->get(AArch64::ANDXri), AArch64::SP)
1199 .addReg(scratchSPReg, RegState::Kill)
1200 .addImm(andMaskEncoded);
1201 AFI->setStackRealigned(true);
1202 if (NeedsWinCFI) {
1203 HasWinCFI = true;
1204 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_StackAlloc))
1205 .addImm(NumBytes & andMaskEncoded)
1206 .setMIFlag(MachineInstr::FrameSetup);
1207 }
1208 }
1209 }
1210
1211 // If we need a base pointer, set it up here. It's whatever the value of the
1212 // stack pointer is at this point. Any variable size objects will be allocated
1213 // after this, so we can still use the base pointer to reference locals.
1214 //
1215 // FIXME: Clarify FrameSetup flags here.
1216 // Note: Use emitFrameOffset() like above for FP if the FrameSetup flag is
1217 // needed.
1218 if (RegInfo->hasBasePointer(MF)) {
1219 TII->copyPhysReg(MBB, MBBI, DL, RegInfo->getBaseRegister(), AArch64::SP,
1220 false);
1221 if (NeedsWinCFI) {
1222 HasWinCFI = true;
1223 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_Nop))
1224 .setMIFlag(MachineInstr::FrameSetup);
1225 }
1226 }
1227
1228 // The very last FrameSetup instruction indicates the end of prologue. Emit a
1229 // SEH opcode indicating the prologue end.
1230 if (NeedsWinCFI && HasWinCFI) {
1231 BuildMI(MBB, MBBI, DL, TII->get(AArch64::SEH_PrologEnd))
1232 .setMIFlag(MachineInstr::FrameSetup);
1233 }
1234
1235 if (needsFrameMoves) {
1236 const DataLayout &TD = MF.getDataLayout();
1237 const int StackGrowth = isTargetDarwin(MF)
1238 ? (2 * -TD.getPointerSize(0))
1239 : -AFI->getCalleeSavedStackSize();
1240 Register FramePtr = RegInfo->getFrameRegister(MF);
1241 // An example of the prologue:
1242 //
1243 // .globl __foo
1244 // .align 2
1245 // __foo:
1246 // Ltmp0:
1247 // .cfi_startproc
1248 // .cfi_personality 155, ___gxx_personality_v0
1249 // Leh_func_begin:
1250 // .cfi_lsda 16, Lexception33
1251 //
1252 // stp xa,bx, [sp, -#offset]!
1253 // ...
1254 // stp x28, x27, [sp, #offset-32]
1255 // stp fp, lr, [sp, #offset-16]
1256 // add fp, sp, #offset - 16
1257 // sub sp, sp, #1360
1258 //
1259 // The Stack:
1260 // +-------------------------------------------+
1261 // 10000 | ........ | ........ | ........ | ........ |
1262 // 10004 | ........ | ........ | ........ | ........ |
1263 // +-------------------------------------------+
1264 // 10008 | ........ | ........ | ........ | ........ |
1265 // 1000c | ........ | ........ | ........ | ........ |
1266 // +===========================================+
1267 // 10010 | X28 Register |
1268 // 10014 | X28 Register |
1269 // +-------------------------------------------+
1270 // 10018 | X27 Register |
1271 // 1001c | X27 Register |
1272 // +===========================================+
1273 // 10020 | Frame Pointer |
1274 // 10024 | Frame Pointer |
1275 // +-------------------------------------------+
1276 // 10028 | Link Register |
1277 // 1002c | Link Register |
1278 // +===========================================+
1279 // 10030 | ........ | ........ | ........ | ........ |
1280 // 10034 | ........ | ........ | ........ | ........ |
1281 // +-------------------------------------------+
1282 // 10038 | ........ | ........ | ........ | ........ |
1283 // 1003c | ........ | ........ | ........ | ........ |
1284 // +-------------------------------------------+
1285 //
1286 // [sp] = 10030 :: >>initial value<<
1287 // sp = 10020 :: stp fp, lr, [sp, #-16]!
1288 // fp = sp == 10020 :: mov fp, sp
1289 // [sp] == 10020 :: stp x28, x27, [sp, #-16]!
1290 // sp == 10010 :: >>final value<<
1291 //
1292 // The frame pointer (w29) points to address 10020. If we use an offset of
1293 // '16' from 'w29', we get the CFI offsets of -8 for w30, -16 for w29, -24
1294 // for w27, and -32 for w28:
1295 //
1296 // Ltmp1:
1297 // .cfi_def_cfa w29, 16
1298 // Ltmp2:
1299 // .cfi_offset w30, -8
1300 // Ltmp3:
1301 // .cfi_offset w29, -16
1302 // Ltmp4:
1303 // .cfi_offset w27, -24
1304 // Ltmp5:
1305 // .cfi_offset w28, -32
1306
1307 if (HasFP) {
1308 // Define the current CFA rule to use the provided FP.
1309 unsigned Reg = RegInfo->getDwarfRegNum(FramePtr, true);
1310 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createDefCfa(
1311 nullptr, Reg, StackGrowth - FixedObject));
1312 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
1313 .addCFIIndex(CFIIndex)
1314 .setMIFlags(MachineInstr::FrameSetup);
1315 } else {
1316 // Encode the stack size of the leaf function.
1317 unsigned CFIIndex = MF.addFrameInst(
1318 MCCFIInstruction::createDefCfaOffset(nullptr, -MFI.getStackSize()));
1319 BuildMI(MBB, MBBI, DL, TII->get(TargetOpcode::CFI_INSTRUCTION))
1320 .addCFIIndex(CFIIndex)
1321 .setMIFlags(MachineInstr::FrameSetup);
1322 }
1323
1324 // Now emit the moves for whatever callee saved regs we have (including FP,
1325 // LR if those are saved).
1326 emitCalleeSavedFrameMoves(MBB, MBBI);
1327 }
1328 }
1329
static void InsertReturnAddressAuth(MachineFunction &MF,
1331 MachineBasicBlock &MBB) {
1332 if (!ShouldSignReturnAddress(MF))
1333 return;
1334 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1335 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1336
1337 MachineBasicBlock::iterator MBBI = MBB.getFirstTerminator();
1338 DebugLoc DL;
1339 if (MBBI != MBB.end())
1340 DL = MBBI->getDebugLoc();
1341
// The AUTIASP instruction assembles to a hint instruction before v8.3a so
// this instruction can safely be used for any v8a architecture.
1344 // From v8.3a onwards there are optimised authenticate LR and return
1345 // instructions, namely RETA{A,B}, that can be used instead.
1346 if (Subtarget.hasV8_3aOps() && MBBI != MBB.end() &&
1347 MBBI->getOpcode() == AArch64::RET_ReallyLR) {
1348 BuildMI(MBB, MBBI, DL,
1349 TII->get(ShouldSignWithAKey(MF) ? AArch64::RETAA : AArch64::RETAB))
1350 .copyImplicitOps(*MBBI);
1351 MBB.erase(MBBI);
1352 } else {
1353 BuildMI(
1354 MBB, MBBI, DL,
1355 TII->get(ShouldSignWithAKey(MF) ? AArch64::AUTIASP : AArch64::AUTIBSP))
1356 .setMIFlag(MachineInstr::FrameDestroy);
1357 }
1358 }
1359
static bool isFuncletReturnInstr(const MachineInstr &MI) {
1361 switch (MI.getOpcode()) {
1362 default:
1363 return false;
1364 case AArch64::CATCHRET:
1365 case AArch64::CLEANUPRET:
1366 return true;
1367 }
1368 }
1369
void AArch64FrameLowering::emitEpilogue(MachineFunction &MF,
1371 MachineBasicBlock &MBB) const {
1372 MachineBasicBlock::iterator MBBI = MBB.getLastNonDebugInstr();
1373 MachineFrameInfo &MFI = MF.getFrameInfo();
1374 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1375 const TargetInstrInfo *TII = Subtarget.getInstrInfo();
1376 DebugLoc DL;
1377 bool IsTailCallReturn = false;
1378 bool NeedsWinCFI = needsWinCFI(MF);
1379 bool HasWinCFI = false;
1380 bool IsFunclet = false;
1381 auto WinCFI = make_scope_exit([&]() {
1382 if (!MF.hasWinCFI())
1383 MF.setHasWinCFI(HasWinCFI);
1384 });
1385
1386 if (MBB.end() != MBBI) {
1387 DL = MBBI->getDebugLoc();
1388 unsigned RetOpcode = MBBI->getOpcode();
1389 IsTailCallReturn = RetOpcode == AArch64::TCRETURNdi ||
1390 RetOpcode == AArch64::TCRETURNri ||
1391 RetOpcode == AArch64::TCRETURNriBTI;
1392 IsFunclet = isFuncletReturnInstr(*MBBI);
1393 }
1394
1395 int64_t NumBytes = IsFunclet ? getWinEHFuncletFrameSize(MF)
1396 : MFI.getStackSize();
1397 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1398
1399 // All calls are tail calls in GHC calling conv, and functions have no
1400 // prologue/epilogue.
1401 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
1402 return;
1403
1404 // Initial and residual are named for consistency with the prologue. Note that
1405 // in the epilogue, the residual adjustment is executed first.
1406 uint64_t ArgumentPopSize = 0;
1407 if (IsTailCallReturn) {
1408 MachineOperand &StackAdjust = MBBI->getOperand(1);
1409
1410 // For a tail-call in a callee-pops-arguments environment, some or all of
1411 // the stack may actually be in use for the call's arguments, this is
1412 // calculated during LowerCall and consumed here...
1413 ArgumentPopSize = StackAdjust.getImm();
1414 } else {
1415 // ... otherwise the amount to pop is *all* of the argument space,
1416 // conveniently stored in the MachineFunctionInfo by
1417 // LowerFormalArguments. This will, of course, be zero for the C calling
1418 // convention.
1419 ArgumentPopSize = AFI->getArgumentStackToRestore();
1420 }
1421
1422 // The stack frame should be like below,
1423 //
1424 // ---------------------- ---
1425 // | | |
1426 // | BytesInStackArgArea| CalleeArgStackSize
1427 // | (NumReusableBytes) | (of tail call)
1428 // | | ---
1429 // | | |
1430 // ---------------------| --- |
1431 // | | | |
1432 // | CalleeSavedReg | | |
1433 // | (CalleeSavedStackSize)| | |
1434 // | | | |
1435 // ---------------------| | NumBytes
1436 // | | StackSize (StackAdjustUp)
1437 // | LocalStackSize | | |
1438 // | (covering callee | | |
1439 // | args) | | |
1440 // | | | |
1441 // ---------------------- --- ---
1442 //
1443 // So NumBytes = StackSize + BytesInStackArgArea - CalleeArgStackSize
1444 // = StackSize + ArgumentPopSize
1445 //
1446 // AArch64TargetLowering::LowerCall figures out ArgumentPopSize and keeps
1447 // it as the 2nd argument of AArch64ISD::TC_RETURN.
1448
1449 auto Cleanup = make_scope_exit([&] { InsertReturnAddressAuth(MF, MBB); });
1450
1451 bool IsWin64 =
1452 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
1453 // Var args are accounted for in the containing function, so don't
1454 // include them for funclets.
1455 unsigned FixedObject =
1456 (IsWin64 && !IsFunclet) ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
1457
1458 uint64_t AfterCSRPopSize = ArgumentPopSize;
1459 auto PrologueSaveSize = AFI->getCalleeSavedStackSize() + FixedObject;
1460 // We cannot rely on the local stack size set in emitPrologue if the function
1461 // has funclets, as funclets have different local stack size requirements, and
1462 // the current value set in emitPrologue may be that of the containing
1463 // function.
1464 if (MF.hasEHFunclets())
1465 AFI->setLocalStackSize(NumBytes - PrologueSaveSize);
1466 bool CombineSPBump = shouldCombineCSRLocalStackBump(MF, NumBytes);
1467 // Assume we can't combine the last pop with the sp restore.
1468
1469 if (!CombineSPBump && PrologueSaveSize != 0) {
1470 MachineBasicBlock::iterator Pop = std::prev(MBB.getFirstTerminator());
1471 while (AArch64InstrInfo::isSEHInstruction(*Pop))
1472 Pop = std::prev(Pop);
1473 // Converting the last ldp to a post-index ldp is valid only if the last
1474 // ldp's offset is 0.
1475 const MachineOperand &OffsetOp = Pop->getOperand(Pop->getNumOperands() - 1);
1476 // If the offset is 0, convert it to a post-index ldp.
1477 if (OffsetOp.getImm() == 0)
1478 convertCalleeSaveRestoreToSPPrePostIncDec(
1479 MBB, Pop, DL, TII, PrologueSaveSize, NeedsWinCFI, &HasWinCFI, false);
1480 else {
1481 // If not, make sure to emit an add after the last ldp.
1482 // We're doing this by transferring the size to be restored from the
1483 // adjustment *before* the CSR pops to the adjustment *after* the CSR
1484 // pops.
1485 AfterCSRPopSize += PrologueSaveSize;
1486 }
1487 }
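// Illustrative sketch of the conversion above (hypothetical registers and
// sizes): with PrologueSaveSize = 32, a trailing "ldp x21, x22, [sp]" (offset
// 0) is rewritten by convertCalleeSaveRestoreToSPPrePostIncDec into the
// post-indexed "ldp x21, x22, [sp], #32", folding the final CSR-area SP bump
// into the reload; a non-zero offset instead leaves the ldp alone and adds
// the 32 bytes to AfterCSRPopSize.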
1488
1489 // Move past the restores of the callee-saved registers.
1490 // If we plan on combining the sp bump of the local stack size and the callee
1491 // save stack size, we might need to adjust the CSR save and restore offsets.
1492 MachineBasicBlock::iterator LastPopI = MBB.getFirstTerminator();
1493 MachineBasicBlock::iterator Begin = MBB.begin();
1494 while (LastPopI != Begin) {
1495 --LastPopI;
1496 if (!LastPopI->getFlag(MachineInstr::FrameDestroy) ||
1497 IsSVECalleeSave(LastPopI)) {
1498 ++LastPopI;
1499 break;
1500 } else if (CombineSPBump)
1501 fixupCalleeSaveRestoreStackOffset(*LastPopI, AFI->getLocalStackSize(),
1502 NeedsWinCFI, &HasWinCFI);
1503 }
1504
1505 if (NeedsWinCFI) {
1506 HasWinCFI = true;
1507 BuildMI(MBB, LastPopI, DL, TII->get(AArch64::SEH_EpilogStart))
1508 .setMIFlag(MachineInstr::FrameDestroy);
1509 }
1510
1511 const StackOffset &SVEStackSize = getSVEStackSize(MF);
1512
1513 // If there is a single SP update, insert it before the ret and we're done.
1514 if (CombineSPBump) {
1515 assert(!SVEStackSize && "Cannot combine SP bump with SVE");
1516 emitFrameOffset(MBB, MBB.getFirstTerminator(), DL, AArch64::SP, AArch64::SP,
1517 {NumBytes + (int64_t)AfterCSRPopSize, MVT::i8}, TII,
1518 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
1519 if (NeedsWinCFI && HasWinCFI)
1520 BuildMI(MBB, MBB.getFirstTerminator(), DL,
1521 TII->get(AArch64::SEH_EpilogEnd))
1522 .setMIFlag(MachineInstr::FrameDestroy);
1523 return;
1524 }
1525
1526 NumBytes -= PrologueSaveSize;
1527 assert(NumBytes >= 0 && "Negative stack allocation size!?");
1528
1529 // Process the SVE callee-saves to determine what space needs to be
1530 // deallocated.
1531 StackOffset DeallocateBefore = {}, DeallocateAfter = SVEStackSize;
1532 MachineBasicBlock::iterator RestoreBegin = LastPopI, RestoreEnd = LastPopI;
1533 if (AFI->getSVECalleeSavedStackSize()) {
1534 RestoreBegin = std::prev(RestoreEnd);
1535 while (IsSVECalleeSave(RestoreBegin) &&
1536 RestoreBegin != MBB.begin())
1537 --RestoreBegin;
1538 ++RestoreBegin;
1539
1540 assert(IsSVECalleeSave(RestoreBegin) &&
1541 IsSVECalleeSave(std::prev(RestoreEnd)) && "Unexpected instruction");
1542
1543 int64_t OffsetToFirstCalleeSaveFromSP =
1544 MFI.getObjectOffset(AFI->getMaxSVECSFrameIndex());
1545 StackOffset OffsetToCalleeSavesFromSP =
1546 StackOffset(OffsetToFirstCalleeSaveFromSP, MVT::nxv1i8) + SVEStackSize;
1547 DeallocateBefore = OffsetToCalleeSavesFromSP;
1548 DeallocateAfter = SVEStackSize - DeallocateBefore;
1549 }
1550
1551 // Deallocate the SVE area.
1552 if (SVEStackSize) {
1553 if (AFI->isStackRealigned()) {
1554 if (AFI->getSVECalleeSavedStackSize())
1555 // Set SP to start of SVE area, from which the callee-save reloads
1556 // can be done. The code below will deallocate the stack space
1557 // by moving FP -> SP.
1558 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::FP,
1559 -SVEStackSize, TII, MachineInstr::FrameDestroy);
1560 } else {
1561 if (AFI->getSVECalleeSavedStackSize()) {
1562 // Deallocate the non-SVE locals first before we can deallocate (and
1563 // restore callee saves) from the SVE area.
1564 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
1565 {NumBytes, MVT::i8}, TII, MachineInstr::FrameDestroy);
1566 NumBytes = 0;
1567 }
1568
1569 emitFrameOffset(MBB, RestoreBegin, DL, AArch64::SP, AArch64::SP,
1570 DeallocateBefore, TII, MachineInstr::FrameDestroy);
1571
1572 emitFrameOffset(MBB, RestoreEnd, DL, AArch64::SP, AArch64::SP,
1573 DeallocateAfter, TII, MachineInstr::FrameDestroy);
1574 }
1575 }
1576
1577 if (!hasFP(MF)) {
1578 bool RedZone = canUseRedZone(MF);
1579 // If this was a redzone leaf function, we don't need to restore the
1580 // stack pointer (but we may need to pop stack args for fastcc).
1581 if (RedZone && AfterCSRPopSize == 0)
1582 return;
1583
1584 bool NoCalleeSaveRestore = PrologueSaveSize == 0;
1585 int64_t StackRestoreBytes = RedZone ? 0 : NumBytes;
1586 if (NoCalleeSaveRestore)
1587 StackRestoreBytes += AfterCSRPopSize;
1588
1589 // If we were able to combine the local stack pop with the argument pop,
1590 // then we're done.
1591 bool Done = NoCalleeSaveRestore || AfterCSRPopSize == 0;
1592
1593 // If we're done after this, make sure to help the load store optimizer.
1594 if (Done)
1595 adaptForLdStOpt(MBB, MBB.getFirstTerminator(), LastPopI);
1596
1597 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
1598 {StackRestoreBytes, MVT::i8}, TII,
1599 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
1600 if (Done) {
1601 if (NeedsWinCFI) {
1602 HasWinCFI = true;
1603 BuildMI(MBB, MBB.getFirstTerminator(), DL,
1604 TII->get(AArch64::SEH_EpilogEnd))
1605 .setMIFlag(MachineInstr::FrameDestroy);
1606 }
1607 return;
1608 }
1609
1610 NumBytes = 0;
1611 }
1612
1613 // Restore the original stack pointer.
1614 // FIXME: Rather than doing the math here, we should instead just use
1615 // non-post-indexed loads for the restores if we aren't actually going to
1616 // be able to save any instructions.
1617 if (!IsFunclet && (MFI.hasVarSizedObjects() || AFI->isStackRealigned())) {
1618 int64_t OffsetToFrameRecord =
1619 isTargetDarwin(MF) ? (-(int64_t)AFI->getCalleeSavedStackSize() + 16) : 0;
1620 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::FP,
1621 {OffsetToFrameRecord, MVT::i8},
1622 TII, MachineInstr::FrameDestroy, false, NeedsWinCFI);
1623 } else if (NumBytes)
1624 emitFrameOffset(MBB, LastPopI, DL, AArch64::SP, AArch64::SP,
1625 {NumBytes, MVT::i8}, TII, MachineInstr::FrameDestroy, false,
1626 NeedsWinCFI);
1627
1628 // This must be placed after the callee-save restore code because that code
1629 // assumes the SP is at the same location as it was after the callee-save save
1630 // code in the prologue.
1631 if (AfterCSRPopSize) {
1632 // Find an insertion point for the first ldp so that it goes before the
1633 // shadow call stack epilog instruction. This ensures that the restore of
1634 // lr from x18 is placed after the restore from sp.
1635 auto FirstSPPopI = MBB.getFirstTerminator();
1636 while (FirstSPPopI != Begin) {
1637 auto Prev = std::prev(FirstSPPopI);
1638 if (Prev->getOpcode() != AArch64::LDRXpre ||
1639 Prev->getOperand(0).getReg() == AArch64::SP)
1640 break;
1641 FirstSPPopI = Prev;
1642 }
1643
1644 adaptForLdStOpt(MBB, FirstSPPopI, LastPopI);
1645
1646 emitFrameOffset(MBB, FirstSPPopI, DL, AArch64::SP, AArch64::SP,
1647 {(int64_t)AfterCSRPopSize, MVT::i8}, TII,
1648 MachineInstr::FrameDestroy, false, NeedsWinCFI, &HasWinCFI);
1649 }
1650 if (NeedsWinCFI && HasWinCFI)
1651 BuildMI(MBB, MBB.getFirstTerminator(), DL, TII->get(AArch64::SEH_EpilogEnd))
1652 .setMIFlag(MachineInstr::FrameDestroy);
1653
1654 MF.setHasWinCFI(HasWinCFI);
1655 }
1656
1657 /// getFrameIndexReference - Provide a base+offset reference to an FI slot for
1658 /// debug info. It's the same as what we use for resolving the code-gen
1659 /// references for now. FIXME: This can go wrong when references are
1660 /// SP-relative and simple call frames aren't used.
1661 int AArch64FrameLowering::getFrameIndexReference(const MachineFunction &MF,
1662 int FI,
1663 unsigned &FrameReg) const {
1664 return resolveFrameIndexReference(
1665 MF, FI, FrameReg,
1666 /*PreferFP=*/
1667 MF.getFunction().hasFnAttribute(Attribute::SanitizeHWAddress),
1668 /*ForSimm=*/false)
1669 .getBytes();
1670 }
1671
1672 int AArch64FrameLowering::getNonLocalFrameIndexReference(
1673 const MachineFunction &MF, int FI) const {
1674 return getSEHFrameIndexOffset(MF, FI);
1675 }
1676
1677 static StackOffset getFPOffset(const MachineFunction &MF, int64_t ObjectOffset) {
1678 const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
1679 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1680 bool IsWin64 =
1681 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv());
1682 unsigned FixedObject = IsWin64 ? alignTo(AFI->getVarArgsGPRSize(), 16) : 0;
1683 unsigned FPAdjust = isTargetDarwin(MF)
1684 ? 16 : AFI->getCalleeSavedStackSize(MF.getFrameInfo());
1685 return {ObjectOffset + FixedObject + FPAdjust, MVT::i8};
1686 }
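// Worked example (hypothetical, Darwin, no Win64 vararg area): FPAdjust is 16
// because FP points at the frame record, 16 bytes below the top of the
// callee-save area, so a fixed object at ObjectOffset 0 resolves to FP + 16.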
1687
1688 static StackOffset getStackOffset(const MachineFunction &MF, int64_t ObjectOffset) {
1689 const auto &MFI = MF.getFrameInfo();
1690 return {ObjectOffset + (int64_t)MFI.getStackSize(), MVT::i8};
1691 }
1692
1693 int AArch64FrameLowering::getSEHFrameIndexOffset(const MachineFunction &MF,
1694 int FI) const {
1695 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
1696 MF.getSubtarget().getRegisterInfo());
1697 int ObjectOffset = MF.getFrameInfo().getObjectOffset(FI);
1698 return RegInfo->getLocalAddressRegister(MF) == AArch64::FP
1699 ? getFPOffset(MF, ObjectOffset).getBytes()
1700 : getStackOffset(MF, ObjectOffset).getBytes();
1701 }
1702
1703 StackOffset AArch64FrameLowering::resolveFrameIndexReference(
1704 const MachineFunction &MF, int FI, unsigned &FrameReg, bool PreferFP,
1705 bool ForSimm) const {
1706 const auto &MFI = MF.getFrameInfo();
1707 int64_t ObjectOffset = MFI.getObjectOffset(FI);
1708 bool isFixed = MFI.isFixedObjectIndex(FI);
1709 bool isSVE = MFI.getStackID(FI) == TargetStackID::SVEVector;
1710 return resolveFrameOffsetReference(MF, ObjectOffset, isFixed, isSVE, FrameReg,
1711 PreferFP, ForSimm);
1712 }
1713
1714 StackOffset AArch64FrameLowering::resolveFrameOffsetReference(
1715 const MachineFunction &MF, int64_t ObjectOffset, bool isFixed, bool isSVE,
1716 unsigned &FrameReg, bool PreferFP, bool ForSimm) const {
1717 const auto &MFI = MF.getFrameInfo();
1718 const auto *RegInfo = static_cast<const AArch64RegisterInfo *>(
1719 MF.getSubtarget().getRegisterInfo());
1720 const auto *AFI = MF.getInfo<AArch64FunctionInfo>();
1721 const auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1722
1723 int64_t FPOffset = getFPOffset(MF, ObjectOffset).getBytes();
1724 int64_t Offset = getStackOffset(MF, ObjectOffset).getBytes();
1725 bool isCSR =
1726 !isFixed && ObjectOffset >= -((int)AFI->getCalleeSavedStackSize(MFI));
1727
1728 const StackOffset &SVEStackSize = getSVEStackSize(MF);
1729
1730 // Use frame pointer to reference fixed objects. Use it for locals if
1731 // there are VLAs or a dynamically realigned SP (and thus the SP isn't
1732 // reliable as a base). Make sure useFPForScavengingIndex() does the
1733 // right thing for the emergency spill slot.
1734 bool UseFP = false;
1735 if (AFI->hasStackFrame() && !isSVE) {
1736 // We shouldn't prefer using the FP when there is an SVE area
1737 // in between the FP and the non-SVE locals/spills.
1738 PreferFP &= !SVEStackSize;
1739
1740 // Note: Keeping the following as multiple 'if' statements rather than
1741 // merging to a single expression for readability.
1742 //
1743 // Argument access should always use the FP.
1744 if (isFixed) {
1745 UseFP = hasFP(MF);
1746 } else if (isCSR && RegInfo->needsStackRealignment(MF)) {
1747 // References to the CSR area must use FP if we're re-aligning the stack
1748 // since the dynamically-sized alignment padding is between the SP/BP and
1749 // the CSR area.
1750 assert(hasFP(MF) && "Re-aligned stack must have frame pointer");
1751 UseFP = true;
1752 } else if (hasFP(MF) && !RegInfo->needsStackRealignment(MF)) {
1753 // If the FPOffset is negative and we're producing a signed immediate, we
1754 // have to keep in mind that the available offset range for negative
1755 // offsets is smaller than for positive ones. If an offset is available
1756 // via the FP and the SP, use whichever is closest.
1757 bool FPOffsetFits = !ForSimm || FPOffset >= -256;
1758 PreferFP |= Offset > -FPOffset;
1759
1760 if (MFI.hasVarSizedObjects()) {
1761 // If we have variable sized objects, we can use either FP or BP, as the
1762 // SP offset is unknown. We can use the base pointer if we have one and
1763 // FP is not preferred. If not, we're stuck with using FP.
1764 bool CanUseBP = RegInfo->hasBasePointer(MF);
1765 if (FPOffsetFits && CanUseBP) // Both are ok. Pick the best.
1766 UseFP = PreferFP;
1767 else if (!CanUseBP) { // Can't use BP. Forced to use FP.
1768 assert(!SVEStackSize && "Expected BP to be available");
1769 UseFP = true;
1770 }
1771 // else we can use BP and FP, but the offset from FP won't fit.
1772 // That will make us scavenge registers which we can probably avoid by
1773 // using BP. If it won't fit for BP either, we'll scavenge anyway.
1774 } else if (FPOffset >= 0) {
1775 // Use SP or FP, whichever gives us the best chance of the offset
1776 // being in range for direct access. If the FPOffset is positive,
1777 // that'll always be best, as the SP will be even further away.
1778 UseFP = true;
1779 } else if (MF.hasEHFunclets() && !RegInfo->hasBasePointer(MF)) {
1780 // Funclets access the locals contained in the parent's stack frame
1781 // via the frame pointer, so we have to use the FP in the parent
1782 // function.
1783 (void) Subtarget;
1784 assert(
1785 Subtarget.isCallingConvWin64(MF.getFunction().getCallingConv()) &&
1786 "Funclets should only be present on Win64");
1787 UseFP = true;
1788 } else {
1789 // We have the choice between FP and (SP or BP).
1790 if (FPOffsetFits && PreferFP) // If FP is the best fit, use it.
1791 UseFP = true;
1792 }
1793 }
1794 }
1795
1796 assert(((isFixed || isCSR) || !RegInfo->needsStackRealignment(MF) || !UseFP) &&
1797 "In the presence of dynamic stack pointer realignment, "
1798 "non-argument/CSR objects cannot be accessed through the frame pointer");
1799
1800 if (isSVE) {
1801 int64_t OffsetToSVEArea =
1802 MFI.getStackSize() - AFI->getCalleeSavedStackSize();
1803 StackOffset FPOffset = {ObjectOffset, MVT::nxv1i8};
1804 StackOffset SPOffset = SVEStackSize +
1805 StackOffset(ObjectOffset, MVT::nxv1i8) +
1806 StackOffset(OffsetToSVEArea, MVT::i8);
1807 // Always use the FP for SVE spills if available and beneficial.
1808 if (hasFP(MF) &&
1809 (SPOffset.getBytes() ||
1810 FPOffset.getScalableBytes() < SPOffset.getScalableBytes() ||
1811 RegInfo->needsStackRealignment(MF))) {
1812 FrameReg = RegInfo->getFrameRegister(MF);
1813 return FPOffset;
1814 }
1815
1816 FrameReg = RegInfo->hasBasePointer(MF) ? RegInfo->getBaseRegister()
1817 : (unsigned)AArch64::SP;
1818 return SPOffset;
1819 }
1820
1821 StackOffset ScalableOffset = {};
1822 if (UseFP && !(isFixed || isCSR))
1823 ScalableOffset = -SVEStackSize;
1824 if (!UseFP && (isFixed || isCSR))
1825 ScalableOffset = SVEStackSize;
1826
1827 if (UseFP) {
1828 FrameReg = RegInfo->getFrameRegister(MF);
1829 return StackOffset(FPOffset, MVT::i8) + ScalableOffset;
1830 }
1831
1832 // Use the base pointer if we have one.
1833 if (RegInfo->hasBasePointer(MF))
1834 FrameReg = RegInfo->getBaseRegister();
1835 else {
1836 assert(!MFI.hasVarSizedObjects() &&
1837 "Can't use SP when we have var sized objects.");
1838 FrameReg = AArch64::SP;
1839 // If we're using the red zone for this function, the SP won't actually
1840 // be adjusted, so the offsets will be negative. They're also all
1841 // within range of the signed 9-bit immediate instructions.
1842 if (canUseRedZone(MF))
1843 Offset -= AFI->getLocalStackSize();
1844 }
1845
1846 return StackOffset(Offset, MVT::i8) + ScalableOffset;
1847 }
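// Informal summary of the selection above (the code is authoritative): fixed
// (argument) objects use FP whenever one exists; CSR objects must use FP when
// the stack is realigned; otherwise the closest in-range base among FP, BP
// and SP is picked, and SP is only a fallback when there are no
// variable-sized objects.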
1848
1849 static unsigned getPrologueDeath(MachineFunction &MF, unsigned Reg) {
1850 // Do not set a kill flag on values that are also marked as live-in. This
1851 // happens with the @llvm.returnaddress intrinsic and with arguments passed in
1852 // callee saved registers.
1853 // Omitting the kill flags is conservatively correct even if the live-in
1854 // is not used after all.
1855 bool IsLiveIn = MF.getRegInfo().isLiveIn(Reg);
1856 return getKillRegState(!IsLiveIn);
1857 }
1858
1859 static bool produceCompactUnwindFrame(MachineFunction &MF) {
1860 const AArch64Subtarget &Subtarget = MF.getSubtarget<AArch64Subtarget>();
1861 AttributeList Attrs = MF.getFunction().getAttributes();
1862 return Subtarget.isTargetMachO() &&
1863 !(Subtarget.getTargetLowering()->supportSwiftError() &&
1864 Attrs.hasAttrSomewhere(Attribute::SwiftError));
1865 }
1866
1867 static bool invalidateWindowsRegisterPairing(unsigned Reg1, unsigned Reg2,
1868 bool NeedsWinCFI) {
1869 // If we are generating register pairs for a Windows function that requires
1870 // EH support, then pair consecutive registers only. There are no unwind
1871 // opcodes for saves/restores of non-consecutive register pairs.
1872 // The unwind opcodes are save_regp, save_regp_x, save_fregp, save_fregp_x.
1873 // https://docs.microsoft.com/en-us/cpp/build/arm64-exception-handling
1874
1875 // TODO: LR can be paired with any register. We don't support this yet in
1876 // the MCLayer. We need to add support for the save_lrpair unwind code.
1877 if (Reg2 == AArch64::FP)
1878 return true;
1879 if (!NeedsWinCFI)
1880 return false;
1881 if (Reg2 == Reg1 + 1)
1882 return false;
1883 return true;
1884 }
1885
1886 /// Returns true if Reg1 and Reg2 cannot be paired using a ldp/stp instruction.
1887 /// WindowsCFI requires that only consecutive registers can be paired.
1888 /// LR and FP need to be allocated together when the frame needs to save
1889 /// the frame-record. This means any other register pairing with LR is invalid.
1890 static bool invalidateRegisterPairing(unsigned Reg1, unsigned Reg2,
1891 bool UsesWinAAPCS, bool NeedsWinCFI, bool NeedsFrameRecord) {
1892 if (UsesWinAAPCS)
1893 return invalidateWindowsRegisterPairing(Reg1, Reg2, NeedsWinCFI);
1894
1895 // If we need to store the frame record, don't pair any register
1896 // with LR other than FP.
1897 if (NeedsFrameRecord)
1898 return Reg2 == AArch64::LR;
1899
1900 return false;
1901 }
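// Example of the pairing rules (hypothetical CSI order): with a frame record
// required, (x19, x20) may stay paired, but (x28, lr) is rejected so that lr
// is only ever paired with fp; under Windows CFI, (x19, x21) would also be
// rejected because the registers are not consecutive.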
1902
1903 namespace {
1904
1905 struct RegPairInfo {
1906 unsigned Reg1 = AArch64::NoRegister;
1907 unsigned Reg2 = AArch64::NoRegister;
1908 int FrameIdx;
1909 int Offset;
1910 enum RegType { GPR, FPR64, FPR128, PPR, ZPR } Type;
1911
1912 RegPairInfo() = default;
1913
1914 bool isPaired() const { return Reg2 != AArch64::NoRegister; }
1915
1916 unsigned getScale() const {
1917 switch (Type) {
1918 case PPR:
1919 return 2;
1920 case GPR:
1921 case FPR64:
1922 return 8;
1923 case ZPR:
1924 case FPR128:
1925 return 16;
1926 }
1927 llvm_unreachable("Unsupported type");
1928 }
1929
1930 bool isScalable() const { return Type == PPR || Type == ZPR; }
1931 };
1932
1933 } // end anonymous namespace
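// Note on RegPairInfo::Offset scaling (illustrative, hypothetical values):
// Offset is stored in units of getScale(), matching the scaled immediate of
// the STP/LDP/STR/LDR form used later. A GPR pair 16 bytes above SP is
// recorded as Offset = 16 / 8 = 2; a PPR save 4 scalable bytes into the SVE
// area would be recorded as Offset = 4 / 2 = 2.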
1934
1935 static void computeCalleeSaveRegisterPairs(
1936 MachineFunction &MF, const std::vector<CalleeSavedInfo> &CSI,
1937 const TargetRegisterInfo *TRI, SmallVectorImpl<RegPairInfo> &RegPairs,
1938 bool &NeedShadowCallStackProlog, bool NeedsFrameRecord) {
1939
1940 if (CSI.empty())
1941 return;
1942
1943 bool IsWindows = isTargetWindows(MF);
1944 bool NeedsWinCFI = needsWinCFI(MF);
1945 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
1946 MachineFrameInfo &MFI = MF.getFrameInfo();
1947 CallingConv::ID CC = MF.getFunction().getCallingConv();
1948 unsigned Count = CSI.size();
1949 (void)CC;
1950 // MachO's compact unwind format relies on all registers being stored in
1951 // pairs.
1952 assert((!produceCompactUnwindFrame(MF) ||
1953 CC == CallingConv::PreserveMost ||
1954 (Count & 1) == 0) &&
1955 "Odd number of callee-saved regs to spill!");
1956 int ByteOffset = AFI->getCalleeSavedStackSize();
1957 int ScalableByteOffset = AFI->getSVECalleeSavedStackSize();
1958 // On Linux, we will have at most one non-paired register. On Windows
1959 // with CFI, we can have multiple unpaired registers in order to utilize the
1960 // available unwind codes. This flag ensures that the alignment fixup is done
1961 // only once, as intended.
1962 bool FixupDone = false;
1963 for (unsigned i = 0; i < Count; ++i) {
1964 RegPairInfo RPI;
1965 RPI.Reg1 = CSI[i].getReg();
1966
1967 if (AArch64::GPR64RegClass.contains(RPI.Reg1))
1968 RPI.Type = RegPairInfo::GPR;
1969 else if (AArch64::FPR64RegClass.contains(RPI.Reg1))
1970 RPI.Type = RegPairInfo::FPR64;
1971 else if (AArch64::FPR128RegClass.contains(RPI.Reg1))
1972 RPI.Type = RegPairInfo::FPR128;
1973 else if (AArch64::ZPRRegClass.contains(RPI.Reg1))
1974 RPI.Type = RegPairInfo::ZPR;
1975 else if (AArch64::PPRRegClass.contains(RPI.Reg1))
1976 RPI.Type = RegPairInfo::PPR;
1977 else
1978 llvm_unreachable("Unsupported register class.");
1979
1980 // Add the next reg to the pair if it is in the same register class.
1981 if (i + 1 < Count) {
1982 unsigned NextReg = CSI[i + 1].getReg();
1983 switch (RPI.Type) {
1984 case RegPairInfo::GPR:
1985 if (AArch64::GPR64RegClass.contains(NextReg) &&
1986 !invalidateRegisterPairing(RPI.Reg1, NextReg, IsWindows, NeedsWinCFI,
1987 NeedsFrameRecord))
1988 RPI.Reg2 = NextReg;
1989 break;
1990 case RegPairInfo::FPR64:
1991 if (AArch64::FPR64RegClass.contains(NextReg) &&
1992 !invalidateWindowsRegisterPairing(RPI.Reg1, NextReg, NeedsWinCFI))
1993 RPI.Reg2 = NextReg;
1994 break;
1995 case RegPairInfo::FPR128:
1996 if (AArch64::FPR128RegClass.contains(NextReg))
1997 RPI.Reg2 = NextReg;
1998 break;
1999 case RegPairInfo::PPR:
2000 case RegPairInfo::ZPR:
2001 break;
2002 }
2003 }
2004
2005 // If either of the registers to be saved is the lr register, it means that
2006 // we also need to save lr in the shadow call stack.
2007 if ((RPI.Reg1 == AArch64::LR || RPI.Reg2 == AArch64::LR) &&
2008 MF.getFunction().hasFnAttribute(Attribute::ShadowCallStack)) {
2009 if (!MF.getSubtarget<AArch64Subtarget>().isXRegisterReserved(18))
2010 report_fatal_error("Must reserve x18 to use shadow call stack");
2011 NeedShadowCallStackProlog = true;
2012 }
2013
2014 // GPRs and FPRs are saved in pairs of 64-bit regs. We expect the CSI
2015 // list to come in sorted by frame index so that we can issue the store
2016 // pair instructions directly. Assert if we see anything otherwise.
2017 //
2018 // The order of the registers in the list is controlled by
2019 // getCalleeSavedRegs(), so they will always be in-order, as well.
2020 assert((!RPI.isPaired() ||
2021 (CSI[i].getFrameIdx() + 1 == CSI[i + 1].getFrameIdx())) &&
2022 "Out of order callee saved regs!");
2023
2024 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg2 != AArch64::FP ||
2025 RPI.Reg1 == AArch64::LR) &&
2026 "FrameRecord must be allocated together with LR");
2027
2028 // Windows AAPCS has FP and LR reversed.
2029 assert((!RPI.isPaired() || !NeedsFrameRecord || RPI.Reg1 != AArch64::FP ||
2030 RPI.Reg2 == AArch64::LR) &&
2031 "FrameRecord must be allocated together with LR");
2032
2033 // MachO's compact unwind format relies on all registers being stored in
2034 // adjacent register pairs.
2035 assert((!produceCompactUnwindFrame(MF) ||
2036 CC == CallingConv::PreserveMost ||
2037 (RPI.isPaired() &&
2038 ((RPI.Reg1 == AArch64::LR && RPI.Reg2 == AArch64::FP) ||
2039 RPI.Reg1 + 1 == RPI.Reg2))) &&
2040 "Callee-save registers not saved as adjacent register pair!");
2041
2042 RPI.FrameIdx = CSI[i].getFrameIdx();
2043
2044 int Scale = RPI.getScale();
2045 if (RPI.isScalable())
2046 ScalableByteOffset -= Scale;
2047 else
2048 ByteOffset -= RPI.isPaired() ? 2 * Scale : Scale;
2049
2050 assert(!(RPI.isScalable() && RPI.isPaired()) &&
2051 "Paired spill/fill instructions don't exist for SVE vectors");
2052
2053 // Round up size of non-pair to pair size if we need to pad the
2054 // callee-save area to ensure 16-byte alignment.
2055 if (AFI->hasCalleeSaveStackFreeSpace() && !FixupDone &&
2056 !RPI.isScalable() && RPI.Type != RegPairInfo::FPR128 &&
2057 !RPI.isPaired()) {
2058 FixupDone = true;
2059 ByteOffset -= 8;
2060 assert(ByteOffset % 16 == 0);
2061 assert(MFI.getObjectAlignment(RPI.FrameIdx) <= 16);
2062 MFI.setObjectAlignment(RPI.FrameIdx, 16);
2063 }
2064
2065 int Offset = RPI.isScalable() ? ScalableByteOffset : ByteOffset;
2066 assert(Offset % Scale == 0);
2067 RPI.Offset = Offset / Scale;
2068
2069 assert(((!RPI.isScalable() && RPI.Offset >= -64 && RPI.Offset <= 63) ||
2070 (RPI.isScalable() && RPI.Offset >= -256 && RPI.Offset <= 255)) &&
2071 "Offset out of bounds for LDP/STP immediate");
2072
2073 RegPairs.push_back(RPI);
2074 if (RPI.isPaired())
2075 ++i;
2076 }
2077 }
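// Worked example of the offset bookkeeping above (hypothetical CSR set): with
// CalleeSavedStackSize = 32 and two GPR pairs, ByteOffset steps 32 -> 16 -> 0
// (2 * 8 bytes per pair), so the first pair in CSI order gets RPI.Offset = 2
// (i.e. [sp, #16]) and the second gets RPI.Offset = 0 (i.e. [sp, #0]).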
2078
2079 bool AArch64FrameLowering::spillCalleeSavedRegisters(
2080 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2081 const std::vector<CalleeSavedInfo> &CSI,
2082 const TargetRegisterInfo *TRI) const {
2083 MachineFunction &MF = *MBB.getParent();
2084 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2085 bool NeedsWinCFI = needsWinCFI(MF);
2086 DebugLoc DL;
2087 SmallVector<RegPairInfo, 8> RegPairs;
2088
2089 bool NeedShadowCallStackProlog = false;
2090 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
2091 NeedShadowCallStackProlog, hasFP(MF));
2092 const MachineRegisterInfo &MRI = MF.getRegInfo();
2093
2094 if (NeedShadowCallStackProlog) {
2095 // Shadow call stack prolog: str x30, [x18], #8
2096 BuildMI(MBB, MI, DL, TII.get(AArch64::STRXpost))
2097 .addReg(AArch64::X18, RegState::Define)
2098 .addReg(AArch64::LR)
2099 .addReg(AArch64::X18)
2100 .addImm(8)
2101 .setMIFlag(MachineInstr::FrameSetup);
2102
2103 if (NeedsWinCFI)
2104 BuildMI(MBB, MI, DL, TII.get(AArch64::SEH_Nop))
2105 .setMIFlag(MachineInstr::FrameSetup);
2106
2107 if (!MF.getFunction().hasFnAttribute(Attribute::NoUnwind)) {
2108 // Emit a CFI instruction that causes 8 to be subtracted from the value of
2109 // x18 when unwinding past this frame.
2110 static const char CFIInst[] = {
2111 dwarf::DW_CFA_val_expression,
2112 18, // register
2113 2, // length
2114 static_cast<char>(unsigned(dwarf::DW_OP_breg18)),
2115 static_cast<char>(-8) & 0x7f, // addend (sleb128)
2116 };
2117 unsigned CFIIndex = MF.addFrameInst(MCCFIInstruction::createEscape(
2118 nullptr, StringRef(CFIInst, sizeof(CFIInst))));
2119 BuildMI(MBB, MI, DL, TII.get(AArch64::CFI_INSTRUCTION))
2120 .addCFIIndex(CFIIndex)
2121 .setMIFlag(MachineInstr::FrameSetup);
2122 }
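    // Byte-level view of CFIInst above: 0x16 (DW_CFA_val_expression),
    // 0x12 (register 18), 0x02 (expression length), 0x82 (DW_OP_breg18) and
    // 0x78 (sleb128 encoding of -8), i.e. the caller's x18 is recovered as
    // the current x18 minus 8.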
2123
2124 // This instruction also makes x18 live-in to the entry block.
2125 MBB.addLiveIn(AArch64::X18);
2126 }
2127
2128 for (auto RPII = RegPairs.rbegin(), RPIE = RegPairs.rend(); RPII != RPIE;
2129 ++RPII) {
2130 RegPairInfo RPI = *RPII;
2131 unsigned Reg1 = RPI.Reg1;
2132 unsigned Reg2 = RPI.Reg2;
2133 unsigned StrOpc;
2134
2135 // Issue sequence of spills for cs regs. The first spill may be converted
2136 // to a pre-decrement store later by emitPrologue if the callee-save stack
2137 // area allocation can't be combined with the local stack area allocation.
2138 // For example:
2139 // stp x22, x21, [sp, #0] // addImm(+0)
2140 // stp x20, x19, [sp, #16] // addImm(+2)
2141 // stp fp, lr, [sp, #32] // addImm(+4)
2142 // Rationale: This sequence saves uop updates compared to a sequence of
2143 // pre-increment spills like stp xi,xj,[sp,#-16]!
2144 // Note: Similar rationale and sequence for restores in epilog.
2145 unsigned Size, Align;
2146 switch (RPI.Type) {
2147 case RegPairInfo::GPR:
2148 StrOpc = RPI.isPaired() ? AArch64::STPXi : AArch64::STRXui;
2149 Size = 8;
2150 Align = 8;
2151 break;
2152 case RegPairInfo::FPR64:
2153 StrOpc = RPI.isPaired() ? AArch64::STPDi : AArch64::STRDui;
2154 Size = 8;
2155 Align = 8;
2156 break;
2157 case RegPairInfo::FPR128:
2158 StrOpc = RPI.isPaired() ? AArch64::STPQi : AArch64::STRQui;
2159 Size = 16;
2160 Align = 16;
2161 break;
2162 case RegPairInfo::ZPR:
2163 StrOpc = AArch64::STR_ZXI;
2164 Size = 16;
2165 Align = 16;
2166 break;
2167 case RegPairInfo::PPR:
2168 StrOpc = AArch64::STR_PXI;
2169 Size = 2;
2170 Align = 2;
2171 break;
2172 }
2173 LLVM_DEBUG(dbgs() << "CSR spill: (" << printReg(Reg1, TRI);
2174 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
2175 dbgs() << ") -> fi#(" << RPI.FrameIdx;
2176 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
2177 dbgs() << ")\n");
2178
2179 assert((!NeedsWinCFI || !(Reg1 == AArch64::LR && Reg2 == AArch64::FP)) &&
2180 "Windows unwdinding requires a consecutive (FP,LR) pair");
2181 // Windows unwind codes require consecutive registers if registers are
2182 // paired. Make the switch here, so that the code below will save (x,x+1)
2183 // and not (x+1,x).
2184 unsigned FrameIdxReg1 = RPI.FrameIdx;
2185 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2186 if (NeedsWinCFI && RPI.isPaired()) {
2187 std::swap(Reg1, Reg2);
2188 std::swap(FrameIdxReg1, FrameIdxReg2);
2189 }
2190 MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(StrOpc));
2191 if (!MRI.isReserved(Reg1))
2192 MBB.addLiveIn(Reg1);
2193 if (RPI.isPaired()) {
2194 if (!MRI.isReserved(Reg2))
2195 MBB.addLiveIn(Reg2);
2196 MIB.addReg(Reg2, getPrologueDeath(MF, Reg2));
2197 MIB.addMemOperand(MF.getMachineMemOperand(
2198 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
2199 MachineMemOperand::MOStore, Size, Align));
2200 }
2201 MIB.addReg(Reg1, getPrologueDeath(MF, Reg1))
2202 .addReg(AArch64::SP)
2203 .addImm(RPI.Offset) // [sp, #offset*scale],
2204 // where factor*scale is implicit
2205 .setMIFlag(MachineInstr::FrameSetup);
2206 MIB.addMemOperand(MF.getMachineMemOperand(
2207 MachinePointerInfo::getFixedStack(MF,FrameIdxReg1),
2208 MachineMemOperand::MOStore, Size, Align));
2209 if (NeedsWinCFI)
2210 InsertSEH(MIB, TII, MachineInstr::FrameSetup);
2211
2212 // Update the StackIDs of the SVE stack slots.
2213 MachineFrameInfo &MFI = MF.getFrameInfo();
2214 if (RPI.Type == RegPairInfo::ZPR || RPI.Type == RegPairInfo::PPR)
2215 MFI.setStackID(RPI.FrameIdx, TargetStackID::SVEVector);
2216
2217 }
2218 return true;
2219 }
2220
2221 bool AArch64FrameLowering::restoreCalleeSavedRegisters(
2222 MachineBasicBlock &MBB, MachineBasicBlock::iterator MI,
2223 std::vector<CalleeSavedInfo> &CSI,
2224 const TargetRegisterInfo *TRI) const {
2225 MachineFunction &MF = *MBB.getParent();
2226 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2227 DebugLoc DL;
2228 SmallVector<RegPairInfo, 8> RegPairs;
2229 bool NeedsWinCFI = needsWinCFI(MF);
2230
2231 if (MI != MBB.end())
2232 DL = MI->getDebugLoc();
2233
2234 bool NeedShadowCallStackProlog = false;
2235 computeCalleeSaveRegisterPairs(MF, CSI, TRI, RegPairs,
2236 NeedShadowCallStackProlog, hasFP(MF));
2237
2238 auto EmitMI = [&](const RegPairInfo &RPI) {
2239 unsigned Reg1 = RPI.Reg1;
2240 unsigned Reg2 = RPI.Reg2;
2241
2242 // Issue sequence of restores for cs regs. The last restore may be converted
2243 // to a post-increment load later by emitEpilogue if the callee-save stack
2244 // area allocation can't be combined with the local stack area allocation.
2245 // For example:
2246 // ldp fp, lr, [sp, #32] // addImm(+4)
2247 // ldp x20, x19, [sp, #16] // addImm(+2)
2248 // ldp x22, x21, [sp, #0] // addImm(+0)
2249 // Note: see comment in spillCalleeSavedRegisters()
2250 unsigned LdrOpc;
2251 unsigned Size, Align;
2252 switch (RPI.Type) {
2253 case RegPairInfo::GPR:
2254 LdrOpc = RPI.isPaired() ? AArch64::LDPXi : AArch64::LDRXui;
2255 Size = 8;
2256 Align = 8;
2257 break;
2258 case RegPairInfo::FPR64:
2259 LdrOpc = RPI.isPaired() ? AArch64::LDPDi : AArch64::LDRDui;
2260 Size = 8;
2261 Align = 8;
2262 break;
2263 case RegPairInfo::FPR128:
2264 LdrOpc = RPI.isPaired() ? AArch64::LDPQi : AArch64::LDRQui;
2265 Size = 16;
2266 Align = 16;
2267 break;
2268 case RegPairInfo::ZPR:
2269 LdrOpc = AArch64::LDR_ZXI;
2270 Size = 16;
2271 Align = 16;
2272 break;
2273 case RegPairInfo::PPR:
2274 LdrOpc = AArch64::LDR_PXI;
2275 Size = 2;
2276 Align = 2;
2277 break;
2278 }
2279 LLVM_DEBUG(dbgs() << "CSR restore: (" << printReg(Reg1, TRI);
2280 if (RPI.isPaired()) dbgs() << ", " << printReg(Reg2, TRI);
2281 dbgs() << ") -> fi#(" << RPI.FrameIdx;
2282 if (RPI.isPaired()) dbgs() << ", " << RPI.FrameIdx + 1;
2283 dbgs() << ")\n");
2284
2285 // Windows unwind codes require consecutive registers if registers are
2286 // paired. Make the switch here, so that the code below will save (x,x+1)
2287 // and not (x+1,x).
2288 unsigned FrameIdxReg1 = RPI.FrameIdx;
2289 unsigned FrameIdxReg2 = RPI.FrameIdx + 1;
2290 if (NeedsWinCFI && RPI.isPaired()) {
2291 std::swap(Reg1, Reg2);
2292 std::swap(FrameIdxReg1, FrameIdxReg2);
2293 }
2294 MachineInstrBuilder MIB = BuildMI(MBB, MI, DL, TII.get(LdrOpc));
2295 if (RPI.isPaired()) {
2296 MIB.addReg(Reg2, getDefRegState(true));
2297 MIB.addMemOperand(MF.getMachineMemOperand(
2298 MachinePointerInfo::getFixedStack(MF, FrameIdxReg2),
2299 MachineMemOperand::MOLoad, Size, Align));
2300 }
2301 MIB.addReg(Reg1, getDefRegState(true))
2302 .addReg(AArch64::SP)
2303 .addImm(RPI.Offset) // [sp, #offset*scale]
2304 // where factor*scale is implicit
2305 .setMIFlag(MachineInstr::FrameDestroy);
2306 MIB.addMemOperand(MF.getMachineMemOperand(
2307 MachinePointerInfo::getFixedStack(MF, FrameIdxReg1),
2308 MachineMemOperand::MOLoad, Size, Align));
2309 if (NeedsWinCFI)
2310 InsertSEH(MIB, TII, MachineInstr::FrameDestroy);
2311 };
2312
2313 // SVE objects are always restored in reverse order.
2314 for (const RegPairInfo &RPI : reverse(RegPairs))
2315 if (RPI.isScalable())
2316 EmitMI(RPI);
2317
2318 if (ReverseCSRRestoreSeq) {
2319 for (const RegPairInfo &RPI : reverse(RegPairs))
2320 if (!RPI.isScalable())
2321 EmitMI(RPI);
2322 } else
2323 for (const RegPairInfo &RPI : RegPairs)
2324 if (!RPI.isScalable())
2325 EmitMI(RPI);
2326
2327 if (NeedShadowCallStackProlog) {
2328 // Shadow call stack epilog: ldr x30, [x18, #-8]!
2329 BuildMI(MBB, MI, DL, TII.get(AArch64::LDRXpre))
2330 .addReg(AArch64::X18, RegState::Define)
2331 .addReg(AArch64::LR, RegState::Define)
2332 .addReg(AArch64::X18)
2333 .addImm(-8)
2334 .setMIFlag(MachineInstr::FrameDestroy);
2335 }
2336
2337 return true;
2338 }
2339
2340 void AArch64FrameLowering::determineCalleeSaves(MachineFunction &MF,
2341 BitVector &SavedRegs,
2342 RegScavenger *RS) const {
2343 // All calls are tail calls in GHC calling conv, and functions have no
2344 // prologue/epilogue.
2345 if (MF.getFunction().getCallingConv() == CallingConv::GHC)
2346 return;
2347
2348 TargetFrameLowering::determineCalleeSaves(MF, SavedRegs, RS);
2349 const AArch64RegisterInfo *RegInfo = static_cast<const AArch64RegisterInfo *>(
2350 MF.getSubtarget().getRegisterInfo());
2351 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
2352 unsigned UnspilledCSGPR = AArch64::NoRegister;
2353 unsigned UnspilledCSGPRPaired = AArch64::NoRegister;
2354
2355 MachineFrameInfo &MFI = MF.getFrameInfo();
2356 const MCPhysReg *CSRegs = MF.getRegInfo().getCalleeSavedRegs();
2357
2358 unsigned BasePointerReg = RegInfo->hasBasePointer(MF)
2359 ? RegInfo->getBaseRegister()
2360 : (unsigned)AArch64::NoRegister;
2361
2362 unsigned ExtraCSSpill = 0;
2363 // Figure out which callee-saved registers to save/restore.
2364 for (unsigned i = 0; CSRegs[i]; ++i) {
2365 const unsigned Reg = CSRegs[i];
2366
2367 // Add the base pointer register to SavedRegs if it is callee-save.
2368 if (Reg == BasePointerReg)
2369 SavedRegs.set(Reg);
2370
2371 bool RegUsed = SavedRegs.test(Reg);
2372 unsigned PairedReg = AArch64::NoRegister;
2373 if (AArch64::GPR64RegClass.contains(Reg) ||
2374 AArch64::FPR64RegClass.contains(Reg) ||
2375 AArch64::FPR128RegClass.contains(Reg))
2376 PairedReg = CSRegs[i ^ 1];
2377
2378 if (!RegUsed) {
2379 if (AArch64::GPR64RegClass.contains(Reg) &&
2380 !RegInfo->isReservedReg(MF, Reg)) {
2381 UnspilledCSGPR = Reg;
2382 UnspilledCSGPRPaired = PairedReg;
2383 }
2384 continue;
2385 }
2386
2387 // MachO's compact unwind format relies on all registers being stored in
2388 // pairs.
2389 // FIXME: the usual format is actually better if unwinding isn't needed.
2390 if (produceCompactUnwindFrame(MF) && PairedReg != AArch64::NoRegister &&
2391 !SavedRegs.test(PairedReg)) {
2392 SavedRegs.set(PairedReg);
2393 if (AArch64::GPR64RegClass.contains(PairedReg) &&
2394 !RegInfo->isReservedReg(MF, PairedReg))
2395 ExtraCSSpill = PairedReg;
2396 }
2397 }
2398
2399 // Calculates the callee saved stack size.
2400 unsigned CSStackSize = 0;
2401 unsigned SVECSStackSize = 0;
2402 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
2403 const MachineRegisterInfo &MRI = MF.getRegInfo();
2404 for (unsigned Reg : SavedRegs.set_bits()) {
2405 auto RegSize = TRI->getRegSizeInBits(Reg, MRI) / 8;
2406 if (AArch64::PPRRegClass.contains(Reg) ||
2407 AArch64::ZPRRegClass.contains(Reg))
2408 SVECSStackSize += RegSize;
2409 else
2410 CSStackSize += RegSize;
2411 }
2412
2413 // Save number of saved regs, so we can easily update CSStackSize later.
2414 unsigned NumSavedRegs = SavedRegs.count();
2415
2416 // The frame record needs to be created by saving the appropriate registers.
2417 uint64_t EstimatedStackSize = MFI.estimateStackSize(MF);
2418 if (hasFP(MF) ||
2419 windowsRequiresStackProbe(MF, EstimatedStackSize + CSStackSize + 16)) {
2420 SavedRegs.set(AArch64::FP);
2421 SavedRegs.set(AArch64::LR);
2422 }
2423
2424 LLVM_DEBUG(dbgs() << "*** determineCalleeSaves\nSaved CSRs:";
2425 for (unsigned Reg
2426 : SavedRegs.set_bits()) dbgs()
2427 << ' ' << printReg(Reg, RegInfo);
2428 dbgs() << "\n";);
2429
2430 // If any callee-saved registers are used, the frame cannot be eliminated.
2431 int64_t SVEStackSize =
2432 alignTo(SVECSStackSize + estimateSVEStackObjectOffsets(MFI), 16);
2433 bool CanEliminateFrame = (SavedRegs.count() == 0) && !SVEStackSize;
2434
2435 // The CSR spill slots have not been allocated yet, so estimateStackSize
2436 // won't include them.
2437 unsigned EstimatedStackSizeLimit = estimateRSStackSizeLimit(MF);
2438
2439 // Conservatively always assume BigStack when there are SVE spills.
2440 bool BigStack = SVEStackSize ||
2441 (EstimatedStackSize + CSStackSize) > EstimatedStackSizeLimit;
2442 if (BigStack || !CanEliminateFrame || RegInfo->cannotEliminateFrame(MF))
2443 AFI->setHasStackFrame(true);
2444
2445 // Estimate if we might need to scavenge a register at some point in order
2446 // to materialize a stack offset. If so, either spill one additional
2447 // callee-saved register or reserve a special spill slot to facilitate
2448 // register scavenging. If we already spilled an extra callee-saved register
2449 // above to keep the number of spills even, we don't need to do anything else
2450 // here.
2451 if (BigStack) {
2452 if (!ExtraCSSpill && UnspilledCSGPR != AArch64::NoRegister) {
2453 LLVM_DEBUG(dbgs() << "Spilling " << printReg(UnspilledCSGPR, RegInfo)
2454 << " to get a scratch register.\n");
2455 SavedRegs.set(UnspilledCSGPR);
2456 // MachO's compact unwind format relies on all registers being stored in
2457 // pairs, so if we need to spill one extra for BigStack, then we need to
2458 // store the pair.
2459 if (produceCompactUnwindFrame(MF))
2460 SavedRegs.set(UnspilledCSGPRPaired);
2461 ExtraCSSpill = UnspilledCSGPR;
2462 }
2463
2464 // If we didn't find an extra callee-saved register to spill, create
2465 // an emergency spill slot.
2466 if (!ExtraCSSpill || MF.getRegInfo().isPhysRegUsed(ExtraCSSpill)) {
2467 const TargetRegisterInfo *TRI = MF.getSubtarget().getRegisterInfo();
2468 const TargetRegisterClass &RC = AArch64::GPR64RegClass;
2469 unsigned Size = TRI->getSpillSize(RC);
2470 unsigned Align = TRI->getSpillAlignment(RC);
2471 int FI = MFI.CreateStackObject(Size, Align, false);
2472 RS->addScavengingFrameIndex(FI);
2473 LLVM_DEBUG(dbgs() << "No available CS registers, allocated fi#" << FI
2474 << " as the emergency spill slot.\n");
2475 }
2476 }
2477
2478 // Adding the size of additional 64bit GPR saves.
2479 CSStackSize += 8 * (SavedRegs.count() - NumSavedRegs);
2480 uint64_t AlignedCSStackSize = alignTo(CSStackSize, 16);
2481 LLVM_DEBUG(dbgs() << "Estimated stack frame size: "
2482 << EstimatedStackSize + AlignedCSStackSize
2483 << " bytes.\n");
2484
2485 assert((!MFI.isCalleeSavedInfoValid() ||
2486 AFI->getCalleeSavedStackSize() == AlignedCSStackSize) &&
2487 "Should not invalidate callee saved info");
2488
2489 // Round up to register pair alignment to avoid additional SP adjustment
2490 // instructions.
2491 AFI->setCalleeSavedStackSize(AlignedCSStackSize);
2492 AFI->setCalleeSaveStackHasFreeSpace(AlignedCSStackSize != CSStackSize);
2493 AFI->setSVECalleeSavedStackSize(alignTo(SVECSStackSize, 16));
2494 }
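// Illustrative example (hypothetical register set): spilling five 64-bit CSRs
// gives CSStackSize = 40, rounded up to AlignedCSStackSize = 48; the 8 bytes
// of padding make hasCalleeSaveStackFreeSpace() return true, which lets
// computeCalleeSaveRegisterPairs widen a single unpaired save to keep the
// callee-save area 16-byte aligned.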
2495
2496 bool AArch64FrameLowering::enableStackSlotScavenging(
2497 const MachineFunction &MF) const {
2498 const AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
2499 return AFI->hasCalleeSaveStackFreeSpace();
2500 }
2501
2502 /// Returns true if there are any SVE callee saves.
2503 static bool getSVECalleeSaveSlotRange(const MachineFrameInfo &MFI,
2504 int &Min, int &Max) {
2505 Min = std::numeric_limits<int>::max();
2506 Max = std::numeric_limits<int>::min();
2507
2508 if (!MFI.isCalleeSavedInfoValid())
2509 return false;
2510
2511 const std::vector<CalleeSavedInfo> &CSI = MFI.getCalleeSavedInfo();
2512 for (auto &CS : CSI) {
2513 if (AArch64::ZPRRegClass.contains(CS.getReg()) ||
2514 AArch64::PPRRegClass.contains(CS.getReg())) {
2515 assert((Max == std::numeric_limits<int>::min() ||
2516 Max + 1 == CS.getFrameIdx()) &&
2517 "SVE CalleeSaves are not consecutive");
2518
2519 Min = std::min(Min, CS.getFrameIdx());
2520 Max = std::max(Max, CS.getFrameIdx());
2521 }
2522 }
2523 return Min != std::numeric_limits<int>::max();
2524 }
2525
2526 // Process all the SVE stack objects and determine offsets for each
2527 // object. If AssignOffsets is true, the offsets get assigned.
2528 // Fills in the first and last callee-saved frame indices into
2529 // Min/MaxCSFrameIndex, respectively.
2530 // Returns the size of the stack.
2531 static int64_t determineSVEStackObjectOffsets(MachineFrameInfo &MFI,
2532 int &MinCSFrameIndex,
2533 int &MaxCSFrameIndex,
2534 bool AssignOffsets) {
2535 // First process all fixed stack objects.
2536 int64_t Offset = 0;
2537 for (int I = MFI.getObjectIndexBegin(); I != 0; ++I)
2538 if (MFI.getStackID(I) == TargetStackID::SVEVector) {
2539 int64_t FixedOffset = -MFI.getObjectOffset(I);
2540 if (FixedOffset > Offset)
2541 Offset = FixedOffset;
2542 }
2543
2544 auto Assign = [&MFI](int FI, int64_t Offset) {
2545 LLVM_DEBUG(dbgs() << "alloc FI(" << FI << ") at SP[" << Offset << "]\n");
2546 MFI.setObjectOffset(FI, Offset);
2547 };
2548
2549 // Then process all callee saved slots.
2550 if (getSVECalleeSaveSlotRange(MFI, MinCSFrameIndex, MaxCSFrameIndex)) {
2551 // Make sure to align the last callee save slot.
2552 MFI.setObjectAlignment(MaxCSFrameIndex, 16U);
2553
2554 // Assign offsets to the callee save slots.
2555 for (int I = MinCSFrameIndex; I <= MaxCSFrameIndex; ++I) {
2556 Offset += MFI.getObjectSize(I);
2557 Offset = alignTo(Offset, MFI.getObjectAlignment(I));
2558 if (AssignOffsets)
2559 Assign(I, -Offset);
2560 }
2561 }
2562
2563 // Create a buffer of SVE objects to allocate and sort it.
2564 SmallVector<int, 8> ObjectsToAllocate;
2565 for (int I = 0, E = MFI.getObjectIndexEnd(); I != E; ++I) {
2566 unsigned StackID = MFI.getStackID(I);
2567 if (StackID != TargetStackID::SVEVector)
2568 continue;
2569 if (MaxCSFrameIndex >= I && I >= MinCSFrameIndex)
2570 continue;
2571 if (MFI.isDeadObjectIndex(I))
2572 continue;
2573
2574 ObjectsToAllocate.push_back(I);
2575 }
2576
2577 // Allocate all SVE locals and spills
2578 for (unsigned FI : ObjectsToAllocate) {
2579 unsigned Align = MFI.getObjectAlignment(FI);
2580 // FIXME: Given that the length of SVE vectors is not necessarily a power of
2581 // two, we'd need to align every object dynamically at runtime if the
2582 // alignment is larger than 16. This is not yet supported.
2583 if (Align > 16)
2584 report_fatal_error(
2585 "Alignment of scalable vectors > 16 bytes is not yet supported");
2586
2587 Offset = alignTo(Offset + MFI.getObjectSize(FI), Align);
2588 if (AssignOffsets)
2589 Assign(FI, -Offset);
2590 }
2591
2592 return Offset;
2593 }
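// Illustrative example (hypothetical objects): with two ZPR callee-save slots
// of 16 scalable bytes each and one 16-byte SVE local, the callee saves are
// assigned offsets -16 and -32, the local -48, and the returned SVE stack
// size is 48 scalable bytes.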
2594
2595 int64_t AArch64FrameLowering::estimateSVEStackObjectOffsets(
2596 MachineFrameInfo &MFI) const {
2597 int MinCSFrameIndex, MaxCSFrameIndex;
2598 return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex, false);
2599 }
2600
2601 int64_t AArch64FrameLowering::assignSVEStackObjectOffsets(
2602 MachineFrameInfo &MFI, int &MinCSFrameIndex, int &MaxCSFrameIndex) const {
2603 return determineSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex,
2604 true);
2605 }
2606
2607 void AArch64FrameLowering::processFunctionBeforeFrameFinalized(
2608 MachineFunction &MF, RegScavenger *RS) const {
2609 MachineFrameInfo &MFI = MF.getFrameInfo();
2610
2611 assert(getStackGrowthDirection() == TargetFrameLowering::StackGrowsDown &&
2612 "Upwards growing stack unsupported");
2613
2614 int MinCSFrameIndex, MaxCSFrameIndex;
2615 int64_t SVEStackSize =
2616 assignSVEStackObjectOffsets(MFI, MinCSFrameIndex, MaxCSFrameIndex);
2617
2618 AArch64FunctionInfo *AFI = MF.getInfo<AArch64FunctionInfo>();
2619 AFI->setStackSizeSVE(alignTo(SVEStackSize, 16U));
2620 AFI->setMinMaxSVECSFrameIndex(MinCSFrameIndex, MaxCSFrameIndex);
2621
2622 // If this function isn't doing Win64-style C++ EH, we don't need to do
2623 // anything.
2624 if (!MF.hasEHFunclets())
2625 return;
2626 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
2627 WinEHFuncInfo &EHInfo = *MF.getWinEHFuncInfo();
2628
2629 MachineBasicBlock &MBB = MF.front();
2630 auto MBBI = MBB.begin();
2631 while (MBBI != MBB.end() && MBBI->getFlag(MachineInstr::FrameSetup))
2632 ++MBBI;
2633
2634 // Create an UnwindHelp object.
2635 int UnwindHelpFI =
2636 MFI.CreateStackObject(/*size*/8, /*alignment*/16, false);
2637 EHInfo.UnwindHelpFrameIdx = UnwindHelpFI;
2638 // We need to store -2 into the UnwindHelp object at the start of the
2639 // function.
2640 DebugLoc DL;
2641 RS->enterBasicBlockEnd(MBB);
2642 RS->backward(std::prev(MBBI));
2643 unsigned DstReg = RS->FindUnusedReg(&AArch64::GPR64commonRegClass);
2644 assert(DstReg && "There must be a free register after frame setup");
2645 BuildMI(MBB, MBBI, DL, TII.get(AArch64::MOVi64imm), DstReg).addImm(-2);
2646 BuildMI(MBB, MBBI, DL, TII.get(AArch64::STURXi))
2647 .addReg(DstReg, getKillRegState(true))
2648 .addFrameIndex(UnwindHelpFI)
2649 .addImm(0);
2650 }
2651
2652 /// For Win64 AArch64 EH, the offset to the Unwind object is from the SP before
2653 /// the update. This is easily retrieved as it is exactly the offset that is set
2654 /// in processFunctionBeforeFrameFinalized.
2655 int AArch64FrameLowering::getFrameIndexReferencePreferSP(
2656 const MachineFunction &MF, int FI, unsigned &FrameReg,
2657 bool IgnoreSPUpdates) const {
2658 const MachineFrameInfo &MFI = MF.getFrameInfo();
2659 LLVM_DEBUG(dbgs() << "Offset from the SP for " << FI << " is "
2660 << MFI.getObjectOffset(FI) << "\n");
2661 FrameReg = AArch64::SP;
2662 return MFI.getObjectOffset(FI);
2663 }
2664
2665 /// The parent frame offset (aka dispFrame) is only used on X86_64 to retrieve
2666 /// the parent's frame pointer
2667 unsigned AArch64FrameLowering::getWinEHParentFrameOffset(
2668 const MachineFunction &MF) const {
2669 return 0;
2670 }
2671
2672 /// Funclets only need to account for space for the callee saved registers,
2673 /// as the locals are accounted for in the parent's stack frame.
2674 unsigned AArch64FrameLowering::getWinEHFuncletFrameSize(
2675 const MachineFunction &MF) const {
2676 // This is the size of the pushed CSRs.
2677 unsigned CSSize =
2678 MF.getInfo<AArch64FunctionInfo>()->getCalleeSavedStackSize();
2679 // This is the amount of stack a funclet needs to allocate.
2680 return alignTo(CSSize + MF.getFrameInfo().getMaxCallFrameSize(),
2681 getStackAlignment());
2682 }
2683