//===--- AArch64CallLowering.cpp - Call lowering --------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file implements the lowering of LLVM calls to machine code calls for
/// GlobalISel.
///
//===----------------------------------------------------------------------===//

#include "AArch64CallLowering.h"
#include "AArch64ISelLowering.h"
#include "AArch64MachineFunctionInfo.h"
#include "AArch64Subtarget.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
#include "llvm/CodeGen/GlobalISel/Utils.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/MachineValueType.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>

#define DEBUG_TYPE "aarch64-call-lowering"

using namespace llvm;

AArch64CallLowering::AArch64CallLowering(const AArch64TargetLowering &TLI)
  : CallLowering(&TLI) {}

namespace {
struct IncomingArgHandler : public CallLowering::IncomingValueHandler {
  IncomingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     CCAssignFn *AssignFn)
      : IncomingValueHandler(MIRBuilder, MRI, AssignFn), StackUsed(0) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    auto &MFI = MIRBuilder.getMF().getFrameInfo();
    int FI = MFI.CreateFixedObject(Size, Offset, true);
    MPO = MachinePointerInfo::getFixedStack(MIRBuilder.getMF(), FI);
    auto AddrReg = MIRBuilder.buildFrameIndex(LLT::pointer(0, 64), FI);
    StackUsed = std::max(StackUsed, Size + Offset);
    return AddrReg.getReg(0);
  }

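  // Values the calling convention promoted to a wider location type
  // (SExt/ZExt/AExt) are copied from the physical register at the location
  // width and then truncated back to the virtual register's own type.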
  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    markPhysRegUsed(PhysReg);
    switch (VA.getLocInfo()) {
    default:
      MIRBuilder.buildCopy(ValVReg, PhysReg);
      break;
    case CCValAssign::LocInfo::SExt:
    case CCValAssign::LocInfo::ZExt:
    case CCValAssign::LocInfo::AExt: {
      auto Copy = MIRBuilder.buildCopy(LLT{VA.getLocVT()}, PhysReg);
      MIRBuilder.buildTrunc(ValVReg, Copy);
      break;
    }
    }
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t MemSize,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();

    // The reported memory location may be wider than the value.
    const LLT RegTy = MRI.getType(ValVReg);
    MemSize = std::min(static_cast<uint64_t>(RegTy.getSizeInBytes()), MemSize);

    auto MMO = MF.getMachineMemOperand(
        MPO, MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant, MemSize,
        inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildLoad(ValVReg, Addr, *MMO);
  }

  /// How the physical register gets marked varies between formal
  /// parameters (it's a basic-block live-in), and a call instruction
  /// (it's an implicit-def of the BL).
  virtual void markPhysRegUsed(MCRegister PhysReg) = 0;

  uint64_t StackUsed;
};

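/// Handler for a function's own formal arguments: the incoming physical
/// registers are recorded as live-ins of the function and of the entry block.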
struct FormalArgHandler : public IncomingArgHandler {
  FormalArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                   CCAssignFn *AssignFn)
    : IncomingArgHandler(MIRBuilder, MRI, AssignFn) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIRBuilder.getMRI()->addLiveIn(PhysReg);
    MIRBuilder.getMBB().addLiveIn(PhysReg);
  }
};

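/// Handler for values returned by a call: the returning physical registers
/// become implicit defs of the call instruction being built.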
struct CallReturnHandler : public IncomingArgHandler {
  CallReturnHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                    MachineInstrBuilder MIB, CCAssignFn *AssignFn)
    : IncomingArgHandler(MIRBuilder, MRI, AssignFn), MIB(MIB) {}

  void markPhysRegUsed(MCRegister PhysReg) override {
    MIB.addDef(PhysReg, RegState::Implicit);
  }

  MachineInstrBuilder MIB;
};

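/// Handler for outgoing values (return values and call arguments): registers
/// become implicit uses of the return/call instruction, while stack-passed
/// values are stored relative to SP, or into fixed frame objects for tail
/// calls.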
struct OutgoingArgHandler : public CallLowering::OutgoingValueHandler {
  OutgoingArgHandler(MachineIRBuilder &MIRBuilder, MachineRegisterInfo &MRI,
                     MachineInstrBuilder MIB, CCAssignFn *AssignFn,
                     CCAssignFn *AssignFnVarArg, bool IsTailCall = false,
                     int FPDiff = 0)
      : OutgoingValueHandler(MIRBuilder, MRI, AssignFn), MIB(MIB),
        AssignFnVarArg(AssignFnVarArg), IsTailCall(IsTailCall), FPDiff(FPDiff),
        StackSize(0), SPReg(0) {}

  Register getStackAddress(uint64_t Size, int64_t Offset,
                           MachinePointerInfo &MPO) override {
    MachineFunction &MF = MIRBuilder.getMF();
    LLT p0 = LLT::pointer(0, 64);
    LLT s64 = LLT::scalar(64);

    if (IsTailCall) {
      Offset += FPDiff;
      int FI = MF.getFrameInfo().CreateFixedObject(Size, Offset, true);
      auto FIReg = MIRBuilder.buildFrameIndex(p0, FI);
      MPO = MachinePointerInfo::getFixedStack(MF, FI);
      return FIReg.getReg(0);
    }

    if (!SPReg)
      SPReg = MIRBuilder.buildCopy(p0, Register(AArch64::SP)).getReg(0);

    auto OffsetReg = MIRBuilder.buildConstant(s64, Offset);

    auto AddrReg = MIRBuilder.buildPtrAdd(p0, SPReg, OffsetReg);

    MPO = MachinePointerInfo::getStack(MF, Offset);
    return AddrReg.getReg(0);
  }

  void assignValueToReg(Register ValVReg, Register PhysReg,
                        CCValAssign &VA) override {
    MIB.addUse(PhysReg, RegState::Implicit);
    Register ExtReg = extendRegister(ValVReg, VA);
    MIRBuilder.buildCopy(PhysReg, ExtReg);
  }

  void assignValueToAddress(Register ValVReg, Register Addr, uint64_t Size,
                            MachinePointerInfo &MPO, CCValAssign &VA) override {
    MachineFunction &MF = MIRBuilder.getMF();
    auto MMO = MF.getMachineMemOperand(MPO, MachineMemOperand::MOStore, Size,
                                       inferAlignFromPtrInfo(MF, MPO));
    MIRBuilder.buildStore(ValVReg, Addr, *MMO);
  }

  void assignValueToAddress(const CallLowering::ArgInfo &Arg, Register Addr,
                            uint64_t Size, MachinePointerInfo &MPO,
                            CCValAssign &VA) override {
    unsigned MaxSize = Size * 8;
    // For varargs, we always want to extend them to 8 bytes, in which case
    // we disable setting a max.
    if (!Arg.IsFixed)
      MaxSize = 0;

    assert(Arg.Regs.size() == 1);

    Register ValVReg = VA.getLocInfo() != CCValAssign::LocInfo::FPExt
                           ? extendRegister(Arg.Regs[0], VA, MaxSize)
                           : Arg.Regs[0];

    // If we extended we might need to adjust the MMO's Size.
    const LLT RegTy = MRI.getType(ValVReg);
    if (RegTy.getSizeInBytes() > Size)
      Size = RegTy.getSizeInBytes();

    assignValueToAddress(ValVReg, Addr, Size, MPO, VA);
  }

  bool assignArg(unsigned ValNo, MVT ValVT, MVT LocVT,
                 CCValAssign::LocInfo LocInfo,
                 const CallLowering::ArgInfo &Info,
                 ISD::ArgFlagsTy Flags,
                 CCState &State) override {
    bool Res;
    if (Info.IsFixed)
      Res = AssignFn(ValNo, ValVT, LocVT, LocInfo, Flags, State);
    else
      Res = AssignFnVarArg(ValNo, ValVT, LocVT, LocInfo, Flags, State);

    StackSize = State.getNextStackOffset();
    return Res;
  }

  MachineInstrBuilder MIB;
  CCAssignFn *AssignFnVarArg;
  bool IsTailCall;

  /// For tail calls, the byte offset of the call's argument area from the
  /// callee's. Unused elsewhere.
  int FPDiff;
  uint64_t StackSize;

  // Cache the SP register vreg if we need it more than once in this call site.
  Register SPReg;
};
} // namespace

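/// Returns true when the callee is expected to pop its own stack arguments,
/// which AArch64 only does for fastcc functions built with -tailcallopt.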
static bool doesCalleeRestoreStack(CallingConv::ID CallConv, bool TailCallOpt) {
  return CallConv == CallingConv::Fast && TailCallOpt;
}

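// Split OrigArg into one ArgInfo per value type computed by ComputeValueVTs,
// reusing the original flags and marking consecutive-register blocks when the
// calling convention requires the pieces to stay adjacent.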
splitToValueTypes(const ArgInfo & OrigArg,SmallVectorImpl<ArgInfo> & SplitArgs,const DataLayout & DL,MachineRegisterInfo & MRI,CallingConv::ID CallConv) const237 void AArch64CallLowering::splitToValueTypes(
238     const ArgInfo &OrigArg, SmallVectorImpl<ArgInfo> &SplitArgs,
239     const DataLayout &DL, MachineRegisterInfo &MRI, CallingConv::ID CallConv) const {
240   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
241   LLVMContext &Ctx = OrigArg.Ty->getContext();
242 
243   SmallVector<EVT, 4> SplitVTs;
244   SmallVector<uint64_t, 4> Offsets;
245   ComputeValueVTs(TLI, DL, OrigArg.Ty, SplitVTs, &Offsets, 0);
246 
247   if (SplitVTs.size() == 0)
248     return;
249 
250   if (SplitVTs.size() == 1) {
251     // No splitting to do, but we want to replace the original type (e.g. [1 x
252     // double] -> double).
253     SplitArgs.emplace_back(OrigArg.Regs[0], SplitVTs[0].getTypeForEVT(Ctx),
254                            OrigArg.Flags[0], OrigArg.IsFixed);
255     return;
256   }
257 
258   // Create one ArgInfo for each virtual register in the original ArgInfo.
259   assert(OrigArg.Regs.size() == SplitVTs.size() && "Regs / types mismatch");
260 
261   bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
262       OrigArg.Ty, CallConv, false);
263   for (unsigned i = 0, e = SplitVTs.size(); i < e; ++i) {
264     Type *SplitTy = SplitVTs[i].getTypeForEVT(Ctx);
265     SplitArgs.emplace_back(OrigArg.Regs[i], SplitTy, OrigArg.Flags[0],
266                            OrigArg.IsFixed);
267     if (NeedsRegBlock)
268       SplitArgs.back().Flags[0].setInConsecutiveRegs();
269   }
270 
271   SplitArgs.back().Flags[0].setInConsecutiveRegsLast();
272 }
273 
lowerReturn(MachineIRBuilder & MIRBuilder,const Value * Val,ArrayRef<Register> VRegs,Register SwiftErrorVReg) const274 bool AArch64CallLowering::lowerReturn(MachineIRBuilder &MIRBuilder,
275                                       const Value *Val,
276                                       ArrayRef<Register> VRegs,
277                                       Register SwiftErrorVReg) const {
278   auto MIB = MIRBuilder.buildInstrNoInsert(AArch64::RET_ReallyLR);
279   assert(((Val && !VRegs.empty()) || (!Val && VRegs.empty())) &&
280          "Return value without a vreg");
281 
282   bool Success = true;
283   if (!VRegs.empty()) {
284     MachineFunction &MF = MIRBuilder.getMF();
285     const Function &F = MF.getFunction();
286 
287     MachineRegisterInfo &MRI = MF.getRegInfo();
288     const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
289     CCAssignFn *AssignFn = TLI.CCAssignFnForReturn(F.getCallingConv());
290     auto &DL = F.getParent()->getDataLayout();
291     LLVMContext &Ctx = Val->getType()->getContext();
292 
293     SmallVector<EVT, 4> SplitEVTs;
294     ComputeValueVTs(TLI, DL, Val->getType(), SplitEVTs);
295     assert(VRegs.size() == SplitEVTs.size() &&
296            "For each split Type there should be exactly one VReg.");
297 
298     SmallVector<ArgInfo, 8> SplitArgs;
299     CallingConv::ID CC = F.getCallingConv();
300 
301     for (unsigned i = 0; i < SplitEVTs.size(); ++i) {
302       if (TLI.getNumRegistersForCallingConv(Ctx, CC, SplitEVTs[i]) > 1) {
303         LLVM_DEBUG(dbgs() << "Can't handle extended arg types which need split");
304         return false;
305       }
306 
307       Register CurVReg = VRegs[i];
308       ArgInfo CurArgInfo = ArgInfo{CurVReg, SplitEVTs[i].getTypeForEVT(Ctx)};
309       setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
310 
311       // i1 is a special case because SDAG i1 true is naturally zero extended
312       // when widened using ANYEXT. We need to do it explicitly here.
313       if (MRI.getType(CurVReg).getSizeInBits() == 1) {
314         CurVReg = MIRBuilder.buildZExt(LLT::scalar(8), CurVReg).getReg(0);
315       } else {
316         // Some types will need extending as specified by the CC.
317         MVT NewVT = TLI.getRegisterTypeForCallingConv(Ctx, CC, SplitEVTs[i]);
318         if (EVT(NewVT) != SplitEVTs[i]) {
319           unsigned ExtendOp = TargetOpcode::G_ANYEXT;
320           if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
321                                              Attribute::SExt))
322             ExtendOp = TargetOpcode::G_SEXT;
323           else if (F.getAttributes().hasAttribute(AttributeList::ReturnIndex,
324                                                   Attribute::ZExt))
325             ExtendOp = TargetOpcode::G_ZEXT;
326 
327           LLT NewLLT(NewVT);
328           LLT OldLLT(MVT::getVT(CurArgInfo.Ty));
329           CurArgInfo.Ty = EVT(NewVT).getTypeForEVT(Ctx);
330           // Instead of an extend, we might have a vector type which needs
331           // padding with more elements, e.g. <2 x half> -> <4 x half>.
332           if (NewVT.isVector()) {
333             if (OldLLT.isVector()) {
334               if (NewLLT.getNumElements() > OldLLT.getNumElements()) {
                // We don't handle VA types which are not exactly twice the
                // size, but this can easily be done in the future.
                if (NewLLT.getNumElements() != OldLLT.getNumElements() * 2) {
                  LLVM_DEBUG(dbgs() << "Outgoing vector ret has too many elts");
                  return false;
                }
                auto Undef = MIRBuilder.buildUndef({OldLLT});
                CurVReg =
                    MIRBuilder.buildMerge({NewLLT}, {CurVReg, Undef}).getReg(0);
              } else {
                // Just do a vector extend.
                CurVReg = MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg})
                              .getReg(0);
              }
            } else if (NewLLT.getNumElements() == 2) {
              // We need to pad a <1 x S> type to <2 x S>. Since we don't have
              // <1 x S> vector types in GISel we use a build_vector instead
              // of a vector merge/concat.
              auto Undef = MIRBuilder.buildUndef({OldLLT});
              CurVReg =
                  MIRBuilder
                      .buildBuildVector({NewLLT}, {CurVReg, Undef.getReg(0)})
                      .getReg(0);
            } else {
              LLVM_DEBUG(dbgs() << "Could not handle ret ty");
              return false;
            }
          } else {
            // A scalar extend.
            CurVReg =
                MIRBuilder.buildInstr(ExtendOp, {NewLLT}, {CurVReg}).getReg(0);
          }
        }
      }
      if (CurVReg != CurArgInfo.Regs[0]) {
        CurArgInfo.Regs[0] = CurVReg;
        // Reset the arg flags after modifying CurVReg.
        setArgFlags(CurArgInfo, AttributeList::ReturnIndex, DL, F);
      }
      splitToValueTypes(CurArgInfo, SplitArgs, DL, MRI, CC);
    }

    OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFn, AssignFn);
    Success = handleAssignments(MIRBuilder, SplitArgs, Handler);
  }

  if (SwiftErrorVReg) {
    MIB.addUse(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(AArch64::X21, SwiftErrorVReg);
  }

  MIRBuilder.insertInstr(MIB);
  return Success;
}

/// Helper function to compute forwarded registers for musttail calls. Computes
/// the forwarded registers, sets MBB liveness, and emits COPY instructions that
/// can be used to save + restore registers later.
static void handleMustTailForwardedRegisters(MachineIRBuilder &MIRBuilder,
                                             CCAssignFn *AssignFn) {
  MachineBasicBlock &MBB = MIRBuilder.getMBB();
  MachineFunction &MF = MIRBuilder.getMF();
  MachineFrameInfo &MFI = MF.getFrameInfo();

  if (!MFI.hasMustTailInVarArgFunc())
    return;

  AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
  const Function &F = MF.getFunction();
  assert(F.isVarArg() && "Expected F to be vararg?");

  // Compute the set of forwarded registers. The rest are scratch.
  SmallVector<CCValAssign, 16> ArgLocs;
  CCState CCInfo(F.getCallingConv(), /*IsVarArg=*/true, MF, ArgLocs,
                 F.getContext());
  SmallVector<MVT, 2> RegParmTypes;
  RegParmTypes.push_back(MVT::i64);
  RegParmTypes.push_back(MVT::f128);

  // Later on, we can use this vector to restore the registers if necessary.
  SmallVectorImpl<ForwardedRegister> &Forwards =
      FuncInfo->getForwardedMustTailRegParms();
  CCInfo.analyzeMustTailForwardedRegisters(Forwards, RegParmTypes, AssignFn);

  // Conservatively forward X8, since it might be used for an aggregate
  // return.
  if (!CCInfo.isAllocated(AArch64::X8)) {
    Register X8VReg = MF.addLiveIn(AArch64::X8, &AArch64::GPR64RegClass);
    Forwards.push_back(ForwardedRegister(X8VReg, AArch64::X8, MVT::i64));
  }

  // Add the forwards to the MachineBasicBlock and MachineFunction.
  for (const auto &F : Forwards) {
    MBB.addLiveIn(F.PReg);
    MIRBuilder.buildCopy(Register(F.VReg), Register(F.PReg));
  }
}

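// Scalable vector (SVE) types are not supported by this path yet, so bail out
// to SelectionDAG when the return type or any argument uses one.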
fallBackToDAGISel(const Function & F) const433 bool AArch64CallLowering::fallBackToDAGISel(const Function &F) const {
434   if (isa<ScalableVectorType>(F.getReturnType()))
435     return true;
436   return llvm::any_of(F.args(), [](const Argument &A) {
437     return isa<ScalableVectorType>(A.getType());
438   });
439 }
440 
lowerFormalArguments(MachineIRBuilder & MIRBuilder,const Function & F,ArrayRef<ArrayRef<Register>> VRegs) const441 bool AArch64CallLowering::lowerFormalArguments(
442     MachineIRBuilder &MIRBuilder, const Function &F,
443     ArrayRef<ArrayRef<Register>> VRegs) const {
444   MachineFunction &MF = MIRBuilder.getMF();
445   MachineBasicBlock &MBB = MIRBuilder.getMBB();
446   MachineRegisterInfo &MRI = MF.getRegInfo();
447   auto &DL = F.getParent()->getDataLayout();
448 
449   SmallVector<ArgInfo, 8> SplitArgs;
450   unsigned i = 0;
451   for (auto &Arg : F.args()) {
452     if (DL.getTypeStoreSize(Arg.getType()).isZero())
453       continue;
454 
455     ArgInfo OrigArg{VRegs[i], Arg.getType()};
456     setArgFlags(OrigArg, i + AttributeList::FirstArgIndex, DL, F);
457 
458     splitToValueTypes(OrigArg, SplitArgs, DL, MRI, F.getCallingConv());
459     ++i;
460   }
461 
462   if (!MBB.empty())
463     MIRBuilder.setInstr(*MBB.begin());
464 
465   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
466   CCAssignFn *AssignFn =
467       TLI.CCAssignFnForCall(F.getCallingConv(), /*IsVarArg=*/false);
468 
469   FormalArgHandler Handler(MIRBuilder, MRI, AssignFn);
470   if (!handleAssignments(MIRBuilder, SplitArgs, Handler))
471     return false;
472 
473   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
474   uint64_t StackOffset = Handler.StackUsed;
475   if (F.isVarArg()) {
476     auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
477     if (!Subtarget.isTargetDarwin()) {
478         // FIXME: we need to reimplement saveVarArgsRegisters from
479       // AArch64ISelLowering.
480       return false;
481     }
482 
483     // We currently pass all varargs at 8-byte alignment, or 4 in ILP32.
484     StackOffset = alignTo(Handler.StackUsed, Subtarget.isTargetILP32() ? 4 : 8);
485 
486     auto &MFI = MIRBuilder.getMF().getFrameInfo();
487     FuncInfo->setVarArgsStackIndex(MFI.CreateFixedObject(4, StackOffset, true));
488   }
489 
490   if (doesCalleeRestoreStack(F.getCallingConv(),
491                              MF.getTarget().Options.GuaranteedTailCallOpt)) {
492     // We have a non-standard ABI, so why not make full use of the stack that
493     // we're going to pop? It must be aligned to 16 B in any case.
494     StackOffset = alignTo(StackOffset, 16);
495 
496     // If we're expected to restore the stack (e.g. fastcc), then we'll be
497     // adding a multiple of 16.
498     FuncInfo->setArgumentStackToRestore(StackOffset);
499 
500     // Our own callers will guarantee that the space is free by giving an
501     // aligned value to CALLSEQ_START.
502   }
503 
504   // When we tail call, we need to check if the callee's arguments
505   // will fit on the caller's stack. So, whenever we lower formal arguments,
506   // we should keep track of this information, since we might lower a tail call
507   // in this function later.
508   FuncInfo->setBytesInStackArgArea(StackOffset);
509 
510   auto &Subtarget = MF.getSubtarget<AArch64Subtarget>();
511   if (Subtarget.hasCustomCallingConv())
512     Subtarget.getRegisterInfo()->UpdateCustomCalleeSavedRegs(MF);
513 
514   handleMustTailForwardedRegisters(MIRBuilder, AssignFn);
515 
516   // Move back to the end of the basic block.
517   MIRBuilder.setMBB(MBB);
518 
519   return true;
520 }
521 
522 /// Return true if the calling convention is one that we can guarantee TCO for.
canGuaranteeTCO(CallingConv::ID CC)523 static bool canGuaranteeTCO(CallingConv::ID CC) {
524   return CC == CallingConv::Fast;
525 }
526 
527 /// Return true if we might ever do TCO for calls with this calling convention.
mayTailCallThisCC(CallingConv::ID CC)528 static bool mayTailCallThisCC(CallingConv::ID CC) {
529   switch (CC) {
530   case CallingConv::C:
531   case CallingConv::PreserveMost:
532   case CallingConv::Swift:
533     return true;
534   default:
535     return canGuaranteeTCO(CC);
536   }
537 }
538 
539 /// Returns a pair containing the fixed CCAssignFn and the vararg CCAssignFn for
540 /// CC.
541 static std::pair<CCAssignFn *, CCAssignFn *>
getAssignFnsForCC(CallingConv::ID CC,const AArch64TargetLowering & TLI)542 getAssignFnsForCC(CallingConv::ID CC, const AArch64TargetLowering &TLI) {
543   return {TLI.CCAssignFnForCall(CC, false), TLI.CCAssignFnForCall(CC, true)};
544 }
545 
doCallerAndCalleePassArgsTheSameWay(CallLoweringInfo & Info,MachineFunction & MF,SmallVectorImpl<ArgInfo> & InArgs) const546 bool AArch64CallLowering::doCallerAndCalleePassArgsTheSameWay(
547     CallLoweringInfo &Info, MachineFunction &MF,
548     SmallVectorImpl<ArgInfo> &InArgs) const {
549   const Function &CallerF = MF.getFunction();
550   CallingConv::ID CalleeCC = Info.CallConv;
551   CallingConv::ID CallerCC = CallerF.getCallingConv();
552 
553   // If the calling conventions match, then everything must be the same.
554   if (CalleeCC == CallerCC)
555     return true;
556 
557   // Check if the caller and callee will handle arguments in the same way.
558   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
559   CCAssignFn *CalleeAssignFnFixed;
560   CCAssignFn *CalleeAssignFnVarArg;
561   std::tie(CalleeAssignFnFixed, CalleeAssignFnVarArg) =
562       getAssignFnsForCC(CalleeCC, TLI);
563 
564   CCAssignFn *CallerAssignFnFixed;
565   CCAssignFn *CallerAssignFnVarArg;
566   std::tie(CallerAssignFnFixed, CallerAssignFnVarArg) =
567       getAssignFnsForCC(CallerCC, TLI);
568 
569   if (!resultsCompatible(Info, MF, InArgs, *CalleeAssignFnFixed,
570                          *CalleeAssignFnVarArg, *CallerAssignFnFixed,
571                          *CallerAssignFnVarArg))
572     return false;
573 
574   // Make sure that the caller and callee preserve all of the same registers.
575   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
576   const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
577   const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
578   if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv()) {
579     TRI->UpdateCustomCallPreservedMask(MF, &CallerPreserved);
580     TRI->UpdateCustomCallPreservedMask(MF, &CalleePreserved);
581   }
582 
583   return TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved);
584 }
585 
areCalleeOutgoingArgsTailCallable(CallLoweringInfo & Info,MachineFunction & MF,SmallVectorImpl<ArgInfo> & OutArgs) const586 bool AArch64CallLowering::areCalleeOutgoingArgsTailCallable(
587     CallLoweringInfo &Info, MachineFunction &MF,
588     SmallVectorImpl<ArgInfo> &OutArgs) const {
589   // If there are no outgoing arguments, then we are done.
590   if (OutArgs.empty())
591     return true;
592 
593   const Function &CallerF = MF.getFunction();
594   CallingConv::ID CalleeCC = Info.CallConv;
595   CallingConv::ID CallerCC = CallerF.getCallingConv();
596   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
597 
598   CCAssignFn *AssignFnFixed;
599   CCAssignFn *AssignFnVarArg;
600   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
601 
602   // We have outgoing arguments. Make sure that we can tail call with them.
603   SmallVector<CCValAssign, 16> OutLocs;
604   CCState OutInfo(CalleeCC, false, MF, OutLocs, CallerF.getContext());
605 
606   if (!analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg)) {
607     LLVM_DEBUG(dbgs() << "... Could not analyze call operands.\n");
608     return false;
609   }
610 
611   // Make sure that they can fit on the caller's stack.
612   const AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
613   if (OutInfo.getNextStackOffset() > FuncInfo->getBytesInStackArgArea()) {
614     LLVM_DEBUG(dbgs() << "... Cannot fit call operands on caller's stack.\n");
615     return false;
616   }
617 
618   // Verify that the parameters in callee-saved registers match.
619   // TODO: Port this over to CallLowering as general code once swiftself is
620   // supported.
621   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
622   const uint32_t *CallerPreservedMask = TRI->getCallPreservedMask(MF, CallerCC);
623   MachineRegisterInfo &MRI = MF.getRegInfo();
624 
625   for (unsigned i = 0; i < OutLocs.size(); ++i) {
626     auto &ArgLoc = OutLocs[i];
627     // If it's not a register, it's fine.
628     if (!ArgLoc.isRegLoc()) {
629       if (Info.IsVarArg) {
630         // Be conservative and disallow variadic memory operands to match SDAG's
631         // behaviour.
632         // FIXME: If the caller's calling convention is C, then we can
633         // potentially use its argument area. However, for cases like fastcc,
634         // we can't do anything.
635         LLVM_DEBUG(
636             dbgs()
637             << "... Cannot tail call vararg function with stack arguments\n");
638         return false;
639       }
640       continue;
641     }
642 
643     Register Reg = ArgLoc.getLocReg();
644 
645     // Only look at callee-saved registers.
646     if (MachineOperand::clobbersPhysReg(CallerPreservedMask, Reg))
647       continue;
648 
649     LLVM_DEBUG(
650         dbgs()
651         << "... Call has an argument passed in a callee-saved register.\n");
652 
653     // Check if it was copied from.
654     ArgInfo &OutInfo = OutArgs[i];
655 
656     if (OutInfo.Regs.size() > 1) {
657       LLVM_DEBUG(
658           dbgs() << "... Cannot handle arguments in multiple registers.\n");
659       return false;
660     }
661 
662     // Check if we copy the register, walking through copies from virtual
663     // registers. Note that getDefIgnoringCopies does not ignore copies from
664     // physical registers.
665     MachineInstr *RegDef = getDefIgnoringCopies(OutInfo.Regs[0], MRI);
666     if (!RegDef || RegDef->getOpcode() != TargetOpcode::COPY) {
667       LLVM_DEBUG(
668           dbgs()
669           << "... Parameter was not copied into a VReg, cannot tail call.\n");
670       return false;
671     }
672 
673     // Got a copy. Verify that it's the same as the register we want.
674     Register CopyRHS = RegDef->getOperand(1).getReg();
675     if (CopyRHS != Reg) {
676       LLVM_DEBUG(dbgs() << "... Callee-saved register was not copied into "
677                            "VReg, cannot tail call.\n");
678       return false;
679     }
680   }
681 
682   return true;
683 }
684 
isEligibleForTailCallOptimization(MachineIRBuilder & MIRBuilder,CallLoweringInfo & Info,SmallVectorImpl<ArgInfo> & InArgs,SmallVectorImpl<ArgInfo> & OutArgs) const685 bool AArch64CallLowering::isEligibleForTailCallOptimization(
686     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
687     SmallVectorImpl<ArgInfo> &InArgs,
688     SmallVectorImpl<ArgInfo> &OutArgs) const {
689 
690   // Must pass all target-independent checks in order to tail call optimize.
691   if (!Info.IsTailCall)
692     return false;
693 
694   CallingConv::ID CalleeCC = Info.CallConv;
695   MachineFunction &MF = MIRBuilder.getMF();
696   const Function &CallerF = MF.getFunction();
697 
698   LLVM_DEBUG(dbgs() << "Attempting to lower call as tail call\n");
699 
700   if (Info.SwiftErrorVReg) {
701     // TODO: We should handle this.
702     // Note that this is also handled by the check for no outgoing arguments.
703     // Proactively disabling this though, because the swifterror handling in
704     // lowerCall inserts a COPY *after* the location of the call.
705     LLVM_DEBUG(dbgs() << "... Cannot handle tail calls with swifterror yet.\n");
706     return false;
707   }
708 
709   if (!mayTailCallThisCC(CalleeCC)) {
710     LLVM_DEBUG(dbgs() << "... Calling convention cannot be tail called.\n");
711     return false;
712   }
713 
714   // Byval parameters hand the function a pointer directly into the stack area
715   // we want to reuse during a tail call. Working around this *is* possible (see
716   // X86).
717   //
718   // FIXME: In AArch64ISelLowering, this isn't worked around. Can/should we try
719   // it?
720   //
721   // On Windows, "inreg" attributes signify non-aggregate indirect returns.
722   // In this case, it is necessary to save/restore X0 in the callee. Tail
723   // call opt interferes with this. So we disable tail call opt when the
724   // caller has an argument with "inreg" attribute.
725   //
726   // FIXME: Check whether the callee also has an "inreg" argument.
727   //
  // When the caller has a swifterror argument, we don't want to tail call
  // because we would have to move into the swifterror register before the
  // tail call.
  if (any_of(CallerF.args(), [](const Argument &A) {
        return A.hasByValAttr() || A.hasInRegAttr() || A.hasSwiftErrorAttr();
      })) {
    LLVM_DEBUG(dbgs() << "... Cannot tail call from callers with byval, "
                         "inreg, or swifterror arguments\n");
    return false;
  }

  // Externally-defined functions with weak linkage should not be
  // tail-called on AArch64 when the OS does not support dynamic
  // pre-emption of symbols, as the AAELF spec requires normal calls
  // to undefined weak functions to be replaced with a NOP or jump to the
  // next instruction. The behaviour of branch instructions in this
  // situation (as used for tail calls) is implementation-defined, so we
  // cannot rely on the linker replacing the tail call with a return.
  if (Info.Callee.isGlobal()) {
    const GlobalValue *GV = Info.Callee.getGlobal();
    const Triple &TT = MF.getTarget().getTargetTriple();
    if (GV->hasExternalWeakLinkage() &&
        (!TT.isOSWindows() || TT.isOSBinFormatELF() ||
         TT.isOSBinFormatMachO())) {
      LLVM_DEBUG(dbgs() << "... Cannot tail call externally-defined function "
                           "with weak linkage for this OS.\n");
      return false;
    }
  }

  // If we have -tailcallopt, then we're done.
  if (MF.getTarget().Options.GuaranteedTailCallOpt)
    return canGuaranteeTCO(CalleeCC) && CalleeCC == CallerF.getCallingConv();

  // We don't have -tailcallopt, so we're allowed to change the ABI (sibcall).
  // Try to find cases where we can do that.

  // I want anyone implementing a new calling convention to think long and hard
  // about this assert.
  assert((!Info.IsVarArg || CalleeCC == CallingConv::C) &&
         "Unexpected variadic calling convention");

  // Verify that the incoming and outgoing arguments from the callee are
  // safe to tail call.
  if (!doCallerAndCalleePassArgsTheSameWay(Info, MF, InArgs)) {
    LLVM_DEBUG(
        dbgs()
        << "... Caller and callee have incompatible calling conventions.\n");
    return false;
  }

  if (!areCalleeOutgoingArgsTailCallable(Info, MF, OutArgs))
    return false;

  LLVM_DEBUG(
      dbgs() << "... Call is eligible for tail call optimization.\n");
  return true;
}

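/// Pick the machine opcode for a call: BL/BLR-style opcodes for ordinary
/// calls, and the TCRETURN pseudos for tail calls (using the BTI-safe variant
/// when branch target enforcement is enabled).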
getCallOpcode(const MachineFunction & CallerF,bool IsIndirect,bool IsTailCall)787 static unsigned getCallOpcode(const MachineFunction &CallerF, bool IsIndirect,
788                               bool IsTailCall) {
789   if (!IsTailCall)
790     return IsIndirect ? getBLRCallOpcode(CallerF) : (unsigned)AArch64::BL;
791 
792   if (!IsIndirect)
793     return AArch64::TCRETURNdi;
794 
795   // When BTI is enabled, we need to use TCRETURNriBTI to make sure that we use
796   // x16 or x17.
797   if (CallerF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement())
798     return AArch64::TCRETURNriBTI;
799 
800   return AArch64::TCRETURNri;
801 }
802 
lowerTailCall(MachineIRBuilder & MIRBuilder,CallLoweringInfo & Info,SmallVectorImpl<ArgInfo> & OutArgs) const803 bool AArch64CallLowering::lowerTailCall(
804     MachineIRBuilder &MIRBuilder, CallLoweringInfo &Info,
805     SmallVectorImpl<ArgInfo> &OutArgs) const {
806   MachineFunction &MF = MIRBuilder.getMF();
807   const Function &F = MF.getFunction();
808   MachineRegisterInfo &MRI = MF.getRegInfo();
809   const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();
810   AArch64FunctionInfo *FuncInfo = MF.getInfo<AArch64FunctionInfo>();
811 
812   // True when we're tail calling, but without -tailcallopt.
813   bool IsSibCall = !MF.getTarget().Options.GuaranteedTailCallOpt;
814 
815   // TODO: Right now, regbankselect doesn't know how to handle the rtcGPR64
816   // register class. Until we can do that, we should fall back here.
817   if (MF.getInfo<AArch64FunctionInfo>()->branchTargetEnforcement()) {
818     LLVM_DEBUG(
819         dbgs() << "Cannot lower indirect tail calls with BTI enabled yet.\n");
820     return false;
821   }
822 
823   // Find out which ABI gets to decide where things go.
824   CallingConv::ID CalleeCC = Info.CallConv;
825   CCAssignFn *AssignFnFixed;
826   CCAssignFn *AssignFnVarArg;
827   std::tie(AssignFnFixed, AssignFnVarArg) = getAssignFnsForCC(CalleeCC, TLI);
828 
829   MachineInstrBuilder CallSeqStart;
830   if (!IsSibCall)
831     CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);
832 
833   unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), true);
834   auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
835   MIB.add(Info.Callee);
836 
837   // Byte offset for the tail call. When we are sibcalling, this will always
838   // be 0.
839   MIB.addImm(0);
840 
841   // Tell the call which registers are clobbered.
842   auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
843   const uint32_t *Mask = TRI->getCallPreservedMask(MF, CalleeCC);
844   if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
845     TRI->UpdateCustomCallPreservedMask(MF, &Mask);
846   MIB.addRegMask(Mask);
847 
848   if (TRI->isAnyArgRegReserved(MF))
849     TRI->emitReservedArgRegCallError(MF);
850 
851   // FPDiff is the byte offset of the call's argument area from the callee's.
852   // Stores to callee stack arguments will be placed in FixedStackSlots offset
853   // by this amount for a tail call. In a sibling call it must be 0 because the
854   // caller will deallocate the entire stack and the callee still expects its
855   // arguments to begin at SP+0.
856   int FPDiff = 0;
857 
858   // This will be 0 for sibcalls, potentially nonzero for tail calls produced
859   // by -tailcallopt. For sibcalls, the memory operands for the call are
860   // already available in the caller's incoming argument space.
861   unsigned NumBytes = 0;
862   if (!IsSibCall) {
863     // We aren't sibcalling, so we need to compute FPDiff. We need to do this
864     // before handling assignments, because FPDiff must be known for memory
865     // arguments.
866     unsigned NumReusableBytes = FuncInfo->getBytesInStackArgArea();
867     SmallVector<CCValAssign, 16> OutLocs;
868     CCState OutInfo(CalleeCC, false, MF, OutLocs, F.getContext());
869     analyzeArgInfo(OutInfo, OutArgs, *AssignFnFixed, *AssignFnVarArg);
870 
871     // The callee will pop the argument stack as a tail call. Thus, we must
872     // keep it 16-byte aligned.
873     NumBytes = alignTo(OutInfo.getNextStackOffset(), 16);
874 
875     // FPDiff will be negative if this tail call requires more space than we
876     // would automatically have in our incoming argument space. Positive if we
877     // actually shrink the stack.
878     FPDiff = NumReusableBytes - NumBytes;
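    // Purely illustrative numbers: with 32 bytes of reusable incoming argument
    // space and 48 bytes of outgoing arguments (after 16-byte alignment),
    // FPDiff is 32 - 48 = -16, and the fixed stack objects created in
    // OutgoingArgHandler::getStackAddress above are offset by that amount.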

    // The stack pointer must be 16-byte aligned at all times it's used for a
    // memory operation, which in practice means at *all* times and in
    // particular across call boundaries. Therefore our own arguments started at
    // a 16-byte aligned SP and the delta applied for the tail call should
    // satisfy the same constraint.
    assert(FPDiff % 16 == 0 && "unaligned stack on tail call");
  }

  const auto &Forwards = FuncInfo->getForwardedMustTailRegParms();

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, true, FPDiff);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  if (Info.IsVarArg && Info.IsMustTailCall) {
    // Now we know what's being passed to the function. Add uses to the call for
    // the forwarded registers that we *aren't* passing as parameters. This will
    // preserve the copies we build earlier.
    for (const auto &F : Forwards) {
      Register ForwardedReg = F.PReg;
      // If the register is already passed, or aliases a register which is
      // already being passed, then skip it.
      if (any_of(MIB->uses(), [&ForwardedReg, &TRI](const MachineOperand &Use) {
            if (!Use.isReg())
              return false;
            return TRI->regsOverlap(Use.getReg(), ForwardedReg);
          }))
        continue;

      // We aren't passing it already, so we should add it to the call.
      MIRBuilder.buildCopy(ForwardedReg, Register(F.VReg));
      MIB.addReg(ForwardedReg, RegState::Implicit);
    }
  }

  // If we have -tailcallopt, we need to adjust the stack. We'll do the call
  // sequence start and end here.
  if (!IsSibCall) {
    MIB->getOperand(1).setImm(FPDiff);
    CallSeqStart.addImm(NumBytes).addImm(0);
    // End the call sequence *before* emitting the call. Normally, we would
    // tidy the frame up after the call. However, here, we've laid out the
    // parameters so that when SP is reset, they will be in the correct
    // location.
    MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP).addImm(NumBytes).addImm(0);
  }

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific instruction,
  // it must have a register class matching the constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  MF.getFrameInfo().setHasTailCall();
  Info.LoweredTailCall = true;
  return true;
}

bool AArch64CallLowering::lowerCall(MachineIRBuilder &MIRBuilder,
                                    CallLoweringInfo &Info) const {
  MachineFunction &MF = MIRBuilder.getMF();
  const Function &F = MF.getFunction();
  MachineRegisterInfo &MRI = MF.getRegInfo();
  auto &DL = F.getParent()->getDataLayout();
  const AArch64TargetLowering &TLI = *getTLI<AArch64TargetLowering>();

  SmallVector<ArgInfo, 8> OutArgs;
  for (auto &OrigArg : Info.OrigArgs) {
    splitToValueTypes(OrigArg, OutArgs, DL, MRI, Info.CallConv);
    // AAPCS requires that we zero-extend i1 to 8 bits by the caller.
    if (OrigArg.Ty->isIntegerTy(1))
      OutArgs.back().Flags[0].setZExt();
  }

  SmallVector<ArgInfo, 8> InArgs;
  if (!Info.OrigRet.Ty->isVoidTy())
    splitToValueTypes(Info.OrigRet, InArgs, DL, MRI, F.getCallingConv());

  // If we can lower as a tail call, do that instead.
  bool CanTailCallOpt =
      isEligibleForTailCallOptimization(MIRBuilder, Info, InArgs, OutArgs);

  // We must emit a tail call if we have musttail.
  if (Info.IsMustTailCall && !CanTailCallOpt) {
    // There are types of incoming/outgoing arguments we can't handle yet, so
    // it doesn't make sense to actually die here like in ISelLowering. Instead,
    // fall back to SelectionDAG and let it try to handle this.
    LLVM_DEBUG(dbgs() << "Failed to lower musttail call as tail call\n");
    return false;
  }

  if (CanTailCallOpt)
    return lowerTailCall(MIRBuilder, Info, OutArgs);

  // Find out which ABI gets to decide where things go.
  CCAssignFn *AssignFnFixed;
  CCAssignFn *AssignFnVarArg;
  std::tie(AssignFnFixed, AssignFnVarArg) =
      getAssignFnsForCC(Info.CallConv, TLI);

  MachineInstrBuilder CallSeqStart;
  CallSeqStart = MIRBuilder.buildInstr(AArch64::ADJCALLSTACKDOWN);

  // Create a temporarily-floating call instruction so we can add the implicit
  // uses of arg registers.
  unsigned Opc = getCallOpcode(MF, Info.Callee.isReg(), false);

  auto MIB = MIRBuilder.buildInstrNoInsert(Opc);
  MIB.add(Info.Callee);

  // Tell the call which registers are clobbered.
  auto TRI = MF.getSubtarget<AArch64Subtarget>().getRegisterInfo();
  const uint32_t *Mask = TRI->getCallPreservedMask(MF, Info.CallConv);
  if (MF.getSubtarget<AArch64Subtarget>().hasCustomCallingConv())
    TRI->UpdateCustomCallPreservedMask(MF, &Mask);
  MIB.addRegMask(Mask);

  if (TRI->isAnyArgRegReserved(MF))
    TRI->emitReservedArgRegCallError(MF);

  // Do the actual argument marshalling.
  OutgoingArgHandler Handler(MIRBuilder, MRI, MIB, AssignFnFixed,
                             AssignFnVarArg, false);
  if (!handleAssignments(MIRBuilder, OutArgs, Handler))
    return false;

  // Now we can add the actual call instruction to the correct basic block.
  MIRBuilder.insertInstr(MIB);

  // If Callee is a reg, since it is used by a target specific
  // instruction, it must have a register class matching the
  // constraint of that instruction.
  if (Info.Callee.isReg())
    MIB->getOperand(0).setReg(constrainOperandRegClass(
        MF, *TRI, MRI, *MF.getSubtarget().getInstrInfo(),
        *MF.getSubtarget().getRegBankInfo(), *MIB, MIB->getDesc(), Info.Callee,
        0));

  // Finally we can copy the returned value back into its virtual-register. In
  // symmetry with the arguments, the physical register must be an
  // implicit-define of the call instruction.
  if (!Info.OrigRet.Ty->isVoidTy()) {
    CCAssignFn *RetAssignFn = TLI.CCAssignFnForReturn(Info.CallConv);
    CallReturnHandler Handler(MIRBuilder, MRI, MIB, RetAssignFn);
    if (!handleAssignments(MIRBuilder, InArgs, Handler))
      return false;
  }

  if (Info.SwiftErrorVReg) {
    MIB.addDef(AArch64::X21, RegState::Implicit);
    MIRBuilder.buildCopy(Info.SwiftErrorVReg, Register(AArch64::X21));
  }

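  // When the callee pops its own arguments (fastcc with -tailcallopt), report
  // how many bytes it removes so the call-sequence end can record it;
  // otherwise the callee pops nothing.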
  uint64_t CalleePopBytes =
      doesCalleeRestoreStack(Info.CallConv,
                             MF.getTarget().Options.GuaranteedTailCallOpt)
          ? alignTo(Handler.StackSize, 16)
          : 0;

  CallSeqStart.addImm(Handler.StackSize).addImm(0);
  MIRBuilder.buildInstr(AArch64::ADJCALLSTACKUP)
      .addImm(Handler.StackSize)
      .addImm(CalleePopBytes);

  return true;
}
1053