//===-- CallingConvLower.cpp - Calling Conventions ------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the CCState class, used for lowering and implementing
// calling conventions.
//
//===----------------------------------------------------------------------===//
14
15 #include "llvm/CodeGen/CallingConvLower.h"
16 #include "llvm/CodeGen/MachineFrameInfo.h"
17 #include "llvm/CodeGen/MachineRegisterInfo.h"
18 #include "llvm/IR/DataLayout.h"
19 #include "llvm/Support/Debug.h"
20 #include "llvm/Support/ErrorHandling.h"
21 #include "llvm/Support/SaveAndRestore.h"
22 #include "llvm/Support/raw_ostream.h"
23 #include "llvm/Target/TargetLowering.h"
24 #include "llvm/Target/TargetRegisterInfo.h"
25 #include "llvm/Target/TargetSubtargetInfo.h"
26 using namespace llvm;
27
CCState(CallingConv::ID CC,bool isVarArg,MachineFunction & mf,SmallVectorImpl<CCValAssign> & locs,LLVMContext & C)28 CCState::CCState(CallingConv::ID CC, bool isVarArg, MachineFunction &mf,
29 SmallVectorImpl<CCValAssign> &locs, LLVMContext &C)
30 : CallingConv(CC), IsVarArg(isVarArg), MF(mf),
31 TRI(*MF.getSubtarget().getRegisterInfo()), Locs(locs), Context(C),
32 CallOrPrologue(Unknown) {
33 // No stack is used.
34 StackOffset = 0;
35 MaxStackArgAlign = 1;
36
37 clearByValRegsInfo();
38 UsedRegs.resize((TRI.getNumRegs()+31)/32);
39 }
40
41 /// Allocate space on the stack large enough to pass an argument by value.
42 /// The size and alignment information of the argument is encoded in
43 /// its parameter attribute.
HandleByVal(unsigned ValNo,MVT ValVT,MVT LocVT,CCValAssign::LocInfo LocInfo,int MinSize,int MinAlign,ISD::ArgFlagsTy ArgFlags)44 void CCState::HandleByVal(unsigned ValNo, MVT ValVT,
45 MVT LocVT, CCValAssign::LocInfo LocInfo,
46 int MinSize, int MinAlign,
47 ISD::ArgFlagsTy ArgFlags) {
48 unsigned Align = ArgFlags.getByValAlign();
49 unsigned Size = ArgFlags.getByValSize();
50 if (MinSize > (int)Size)
51 Size = MinSize;
52 if (MinAlign > (int)Align)
53 Align = MinAlign;
54 ensureMaxAlignment(Align);
55 MF.getSubtarget().getTargetLowering()->HandleByVal(this, Size, Align);
56 Size = unsigned(alignTo(Size, MinAlign));
57 unsigned Offset = AllocateStack(Size, Align);
58 addLoc(CCValAssign::getMem(ValNo, ValVT, Offset, LocVT, LocInfo));
59 }
60
61 /// Mark a register and all of its aliases as allocated.
MarkAllocated(unsigned Reg)62 void CCState::MarkAllocated(unsigned Reg) {
63 for (MCRegAliasIterator AI(Reg, &TRI, true); AI.isValid(); ++AI)
64 UsedRegs[*AI/32] |= 1 << (*AI&31);
65 }
66
67 /// Analyze an array of argument values,
68 /// incorporating info about the formals into this state.
69 void
AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> & Ins,CCAssignFn Fn)70 CCState::AnalyzeFormalArguments(const SmallVectorImpl<ISD::InputArg> &Ins,
71 CCAssignFn Fn) {
72 unsigned NumArgs = Ins.size();
73
74 for (unsigned i = 0; i != NumArgs; ++i) {
75 MVT ArgVT = Ins[i].VT;
76 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
77 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
78 #ifndef NDEBUG
79 dbgs() << "Formal argument #" << i << " has unhandled type "
80 << EVT(ArgVT).getEVTString() << '\n';
81 #endif
82 llvm_unreachable(nullptr);
83 }
84 }
85 }
86
87 /// Analyze the return values of a function, returning true if the return can
88 /// be performed without sret-demotion and false otherwise.
CheckReturn(const SmallVectorImpl<ISD::OutputArg> & Outs,CCAssignFn Fn)89 bool CCState::CheckReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
90 CCAssignFn Fn) {
91 // Determine which register each value should be copied into.
92 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
93 MVT VT = Outs[i].VT;
94 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
95 if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this))
96 return false;
97 }
98 return true;
99 }
100
101 /// Analyze the returned values of a return,
102 /// incorporating info about the result values into this state.
AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> & Outs,CCAssignFn Fn)103 void CCState::AnalyzeReturn(const SmallVectorImpl<ISD::OutputArg> &Outs,
104 CCAssignFn Fn) {
105 // Determine which register each value should be copied into.
106 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
107 MVT VT = Outs[i].VT;
108 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
109 if (Fn(i, VT, VT, CCValAssign::Full, ArgFlags, *this)) {
110 #ifndef NDEBUG
111 dbgs() << "Return operand #" << i << " has unhandled type "
112 << EVT(VT).getEVTString() << '\n';
113 #endif
114 llvm_unreachable(nullptr);
115 }
116 }
117 }
118
119 /// Analyze the outgoing arguments to a call,
120 /// incorporating info about the passed values into this state.
AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> & Outs,CCAssignFn Fn)121 void CCState::AnalyzeCallOperands(const SmallVectorImpl<ISD::OutputArg> &Outs,
122 CCAssignFn Fn) {
123 unsigned NumOps = Outs.size();
124 for (unsigned i = 0; i != NumOps; ++i) {
125 MVT ArgVT = Outs[i].VT;
126 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
127 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
128 #ifndef NDEBUG
129 dbgs() << "Call operand #" << i << " has unhandled type "
130 << EVT(ArgVT).getEVTString() << '\n';
131 #endif
132 llvm_unreachable(nullptr);
133 }
134 }
135 }
136
137 /// Same as above except it takes vectors of types and argument flags.
AnalyzeCallOperands(SmallVectorImpl<MVT> & ArgVTs,SmallVectorImpl<ISD::ArgFlagsTy> & Flags,CCAssignFn Fn)138 void CCState::AnalyzeCallOperands(SmallVectorImpl<MVT> &ArgVTs,
139 SmallVectorImpl<ISD::ArgFlagsTy> &Flags,
140 CCAssignFn Fn) {
141 unsigned NumOps = ArgVTs.size();
142 for (unsigned i = 0; i != NumOps; ++i) {
143 MVT ArgVT = ArgVTs[i];
144 ISD::ArgFlagsTy ArgFlags = Flags[i];
145 if (Fn(i, ArgVT, ArgVT, CCValAssign::Full, ArgFlags, *this)) {
146 #ifndef NDEBUG
147 dbgs() << "Call operand #" << i << " has unhandled type "
148 << EVT(ArgVT).getEVTString() << '\n';
149 #endif
150 llvm_unreachable(nullptr);
151 }
152 }
153 }
154
155 /// Analyze the return values of a call, incorporating info about the passed
156 /// values into this state.
AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> & Ins,CCAssignFn Fn)157 void CCState::AnalyzeCallResult(const SmallVectorImpl<ISD::InputArg> &Ins,
158 CCAssignFn Fn) {
159 for (unsigned i = 0, e = Ins.size(); i != e; ++i) {
160 MVT VT = Ins[i].VT;
161 ISD::ArgFlagsTy Flags = Ins[i].Flags;
162 if (Fn(i, VT, VT, CCValAssign::Full, Flags, *this)) {
163 #ifndef NDEBUG
164 dbgs() << "Call result #" << i << " has unhandled type "
165 << EVT(VT).getEVTString() << '\n';
166 #endif
167 llvm_unreachable(nullptr);
168 }
169 }
170 }
171
172 /// Same as above except it's specialized for calls that produce a single value.
AnalyzeCallResult(MVT VT,CCAssignFn Fn)173 void CCState::AnalyzeCallResult(MVT VT, CCAssignFn Fn) {
174 if (Fn(0, VT, VT, CCValAssign::Full, ISD::ArgFlagsTy(), *this)) {
175 #ifndef NDEBUG
176 dbgs() << "Call result has unhandled type "
177 << EVT(VT).getEVTString() << '\n';
178 #endif
179 llvm_unreachable(nullptr);
180 }
181 }
182
isValueTypeInRegForCC(CallingConv::ID CC,MVT VT)183 static bool isValueTypeInRegForCC(CallingConv::ID CC, MVT VT) {
184 if (VT.isVector())
185 return true; // Assume -msse-regparm might be in effect.
186 if (!VT.isInteger())
187 return false;
188 if (CC == CallingConv::X86_VectorCall || CC == CallingConv::X86_FastCall)
189 return true;
190 return false;
191 }
192
getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> & Regs,MVT VT,CCAssignFn Fn)193 void CCState::getRemainingRegParmsForType(SmallVectorImpl<MCPhysReg> &Regs,
194 MVT VT, CCAssignFn Fn) {
195 unsigned SavedStackOffset = StackOffset;
196 unsigned SavedMaxStackArgAlign = MaxStackArgAlign;
197 unsigned NumLocs = Locs.size();
198
199 // Set the 'inreg' flag if it is used for this calling convention.
200 ISD::ArgFlagsTy Flags;
201 if (isValueTypeInRegForCC(CallingConv, VT))
202 Flags.setInReg();
203
204 // Allocate something of this value type repeatedly until we get assigned a
205 // location in memory.
206 bool HaveRegParm = true;
207 while (HaveRegParm) {
208 if (Fn(0, VT, VT, CCValAssign::Full, Flags, *this)) {
209 #ifndef NDEBUG
210 dbgs() << "Call has unhandled type " << EVT(VT).getEVTString()
211 << " while computing remaining regparms\n";
212 #endif
213 llvm_unreachable(nullptr);
214 }
215 HaveRegParm = Locs.back().isRegLoc();
216 }
217
218 // Copy all the registers from the value locations we added.
219 assert(NumLocs < Locs.size() && "CC assignment failed to add location");
220 for (unsigned I = NumLocs, E = Locs.size(); I != E; ++I)
221 if (Locs[I].isRegLoc())
222 Regs.push_back(MCPhysReg(Locs[I].getLocReg()));
223
224 // Clear the assigned values and stack memory. We leave the registers marked
225 // as allocated so that future queries don't return the same registers, i.e.
226 // when i64 and f64 are both passed in GPRs.
227 StackOffset = SavedStackOffset;
228 MaxStackArgAlign = SavedMaxStackArgAlign;
229 Locs.resize(NumLocs);
230 }
231
analyzeMustTailForwardedRegisters(SmallVectorImpl<ForwardedRegister> & Forwards,ArrayRef<MVT> RegParmTypes,CCAssignFn Fn)232 void CCState::analyzeMustTailForwardedRegisters(
233 SmallVectorImpl<ForwardedRegister> &Forwards, ArrayRef<MVT> RegParmTypes,
234 CCAssignFn Fn) {
235 // Oftentimes calling conventions will not user register parameters for
236 // variadic functions, so we need to assume we're not variadic so that we get
237 // all the registers that might be used in a non-variadic call.
238 SaveAndRestore<bool> SavedVarArg(IsVarArg, false);
239 SaveAndRestore<bool> SavedMustTail(AnalyzingMustTailForwardedRegs, true);
240
241 for (MVT RegVT : RegParmTypes) {
242 SmallVector<MCPhysReg, 8> RemainingRegs;
243 getRemainingRegParmsForType(RemainingRegs, RegVT, Fn);
244 const TargetLowering *TL = MF.getSubtarget().getTargetLowering();
245 const TargetRegisterClass *RC = TL->getRegClassFor(RegVT);
246 for (MCPhysReg PReg : RemainingRegs) {
247 unsigned VReg = MF.addLiveIn(PReg, RC);
248 Forwards.push_back(ForwardedRegister(VReg, PReg, RegVT));
249 }
250 }
251 }
252
resultsCompatible(CallingConv::ID CalleeCC,CallingConv::ID CallerCC,MachineFunction & MF,LLVMContext & C,const SmallVectorImpl<ISD::InputArg> & Ins,CCAssignFn CalleeFn,CCAssignFn CallerFn)253 bool CCState::resultsCompatible(CallingConv::ID CalleeCC,
254 CallingConv::ID CallerCC, MachineFunction &MF,
255 LLVMContext &C,
256 const SmallVectorImpl<ISD::InputArg> &Ins,
257 CCAssignFn CalleeFn, CCAssignFn CallerFn) {
258 if (CalleeCC == CallerCC)
259 return true;
260 SmallVector<CCValAssign, 4> RVLocs1;
261 CCState CCInfo1(CalleeCC, false, MF, RVLocs1, C);
262 CCInfo1.AnalyzeCallResult(Ins, CalleeFn);
263
264 SmallVector<CCValAssign, 4> RVLocs2;
265 CCState CCInfo2(CallerCC, false, MF, RVLocs2, C);
266 CCInfo2.AnalyzeCallResult(Ins, CallerFn);
267
268 if (RVLocs1.size() != RVLocs2.size())
269 return false;
270 for (unsigned I = 0, E = RVLocs1.size(); I != E; ++I) {
271 const CCValAssign &Loc1 = RVLocs1[I];
272 const CCValAssign &Loc2 = RVLocs2[I];
273 if (Loc1.getLocInfo() != Loc2.getLocInfo())
274 return false;
275 bool RegLoc1 = Loc1.isRegLoc();
276 if (RegLoc1 != Loc2.isRegLoc())
277 return false;
278 if (RegLoc1) {
279 if (Loc1.getLocReg() != Loc2.getLocReg())
280 return false;
281 } else {
282 if (Loc1.getLocMemOffset() != Loc2.getLocMemOffset())
283 return false;
284 }
285 }
286 return true;
287 }
288