//===-- X86CallingConv.td - Calling Conventions X86 32/64 --*- tablegen -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This describes the calling conventions for the X86-32 and X86-64
// architectures.
//
//===----------------------------------------------------------------------===//

/// CCIfSubtarget - Match if the current subtarget has a feature F.
class CCIfSubtarget<string F, CCAction A>
  : CCIf<!strconcat("State.getTarget().getSubtarget<X86Subtarget>().", F), A>;

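// For example, the X86-32 return convention below wraps a register assignment
// as CCIfSubtarget<"hasSSE2()", ...> so that the XMM assignment only applies
// when the current subtarget reports SSE2.
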
//===----------------------------------------------------------------------===//
// Return Value Calling Conventions
//===----------------------------------------------------------------------===//

// Return-value conventions common to all X86 CC's.
def RetCC_X86Common : CallingConv<[
  // Scalar values are returned in AX first, then DX.  For i8, the ABI
  // requires the values to be in AL and AH; however, this code uses AL and DL
  // instead. This is because using AH for the second register conflicts with
  // the way LLVM does multiple return values -- a return of {i16,i8} would end
  // up in AX and AH, which overlap. Front-ends wishing to conform to the ABI
  // for functions that return two i8 values are currently expected to pack the
  // values into an i16 (which uses AX, and thus AL:AH).
  //
  // For code that doesn't care about the ABI, we allow returning more than two
  // integer values in registers.
  CCIfType<[i8] , CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,
  CCIfType<[i64], CCAssignToReg<[RAX, RDX, RCX]>>,

  // Vector types are returned in XMM0 and XMM1, when they fit.  XMM2 and XMM3
  // can only be used by ABI non-compliant code. If the target doesn't have XMM
  // registers, it won't have vector types.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit vectors are returned in YMM0 and YMM1, when they fit. YMM2 and YMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX target feature.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit vectors are returned in ZMM0 and ZMM1, when they fit. ZMM2 and ZMM3
  // can only be used by ABI non-compliant code. This vector type is only
  // supported while using the AVX-512 target feature.
  CCIfType<[v16i32, v8i64, v16f32, v8f64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // MMX vector types are always returned in MM0. If the target doesn't have
  // MM0, it doesn't support these vector types.
  CCIfType<[x86mmx], CCAssignToReg<[MM0]>>,

  // Long double types are always returned in ST0 (even with SSE).
  CCIfType<[f80], CCAssignToReg<[ST0, ST1]>>
]>;

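// Note on how the list above is applied: the actions in a CallingConv are
// tried in order for each value, and CCAssignToReg hands out the first
// register in its list that has not already been used by this convention.
// So, for example, a second i32 return value lands in EDX and a third in ECX
// before anything falls through to a later rule.
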
// X86-32 C return-value convention.
def RetCC_X86_32_C : CallingConv<[
  // The X86-32 calling convention returns FP values in ST0, unless marked
  // with "inreg" (used here to distinguish one kind of reg from another,
  // weirdly; this is really the sse-regparm calling convention), in which
  // case they use XMM0; otherwise it is the same as the common X86 calling
  // convention.
  CCIfInReg<CCIfSubtarget<"hasSSE2()",
    CCIfType<[f32, f64], CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,
  CCIfType<[f32,f64], CCAssignToReg<[ST0, ST1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

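// For example, an f32 return marked 'inreg' on a subtarget with SSE2 is
// assigned to XMM0 by the first rule above; without 'inreg' (or without SSE2)
// the same value falls through to the next rule and is returned in ST0.
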
// X86-32 FastCC return-value convention.
def RetCC_X86_32_Fast : CallingConv<[
  // The X86-32 fastcc returns 1, 2, or 3 FP values in XMM0-2 if the target has
  // SSE2.
  // This can happen when a float, 2 x float, or 3 x float vector is split by
  // target lowering, and is returned in 1-3 SSE regs.
  CCIfType<[f32], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,
  CCIfType<[f64], CCIfSubtarget<"hasSSE2()", CCAssignToReg<[XMM0,XMM1,XMM2]>>>,

  // For integers, ECX can be used as an extra return register.
  CCIfType<[i8],  CCAssignToReg<[AL, DL, CL]>>,
  CCIfType<[i16], CCAssignToReg<[AX, DX, CX]>>,
  CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>,

  // Otherwise, it is the same as the common X86 calling convention.
  CCDelegateTo<RetCC_X86Common>
]>;

// Intel_OCL_BI return-value convention.
def RetCC_Intel_OCL_BI : CallingConv<[
  // Vector types are returned in XMM0, XMM1, XMM2 and XMM3.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
            CCAssignToReg<[XMM0,XMM1,XMM2,XMM3]>>,

  // 256-bit FP vectors
  // No more than 4 registers
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
            CCAssignToReg<[YMM0,YMM1,YMM2,YMM3]>>,

  // 512-bit FP vectors
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
            CCAssignToReg<[ZMM0,ZMM1,ZMM2,ZMM3]>>,

  // i32, i64 in the standard way
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-32 HiPE return-value convention.
def RetCC_X86_32_HiPE : CallingConv<[
  // Promote all types to i32
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX]>>
]>;

// X86-64 C return-value convention.
def RetCC_X86_64_C : CallingConv<[
  // The X86-64 calling convention returns FP values in XMM0 (and XMM1 for a
  // second value).
  CCIfType<[f32], CCAssignToReg<[XMM0, XMM1]>>,
  CCIfType<[f64], CCAssignToReg<[XMM0, XMM1]>>,

  // MMX vector types are returned in XMM0 (XMM1 for a second value).
  CCIfType<[x86mmx], CCAssignToReg<[XMM0, XMM1]>>,
  CCDelegateTo<RetCC_X86Common>
]>;

// X86-Win64 C return-value convention.
def RetCC_X86_Win64_C : CallingConv<[
  // The X86-Win64 calling convention always returns __m64 values in RAX.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // Otherwise, everything is the same as 'normal' X86-64 C CC.
  CCDelegateTo<RetCC_X86_64_C>
]>;

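// The bitconvert above means an __m64 return is re-typed as i64 and then
// delegated, so it ends up in RAX via the i64 rule in RetCC_X86Common.
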
// X86-64 HiPE return-value convention.
def RetCC_X86_64_HiPE : CallingConv<[
  // Promote all types to i64
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Return: HP, P, VAL1, VAL2
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RAX, RDX]>>
]>;

// This is the root return-value convention for the X86-32 backend.
def RetCC_X86_32 : CallingConv<[
  // If FastCC, use RetCC_X86_32_Fast.
  CCIfCC<"CallingConv::Fast", CCDelegateTo<RetCC_X86_32_Fast>>,
  // If HiPE, use RetCC_X86_32_HiPE.
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_32_HiPE>>,

  // Otherwise, use RetCC_X86_32_C.
  CCDelegateTo<RetCC_X86_32_C>
]>;

// This is the root return-value convention for the X86-64 backend.
def RetCC_X86_64 : CallingConv<[
  // HiPE uses RetCC_X86_64_HiPE
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<RetCC_X86_64_HiPE>>,

  // Handle explicit CC selection
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<RetCC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<RetCC_X86_64_C>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<RetCC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<RetCC_X86_64_C>
]>;

// This is the return-value convention used for the entire X86 backend.
def RetCC_X86 : CallingConv<[

  // Check if this is the Intel OpenCL built-ins calling convention
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<RetCC_Intel_OCL_BI>>,

  CCIfSubtarget<"is64Bit()", CCDelegateTo<RetCC_X86_64>>,
  CCDelegateTo<RetCC_X86_32>
]>;

//===----------------------------------------------------------------------===//
// X86-64 Argument Calling Conventions
//===----------------------------------------------------------------------===//

def CC_X86_64_C : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<8, 8>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // The first 6 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToReg<[EDI, ESI, EDX, ECX, R8D, R9D]>>,
  CCIfType<[i64], CCAssignToReg<[RDI, RSI, RDX, RCX, R8 , R9 ]>>,

  // The first 8 MMX vector arguments are passed in XMM registers on Darwin.
  CCIfType<[x86mmx],
            CCIfSubtarget<"isTargetDarwin()",
            CCIfSubtarget<"hasSSE2()",
            CCPromoteToType<v2i64>>>>,

  // The first 8 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM0, XMM1, XMM2, XMM3, XMM4, XMM5, XMM6, XMM7]>>>,

  // The first 8 256-bit vector arguments are passed in YMM registers, unless
  // this is a vararg function.
  // FIXME: This isn't precisely correct; the x86-64 ABI document says that
  // fixed arguments to vararg functions are supposed to be passed in
  // registers.  Actually modeling that would be a lot of work, though.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                          CCIfSubtarget<"hasFp256()",
                          CCAssignToReg<[YMM0, YMM1, YMM2, YMM3,
                                         YMM4, YMM5, YMM6, YMM7]>>>>,

  // The first 8 512-bit vector arguments are passed in ZMM registers.
  CCIfNotVarArg<CCIfType<[v16i32, v8i64, v16f32, v8f64],
            CCIfSubtarget<"hasAVX512()",
            CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3, ZMM4, ZMM5, ZMM6, ZMM7]>>>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>,

  // Vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // 512-bit vectors get 64-byte stack slots that are 64-byte aligned.
  CCIfType<[v16i32, v8i64, v16f32, v8f64],
           CCAssignToStack<64, 64>>
]>;

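// Worked example for the rules above: for a non-vararg call passing an i32,
// an i64, and an f64 (with SSE available), the i32 takes EDI, the i64 takes
// RSI, and the f64 takes XMM0. Integer and XMM registers are drawn from
// separate lists here, so using a GPR does not consume an XMM slot (unlike
// the Win64 convention below, which shadows them pairwise).
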
// Calling convention used on Win64
def CC_X86_Win64_C : CallingConv<[
  // FIXME: Handle byval stuff.
  // FIXME: Handle varargs.

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in R10.
  CCIfNest<CCAssignToReg<[R10]>>,

  // 128-bit vectors are passed by pointer.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCPassIndirect<i64>>,

  // 256-bit vectors are passed by pointer.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64], CCPassIndirect<i64>>,

  // 512-bit vectors are passed by pointer.
  CCIfType<[v16i32, v16f32, v8f64, v8i64], CCPassIndirect<i64>>,

  // The first 4 MMX vector arguments are passed in GPRs.
  CCIfType<[x86mmx], CCBitConvertToType<i64>>,

  // The first 4 integer arguments are passed in integer registers.
  CCIfType<[i32], CCAssignToRegWithShadow<[ECX , EDX , R8D , R9D ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // Do not pass the sret argument in RCX; the Win64 thiscall calling
  // convention requires "this" to be passed in RCX.
  CCIfCC<"CallingConv::X86_ThisCall",
    CCIfSRet<CCIfType<[i64], CCAssignToRegWithShadow<[RDX , R8  , R9  ],
                                                     [XMM1, XMM2, XMM3]>>>>,

  CCIfType<[i64], CCAssignToRegWithShadow<[RCX , RDX , R8  , R9  ],
                                          [XMM0, XMM1, XMM2, XMM3]>>,

  // The first 4 FP/Vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
           CCAssignToRegWithShadow<[XMM0, XMM1, XMM2, XMM3],
                                   [RCX , RDX , R8  , R9  ]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>,

  // Long doubles get stack slots whose size and alignment depend on the
  // subtarget.
  CCIfType<[f80], CCAssignToStack<0, 0>>
]>;

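// CCAssignToRegWithShadow keeps the GPR and XMM lists in lock-step: taking a
// register from one list also marks the paired register in the other list as
// used. This models the Win64 rule that argument N always occupies slot N,
// e.g. for f(int, double) the int takes ECX (shadowing XMM0) and the double
// takes XMM1, not XMM0.
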
def CC_X86_64_GHC : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, SpLim
  CCIfType<[i64],
            CCAssignToReg<[R13, RBP, R12, RBX, R14, RSI, RDI, R8, R9, R15]>>,

  // Pass in STG registers: F1, F2, F3, F4, D1, D2
  CCIfType<[f32, f64, v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
            CCIfSubtarget<"hasSSE1()",
            CCAssignToReg<[XMM1, XMM2, XMM3, XMM4, XMM5, XMM6]>>>
]>;

def CC_X86_64_HiPE : CallingConv<[
  // Promote i8/i16/i32 arguments to i64.
  CCIfType<[i8, i16, i32], CCPromoteToType<i64>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2, ARG3
  CCIfType<[i64], CCAssignToReg<[R15, RBP, RSI, RDX, RCX, R8]>>,

  // Integer/FP values get stored in stack slots that are 8 bytes in size and
  // 8-byte aligned if there are no more registers to hold them.
  CCIfType<[i32, i64, f32, f64], CCAssignToStack<8, 8>>
]>;

//===----------------------------------------------------------------------===//
// X86 C Calling Convention
//===----------------------------------------------------------------------===//

/// CC_X86_32_Common - In all X86-32 calling conventions, extra integers and FP
/// values are spilled on the stack, and the first 4 vector values go in XMM
/// regs.
def CC_X86_32_Common : CallingConv<[
  // Handles byval parameters.
  CCIfByVal<CCPassByVal<4, 4>>,

  // The first 3 float or double arguments, if marked 'inreg' and if the call
  // is not a vararg call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>>,

  // The first 3 __m64 vector arguments are passed in MMX registers if the
  // call is not a vararg call.
  CCIfNotVarArg<CCIfType<[x86mmx],
                CCAssignToReg<[MM0, MM1, MM2]>>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>,

  // Doubles get 8-byte slots that are 4-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 4>>,

  // Long doubles get slots whose size depends on the subtarget.
  CCIfType<[f80], CCAssignToStack<0, 4>>,

  // The first 4 SSE vector arguments are passed in XMM registers.
  CCIfNotVarArg<CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64],
                CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>>,

  // The first 4 AVX 256-bit vector arguments are passed in YMM registers.
  CCIfNotVarArg<CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
                CCIfSubtarget<"hasFp256()",
                CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>>>,

  // Other SSE vectors get 16-byte stack slots that are 16-byte aligned.
  CCIfType<[v16i8, v8i16, v4i32, v2i64, v4f32, v2f64], CCAssignToStack<16, 16>>,

  // 256-bit AVX vectors get 32-byte stack slots that are 32-byte aligned.
  CCIfType<[v32i8, v16i16, v8i32, v4i64, v8f32, v4f64],
           CCAssignToStack<32, 32>>,

  // __m64 vectors get 8-byte stack slots that are 4-byte aligned. They are
  // passed in the parameter area.
  CCIfType<[x86mmx], CCAssignToStack<8, 4>>]>;

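// Note: a size or alignment of 0 in CCAssignToStack (as in the f80 rules here
// and above) means "use the ABI size/alignment for the type as given by the
// target data", which is what the "depends on the subtarget" comments refer to.
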
def CC_X86_32_C : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in ECX.
  CCIfNest<CCAssignToReg<[ECX]>>,

  // The first 3 integer arguments, if marked 'inreg' and if the call is not
  // a vararg call, are passed in integer registers.
  CCIfNotVarArg<CCIfInReg<CCIfType<[i32], CCAssignToReg<[EAX, EDX, ECX]>>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX.
  CCIfInReg<CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_ThisCall : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass sret arguments indirectly through the stack.
  CCIfSRet<CCAssignToStack<4, 4>>,

  // The first integer argument is passed in ECX.
  CCIfType<[i32], CCAssignToReg<[ECX]>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_FastCC : CallingConv<[
  // Handles byval parameters.  Note that we can't rely on the delegation
  // to CC_X86_32_Common for this because that happens after code that
  // puts arguments in registers.
  CCIfByVal<CCPassByVal<4, 4>>,

  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // The 'nest' parameter, if any, is passed in EAX.
  CCIfNest<CCAssignToReg<[EAX]>>,

  // The first 2 integer arguments are passed in ECX/EDX.
  CCIfType<[i32], CCAssignToReg<[ECX, EDX]>>,

  // The first 3 float or double arguments, if the call is not a vararg
  // call and if SSE2 is available, are passed in SSE registers.
  CCIfNotVarArg<CCIfType<[f32,f64],
                CCIfSubtarget<"hasSSE2()",
                CCAssignToReg<[XMM0,XMM1,XMM2]>>>>,

  // Doubles get 8-byte slots that are 8-byte aligned.
  CCIfType<[f64], CCAssignToStack<8, 8>>,

  // Otherwise, same as everything else.
  CCDelegateTo<CC_X86_32_Common>
]>;

def CC_X86_32_GHC : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in STG registers: Base, Sp, Hp, R1
  CCIfType<[i32], CCAssignToReg<[EBX, EBP, EDI, ESI]>>
]>;

def CC_X86_32_HiPE : CallingConv<[
  // Promote i8/i16 arguments to i32.
  CCIfType<[i8, i16], CCPromoteToType<i32>>,

  // Pass in VM's registers: HP, P, ARG0, ARG1, ARG2
  CCIfType<[i32], CCAssignToReg<[ESI, EBP, EAX, EDX, ECX]>>,

  // Integer/Float values get stored in stack slots that are 4 bytes in
  // size and 4-byte aligned.
  CCIfType<[i32, f32], CCAssignToStack<4, 4>>
]>;

// X86-64 Intel OpenCL built-ins calling convention.
def CC_Intel_OCL_BI : CallingConv<[

  CCIfType<[i32], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[ECX, EDX, R8D, R9D]>>>,
  CCIfType<[i64], CCIfSubtarget<"isTargetWin64()", CCAssignToReg<[RCX, RDX, R8,  R9 ]>>>,

  CCIfType<[i32], CCIfSubtarget<"is64Bit()", CCAssignToReg<[EDI, ESI, EDX, ECX]>>>,
  CCIfType<[i64], CCIfSubtarget<"is64Bit()", CCAssignToReg<[RDI, RSI, RDX, RCX]>>>,

  CCIfType<[i32], CCAssignToStack<4, 4>>,

  // The SSE vector arguments are passed in XMM registers.
  CCIfType<[f32, f64, v4i32, v2i64, v4f32, v2f64],
           CCAssignToReg<[XMM0, XMM1, XMM2, XMM3]>>,

  // The 256-bit vector arguments are passed in YMM registers.
  CCIfType<[v8f32, v4f64, v8i32, v4i64],
           CCAssignToReg<[YMM0, YMM1, YMM2, YMM3]>>,

  // The 512-bit vector arguments are passed in ZMM registers.
  CCIfType<[v16f32, v8f64, v16i32, v8i64],
           CCAssignToReg<[ZMM0, ZMM1, ZMM2, ZMM3]>>,

  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfSubtarget<"is64Bit()",       CCDelegateTo<CC_X86_64_C>>,
  CCDelegateTo<CC_X86_32_C>
]>;

//===----------------------------------------------------------------------===//
// X86 Root Argument Calling Conventions
//===----------------------------------------------------------------------===//

// This is the root argument convention for the X86-32 backend.
def CC_X86_32 : CallingConv<[
  CCIfCC<"CallingConv::X86_FastCall", CCDelegateTo<CC_X86_32_FastCall>>,
  CCIfCC<"CallingConv::X86_ThisCall", CCDelegateTo<CC_X86_32_ThisCall>>,
  CCIfCC<"CallingConv::Fast", CCDelegateTo<CC_X86_32_FastCC>>,
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_32_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_32_HiPE>>,

  // Otherwise, drop to normal X86-32 CC
  CCDelegateTo<CC_X86_32_C>
]>;

// This is the root argument convention for the X86-64 backend.
def CC_X86_64 : CallingConv<[
  CCIfCC<"CallingConv::GHC", CCDelegateTo<CC_X86_64_GHC>>,
  CCIfCC<"CallingConv::HiPE", CCDelegateTo<CC_X86_64_HiPE>>,
  CCIfCC<"CallingConv::X86_64_Win64", CCDelegateTo<CC_X86_Win64_C>>,
  CCIfCC<"CallingConv::X86_64_SysV", CCDelegateTo<CC_X86_64_C>>,

  // Mingw64 and native Win64 use Win64 CC
  CCIfSubtarget<"isTargetWin64()", CCDelegateTo<CC_X86_Win64_C>>,

  // Otherwise, drop to normal X86-64 CC
  CCDelegateTo<CC_X86_64_C>
]>;

// This is the argument convention used for the entire X86 backend.
def CC_X86 : CallingConv<[
  CCIfCC<"CallingConv::Intel_OCL_BI", CCDelegateTo<CC_Intel_OCL_BI>>,
  CCIfSubtarget<"is64Bit()", CCDelegateTo<CC_X86_64>>,
  CCDelegateTo<CC_X86_32>
]>;

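// Dispatch example: an X86_FastCall call on a 32-bit subtarget is routed
// CC_X86 -> CC_X86_32 -> CC_X86_32_FastCall, and any value its rules do not
// assign falls through to CC_X86_32_Common via CCDelegateTo.
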
//===----------------------------------------------------------------------===//
// Callee-saved Registers.
//===----------------------------------------------------------------------===//

def CSR_NoRegs : CalleeSavedRegs<(add)>;

def CSR_32 : CalleeSavedRegs<(add ESI, EDI, EBX, EBP)>;
def CSR_64 : CalleeSavedRegs<(add RBX, R12, R13, R14, R15, RBP)>;

def CSR_32EHRet : CalleeSavedRegs<(add EAX, EDX, CSR_32)>;
def CSR_64EHRet : CalleeSavedRegs<(add RAX, RDX, CSR_64)>;

def CSR_Win64 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12, R13, R14, R15,
                                     (sequence "XMM%u", 6, 15))>;

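// The 'sequence' operator expands a printf-style template over an inclusive
// range, so (sequence "XMM%u", 6, 15) above names XMM6 through XMM15.
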
def CSR_MostRegs_64 : CalleeSavedRegs<(add RBX, RCX, RDX, RSI, RDI, R8, R9, R10,
                                           R11, R12, R13, R14, R15, RBP,
                                           (sequence "XMM%u", 0, 15))>;

// Standard C + YMM6-15
def CSR_Win64_Intel_OCL_BI_AVX : CalleeSavedRegs<(add RBX, RBP, RDI, RSI, R12,
                                                  R13, R14, R15,
                                                  (sequence "YMM%u", 6, 15))>;

def CSR_Win64_Intel_OCL_BI_AVX512 : CalleeSavedRegs<(add RBX, RBP, RDI, RSI,
                                                     R12, R13, R14, R15,
                                                     (sequence "ZMM%u", 6, 21),
                                                     K4, K5, K6, K7)>;

// Standard C + XMM8-15
def CSR_64_Intel_OCL_BI       : CalleeSavedRegs<(add CSR_64,
                                                 (sequence "XMM%u", 8, 15))>;

// Standard C + YMM8-15
def CSR_64_Intel_OCL_BI_AVX    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "YMM%u", 8, 15))>;

def CSR_64_Intel_OCL_BI_AVX512    : CalleeSavedRegs<(add CSR_64,
                                                  (sequence "ZMM%u", 16, 31),
                                                  K4, K5, K6, K7)>;