; RUN: llc < %s -mcpu=generic -mtriple=x86_64-pc-win32 | FileCheck %s

; Verify that the var arg parameters which are passed in registers are stored
; in home stack slots allocated by the caller and that AP is correctly
; calculated.
; Win64: %rcx carries %count; the remaining integer varargs arrive in
; %rdx, %r8, %r9 and must be homed to the caller-allocated shadow slots
; (24/32/40(%rsp) after the push). AP then points at the first vararg slot.
define void @average_va(i32 %count, ...) nounwind {
entry:
; CHECK: pushq
; CHECK: movq   %r9, 40(%rsp)
; CHECK: movq   %r8, 32(%rsp)
; CHECK: movq   %rdx, 24(%rsp)
; CHECK: leaq   24(%rsp), %rax

  %ap = alloca i8*, align 8                       ; <i8**> [#uses=1]
  %ap1 = bitcast i8** %ap to i8*                  ; <i8*> [#uses=1]
  call void @llvm.va_start(i8* %ap1)
  ret void
}

declare void @llvm.va_start(i8*) nounwind
declare void @llvm.va_copy(i8*, i8*) nounwind

; Five fixed args: first vararg is at retaddr(8) + push(8) + 5*8 = 56(%rsp).
; CHECK-LABEL: f5:
; CHECK: pushq
; CHECK: leaq 56(%rsp),
define i8* @f5(i64 %a0, i64 %a1, i64 %a2, i64 %a3, i64 %a4, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  ret i8* %ap1
}

; Four fixed args: first vararg is at retaddr(8) + push(8) + 4*8 = 48(%rsp).
; CHECK-LABEL: f4:
; CHECK: pushq
; CHECK: leaq 48(%rsp),
define i8* @f4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  ret i8* %ap1
}

; Three fixed args: first vararg is at retaddr(8) + push(8) + 3*8 = 40(%rsp).
; CHECK-LABEL: f3:
; CHECK: pushq
; CHECK: leaq 40(%rsp),
define i8* @f3(i64 %a0, i64 %a1, i64 %a2, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  ret i8* %ap1
}

; WinX86_64 uses char* for va_list. Verify that the correct amount of bytes
; are copied using va_copy.

; One fixed arg: AP = 32(%rsp) after the 16-byte frame; va_copy stores the
; same pointer into both the source and destination va_list slots.
; CHECK-LABEL: copy1:
; CHECK: subq $16
; CHECK: leaq 32(%rsp), [[REG_copy1:%[a-z]+]]
; CHECK: movq [[REG_copy1]], 8(%rsp)
; CHECK: movq [[REG_copy1]], (%rsp)
; CHECK: addq $16
; CHECK: ret
define void @copy1(i64 %a0, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %cp = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  %cp1 = bitcast i8** %cp to i8*
  call void @llvm.va_start(i8* %ap1)
  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
  ret void
}

; Four fixed args: AP = 56(%rsp) after the 16-byte frame; the single
; pointer value is copied into both va_list slots.
; CHECK-LABEL: copy4:
; CHECK: subq $16
; CHECK: leaq 56(%rsp), [[REG_copy4:%[a-z]+]]
; CHECK: movq [[REG_copy4]], 8(%rsp)
; CHECK: movq [[REG_copy4]], (%rsp)
; CHECK: addq $16
; CHECK: ret
define void @copy4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %cp = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  %cp1 = bitcast i8** %cp to i8*
  call void @llvm.va_start(i8* %ap1)
  call void @llvm.va_copy(i8* %cp1, i8* %ap1)
  ret void
}

; va_start sets AP to 48(%rsp); va_arg of an i32 bumps AP by 4 (to 52)
; and loads the 32-bit value from the old AP (48(%rsp)).
; CHECK-LABEL: arg4:
; CHECK: pushq
; va_start:
; CHECK: leaq 48(%rsp), [[REG_arg4_1:%[a-z]+]]
; CHECK: movq [[REG_arg4_1]], (%rsp)
; va_arg:
; CHECK: leaq 52(%rsp), [[REG_arg4_2:%[a-z]+]]
; CHECK: movq [[REG_arg4_2]], (%rsp)
; CHECK: movl 48(%rsp), %eax
; CHECK: popq
; CHECK: ret
define i32 @arg4(i64 %a0, i64 %a1, i64 %a2, i64 %a3, ...) nounwind {
entry:
  %ap = alloca i8*, align 8
  %ap1 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap1)
  %tmp = va_arg i8** %ap, i32
  ret i32 %tmp
}

; sret consumes the first register arg, so only two registers (%r8, %r9)
; remain to be homed; the first vararg is read from 32(%rsp) and the sret
; pointer is returned in %rax per the Win64 convention.
define void @sret_arg(i32* sret %agg.result, i8* nocapture readnone %format, ...) {
entry:
  %ap = alloca i8*
  %ap_i8 = bitcast i8** %ap to i8*
  call void @llvm.va_start(i8* %ap_i8)
  %tmp = va_arg i8** %ap, i32
  store i32 %tmp, i32* %agg.result
  ret void
}
; CHECK-LABEL: sret_arg:
; CHECK: pushq
; CHECK-DAG: movq %r9, 40(%rsp)
; CHECK-DAG: movq %r8, 32(%rsp)
; CHECK: movl 32(%rsp), %[[tmp:[^ ]*]]
; CHECK: movl %[[tmp]], (%[[sret:[^ ]*]])
; CHECK: movq %[[sret]], %rax
; CHECK: popq
; CHECK: retq