// RUN: %clang_cc1 -triple arm64-apple-ios7 -target-abi darwinpcs -emit-llvm -o - %s | FileCheck %s
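// Check Darwin AArch64 (darwinpcs) argument lowering for ext_vector_type
// values, both as variadic and as fixed arguments: vectors of 16 bytes or
// less are coerced to an integer or short integer-vector type, while larger
// vectors are passed indirectly by pointer.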

#include <stdarg.h>

typedef __attribute__(( ext_vector_type(3) ))  char __char3;
typedef __attribute__(( ext_vector_type(4) ))  char __char4;
typedef __attribute__(( ext_vector_type(5) ))  char __char5;
typedef __attribute__(( ext_vector_type(9) ))  char __char9;
typedef __attribute__(( ext_vector_type(19) )) char __char19;
typedef __attribute__(( ext_vector_type(3) ))  short __short3;
typedef __attribute__(( ext_vector_type(5) ))  short __short5;
typedef __attribute__(( ext_vector_type(3) ))  int __int3;
typedef __attribute__(( ext_vector_type(5) ))  int __int5;
typedef __attribute__(( ext_vector_type(3) ))  double __double3;

double varargs_vec_3c(int fixed, ...) {
// CHECK: varargs_vec_3c
// CHECK: alloca <3 x i8>, align 4
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char3 c3 = va_arg(ap, __char3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

double test_3c(__char3 *in) {
// CHECK: test_3c
// CHECK: call double (i32, ...)* @varargs_vec_3c(i32 3, i32 {{%.*}})
  return varargs_vec_3c(3, *in);
}

double varargs_vec_4c(int fixed, ...) {
// CHECK: varargs_vec_4c
// CHECK: alloca <4 x i8>, align 4
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <4 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char4 c4 = va_arg(ap, __char4);
  sum = sum + c4.x + c4.y;
  va_end(ap);
  return sum;
}

double test_4c(__char4 *in) {
// CHECK: test_4c
// CHECK: call double (i32, ...)* @varargs_vec_4c(i32 4, i32 {{%.*}})
  return varargs_vec_4c(4, *in);
}

double varargs_vec_5c(int fixed, ...) {
// CHECK: varargs_vec_5c
// CHECK: alloca <5 x i8>, align 8
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char5 c5 = va_arg(ap, __char5);
  sum = sum + c5.x + c5.y;
  va_end(ap);
  return sum;
}

double test_5c(__char5 *in) {
// CHECK: test_5c
// CHECK: call double (i32, ...)* @varargs_vec_5c(i32 5, <2 x i32> {{%.*}})
  return varargs_vec_5c(5, *in);
}

double varargs_vec_9c(int fixed, ...) {
// CHECK: varargs_vec_9c
// CHECK: alloca <9 x i8>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char9 c9 = va_arg(ap, __char9);
  sum = sum + c9.x + c9.y;
  va_end(ap);
  return sum;
}

double test_9c(__char9 *in) {
// CHECK: test_9c
// CHECK: call double (i32, ...)* @varargs_vec_9c(i32 9, <4 x i32> {{%.*}})
  return varargs_vec_9c(9, *in);
}

double varargs_vec_19c(int fixed, ...) {
// CHECK: varargs_vec_19c
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char19 c19 = va_arg(ap, __char19);
  sum = sum + c19.x + c19.y;
  va_end(ap);
  return sum;
}

double test_19c(__char19 *in) {
// CHECK: test_19c
// CHECK: call double (i32, ...)* @varargs_vec_19c(i32 19, <19 x i8>* {{%.*}})
  return varargs_vec_19c(19, *in);
}

double varargs_vec_3s(int fixed, ...) {
// CHECK: varargs_vec_3s
// CHECK: alloca <3 x i16>, align 8
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __short3 c3 = va_arg(ap, __short3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

double test_3s(__short3 *in) {
// CHECK: test_3s
// CHECK: call double (i32, ...)* @varargs_vec_3s(i32 3, <2 x i32> {{%.*}})
  return varargs_vec_3s(3, *in);
}

double varargs_vec_5s(int fixed, ...) {
// CHECK: varargs_vec_5s
// CHECK: alloca <5 x i16>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __short5 c5 = va_arg(ap, __short5);
  sum = sum + c5.x + c5.y;
  va_end(ap);
  return sum;
}

double test_5s(__short5 *in) {
// CHECK: test_5s
// CHECK: call double (i32, ...)* @varargs_vec_5s(i32 5, <4 x i32> {{%.*}})
  return varargs_vec_5s(5, *in);
}

double varargs_vec_3i(int fixed, ...) {
// CHECK: varargs_vec_3i
// CHECK: alloca <3 x i32>, align 16
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __int3 c3 = va_arg(ap, __int3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

double test_3i(__int3 *in) {
// CHECK: test_3i
// CHECK: call double (i32, ...)* @varargs_vec_3i(i32 3, <4 x i32> {{%.*}})
  return varargs_vec_3i(3, *in);
}

double varargs_vec_5i(int fixed, ...) {
// CHECK: varargs_vec_5i
// CHECK: alloca <5 x i32>, align 16
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <5 x i32>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __int5 c5 = va_arg(ap, __int5);
  sum = sum + c5.x + c5.y;
  va_end(ap);
  return sum;
}

double test_5i(__int5 *in) {
// CHECK: test_5i
// CHECK: call double (i32, ...)* @varargs_vec_5i(i32 5, <5 x i32>* {{%.*}})
  return varargs_vec_5i(5, *in);
}

double varargs_vec_3d(int fixed, ...) {
// CHECK: varargs_vec_3d
// CHECK: alloca <3 x double>, align 16
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <3 x double>*
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __double3 c3 = va_arg(ap, __double3);
  sum = sum + c3.x + c3.y;
  va_end(ap);
  return sum;
}

double test_3d(__double3 *in) {
// CHECK: test_3d
// CHECK: call double (i32, ...)* @varargs_vec_3d(i32 3, <3 x double>* {{%.*}})
  return varargs_vec_3d(3, *in);
}

double varargs_vec(int fixed, ...) {
// CHECK: varargs_vec
  va_list ap;
  double sum = fixed;
  va_start(ap, fixed);
  __char3 c3 = va_arg(ap, __char3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i8>*
  sum = sum + c3.x + c3.y;
  __char5 c5 = va_arg(ap, __char5);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <5 x i8>*
  sum = sum + c5.x + c5.y;
  __char9 c9 = va_arg(ap, __char9);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <9 x i8>*
  sum = sum + c9.x + c9.y;
  __char19 c19 = va_arg(ap, __char19);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <19 x i8>*
  sum = sum + c19.x + c19.y;
  __short3 s3 = va_arg(ap, __short3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: bitcast i8* [[AP_CUR]] to <3 x i16>*
  sum = sum + s3.x + s3.y;
  __short5 s5 = va_arg(ap, __short5);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <5 x i16>*
  sum = sum + s5.x + s5.y;
  __int3 i3 = va_arg(ap, __int3);
// CHECK: [[ALIGN:%.*]] = and i64 {{%.*}}, -16
// CHECK: [[AP_ALIGN:%.*]] = inttoptr i64 [[ALIGN]] to i8*
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_ALIGN]], i32 16
// CHECK: bitcast i8* [[AP_ALIGN]] to <3 x i32>*
  sum = sum + i3.x + i3.y;
  __int5 i5 = va_arg(ap, __int5);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <5 x i32>*
  sum = sum + i5.x + i5.y;
  __double3 d3 = va_arg(ap, __double3);
// CHECK: [[AP_NEXT:%.*]] = getelementptr i8* [[AP_CUR:%.*]], i32 8
// CHECK: [[VAR:%.*]] = bitcast i8* [[AP_CUR]] to i8**
// CHECK: [[VAR2:%.*]] = load i8** [[VAR]]
// CHECK: bitcast i8* [[VAR2]] to <3 x double>*
  sum = sum + d3.x + d3.y;
  va_end(ap);
  return sum;
}

double test(__char3 *c3, __char5 *c5, __char9 *c9, __char19 *c19,
            __short3 *s3, __short5 *s5, __int3 *i3, __int5 *i5,
            __double3 *d3) {
  double ret = varargs_vec(3, *c3, *c5, *c9, *c19, *s3, *s5, *i3, *i5, *d3);
// CHECK: call double (i32, ...)* @varargs_vec(i32 3, i32 {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <19 x i8>* {{%.*}}, <2 x i32> {{%.*}}, <4 x i32> {{%.*}}, <4 x i32> {{%.*}}, <5 x i32>* {{%.*}}, <3 x double>* {{%.*}})
  return ret;
}

__attribute__((noinline)) double args_vec_3c(int fixed, __char3 c3) {
// CHECK: args_vec_3c
// CHECK: [[C3:%.*]] = alloca <3 x i8>, align 4
// CHECK: [[TMP:%.*]] = bitcast <3 x i8>* [[C3]] to i32*
// CHECK: store i32 {{%.*}}, i32* [[TMP]]
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

double fixed_3c(__char3 *in) {
// CHECK: fixed_3c
// CHECK: call double @args_vec_3c(i32 3, i32 {{%.*}})
  return args_vec_3c(3, *in);
}

__attribute__((noinline)) double args_vec_5c(int fixed, __char5 c5) {
// CHECK: args_vec_5c
// CHECK: [[C5:%.*]] = alloca <5 x i8>, align 8
// CHECK: [[TMP:%.*]] = bitcast <5 x i8>* [[C5]] to <2 x i32>*
// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 1
  double sum = fixed;
  sum = sum + c5.x + c5.y;
  return sum;
}

double fixed_5c(__char5 *in) {
// CHECK: fixed_5c
// CHECK: call double @args_vec_5c(i32 5, <2 x i32> {{%.*}})
  return args_vec_5c(5, *in);
}

__attribute__((noinline)) double args_vec_9c(int fixed, __char9 c9) {
// CHECK: args_vec_9c
// CHECK: [[C9:%.*]] = alloca <9 x i8>, align 16
// CHECK: [[TMP:%.*]] = bitcast <9 x i8>* [[C9]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
  double sum = fixed;
  sum = sum + c9.x + c9.y;
  return sum;
}

double fixed_9c(__char9 *in) {
// CHECK: fixed_9c
// CHECK: call double @args_vec_9c(i32 9, <4 x i32> {{%.*}})
  return args_vec_9c(9, *in);
}

__attribute__((noinline)) double args_vec_19c(int fixed, __char19 c19) {
// CHECK: args_vec_19c
// CHECK: [[C19:%.*]] = load <19 x i8>* {{.*}}, align 16
  double sum = fixed;
  sum = sum + c19.x + c19.y;
  return sum;
}

double fixed_19c(__char19 *in) {
// CHECK: fixed_19c
// CHECK: call double @args_vec_19c(i32 19, <19 x i8>* {{%.*}})
  return args_vec_19c(19, *in);
}

__attribute__((noinline)) double args_vec_3s(int fixed, __short3 c3) {
// CHECK: args_vec_3s
// CHECK: [[C3:%.*]] = alloca <3 x i16>, align 8
// CHECK: [[TMP:%.*]] = bitcast <3 x i16>* [[C3]] to <2 x i32>*
// CHECK: store <2 x i32> {{%.*}}, <2 x i32>* [[TMP]], align 1
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

double fixed_3s(__short3 *in) {
// CHECK: fixed_3s
// CHECK: call double @args_vec_3s(i32 3, <2 x i32> {{%.*}})
  return args_vec_3s(3, *in);
}

__attribute__((noinline)) double args_vec_5s(int fixed, __short5 c5) {
// CHECK: args_vec_5s
// CHECK: [[C5:%.*]] = alloca <5 x i16>, align 16
// CHECK: [[TMP:%.*]] = bitcast <5 x i16>* [[C5]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
  double sum = fixed;
  sum = sum + c5.x + c5.y;
  return sum;
}

double fixed_5s(__short5 *in) {
// CHECK: fixed_5s
// CHECK: call double @args_vec_5s(i32 5, <4 x i32> {{%.*}})
  return args_vec_5s(5, *in);
}

__attribute__((noinline)) double args_vec_3i(int fixed, __int3 c3) {
// CHECK: args_vec_3i
// CHECK: [[C3:%.*]] = alloca <3 x i32>, align 16
// CHECK: [[TMP:%.*]] = bitcast <3 x i32>* [[C3]] to <4 x i32>*
// CHECK: store <4 x i32> {{%.*}}, <4 x i32>* [[TMP]], align 1
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

double fixed_3i(__int3 *in) {
// CHECK: fixed_3i
// CHECK: call double @args_vec_3i(i32 3, <4 x i32> {{%.*}})
  return args_vec_3i(3, *in);
}

__attribute__((noinline)) double args_vec_5i(int fixed, __int5 c5) {
// CHECK: args_vec_5i
// CHECK: [[C5:%.*]] = load <5 x i32>* {{%.*}}, align 16
  double sum = fixed;
  sum = sum + c5.x + c5.y;
  return sum;
}

double fixed_5i(__int5 *in) {
// CHECK: fixed_5i
// CHECK: call double @args_vec_5i(i32 5, <5 x i32>* {{%.*}})
  return args_vec_5i(5, *in);
}

__attribute__((noinline)) double args_vec_3d(int fixed, __double3 c3) {
// CHECK: args_vec_3d
// CHECK: [[CAST:%.*]] = bitcast <3 x double>* {{%.*}} to <4 x double>*
// CHECK: [[LOAD:%.*]] = load <4 x double>* [[CAST]]
// CHECK: shufflevector <4 x double> [[LOAD]], <4 x double> undef, <3 x i32> <i32 0, i32 1, i32 2>
  double sum = fixed;
  sum = sum + c3.x + c3.y;
  return sum;
}

double fixed_3d(__double3 *in) {
// CHECK: fixed_3d
// CHECK: call double @args_vec_3d(i32 3, <3 x double>* {{%.*}})
  return args_vec_3d(3, *in);
}