// RUN: %clang_cc1 -emit-llvm -o %t %s
// RUN: not grep __builtin %t
// RUN: %clang_cc1 %s -emit-llvm -o - -triple x86_64-darwin-apple | FileCheck %s
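//
// The first two RUN lines check that no unlowered reference to a __builtin_*
// symbol survives in the emitted IR; the third verifies specific lowerings
// with FileCheck.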

int printf(const char *, ...);

void p(char *str, int x) {
  printf("%s: %d\n", str, x);
}
void q(char *str, double x) {
  printf("%s: %f\n", str, x);
}
void r(char *str, void *ptr) {
  printf("%s: %p\n", str, ptr);
}

int random(void);

int main() {
  int N = random();
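  // The helper macros below call p/q/r with the stringized builtin invocation
  // and its result: P for int-valued builtins, Q for double-valued ones, R for
  // pointer-valued ones, and V for void builtins evaluated only for their
  // side effects.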
#define P(n,args) p(#n #args, __builtin_##n args)
#define Q(n,args) q(#n #args, __builtin_##n args)
#define R(n,args) r(#n #args, __builtin_##n args)
#define V(n,args) p(#n #args, (__builtin_##n args, 0))
  P(types_compatible_p, (int, float));
  P(choose_expr, (0, 10, 20));
  P(constant_p, (sizeof(10)));
  P(expect, (N == 12, 0));
  V(prefetch, (&N));
  V(prefetch, (&N, 1));
  V(prefetch, (&N, 1, 0));

  // Numeric Constants

  Q(huge_val, ());
  Q(huge_valf, ());
  Q(huge_vall, ());
  Q(inf, ());
  Q(inff, ());
  Q(infl, ());

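  // __builtin_fpclassify's first five arguments are the values to return when
  // the last argument is NaN, infinite, normal, subnormal, or zero,
  // respectively.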
  P(fpclassify, (0, 1, 2, 3, 4, 1.0));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0f));
  P(fpclassify, (0, 1, 2, 3, 4, 1.0l));

  Q(nan, (""));
  Q(nanf, (""));
  Q(nanl, (""));
  Q(nans, (""));
  Q(nan, ("10"));
  Q(nanf, ("10"));
  Q(nanl, ("10"));
  Q(nans, ("10"));

  P(isgreater, (1., 2.));
  P(isgreaterequal, (1., 2.));
  P(isless, (1., 2.));
  P(islessequal, (1., 2.));
  P(islessgreater, (1., 2.));
  P(isunordered, (1., 2.));

  P(isinf, (1.));
  P(isinf_sign, (1.));
  P(isnan, (1.));

  // Bitwise & Numeric Functions

  P(abs, (N));

  P(clz, (N));
  P(clzl, (N));
  P(clzll, (N));
  P(ctz, (N));
  P(ctzl, (N));
  P(ctzll, (N));
  P(ffs, (N));
  P(ffsl, (N));
  P(ffsll, (N));
  P(parity, (N));
  P(parityl, (N));
  P(parityll, (N));
  P(popcount, (N));
  P(popcountl, (N));
  P(popcountll, (N));
  Q(powi, (1.2f, N));
  Q(powif, (1.2f, N));
  Q(powil, (1.2f, N));

  // Lib functions
  int a, b, n = random(); // Avoid optimizing out.
  char s0[10], s1[] = "Hello";
  V(strcat, (s0, s1));
  V(strcmp, (s0, s1));
  V(strncat, (s0, s1, n));
  V(strchr, (s0, s1[0]));
  V(strrchr, (s0, s1[0]));
  V(strcpy, (s0, s1));
  V(strncpy, (s0, s1, n));

  // Object size checking
  V(__memset_chk, (s0, 0, sizeof s0, n));
  V(__memcpy_chk, (s0, s1, sizeof s0, n));
  V(__memmove_chk, (s0, s1, sizeof s0, n));
  V(__mempcpy_chk, (s0, s1, sizeof s0, n));
  V(__strncpy_chk, (s0, s1, sizeof s0, n));
  V(__strcpy_chk, (s0, s1, n));
  s0[0] = 0;
  V(__strcat_chk, (s0, s1, n));
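  // For __builtin_object_size, bit 0 of the type argument requests the size of
  // the closest enclosing subobject rather than the whole object, and bit 1
  // requests a minimum rather than a maximum estimate.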
  P(object_size, (s0, 0));
  P(object_size, (s0, 1));
  P(object_size, (s0, 2));
  P(object_size, (s0, 3));

  // Whatever

  P(bswap16, (N));
  P(bswap32, (N));
  P(bswap64, (N));

  // CHECK: @llvm.bitreverse.i8
  // CHECK: @llvm.bitreverse.i16
  // CHECK: @llvm.bitreverse.i32
  // CHECK: @llvm.bitreverse.i64
  P(bitreverse8, (N));
  P(bitreverse16, (N));
  P(bitreverse32, (N));
  P(bitreverse64, (N));

  // FIXME
  // V(clear_cache, (&N, &N+1));
  V(trap, ());
  R(extract_return_addr, (&N));
  P(signbit, (1.0));

  return 0;
}



void foo() {
 __builtin_strcat(0, 0);
}

// CHECK-LABEL: define void @bar(
void bar() {
  float f;
  double d;
  long double ld;

  // LLVM's hex representation of float constants is really unfortunate;
  // basically it does a float-to-double "conversion" and then prints the
  // hex form of that.  That gives us weird artifacts like exponents
  // that aren't numerically similar to the original exponent and
  // significand bit-patterns that are offset by three bits (because
  // the exponent was expanded from 8 bits to 11).
  //
  // 0xAE98 == 1010111010011000
  // 0x15D3 == 1010111010011
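  //
  // (0x15D3 is just 0xAE98 with its three trailing zero bits dropped, which is
  // why the "AE98" payload below shows up as "15D3" in the float CHECK lines
  // but verbatim in the double ones.)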

  f = __builtin_huge_valf();     // CHECK: float    0x7FF0000000000000
  d = __builtin_huge_val();      // CHECK: double   0x7FF0000000000000
  ld = __builtin_huge_vall();    // CHECK: x86_fp80 0xK7FFF8000000000000000
  f = __builtin_nanf("");        // CHECK: float    0x7FF8000000000000
  d = __builtin_nan("");         // CHECK: double   0x7FF8000000000000
  ld = __builtin_nanl("");       // CHECK: x86_fp80 0xK7FFFC000000000000000
  f = __builtin_nanf("0xAE98");  // CHECK: float    0x7FF815D300000000
  d = __builtin_nan("0xAE98");   // CHECK: double   0x7FF800000000AE98
  ld = __builtin_nanl("0xAE98"); // CHECK: x86_fp80 0xK7FFFC00000000000AE98
  f = __builtin_nansf("");       // CHECK: float    0x7FF4000000000000
  d = __builtin_nans("");        // CHECK: double   0x7FF4000000000000
  ld = __builtin_nansl("");      // CHECK: x86_fp80 0xK7FFFA000000000000000
  f = __builtin_nansf("0xAE98"); // CHECK: float    0x7FF015D300000000
  d = __builtin_nans("0xAE98");  // CHECK: double   0x7FF000000000AE98
  ld = __builtin_nansl("0xAE98");// CHECK: x86_fp80 0xK7FFF800000000000AE98

}
// CHECK: }


// CHECK-LABEL: define void @test_float_builtins
void test_float_builtins(float F, double D, long double LD) {
  volatile int res;
  res = __builtin_isinf(F);
  // CHECK:  call float @llvm.fabs.f32(float
  // CHECK:  fcmp oeq float {{.*}}, 0x7FF0000000000000

  res = __builtin_isinf(D);
  // CHECK:  call double @llvm.fabs.f64(double
  // CHECK:  fcmp oeq double {{.*}}, 0x7FF0000000000000

  res = __builtin_isinf(LD);
  // CHECK:  call x86_fp80 @llvm.fabs.f80(x86_fp80
  // CHECK:  fcmp oeq x86_fp80 {{.*}}, 0xK7FFF8000000000000000

  res = __builtin_isinf_sign(F);
  // CHECK:  %[[ABS:.*]] = call float @llvm.fabs.f32(float %[[ARG:.*]])
  // CHECK:  %[[ISINF:.*]] = fcmp oeq float %[[ABS]], 0x7FF0000000000000
  // CHECK:  %[[BITCAST:.*]] = bitcast float %[[ARG]] to i32
  // CHECK:  %[[ISNEG:.*]] = icmp slt i32 %[[BITCAST]], 0
  // CHECK:  %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK:  select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(D);
  // CHECK:  %[[ABS:.*]] = call double @llvm.fabs.f64(double %[[ARG:.*]])
  // CHECK:  %[[ISINF:.*]] = fcmp oeq double %[[ABS]], 0x7FF0000000000000
  // CHECK:  %[[BITCAST:.*]] = bitcast double %[[ARG]] to i64
  // CHECK:  %[[ISNEG:.*]] = icmp slt i64 %[[BITCAST]], 0
  // CHECK:  %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK:  select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isinf_sign(LD);
  // CHECK:  %[[ABS:.*]] = call x86_fp80 @llvm.fabs.f80(x86_fp80 %[[ARG:.*]])
  // CHECK:  %[[ISINF:.*]] = fcmp oeq x86_fp80 %[[ABS]], 0xK7FFF8000000000000000
  // CHECK:  %[[BITCAST:.*]] = bitcast x86_fp80 %[[ARG]] to i80
  // CHECK:  %[[ISNEG:.*]] = icmp slt i80 %[[BITCAST]], 0
  // CHECK:  %[[SIGN:.*]] = select i1 %[[ISNEG]], i32 -1, i32 1
  // CHECK:  select i1 %[[ISINF]], i32 %[[SIGN]], i32 0

  res = __builtin_isfinite(F);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: fcmp one float {{.*}}, 0x7FF0000000000000

  res = __builtin_isnormal(F);
  // CHECK: fcmp oeq float
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: fcmp ult float {{.*}}, 0x7FF0000000000000
  // CHECK: fcmp uge float {{.*}}, 0x3810000000000000
  // CHECK: and i1
  // CHECK: and i1
}

// CHECK-LABEL: define void @test_float_builtin_ops
void test_float_builtin_ops(float F, double D, long double LD) {
  volatile float resf;
  volatile double resd;
  volatile long double resld;

  resf = __builtin_fmodf(F,F);
  // CHECK: frem float

  resd = __builtin_fmod(D,D);
  // CHECK: frem double

  resld = __builtin_fmodl(LD,LD);
  // CHECK: frem x86_fp80

  resf = __builtin_fabsf(F);
  resd = __builtin_fabs(D);
  resld = __builtin_fabsl(LD);
  // CHECK: call float @llvm.fabs.f32(float
  // CHECK: call double @llvm.fabs.f64(double
  // CHECK: call x86_fp80 @llvm.fabs.f80(x86_fp80

  resf = __builtin_canonicalizef(F);
  resd = __builtin_canonicalize(D);
  resld = __builtin_canonicalizel(LD);
  // CHECK: call float @llvm.canonicalize.f32(float
  // CHECK: call double @llvm.canonicalize.f64(double
  // CHECK: call x86_fp80 @llvm.canonicalize.f80(x86_fp80

  resf = __builtin_fminf(F, F);
  // CHECK: call float @llvm.minnum.f32

  resd = __builtin_fmin(D, D);
  // CHECK: call double @llvm.minnum.f64

  resld = __builtin_fminl(LD, LD);
  // CHECK: call x86_fp80 @llvm.minnum.f80

  resf = __builtin_fmaxf(F, F);
  // CHECK: call float @llvm.maxnum.f32

  resd = __builtin_fmax(D, D);
  // CHECK: call double @llvm.maxnum.f64

  resld = __builtin_fmaxl(LD, LD);
  // CHECK: call x86_fp80 @llvm.maxnum.f80

  resf = __builtin_fabsf(F);
  // CHECK: call float @llvm.fabs.f32

  resd = __builtin_fabs(D);
  // CHECK: call double @llvm.fabs.f64

  resld = __builtin_fabsl(LD);
  // CHECK: call x86_fp80 @llvm.fabs.f80

  resf = __builtin_copysignf(F, F);
  // CHECK: call float @llvm.copysign.f32

  resd = __builtin_copysign(D, D);
  // CHECK: call double @llvm.copysign.f64

  resld = __builtin_copysignl(LD, LD);
  // CHECK: call x86_fp80 @llvm.copysign.f80


  resf = __builtin_ceilf(F);
  // CHECK: call float @llvm.ceil.f32

  resd = __builtin_ceil(D);
  // CHECK: call double @llvm.ceil.f64

  resld = __builtin_ceill(LD);
  // CHECK: call x86_fp80 @llvm.ceil.f80

  resf = __builtin_floorf(F);
  // CHECK: call float @llvm.floor.f32

  resd = __builtin_floor(D);
  // CHECK: call double @llvm.floor.f64

  resld = __builtin_floorl(LD);
  // CHECK: call x86_fp80 @llvm.floor.f80

  resf = __builtin_truncf(F);
  // CHECK: call float @llvm.trunc.f32

  resd = __builtin_trunc(D);
  // CHECK: call double @llvm.trunc.f64

  resld = __builtin_truncl(LD);
  // CHECK: call x86_fp80 @llvm.trunc.f80

  resf = __builtin_rintf(F);
  // CHECK: call float @llvm.rint.f32

  resd = __builtin_rint(D);
  // CHECK: call double @llvm.rint.f64

  resld = __builtin_rintl(LD);
  // CHECK: call x86_fp80 @llvm.rint.f80

  resf = __builtin_nearbyintf(F);
  // CHECK: call float @llvm.nearbyint.f32

  resd = __builtin_nearbyint(D);
  // CHECK: call double @llvm.nearbyint.f64

  resld = __builtin_nearbyintl(LD);
  // CHECK: call x86_fp80 @llvm.nearbyint.f80

  resf = __builtin_roundf(F);
  // CHECK: call float @llvm.round.f32

  resd = __builtin_round(D);
  // CHECK: call double @llvm.round.f64

  resld = __builtin_roundl(LD);
  // CHECK: call x86_fp80 @llvm.round.f80

}

// __builtin_longjmp isn't supported on all platforms, so only test it on X86.
#ifdef __x86_64__
// CHECK-LABEL: define void @test_builtin_longjmp
void test_builtin_longjmp(void **buffer) {
  // CHECK: [[BITCAST:%.*]] = bitcast
  // CHECK-NEXT: call void @llvm.eh.sjlj.longjmp(i8* [[BITCAST]])
  __builtin_longjmp(buffer, 1);
  // CHECK-NEXT: unreachable
}
#endif

// CHECK-LABEL: define i64 @test_builtin_readcyclecounter
long long test_builtin_readcyclecounter() {
  // CHECK: call i64 @llvm.readcyclecounter()
  return __builtin_readcyclecounter();
}
370