// RUN: %clang_cc1 -triple x86_64-linux -ffp-exception-behavior=maytrap -w -S -o - -emit-llvm %s | FileCheck %s
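// The RUN line emits textual LLVM IR to stdout and pipes it through FileCheck;
// -ffp-exception-behavior=maytrap establishes the translation-unit default
// exception behavior, and -w suppresses warnings so the test can focus on the
// generated IR.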

// Test codegen of constrained math builtins.
//
// Test that the constrained intrinsics are picking up the exception
// metadata from the AST instead of the global default from the command line.
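//
// Under the command-line default alone, the calls below would be emitted with
// !"fpexcept.maytrap"; the #pragma float_control(except, on) that follows
// switches the exception behavior recorded in the AST to strict, so every
// call site is checked for !"fpexcept.strict" instead.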

#pragma float_control(except, on)

void foo(double *d, float f, float *fp, long double *l, int *i, const char *c) {
  f = __builtin_fmod(f,f);    f = __builtin_fmodf(f,f);   f = __builtin_fmodl(f,f); f = __builtin_fmodf128(f,f);

// CHECK: call double @llvm.experimental.constrained.frem.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.frem.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.frem.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_pow(f,f);        __builtin_powf(f,f);       __builtin_powl(f,f); __builtin_powf128(f,f);

// CHECK: call double @llvm.experimental.constrained.pow.f64(double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.pow.f32(float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.pow.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_powi(f,f);        __builtin_powif(f,f);       __builtin_powil(f,f);

// CHECK: call double @llvm.experimental.constrained.powi.f64(double %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.powi.f32(float %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80 %{{.*}}, i32 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
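// Note: the constrained powi intrinsic takes an i32 exponent (see the
// declarations below), so the second float argument is converted to int, and
// no f128 variant is exercised.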

  __builtin_ceil(f);       __builtin_ceilf(f);      __builtin_ceill(f); __builtin_ceilf128(f);

// CHECK: call double @llvm.experimental.constrained.ceil.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.ceil.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.ceil.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_cos(f);        __builtin_cosf(f);       __builtin_cosl(f); __builtin_cosf128(f);

// CHECK: call double @llvm.experimental.constrained.cos.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.cos.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.cos.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_exp(f);        __builtin_expf(f);       __builtin_expl(f); __builtin_expf128(f);

// CHECK: call double @llvm.experimental.constrained.exp.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.exp.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.exp.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_exp2(f);       __builtin_exp2f(f);      __builtin_exp2l(f); __builtin_exp2f128(f);

// CHECK: call double @llvm.experimental.constrained.exp2.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.exp2.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.exp2.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_floor(f);      __builtin_floorf(f);     __builtin_floorl(f); __builtin_floorf128(f);

// CHECK: call double @llvm.experimental.constrained.floor.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.floor.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.floor.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_fma(f,f,f);        __builtin_fmaf(f,f,f);       __builtin_fmal(f,f,f); __builtin_fmaf128(f,f,f);

// CHECK: call double @llvm.experimental.constrained.fma.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.fma.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.fma.f128(fp128 %{{.*}}, fp128 %{{.*}}, fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_fmax(f,f);       __builtin_fmaxf(f,f);      __builtin_fmaxl(f,f); __builtin_fmaxf128(f,f);

// CHECK: call double @llvm.experimental.constrained.maxnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.maxnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.maxnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_fmin(f,f);       __builtin_fminf(f,f);      __builtin_fminl(f,f); __builtin_fminf128(f,f);

// CHECK: call double @llvm.experimental.constrained.minnum.f64(double %{{.*}}, double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.minnum.f32(float %{{.*}}, float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.minnum.f128(fp128 %{{.*}}, fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_llrint(f);     __builtin_llrintf(f);    __builtin_llrintl(f); __builtin_llrintf128(f);

// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llrint.i64.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
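// Note: with the x86_64-linux triple, long and long long are both 64 bits, so
// the lrint/llrint/lround/llround families all map to the .i64 intrinsic
// variants.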

  __builtin_llround(f);    __builtin_llroundf(f);   __builtin_llroundl(f); __builtin_llroundf128(f);

// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.llround.i64.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_log(f);        __builtin_logf(f);       __builtin_logl(f); __builtin_logf128(f);

// CHECK: call double @llvm.experimental.constrained.log.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.log.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.log.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_log10(f);      __builtin_log10f(f);     __builtin_log10l(f); __builtin_log10f128(f);

// CHECK: call double @llvm.experimental.constrained.log10.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.log10.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.log10.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_log2(f);       __builtin_log2f(f);      __builtin_log2l(f); __builtin_log2f128(f);

// CHECK: call double @llvm.experimental.constrained.log2.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.log2.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.log2.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_lrint(f);      __builtin_lrintf(f);     __builtin_lrintl(f); __builtin_lrintf128(f);

// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lrint.i64.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_lround(f);     __builtin_lroundf(f);    __builtin_lroundl(f); __builtin_lroundf128(f);

// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call i64 @llvm.experimental.constrained.lround.i64.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_nearbyint(f);  __builtin_nearbyintf(f); __builtin_nearbyintl(f); __builtin_nearbyintf128(f);

// CHECK: call double @llvm.experimental.constrained.nearbyint.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.nearbyint.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.nearbyint.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_rint(f);       __builtin_rintf(f);      __builtin_rintl(f); __builtin_rintf128(f);

// CHECK: call double @llvm.experimental.constrained.rint.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.rint.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.rint.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_round(f);      __builtin_roundf(f);     __builtin_roundl(f); __builtin_roundf128(f);

// CHECK: call double @llvm.experimental.constrained.round.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.round.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.round.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")

  __builtin_sin(f);        __builtin_sinf(f);       __builtin_sinl(f); __builtin_sinf128(f);

// CHECK: call double @llvm.experimental.constrained.sin.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.sin.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.sin.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_sqrt(f);       __builtin_sqrtf(f);      __builtin_sqrtl(f); __builtin_sqrtf128(f);

// CHECK: call double @llvm.experimental.constrained.sqrt.f64(double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.sqrt.f32(float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.sqrt.f128(fp128 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")

  __builtin_trunc(f);      __builtin_truncf(f);     __builtin_truncl(f); __builtin_truncf128(f);

// CHECK: call double @llvm.experimental.constrained.trunc.f64(double %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call float @llvm.experimental.constrained.trunc.f32(float %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80 %{{.*}}, metadata !"fpexcept.strict")
// CHECK: call fp128 @llvm.experimental.constrained.trunc.f128(fp128 %{{.*}}, metadata !"fpexcept.strict")
};

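// FileCheck also verifies that the corresponding intrinsic declarations, with
// their trailing metadata parameters, appear in the module: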
// CHECK: declare double @llvm.experimental.constrained.frem.f64(double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.frem.f32(float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.frem.f80(x86_fp80, x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.frem.f128(fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.pow.f64(double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.pow.f32(float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.pow.f80(x86_fp80, x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.pow.f128(fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.powi.f64(double, i32, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.powi.f32(float, i32, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.powi.f80(x86_fp80, i32, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.ceil.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.ceil.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.ceil.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.ceil.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.cos.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.cos.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.cos.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.cos.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.exp.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.exp.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.exp.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.exp2.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.exp2.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.exp2.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.exp2.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.floor.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.floor.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.floor.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.floor.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.fma.f64(double, double, double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.fma.f32(float, float, float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.fma.f80(x86_fp80, x86_fp80, x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.fma.f128(fp128, fp128, fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.maxnum.f64(double, double, metadata)
// CHECK: declare float @llvm.experimental.constrained.maxnum.f32(float, float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.maxnum.f80(x86_fp80, x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.maxnum.f128(fp128, fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.minnum.f64(double, double, metadata)
// CHECK: declare float @llvm.experimental.constrained.minnum.f32(float, float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.minnum.f80(x86_fp80, x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.minnum.f128(fp128, fp128, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f64(double, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f32(float, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f80(x86_fp80, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llrint.i64.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f64(double, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f32(float, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f80(x86_fp80, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.llround.i64.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.log.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.log10.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log10.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log10.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log10.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.log2.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.log2.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.log2.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.log2.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f64(double, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f32(float, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f80(x86_fp80, metadata, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lrint.i64.f128(fp128, metadata, metadata)

// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f64(double, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f32(float, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f80(x86_fp80, metadata)
// CHECK: declare i64 @llvm.experimental.constrained.lround.i64.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.nearbyint.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.nearbyint.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.nearbyint.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.nearbyint.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.rint.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.rint.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.rint.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.rint.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.round.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.round.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.round.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.round.f128(fp128, metadata)

// CHECK: declare double @llvm.experimental.constrained.sin.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.sin.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sin.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sin.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.sqrt.f64(double, metadata, metadata)
// CHECK: declare float @llvm.experimental.constrained.sqrt.f32(float, metadata, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.sqrt.f80(x86_fp80, metadata, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.sqrt.f128(fp128, metadata, metadata)

// CHECK: declare double @llvm.experimental.constrained.trunc.f64(double, metadata)
// CHECK: declare float @llvm.experimental.constrained.trunc.f32(float, metadata)
// CHECK: declare x86_fp80 @llvm.experimental.constrained.trunc.f80(x86_fp80, metadata)
// CHECK: declare fp128 @llvm.experimental.constrained.trunc.f128(fp128, metadata)

#pragma STDC FP_CONTRACT ON
void bar(float f) {
  f * f + f;
  (double)f * f - f;
  (long double)-f * f + f;
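
  // With FP_CONTRACT ON, each multiply/add pair above may be contracted into
  // a single constrained fmuladd call; the subtraction's negated addend and
  // the explicit negation remain fneg instructions, as checked below.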
  // CHECK: call float @llvm.experimental.constrained.fmuladd.f32(float %{{.*}}, float %{{.*}}, float %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK: fneg
  // CHECK: call double @llvm.experimental.constrained.fmuladd.f64(double %{{.*}}, double %{{.*}}, double %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
  // CHECK: fneg
  // CHECK: call x86_fp80 @llvm.experimental.constrained.fmuladd.f80(x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, x86_fp80 %{{.*}}, metadata !"round.tonearest", metadata !"fpexcept.strict")
};
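
// For contrast, a minimal sketch (hypothetical, not part of the checked
// output): compiled with the same flags but without the
// #pragma float_control(except, on) above, a function such as
//
//   float baz(float f) { return __builtin_sqrtf(f); }
//
// would lower to a call to @llvm.experimental.constrained.sqrt.f32 carrying
// metadata !"fpexcept.maytrap" rather than !"fpexcept.strict".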