; RUN: opt -instcombine -S < %s | FileCheck %s

; Aggregate result types returned by the *.with.overflow intrinsics:
; { result, overflow-bit }.
%overflow.result = type {i8, i1}
%ov.result.32 = type { i32, i1 }


declare %overflow.result @llvm.uadd.with.overflow.i8(i8, i8) nounwind readnone
declare %overflow.result @llvm.umul.with.overflow.i8(i8, i8) nounwind readnone
declare %ov.result.32 @llvm.sadd.with.overflow.i32(i32, i32) nounwind readnone
declare %ov.result.32 @llvm.uadd.with.overflow.i32(i32, i32) nounwind readnone
declare %ov.result.32 @llvm.ssub.with.overflow.i32(i32, i32) nounwind readnone
declare %ov.result.32 @llvm.usub.with.overflow.i32(i32, i32) nounwind readnone
declare %ov.result.32 @llvm.smul.with.overflow.i32(i32, i32) nounwind readnone
declare %ov.result.32 @llvm.umul.with.overflow.i32(i32, i32) nounwind readnone
declare double @llvm.powi.f64(double, i32) nounwind readonly
declare i32 @llvm.cttz.i32(i32, i1) nounwind readnone
declare i32 @llvm.ctlz.i32(i32, i1) nounwind readnone
declare i32 @llvm.ctpop.i32(i32) nounwind readnone
declare i8 @llvm.ctlz.i8(i8, i1) nounwind readnone
declare double @llvm.cos.f64(double %Val) nounwind readonly
declare double @llvm.sin.f64(double %Val) nounwind readonly
declare double @llvm.floor.f64(double %Val) nounwind readonly
declare double @llvm.ceil.f64(double %Val) nounwind readonly
declare double @llvm.trunc.f64(double %Val) nounwind readonly
declare double @llvm.rint.f64(double %Val) nounwind readonly
declare double @llvm.nearbyint.f64(double %Val) nounwind readonly
; Only the value (element 0) of the uadd result is used, so the intrinsic
; collapses to a plain add (per the CHECK lines below).
define i8 @uaddtest1(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %y = extractvalue %overflow.result %x, 0
  ret i8 %y
; CHECK-LABEL: @uaddtest1(
; CHECK-NEXT: %y = add i8 %A, %B
; CHECK-NEXT: ret i8 %y
}
36
; Both operands are masked to <= 127, so the i8 add cannot wrap: the
; overflow bit folds to false and the add gains nuw.
define i8 @uaddtest2(i8 %A, i8 %B, i1* %overflowPtr) {
  %and.A = and i8 %A, 127
  %and.B = and i8 %B, 127
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %and.A, i8 %and.B)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK-LABEL: @uaddtest2(
; CHECK-NEXT: %and.A = and i8 %A, 127
; CHECK-NEXT: %and.B = and i8 %B, 127
; CHECK-NEXT: %x = add nuw i8 %and.A, %and.B
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 %x
}
52
; Both operands have the sign bit forced on, so the unsigned add always
; wraps: the overflow bit folds to true.
define i8 @uaddtest3(i8 %A, i8 %B, i1* %overflowPtr) {
  %or.A = or i8 %A, -128
  %or.B = or i8 %B, -128
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %or.A, i8 %or.B)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK-LABEL: @uaddtest3(
; CHECK-NEXT: %or.A = or i8 %A, -128
; CHECK-NEXT: %or.B = or i8 %B, -128
; CHECK-NEXT: %x = add i8 %or.A, %or.B
; CHECK-NEXT: store i1 true, i1* %overflowPtr
; CHECK-NEXT: ret i8 %x
}
68
; An undef operand lets the whole call fold away (result becomes undef,
; overflow becomes false).
define i8 @uaddtest4(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 undef, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK-LABEL: @uaddtest4(
; CHECK-NEXT: ret i8 undef
}
78
; Adding zero is an identity and can never overflow.
define i8 @uaddtest5(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK-LABEL: @uaddtest5(
; CHECK: ret i8 %A
}
88
; Only the overflow bit of (A + -4) is used: it folds to an unsigned
; compare against the constant.
define i1 @uaddtest6(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 -4)
  %z = extractvalue %overflow.result %x, 1
  ret i1 %z
; CHECK-LABEL: @uaddtest6(
; CHECK-NEXT: %z = icmp ugt i8 %A, 3
; CHECK-NEXT: ret i1 %z
}
97
; As in @uaddtest1: unused overflow bit, so the call becomes a plain add.
define i8 @uaddtest7(i8 %A, i8 %B) {
  %x = call %overflow.result @llvm.uadd.with.overflow.i8(i8 %A, i8 %B)
  %z = extractvalue %overflow.result %x, 0
  ret i8 %z
; CHECK-LABEL: @uaddtest7(
; CHECK-NEXT: %z = add i8 %A, %B
; CHECK-NEXT: ret i8 %z
}
106
; PR20194
; sext'd i8 operands cannot overflow an i32 signed add: the intrinsic
; becomes add nsw with the overflow bit folded to false.
define %ov.result.32 @saddtest_nsw(i8 %a, i8 %b) {
  %A = sext i8 %a to i32
  %B = sext i8 %b to i32
  %x = call %ov.result.32 @llvm.sadd.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @saddtest_nsw
; CHECK: %x = add nsw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
118
; Both operands masked to 31 bits cannot overflow an unsigned i32 add.
define %ov.result.32 @uaddtest_nuw(i32 %a, i32 %b) {
  %A = and i32 %a, 2147483647
  %B = and i32 %b, 2147483647
  %x = call %ov.result.32 @llvm.uadd.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @uaddtest_nuw
; CHECK: %x = add nuw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
129
; sext'd i8 operands cannot overflow an i32 signed sub.
define %ov.result.32 @ssubtest_nsw(i8 %a, i8 %b) {
  %A = sext i8 %a to i32
  %B = sext i8 %b to i32
  %x = call %ov.result.32 @llvm.ssub.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @ssubtest_nsw
; CHECK: %x = sub nsw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
140
; %A always has its sign bit set while %B never does, so %A >= %B and the
; unsigned sub cannot wrap.
define %ov.result.32 @usubtest_nuw(i32 %a, i32 %b) {
  %A = or i32 %a, 2147483648
  %B = and i32 %b, 2147483647
  %x = call %ov.result.32 @llvm.usub.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @usubtest_nuw
; CHECK: %x = sub nuw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
151
; 12-bit * 19-bit operands fit in 31 bits, so the signed mul cannot
; overflow in either sense.
define %ov.result.32 @smultest1_nsw(i32 %a, i32 %b) {
  %A = and i32 %a, 4095 ; 0xfff
  %B = and i32 %b, 524287; 0x7ffff
  %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @smultest1_nsw
; CHECK: %x = mul nuw nsw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
162
; ashr by 16 leaves 17 significant (sign-extended) bits per operand, so
; the signed product fits in 32 bits: mul nsw, no overflow.
define %ov.result.32 @smultest2_nsw(i32 %a, i32 %b) {
  %A = ashr i32 %a, 16
  %B = ashr i32 %b, 16
  %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @smultest2_nsw
; CHECK: %x = mul nsw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
173
; One extra significant bit (ashr 15 vs 16) makes overflow possible, so
; the intrinsic must be left alone.
define %ov.result.32 @smultest3_sw(i32 %a, i32 %b) {
  %A = ashr i32 %a, 16
  %B = ashr i32 %b, 15
  %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @smultest3_sw
; CHECK: %x = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %A, i32 %B)
; CHECK-NEXT:  ret %ov.result.32 %x
}
183
; 16-bit * 16-bit fits in 32 bits, so the unsigned mul cannot overflow.
define %ov.result.32 @umultest_nuw(i32 %a, i32 %b) {
  %A = and i32 %a, 65535 ; 0xffff
  %B = and i32 %b, 65535 ; 0xffff
  %x = call %ov.result.32 @llvm.umul.with.overflow.i32(i32 %A, i32 %B)
  ret %ov.result.32 %x
; CHECK-LABEL: @umultest_nuw
; CHECK: %x = mul nuw i32 %A, %B
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
194
; Multiplying by zero folds the whole call: result 0, overflow false.
define i8 @umultest1(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 0, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK-LABEL: @umultest1(
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 0
}
205
; Multiplying by one is an identity and can never overflow.
define i8 @umultest2(i8 %A, i1* %overflowPtr) {
  %x = call %overflow.result @llvm.umul.with.overflow.i8(i8 1, i8 %A)
  %y = extractvalue %overflow.result %x, 0
  %z = extractvalue %overflow.result %x, 1
  store i1 %z, i1* %overflowPtr
  ret i8 %y
; CHECK-LABEL: @umultest2(
; CHECK-NEXT: store i1 false, i1* %overflowPtr
; CHECK-NEXT: ret i8 %A
}
216
; (n >> 2) * 3 cannot overflow, so the overflow bit is false and the
; select collapses, leaving a plain mul nuw.
define i32 @umultest3(i32 %n) nounwind {
  %shr = lshr i32 %n, 2
  %mul = call %ov.result.32 @llvm.umul.with.overflow.i32(i32 %shr, i32 3)
  %ov = extractvalue %ov.result.32 %mul, 1
  %res = extractvalue %ov.result.32 %mul, 0
  %ret = select i1 %ov, i32 -1, i32 %res
  ret i32 %ret
; CHECK-LABEL: @umultest3(
; CHECK-NEXT: shr
; CHECK-NEXT: mul nuw
; CHECK-NEXT: ret
}
229
; (n >> 1) * 4 can still overflow (31 bits * 4 needs 33), so the
; intrinsic must remain.
define i32 @umultest4(i32 %n) nounwind {
  %shr = lshr i32 %n, 1
  %mul = call %ov.result.32 @llvm.umul.with.overflow.i32(i32 %shr, i32 4)
  %ov = extractvalue %ov.result.32 %mul, 1
  %res = extractvalue %ov.result.32 %mul, 0
  %ret = select i1 %ov, i32 -1, i32 %res
  ret i32 %ret
; CHECK-LABEL: @umultest4(
; CHECK: umul.with.overflow
}
240
; Both operands have the top bit forced on (>= 2^31), so the unsigned
; product always exceeds 32 bits: overflow folds to true.
define %ov.result.32 @umultest5(i32 %x, i32 %y) nounwind {
  %or_x = or i32 %x, 2147483648
  %or_y = or i32 %y, 2147483648
  %mul = call %ov.result.32 @llvm.umul.with.overflow.i32(i32 %or_x, i32 %or_y)
  ret %ov.result.32 %mul
; CHECK-LABEL: @umultest5(
; CHECK-NEXT: %[[or_x:.*]] = or i32 %x, -2147483648
; CHECK-NEXT: %[[or_y:.*]] = or i32 %y, -2147483648
; CHECK-NEXT: %[[mul:.*]] = mul i32 %[[or_x]], %[[or_y]]
; CHECK-NEXT: %[[ret:.*]] = insertvalue %ov.result.32 { i32 undef, i1 true }, i32 %[[mul]], 0
; CHECK-NEXT: ret %ov.result.32 %[[ret]]
}
253
; powi with constant exponents -1/0/1 folds to 1/V, 1.0, and V.
define void @powi(double %V, double *%P) {
entry:
  %A = tail call double @llvm.powi.f64(double %V, i32 -1) nounwind
  store volatile double %A, double* %P

  %B = tail call double @llvm.powi.f64(double %V, i32 0) nounwind
  store volatile double %B, double* %P

  %C = tail call double @llvm.powi.f64(double %V, i32 1) nounwind
  store volatile double %C, double* %P
  ret void
; CHECK-LABEL: @powi(
; CHECK: %A = fdiv double 1.0{{.*}}, %V
; CHECK: store volatile double %A,
; CHECK: store volatile double 1.0
; CHECK: store volatile double %V
}
271
; (a | 8) & -8 always has bit 3 set and bits 0-2 clear, so cttz is the
; constant 3.
define i32 @cttz(i32 %a) {
entry:
  %or = or i32 %a, 8
  %and = and i32 %or, -8
  %count = tail call i32 @llvm.cttz.i32(i32 %and, i1 true) nounwind readnone
  ret i32 %count
; CHECK-LABEL: @cttz(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i32 3
}
282
; (a | 32) & 63 always has bit 5 set and bits 6-7 clear, so ctlz on the
; i8 value is the constant 2.
define i8 @ctlz(i8 %a) {
entry:
  %or = or i8 %a, 32
  %and = and i8 %or, 63
  %count = tail call i8 @llvm.ctlz.i8(i8 %and, i1 true) nounwind readnone
  ret i8 %count
; CHECK-LABEL: @ctlz(
; CHECK-NEXT: entry:
; CHECK-NEXT: ret i8 2
}
293
; ctlz/cttz/ctpop compared against their boundary values simplify to
; direct comparisons of the argument with zero.
define void @cmp.simplify(i32 %a, i32 %b, i1* %c) {
entry:
  %lz = tail call i32 @llvm.ctlz.i32(i32 %a, i1 false) nounwind readnone
  %lz.cmp = icmp eq i32 %lz, 32
  store volatile i1 %lz.cmp, i1* %c
  %tz = tail call i32 @llvm.cttz.i32(i32 %a, i1 false) nounwind readnone
  %tz.cmp = icmp ne i32 %tz, 32
  store volatile i1 %tz.cmp, i1* %c
  %pop = tail call i32 @llvm.ctpop.i32(i32 %b) nounwind readnone
  %pop.cmp = icmp eq i32 %pop, 0
  store volatile i1 %pop.cmp, i1* %c
  ret void
; CHECK: @cmp.simplify
; CHECK-NEXT: entry:
; CHECK-NEXT: %lz.cmp = icmp eq i32 %a, 0
; CHECK-NEXT: store volatile i1 %lz.cmp, i1* %c
; CHECK-NEXT: %tz.cmp = icmp ne i32 %a, 0
; CHECK-NEXT: store volatile i1 %tz.cmp, i1* %c
; CHECK-NEXT: %pop.cmp = icmp eq i32 %b, 0
; CHECK-NEXT: store volatile i1 %pop.cmp, i1* %c
}
315
; ctlz(x) >> 5 is 1 iff ctlz == 32, i.e. iff x == 0: folds to
; zext(icmp eq x, 0).
define i32 @cttz_simplify1a(i32 %x) nounwind readnone ssp {
  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 false)
  %shr3 = lshr i32 %tmp1, 5
  ret i32 %shr3

; CHECK-LABEL: @cttz_simplify1a(
; CHECK: icmp eq i32 %x, 0
; CHECK-NEXT: zext i1
; CHECK-NEXT: ret i32
}
326
; With is_zero_undef=true, ctlz is in [0,31], so the >> 5 is always 0.
define i32 @cttz_simplify1b(i32 %x) nounwind readnone ssp {
  %tmp1 = tail call i32 @llvm.ctlz.i32(i32 %x, i1 true)
  %shr3 = lshr i32 %tmp1, 5
  ret i32 %shr3

; CHECK-LABEL: @cttz_simplify1b(
; CHECK-NEXT: ret i32 0
}
335
; ctlz(0) with is_zero_undef=true folds to undef.
define i32 @ctlz_undef(i32 %Value) nounwind {
  %ctlz = call i32 @llvm.ctlz.i32(i32 0, i1 true)
  ret i32 %ctlz

; CHECK-LABEL: @ctlz_undef(
; CHECK-NEXT: ret i32 undef
}
343
; cttz(0) with is_zero_undef=true folds to undef.
define i32 @cttz_undef(i32 %Value) nounwind {
  %cttz = call i32 @llvm.cttz.i32(i32 0, i1 true)
  ret i32 %cttz

; CHECK-LABEL: @cttz_undef(
; CHECK-NEXT: ret i32 undef
}
351
; select(x != 0, ctlz(x, true), 32) is exactly ctlz(x, false): the guard
; is folded into the intrinsic's is_zero_undef flag.
define i32 @ctlz_select(i32 %Value) nounwind {
  %tobool = icmp ne i32 %Value, 0
  %ctlz = call i32 @llvm.ctlz.i32(i32 %Value, i1 true)
  %s = select i1 %tobool, i32 %ctlz, i32 32
  ret i32 %s

; CHECK-LABEL: @ctlz_select(
; CHECK-NEXT: call i32 @llvm.ctlz.i32(i32 %Value, i1 false)
; CHECK-NEXT: ret i32
}
362
; Same fold as @ctlz_select, for cttz.
define i32 @cttz_select(i32 %Value) nounwind {
  %tobool = icmp ne i32 %Value, 0
  %cttz = call i32 @llvm.cttz.i32(i32 %Value, i1 true)
  %s = select i1 %tobool, i32 %cttz, i32 32
  ret i32 %s

; CHECK-LABEL: @cttz_select(
; CHECK-NEXT: call i32 @llvm.cttz.i32(i32 %Value, i1 false)
; CHECK-NEXT: ret i32
}
373
; v1/2 is at most 2^30 in magnitude, so adding 1 cannot overflow.
; CHECK-LABEL: @overflow_div_add(
; CHECK: ret i1 false
define i1 @overflow_div_add(i32 %v1, i32 %v2) nounwind {
entry:
  %div = sdiv i32 %v1, 2
  %t = call %ov.result.32 @llvm.sadd.with.overflow.i32(i32 %div, i32 1)
  %obit = extractvalue %ov.result.32 %t, 1
  ret i1 %obit
}
383
; CHECK-LABEL: @overflow_div_sub(
; CHECK: ret i1 false
define i1 @overflow_div_sub(i32 %v1, i32 %v2) nounwind {
entry:
  ; Check cases where the known sign bits are larger than the word size.
  %a = ashr i32 %v1, 18
  %div = sdiv i32 %a, 65536
  %t = call %ov.result.32 @llvm.ssub.with.overflow.i32(i32 %div, i32 1)
  %obit = extractvalue %ov.result.32 %t, 1
  ret i1 %obit
}
395
; |v1 % 1000| < 1000, so rem*rem < 10^6 fits in i32: no overflow.
; CHECK-LABEL: @overflow_mod_mul(
; CHECK: ret i1 false
define i1 @overflow_mod_mul(i32 %v1, i32 %v2) nounwind {
entry:
  %rem = srem i32 %v1, 1000
  %t = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue %ov.result.32 %t, 1
  ret i1 %obit
}
405
; CHECK-LABEL: @overflow_mod_overflow_mul(
; CHECK-NOT: ret i1 false
define i1 @overflow_mod_overflow_mul(i32 %v1, i32 %v2) nounwind {
entry:
  %rem = srem i32 %v1, 65537
  ; This may overflow because the result of the mul operands may be greater than 16bits
  ; and the result greater than 32.
  %t = call %ov.result.32 @llvm.smul.with.overflow.i32(i32 %rem, i32 %rem)
  %obit = extractvalue %ov.result.32 %t, 1
  ret i1 %obit
}
417
; 0 - sext(i8) stays within [-127, 128], so no signed overflow.
define %ov.result.32 @ssubtest_reorder(i8 %a) {
  %A = sext i8 %a to i32
  %x = call %ov.result.32 @llvm.ssub.with.overflow.i32(i32 0, i32 %A)
  ret %ov.result.32 %x
; CHECK-LABEL: @ssubtest_reorder
; CHECK: %x = sub nsw i32 0, %A
; CHECK-NEXT: %1 = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %x, 0
; CHECK-NEXT:  ret %ov.result.32 %1
}
427
; Subtracting zero is an identity; the sub itself disappears.
define %ov.result.32 @never_overflows_ssub_test0(i32 %a) {
  %x = call %ov.result.32 @llvm.ssub.with.overflow.i32(i32 %a, i32 0)
  ret %ov.result.32 %x
; CHECK-LABEL: @never_overflows_ssub_test0
; CHECK-NEXT: %[[x:.*]] = insertvalue %ov.result.32 { i32 undef, i1 false }, i32 %a, 0
; CHECK-NEXT:  ret %ov.result.32 %[[x]]
}
435
; Constant folding: cos(0.0) == 1.0.
define void @cos(double *%P) {
entry:
  %B = tail call double @llvm.cos.f64(double 0.0) nounwind
  store volatile double %B, double* %P

  ret void
; CHECK-LABEL: @cos(
; CHECK: store volatile double 1.000000e+00, double* %P
}
445
; Constant folding: sin(0.0) == 0.0.
define void @sin(double *%P) {
entry:
  %B = tail call double @llvm.sin.f64(double 0.0) nounwind
  store volatile double %B, double* %P

  ret void
; CHECK-LABEL: @sin(
; CHECK: store volatile double 0.000000e+00, double* %P
}
455
; Constant folding: floor(1.5) == 1.0, floor(-1.5) == -2.0.
define void @floor(double *%P) {
entry:
  %B = tail call double @llvm.floor.f64(double 1.5) nounwind
  store volatile double %B, double* %P
  %C = tail call double @llvm.floor.f64(double -1.5) nounwind
  store volatile double %C, double* %P
  ret void
; CHECK-LABEL: @floor(
; CHECK: store volatile double 1.000000e+00, double* %P, align 8
; CHECK: store volatile double -2.000000e+00, double* %P, align 8
}
467
; Constant folding: ceil(1.5) == 2.0, ceil(-1.5) == -1.0.
define void @ceil(double *%P) {
entry:
  %B = tail call double @llvm.ceil.f64(double 1.5) nounwind
  store volatile double %B, double* %P
  %C = tail call double @llvm.ceil.f64(double -1.5) nounwind
  store volatile double %C, double* %P
  ret void
; CHECK-LABEL: @ceil(
; CHECK: store volatile double 2.000000e+00, double* %P, align 8
; CHECK: store volatile double -1.000000e+00, double* %P, align 8
}
479
; Constant folding: trunc rounds toward zero for both signs.
define void @trunc(double *%P) {
entry:
  %B = tail call double @llvm.trunc.f64(double 1.5) nounwind
  store volatile double %B, double* %P
  %C = tail call double @llvm.trunc.f64(double -1.5) nounwind
  store volatile double %C, double* %P
  ret void
; CHECK-LABEL: @trunc(
; CHECK: store volatile double 1.000000e+00, double* %P, align 8
; CHECK: store volatile double -1.000000e+00, double* %P, align 8
}
491
; Constant folding: rint rounds halfway cases to even (1.5 -> 2.0,
; -1.5 -> -2.0).
define void @rint(double *%P) {
entry:
  %B = tail call double @llvm.rint.f64(double 1.5) nounwind
  store volatile double %B, double* %P
  %C = tail call double @llvm.rint.f64(double -1.5) nounwind
  store volatile double %C, double* %P
  ret void
; CHECK-LABEL: @rint(
; CHECK: store volatile double 2.000000e+00, double* %P, align 8
; CHECK: store volatile double -2.000000e+00, double* %P, align 8
}
503
; Constant folding: nearbyint rounds halfway cases to even, matching
; @rint above.
define void @nearbyint(double *%P) {
entry:
  %B = tail call double @llvm.nearbyint.f64(double 1.5) nounwind
  store volatile double %B, double* %P
  %C = tail call double @llvm.nearbyint.f64(double -1.5) nounwind
  store volatile double %C, double* %P
  ret void
; CHECK-LABEL: @nearbyint(
; CHECK: store volatile double 2.000000e+00, double* %P, align 8
; CHECK: store volatile double -2.000000e+00, double* %P, align 8
}