; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instsimplify -S | FileCheck %s

; Fold icmp with a constant operand.

define i1 @tautological_ule(i8 %x) {
; CHECK-LABEL: @tautological_ule(
; CHECK-NEXT:    ret i1 true
;
  %cmp = icmp ule i8 %x, 255
  ret i1 %cmp
}

define <2 x i1> @tautological_ule_vec(<2 x i8> %x) {
; CHECK-LABEL: @tautological_ule_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %cmp = icmp ule <2 x i8> %x, <i8 255, i8 255>
  ret <2 x i1> %cmp
}
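
; A hypothetical extra example (not autogenerated): assuming the same
; constant-operand logic applies, the signed analogue should fold too,
; since no i8 value is signed-less-than the minimum value -128.
define i1 @tautological_sge(i8 %x) {
; CHECK-LABEL: @tautological_sge(
; CHECK-NEXT:    ret i1 true
;
  %cmp = icmp sge i8 %x, -128
  ret i1 %cmp
}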

define i1 @tautological_ugt(i8 %x) {
; CHECK-LABEL: @tautological_ugt(
; CHECK-NEXT:    ret i1 false
;
  %cmp = icmp ugt i8 %x, 255
  ret i1 %cmp
}

define <2 x i1> @tautological_ugt_vec(<2 x i8> %x) {
; CHECK-LABEL: @tautological_ugt_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %cmp = icmp ugt <2 x i8> %x, <i8 255, i8 255>
  ret <2 x i1> %cmp
}

; 'urem x, C2' produces [0, C2)
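; Here C2 = 10, so %A below is in [0, 10); every such value is 'ult 15', and the compare folds to true.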
define i1 @urem3(i32 %X) {
; CHECK-LABEL: @urem3(
; CHECK-NEXT:    ret i1 true
;
  %A = urem i32 %X, 10
  %B = icmp ult i32 %A, 15
  ret i1 %B
}

define <2 x i1> @urem3_vec(<2 x i32> %X) {
; CHECK-LABEL: @urem3_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %A = urem <2 x i32> %X, <i32 10, i32 10>
  %B = icmp ult <2 x i32> %A, <i32 15, i32 15>
  ret <2 x i1> %B
}
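
; A hypothetical boundary check (not autogenerated): [0, 10) excludes 10 itself,
; so assuming the same range logic applies, an equality with 10 should fold to false.
define i1 @urem3_boundary(i32 %X) {
; CHECK-LABEL: @urem3_boundary(
; CHECK-NEXT:    ret i1 false
;
  %A = urem i32 %X, 10
  %B = icmp eq i32 %A, 10
  ret i1 %B
}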

; 'srem x, C2' produces (-|C2|, |C2|)
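; Here |C2| = 5, so %A below is in (-5, 5); no such value is 'sgt 5', and the compare folds to false.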
define i1 @srem1(i32 %X) {
; CHECK-LABEL: @srem1(
; CHECK-NEXT:    ret i1 false
;
  %A = srem i32 %X, -5
  %B = icmp sgt i32 %A, 5
  ret i1 %B
}

define <2 x i1> @srem1_vec(<2 x i32> %X) {
; CHECK-LABEL: @srem1_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %A = srem <2 x i32> %X, <i32 -5, i32 -5>
  %B = icmp sgt <2 x i32> %A, <i32 5, i32 5>
  ret <2 x i1> %B
}

; 'udiv C2, x' produces [0, C2]
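; Here C2 = 123, so %A below is in [0, 123] and can never be 'ugt 124'.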
define i1 @udiv5(i32 %X) {
; CHECK-LABEL: @udiv5(
; CHECK-NEXT:    ret i1 false
;
  %A = udiv i32 123, %X
  %C = icmp ugt i32 %A, 124
  ret i1 %C
}

define <2 x i1> @udiv5_vec(<2 x i32> %X) {
; CHECK-LABEL: @udiv5_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %A = udiv <2 x i32> <i32 123, i32 123>, %X
  %C = icmp ugt <2 x i32> %A, <i32 124, i32 124>
  ret <2 x i1> %C
}

; 'udiv x, C2' produces [0, UINT_MAX / C2]
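; Here UINT_MAX / 1000000 = 4294, so %A below is in [0, 4294] and 'ult 5000' is always true.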
define i1 @udiv1(i32 %X) {
; CHECK-LABEL: @udiv1(
; CHECK-NEXT:    ret i1 true
;
  %A = udiv i32 %X, 1000000
  %B = icmp ult i32 %A, 5000
  ret i1 %B
}

define <2 x i1> @udiv1_vec(<2 x i32> %X) {
; CHECK-LABEL: @udiv1_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %A = udiv <2 x i32> %X, <i32 1000000, i32 1000000>
  %B = icmp ult <2 x i32> %A, <i32 5000, i32 5000>
  ret <2 x i1> %B
}

; 'sdiv C2, x' produces [-|C2|, |C2|]
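; Here |C2| = 2, so %div below is in [-2, 2] and can never equal 3.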
define i1 @compare_dividend(i32 %a) {
; CHECK-LABEL: @compare_dividend(
; CHECK-NEXT:    ret i1 false
;
  %div = sdiv i32 2, %a
  %cmp = icmp eq i32 %div, 3
  ret i1 %cmp
}

define <2 x i1> @compare_dividend_vec(<2 x i32> %a) {
; CHECK-LABEL: @compare_dividend_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %div = sdiv <2 x i32> <i32 2, i32 2>, %a
  %cmp = icmp eq <2 x i32> %div, <i32 3, i32 3>
  ret <2 x i1> %cmp
}

; 'sdiv x, C2' produces [INT_MIN / C2, INT_MAX / C2]
;    where C2 != -1 and C2 != 0 and C2 != 1
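; Here INT_MAX / 1000000 = 2147, so %A below is in [-2147, 2147] and 'slt 3000' is always true.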
define i1 @sdiv1(i32 %X) {
; CHECK-LABEL: @sdiv1(
; CHECK-NEXT:    ret i1 true
;
  %A = sdiv i32 %X, 1000000
  %B = icmp slt i32 %A, 3000
  ret i1 %B
}

define <2 x i1> @sdiv1_vec(<2 x i32> %X) {
; CHECK-LABEL: @sdiv1_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %A = sdiv <2 x i32> %X, <i32 1000000, i32 1000000>
  %B = icmp slt <2 x i32> %A, <i32 3000, i32 3000>
  ret <2 x i1> %B
}

; 'shl nuw C2, x' produces [C2, C2 << CLZ(C2)]
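; Here C2 = 4, and nuw means no set bits are shifted out, so %sub below is at least 4 and 'ugt 3' is always true.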
define i1 @shl5(i32 %X) {
; CHECK-LABEL: @shl5(
; CHECK-NEXT:    ret i1 true
;
  %sub = shl nuw i32 4, %X
  %cmp = icmp ugt i32 %sub, 3
  ret i1 %cmp
}

define <2 x i1> @shl5_vec(<2 x i32> %X) {
; CHECK-LABEL: @shl5_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %sub = shl nuw <2 x i32> <i32 4, i32 4>, %X
  %cmp = icmp ugt <2 x i32> %sub, <i32 3, i32 3>
  ret <2 x i1> %cmp
}

; 'shl nsw C2, x' produces [C2 << CLO(C2)-1, C2]
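; Here C2 = -1, so %sub below is in [INT_MIN, -1]; it can never equal the positive value 31.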
define i1 @shl2(i32 %X) {
; CHECK-LABEL: @shl2(
; CHECK-NEXT:    ret i1 false
;
  %sub = shl nsw i32 -1, %X
  %cmp = icmp eq i32 %sub, 31
  ret i1 %cmp
}

define <2 x i1> @shl2_vec(<2 x i32> %X) {
; CHECK-LABEL: @shl2_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %sub = shl nsw <2 x i32> <i32 -1, i32 -1>, %X
  %cmp = icmp eq <2 x i32> %sub, <i32 31, i32 31>
  ret <2 x i1> %cmp
}

; 'shl nsw C2, x' produces [C2 << CLO(C2)-1, C2]
define i1 @shl4(i32 %X) {
; CHECK-LABEL: @shl4(
; CHECK-NEXT:    ret i1 true
;
  %sub = shl nsw i32 -1, %X
  %cmp = icmp sle i32 %sub, -1
  ret i1 %cmp
}

define <2 x i1> @shl4_vec(<2 x i32> %X) {
; CHECK-LABEL: @shl4_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %sub = shl nsw <2 x i32> <i32 -1, i32 -1>, %X
  %cmp = icmp sle <2 x i32> %sub, <i32 -1, i32 -1>
  ret <2 x i1> %cmp
}

; 'shl nsw C2, x' produces [C2, C2 << CLZ(C2)-1]
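; Here C2 = 1, so %shl below is in [1, 1 << 62]; every such value is 'sge 0'.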
define i1 @icmp_shl_nsw_1(i64 %a) {
; CHECK-LABEL: @icmp_shl_nsw_1(
; CHECK-NEXT:    ret i1 true
;
  %shl = shl nsw i64 1, %a
  %cmp = icmp sge i64 %shl, 0
  ret i1 %cmp
}

define <2 x i1> @icmp_shl_nsw_1_vec(<2 x i64> %a) {
; CHECK-LABEL: @icmp_shl_nsw_1_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %shl = shl nsw <2 x i64> <i64 1, i64 1>, %a
  %cmp = icmp sge <2 x i64> %shl, zeroinitializer
  ret <2 x i1> %cmp
}

; 'shl nsw C2, x' produces [C2 << CLO(C2)-1, C2]
define i1 @icmp_shl_nsw_neg1(i64 %a) {
; CHECK-LABEL: @icmp_shl_nsw_neg1(
; CHECK-NEXT:    ret i1 false
;
  %shl = shl nsw i64 -1, %a
  %cmp = icmp sge i64 %shl, 3
  ret i1 %cmp
}

define <2 x i1> @icmp_shl_nsw_neg1_vec(<2 x i64> %a) {
; CHECK-LABEL: @icmp_shl_nsw_neg1_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %shl = shl nsw <2 x i64> <i64 -1, i64 -1>, %a
  %cmp = icmp sge <2 x i64> %shl, <i64 3, i64 3>
  ret <2 x i1> %cmp
}

; 'lshr x, C2' produces [0, UINT_MAX >> C2]
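; Here C2 = 30, so %s below is in [0, 3] and can never be 'ugt 8'.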
define i1 @lshr2(i32 %x) {
; CHECK-LABEL: @lshr2(
; CHECK-NEXT:    ret i1 false
;
  %s = lshr i32 %x, 30
  %c = icmp ugt i32 %s, 8
  ret i1 %c
}

define <2 x i1> @lshr2_vec(<2 x i32> %x) {
; CHECK-LABEL: @lshr2_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %s = lshr <2 x i32> %x, <i32 30, i32 30>
  %c = icmp ugt <2 x i32> %s, <i32 8, i32 8>
  ret <2 x i1> %c
}

; 'lshr C2, x' produces [C2 >> (Width-1), C2]
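; With 'exact', 30 (one trailing zero bit) can only be shifted by 0 or 1 without producing poison, so %shr below is 30 or 15 and 'ult 15' is always false.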
define i1 @exact_lshr_ugt_false(i32 %a) {
; CHECK-LABEL: @exact_lshr_ugt_false(
; CHECK-NEXT:    ret i1 false
;
  %shr = lshr exact i32 30, %a
  %cmp = icmp ult i32 %shr, 15
  ret i1 %cmp
}

define <2 x i1> @exact_lshr_ugt_false_vec(<2 x i32> %a) {
; CHECK-LABEL: @exact_lshr_ugt_false_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %shr = lshr exact <2 x i32> <i32 30, i32 30>, %a
  %cmp = icmp ult <2 x i32> %shr, <i32 15, i32 15>
  ret <2 x i1> %cmp
}

; 'lshr C2, x' produces [C2 >> (Width-1), C2]
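; Here C2 = 1, so %shr below is either 0 or 1 and can never be 'sgt 1'.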
define i1 @lshr_sgt_false(i32 %a) {
; CHECK-LABEL: @lshr_sgt_false(
; CHECK-NEXT:    ret i1 false
;
  %shr = lshr i32 1, %a
  %cmp = icmp sgt i32 %shr, 1
  ret i1 %cmp
}

define <2 x i1> @lshr_sgt_false_vec(<2 x i32> %a) {
; CHECK-LABEL: @lshr_sgt_false_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %shr = lshr <2 x i32> <i32 1, i32 1>, %a
  %cmp = icmp sgt <2 x i32> %shr, <i32 1, i32 1>
  ret <2 x i1> %cmp
}

; 'ashr x, C2' produces [INT_MIN >> C2, INT_MAX >> C2]
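; Here C2 = 30, so %s below is in [-2, 1] and can never be 'slt -5'.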
define i1 @ashr2(i32 %x) {
; CHECK-LABEL: @ashr2(
; CHECK-NEXT:    ret i1 false
;
  %s = ashr i32 %x, 30
  %c = icmp slt i32 %s, -5
  ret i1 %c
}

define <2 x i1> @ashr2_vec(<2 x i32> %x) {
; CHECK-LABEL: @ashr2_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %s = ashr <2 x i32> %x, <i32 30, i32 30>
  %c = icmp slt <2 x i32> %s, <i32 -5, i32 -5>
  ret <2 x i1> %c
}

; 'ashr C2, x' produces [C2, C2 >> (Width-1)]
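; Here C2 = -30, so %shr below is in [-30, -1] and is never 'sgt -1'.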
define i1 @ashr_sgt_false(i32 %a) {
; CHECK-LABEL: @ashr_sgt_false(
; CHECK-NEXT:    ret i1 false
;
  %shr = ashr i32 -30, %a
  %cmp = icmp sgt i32 %shr, -1
  ret i1 %cmp
}

define <2 x i1> @ashr_sgt_false_vec(<2 x i32> %a) {
; CHECK-LABEL: @ashr_sgt_false_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %shr = ashr <2 x i32> <i32 -30, i32 -30>, %a
  %cmp = icmp sgt <2 x i32> %shr, <i32 -1, i32 -1>
  ret <2 x i1> %cmp
}

; 'ashr C2, x' produces [C2, C2 >> (Width-1)]
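; With 'exact', -30 (one trailing zero bit) can only be shifted by 0 or 1 without producing poison, so %shr below is -30 or -15 and 'sgt -15' is always false.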
define i1 @exact_ashr_sgt_false(i32 %a) {
; CHECK-LABEL: @exact_ashr_sgt_false(
; CHECK-NEXT:    ret i1 false
;
  %shr = ashr exact i32 -30, %a
  %cmp = icmp sgt i32 %shr, -15
  ret i1 %cmp
}

define <2 x i1> @exact_ashr_sgt_false_vec(<2 x i32> %a) {
; CHECK-LABEL: @exact_ashr_sgt_false_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %shr = ashr exact <2 x i32> <i32 -30, i32 -30>, %a
  %cmp = icmp sgt <2 x i32> %shr, <i32 -15, i32 -15>
  ret <2 x i1> %cmp
}

; 'or x, C2' produces [C2, UINT_MAX]
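; Here C2 = 62, so %A below is at least 62 and is never 'ult 50'.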
define i1 @or1(i32 %X) {
; CHECK-LABEL: @or1(
; CHECK-NEXT:    ret i1 false
;
  %A = or i32 %X, 62
  %B = icmp ult i32 %A, 50
  ret i1 %B
}

define <2 x i1> @or1_vec(<2 x i32> %X) {
; CHECK-LABEL: @or1_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %A = or <2 x i32> %X, <i32 62, i32 62>
  %B = icmp ult <2 x i32> %A, <i32 50, i32 50>
  ret <2 x i1> %B
}

; 'and x, C2' produces [0, C2]
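; Here C2 = 62, so %A below is at most 62 and is never 'ugt 70'.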
define i1 @and1(i32 %X) {
; CHECK-LABEL: @and1(
; CHECK-NEXT:    ret i1 false
;
  %A = and i32 %X, 62
  %B = icmp ugt i32 %A, 70
  ret i1 %B
}

define <2 x i1> @and1_vec(<2 x i32> %X) {
; CHECK-LABEL: @and1_vec(
; CHECK-NEXT:    ret <2 x i1> zeroinitializer
;
  %A = and <2 x i32> %X, <i32 62, i32 62>
  %B = icmp ugt <2 x i32> %A, <i32 70, i32 70>
  ret <2 x i1> %B
}

; 'add nuw x, C2' produces [C2, UINT_MAX]
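; Here C2 = 13, so %add below is at least 13 and can never equal 12.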
define i1 @tautological9(i32 %x) {
; CHECK-LABEL: @tautological9(
; CHECK-NEXT:    ret i1 true
;
  %add = add nuw i32 %x, 13
  %cmp = icmp ne i32 %add, 12
  ret i1 %cmp
}

define <2 x i1> @tautological9_vec(<2 x i32> %x) {
; CHECK-LABEL: @tautological9_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %add = add nuw <2 x i32> %x, <i32 13, i32 13>
  %cmp = icmp ne <2 x i32> %add, <i32 12, i32 12>
  ret <2 x i1> %cmp
}

; The upper bound of the 'add' is 0.
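; (2147483647 + -2147483647 = 0, so 'sgt 0' can never hold.)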

define i1 @add_nsw_neg_const1(i32 %x) {
; CHECK-LABEL: @add_nsw_neg_const1(
; CHECK-NEXT:    ret i1 false
;
  %add = add nsw i32 %x, -2147483647
  %cmp = icmp sgt i32 %add, 0
  ret i1 %cmp
}

; InstCombine can fold this, but not InstSimplify.

define i1 @add_nsw_neg_const2(i32 %x) {
; CHECK-LABEL: @add_nsw_neg_const2(
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 %x, -2147483647
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD]], -1
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %x, -2147483647
  %cmp = icmp sgt i32 %add, -1
  ret i1 %cmp
}

; The upper bound of the 'add' is 1 (move the constants to prove we're doing range-based analysis).

define i1 @add_nsw_neg_const3(i32 %x) {
; CHECK-LABEL: @add_nsw_neg_const3(
; CHECK-NEXT:    ret i1 false
;
  %add = add nsw i32 %x, -2147483646
  %cmp = icmp sgt i32 %add, 1
  ret i1 %cmp
}

; InstCombine can fold this, but not InstSimplify.

define i1 @add_nsw_neg_const4(i32 %x) {
; CHECK-LABEL: @add_nsw_neg_const4(
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 %x, -2147483646
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[ADD]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %x, -2147483646
  %cmp = icmp sgt i32 %add, 0
  ret i1 %cmp
}

; The upper bound of the 'add' is 2147483647 - 42 = 2147483605 (move the constants again and try a different cmp predicate).

define i1 @add_nsw_neg_const5(i32 %x) {
; CHECK-LABEL: @add_nsw_neg_const5(
; CHECK-NEXT:    ret i1 true
;
  %add = add nsw i32 %x, -42
  %cmp = icmp ne i32 %add, 2147483606
  ret i1 %cmp
}

; InstCombine can fold this, but not InstSimplify.

define i1 @add_nsw_neg_const6(i32 %x) {
; CHECK-LABEL: @add_nsw_neg_const6(
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 %x, -42
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[ADD]], 2147483605
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %x, -42
  %cmp = icmp ne i32 %add, 2147483605
  ret i1 %cmp
}

; The lower bound of the 'add' is -1.
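; (-2147483648 + 2147483647 = -1, so 'slt -1' can never hold.)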

define i1 @add_nsw_pos_const1(i32 %x) {
; CHECK-LABEL: @add_nsw_pos_const1(
; CHECK-NEXT:    ret i1 false
;
  %add = add nsw i32 %x, 2147483647
  %cmp = icmp slt i32 %add, -1
  ret i1 %cmp
}

; InstCombine can fold this, but not InstSimplify.

define i1 @add_nsw_pos_const2(i32 %x) {
; CHECK-LABEL: @add_nsw_pos_const2(
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 %x, 2147483647
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], 0
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %x, 2147483647
  %cmp = icmp slt i32 %add, 0
  ret i1 %cmp
}

; The lower bound of the 'add' is -2 (move the constants to prove we're doing range-based analysis).

define i1 @add_nsw_pos_const3(i32 %x) {
; CHECK-LABEL: @add_nsw_pos_const3(
; CHECK-NEXT:    ret i1 false
;
  %add = add nsw i32 %x, 2147483646
  %cmp = icmp slt i32 %add, -2
  ret i1 %cmp
}

; InstCombine can fold this, but not InstSimplify.

define i1 @add_nsw_pos_const4(i32 %x) {
; CHECK-LABEL: @add_nsw_pos_const4(
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 %x, 2147483646
; CHECK-NEXT:    [[CMP:%.*]] = icmp slt i32 [[ADD]], -1
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %x, 2147483646
  %cmp = icmp slt i32 %add, -1
  ret i1 %cmp
}

; The lower bound of the 'add' is -2147483648 + 42 = -2147483606 (move the constants again and change the cmp predicate).

define i1 @add_nsw_pos_const5(i32 %x) {
; CHECK-LABEL: @add_nsw_pos_const5(
; CHECK-NEXT:    ret i1 false
;
  %add = add nsw i32 %x, 42
  %cmp = icmp eq i32 %add, -2147483607
  ret i1 %cmp
}

; InstCombine can fold this, but not InstSimplify.

define i1 @add_nsw_pos_const6(i32 %x) {
; CHECK-LABEL: @add_nsw_pos_const6(
; CHECK-NEXT:    [[ADD:%.*]] = add nsw i32 %x, 42
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[ADD]], -2147483606
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %add = add nsw i32 %x, 42
  %cmp = icmp eq i32 %add, -2147483606
  ret i1 %cmp
}

; Verify that vectors work too.

define <2 x i1> @add_nsw_pos_const5_splat_vec(<2 x i32> %x) {
; CHECK-LABEL: @add_nsw_pos_const5_splat_vec(
; CHECK-NEXT:    ret <2 x i1> <i1 true, i1 true>
;
  %add = add nsw <2 x i32> %x, <i32 42, i32 42>
  %cmp = icmp ne <2 x i32> %add, <i32 -2147483607, i32 -2147483607>
  ret <2 x i1> %cmp
}

; PR34838 - https://bugs.llvm.org/show_bug.cgi?id=34838
; The shift is known to create poison, so we can simplify the cmp.

define i1 @ne_shl_by_constant_produces_poison(i8 %x) {
; CHECK-LABEL: @ne_shl_by_constant_produces_poison(
; CHECK-NEXT:    ret i1 true
;
  %zx = zext i8 %x to i16      ; zx  = 0x00xx
  %xor = xor i16 %zx, 32767    ; xor = 0x7fyy
  %sub = sub nsw i16 %zx, %xor ; sub = 0x80zz  (the top bit is known one)
  %poison = shl nsw i16 %sub, 2    ; oops! this shl can't be nsw; that's POISON
  %cmp = icmp ne i16 %poison, 1
  ret i1 %cmp
}

define i1 @eq_shl_by_constant_produces_poison(i8 %x) {
; CHECK-LABEL: @eq_shl_by_constant_produces_poison(
; CHECK-NEXT:    ret i1 false
;
  %clear_high_bit = and i8 %x, 127                 ; 0x7f
  %set_next_high_bits = or i8 %clear_high_bit, 112 ; 0x70
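  ; bits 4-6 are known set and bit 7 is clear, so shifting left by 3 always
  ; moves a set bit into the sign position; the nsw flag is violated and the
  ; result is poison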
  %poison = shl nsw i8 %set_next_high_bits, 3
  %cmp = icmp eq i8 %poison, 15
  ret i1 %cmp
}

; Shift-by-variable that produces poison is more complicated but still possible.
; We guarantee that the shift will change the sign of the shifted value (and
; therefore produce poison) by limiting its range from 1 to 3.
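; (Bits 4-6 of the shifted value are known set and bit 7 is clear, so any left
; shift by 1-3 moves a set bit into the sign position, violating nsw.)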

define i1 @eq_shl_by_variable_produces_poison(i8 %x) {
; CHECK-LABEL: @eq_shl_by_variable_produces_poison(
; CHECK-NEXT:    ret i1 false
;
  %clear_high_bit = and i8 %x, 127                 ; 0x7f
  %set_next_high_bits = or i8 %clear_high_bit, 112 ; 0x70
  %notundef_shiftamt = and i8 %x, 3
  %nonzero_shiftamt = or i8 %notundef_shiftamt, 1
  %poison = shl nsw i8 %set_next_high_bits, %nonzero_shiftamt
  %cmp = icmp eq i8 %poison, 15
  ret i1 %cmp
}