; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

declare void @use(i8)

; Tests for slt/ult

define i1 @slt_positive_multip_rem_zero(i8 %x) {
; CHECK-LABEL: @slt_positive_multip_rem_zero(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp slt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, 7
  %b = icmp slt i8 %a, 21
  ret i1 %b
}

define i1 @slt_negative_multip_rem_zero(i8 %x) {
; CHECK-LABEL: @slt_negative_multip_rem_zero(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], -7
; CHECK-NEXT:    [[B:%.*]] = icmp slt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, -7
  %b = icmp slt i8 %a, 21
  ret i1 %b
}

define i1 @slt_positive_multip_rem_nz(i8 %x) {
; CHECK-LABEL: @slt_positive_multip_rem_nz(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], 5
; CHECK-NEXT:    [[B:%.*]] = icmp slt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, 5
  %b = icmp slt i8 %a, 21
  ret i1 %b
}

define i1 @ult_rem_zero(i8 %x) {
; CHECK-LABEL: @ult_rem_zero(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp ult i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nuw i8 %x, 7
  %b = icmp ult i8 %a, 21
  ret i1 %b
}

define i1 @ult_rem_nz(i8 %x) {
; CHECK-LABEL: @ult_rem_nz(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 5
; CHECK-NEXT:    [[B:%.*]] = icmp ult i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nuw i8 %x, 5
  %b = icmp ult i8 %a, 21
  ret i1 %b
}

; Tests for sgt/ugt

define i1 @sgt_positive_multip_rem_zero(i8 %x) {
; CHECK-LABEL: @sgt_positive_multip_rem_zero(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp sgt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, 7
  %b = icmp sgt i8 %a, 21
  ret i1 %b
}

define i1 @sgt_negative_multip_rem_zero(i8 %x) {
; CHECK-LABEL: @sgt_negative_multip_rem_zero(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], -7
; CHECK-NEXT:    [[B:%.*]] = icmp sgt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, -7
  %b = icmp sgt i8 %a, 21
  ret i1 %b
}

define i1 @sgt_positive_multip_rem_nz(i8 %x) {
; CHECK-LABEL: @sgt_positive_multip_rem_nz(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], 5
; CHECK-NEXT:    [[B:%.*]] = icmp sgt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, 5
  %b = icmp sgt i8 %a, 21
  ret i1 %b
}

define i1 @ugt_rem_zero(i8 %x) {
; CHECK-LABEL: @ugt_rem_zero(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp ugt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nuw i8 %x, 7
  %b = icmp ugt i8 %a, 21
  ret i1 %b
}

define i1 @ugt_rem_nz(i8 %x) {
; CHECK-LABEL: @ugt_rem_nz(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 5
; CHECK-NEXT:    [[B:%.*]] = icmp ugt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nuw i8 %x, 5
  %b = icmp ugt i8 %a, 21
  ret i1 %b
}

; Tests for eq/ne

define i1 @eq_nsw_rem_zero(i8 %x) {
; CHECK-LABEL: @eq_nsw_rem_zero(
; CHECK-NEXT:    [[B:%.*]] = icmp eq i8 [[X:%.*]], -4
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, -5
  %b = icmp eq i8 %a, 20
  ret i1 %b
}

define <2 x i1> @ne_nsw_rem_zero(<2 x i8> %x) {
; CHECK-LABEL: @ne_nsw_rem_zero(
; CHECK-NEXT:    [[B:%.*]] = icmp ne <2 x i8> [[X:%.*]], <i8 -6, i8 -6>
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %a = mul nsw <2 x i8> %x, <i8 5, i8 5>
  %b = icmp ne <2 x i8> %a, <i8 -30, i8 -30>
  ret <2 x i1> %b
}

; TODO: Missed fold with undef.

define <2 x i1> @ne_nsw_rem_zero_undef1(<2 x i8> %x) {
; CHECK-LABEL: @ne_nsw_rem_zero_undef1(
; CHECK-NEXT:    [[A:%.*]] = mul nsw <2 x i8> [[X:%.*]], <i8 5, i8 undef>
; CHECK-NEXT:    [[B:%.*]] = icmp ne <2 x i8> [[A]], <i8 -30, i8 -30>
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %a = mul nsw <2 x i8> %x, <i8 5, i8 undef>
  %b = icmp ne <2 x i8> %a, <i8 -30, i8 -30>
  ret <2 x i1> %b
}

; TODO: Missed fold with undef.

define <2 x i1> @ne_nsw_rem_zero_undef2(<2 x i8> %x) {
; CHECK-LABEL: @ne_nsw_rem_zero_undef2(
; CHECK-NEXT:    [[A:%.*]] = mul nsw <2 x i8> [[X:%.*]], <i8 5, i8 5>
; CHECK-NEXT:    [[B:%.*]] = icmp ne <2 x i8> [[A]], <i8 -30, i8 undef>
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %a = mul nsw <2 x i8> %x, <i8 5, i8 5>
  %b = icmp ne <2 x i8> %a, <i8 -30, i8 undef>
  ret <2 x i1> %b
}

define i1 @eq_nsw_rem_zero_uses(i8 %x) {
; CHECK-LABEL: @eq_nsw_rem_zero_uses(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], -5
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[B:%.*]] = icmp eq i8 [[X]], -4
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nsw i8 %x, -5
  call void @use(i8 %a)
  %b = icmp eq i8 %a, 20
  ret i1 %b
}

; Impossible multiple should be handled by instsimplify.
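; Worked arithmetic for the case below: with nsw, %a is an exact signed multiple
; of 5, and the compare constant 245 is -11 as an i8, which is not a multiple of 5,
; so the equality can never hold.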

define i1 @eq_nsw_rem_nz(i8 %x) {
; CHECK-LABEL: @eq_nsw_rem_nz(
; CHECK-NEXT:    ret i1 false
;
  %a = mul nsw i8 %x, 5
  %b = icmp eq i8 %a, 245
  ret i1 %b
}

; Impossible multiple should be handled by instsimplify.
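; Worked arithmetic for the case below: 130 is -126 as an i8, which is not a signed
; multiple of 5, so with nsw the inequality always holds.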

define i1 @ne_nsw_rem_nz(i8 %x) {
; CHECK-LABEL: @ne_nsw_rem_nz(
; CHECK-NEXT:    ret i1 true
;
  %a = mul nsw i8 %x, 5
  %b = icmp ne i8 %a, 130
  ret i1 %b
}

define <2 x i1> @eq_nuw_rem_zero(<2 x i8> %x) {
; CHECK-LABEL: @eq_nuw_rem_zero(
; CHECK-NEXT:    [[B:%.*]] = icmp eq <2 x i8> [[X:%.*]], <i8 4, i8 4>
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %a = mul nuw <2 x i8> %x, <i8 5, i8 5>
  %b = icmp eq <2 x i8> %a, <i8 20, i8 20>
  ret <2 x i1> %b
}

; TODO: Missed fold with undef.

define <2 x i1> @eq_nuw_rem_zero_undef1(<2 x i8> %x) {
; CHECK-LABEL: @eq_nuw_rem_zero_undef1(
; CHECK-NEXT:    [[A:%.*]] = mul nuw <2 x i8> [[X:%.*]], <i8 undef, i8 5>
; CHECK-NEXT:    [[B:%.*]] = icmp eq <2 x i8> [[A]], <i8 20, i8 20>
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %a = mul nuw <2 x i8> %x, <i8 undef, i8 5>
  %b = icmp eq <2 x i8> %a, <i8 20, i8 20>
  ret <2 x i1> %b
}

; TODO: Missed fold with undef.

define <2 x i1> @eq_nuw_rem_zero_undef2(<2 x i8> %x) {
; CHECK-LABEL: @eq_nuw_rem_zero_undef2(
; CHECK-NEXT:    [[A:%.*]] = mul nuw <2 x i8> [[X:%.*]], <i8 5, i8 5>
; CHECK-NEXT:    [[B:%.*]] = icmp eq <2 x i8> [[A]], <i8 undef, i8 20>
; CHECK-NEXT:    ret <2 x i1> [[B]]
;
  %a = mul nuw <2 x i8> %x, <i8 5, i8 5>
  %b = icmp eq <2 x i8> %a, <i8 undef, i8 20>
  ret <2 x i1> %b
}

define i1 @ne_nuw_rem_zero(i8 %x) {
; CHECK-LABEL: @ne_nuw_rem_zero(
; CHECK-NEXT:    [[B:%.*]] = icmp ne i8 [[X:%.*]], 26
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nuw i8 %x, 5
  %b = icmp ne i8 %a, 130
  ret i1 %b
}

define i1 @ne_nuw_rem_zero_uses(i8 %x) {
; CHECK-LABEL: @ne_nuw_rem_zero_uses(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[B:%.*]] = icmp ne i8 [[X]], 26
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul nuw i8 %x, 5
  call void @use(i8 %a)
  %b = icmp ne i8 %a, 130
  ret i1 %b
}

; Impossible multiple should be handled by instsimplify.
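; Worked arithmetic for the case below: -5 is 251 as an unsigned i8, and with nuw
; %a is an exact unsigned multiple of 251; 20 is not such a multiple, so the
; equality can never hold.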

define i1 @eq_nuw_rem_nz(i8 %x) {
; CHECK-LABEL: @eq_nuw_rem_nz(
; CHECK-NEXT:    ret i1 false
;
  %a = mul nuw i8 %x, -5
  %b = icmp eq i8 %a, 20
  ret i1 %b
}

; Impossible multiple should be handled by instsimplify.
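; Worked arithmetic for the case below: -30 is 226 as an unsigned i8, which is not
; an unsigned multiple of 5, so with nuw the inequality always holds.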

define i1 @ne_nuw_rem_nz(i8 %x) {
; CHECK-LABEL: @ne_nuw_rem_nz(
; CHECK-NEXT:    ret i1 true
;
  %a = mul nuw i8 %x, 5
  %b = icmp ne i8 %a, -30
  ret i1 %b
}

; Negative tests for the icmp mul folds

define i1 @sgt_positive_multip_rem_zero_nonsw(i8 %x) {
; CHECK-LABEL: @sgt_positive_multip_rem_zero_nonsw(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp sgt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul i8 %x, 7
  %b = icmp sgt i8 %a, 21
  ret i1 %b
}

define i1 @ult_multip_rem_zero_nonsw(i8 %x) {
; CHECK-LABEL: @ult_multip_rem_zero_nonsw(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp ult i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul i8 %x, 7
  %b = icmp ult i8 %a, 21
  ret i1 %b
}

define i1 @ugt_rem_zero_nonuw(i8 %x) {
; CHECK-LABEL: @ugt_rem_zero_nonuw(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 7
; CHECK-NEXT:    [[B:%.*]] = icmp ugt i8 [[A]], 21
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul i8 %x, 7
  %b = icmp ugt i8 %a, 21
  ret i1 %b
}

define i1 @sgt_minnum(i8 %x) {
; CHECK-LABEL: @sgt_minnum(
; CHECK-NEXT:    ret i1 true
;
  %a = mul nsw i8 %x, 7
  %b = icmp sgt i8 %a, -128
  ret i1 %b
}

define i1 @ule_bignum(i8 %x) {
; CHECK-LABEL: @ule_bignum(
; CHECK-NEXT:    [[B:%.*]] = icmp eq i8 [[X:%.*]], 0
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul i8 %x, 2147483647
  %b = icmp ule i8 %a, 0
  ret i1 %b
}

define i1 @sgt_mulzero(i8 %x) {
; CHECK-LABEL: @sgt_mulzero(
; CHECK-NEXT:    ret i1 false
;
  %a = mul nsw i8 %x, 0
  %b = icmp sgt i8 %a, 21
  ret i1 %b
}

define i1 @eq_rem_zero_nonuw(i8 %x) {
; CHECK-LABEL: @eq_rem_zero_nonuw(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 5
; CHECK-NEXT:    [[B:%.*]] = icmp eq i8 [[A]], 20
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul i8 %x, 5
  %b = icmp eq i8 %a, 20
  ret i1 %b
}

define i1 @ne_rem_zero_nonuw(i8 %x) {
; CHECK-LABEL: @ne_rem_zero_nonuw(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 5
; CHECK-NEXT:    [[B:%.*]] = icmp ne i8 [[A]], 30
; CHECK-NEXT:    ret i1 [[B]]
;
  %a = mul i8 %x, 5
  %b = icmp ne i8 %a, 30
  ret i1 %b
}

define i1 @mul_constant_eq(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_eq(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i32 %x, 5
  %B = mul i32 %y, 5
  %C = icmp eq i32 %A, %B
  ret i1 %C
}

define <2 x i1> @mul_constant_ne_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @mul_constant_ne_splat(
; CHECK-NEXT:    [[C:%.*]] = icmp ne <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %A = mul <2 x i32> %x, <i32 5, i32 5>
  %B = mul <2 x i32> %y, <i32 5, i32 5>
  %C = icmp ne <2 x i32> %A, %B
  ret <2 x i1> %C
}

define i1 @mul_constant_ne_extra_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_ne_extra_use1(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i8 %x, 5
  call void @use(i8 %A)
  %B = mul i8 %y, 5
  %C = icmp ne i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_eq_extra_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_eq_extra_use2(
; CHECK-NEXT:    [[B:%.*]] = mul i8 [[Y:%.*]], 5
; CHECK-NEXT:    call void @use(i8 [[B]])
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i8 %x, 5
  %B = mul i8 %y, 5
  call void @use(i8 %B)
  %C = icmp eq i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_ne_extra_use3(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_ne_extra_use3(
; CHECK-NEXT:    [[A:%.*]] = mul i8 [[X:%.*]], 5
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[B:%.*]] = mul i8 [[Y:%.*]], 5
; CHECK-NEXT:    call void @use(i8 [[B]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i8 %x, 5
  call void @use(i8 %A)
  %B = mul i8 %y, 5
  call void @use(i8 %B)
  %C = icmp ne i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_eq_nsw(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_eq_nsw(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nsw i32 %x, 6
  %B = mul nsw i32 %y, 6
  %C = icmp eq i32 %A, %B
  ret i1 %C
}

define <2 x i1> @mul_constant_ne_nsw_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @mul_constant_ne_nsw_splat(
; CHECK-NEXT:    [[C:%.*]] = icmp ne <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %A = mul nsw <2 x i32> %x, <i32 12, i32 12>
  %B = mul nsw <2 x i32> %y, <i32 12, i32 12>
  %C = icmp ne <2 x i32> %A, %B
  ret <2 x i1> %C
}

define i1 @mul_constant_ne_nsw_extra_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_ne_nsw_extra_use1(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], 74
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nsw i8 %x, 74
  call void @use(i8 %A)
  %B = mul nsw i8 %y, 74
  %C = icmp ne i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_eq_nsw_extra_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_eq_nsw_extra_use2(
; CHECK-NEXT:    [[B:%.*]] = mul nsw i8 [[Y:%.*]], 20
; CHECK-NEXT:    call void @use(i8 [[B]])
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nsw i8 %x, 20
  %B = mul nsw i8 %y, 20
  call void @use(i8 %B)
  %C = icmp eq i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_ne_nsw_extra_use3(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_ne_nsw_extra_use3(
; CHECK-NEXT:    [[A:%.*]] = mul nsw i8 [[X:%.*]], 24
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[B:%.*]] = mul nsw i8 [[Y:%.*]], 24
; CHECK-NEXT:    call void @use(i8 [[B]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nsw i8 %x, 24
  call void @use(i8 %A)
  %B = mul nsw i8 %y, 24
  call void @use(i8 %B)
  %C = icmp ne i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_nuw_eq(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_nuw_eq(
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nuw i32 %x, 22
  %B = mul nuw i32 %y, 22
  %C = icmp eq i32 %A, %B
  ret i1 %C
}

define <2 x i1> @mul_constant_ne_nuw_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @mul_constant_ne_nuw_splat(
; CHECK-NEXT:    [[C:%.*]] = icmp ne <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %A = mul nuw <2 x i32> %x, <i32 10, i32 10>
  %B = mul nuw <2 x i32> %y, <i32 10, i32 10>
  %C = icmp ne <2 x i32> %A, %B
  ret <2 x i1> %C
}

define i1 @mul_constant_ne_nuw_extra_use1(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_ne_nuw_extra_use1(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 6
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], [[Y:%.*]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nuw i8 %x, 6
  call void @use(i8 %A)
  %B = mul nuw i8 %y, 6
  %C = icmp ne i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_eq_nuw_extra_use2(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_eq_nuw_extra_use2(
; CHECK-NEXT:    [[B:%.*]] = mul nuw i8 [[Y:%.*]], 36
; CHECK-NEXT:    call void @use(i8 [[B]])
; CHECK-NEXT:    [[C:%.*]] = icmp eq i8 [[X:%.*]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nuw i8 %x, 36
  %B = mul nuw i8 %y, 36
  call void @use(i8 %B)
  %C = icmp eq i8 %A, %B
  ret i1 %C
}

define i1 @mul_constant_ne_nuw_extra_use3(i8 %x, i8 %y) {
; CHECK-LABEL: @mul_constant_ne_nuw_extra_use3(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i8 [[X:%.*]], 38
; CHECK-NEXT:    call void @use(i8 [[A]])
; CHECK-NEXT:    [[B:%.*]] = mul nuw i8 [[Y:%.*]], 38
; CHECK-NEXT:    call void @use(i8 [[B]])
; CHECK-NEXT:    [[C:%.*]] = icmp ne i8 [[X]], [[Y]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nuw i8 %x, 38
  call void @use(i8 %A)
  %B = mul nuw i8 %y, 38
  call void @use(i8 %B)
  %C = icmp ne i8 %A, %B
  ret i1 %C
}

; Negative test - wrong pred

define i1 @mul_constant_ult(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_ult(
; CHECK-NEXT:    [[A:%.*]] = mul i32 [[X:%.*]], 47
; CHECK-NEXT:    [[B:%.*]] = mul i32 [[Y:%.*]], 47
; CHECK-NEXT:    [[C:%.*]] = icmp ult i32 [[A]], [[B]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i32 %x, 47
  %B = mul i32 %y, 47
  %C = icmp ult i32 %A, %B
  ret i1 %C
}

; Negative test - wrong pred

define i1 @mul_constant_nuw_sgt(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_nuw_sgt(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i32 [[X:%.*]], 46
; CHECK-NEXT:    [[B:%.*]] = mul nuw i32 [[Y:%.*]], 46
; CHECK-NEXT:    [[C:%.*]] = icmp sgt i32 [[A]], [[B]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nuw i32 %x, 46
  %B = mul nuw i32 %y, 46
  %C = icmp sgt i32 %A, %B
  ret i1 %C
}

; Negative test - wrong constants

define i1 @mul_mismatch_constant_nuw_eq(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_mismatch_constant_nuw_eq(
; CHECK-NEXT:    [[A:%.*]] = mul nuw i32 [[X:%.*]], 46
; CHECK-NEXT:    [[B:%.*]] = mul nuw i32 [[Y:%.*]], 44
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[A]], [[B]]
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nuw i32 %x, 46
  %B = mul nuw i32 %y, 44
  %C = icmp eq i32 %A, %B
  ret i1 %C
}

; If the multiply constant has any trailing zero bits but could overflow,
; we get something completely different.
; We mask off the high bits of each input and then convert:
; (X&Z) == (Y&Z) -> (X^Y) & Z == 0
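; Worked example for the mask below: 44 = 4 * 11, and 11 is odd, hence invertible
; modulo 2^32, so X*44 == Y*44 (mod 2^32) reduces to X*4 == Y*4 (mod 2^32), i.e.
; X == Y (mod 2^30). That gives Z = 2^30 - 1 = 1073741823; a constant with a single
; trailing zero bit (54 below) gives Z = 2^31 - 1 = 2147483647.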

define i1 @mul_constant_partial_nuw_eq(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_partial_nuw_eq(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 1073741823
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i32 %x, 44
  %B = mul nuw i32 %y, 44
  %C = icmp eq i32 %A, %B
  ret i1 %C
}

define i1 @mul_constant_mismatch_wrap_eq(i32 %x, i32 %y) {
; CHECK-LABEL: @mul_constant_mismatch_wrap_eq(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 2147483647
; CHECK-NEXT:    [[C:%.*]] = icmp eq i32 [[TMP2]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul nsw i32 %x, 54
  %B = mul nuw i32 %y, 54
  %C = icmp eq i32 %A, %B
  ret i1 %C
}

define i1 @eq_mul_constants_with_tz(i32 %x, i32 %y) {
; CHECK-LABEL: @eq_mul_constants_with_tz(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and i32 [[TMP1]], 1073741823
; CHECK-NEXT:    [[C:%.*]] = icmp ne i32 [[TMP2]], 0
; CHECK-NEXT:    ret i1 [[C]]
;
  %A = mul i32 %x, 12
  %B = mul i32 %y, 12
  %C = icmp ne i32 %A, %B
  ret i1 %C
}

define <2 x i1> @eq_mul_constants_with_tz_splat(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @eq_mul_constants_with_tz_splat(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[X:%.*]], [[Y:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = and <2 x i32> [[TMP1]], <i32 1073741823, i32 1073741823>
; CHECK-NEXT:    [[C:%.*]] = icmp eq <2 x i32> [[TMP2]], zeroinitializer
; CHECK-NEXT:    ret <2 x i1> [[C]]
;
  %A = mul <2 x i32> %x, <i32 12, i32 12>
  %B = mul <2 x i32> %y, <i32 12, i32 12>
  %C = icmp eq <2 x i32> %A, %B
  ret <2 x i1> %C
}