; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S %s | FileCheck %s

; Check that we simplify llvm.umul.with.overflow if the overflow check is
; weakened by 'or (icmp ne %res, 0), %overflow'. This pattern is generated by
; code using __builtin_mul_overflow with negative integer constants, e.g.

;   bool test(unsigned long long v, unsigned long long *res) {
;     return __builtin_mul_overflow(v, -4775807LL, res);
;   }
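;
; Roughly, the fold rewrites the weakened check (this is only a sketch of the
; transform exercised below, not an exact -instcombine dump; value names are
; illustrative):
;
;   %res = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
;   %overflow = extractvalue { i64, i1 } %res, 1
;   %mul = extractvalue { i64, i1 } %res, 0
;   %cmp = icmp ne i64 %mul, 0
;   %overflow.1 = or i1 %overflow, %cmp
;
; into a plain multiply plus a check that both operands are non-zero:
;
;   %mul = mul i64 %a, %b
;   %lhs.nonzero = icmp ne i64 %a, 0
;   %rhs.nonzero = icmp ne i64 %b, 0
;   %overflow.1 = and i1 %lhs.nonzero, %rhs.nonzero
;
; The two forms agree: if either operand is zero, the product is zero and
; cannot overflow; otherwise the product is non-zero or the multiply overflows.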

declare { i64, i1 } @llvm.umul.with.overflow.i64(i64, i64) #0

define i1 @test1(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test1(
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    store i64 [[MUL]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;

  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  store i64 %mul, i64* %ptr, align 8
  ret i1 %overflow.1
}

define i1 @test1_or_ops_swapped(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test1_or_ops_swapped(
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    store i64 [[MUL]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;

  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %cmp, %overflow
  store i64 %mul, i64* %ptr, align 8
  ret i1 %overflow.1
}

define i1 @test2(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;

  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  %neg = sub i64 0, %mul
  store i64 %neg, i64* %ptr, align 8
  ret i1 %overflow.1
}

declare void @use(i1)

define i1 @test3_multiple_overflow_users(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test3_multiple_overflow_users(
; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    call void @use(i1 [[OVERFLOW]])
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;
  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  call void @use(i1 %overflow)
  ret i1 %overflow.1
}

; Do not simplify if both %overflow and %mul have multiple uses.
define i1 @test3_multiple_overflow_and_mul_users(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test3_multiple_overflow_and_mul_users(
; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i64 [[MUL]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    call void @use(i1 [[OVERFLOW]])
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;
  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  %neg = sub i64 0, %mul
  store i64 %neg, i64* %ptr, align 8
  call void @use(i1 %overflow)
  ret i1 %overflow.1
}

declare void @use.2({ i64, i1 })

define i1 @test3_multiple_res_users(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test3_multiple_res_users(
; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    call void @use.2({ i64, i1 } [[RES]])
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;
  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  %neg = sub i64 0, %mul
  store i64 %neg, i64* %ptr, align 8
  call void @use.2({ i64, i1 } %res)
  ret i1 %overflow.1
}

declare void @use.3(i64)

; Simplify even if %mul has multiple uses.
define i1 @test3_multiple_mul_users(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test3_multiple_mul_users(
; CHECK-NEXT:    [[MUL:%.*]] = mul i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = icmp ne i64 [[A]], 0
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i64 [[B]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    call void @use.3(i64 [[MUL]])
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;

  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp ne i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  %neg = sub i64 0, %mul
  store i64 %neg, i64* %ptr, align 8
  call void @use.3(i64 %mul)
  ret i1 %overflow.1
}

define i1 @test4_no_icmp_ne(i64 %a, i64 %b, i64* %ptr) {
; CHECK-LABEL: @test4_no_icmp_ne(
; CHECK-NEXT:    [[RES:%.*]] = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 [[A:%.*]], i64 [[B:%.*]])
; CHECK-NEXT:    [[OVERFLOW:%.*]] = extractvalue { i64, i1 } [[RES]], 1
; CHECK-NEXT:    [[MUL:%.*]] = extractvalue { i64, i1 } [[RES]], 0
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i64 [[MUL]], 0
; CHECK-NEXT:    [[OVERFLOW_1:%.*]] = or i1 [[OVERFLOW]], [[CMP]]
; CHECK-NEXT:    [[NEG:%.*]] = sub i64 0, [[MUL]]
; CHECK-NEXT:    store i64 [[NEG]], i64* [[PTR:%.*]], align 8
; CHECK-NEXT:    ret i1 [[OVERFLOW_1]]
;
  %res = tail call { i64, i1 } @llvm.umul.with.overflow.i64(i64 %a, i64 %b)
  %overflow = extractvalue { i64, i1 } %res, 1
  %mul = extractvalue { i64, i1 } %res, 0
  %cmp = icmp sgt i64 %mul, 0
  %overflow.1 = or i1 %overflow, %cmp
  %neg = sub i64 0, %mul
  store i64 %neg, i64* %ptr, align 8
  ret i1 %overflow.1
}

attributes #0 = { nounwind readnone speculatable willreturn }