; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -aggressive-instcombine -S | FileCheck %s

; PR37098 - https://bugs.llvm.org/show_bug.cgi?id=37098

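; The 'anyset' tests below shift single bits of %x down to bit 0, OR them
; together, and mask the result with 1. Per the CHECK lines, the expected fold
; is a single 'and %x, mask' followed by 'icmp ne 0' and a zext, where the mask
; has one bit set per tested position. For @anyset_two_bit_mask those are
; bits 0 and 3: (1 << 0) | (1 << 3) = 9.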
define i32 @anyset_two_bit_mask(i32 %x) {
; CHECK-LABEL: @anyset_two_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 9
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %s = lshr i32 %x, 3
  %o = or i32 %s, %x
  %r = and i32 %o, 1
  ret i32 %r
}

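; For @anyset_four_bit_mask, the tested bits are 0, 3, 5, and 8, so the
; expected mask is 1 + 8 + 32 + 256 = 297 (see the CHECK lines).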
define i32 @anyset_four_bit_mask(i32 %x) {
; CHECK-LABEL: @anyset_four_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 297
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %t1 = lshr i32 %x, 3
  %t2 = lshr i32 %x, 5
  %t3 = lshr i32 %x, 8
  %o1 = or i32 %t1, %x
  %o2 = or i32 %t2, %t3
  %o3 = or i32 %o1, %o2
  %r = and i32 %o3, 1
  ret i32 %r
}

; We're not testing the LSB here, so all of the 'or' operands are shifts.
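; The expected mask therefore excludes bit 0: 8 + 32 + 256 = 296.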

define i32 @anyset_three_bit_mask_all_shifted_bits(i32 %x) {
; CHECK-LABEL: @anyset_three_bit_mask_all_shifted_bits(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 296
; CHECK-NEXT:    [[TMP2:%.*]] = icmp ne i32 [[TMP1]], 0
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %t1 = lshr i32 %x, 3
  %t2 = lshr i32 %x, 5
  %t3 = lshr i32 %x, 8
  %o2 = or i32 %t2, %t3
  %o3 = or i32 %t1, %o2
  %r = and i32 %o3, 1
  ret i32 %r
}

; Recognize the 'and' sibling pattern (all-bits-set). The 'and 1' may not be at the end.
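; Here the shifted bits are ANDed together, so the expected fold is
; 'and %x, mask' followed by 'icmp eq mask': every tested bit must be set.
; For @allset_two_bit_mask the mask is 1 + 128 = 129; for @allset_four_bit_mask
; it is 2 + 4 + 8 + 16 = 30, as the CHECK lines show.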

define i32 @allset_two_bit_mask(i32 %x) {
; CHECK-LABEL: @allset_two_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[X:%.*]], 129
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i32 [[TMP1]], 129
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i32
; CHECK-NEXT:    ret i32 [[TMP3]]
;
  %s = lshr i32 %x, 7
  %o = and i32 %s, %x
  %r = and i32 %o, 1
  ret i32 %r
}

define i64 @allset_four_bit_mask(i64 %x) {
; CHECK-LABEL: @allset_four_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 30
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 30
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[TMP3]]
;
  %t1 = lshr i64 %x, 1
  %t2 = lshr i64 %x, 2
  %t3 = lshr i64 %x, 3
  %t4 = lshr i64 %x, 4
  %a1 = and i64 %t4, 1
  %a2 = and i64 %t2, %a1
  %a3 = and i64 %a2, %t1
  %r = and i64 %a3, %t3
  ret i64 %r
}

declare void @use(i32)

; negative test - extra use means the transform would increase instruction count
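; The intermediate 'and' also feeds the call to @use, so the original chain
; must stay; the CHECK lines verify that the IR is left unchanged.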

define i32 @allset_two_bit_mask_multiuse(i32 %x) {
; CHECK-LABEL: @allset_two_bit_mask_multiuse(
; CHECK-NEXT:    [[S:%.*]] = lshr i32 [[X:%.*]], 7
; CHECK-NEXT:    [[O:%.*]] = and i32 [[S]], [[X]]
; CHECK-NEXT:    [[R:%.*]] = and i32 [[O]], 1
; CHECK-NEXT:    call void @use(i32 [[O]])
; CHECK-NEXT:    ret i32 [[R]]
;
  %s = lshr i32 %x, 7
  %o = and i32 %s, %x
  %r = and i32 %o, 1
  call void @use(i32 %o)
  ret i32 %r
}

; negative test - missing 'and 1' mask, so more than the low bit is used here
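; Without the final 'and 1', the result depends on more than the low bit, so
; no fold is expected and the CHECK lines show the shifts and 'and's untouched.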

define i8 @allset_three_bit_mask_no_and1(i8 %x) {
; CHECK-LABEL: @allset_three_bit_mask_no_and1(
; CHECK-NEXT:    [[T1:%.*]] = lshr i8 [[X:%.*]], 1
; CHECK-NEXT:    [[T2:%.*]] = lshr i8 [[X]], 2
; CHECK-NEXT:    [[T3:%.*]] = lshr i8 [[X]], 3
; CHECK-NEXT:    [[A2:%.*]] = and i8 [[T1]], [[T2]]
; CHECK-NEXT:    [[R:%.*]] = and i8 [[A2]], [[T3]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %t1 = lshr i8 %x, 1
  %t2 = lshr i8 %x, 2
  %t3 = lshr i8 %x, 3
  %a2 = and i8 %t1, %t2
  %r = and i8 %a2, %t3
  ret i8 %r
}

; This test demonstrates that the pattern being transformed can be large. If the
; implementation is slow or can overflow the stack through deep recursion, it
; should be made more efficient.
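; The mask covers bits 1 through 40, i.e. 2^41 - 2 = 2199023255550, which is
; the constant expected in the CHECK lines below.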

define i64 @allset_40_bit_mask(i64 %x) {
; CHECK-LABEL: @allset_40_bit_mask(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[X:%.*]], 2199023255550
; CHECK-NEXT:    [[TMP2:%.*]] = icmp eq i64 [[TMP1]], 2199023255550
; CHECK-NEXT:    [[TMP3:%.*]] = zext i1 [[TMP2]] to i64
; CHECK-NEXT:    ret i64 [[TMP3]]
;
  %t1 = lshr i64 %x, 1
  %t2 = lshr i64 %x, 2
  %t3 = lshr i64 %x, 3
  %t4 = lshr i64 %x, 4
  %t5 = lshr i64 %x, 5
  %t6 = lshr i64 %x, 6
  %t7 = lshr i64 %x, 7
  %t8 = lshr i64 %x, 8
  %t9 = lshr i64 %x, 9
  %t10 = lshr i64 %x, 10
  %t11 = lshr i64 %x, 11
  %t12 = lshr i64 %x, 12
  %t13 = lshr i64 %x, 13
  %t14 = lshr i64 %x, 14
  %t15 = lshr i64 %x, 15
  %t16 = lshr i64 %x, 16
  %t17 = lshr i64 %x, 17
  %t18 = lshr i64 %x, 18
  %t19 = lshr i64 %x, 19
  %t20 = lshr i64 %x, 20
  %t21 = lshr i64 %x, 21
  %t22 = lshr i64 %x, 22
  %t23 = lshr i64 %x, 23
  %t24 = lshr i64 %x, 24
  %t25 = lshr i64 %x, 25
  %t26 = lshr i64 %x, 26
  %t27 = lshr i64 %x, 27
  %t28 = lshr i64 %x, 28
  %t29 = lshr i64 %x, 29
  %t30 = lshr i64 %x, 30
  %t31 = lshr i64 %x, 31
  %t32 = lshr i64 %x, 32
  %t33 = lshr i64 %x, 33
  %t34 = lshr i64 %x, 34
  %t35 = lshr i64 %x, 35
  %t36 = lshr i64 %x, 36
  %t37 = lshr i64 %x, 37
  %t38 = lshr i64 %x, 38
  %t39 = lshr i64 %x, 39
  %t40 = lshr i64 %x, 40

  %a1 = and i64 %t1, 1
  %a2 = and i64 %t2, %a1
  %a3 = and i64 %t3, %a2
  %a4 = and i64 %t4, %a3
  %a5 = and i64 %t5, %a4
  %a6 = and i64 %t6, %a5
  %a7 = and i64 %t7, %a6
  %a8 = and i64 %t8, %a7
  %a9 = and i64 %t9, %a8
  %a10 = and i64 %t10, %a9
  %a11 = and i64 %t11, %a10
  %a12 = and i64 %t12, %a11
  %a13 = and i64 %t13, %a12
  %a14 = and i64 %t14, %a13
  %a15 = and i64 %t15, %a14
  %a16 = and i64 %t16, %a15
  %a17 = and i64 %t17, %a16
  %a18 = and i64 %t18, %a17
  %a19 = and i64 %t19, %a18
  %a20 = and i64 %t20, %a19
  %a21 = and i64 %t21, %a20
  %a22 = and i64 %t22, %a21
  %a23 = and i64 %t23, %a22
  %a24 = and i64 %t24, %a23
  %a25 = and i64 %t25, %a24
  %a26 = and i64 %t26, %a25
  %a27 = and i64 %t27, %a26
  %a28 = and i64 %t28, %a27
  %a29 = and i64 %t29, %a28
  %a30 = and i64 %t30, %a29
  %a31 = and i64 %t31, %a30
  %a32 = and i64 %t32, %a31
  %a33 = and i64 %t33, %a32
  %a34 = and i64 %t34, %a33
  %a35 = and i64 %t35, %a34
  %a36 = and i64 %t36, %a35
  %a37 = and i64 %t37, %a36
  %a38 = and i64 %t38, %a37
  %a39 = and i64 %t39, %a38
  %a40 = and i64 %t40, %a39

  ret i64 %a40
}

; Verify that unsimplified code doesn't crash:
; https://bugs.llvm.org/show_bug.cgi?id=37446
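; The shift amount (33) is not less than the 32-bit width, so the lshr yields a
; poison value; the pass should simply leave the IR alone, as the CHECK lines verify.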

define i32 @PR37446(i32 %x) {
; CHECK-LABEL: @PR37446(
; CHECK-NEXT:    [[SHR:%.*]] = lshr i32 1, 33
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SHR]], 15
; CHECK-NEXT:    [[AND1:%.*]] = and i32 [[AND]], [[X:%.*]]
; CHECK-NEXT:    ret i32 [[AND1]]
;
  %shr = lshr i32 1, 33
  %and = and i32 %shr, 15
  %and1 = and i32 %and, %x
  ret i32 %and1
}