; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

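; A zext followed by a sext can be shrunk to a single zext because the inner
; zext guarantees that the sign bit of the intermediate value is zero.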
define i64 @test_sext_zext(i16 %A) {
; CHECK-LABEL: @test_sext_zext(
; CHECK-NEXT:    [[C2:%.*]] = zext i16 [[A:%.*]] to i64
; CHECK-NEXT:    ret i64 [[C2]]
;
  %c1 = zext i16 %A to i32
  %c2 = sext i32 %c1 to i64
  ret i64 %c2
}

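; A vector 'not' (xor with true) of an i1 vector followed by a zext is
; already in canonical form and should not be changed.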
define <2 x i64> @test2(<2 x i1> %A) {
; CHECK-LABEL: @test2(
; CHECK-NEXT:    [[XOR:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
; CHECK-NEXT:    [[ZEXT:%.*]] = zext <2 x i1> [[XOR]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %xor = xor <2 x i1> %A, <i1 true, i1 true>
  %zext = zext <2 x i1> %xor to <2 x i64>
  ret <2 x i64> %zext
}

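; The trunc/and/zext sequence can be performed in the original type:
; the mask fits in 32 bits, so this becomes a single wide 'and'.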
define <2 x i64> @test3(<2 x i64> %A) {
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[ZEXT:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %zext = zext <2 x i32> %and to <2 x i64>
  ret <2 x i64> %zext
}

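; Same as above with an extra xor: both logic ops are performed in the wide
; type, eliminating the trunc/zext pair.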
define <2 x i64> @test4(<2 x i64> %A) {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i64> [[A:%.*]], <i64 23, i64 42>
; CHECK-NEXT:    [[ZEXT:%.*]] = xor <2 x i64> [[TMP1]], <i64 23, i64 42>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT]]
;
  %trunc = trunc <2 x i64> %A to <2 x i32>
  %and = and <2 x i32> %trunc, <i32 23, i32 42>
  %xor = xor <2 x i32> %and, <i32 23, i32 42>
  %zext = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext
}

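; The xor sandwiched between two zexts can be pushed down to the narrow type,
; leaving a single zext of the i1 'not'.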
define i64 @fold_xor_zext_sandwich(i1 %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i1 [[A:%.*]], true
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext i1 [[TMP1]] to i64
; CHECK-NEXT:    ret i64 [[ZEXT2]]
;
  %zext1 = zext i1 %a to i32
  %xor = xor i32 %zext1, 1
  %zext2 = zext i32 %xor to i64
  ret i64 %zext2
}

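; Vector version of the same fold.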
define <2 x i64> @fold_xor_zext_sandwich_vec(<2 x i1> %a) {
; CHECK-LABEL: @fold_xor_zext_sandwich_vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i1> [[A:%.*]], <i1 true, i1 true>
; CHECK-NEXT:    [[ZEXT2:%.*]] = zext <2 x i1> [[TMP1]] to <2 x i64>
; CHECK-NEXT:    ret <2 x i64> [[ZEXT2]]
;
  %zext1 = zext <2 x i1> %a to <2 x i32>
  %xor = xor <2 x i32> %zext1, <i32 1, i32 1>
  %zext2 = zext <2 x i32> %xor to <2 x i64>
  ret <2 x i64> %zext2
}

; Assert that zexts in and(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_and_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_and_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  ret i8 %5
}

; Assert that zexts in or(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_or_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_or_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = or i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = or i8 %2, %4
  ret i8 %5
}

; Assert that zexts in xor(zext(icmp), zext(icmp)) can be folded.

define i8 @fold_xor_zext_icmp(i64 %a, i64 %b, i64 %c) {
; CHECK-LABEL: @fold_xor_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = xor i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = zext i1 [[TMP3]] to i8
; CHECK-NEXT:    ret i8 [[TMP4]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = xor i8 %2, %4
  ret i8 %5
}

; Assert that zexts in logic(zext(icmp), zext(icmp)) are also folded across
; nested logical operators.

define i8 @fold_nested_logic_zext_icmp(i64 %a, i64 %b, i64 %c, i64 %d) {
; CHECK-LABEL: @fold_nested_logic_zext_icmp(
; CHECK-NEXT:    [[TMP1:%.*]] = icmp sgt i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = icmp slt i64 [[A]], [[C:%.*]]
; CHECK-NEXT:    [[TMP3:%.*]] = and i1 [[TMP1]], [[TMP2]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp eq i64 [[A]], [[D:%.*]]
; CHECK-NEXT:    [[TMP5:%.*]] = or i1 [[TMP3]], [[TMP4]]
; CHECK-NEXT:    [[TMP6:%.*]] = zext i1 [[TMP5]] to i8
; CHECK-NEXT:    ret i8 [[TMP6]]
;
  %1 = icmp sgt i64 %a, %b
  %2 = zext i1 %1 to i8
  %3 = icmp slt i64 %a, %c
  %4 = zext i1 %3 to i8
  %5 = and i8 %2, %4
  %6 = icmp eq i64 %a, %d
  %7 = zext i1 %6 to i8
  %8 = or i8 %5, %7
  ret i8 %8
}

; This test is for Integer BitWidth > 64 && BitWidth <= 1024.

define i1024 @sext_zext_apint1(i77 %A) {
; CHECK-LABEL: @sext_zext_apint1(
; CHECK-NEXT:    [[C2:%.*]] = zext i77 [[A:%.*]] to i1024
; CHECK-NEXT:    ret i1024 [[C2]]
;
  %c1 = zext i77 %A to i533
  %c2 = sext i533 %c1 to i1024
  ret i1024 %c2
}

; This test is for Integer BitWidth <= 64 && BitWidth % 2 != 0.

define i47 @sext_zext_apint2(i11 %A) {
; CHECK-LABEL: @sext_zext_apint2(
; CHECK-NEXT:    [[C2:%.*]] = zext i11 [[A:%.*]] to i47
; CHECK-NEXT:    ret i47 [[C2]]
;
  %c1 = zext i11 %A to i39
  %c2 = sext i39 %c1 to i47
  ret i47 %c2
}

declare void @use1(i1)
declare void @use32(i32)

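; The following tests zext a single-bit test of X: (X & (1 << Y)) != 0
; checks that the bit is set; (X & (1 << Y)) == 0 checks that it is clear.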
define i32 @masked_bit_set(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

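; Vector version, testing that the masked bit is clear (eq 0).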
define <2 x i32> @masked_bit_clear(<2 x i32> %x, <2 x i32> %y) {
; CHECK-LABEL: @masked_bit_clear(
; CHECK-NEXT:    [[SH1:%.*]] = shl <2 x i32> <i32 1, i32 1>, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq <2 x i32> [[AND]], zeroinitializer
; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
  %and = and <2 x i32> %sh1, %x
  %cmp = icmp eq <2 x i32> %and, zeroinitializer
  %r = zext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %r
}

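; Same bit-test pattern with the 'and' operands commuted.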
define <2 x i32> @masked_bit_set_commute(<2 x i32> %px, <2 x i32> %y) {
; CHECK-LABEL: @masked_bit_set_commute(
; CHECK-NEXT:    [[X:%.*]] = srem <2 x i32> <i32 42, i32 3>, [[PX:%.*]]
; CHECK-NEXT:    [[SH1:%.*]] = shl <2 x i32> <i32 1, i32 1>, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and <2 x i32> [[X]], [[SH1]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne <2 x i32> [[AND]], zeroinitializer
; CHECK-NEXT:    [[R:%.*]] = zext <2 x i1> [[CMP]] to <2 x i32>
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %x = srem <2 x i32> <i32 42, i32 3>, %px ; thwart complexity-based canonicalization
  %sh1 = shl <2 x i32> <i32 1, i32 1>, %y
  %and = and <2 x i32> %x, %sh1
  %cmp = icmp ne <2 x i32> %and, zeroinitializer
  %r = zext <2 x i1> %cmp to <2 x i32>
  ret <2 x i32> %r
}

define i32 @masked_bit_clear_commute(i32 %px, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_commute(
; CHECK-NEXT:    [[X:%.*]] = srem i32 42, [[PX:%.*]]
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[X]], [[SH1]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %x = srem i32 42, %px ; thwart complexity-based canonicalization
  %sh1 = shl i32 1, %y
  %and = and i32 %x, %sh1
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

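; The shift has an extra use.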
define i32 @masked_bit_set_use1(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use1(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[SH1]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  call void @use32(i32 %sh1)
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

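; The 'and' has an extra use.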
define i32 @masked_bit_set_use2(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use2(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[AND]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  call void @use32(i32 %and)
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

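; The compare has an extra use.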
define i32 @masked_bit_set_use3(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_use3(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    call void @use1(i1 [[CMP]])
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}

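; The same extra-use variations, for the bit-clear (eq 0) form.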
define i32 @masked_bit_clear_use1(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use1(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[SH1]])
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  call void @use32(i32 %sh1)
  %and = and i32 %sh1, %x
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define i32 @masked_bit_clear_use2(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use2(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    call void @use32(i32 [[AND]])
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  call void @use32(i32 %and)
  %cmp = icmp eq i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

define i32 @masked_bit_clear_use3(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_clear_use3(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[AND]], 0
; CHECK-NEXT:    call void @use1(i1 [[CMP]])
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp eq i32 %and, 0
  call void @use1(i1 %cmp)
  %r = zext i1 %cmp to i32
  ret i32 %r
}

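; Negative test - the shifted mask (3 << Y) has more than one bit set,
; so this is not a single-bit test.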
define i32 @masked_bits_set(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bits_set(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 3, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 3, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

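; Negative test - 'sdiv' is not a masking operation.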
define i32 @div_bit_set(i32 %x, i32 %y) {
; CHECK-LABEL: @div_bit_set(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = sdiv i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = sdiv i32 %sh1, %x
  %cmp = icmp ne i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}

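; Negative test - the compare is against 1 rather than 0.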
define i32 @masked_bit_set_nonzero_cmp(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_set_nonzero_cmp(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32 [[AND]], 1
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp ne i32 %and, 1
  %r = zext i1 %cmp to i32
  ret i32 %r
}

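; Negative test - 'sgt' is not an equality predicate.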
define i32 @masked_bit_wrong_pred(i32 %x, i32 %y) {
; CHECK-LABEL: @masked_bit_wrong_pred(
; CHECK-NEXT:    [[SH1:%.*]] = shl i32 1, [[Y:%.*]]
; CHECK-NEXT:    [[AND:%.*]] = and i32 [[SH1]], [[X:%.*]]
; CHECK-NEXT:    [[CMP:%.*]] = icmp sgt i32 [[AND]], 0
; CHECK-NEXT:    [[R:%.*]] = zext i1 [[CMP]] to i32
; CHECK-NEXT:    ret i32 [[R]]
;
  %sh1 = shl i32 1, %y
  %and = and i32 %sh1, %x
  %cmp = icmp sgt i32 %and, 0
  %r = zext i1 %cmp to i32
  ret i32 %r
}