; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; a & (a ^ b) --> a & ~b
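; (Added note, comment only: checking the identity per bit, if a bit of 'a' is 0 then both
; sides are 0; if it is 1 then a & (a ^ b) = 1 ^ b = ~b = a & ~b, so the fold is sound.)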

define i32 @and_xor_common_op(i32 %pa, i32 %pb) {
; CHECK-LABEL: @and_xor_common_op(
; CHECK-NEXT:    [[A:%.*]] = udiv i32 42, [[PA:%.*]]
; CHECK-NEXT:    [[B:%.*]] = udiv i32 43, [[PB:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[B]], -1
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = udiv i32 42, %pa ; thwart complexity-based canonicalization
  %b = udiv i32 43, %pb ; thwart complexity-based canonicalization
  %xor = xor i32 %a, %b
  %r = and i32 %a, %xor
  ret i32 %r
}

; a & (b ^ a) --> a & ~b

define i32 @and_xor_common_op_commute1(i32 %pa, i32 %pb) {
; CHECK-LABEL: @and_xor_common_op_commute1(
; CHECK-NEXT:    [[A:%.*]] = udiv i32 42, [[PA:%.*]]
; CHECK-NEXT:    [[B:%.*]] = udiv i32 43, [[PB:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[B]], -1
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = udiv i32 42, %pa ; thwart complexity-based canonicalization
  %b = udiv i32 43, %pb ; thwart complexity-based canonicalization
  %xor = xor i32 %b, %a
  %r = and i32 %a, %xor
  ret i32 %r
}

; (b ^ a) & a --> a & ~b

define i32 @and_xor_common_op_commute2(i32 %pa, i32 %pb) {
; CHECK-LABEL: @and_xor_common_op_commute2(
; CHECK-NEXT:    [[A:%.*]] = udiv i32 42, [[PA:%.*]]
; CHECK-NEXT:    [[B:%.*]] = udiv i32 43, [[PB:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[B]], -1
; CHECK-NEXT:    [[R:%.*]] = and i32 [[A]], [[TMP1]]
; CHECK-NEXT:    ret i32 [[R]]
;
  %a = udiv i32 42, %pa ; thwart complexity-based canonicalization
  %b = udiv i32 43, %pb ; thwart complexity-based canonicalization
  %xor = xor i32 %b, %a
  %r = and i32 %xor, %a
  ret i32 %r
}

; (a ^ b) & a --> a & ~b

define <2 x i32> @and_xor_common_op_commute3(<2 x i32> %pa, <2 x i32> %pb) {
; CHECK-LABEL: @and_xor_common_op_commute3(
; CHECK-NEXT:    [[A:%.*]] = udiv <2 x i32> <i32 42, i32 43>, [[PA:%.*]]
; CHECK-NEXT:    [[B:%.*]] = udiv <2 x i32> <i32 43, i32 42>, [[PB:%.*]]
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[B]], <i32 -1, i32 -1>
; CHECK-NEXT:    [[R:%.*]] = and <2 x i32> [[A]], [[TMP1]]
; CHECK-NEXT:    ret <2 x i32> [[R]]
;
  %a = udiv <2 x i32> <i32 42, i32 43>, %pa ; thwart complexity-based canonicalization
  %b = udiv <2 x i32> <i32 43, i32 42>, %pb ; thwart complexity-based canonicalization
  %xor = xor <2 x i32> %a, %b
  %r = and <2 x i32> %xor, %a
  ret <2 x i32> %r
}

; It's ok to match a common constant.
; The xor should be a 'not' op (-1 constant).
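; (Added note: this is the same fold with 'a' bound to the constant, so
; C & (A ^ C) becomes C & ~A; the xor turns into a 'not' and the constant stays in the 'and'.)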

define <4 x i32> @and_xor_common_op_constant(<4 x i32> %A) {
; CHECK-LABEL: @and_xor_common_op_constant(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <4 x i32> [[A:%.*]], <i32 -1, i32 -1, i32 -1, i32 -1>
; CHECK-NEXT:    [[TMP2:%.*]] = and <4 x i32> [[TMP1]], <i32 1, i32 2, i32 3, i32 4>
; CHECK-NEXT:    ret <4 x i32> [[TMP2]]
;
  %1 = xor <4 x i32> %A, <i32 1, i32 2, i32 3, i32 4>
  %2 = and <4 x i32> <i32 1, i32 2, i32 3, i32 4>, %1
  ret <4 x i32> %2
}

; a & (a ^ ~b) --> a & b
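; (Added note, comment only: per bit, if a = 0 both sides are 0; if a = 1 then
; a ^ ~b = ~(~b) = b, so a & (a ^ ~b) = a & b.)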

define i32 @and_xor_not_common_op(i32 %a, i32 %b) {
; CHECK-LABEL: @and_xor_not_common_op(
; CHECK-NEXT:    [[T4:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    ret i32 [[T4]]
;
  %b2 = xor i32 %b, -1
  %t2 = xor i32 %a, %b2
  %t4 = and i32 %t2, %a
  ret i32 %t4
}

; rdar://10770603
; (x & y) + (x ^ y) -> x | y
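; (Added note: x & y and x ^ y never share a set bit, so adding them produces no carries
; and equals (x & y) | (x ^ y), which sets exactly the bits set in x or y, i.e. x | y.)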

define i64 @or(i64 %x, i64 %y) {
; CHECK-LABEL: @or(
; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %1 = and i64 %y, %x
  %2 = xor i64 %y, %x
  %3 = add i64 %1, %2
  ret i64 %3
}

; (x & y) | (x ^ y) -> x | y

define i64 @or2(i64 %x, i64 %y) {
; CHECK-LABEL: @or2(
; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[Y:%.*]], [[X:%.*]]
; CHECK-NEXT:    ret i64 [[TMP1]]
;
  %1 = and i64 %y, %x
  %2 = xor i64 %y, %x
  %3 = or i64 %1, %2
  ret i64 %3
}

; PR37098 - https://bugs.llvm.org/show_bug.cgi?id=37098
; Reassociate bitwise logic to eliminate a shift.
; There are 4 commuted variants * 3 shift ops * 3 logic ops = 36 potential variations of this fold.
; Mix the commutation options to provide coverage using fewer tests.
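; (Added note: the reassociation relies on shifts by a common amount distributing over
; bitwise logic, e.g. (x << s) & (y << s) == (x & y) << s, and likewise for lshr/ashr
; with and/or/xor, so the two shifts of %x and %y can be merged into one.)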

define i8 @and_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @and_shl(
; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = and i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = and i8 [[SY]], [[A]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = shl i8 %x, %shamt
  %sy = shl i8 %y, %shamt
  %a = and i8 %sx, %z
  %r = and i8 %sy, %a
  ret i8 %r
}

define i8 @or_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_shl(
; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[A]], [[SY]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = shl i8 %x, %shamt
  %sy = shl i8 %y, %shamt
  %a = or i8 %sx, %z
  %r = or i8 %a, %sy
  ret i8 %r
}

define i8 @xor_shl(i8 %x, i8 %y, i8 %zarg, i8 %shamt) {
; CHECK-LABEL: @xor_shl(
; CHECK-NEXT:    [[Z:%.*]] = sdiv i8 42, [[ZARG:%.*]]
; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = xor i8 [[Z]], [[SX]]
; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], [[SY]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %z = sdiv i8 42, %zarg ; thwart complexity-based canonicalization
  %sx = shl i8 %x, %shamt
  %sy = shl i8 %y, %shamt
  %a = xor i8 %z, %sx
  %r = xor i8 %a, %sy
  ret i8 %r
}

define i8 @and_lshr(i8 %x, i8 %y, i8 %zarg, i8 %shamt) {
; CHECK-LABEL: @and_lshr(
; CHECK-NEXT:    [[Z:%.*]] = sdiv i8 42, [[ZARG:%.*]]
; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = and i8 [[Z]], [[SX]]
; CHECK-NEXT:    [[R:%.*]] = and i8 [[SY]], [[A]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %z = sdiv i8 42, %zarg ; thwart complexity-based canonicalization
  %sx = lshr i8 %x, %shamt
  %sy = lshr i8 %y, %shamt
  %a = and i8 %z, %sx
  %r = and i8 %sy, %a
  ret i8 %r
}

define i8 @or_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_lshr(
; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[SY]], [[A]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = lshr i8 %x, %shamt
  %sy = lshr i8 %y, %shamt
  %a = or i8 %sx, %z
  %r = or i8 %sy, %a
  ret i8 %r
}

define i8 @xor_lshr(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @xor_lshr(
; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], [[SY]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = lshr i8 %x, %shamt
  %sy = lshr i8 %y, %shamt
  %a = xor i8 %sx, %z
  %r = xor i8 %a, %sy
  ret i8 %r
}

define i8 @and_ashr(i8 %x, i8 %y, i8 %zarg, i8 %shamt) {
; CHECK-LABEL: @and_ashr(
; CHECK-NEXT:    [[Z:%.*]] = sdiv i8 42, [[ZARG:%.*]]
; CHECK-NEXT:    [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = and i8 [[Z]], [[SX]]
; CHECK-NEXT:    [[R:%.*]] = and i8 [[A]], [[SY]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %z = sdiv i8 42, %zarg ; thwart complexity-based canonicalization
  %sx = ashr i8 %x, %shamt
  %sy = ashr i8 %y, %shamt
  %a = and i8 %z, %sx
  %r = and i8 %a, %sy
  ret i8 %r
}

define i8 @or_ashr(i8 %x, i8 %y, i8 %zarg, i8 %shamt) {
; CHECK-LABEL: @or_ashr(
; CHECK-NEXT:    [[Z:%.*]] = sdiv i8 42, [[ZARG:%.*]]
; CHECK-NEXT:    [[SX:%.*]] = ashr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = ashr i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = or i8 [[Z]], [[SX]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[SY]], [[A]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %z = sdiv i8 42, %zarg ; thwart complexity-based canonicalization
  %sx = ashr i8 %x, %shamt
  %sy = ashr i8 %y, %shamt
  %a = or i8 %z, %sx
  %r = or i8 %sy, %a
  ret i8 %r
}

define <2 x i8> @xor_ashr(<2 x i8> %x, <2 x i8> %y, <2 x i8> %z, <2 x i8> %shamt) {
; CHECK-LABEL: @xor_ashr(
; CHECK-NEXT:    [[SX:%.*]] = ashr <2 x i8> [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = ashr <2 x i8> [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = xor <2 x i8> [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = xor <2 x i8> [[A]], [[SY]]
; CHECK-NEXT:    ret <2 x i8> [[R]]
;
  %sx = ashr <2 x i8> %x, %shamt
  %sy = ashr <2 x i8> %y, %shamt
  %a = xor <2 x i8> %sx, %z
  %r = xor <2 x i8> %a, %sy
  ret <2 x i8> %r
}

; Negative test - different logic ops

define i8 @or_and_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_and_shl(
; CHECK-NEXT:    [[SX:%.*]] = shl i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = and i8 [[SY]], [[A]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = shl i8 %x, %shamt
  %sy = shl i8 %y, %shamt
  %a = or i8 %sx, %z
  %r = and i8 %sy, %a
  ret i8 %r
}

; Negative test - different shift ops

define i8 @or_lshr_shl(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_lshr_shl(
; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = shl i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[A]], [[SY]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = lshr i8 %x, %shamt
  %sy = shl i8 %y, %shamt
  %a = or i8 %sx, %z
  %r = or i8 %a, %sy
  ret i8 %r
}

; Negative test - different shift amounts

define i8 @or_lshr_shamt2(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @or_lshr_shamt2(
; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], 5
; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[A:%.*]] = or i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = or i8 [[SY]], [[A]]
; CHECK-NEXT:    ret i8 [[R]]
;
  %sx = lshr i8 %x, 5
  %sy = lshr i8 %y, %shamt
  %a = or i8 %sx, %z
  %r = or i8 %sy, %a
  ret i8 %r
}

; Negative test - multi-use
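; (Added note: %a below has an extra use in the sdiv, so reassociating the xors would not
; allow the intermediate value to be removed, and presumably no instruction would be saved.)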

define i8 @xor_lshr_multiuse(i8 %x, i8 %y, i8 %z, i8 %shamt) {
; CHECK-LABEL: @xor_lshr_multiuse(
; CHECK-NEXT:    [[SX:%.*]] = lshr i8 [[X:%.*]], [[SHAMT:%.*]]
; CHECK-NEXT:    [[SY:%.*]] = lshr i8 [[Y:%.*]], [[SHAMT]]
; CHECK-NEXT:    [[A:%.*]] = xor i8 [[SX]], [[Z:%.*]]
; CHECK-NEXT:    [[R:%.*]] = xor i8 [[A]], [[SY]]
; CHECK-NEXT:    [[R2:%.*]] = sdiv i8 [[A]], [[R]]
; CHECK-NEXT:    ret i8 [[R2]]
;
  %sx = lshr i8 %x, %shamt
  %sy = lshr i8 %y, %shamt
  %a = xor i8 %sx, %z
  %r = xor i8 %a, %sy
  %r2 = sdiv i8 %a, %r
  ret i8 %r2
}