; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt < %s -instcombine -S | FileCheck %s

; rdar://5992453
; A & 255
; bswap(a) >> 24 picks the low byte of %a, so this folds to a & 255.
define i32 @test4(i32 %a) nounwind  {
; CHECK-LABEL: @test4(
; CHECK-NEXT:    [[T2:%.*]] = and i32 [[A:%.*]], 255
; CHECK-NEXT:    ret i32 [[T2]]
;
  %t2 = tail call i32 @llvm.bswap.i32( i32 %a )
  %t4 = lshr i32 %t2, 24
  ret i32 %t4
}

; a >> 24
define i32 @test6(i32 %a) nounwind {
; CHECK-LABEL: @test6(
; CHECK-NEXT:    [[T2:%.*]] = lshr i32 [[A:%.*]], 24
; CHECK-NEXT:    ret i32 [[T2]]
;
  %t2 = tail call i32 @llvm.bswap.i32( i32 %a )
  %t4 = and i32 %t2, 255
  ret i32 %t4
}

; PR5284
; bswap32 then trunc+bswap16 selects bits [31:16] of the input.
define i16 @test7(i32 %A) {
; CHECK-LABEL: @test7(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i32 [[A:%.*]], 16
; CHECK-NEXT:    [[D:%.*]] = trunc i32 [[TMP1]] to i16
; CHECK-NEXT:    ret i16 [[D]]
;
  %B = tail call i32 @llvm.bswap.i32(i32 %A) nounwind
  %C = trunc i32 %B to i16
  %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
  ret i16 %D
}

; Vector version of test7.
define <2 x i16> @test7_vector(<2 x i32> %A) {
; CHECK-LABEL: @test7_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i32> [[A:%.*]], <i32 16, i32 16>
; CHECK-NEXT:    [[D:%.*]] = trunc <2 x i32> [[TMP1]] to <2 x i16>
; CHECK-NEXT:    ret <2 x i16> [[D]]
;
  %B = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %A) nounwind
  %C = trunc <2 x i32> %B to <2 x i16>
  %D = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %C) nounwind
  ret <2 x i16> %D
}

; bswap64 then trunc+bswap16 selects bits [63:48] of the input.
define i16 @test8(i64 %A) {
; CHECK-LABEL: @test8(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr i64 [[A:%.*]], 48
; CHECK-NEXT:    [[D:%.*]] = trunc i64 [[TMP1]] to i16
; CHECK-NEXT:    ret i16 [[D]]
;
  %B = tail call i64 @llvm.bswap.i64(i64 %A) nounwind
  %C = trunc i64 %B to i16
  %D = tail call i16 @llvm.bswap.i16(i16 %C) nounwind
  ret i16 %D
}

; Vector version of test8.
define <2 x i16> @test8_vector(<2 x i64> %A) {
; CHECK-LABEL: @test8_vector(
; CHECK-NEXT:    [[TMP1:%.*]] = lshr <2 x i64> [[A:%.*]], <i64 48, i64 48>
; CHECK-NEXT:    [[D:%.*]] = trunc <2 x i64> [[TMP1]] to <2 x i16>
; CHECK-NEXT:    ret <2 x i16> [[D]]
;
  %B = tail call <2 x i64> @llvm.bswap.v2i64(<2 x i64> %A) nounwind
  %C = trunc <2 x i64> %B to <2 x i16>
  %D = tail call <2 x i16> @llvm.bswap.v2i16(<2 x i16> %C) nounwind
  ret <2 x i16> %D
}

; Misc: Fold bswap(undef) to undef.
define i64 @foo() {
; CHECK-LABEL: @foo(
; CHECK-NEXT:    ret i64 undef
;
  %a = call i64 @llvm.bswap.i64(i64 undef)
  ret i64 %a
}

; PR15782
; Fold: OP( BSWAP(x), BSWAP(y) ) -> BSWAP( OP(x, y) )
; Fold: OP( BSWAP(x), CONSTANT ) -> BSWAP( OP(x, BSWAP(CONSTANT) ) )
define i16 @bs_and16i(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_and16i(
; CHECK-NEXT:    [[TMP1:%.*]] = and i16 [[A:%.*]], 4391
; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
; CHECK-NEXT:    ret i16 [[TMP2]]
;
  %1 = tail call i16 @llvm.bswap.i16(i16 %a)
  %2 = and i16 %1, 10001
  ret i16 %2
}

define i16 @bs_and16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_and16(
; CHECK-NEXT:    [[TMP1:%.*]] = and i16 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
; CHECK-NEXT:    ret i16 [[TMP2]]
;
  %t1 = tail call i16 @llvm.bswap.i16(i16 %a)
  %t2 = tail call i16 @llvm.bswap.i16(i16 %b)
  %t3 = and i16 %t1, %t2
  ret i16 %t3
}

define i16 @bs_or16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_or16(
; CHECK-NEXT:    [[TMP1:%.*]] = or i16 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
; CHECK-NEXT:    ret i16 [[TMP2]]
;
  %t1 = tail call i16 @llvm.bswap.i16(i16 %a)
  %t2 = tail call i16 @llvm.bswap.i16(i16 %b)
  %t3 = or i16 %t1, %t2
  ret i16 %t3
}

define i16 @bs_xor16(i16 %a, i16 %b) #0 {
; CHECK-LABEL: @bs_xor16(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i16 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i16 @llvm.bswap.i16(i16 [[TMP1]])
; CHECK-NEXT:    ret i16 [[TMP2]]
;
  %t1 = tail call i16 @llvm.bswap.i16(i16 %a)
  %t2 = tail call i16 @llvm.bswap.i16(i16 %b)
  %t3 = xor i16 %t1, %t2
  ret i16 %t3
}

define i32 @bs_and32i(i32 %a, i32 %b) #0 {
; CHECK-LABEL: @bs_and32i(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], -1585053440
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
  %t2 = and i32 %t1, 100001
  ret i32 %t2
}

define i32 @bs_and32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: @bs_and32(
; CHECK-NEXT:    [[TMP1:%.*]] = and i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
  %t2 = tail call i32 @llvm.bswap.i32(i32 %b)
  %t3 = and i32 %t1, %t2
  ret i32 %t3
}

define i32 @bs_or32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: @bs_or32(
; CHECK-NEXT:    [[TMP1:%.*]] = or i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
  %t2 = tail call i32 @llvm.bswap.i32(i32 %b)
  %t3 = or i32 %t1, %t2
  ret i32 %t3
}

define i32 @bs_xor32(i32 %a, i32 %b) #0 {
; CHECK-LABEL: @bs_xor32(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i32 @llvm.bswap.i32(i32 [[TMP1]])
; CHECK-NEXT:    ret i32 [[TMP2]]
;
  %t1 = tail call i32 @llvm.bswap.i32(i32 %a)
  %t2 = tail call i32 @llvm.bswap.i32(i32 %b)
  %t3 = xor i32 %t1, %t2
  ret i32 %t3
}

define i64 @bs_and64i(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and64i(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[A:%.*]], 129085117527228416
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
; CHECK-NEXT:    ret i64 [[TMP2]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = and i64 %t1, 1000000001
  ret i64 %t2
}

define i64 @bs_and64(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and64(
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
; CHECK-NEXT:    ret i64 [[TMP2]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
  %t3 = and i64 %t1, %t2
  ret i64 %t3
}

define i64 @bs_or64(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_or64(
; CHECK-NEXT:    [[TMP1:%.*]] = or i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
; CHECK-NEXT:    ret i64 [[TMP2]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
  %t3 = or i64 %t1, %t2
  ret i64 %t3
}

define i64 @bs_xor64(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_xor64(
; CHECK-NEXT:    [[TMP1:%.*]] = xor i64 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
; CHECK-NEXT:    ret i64 [[TMP2]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
  %t3 = xor i64 %t1, %t2
  ret i64 %t3
}

define <2 x i32> @bs_and32vec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @bs_and32vec(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  %t2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
  %t3 = and <2 x i32> %t1, %t2
  ret <2 x i32> %t3
}

define <2 x i32> @bs_or32vec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @bs_or32vec(
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  %t2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
  %t3 = or <2 x i32> %t1, %t2
  ret <2 x i32> %t3
}

define <2 x i32> @bs_xor32vec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @bs_xor32vec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  %t2 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %b)
  %t3 = xor <2 x i32> %t1, %t2
  ret <2 x i32> %t3
}

define <2 x i32> @bs_and32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @bs_and32ivec(
; CHECK-NEXT:    [[TMP1:%.*]] = and <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  %t2 = and <2 x i32> %t1, <i32 100001, i32 100001>
  ret <2 x i32> %t2
}

define <2 x i32> @bs_or32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @bs_or32ivec(
; CHECK-NEXT:    [[TMP1:%.*]] = or <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  %t2 = or <2 x i32> %t1, <i32 100001, i32 100001>
  ret <2 x i32> %t2
}

define <2 x i32> @bs_xor32ivec(<2 x i32> %a, <2 x i32> %b) #0 {
; CHECK-LABEL: @bs_xor32ivec(
; CHECK-NEXT:    [[TMP1:%.*]] = xor <2 x i32> [[A:%.*]], <i32 -1585053440, i32 -1585053440>
; CHECK-NEXT:    [[TMP2:%.*]] = call <2 x i32> @llvm.bswap.v2i32(<2 x i32> [[TMP1]])
; CHECK-NEXT:    ret <2 x i32> [[TMP2]]
;
  %t1 = tail call <2 x i32> @llvm.bswap.v2i32(<2 x i32> %a)
  %t2 = xor <2 x i32> %t1, <i32 100001, i32 100001>
  ret <2 x i32> %t2
}

; Negative test: both bswaps have extra uses, so the fold does not fire.
define i64 @bs_and64_multiuse1(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and64_multiuse1(
; CHECK-NEXT:    [[T1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
; CHECK-NEXT:    [[T2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
; CHECK-NEXT:    [[T3:%.*]] = and i64 [[T1]], [[T2]]
; CHECK-NEXT:    [[T4:%.*]] = mul i64 [[T3]], [[T1]]
; CHECK-NEXT:    [[T5:%.*]] = mul i64 [[T4]], [[T2]]
; CHECK-NEXT:    ret i64 [[T5]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
  %t3 = and i64 %t1, %t2
  %t4 = mul i64 %t3, %t1 ; to increase use count of the bswaps
  %t5 = mul i64 %t4, %t2 ; to increase use count of the bswaps
  ret i64 %t5
}

; Only the first bswap has an extra use; the and is still folded.
define i64 @bs_and64_multiuse2(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and64_multiuse2(
; CHECK-NEXT:    [[T1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[A]], [[B:%.*]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
; CHECK-NEXT:    [[T4:%.*]] = mul i64 [[TMP2]], [[T1]]
; CHECK-NEXT:    ret i64 [[T4]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
  %t3 = and i64 %t1, %t2
  %t4 = mul i64 %t3, %t1 ; to increase use count of the bswaps
  ret i64 %t4
}

; Only the second bswap has an extra use; the and is still folded.
define i64 @bs_and64_multiuse3(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and64_multiuse3(
; CHECK-NEXT:    [[T2:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[B:%.*]])
; CHECK-NEXT:    [[TMP1:%.*]] = and i64 [[A:%.*]], [[B]]
; CHECK-NEXT:    [[TMP2:%.*]] = call i64 @llvm.bswap.i64(i64 [[TMP1]])
; CHECK-NEXT:    [[T4:%.*]] = mul i64 [[TMP2]], [[T2]]
; CHECK-NEXT:    ret i64 [[T4]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = tail call i64 @llvm.bswap.i64(i64 %b)
  %t3 = and i64 %t1, %t2
  %t4 = mul i64 %t3, %t2 ; to increase use count of the bswaps
  ret i64 %t4
}

; Negative test: the constant-operand fold does not fire when the and has
; another use besides the bswap chain.
define i64 @bs_and64i_multiuse(i64 %a, i64 %b) #0 {
; CHECK-LABEL: @bs_and64i_multiuse(
; CHECK-NEXT:    [[T1:%.*]] = tail call i64 @llvm.bswap.i64(i64 [[A:%.*]])
; CHECK-NEXT:    [[T2:%.*]] = and i64 [[T1]], 1000000001
; CHECK-NEXT:    [[T3:%.*]] = mul i64 [[T2]], [[T1]]
; CHECK-NEXT:    ret i64 [[T3]]
;
  %t1 = tail call i64 @llvm.bswap.i64(i64 %a)
  %t2 = and i64 %t1, 1000000001
  %t3 = mul i64 %t2, %t1 ; to increase use count of the bswap
  ret i64 %t3
}

declare i16 @llvm.bswap.i16(i16)
declare i32 @llvm.bswap.i32(i32)
declare i64 @llvm.bswap.i64(i64)
declare <2 x i16> @llvm.bswap.v2i16(<2 x i16>)
declare <2 x i32> @llvm.bswap.v2i32(<2 x i32>)
declare <2 x i64> @llvm.bswap.v2i64(<2 x i64>)
