; Test that DAGCombiner gets helped by computeKnownBitsForTargetNode() with
; vector intrinsics.
;
; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 < %s  | FileCheck %s

; Byte -> halfword unpacks: vuphb sign-extends the high 8 bytes,
; vuplhb zero-extends the high 8 bytes.
declare <8 x i16> @llvm.s390.vuphb(<16 x i8>)
declare <8 x i16> @llvm.s390.vuplhb(<16 x i8>)

; VUPHB (used operand elements are 0)
define <8 x i16> @f0() {
; CHECK-LABEL: f0:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuphb(<16 x i8>
                                         <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0,
                                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; VUPHB (used operand elements are 1)
; NOTE: The AND is optimized away, but instead of replicating '1' into <8 x
; i16>, the original vector constant is put in the constant pool and then
; unpacked (repeated in more test cases below).
define <8 x i16> @f1() {
; CHECK-LABEL: f1:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuphb %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuphb(<16 x i8>
                                         <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                                          i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; VUPLHB (used operand elements are 0)
define <8 x i16> @f2() {
; CHECK-LABEL: f2:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuplhb(<16 x i8>
                                          <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0,
                                           i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; VUPLHB (used operand elements are 1)
define <8 x i16> @f3() {
; CHECK-LABEL: f3:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuplhb %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuplhb(<16 x i8>
                                          <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                                           i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; Halfword -> word unpacks: vuphh sign-extends the high 4 halfwords,
; vuplhh zero-extends the high 4 halfwords.
declare <4 x i32> @llvm.s390.vuphh(<8 x i16>)
declare <4 x i32> @llvm.s390.vuplhh(<8 x i16>)

; VUPHH (used operand elements are 0)
define <4 x i32> @f4() {
; CHECK-LABEL: f4:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuphh(<8 x i16>
                                         <i16 0, i16 0, i16 0, i16 0,
                                          i16 1, i16 1, i16 1, i16 1>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; VUPHH (used operand elements are 1)
define <4 x i32> @f5() {
; CHECK-LABEL: f5:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuphh %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuphh(<8 x i16>
                                         <i16 1, i16 1, i16 1, i16 1,
                                          i16 0, i16 0, i16 0, i16 0>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; VUPLHH (used operand elements are 0)
define <4 x i32> @f6() {
; CHECK-LABEL: f6:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuplhh(<8 x i16>
                                          <i16 0, i16 0, i16 0, i16 0,
                                           i16 1, i16 1, i16 1, i16 1>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; VUPLHH (used operand elements are 1)
define <4 x i32> @f7() {
; CHECK-LABEL: f7:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuplhh %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuplhh(<8 x i16>
                                          <i16 1, i16 1, i16 1, i16 1,
                                           i16 0, i16 0, i16 0, i16 0>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; Word -> doubleword unpacks: vuphf sign-extends the high 2 words,
; vuplhf zero-extends the high 2 words.
declare <2 x i64> @llvm.s390.vuphf(<4 x i32>)
declare <2 x i64> @llvm.s390.vuplhf(<4 x i32>)

; VUPHF (used operand elements are 0)
define <2 x i64> @f8() {
; CHECK-LABEL: f8:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuphf(<4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; VUPHF (used operand elements are 1)
define <2 x i64> @f9() {
; CHECK-LABEL: f9:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuphf %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuphf(<4 x i32> <i32 1, i32 1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; VUPLHF (used operand elements are 0)
define <2 x i64> @f10() {
; CHECK-LABEL: f10:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuplhf(<4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; VUPLHF (used operand elements are 1)
define <2 x i64> @f11() {
; CHECK-LABEL: f11:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuplhf %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuplhf(<4 x i32> <i32 1, i32 1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; Byte -> halfword unpacks of the LOW half: vuplb sign-extends the low 8
; bytes, vupllb zero-extends the low 8 bytes.
declare <8 x i16> @llvm.s390.vuplb(<16 x i8>)
declare <8 x i16> @llvm.s390.vupllb(<16 x i8>)

; VUPLB (used operand elements are 0)
define <8 x i16> @f12() {
; CHECK-LABEL: f12:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuplb(<16 x i8>
                                         <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                                          i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)

  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; VUPLB (used operand elements are 1)
define <8 x i16> @f13() {
; CHECK-LABEL: f13:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuplb %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vuplb(<16 x i8>
                                         <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0,
                                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; VUPLLB (used operand elements are 0)
define <8 x i16> @f14() {
; CHECK-LABEL: f14:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vupllb(<16 x i8>
                                         <i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1,
                                          i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; VUPLLB (used operand elements are 1)
define <8 x i16> @f15() {
; CHECK-LABEL: f15:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vupllb %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <8 x i16> @llvm.s390.vupllb(<16 x i8>
                                         <i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0, i8 0,
                                          i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1, i8 1>)
  %and = and <8 x i16> %unp, <i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1, i16 1>
  ret <8 x i16> %and
}

; Halfword -> word unpacks of the LOW half: vuplhw sign-extends the low 4
; halfwords, vupllh zero-extends the low 4 halfwords.
declare <4 x i32> @llvm.s390.vuplhw(<8 x i16>)
declare <4 x i32> @llvm.s390.vupllh(<8 x i16>)

; VUPLHW (used operand elements are 0)
define <4 x i32> @f16() {
; CHECK-LABEL: f16:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuplhw(<8 x i16>
                                          <i16 1, i16 1, i16 1, i16 1,
                                           i16 0, i16 0, i16 0, i16 0>)

  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; VUPLHW (used operand elements are 1)
define <4 x i32> @f17() {
; CHECK-LABEL: f17:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuplhw %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vuplhw(<8 x i16>
                                          <i16 0, i16 0, i16 0, i16 0,
                                           i16 1, i16 1, i16 1, i16 1>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; VUPLLH (used operand elements are 0)
define <4 x i32> @f18() {
; CHECK-LABEL: f18:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vupllh(<8 x i16>
                                          <i16 1, i16 1, i16 1, i16 1,
                                           i16 0, i16 0, i16 0, i16 0>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; VUPLLH (used operand elements are 1)
define <4 x i32> @f19() {
; CHECK-LABEL: f19:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vupllh %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <4 x i32> @llvm.s390.vupllh(<8 x i16>
                                          <i16 0, i16 0, i16 0, i16 0,
                                           i16 1, i16 1, i16 1, i16 1>)
  %and = and <4 x i32> %unp, <i32 1, i32 1, i32 1, i32 1>
  ret <4 x i32> %and
}

; Word -> doubleword unpacks of the LOW half: vuplf sign-extends the low 2
; words, vupllf zero-extends the low 2 words.
declare <2 x i64> @llvm.s390.vuplf(<4 x i32>)
declare <2 x i64> @llvm.s390.vupllf(<4 x i32>)

; VUPLF (used operand elements are 0)
define <2 x i64> @f20() {
; CHECK-LABEL: f20:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuplf(<4 x i32> <i32 1, i32 1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; VUPLF (used operand elements are 1)
define <2 x i64> @f21() {
; CHECK-LABEL: f21:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vuplf %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuplf(<4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; VUPLLF (used operand elements are 0)
define <2 x i64> @f22() {
; CHECK-LABEL: f22:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vupllf(<4 x i32> <i32 1, i32 1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; VUPLLF (used operand elements are 1)
define <2 x i64> @f23() {
; CHECK-LABEL: f23:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  larl %r1, .LCPI
; CHECK-NEXT:  vl %v0, 0(%r1)
; CHECK-NEXT:  vupllf %v24, %v0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vupllf(<4 x i32> <i32 0, i32 0, i32 1, i32 1>)
  %and = and <2 x i64> %unp, <i64 1, i64 1>
  ret <2 x i64> %and
}

; Test that signed unpacking of positive elements gives known zeros in high part.
define <2 x i64> @f24() {
; CHECK-LABEL: f24:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuphf(<4 x i32> <i32 1, i32 1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 -4294967296, ; = 0xffffffff00000000
                              i64 -4294967296>
  ret <2 x i64> %and
}

; Test that signed unpacking of negative elements gives known ones in high part.
define <2 x i64> @f25() {
; CHECK-LABEL: f25:
; CHECK-LABEL: # %bb.0:
;                         61680 = 0xf0f0
; CHECK-NEXT:  vgbm %v24, 61680
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuphf(<4 x i32> <i32 -1, i32 -1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 -4294967296, ; = 0xffffffff00000000
                              i64 -4294967296>
  ret <2 x i64> %and
}

; Test that logical unpacking of negative elements gives known zeros in high part.
define <2 x i64> @f26() {
; CHECK-LABEL: f26:
; CHECK-LABEL: # %bb.0:
; CHECK-NEXT:  vgbm %v24, 0
; CHECK-NEXT:  br %r14
  %unp = call <2 x i64> @llvm.s390.vuplhf(<4 x i32> <i32 -1, i32 -1, i32 0, i32 0>)
  %and = and <2 x i64> %unp, <i64 -4294967296, ; = 0xffffffff00000000
                              i64 -4294967296>
  ret <2 x i64> %and
}
