; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX2-FAST

; fold (sra 0, x) -> 0
define <4 x i32> @combine_vec_ashr_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (sra -1, x) -> -1
define <4 x i32> @combine_vec_ashr_allones(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_allones:
; SSE:       # %bb.0:
; SSE-NEXT:    pcmpeqd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_allones:
; AVX:       # %bb.0:
; AVX-NEXT:    vpcmpeqd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> <i32 -1, i32 -1, i32 -1, i32 -1>, %x
  ret <4 x i32> %1
}

; fold (sra x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_ashr_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}

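; as above, but with non-uniform out-of-range shift amounts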
define <4 x i32> @combine_vec_ashr_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

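; out-of-range shift amounts, including an undef element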
define <4 x i32> @combine_vec_ashr_outofrange2(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_outofrange2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 33, i32 34, i32 35, i32 undef>
  ret <4 x i32> %1
}

; fold (sra x, 0) -> x
define <4 x i32> @combine_vec_ashr_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_ashr_by_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = ashr <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; fold (sra (sra x, c1), c2) -> (sra x, (add c1, c2))
define <4 x i32> @combine_vec_ashr_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrad $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = ashr <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}

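; as above, but with non-uniform shift amounts whose sums all stay in range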
define <4 x i32> @combine_vec_ashr_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr1:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $10, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $6, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $8, %xmm1
; SSE-NEXT:    psrad $4, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = ashr <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

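; every combined shift amount is >= the element width, so the result is a sign splat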
define <4 x i32> @combine_vec_ashr_ashr2(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr2:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr2:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrad $31, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = ashr <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

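; a mix of in-range and out-of-range shift amounts per element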
define <4 x i32> @combine_vec_ashr_ashr3(<4 x i32> %x) {
; SSE-LABEL: combine_vec_ashr_ashr3:
; SSE:       # %bb.0:
; SSE-NEXT:    movdqa %xmm0, %xmm1
; SSE-NEXT:    psrad $27, %xmm1
; SSE-NEXT:    movdqa %xmm0, %xmm2
; SSE-NEXT:    psrad $15, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    psrad $31, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_ashr3:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32  1, i32  5, i32 50, i32 27>
  %2 = ashr <4 x i32> %1, <i32 33, i32 10, i32 33, i32  0>
  ret <4 x i32> %2
}

; fold (sra x, (trunc (and y, c))) -> (sra x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_ashr_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE-LABEL: combine_vec_ashr_trunc_and:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrad %xmm2, %xmm3
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm5
; SSE-NEXT:    psrad %xmm4, %xmm5
; SSE-NEXT:    pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrad %xmm1, %xmm3
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrad %xmm1, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_and:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vextractf128 $1, %ymm1, %xmm2
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; AVX2-SLOW-NEXT:    vandps {{.*}}(%rip), %xmm1, %xmm1
; AVX2-SLOW-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: combine_vec_ashr_trunc_and:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = <0,2,4,6,u,u,u,u>
; AVX2-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
; AVX2-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX2-FAST-NEXT:    vpsravd %xmm1, %xmm0, %xmm0
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %x, %2
  ret <4 x i32> %3
}

; fold (sra (trunc (srl x, c1)), c2) -> (trunc (sra x, c1 + c2))
;      if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_lshr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_lshr:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    psrad $2, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    psrad $1, %xmm0
; SSE-NEXT:    psrad $3, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vpsrlq $32, %ymm0, %ymm0
; AVX2-SLOW-NEXT:    vextracti128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[0,2],xmm1[0,2]
; AVX2-SLOW-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: combine_vec_ashr_trunc_lshr:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} xmm1 = [1,3,5,7]
; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %1 = lshr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}

; fold (sra (trunc (sra x, c1)), c2) -> (trunc (sra x, c1 + c2))
;      if c1 is equal to the number of bits the trunc removes
define <4 x i32> @combine_vec_ashr_trunc_ashr(<4 x i64> %x) {
; SSE-LABEL: combine_vec_ashr_trunc_ashr:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    psrad $2, %xmm2
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE-NEXT:    psrad $1, %xmm0
; SSE-NEXT:    psrad $3, %xmm1
; SSE-NEXT:    pblendw {{.*#+}} xmm1 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1],xmm1[2,3],xmm2[4,5],xmm1[6,7]
; SSE-NEXT:    movdqa %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX2-SLOW-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-SLOW:       # %bb.0:
; AVX2-SLOW-NEXT:    vextractf128 $1, %ymm0, %xmm1
; AVX2-SLOW-NEXT:    vshufps {{.*#+}} xmm0 = xmm0[1,3],xmm1[1,3]
; AVX2-SLOW-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-SLOW-NEXT:    vzeroupper
; AVX2-SLOW-NEXT:    retq
;
; AVX2-FAST-LABEL: combine_vec_ashr_trunc_ashr:
; AVX2-FAST:       # %bb.0:
; AVX2-FAST-NEXT:    vmovdqa {{.*#+}} ymm1 = <1,3,5,7,u,u,u,u>
; AVX2-FAST-NEXT:    vpermd %ymm0, %ymm1, %ymm0
; AVX2-FAST-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX2-FAST-NEXT:    vzeroupper
; AVX2-FAST-NEXT:    retq
  %1 = ashr <4 x i64> %x, <i64 32, i64 32, i64 32, i64 32>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = ashr <4 x i32> %2, <i32 0, i32 1, i32 2, i32 3>
  ret <4 x i32> %3
}

; If the sign bit is known to be zero, switch this to a SRL.
define <4 x i32> @combine_vec_ashr_positive(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive:
; SSE:       # %bb.0:
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    pshuflw {{.*#+}} xmm2 = xmm1[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm2, %xmm3
; SSE-NEXT:    pshufd {{.*#+}} xmm2 = xmm1[2,3,2,3]
; SSE-NEXT:    pshuflw {{.*#+}} xmm4 = xmm2[2,3,3,3,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm5
; SSE-NEXT:    psrld %xmm4, %xmm5
; SSE-NEXT:    pblendw {{.*#+}} xmm5 = xmm3[0,1,2,3],xmm5[4,5,6,7]
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm1[0,1,1,1,4,5,6,7]
; SSE-NEXT:    movdqa %xmm0, %xmm3
; SSE-NEXT:    psrld %xmm1, %xmm3
; SSE-NEXT:    pshuflw {{.*#+}} xmm1 = xmm2[0,1,1,1,4,5,6,7]
; SSE-NEXT:    psrld %xmm1, %xmm0
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm3[0,1,2,3],xmm0[4,5,6,7]
; SSE-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm5[2,3],xmm0[4,5],xmm5[6,7]
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_positive:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsrlvd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 15, i32 255, i32 4095, i32 65535>
  %2 = ashr <4 x i32> %1, %y
  ret <4 x i32> %2
}

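; masking to the low 10 bits and then shifting right by 10 leaves a known-zero result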
define <4 x i32> @combine_vec_ashr_positive_splat(<4 x i32> %x, <4 x i32> %y) {
; SSE-LABEL: combine_vec_ashr_positive_splat:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_ashr_positive_splat:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 1023, i32 1023, i32 1023, i32 1023>
  %2 = ashr <4 x i32> %1, <i32 10, i32 10, i32 10, i32 10>
  ret <4 x i32> %2
}