; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+sse2 < %s | FileCheck %s --check-prefix=SSE2
; RUN: llc -mtriple=x86_64-unknown-unknown -mattr=+avx < %s | FileCheck %s --check-prefix=AVX

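; The tests below shift a vector by a splatted (uniform) amount built from the
; low element of the second argument. The autogenerated checks verify that each
; case lowers to a single shift-by-scalar instruction (psllw/pslld/psllq,
; psrlw/psrld/psrlq, psraw/psrad), with the amount moved or zero-extended into
; an XMM register as needed.

; Uniform shl of <8 x i16>: expect psllw (SSE2) / vpsllw (AVX).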
define <8 x i16> @test1(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test1:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    pextrw $0, %xmm1, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psllw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test1:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT:    vpsllw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shl = shl <8 x i16> %A, %vecinit14
  ret <8 x i16> %shl
}

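; Uniform shl of <4 x i32>: expect pslld (SSE2) / vpslld (AVX).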
define <4 x i32> @test2(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test2:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    pslld %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test2:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT:    vpslld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shl = shl <4 x i32> %A, %vecinit6
  ret <4 x i32> %shl
}

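; Uniform shl of <2 x i64>: expect psllq (SSE2) / vpsllq (AVX).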
define <2 x i64> @test3(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test3:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    psllq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test3:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpsllq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
  %shl = shl <2 x i64> %A, %vecinit2
  ret <2 x i64> %shl
}

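; Uniform lshr of <8 x i16>: expect psrlw (SSE2) / vpsrlw (AVX).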
define <8 x i16> @test4(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test4:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    pextrw $0, %xmm1, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psrlw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test4:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT:    vpsrlw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shr = lshr <8 x i16> %A, %vecinit14
  ret <8 x i16> %shr
}

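; Uniform lshr of <4 x i32>: expect psrld (SSE2) / vpsrld (AVX).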
define <4 x i32> @test5(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test5:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    psrld %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test5:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT:    vpsrld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shr = lshr <4 x i32> %A, %vecinit6
  ret <4 x i32> %shr
}

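; Uniform lshr of <2 x i64>: expect psrlq (SSE2) / vpsrlq (AVX).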
define <2 x i64> @test6(<2 x i64> %A, <2 x i64> %B) {
; SSE2-LABEL: test6:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    psrlq %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test6:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpsrlq %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit2 = shufflevector <2 x i64> %B, <2 x i64> undef, <2 x i32> zeroinitializer
  %shr = lshr <2 x i64> %A, %vecinit2
  ret <2 x i64> %shr
}

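; Uniform ashr of <8 x i16>: expect psraw (SSE2) / vpsraw (AVX).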
define <8 x i16> @test7(<8 x i16> %A, <8 x i16> %B) {
; SSE2-LABEL: test7:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    pextrw $0, %xmm1, %eax
; SSE2-NEXT:    movd %eax, %xmm1
; SSE2-NEXT:    psraw %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test7:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpmovzxwq {{.*#+}} xmm1 = xmm1[0],zero,zero,zero,xmm1[1],zero,zero,zero
; AVX-NEXT:    vpsraw %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit14 = shufflevector <8 x i16> %B, <8 x i16> undef, <8 x i32> zeroinitializer
  %shr = ashr <8 x i16> %A, %vecinit14
  ret <8 x i16> %shr
}

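; Uniform ashr of <4 x i32>: expect psrad (SSE2) / vpsrad (AVX).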
define <4 x i32> @test8(<4 x i32> %A, <4 x i32> %B) {
; SSE2-LABEL: test8:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    xorps %xmm2, %xmm2
; SSE2-NEXT:    movss {{.*#+}} xmm2 = xmm1[0],xmm2[1,2,3]
; SSE2-NEXT:    psrad %xmm2, %xmm0
; SSE2-NEXT:    retq
;
; AVX-LABEL: test8:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero
; AVX-NEXT:    vpsrad %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %vecinit6 = shufflevector <4 x i32> %B, <4 x i32> undef, <4 x i32> zeroinitializer
  %shr = ashr <4 x i32> %A, %vecinit6
  ret <4 x i32> %shr
}