; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64

; Test that vector shifts are converted to the proper SSE2 vector shift
; instructions when the shift amount is the same for every lane.
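;
; For example, a uniform constant amount should select the immediate form of
; the SSE2 shift; as a sketch of the expected mapping (not itself checked by
; this file):
;
;   shl <2 x i64> %v, <i64 3, i64 3>  -->  psllq $3, %xmm0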

define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-LABEL: shift1a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psllq $32, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psllq $32, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shl = shl <2 x i64> %val, <i64 32, i64 32>
  store <2 x i64> %shl, <2 x i64>* %dst
  ret void
}

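; A variable scalar amount that is splatted across every lane should instead
; select the register form of the shift (e.g. psllq %xmm1, %xmm0), which
; applies the count held in the low 64 bits of %xmm1 to all lanes.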
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-LABEL: shift1b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; X32-NEXT:    psllq %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq %rsi, %xmm1
; X64-NEXT:    psllq %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
  %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
  %shl = shl <2 x i64> %val, %1
  store <2 x i64> %shl, <2 x i64>* %dst
  ret void
}

define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
; X32-LABEL: shift2a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    pslld $5, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    pslld $5, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shl = shl <4 x i32> %val, <i32 5, i32 5, i32 5, i32 5>
  store <4 x i32> %shl, <4 x i32>* %dst
  ret void
}

define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift2b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    pslld %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd %esi, %xmm1
; X64-NEXT:    pslld %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
  %shl = shl <4 x i32> %val, %3
  store <4 x i32> %shl, <4 x i32>* %dst
  ret void
}
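
; Although the IR above splats %amt into every lane, only a scalar movd of
; the count is materialized: the register form of the shift reads its count
; from the low 64 bits of its operand, so the rest of the splat folds away.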

define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
; X32-LABEL: shift3a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psllw $5, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psllw $5, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %shl = shl <8 x i16> %val, <i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5>
  store <8 x i16> %shl, <8 x i16>* %dst
  ret void
}

; Make sure the shift amount is properly zero-extended.
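; (The psllw count is taken from the low 64 bits of its xmm operand, not just
; the low 16 bits, so stray high bits of the i16 argument would change the
; count; the movzwl below guards against that.)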
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movd %ecx, %xmm1
; X32-NEXT:    psllw %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzwl %si, %eax
; X64-NEXT:    movd %eax, %xmm1
; X64-NEXT:    psllw %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %shl = shl <8 x i16> %val, %7
  store <8 x i16> %shl, <8 x i16>* %dst
  ret void
}