; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse2 | FileCheck %s --check-prefix=CHECK --check-prefix=X64

; Test that vector shifts are lowered to single SSE2 vector shift instructions
; when the per-element shift amounts are all the same.

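; shift1a: a splat of constant i64 shift amounts is expected to fold into the
; immediate form of the shift, i.e.
;   lshr <2 x i64> %val, < i64 32, i64 32 >  ->  psrlq $32, %xmm0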
define void @shift1a(<2 x i64> %val, <2 x i64>* %dst) nounwind {
; X32-LABEL: shift1a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psrlq $32, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psrlq $32, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %lshr = lshr <2 x i64> %val, < i64 32, i64 32 >
  store <2 x i64> %lshr, <2 x i64>* %dst
  ret void
}

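; shift1b: with a variable i64 amount, the two-element splat should collapse to
; a single scalar-to-vector move feeding the register-count form of the shift
; (movq %rsi, %xmm1 + psrlq %xmm1, %xmm0 on x86-64).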
define void @shift1b(<2 x i64> %val, <2 x i64>* %dst, i64 %amt) nounwind {
; X32-LABEL: shift1b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movq {{.*#+}} xmm1 = mem[0],zero
; X32-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,1,0,1]
; X32-NEXT:    psrlq %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift1b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movq %rsi, %xmm1
; X64-NEXT:    psrlq %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <2 x i64> undef, i64 %amt, i32 0
  %1 = insertelement <2 x i64> %0, i64 %amt, i32 1
  %lshr = lshr <2 x i64> %val, %1
  store <2 x i64> %lshr, <2 x i64>* %dst
  ret void
}

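; shift2a: as in shift1a, the splat-constant <4 x i32> amount should fold into
; the immediate form, psrld $17, %xmm0.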
define void @shift2a(<4 x i32> %val, <4 x i32>* %dst) nounwind {
; X32-LABEL: shift2a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psrld $17, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psrld $17, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %lshr = lshr <4 x i32> %val, < i32 17, i32 17, i32 17, i32 17 >
  store <4 x i32> %lshr, <4 x i32>* %dst
  ret void
}

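; shift2b: as in shift1b, the four insertelements of the same i32 amount should
; collapse to a single movd of the scalar feeding the register-count form,
; psrld %xmm1, %xmm0.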
define void @shift2b(<4 x i32> %val, <4 x i32>* %dst, i32 %amt) nounwind {
; X32-LABEL: shift2b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movd {{.*#+}} xmm1 = mem[0],zero,zero,zero
; X32-NEXT:    psrld %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift2b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movd %esi, %xmm1
; X64-NEXT:    psrld %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <4 x i32> undef, i32 %amt, i32 0
  %1 = insertelement <4 x i32> %0, i32 %amt, i32 1
  %2 = insertelement <4 x i32> %1, i32 %amt, i32 2
  %3 = insertelement <4 x i32> %2, i32 %amt, i32 3
  %lshr = lshr <4 x i32> %val, %3
  store <4 x i32> %lshr, <4 x i32>* %dst
  ret void
}

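; shift3a: the splat-constant <8 x i16> amount should fold into the immediate
; form, psrlw $5, %xmm0.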
define void @shift3a(<8 x i16> %val, <8 x i16>* %dst) nounwind {
; X32-LABEL: shift3a:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    psrlw $5, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3a:
; X64:       # %bb.0: # %entry
; X64-NEXT:    psrlw $5, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %lshr = lshr <8 x i16> %val, < i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5, i16 5 >
  store <8 x i16> %lshr, <8 x i16>* %dst
  ret void
}

; shift3b: check that the i16 shift amount is properly zero-extended before it
; is used as the shift count; SSE2 shifts read the whole low quadword of the
; count operand, so stray high bits would change the result.
define void @shift3b(<8 x i16> %val, <8 x i16>* %dst, i16 %amt) nounwind {
; X32-LABEL: shift3b:
; X32:       # %bb.0: # %entry
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movzwl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    movd %ecx, %xmm1
; X32-NEXT:    psrlw %xmm1, %xmm0
; X32-NEXT:    movdqa %xmm0, (%eax)
; X32-NEXT:    retl
;
; X64-LABEL: shift3b:
; X64:       # %bb.0: # %entry
; X64-NEXT:    movzwl %si, %eax
; X64-NEXT:    movd %eax, %xmm1
; X64-NEXT:    psrlw %xmm1, %xmm0
; X64-NEXT:    movdqa %xmm0, (%rdi)
; X64-NEXT:    retq
entry:
  %0 = insertelement <8 x i16> undef, i16 %amt, i32 0
  %1 = insertelement <8 x i16> %0, i16 %amt, i32 1
  %2 = insertelement <8 x i16> %1, i16 %amt, i32 2
  %3 = insertelement <8 x i16> %2, i16 %amt, i32 3
  %4 = insertelement <8 x i16> %3, i16 %amt, i32 4
  %5 = insertelement <8 x i16> %4, i16 %amt, i32 5
  %6 = insertelement <8 x i16> %5, i16 %amt, i32 6
  %7 = insertelement <8 x i16> %6, i16 %amt, i32 7
  %lshr = lshr <8 x i16> %val, %7
  store <8 x i16> %lshr, <8 x i16>* %dst
  ret void
}