; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx | FileCheck %s --check-prefix=X64

; When extracting multiple consecutive elements from a larger
; vector into a smaller one, do it efficiently. We should use
; an EXTRACT_SUBVECTOR node internally rather than a sequence of
; single-element extractions.
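; With AVX, the low 128 bits of a YMM register are simply the corresponding
; XMM register, and the high 128 bits can be extracted (or stored) with a
; single vextractf128. Note that the extract/insert chains below are
; equivalent to a subvector shuffle such as
;   shufflevector <8 x float> %v, <8 x float> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
; which is how a subvector extraction is usually expressed directly in IR.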

; Extracting the low elements only requires using the right kind of store.
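; The four extract/insert pairs below rebuild the low 128-bit half of %v, so
; they should fold into a single 16-byte store from %xmm0; the vzeroupper is
; only there to avoid AVX/SSE transition penalties after touching a YMM reg.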
define void @low_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: low_v8f32_to_v4f32:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vmovaps %xmm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: low_v8f32_to_v4f32:
; X64:       # %bb.0:
; X64-NEXT:    vmovaps %xmm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 0
  %ext1 = extractelement <8 x float> %v, i32 1
  %ext2 = extractelement <8 x float> %v, i32 2
  %ext3 = extractelement <8 x float> %v, i32 3
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, <4 x float>* %ptr, align 16
  ret void
}

; Extracting the high elements requires just one AVX instruction.
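; vextractf128 $1 can write the upper 128-bit lane of %ymm0 straight to
; memory, so no separate extract-then-store sequence is needed.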
define void @high_v8f32_to_v4f32(<8 x float> %v, <4 x float>* %ptr) {
; X32-LABEL: high_v8f32_to_v4f32:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: high_v8f32_to_v4f32:
; X64:       # %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x float> %v, i32 4
  %ext1 = extractelement <8 x float> %v, i32 5
  %ext2 = extractelement <8 x float> %v, i32 6
  %ext3 = extractelement <8 x float> %v, i32 7
  %ins0 = insertelement <4 x float> undef, float %ext0, i32 0
  %ins1 = insertelement <4 x float> %ins0, float %ext1, i32 1
  %ins2 = insertelement <4 x float> %ins1, float %ext2, i32 2
  %ins3 = insertelement <4 x float> %ins2, float %ext3, i32 3
  store <4 x float> %ins3, <4 x float>* %ptr, align 16
  ret void
}

; Make sure the element type doesn't alter the codegen. Note that
; if we were actually using the vector in this function and had
; AVX2, we should generate vextracti128 (the integer version).
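; Since the vector is only stored here, the float-domain vextractf128 is
; fine for integer elements and needs nothing beyond AVX1.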
define void @high_v8i32_to_v4i32(<8 x i32> %v, <4 x i32>* %ptr) {
; X32-LABEL: high_v8i32_to_v4i32:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: high_v8i32_to_v4i32:
; X64:       # %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <8 x i32> %v, i32 4
  %ext1 = extractelement <8 x i32> %v, i32 5
  %ext2 = extractelement <8 x i32> %v, i32 6
  %ext3 = extractelement <8 x i32> %v, i32 7
  %ins0 = insertelement <4 x i32> undef, i32 %ext0, i32 0
  %ins1 = insertelement <4 x i32> %ins0, i32 %ext1, i32 1
  %ins2 = insertelement <4 x i32> %ins1, i32 %ext2, i32 2
  %ins3 = insertelement <4 x i32> %ins2, i32 %ext3, i32 3
  store <4 x i32> %ins3, <4 x i32>* %ptr, align 16
  ret void
}

; Make sure that element size doesn't alter the codegen.
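; Elements 2-3 of a <4 x double> occupy the same upper 128 bits as elements
; 4-7 of a <8 x float>, so the expected codegen is identical to the above.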
define void @high_v4f64_to_v2f64(<4 x double> %v, <2 x double>* %ptr) {
; X32-LABEL: high_v4f64_to_v2f64:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    vextractf128 $1, %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: high_v4f64_to_v2f64:
; X64:       # %bb.0:
; X64-NEXT:    vextractf128 $1, %ymm0, (%rdi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ext0 = extractelement <4 x double> %v, i32 2
  %ext1 = extractelement <4 x double> %v, i32 3
  %ins0 = insertelement <2 x double> undef, double %ext0, i32 0
  %ins1 = insertelement <2 x double> %ins0, double %ext1, i32 1
  store <2 x double> %ins1, <2 x double>* %ptr, align 16
  ret void
}

; PR25320: Make sure that a widened (possibly legalized) vector correctly zero-extends its upper elements.
; FIXME - Ideally these should just use VMOVD/VMOVQ/VMOVSS/VMOVSD.
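; In each case the 64-bit <2 x ...> load becomes a vmovsd, which zeroes the
; destination register above bit 63. For the 32-bit element types the second
; loaded element must then be cleared explicitly, hence the vblendps against
; a zeroed register; for the 64-bit element types the vmovsd alone already
; yields the required zero-extended value.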

define void @legal_vzmovl_2i32_8i32(<2 x i32>* %in, <8 x i32>* %out) {
; X32-LABEL: legal_vzmovl_2i32_8i32:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2i32_8i32:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x i32>, <2 x i32>* %in, align 8
  %ext = extractelement <2 x i32> %ld, i64 0
  %ins = insertelement <8 x i32> <i32 undef, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0, i32 0>, i32 %ext, i64 0
  store <8 x i32> %ins, <8 x i32>* %out, align 32
  ret void
}

define void @legal_vzmovl_2i64_4i64(<2 x i64>* %in, <4 x i64>* %out) {
; X32-LABEL: legal_vzmovl_2i64_4i64:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2i64_4i64:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x i64>, <2 x i64>* %in, align 8
  %ext = extractelement <2 x i64> %ld, i64 0
  %ins = insertelement <4 x i64> <i64 undef, i64 0, i64 0, i64 0>, i64 %ext, i64 0
  store <4 x i64> %ins, <4 x i64>* %out, align 32
  ret void
}

define void @legal_vzmovl_2f32_8f32(<2 x float>* %in, <8 x float>* %out) {
; X32-LABEL: legal_vzmovl_2f32_8f32:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X32-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2f32_8f32:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; X64-NEXT:    vblendps {{.*#+}} xmm0 = xmm0[0],xmm1[1,2,3]
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x float>, <2 x float>* %in, align 8
  %ext = extractelement <2 x float> %ld, i64 0
  %ins = insertelement <8 x float> <float undef, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0, float 0.0>, float %ext, i64 0
  store <8 x float> %ins, <8 x float>* %out, align 32
  ret void
}

define void @legal_vzmovl_2f64_4f64(<2 x double>* %in, <4 x double>* %out) {
; X32-LABEL: legal_vzmovl_2f64_4f64:
; X32:       # %bb.0:
; X32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; X32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X32-NEXT:    vmovaps %ymm0, (%eax)
; X32-NEXT:    vzeroupper
; X32-NEXT:    retl
;
; X64-LABEL: legal_vzmovl_2f64_4f64:
; X64:       # %bb.0:
; X64-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; X64-NEXT:    vmovaps %ymm0, (%rsi)
; X64-NEXT:    vzeroupper
; X64-NEXT:    retq
  %ld = load <2 x double>, <2 x double>* %in, align 8
  %ext = extractelement <2 x double> %ld, i64 0
  %ins = insertelement <4 x double> <double undef, double 0.0, double 0.0, double 0.0>, double %ext, i64 0
  store <4 x double> %ins, <4 x double>* %out, align 32
  ret void
}