; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+sse4.1 | FileCheck %s --check-prefix=ALL --check-prefix=SSE --check-prefix=SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mcpu=x86-64 -mattr=+avx2 | FileCheck %s --check-prefix=ALL --check-prefix=AVX --check-prefix=AVX2

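; A chain of insertelement/shufflevector that builds <x[0], 0, x[2], a[2]> should fold to a single insertps.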
define <4 x float> @shuffle_v4f32_0z27(<4 x float> %x, <4 x float> %a) {
; SSE-LABEL: shuffle_v4f32_0z27:
; SSE:       # BB#0:
; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
; SSE-NEXT:    retq
;
; AVX-LABEL: shuffle_v4f32_0z27:
; AVX:       # BB#0:
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[2]
; AVX-NEXT:    retq
  %vecext = extractelement <4 x float> %x, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.0, i32 1
  %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %x, <4 x i32> <i32 0, i32 1, i32 6, i32 undef>
  %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %a, <4 x i32> <i32 0, i32 1, i32 2, i32 6>
  ret <4 x float> %vecinit5
}

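; <xyzw[0], 0, 0, abcd[0]> should likewise lower to a single insertps.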
define <4 x float> @shuffle_v4f32_0zz4(<4 x float> %xyzw, <4 x float> %abcd) {
; SSE-LABEL: shuffle_v4f32_0zz4:
; SSE:       # BB#0:
; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: shuffle_v4f32_0zz4:
; AVX:       # BB#0:
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm1[0]
; AVX-NEXT:    retq
  %vecext = extractelement <4 x float> %xyzw, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecinit2 = insertelement <4 x float> %vecinit1, float 0.000000e+00, i32 2
  %vecinit4 = shufflevector <4 x float> %vecinit2, <4 x float> %abcd, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ret <4 x float> %vecinit4
}

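; <xyzw[0], 0, xyzw[2], abcd[0]> should lower to a single insertps.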
define <4 x float> @shuffle_v4f32_0z24(<4 x float> %xyzw, <4 x float> %abcd) {
; SSE-LABEL: shuffle_v4f32_0z24:
; SSE:       # BB#0:
; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: shuffle_v4f32_0z24:
; AVX:       # BB#0:
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm0[2],xmm1[0]
; AVX-NEXT:    retq
  %vecext = extractelement <4 x float> %xyzw, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecinit3 = shufflevector <4 x float> %vecinit1, <4 x float> %xyzw, <4 x i32> <i32 0, i32 1, i32 6, i32 undef>
  %vecinit5 = shufflevector <4 x float> %vecinit3, <4 x float> %abcd, <4 x i32> <i32 0, i32 1, i32 2, i32 4>
  ret <4 x float> %vecinit5
}

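; Building <a, 0, 0, a> from a scalar argument should lower to a single insertps.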
define <4 x float> @shuffle_v4f32_0zz0(float %a) {
; SSE-LABEL: shuffle_v4f32_0zz0:
; SSE:       # BB#0:
; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; SSE-NEXT:    retq
;
; AVX-LABEL: shuffle_v4f32_0zz0:
; AVX:       # BB#0:
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,zero,xmm0[0]
; AVX-NEXT:    retq
  %vecinit = insertelement <4 x float> undef, float %a, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecinit2 = insertelement <4 x float> %vecinit1, float 0.000000e+00, i32 2
  %vecinit3 = insertelement <4 x float> %vecinit2, float %a, i32 3
  ret <4 x float> %vecinit3
}

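; <A[0], 0, B[2], 0> should lower to a single insertps reading from both sources.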
define <4 x float> @shuffle_v4f32_0z6z(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: shuffle_v4f32_0z6z:
; SSE:       # BB#0:
; SSE-NEXT:    insertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; SSE-NEXT:    retq
;
; AVX-LABEL: shuffle_v4f32_0z6z:
; AVX:       # BB#0:
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm0[0],zero,xmm1[2],zero
; AVX-NEXT:    retq
  %vecext = extractelement <4 x float> %A, i32 0
  %vecinit = insertelement <4 x float> undef, float %vecext, i32 0
  %vecinit1 = insertelement <4 x float> %vecinit, float 0.000000e+00, i32 1
  %vecext2 = extractelement <4 x float> %B, i32 2
  %vecinit3 = insertelement <4 x float> %vecinit1, float %vecext2, i32 2
  %vecinit4 = insertelement <4 x float> %vecinit3, float 0.000000e+00, i32 3
  ret <4 x float> %vecinit4
}

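; Every lane taken from the first insertps operand ends up zeroed, so the fadd feeding it should be dead and only %a1 used.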
define <4 x float> @insertps_undef_input0(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: insertps_undef_input0:
; SSE:       # BB#0:
; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm1[0],zero,zero
; SSE-NEXT:    retq
;
; AVX-LABEL: insertps_undef_input0:
; AVX:       # BB#0:
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm1[0],zero,zero
; AVX-NEXT:    retq
  %res0 = fadd <4 x float> %a0, <float 1.0, float 1.0, float 1.0, float 1.0>
  %res1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %res0, <4 x float> %a1, i8 21)
  %res2 = shufflevector <4 x float> %res1, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 1, i32 2, i32 7>
  ret <4 x float> %res2
}

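; The lane inserted from the second insertps operand is zeroed by the trailing shuffle, so the fadd feeding it should be dead and only lane 3 of %a0 survives.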
define <4 x float> @insertps_undef_input1(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: insertps_undef_input1:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm1, %xmm1
; SSE-NEXT:    blendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; SSE-NEXT:    retq
;
; AVX-LABEL: insertps_undef_input1:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm1, %xmm1, %xmm1
; AVX-NEXT:    vblendps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[3]
; AVX-NEXT:    retq
  %res0 = fadd <4 x float> %a1, <float 1.0, float 1.0, float 1.0, float 1.0>
  %res1 = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %res0, i8 21)
  %res2 = shufflevector <4 x float> %res1, <4 x float> zeroinitializer, <4 x i32> <i32 0, i32 5, i32 2, i32 3>
  ret <4 x float> %res2
}

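; Lane 2 of the bitcast <double 1.0, double 2.0> constant is the low 32 bits of 2.0, i.e. zero, so insertps can use its zero mask instead of reading the constant.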
define <4 x float> @insertps_zero_from_v2f64(<4 x float> %a0, <2 x double>* %a1) nounwind {
; SSE-LABEL: insertps_zero_from_v2f64:
; SSE:       # BB#0:
; SSE-NEXT:    movapd (%rdi), %xmm1
; SSE-NEXT:    addpd {{.*}}(%rip), %xmm1
; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
; SSE-NEXT:    movapd %xmm1, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: insertps_zero_from_v2f64:
; AVX:       # BB#0:
; AVX-NEXT:    vmovapd (%rdi), %xmm1
; AVX-NEXT:    vaddpd {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
; AVX-NEXT:    vmovapd %xmm1, (%rdi)
; AVX-NEXT:    retq
  %1 = load <2 x double>, <2 x double>* %a1
  %2 = bitcast <2 x double> <double 1.0, double 2.0> to <4 x float>
  %3 = fadd <2 x double> %1, <double 1.0, double 2.0>
  %4 = shufflevector <4 x float> %a0, <4 x float> %2, <4 x i32> <i32 6, i32 2, i32 2, i32 3>
  store <2 x double> %3, <2 x double> *%a1
  ret <4 x float> %4
}

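; Lane 1 of the bitcast <i64 1, i64 -2> constant is the high 32 bits of 1, i.e. zero, so the same zero-mask fold applies.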
define <4 x float> @insertps_zero_from_v2i64(<4 x float> %a0, <2 x i64>* %a1) nounwind {
; SSE-LABEL: insertps_zero_from_v2i64:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa (%rdi), %xmm1
; SSE-NEXT:    paddq {{.*}}(%rip), %xmm1
; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
; SSE-NEXT:    movdqa %xmm1, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: insertps_zero_from_v2i64:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm1
; AVX-NEXT:    vpaddq {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX-NEXT:    retq
  %1 = load <2 x i64>, <2 x i64>* %a1
  %2 = bitcast <2 x i64> <i64 1, i64 -2> to <4 x float>
  %3 = add <2 x i64> %1, <i64 1, i64 -2>
  %4 = shufflevector <4 x float> %a0, <4 x float> %2, <4 x i32> <i32 5, i32 2, i32 2, i32 3>
  store <2 x i64> %3, <2 x i64> *%a1
  ret <4 x float> %4
}

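; Lane 0 of the bitcast <8 x i16> constant is built from two zero i16 elements, so it is known zero and folds into the insertps zero mask.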
define <4 x float> @insertps_zero_from_v8i16(<4 x float> %a0, <8 x i16>* %a1) nounwind {
; SSE-LABEL: insertps_zero_from_v8i16:
; SSE:       # BB#0:
; SSE-NEXT:    movdqa (%rdi), %xmm1
; SSE-NEXT:    paddw {{.*}}(%rip), %xmm1
; SSE-NEXT:    insertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
; SSE-NEXT:    movdqa %xmm1, (%rdi)
; SSE-NEXT:    retq
;
; AVX-LABEL: insertps_zero_from_v8i16:
; AVX:       # BB#0:
; AVX-NEXT:    vmovdqa (%rdi), %xmm1
; AVX-NEXT:    vpaddw {{.*}}(%rip), %xmm1, %xmm1
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = zero,xmm0[2,2,3]
; AVX-NEXT:    vmovdqa %xmm1, (%rdi)
; AVX-NEXT:    retq
  %1 = load <8 x i16>, <8 x i16>* %a1
  %2 = bitcast <8 x i16> <i16 0, i16 0, i16 1, i16 1, i16 2, i16 2, i16 3, i16 3> to <4 x float>
  %3 = add <8 x i16> %1, <i16 0, i16 0, i16 1, i16 1, i16 2, i16 2, i16 3, i16 3>
  %4 = shufflevector <4 x float> %a0, <4 x float> %2, <4 x i32> <i32 4, i32 2, i32 2, i32 3>
  store <8 x i16> %3, <8 x i16> *%a1
  ret <4 x float> %4
}

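; Two consecutive scalar loads combined by insertps should become a single 64-bit load (movsd).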
define <4 x float> @consecutive_load_insertps_04zz(float* %p) {
; SSE-LABEL: consecutive_load_insertps_04zz:
; SSE:       # BB#0:
; SSE-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE-NEXT:    retq
;
; AVX-LABEL: consecutive_load_insertps_04zz:
; AVX:       # BB#0:
; AVX-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX-NEXT:    retq
  %p0 = getelementptr inbounds float, float* %p, i64 1
  %p1 = getelementptr inbounds float, float* %p, i64 2
  %s0 = load float, float* %p0
  %s1 = load float, float* %p1
  %v0 = insertelement <4 x float> undef, float %s0, i32 0
  %v1 = insertelement <4 x float> undef, float %s1, i32 0
  %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %v0, <4 x float> %v1, i8 28)
  ret <4 x float> %res
}

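; Extracting lane 0, which the insertps zero mask clears, should fold to a zero (xorps).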
define float @extract_zero_insertps_z0z7(<4 x float> %a0, <4 x float> %a1) {
; SSE-LABEL: extract_zero_insertps_z0z7:
; SSE:       # BB#0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: extract_zero_insertps_z0z7:
; AVX:       # BB#0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 21)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}

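; Extracting lane 0, which insertps fills from the in-memory %a1, should reduce to a single scalar load (movss).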
define float @extract_lane_insertps_5123(<4 x float> %a0, <4 x float> *%p1) {
; SSE-LABEL: extract_lane_insertps_5123:
; SSE:       # BB#0:
; SSE-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE-NEXT:    retq
;
; AVX-LABEL: extract_lane_insertps_5123:
; AVX:       # BB#0:
; AVX-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX-NEXT:    retq
  %a1 = load <4 x float>, <4 x float> *%p1
  %res = call <4 x float> @llvm.x86.sse41.insertps(<4 x float> %a0, <4 x float> %a1, i8 128)
  %ext = extractelement <4 x float> %res, i32 0
  ret float %ext
}

declare <4 x float> @llvm.x86.sse41.insertps(<4 x float>, <4 x float>, i8) nounwind readnone