; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+sse3 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
; RUN: llc < %s -mtriple=x86_64-unknown-linux-gnu -mattr=+avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512

; Verify that we correctly generate 'addsub' instructions from
; a sequence of vector extracts + float add/sub + vector inserts.
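;
; The SSE3/AVX 'addsub' instructions subtract in the even-numbered lanes and
; add in the odd-numbered lanes, so each function below builds its result with
; fsub in the even lanes and fadd in the odd lanes.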

define <4 x float> @test1(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test1:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test1:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 2
  %4 = extractelement <4 x float> %B, i32 2
  %sub2 = fsub float %3, %4
  %5 = extractelement <4 x float> %A, i32 1
  %6 = extractelement <4 x float> %B, i32 1
  %add = fadd float %5, %6
  %7 = extractelement <4 x float> %A, i32 3
  %8 = extractelement <4 x float> %B, i32 3
  %add2 = fadd float %7, %8
  %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
  %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
  %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
  ret <4 x float> %vecinsert4
}

define <4 x float> @test2(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test2:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test2:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 2
  %2 = extractelement <4 x float> %B, i32 2
  %sub2 = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 3
  %4 = extractelement <4 x float> %B, i32 3
  %add2 = fadd float %3, %4
  %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 2
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
  ret <4 x float> %vecinsert2
}

define <4 x float> @test3(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test3:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test3:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 3
  %4 = extractelement <4 x float> %B, i32 3
  %add = fadd float %4, %3
  %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 3
  ret <4 x float> %vecinsert2
}

define <4 x float> @test4(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test4:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test4:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 2
  %2 = extractelement <4 x float> %B, i32 2
  %sub = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 1
  %4 = extractelement <4 x float> %B, i32 1
  %add = fadd float %3, %4
  %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add, i32 1
  ret <4 x float> %vecinsert2
}

define <4 x float> @test5(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test5:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test5:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub2 = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 1
  %4 = extractelement <4 x float> %B, i32 1
  %add2 = fadd float %3, %4
  %vecinsert1 = insertelement <4 x float> undef, float %sub2, i32 0
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 1
  ret <4 x float> %vecinsert2
}

define <4 x float> @test6(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test6:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test6:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 2
  %4 = extractelement <4 x float> %B, i32 2
  %sub2 = fsub float %3, %4
  %5 = extractelement <4 x float> %A, i32 1
  %6 = extractelement <4 x float> %B, i32 1
  %add = fadd float %5, %6
  %7 = extractelement <4 x float> %A, i32 3
  %8 = extractelement <4 x float> %B, i32 3
  %add2 = fadd float %7, %8
  %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
  %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
  %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
  ret <4 x float> %vecinsert4
}

define <4 x double> @test7(<4 x double> %A, <4 x double> %B) {
; SSE-LABEL: test7:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubpd %xmm2, %xmm0
; SSE-NEXT:    addsubpd %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: test7:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubpd %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x double> %A, i32 0
  %2 = extractelement <4 x double> %B, i32 0
  %sub = fsub double %1, %2
  %3 = extractelement <4 x double> %A, i32 2
  %4 = extractelement <4 x double> %B, i32 2
  %sub2 = fsub double %3, %4
  %5 = extractelement <4 x double> %A, i32 1
  %6 = extractelement <4 x double> %B, i32 1
  %add = fadd double %5, %6
  %7 = extractelement <4 x double> %A, i32 3
  %8 = extractelement <4 x double> %B, i32 3
  %add2 = fadd double %7, %8
  %vecinsert1 = insertelement <4 x double> undef, double %add, i32 1
  %vecinsert2 = insertelement <4 x double> %vecinsert1, double %add2, i32 3
  %vecinsert3 = insertelement <4 x double> %vecinsert2, double %sub, i32 0
  %vecinsert4 = insertelement <4 x double> %vecinsert3, double %sub2, i32 2
  ret <4 x double> %vecinsert4
}

define <2 x double> @test8(<2 x double> %A, <2 x double> %B) {
; SSE-LABEL: test8:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubpd %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test8:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubpd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <2 x double> %A, i32 0
  %2 = extractelement <2 x double> %B, i32 0
  %sub = fsub double %1, %2
  %3 = extractelement <2 x double> %A, i32 1
  %4 = extractelement <2 x double> %B, i32 1
  %add = fadd double %3, %4
  %vecinsert1 = insertelement <2 x double> undef, double %sub, i32 0
  %vecinsert2 = insertelement <2 x double> %vecinsert1, double %add, i32 1
  ret <2 x double> %vecinsert2
}

define <8 x float> @test9(<8 x float> %A, <8 x float> %B) {
; SSE-LABEL: test9:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm2, %xmm0
; SSE-NEXT:    addsubps %xmm3, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: test9:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %ymm1, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = extractelement <8 x float> %A, i32 0
  %2 = extractelement <8 x float> %B, i32 0
  %sub = fsub float %1, %2
  %3 = extractelement <8 x float> %A, i32 2
  %4 = extractelement <8 x float> %B, i32 2
  %sub2 = fsub float %3, %4
  %5 = extractelement <8 x float> %A, i32 1
  %6 = extractelement <8 x float> %B, i32 1
  %add = fadd float %5, %6
  %7 = extractelement <8 x float> %A, i32 3
  %8 = extractelement <8 x float> %B, i32 3
  %add2 = fadd float %7, %8
  %9 = extractelement <8 x float> %A, i32 4
  %10 = extractelement <8 x float> %B, i32 4
  %sub3 = fsub float %9, %10
  %11 = extractelement <8 x float> %A, i32 6
  %12 = extractelement <8 x float> %B, i32 6
  %sub4 = fsub float %11, %12
  %13 = extractelement <8 x float> %A, i32 5
  %14 = extractelement <8 x float> %B, i32 5
  %add3 = fadd float %13, %14
  %15 = extractelement <8 x float> %A, i32 7
  %16 = extractelement <8 x float> %B, i32 7
  %add4 = fadd float %15, %16
  %vecinsert1 = insertelement <8 x float> undef, float %add, i32 1
  %vecinsert2 = insertelement <8 x float> %vecinsert1, float %add2, i32 3
  %vecinsert3 = insertelement <8 x float> %vecinsert2, float %sub, i32 0
  %vecinsert4 = insertelement <8 x float> %vecinsert3, float %sub2, i32 2
  %vecinsert5 = insertelement <8 x float> %vecinsert4, float %add3, i32 5
  %vecinsert6 = insertelement <8 x float> %vecinsert5, float %add4, i32 7
  %vecinsert7 = insertelement <8 x float> %vecinsert6, float %sub3, i32 4
  %vecinsert8 = insertelement <8 x float> %vecinsert7, float %sub4, i32 6
  ret <8 x float> %vecinsert8
}

; Verify that we don't generate 'addsub' instructions for the following
; functions.
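;
; In test10 through test16 the even-subtract/odd-add pattern is incomplete:
; only a single lane is computed, both computed lanes use the same operation,
; or a constant replaces the matching element of %B.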

define <4 x float> @test10(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test10:
; SSE:       # %bb.0:
; SSE-NEXT:    subss %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test10:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub = fsub float %1, %2
  %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
  ret <4 x float> %vecinsert1
}

define <4 x float> @test11(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test11:
; SSE:       # %bb.0:
; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    subss %xmm1, %xmm0
; SSE-NEXT:    movddup {{.*#+}} xmm0 = xmm0[0,0]
; SSE-NEXT:    retq
;
; AVX-LABEL: test11:
; AVX:       # %bb.0:
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovddup {{.*#+}} xmm0 = xmm0[0,0]
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 2
  %2 = extractelement <4 x float> %B, i32 2
  %sub = fsub float %1, %2
  %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 2
  ret <4 x float> %vecinsert1
}

define <4 x float> @test12(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test12:
; SSE:       # %bb.0:
; SSE-NEXT:    movshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE-NEXT:    movshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE-NEXT:    addss %xmm0, %xmm1
; SSE-NEXT:    movsldup {{.*#+}} xmm0 = xmm1[0,0,2,2]
; SSE-NEXT:    retq
;
; AVX1-LABEL: test12:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX1-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX1-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vmovsldup {{.*#+}} xmm0 = xmm0[0,0,2,2]
; AVX1-NEXT:    retq
;
; AVX512-LABEL: test12:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm0 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm1 = xmm1[1,1,3,3]
; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 1
  %2 = extractelement <4 x float> %B, i32 1
  %add = fadd float %1, %2
  %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
  ret <4 x float> %vecinsert1
}

define <4 x float> @test13(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test13:
; SSE:       # %bb.0:
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT:    addss %xmm0, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,1,2,0]
; SSE-NEXT:    movaps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX1-LABEL: test13:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX1-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX1-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX1-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[0,1,2,0]
; AVX1-NEXT:    retq
;
; AVX512-LABEL: test13:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vbroadcastss %xmm0, %xmm0
; AVX512-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 3
  %2 = extractelement <4 x float> %B, i32 3
  %add = fadd float %1, %2
  %vecinsert1 = insertelement <4 x float> undef, float %add, i32 3
  ret <4 x float> %vecinsert1
}

define <4 x float> @test14(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test14:
; SSE:       # %bb.0:
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    subss %xmm1, %xmm2
; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    subss %xmm1, %xmm0
; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm0[0]
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test14:
; AVX:       # %bb.0:
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm2
; AVX-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX-NEXT:    vsubss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1],xmm0[0],xmm2[3]
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub = fsub float %1, %2
  %3 = extractelement <4 x float> %A, i32 2
  %4 = extractelement <4 x float> %B, i32 2
  %sub2 = fsub float %3, %4
  %vecinsert1 = insertelement <4 x float> undef, float %sub, i32 0
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %sub2, i32 2
  ret <4 x float> %vecinsert2
}

define <4 x float> @test15(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test15:
; SSE:       # %bb.0:
; SSE-NEXT:    movshdup {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE-NEXT:    movshdup {{.*#+}} xmm2 = xmm1[1,1,3,3]
; SSE-NEXT:    addss %xmm3, %xmm2
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT:    addss %xmm0, %xmm1
; SSE-NEXT:    shufps {{.*#+}} xmm2 = xmm2[0,0],xmm1[0,0]
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test15:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovshdup {{.*#+}} xmm2 = xmm0[1,1,3,3]
; AVX-NEXT:    vmovshdup {{.*#+}} xmm3 = xmm1[1,1,3,3]
; AVX-NEXT:    vaddss %xmm3, %xmm2, %xmm2
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vmovsldup {{.*#+}} xmm1 = xmm2[0,0,2,2]
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm1[0,1,2],xmm0[0]
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 1
  %2 = extractelement <4 x float> %B, i32 1
  %add = fadd float %1, %2
  %3 = extractelement <4 x float> %A, i32 3
  %4 = extractelement <4 x float> %B, i32 3
  %add2 = fadd float %3, %4
  %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
  ret <4 x float> %vecinsert2
}

define <4 x float> @test16(<4 x float> %A, <4 x float> %B) {
; SSE-LABEL: test16:
; SSE:       # %bb.0:
; SSE-NEXT:    movss {{.*#+}} xmm3 = mem[0],zero,zero,zero
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    subss %xmm3, %xmm2
; SSE-NEXT:    movaps %xmm0, %xmm4
; SSE-NEXT:    movhlps {{.*#+}} xmm4 = xmm0[1],xmm4[1]
; SSE-NEXT:    movaps %xmm1, %xmm5
; SSE-NEXT:    movhlps {{.*#+}} xmm5 = xmm1[1],xmm5[1]
; SSE-NEXT:    subss %xmm5, %xmm4
; SSE-NEXT:    movshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; SSE-NEXT:    addss %xmm3, %xmm5
; SSE-NEXT:    unpcklps {{.*#+}} xmm2 = xmm2[0],xmm5[0],xmm2[1],xmm5[1]
; SSE-NEXT:    shufps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; SSE-NEXT:    shufps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; SSE-NEXT:    addss %xmm0, %xmm1
; SSE-NEXT:    unpcklps {{.*#+}} xmm4 = xmm4[0],xmm1[0],xmm4[1],xmm1[1]
; SSE-NEXT:    movlhps {{.*#+}} xmm2 = xmm2[0],xmm4[0]
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test16:
; AVX:       # %bb.0:
; AVX-NEXT:    vmovss {{.*#+}} xmm2 = mem[0],zero,zero,zero
; AVX-NEXT:    vsubss %xmm2, %xmm0, %xmm3
; AVX-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX-NEXT:    vsubss %xmm5, %xmm4, %xmm4
; AVX-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX-NEXT:    vaddss %xmm2, %xmm5, %xmm2
; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm3[0],xmm2[0],xmm3[2,3]
; AVX-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm4[0],xmm2[3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX-NEXT:    vinsertps {{.*#+}} xmm0 = xmm2[0,1,2],xmm0[0]
; AVX-NEXT:    retq
  %1 = extractelement <4 x float> %A, i32 0
  %2 = extractelement <4 x float> %B, i32 0
  %sub = fsub float %1, 42.0
  %3 = extractelement <4 x float> %A, i32 2
  %4 = extractelement <4 x float> %B, i32 2
  %sub2 = fsub float %3, %4
  %5 = extractelement <4 x float> %A, i32 1
  %6 = extractelement <4 x float> %B, i32 1
  %add = fadd float %5, 42.0
  %7 = extractelement <4 x float> %A, i32 3
  %8 = extractelement <4 x float> %B, i32 3
  %add2 = fadd float %7, %8
  %vecinsert1 = insertelement <4 x float> undef, float %add, i32 1
  %vecinsert2 = insertelement <4 x float> %vecinsert1, float %add2, i32 3
  %vecinsert3 = insertelement <4 x float> %vecinsert2, float %sub, i32 0
  %vecinsert4 = insertelement <4 x float> %vecinsert3, float %sub2, i32 2
  ret <4 x float> %vecinsert4
}

define <2 x float> @test_v2f32(<2 x float> %v0, <2 x float> %v1) {
; SSE-LABEL: test_v2f32:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm1, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: test_v2f32:
; AVX:       # %bb.0:
; AVX-NEXT:    vaddsubps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %v2 = extractelement <2 x float> %v0, i32 0
  %v3 = extractelement <2 x float> %v1, i32 0
  %v4 = extractelement <2 x float> %v0, i32 1
  %v5 = extractelement <2 x float> %v1, i32 1
  %sub = fsub float %v2, %v3
  %add = fadd float %v5, %v4
  %res0 = insertelement <2 x float> undef, float %sub, i32 0
  %res1 = insertelement <2 x float> %res0, float %add, i32 1
  ret <2 x float> %res1
}

define <16 x float> @test17(<16 x float> %A, <16 x float> %B) {
; SSE-LABEL: test17:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubps %xmm4, %xmm0
; SSE-NEXT:    addsubps %xmm5, %xmm1
; SSE-NEXT:    movaps %xmm0, %xmm2
; SSE-NEXT:    movaps %xmm1, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: test17:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vaddsubps %ymm2, %ymm0, %ymm0
; AVX1-NEXT:    vmovaps %ymm0, %ymm1
; AVX1-NEXT:    retq
;
; AVX512-LABEL: test17:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm2
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm0[1,0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm1[1,0]
; AVX512-NEXT:    vsubss %xmm4, %xmm3, %xmm3
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm4 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm1[1,1,3,3]
; AVX512-NEXT:    vaddss %xmm5, %xmm4, %xmm4
; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0],xmm4[0],xmm2[2,3]
; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1],xmm3[0],xmm2[3]
; AVX512-NEXT:    vpermilps {{.*#+}} xmm3 = xmm0[3,1,2,3]
; AVX512-NEXT:    vpermilps {{.*#+}} xmm4 = xmm1[3,1,2,3]
; AVX512-NEXT:    vaddss %xmm4, %xmm3, %xmm3
; AVX512-NEXT:    vinsertps {{.*#+}} xmm2 = xmm2[0,1,2],xmm3[0]
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm1
; AVX512-NEXT:    vsubss %xmm1, %xmm0, %xmm3
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm0[1,0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm1[1,0]
; AVX512-NEXT:    vsubss %xmm5, %xmm4, %xmm4
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm5 = xmm0[1,1,3,3]
; AVX512-NEXT:    vmovshdup {{.*#+}} xmm6 = xmm1[1,1,3,3]
; AVX512-NEXT:    vaddss %xmm6, %xmm5, %xmm5
; AVX512-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0],xmm5[0],xmm3[2,3]
; AVX512-NEXT:    vinsertps {{.*#+}} xmm3 = xmm3[0,1],xmm4[0],xmm3[3]
; AVX512-NEXT:    vpermilps {{.*#+}} xmm0 = xmm0[3,1,2,3]
; AVX512-NEXT:    vpermilps {{.*#+}} xmm1 = xmm1[3,1,2,3]
; AVX512-NEXT:    vaddss %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vinsertps {{.*#+}} xmm0 = xmm3[0,1,2],xmm0[0]
; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm2, %ymm0
; AVX512-NEXT:    vinsertf64x4 $1, %ymm0, %zmm0, %zmm0
; AVX512-NEXT:    retq
  %1 = extractelement <16 x float> %A, i32 0
  %2 = extractelement <16 x float> %B, i32 0
  %sub = fsub float %1, %2
  %3 = extractelement <16 x float> %A, i32 2
  %4 = extractelement <16 x float> %B, i32 2
  %sub2 = fsub float %3, %4
  %5 = extractelement <16 x float> %A, i32 1
  %6 = extractelement <16 x float> %B, i32 1
  %add = fadd float %5, %6
  %7 = extractelement <16 x float> %A, i32 3
  %8 = extractelement <16 x float> %B, i32 3
  %add2 = fadd float %7, %8
  %9 = extractelement <16 x float> %A, i32 4
  %10 = extractelement <16 x float> %B, i32 4
  %sub3 = fsub float %9, %10
  %11 = extractelement <16 x float> %A, i32 6
  %12 = extractelement <16 x float> %B, i32 6
  %sub4 = fsub float %11, %12
  %13 = extractelement <16 x float> %A, i32 5
  %14 = extractelement <16 x float> %B, i32 5
  %add3 = fadd float %13, %14
  %15 = extractelement <16 x float> %A, i32 7
  %16 = extractelement <16 x float> %B, i32 7
  %add4 = fadd float %15, %16
  %17 = extractelement <16 x float> %A, i32 8
  %18 = extractelement <16 x float> %B, i32 8
  %sub5 = fsub float %1, %2
  %19 = extractelement <16 x float> %A, i32 10
  %20 = extractelement <16 x float> %B, i32 10
  %sub6 = fsub float %3, %4
  %21 = extractelement <16 x float> %A, i32 9
  %22 = extractelement <16 x float> %B, i32 9
  %add5 = fadd float %5, %6
  %23 = extractelement <16 x float> %A, i32 11
  %24 = extractelement <16 x float> %B, i32 11
  %add6 = fadd float %7, %8
  %25 = extractelement <16 x float> %A, i32 12
  %26 = extractelement <16 x float> %B, i32 12
  %sub7 = fsub float %9, %10
  %27 = extractelement <16 x float> %A, i32 14
  %28 = extractelement <16 x float> %B, i32 14
  %sub8 = fsub float %11, %12
  %29 = extractelement <16 x float> %A, i32 13
  %30 = extractelement <16 x float> %B, i32 13
  %add7 = fadd float %13, %14
  %31 = extractelement <16 x float> %A, i32 15
  %32 = extractelement <16 x float> %B, i32 15
  %add8 = fadd float %15, %16
  %vecinsert1 = insertelement <16 x float> undef, float %add, i32 1
  %vecinsert2 = insertelement <16 x float> %vecinsert1, float %add2, i32 3
  %vecinsert3 = insertelement <16 x float> %vecinsert2, float %sub, i32 0
  %vecinsert4 = insertelement <16 x float> %vecinsert3, float %sub2, i32 2
  %vecinsert5 = insertelement <16 x float> %vecinsert4, float %add3, i32 5
  %vecinsert6 = insertelement <16 x float> %vecinsert5, float %add4, i32 7
  %vecinsert7 = insertelement <16 x float> %vecinsert6, float %sub3, i32 4
  %vecinsert8 = insertelement <16 x float> %vecinsert7, float %sub4, i32 6
  %vecinsert9 = insertelement <16 x float> %vecinsert8, float %add5, i32 9
  %vecinsert10 = insertelement <16 x float> %vecinsert9, float %add6, i32 11
  %vecinsert11 = insertelement <16 x float> %vecinsert10, float %sub5, i32 8
  %vecinsert12 = insertelement <16 x float> %vecinsert11, float %sub6, i32 10
  %vecinsert13 = insertelement <16 x float> %vecinsert12, float %add7, i32 13
  %vecinsert14 = insertelement <16 x float> %vecinsert13, float %add8, i32 15
  %vecinsert15 = insertelement <16 x float> %vecinsert14, float %sub7, i32 12
  %vecinsert16 = insertelement <16 x float> %vecinsert15, float %sub8, i32 14
  ret <16 x float> %vecinsert16
}

define <8 x double> @test18(<8 x double> %A, <8 x double> %B) {
; SSE-LABEL: test18:
; SSE:       # %bb.0:
; SSE-NEXT:    addsubpd %xmm4, %xmm0
; SSE-NEXT:    addsubpd %xmm5, %xmm1
; SSE-NEXT:    addsubpd %xmm6, %xmm2
; SSE-NEXT:    addsubpd %xmm7, %xmm3
; SSE-NEXT:    retq
;
; AVX1-LABEL: test18:
; AVX1:       # %bb.0:
; AVX1-NEXT:    vaddsubpd %ymm2, %ymm0, %ymm0
; AVX1-NEXT:    vaddsubpd %ymm3, %ymm1, %ymm1
; AVX1-NEXT:    retq
;
; AVX512-LABEL: test18:
; AVX512:       # %bb.0:
; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm2
; AVX512-NEXT:    vextractf128 $1, %ymm0, %xmm3
; AVX512-NEXT:    vextractf128 $1, %ymm1, %xmm4
; AVX512-NEXT:    vsubsd %xmm4, %xmm3, %xmm5
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm6 = xmm0[1,0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm7 = xmm1[1,0]
; AVX512-NEXT:    vaddsd %xmm7, %xmm6, %xmm6
; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm2 = xmm2[0],xmm6[0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm3 = xmm3[1,0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
; AVX512-NEXT:    vaddsd %xmm4, %xmm3, %xmm3
; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm3 = xmm5[0],xmm3[0]
; AVX512-NEXT:    vextractf32x4 $2, %zmm0, %xmm4
; AVX512-NEXT:    vextractf32x4 $2, %zmm1, %xmm5
; AVX512-NEXT:    vsubsd %xmm5, %xmm4, %xmm6
; AVX512-NEXT:    vextractf32x4 $3, %zmm0, %xmm0
; AVX512-NEXT:    vextractf32x4 $3, %zmm1, %xmm1
; AVX512-NEXT:    vsubsd %xmm1, %xmm0, %xmm7
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm4 = xmm4[1,0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm5 = xmm5[1,0]
; AVX512-NEXT:    vaddsd %xmm5, %xmm4, %xmm4
; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm4 = xmm6[0],xmm4[0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm0 = xmm0[1,0]
; AVX512-NEXT:    vpermilpd {{.*#+}} xmm1 = xmm1[1,0]
; AVX512-NEXT:    vaddsd %xmm1, %xmm0, %xmm0
; AVX512-NEXT:    vunpcklpd {{.*#+}} xmm0 = xmm7[0],xmm0[0]
; AVX512-NEXT:    vinsertf128 $1, %xmm0, %ymm4, %ymm0
; AVX512-NEXT:    vinsertf128 $1, %xmm3, %ymm2, %ymm1
; AVX512-NEXT:    vinsertf64x4 $1, %ymm0, %zmm1, %zmm0
; AVX512-NEXT:    retq
  %1 = extractelement <8 x double> %A, i32 0
  %2 = extractelement <8 x double> %B, i32 0
  %sub = fsub double %1, %2
  %3 = extractelement <8 x double> %A, i32 2
  %4 = extractelement <8 x double> %B, i32 2
  %sub2 = fsub double %3, %4
  %5 = extractelement <8 x double> %A, i32 1
  %6 = extractelement <8 x double> %B, i32 1
  %add = fadd double %5, %6
  %7 = extractelement <8 x double> %A, i32 3
  %8 = extractelement <8 x double> %B, i32 3
  %add2 = fadd double %7, %8
  %9 = extractelement <8 x double> %A, i32 4
  %10 = extractelement <8 x double> %B, i32 4
  %sub3 = fsub double %9, %10
  %11 = extractelement <8 x double> %A, i32 6
  %12 = extractelement <8 x double> %B, i32 6
  %sub4 = fsub double %11, %12
  %13 = extractelement <8 x double> %A, i32 5
  %14 = extractelement <8 x double> %B, i32 5
  %add3 = fadd double %13, %14
  %15 = extractelement <8 x double> %A, i32 7
  %16 = extractelement <8 x double> %B, i32 7
  %add4 = fadd double %15, %16
  %vecinsert1 = insertelement <8 x double> undef, double %add, i32 1
  %vecinsert2 = insertelement <8 x double> %vecinsert1, double %add2, i32 3
  %vecinsert3 = insertelement <8 x double> %vecinsert2, double %sub, i32 0
  %vecinsert4 = insertelement <8 x double> %vecinsert3, double %sub2, i32 2
  %vecinsert5 = insertelement <8 x double> %vecinsert4, double %add3, i32 5
  %vecinsert6 = insertelement <8 x double> %vecinsert5, double %add4, i32 7
  %vecinsert7 = insertelement <8 x double> %vecinsert6, double %sub3, i32 4
  %vecinsert8 = insertelement <8 x double> %vecinsert7, double %sub4, i32 6
  ret <8 x double> %vecinsert8
}