; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=corei7 | FileCheck %s --check-prefix=SSE
; RUN: llc < %s -mtriple=x86_64-pc-linux -mcpu=core-avx-i | FileCheck %s --check-prefix=AVX

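; fpext of <2 x float> to <2 x double>: a single cvtps2pd / vcvtps2pd on both SSE and AVX.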
define <2 x double> @v2f2d_ext_vec(<2 x float> %v1) nounwind {
; SSE-LABEL: v2f2d_ext_vec:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v2f2d_ext_vec:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0
; AVX-NEXT:    retq
entry:
  %f1 = fpext <2 x float> %v1 to <2 x double>
  ret <2 x double> %f1
}

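; fpext of <3 x float> to <3 x double>: SSE returns the first two lanes in %xmm0/%xmm1 and the
; third on the x87 stack (fldl); AVX widens to a single vcvtps2pd into a ymm register.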
define <3 x double> @v3f2d_ext_vec(<3 x float> %v1) nounwind {
; SSE-LABEL: v3f2d_ext_vec:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtps2pd %xmm0, %xmm2
; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0
; SSE-NEXT:    movlps %xmm0, -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movaps %xmm2, %xmm1
; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm2[1],xmm1[1]
; SSE-NEXT:    fldl -{{[0-9]+}}(%rsp)
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v3f2d_ext_vec:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0
; AVX-NEXT:    retq
entry:
  %f1 = fpext <3 x float> %v1 to <3 x double>
  ret <3 x double> %f1
}

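; fpext of <4 x float> to <4 x double>: SSE converts the low and high <2 x float> halves
; separately; AVX uses a single vcvtps2pd into a ymm register.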
define <4 x double> @v4f2d_ext_vec(<4 x float> %v1) nounwind {
; SSE-LABEL: v4f2d_ext_vec:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtps2pd %xmm0, %xmm2
; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    cvtps2pd %xmm0, %xmm1
; SSE-NEXT:    movaps %xmm2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: v4f2d_ext_vec:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0
; AVX-NEXT:    retq
entry:
  %f1 = fpext <4 x float> %v1 to <4 x double>
  ret <4 x double> %f1
}

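; fpext of <8 x float> to <8 x double>: SSE emits four cvtps2pd, one per <2 x float> half;
; AVX converts each 128-bit half with vcvtps2pd into a ymm register.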
define <8 x double> @v8f2d_ext_vec(<8 x float> %v1) nounwind {
; SSE-LABEL: v8f2d_ext_vec:
; SSE:       # %bb.0: # %entry
; SSE-NEXT:    cvtps2pd %xmm0, %xmm5
; SSE-NEXT:    cvtps2pd %xmm1, %xmm2
; SSE-NEXT:    movhlps {{.*#+}} xmm0 = xmm0[1,1]
; SSE-NEXT:    cvtps2pd %xmm0, %xmm4
; SSE-NEXT:    movhlps {{.*#+}} xmm1 = xmm1[1,1]
; SSE-NEXT:    cvtps2pd %xmm1, %xmm3
; SSE-NEXT:    movaps %xmm5, %xmm0
; SSE-NEXT:    movaps %xmm4, %xmm1
; SSE-NEXT:    retq
;
; AVX-LABEL: v8f2d_ext_vec:
; AVX:       # %bb.0: # %entry
; AVX-NEXT:    vcvtps2pd %xmm0, %ymm2
; AVX-NEXT:    vextractf128 $1, %ymm0, %xmm0
; AVX-NEXT:    vcvtps2pd %xmm0, %ymm1
; AVX-NEXT:    vmovaps %ymm2, %ymm0
; AVX-NEXT:    retq
entry:
  %f1 = fpext <8 x float> %v1 to <8 x double>
  ret <8 x double> %f1
}

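; Build a <4 x double> by inserting 0.0 at index 2 and a loaded double at index 3 of an
; undef vector, then store the result.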
define void @test_vector_creation() nounwind {
; SSE-LABEL: test_vector_creation:
; SSE:       # %bb.0:
; SSE-NEXT:    xorpd %xmm0, %xmm0
; SSE-NEXT:    movhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; SSE-NEXT:    movapd %xmm0, (%rax)
; SSE-NEXT:    retq
;
; AVX-LABEL: test_vector_creation:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorpd %xmm0, %xmm0, %xmm0
; AVX-NEXT:    vmovhpd {{.*#+}} xmm0 = xmm0[0],mem[0]
; AVX-NEXT:    vinsertf128 $1, %xmm0, %ymm0, %ymm0
; AVX-NEXT:    vmovaps %ymm0, (%rax)
; AVX-NEXT:    vzeroupper
; AVX-NEXT:    retq
  %1 = insertelement <4 x double> undef, double 0.000000e+00, i32 2
  %2 = load double, double addrspace(1)* null
  %3 = insertelement <4 x double> %1, double %2, i32 3
  store <4 x double> %3, <4 x double>* undef
  ret void
}