; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X32-SSE
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX,X32-AVX
; RUN: llc < %s -mtriple=i686-unknown -mattr=+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX512VL,X32-AVX512VL
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+sse4.1 -show-mc-encoding | FileCheck %s --check-prefixes=SSE,X64-SSE
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx -show-mc-encoding | FileCheck %s --check-prefixes=AVX,X64-AVX
; RUN: llc < %s -mtriple=x86_64-unknown -mattr=+avx512vl -show-mc-encoding | FileCheck %s --check-prefixes=AVX512VL,X64-AVX512VL

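; fpext of a <4 x float> where only the low two results are used should
; narrow to a single xmm (v)cvtps2pd.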
define <2 x double> @fpext_4f32_to_2f64(<4 x float> %a) {
; SSE-LABEL: fpext_4f32_to_2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0 # encoding: [0x0f,0x5a,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: fpext_4f32_to_2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: fpext_4f32_to_2f64:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vcvtps2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cvt = fpext <4 x float> %a to <4 x double>
  %shuf = shufflevector <4 x double> %cvt, <4 x double> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x double> %shuf
}

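; The same narrowing starting from <8 x float>; AVX512VL instead converts
; the full ymm source to zmm.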
define <2 x double> @fpext_8f32_to_2f64(<8 x float> %a) {
; SSE-LABEL: fpext_8f32_to_2f64:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtps2pd %xmm0, %xmm0 # encoding: [0x0f,0x5a,0xc0]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: fpext_8f32_to_2f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
; AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: fpext_8f32_to_2f64:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vcvtps2pd %ymm0, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0xc0]
; AVX512VL-NEXT:    # kill: def $xmm0 killed $xmm0 killed $zmm0
; AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cvt = fpext <8 x float> %a to <8 x double>
  %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <2 x i32> <i32 0, i32 1>
  ret <2 x double> %shuf
}

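; fpext of <8 x float> keeping only the low four doubles: one ymm/zmm
; conversion with AVX/AVX512, two xmm conversions with SSE.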
define <4 x double> @fpext_8f32_to_4f64(<8 x float> %a) {
; SSE-LABEL: fpext_8f32_to_4f64:
; SSE:       # %bb.0:
; SSE-NEXT:    cvtps2pd %xmm0, %xmm2 # encoding: [0x0f,0x5a,0xd0]
; SSE-NEXT:    movhlps %xmm0, %xmm0 # encoding: [0x0f,0x12,0xc0]
; SSE-NEXT:    # xmm0 = xmm0[1,1]
; SSE-NEXT:    cvtps2pd %xmm0, %xmm1 # encoding: [0x0f,0x5a,0xc8]
; SSE-NEXT:    movaps %xmm2, %xmm0 # encoding: [0x0f,0x28,0xc2]
; SSE-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX-LABEL: fpext_8f32_to_4f64:
; AVX:       # %bb.0:
; AVX-NEXT:    vcvtps2pd %xmm0, %ymm0 # encoding: [0xc5,0xfc,0x5a,0xc0]
; AVX-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
;
; AVX512VL-LABEL: fpext_8f32_to_4f64:
; AVX512VL:       # %bb.0:
; AVX512VL-NEXT:    vcvtps2pd %ymm0, %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0xc0]
; AVX512VL-NEXT:    # kill: def $ymm0 killed $ymm0 killed $zmm0
; AVX512VL-NEXT:    ret{{[l|q]}} # encoding: [0xc3]
  %cvt = fpext <8 x float> %a to <8 x double>
  %shuf = shufflevector <8 x double> %cvt, <8 x double> undef, <4 x i32> <i32 0, i32 1, i32 2, i32 3>
  ret <4 x double> %shuf
}

; PR11674
define void @fpext_frommem(<2 x float>* %in, <2 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT:    cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
; X32-SSE-NEXT:    movups %xmm0, (%eax) # encoding: [0x0f,0x11,0x00]
; X32-SSE-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT:    vcvtps2pd (%ecx), %xmm0 # encoding: [0xc5,0xf8,0x5a,0x01]
; X32-AVX-NEXT:    vmovups %xmm0, (%eax) # encoding: [0xc5,0xf8,0x11,0x00]
; X32-AVX-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem:
; X32-AVX512VL:       # %bb.0: # %entry
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT:    vcvtps2pd (%ecx), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0x01]
; X32-AVX512VL-NEXT:    vmovups %xmm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x00]
; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT:    movups %xmm0, (%rsi) # encoding: [0x0f,0x11,0x06]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtps2pd (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x5a,0x07]
; X64-AVX-NEXT:    vmovups %xmm0, (%rsi) # encoding: [0xc5,0xf8,0x11,0x06]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vcvtps2pd (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0x07]
; X64-AVX512VL-NEXT:    vmovups %xmm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x11,0x06]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = load <2 x float>, <2 x float>* %in, align 8
  %1 = fpext <2 x float> %0 to <2 x double>
  store <2 x double> %1, <2 x double>* %out, align 1
  ret void
}

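; fpext of a <4 x float> load: AVX and AVX512VL fold the load into a single
; ymm vcvtps2pd; SSE needs two xmm conversions.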
define void @fpext_frommem4(<4 x float>* %in, <4 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem4:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT:    cvtps2pd (%ecx), %xmm0 # encoding: [0x0f,0x5a,0x01]
; X32-SSE-NEXT:    cvtps2pd 8(%ecx), %xmm1 # encoding: [0x0f,0x5a,0x49,0x08]
; X32-SSE-NEXT:    movups %xmm1, 16(%eax) # encoding: [0x0f,0x11,0x48,0x10]
; X32-SSE-NEXT:    movups %xmm0, (%eax) # encoding: [0x0f,0x11,0x00]
; X32-SSE-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem4:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT:    vcvtps2pd (%ecx), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x01]
; X32-AVX-NEXT:    vmovups %ymm0, (%eax) # encoding: [0xc5,0xfc,0x11,0x00]
; X32-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem4:
; X32-AVX512VL:       # %bb.0: # %entry
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT:    vcvtps2pd (%ecx), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x01]
; X32-AVX512VL-NEXT:    vmovups %ymm0, (%eax) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x00]
; X32-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem4:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtps2pd (%rdi), %xmm0 # encoding: [0x0f,0x5a,0x07]
; X64-SSE-NEXT:    cvtps2pd 8(%rdi), %xmm1 # encoding: [0x0f,0x5a,0x4f,0x08]
; X64-SSE-NEXT:    movups %xmm1, 16(%rsi) # encoding: [0x0f,0x11,0x4e,0x10]
; X64-SSE-NEXT:    movups %xmm0, (%rsi) # encoding: [0x0f,0x11,0x06]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem4:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtps2pd (%rdi), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX-NEXT:    vmovups %ymm0, (%rsi) # encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem4:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vcvtps2pd (%rdi), %ymm0 # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX512VL-NEXT:    vmovups %ymm0, (%rsi) # EVEX TO VEX Compression encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = load <4 x float>, <4 x float>* %in
  %1 = fpext <4 x float> %0 to <4 x double>
  store <4 x double> %1, <4 x double>* %out, align 1
  ret void
}

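; fpext of an <8 x float> load: four xmm conversions on SSE, two ymm on AVX,
; and a single zmm conversion on AVX512VL.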
define void @fpext_frommem8(<8 x float>* %in, <8 x double>* %out) {
; X32-SSE-LABEL: fpext_frommem8:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-SSE-NEXT:    cvtps2pd 8(%ecx), %xmm0 # encoding: [0x0f,0x5a,0x41,0x08]
; X32-SSE-NEXT:    cvtps2pd (%ecx), %xmm1 # encoding: [0x0f,0x5a,0x09]
; X32-SSE-NEXT:    cvtps2pd 24(%ecx), %xmm2 # encoding: [0x0f,0x5a,0x51,0x18]
; X32-SSE-NEXT:    cvtps2pd 16(%ecx), %xmm3 # encoding: [0x0f,0x5a,0x59,0x10]
; X32-SSE-NEXT:    movups %xmm3, 32(%eax) # encoding: [0x0f,0x11,0x58,0x20]
; X32-SSE-NEXT:    movups %xmm2, 48(%eax) # encoding: [0x0f,0x11,0x50,0x30]
; X32-SSE-NEXT:    movups %xmm1, (%eax) # encoding: [0x0f,0x11,0x08]
; X32-SSE-NEXT:    movups %xmm0, 16(%eax) # encoding: [0x0f,0x11,0x40,0x10]
; X32-SSE-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_frommem8:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX-NEXT:    vcvtps2pd (%ecx), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x01]
; X32-AVX-NEXT:    vcvtps2pd 16(%ecx), %ymm1 # encoding: [0xc5,0xfc,0x5a,0x49,0x10]
; X32-AVX-NEXT:    vmovups %ymm1, 32(%eax) # encoding: [0xc5,0xfc,0x11,0x48,0x20]
; X32-AVX-NEXT:    vmovups %ymm0, (%eax) # encoding: [0xc5,0xfc,0x11,0x00]
; X32-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_frommem8:
; X32-AVX512VL:       # %bb.0: # %entry
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x08]
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %ecx # encoding: [0x8b,0x4c,0x24,0x04]
; X32-AVX512VL-NEXT:    vcvtps2pd (%ecx), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x01]
; X32-AVX512VL-NEXT:    vmovups %zmm0, (%eax) # encoding: [0x62,0xf1,0x7c,0x48,0x11,0x00]
; X32-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_frommem8:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    cvtps2pd 8(%rdi), %xmm0 # encoding: [0x0f,0x5a,0x47,0x08]
; X64-SSE-NEXT:    cvtps2pd (%rdi), %xmm1 # encoding: [0x0f,0x5a,0x0f]
; X64-SSE-NEXT:    cvtps2pd 24(%rdi), %xmm2 # encoding: [0x0f,0x5a,0x57,0x18]
; X64-SSE-NEXT:    cvtps2pd 16(%rdi), %xmm3 # encoding: [0x0f,0x5a,0x5f,0x10]
; X64-SSE-NEXT:    movups %xmm3, 32(%rsi) # encoding: [0x0f,0x11,0x5e,0x20]
; X64-SSE-NEXT:    movups %xmm2, 48(%rsi) # encoding: [0x0f,0x11,0x56,0x30]
; X64-SSE-NEXT:    movups %xmm1, (%rsi) # encoding: [0x0f,0x11,0x0e]
; X64-SSE-NEXT:    movups %xmm0, 16(%rsi) # encoding: [0x0f,0x11,0x46,0x10]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_frommem8:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vcvtps2pd (%rdi), %ymm0 # encoding: [0xc5,0xfc,0x5a,0x07]
; X64-AVX-NEXT:    vcvtps2pd 16(%rdi), %ymm1 # encoding: [0xc5,0xfc,0x5a,0x4f,0x10]
; X64-AVX-NEXT:    vmovups %ymm1, 32(%rsi) # encoding: [0xc5,0xfc,0x11,0x4e,0x20]
; X64-AVX-NEXT:    vmovups %ymm0, (%rsi) # encoding: [0xc5,0xfc,0x11,0x06]
; X64-AVX-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_frommem8:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vcvtps2pd (%rdi), %zmm0 # encoding: [0x62,0xf1,0x7c,0x48,0x5a,0x07]
; X64-AVX512VL-NEXT:    vmovups %zmm0, (%rsi) # encoding: [0x62,0xf1,0x7c,0x48,0x11,0x06]
; X64-AVX512VL-NEXT:    vzeroupper # encoding: [0xc5,0xf8,0x77]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0 = load <8 x float>, <8 x float>* %in
  %1 = fpext <8 x float> %0 to <8 x double>
  store <8 x double> %1, <8 x double>* %out, align 1
  ret void
}

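; fpext of constant operands should be constant-folded into a
; double-precision constant-pool load.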
define <2 x double> @fpext_fromconst() {
; X32-SSE-LABEL: fpext_fromconst:
; X32-SSE:       # %bb.0: # %entry
; X32-SSE-NEXT:    movaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X32-SSE-NEXT:    # encoding: [0x0f,0x28,0x05,A,A,A,A]
; X32-SSE-NEXT:    # fixup A - offset: 3, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-SSE-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX-LABEL: fpext_fromconst:
; X32-AVX:       # %bb.0: # %entry
; X32-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X32-AVX-NEXT:    # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X32-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-AVX-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: fpext_fromconst:
; X32-AVX512VL:       # %bb.0: # %entry
; X32-AVX512VL-NEXT:    vmovaps {{\.LCPI.*}}, %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
; X32-AVX512VL-NEXT:    # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X32-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}, kind: FK_Data_4
; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: fpext_fromconst:
; X64-SSE:       # %bb.0: # %entry
; X64-SSE-NEXT:    movaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X64-SSE-NEXT:    # encoding: [0x0f,0x28,0x05,A,A,A,A]
; X64-SSE-NEXT:    # fixup A - offset: 3, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: fpext_fromconst:
; X64-AVX:       # %bb.0: # %entry
; X64-AVX-NEXT:    vmovaps {{.*#+}} xmm0 = [1.0E+0,-2.0E+0]
; X64-AVX-NEXT:    # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-AVX-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: fpext_fromconst:
; X64-AVX512VL:       # %bb.0: # %entry
; X64-AVX512VL-NEXT:    vmovaps {{.*}}(%rip), %xmm0 # EVEX TO VEX Compression xmm0 = [1.0E+0,-2.0E+0]
; X64-AVX512VL-NEXT:    # encoding: [0xc5,0xf8,0x28,0x05,A,A,A,A]
; X64-AVX512VL-NEXT:    # fixup A - offset: 4, value: {{\.LCPI.*}}-4, kind: reloc_riprel_4byte
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
entry:
  %0  = insertelement <2 x float> undef, float 1.0, i32 0
  %1  = insertelement <2 x float> %0, float -2.0, i32 1
  %2  = fpext <2 x float> %1 to <2 x double>
  ret <2 x double> %2
}

; Make sure we don't narrow a volatile load.
define <2 x double> @PR42079(<4 x float>* %x) {
; X32-SSE-LABEL: PR42079:
; X32-SSE:       # %bb.0:
; X32-SSE-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-SSE-NEXT:    movaps (%eax), %xmm0 # encoding: [0x0f,0x28,0x00]
; X32-SSE-NEXT:    cvtps2pd %xmm0, %xmm0 # encoding: [0x0f,0x5a,0xc0]
; X32-SSE-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX-LABEL: PR42079:
; X32-AVX:       # %bb.0:
; X32-AVX-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX-NEXT:    vmovaps (%eax), %xmm0 # encoding: [0xc5,0xf8,0x28,0x00]
; X32-AVX-NEXT:    vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
; X32-AVX-NEXT:    retl # encoding: [0xc3]
;
; X32-AVX512VL-LABEL: PR42079:
; X32-AVX512VL:       # %bb.0:
; X32-AVX512VL-NEXT:    movl {{[0-9]+}}(%esp), %eax # encoding: [0x8b,0x44,0x24,0x04]
; X32-AVX512VL-NEXT:    vmovaps (%eax), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x00]
; X32-AVX512VL-NEXT:    vcvtps2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
; X32-AVX512VL-NEXT:    retl # encoding: [0xc3]
;
; X64-SSE-LABEL: PR42079:
; X64-SSE:       # %bb.0:
; X64-SSE-NEXT:    movaps (%rdi), %xmm0 # encoding: [0x0f,0x28,0x07]
; X64-SSE-NEXT:    cvtps2pd %xmm0, %xmm0 # encoding: [0x0f,0x5a,0xc0]
; X64-SSE-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX-LABEL: PR42079:
; X64-AVX:       # %bb.0:
; X64-AVX-NEXT:    vmovaps (%rdi), %xmm0 # encoding: [0xc5,0xf8,0x28,0x07]
; X64-AVX-NEXT:    vcvtps2pd %xmm0, %xmm0 # encoding: [0xc5,0xf8,0x5a,0xc0]
; X64-AVX-NEXT:    retq # encoding: [0xc3]
;
; X64-AVX512VL-LABEL: PR42079:
; X64-AVX512VL:       # %bb.0:
; X64-AVX512VL-NEXT:    vmovaps (%rdi), %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x28,0x07]
; X64-AVX512VL-NEXT:    vcvtps2pd %xmm0, %xmm0 # EVEX TO VEX Compression encoding: [0xc5,0xf8,0x5a,0xc0]
; X64-AVX512VL-NEXT:    retq # encoding: [0xc3]
  %a = load volatile <4 x float>, <4 x float>* %x
  %b = shufflevector <4 x float> %a, <4 x float> %a, <2 x i32> <i32 0, i32 1>
  %c = fpext <2 x float> %b to <2 x double>
  ret <2 x double> %c
}