; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i386-unknown-unknown     -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,CHECK32,AVX512_32,AVX512DQVL_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown   -mattr=+avx512f,+avx512dq,+avx512vl | FileCheck %s --check-prefixes=CHECK,CHECK64,AVX512_64,AVX512DQVL_64
; RUN: llc < %s -mtriple=i386-unknown-unknown     -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,CHECK32,AVX512_32,AVX512DQ_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown   -mattr=+avx512f,+avx512dq | FileCheck %s --check-prefixes=CHECK,CHECK64,AVX512_64,AVX512DQ_64
; RUN: llc < %s -mtriple=i386-unknown-unknown     -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,CHECK32,AVX512_32,AVX512F_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown   -mattr=+avx512f | FileCheck %s --check-prefixes=CHECK,CHECK64,AVX512_64,AVX512F_64
; RUN: llc < %s -mtriple=i386-unknown-unknown     -mattr=+sse2    | FileCheck %s --check-prefixes=CHECK,CHECK32,SSE2_32
; RUN: llc < %s -mtriple=x86_64-unknown-unknown   -mattr=+sse2    | FileCheck %s --check-prefixes=CHECK,CHECK64,SSE2_64
; RUN: llc < %s -mtriple=i386-unknown-unknown     -mattr=-sse     | FileCheck %s --check-prefixes=CHECK,CHECK32,X87

; Verify that scalar integer conversions to FP compile successfully
; (at one time long double failed with avx512f), and that reasonable
; instruction sequences are selected based on subtarget features.

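; Unsigned i32 -> float. AVX512 targets have a direct vcvtusi2ss. Plain SSE2
; in 32-bit mode has no unsigned convert, so it builds the value as a double
; with the usual bias-constant trick (OR the bits into a large power-of-two
; constant, then subtract that constant back out) and narrows with cvtsd2ss.
; The SSE-less X87 target zero-extends to 64 bits on the stack and uses the
; signed fildll.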
define float @u32_to_f(i32 %a) nounwind {
; AVX512_32-LABEL: u32_to_f:
; AVX512_32:       # %bb.0:
; AVX512_32-NEXT:    pushl %eax
; AVX512_32-NEXT:    vcvtusi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512_32-NEXT:    flds (%esp)
; AVX512_32-NEXT:    popl %eax
; AVX512_32-NEXT:    retl
;
; AVX512_64-LABEL: u32_to_f:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtusi2ssl %edi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; SSE2_32-LABEL: u32_to_f:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %eax
; SSE2_32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2_32-NEXT:    orpd %xmm0, %xmm1
; SSE2_32-NEXT:    subsd %xmm0, %xmm1
; SSE2_32-NEXT:    xorps %xmm0, %xmm0
; SSE2_32-NEXT:    cvtsd2ss %xmm1, %xmm0
; SSE2_32-NEXT:    movss %xmm0, (%esp)
; SSE2_32-NEXT:    flds (%esp)
; SSE2_32-NEXT:    popl %eax
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: u32_to_f:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    movl %edi, %eax
; SSE2_64-NEXT:    cvtsi2ssq %rax, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: u32_to_f:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %r = uitofp i32 %a to float
  ret float %r
}

define float @s32_to_f(i32 %a) nounwind {
; AVX512_32-LABEL: s32_to_f:
; AVX512_32:       # %bb.0:
; AVX512_32-NEXT:    pushl %eax
; AVX512_32-NEXT:    vcvtsi2ssl {{[0-9]+}}(%esp), %xmm0, %xmm0
; AVX512_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512_32-NEXT:    flds (%esp)
; AVX512_32-NEXT:    popl %eax
; AVX512_32-NEXT:    retl
;
; AVX512_64-LABEL: s32_to_f:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtsi2ssl %edi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; SSE2_32-LABEL: s32_to_f:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %eax
; SSE2_32-NEXT:    cvtsi2ssl {{[0-9]+}}(%esp), %xmm0
; SSE2_32-NEXT:    movss %xmm0, (%esp)
; SSE2_32-NEXT:    flds (%esp)
; SSE2_32-NEXT:    popl %eax
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: s32_to_f:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    cvtsi2ssl %edi, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: s32_to_f:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    popl %eax
; X87-NEXT:    retl
  %r = sitofp i32 %a to float
  ret float %r
}

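; Unsigned i32 -> double. Same bias-constant trick on SSE2_32, except the
; subsd result is already the final double; X87 again zero-extends to 64 bits
; in memory and converts with fildll.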
define double @u32_to_d(i32 %a) nounwind {
; AVX512_32-LABEL: u32_to_d:
; AVX512_32:       # %bb.0:
; AVX512_32-NEXT:    pushl %ebp
; AVX512_32-NEXT:    movl %esp, %ebp
; AVX512_32-NEXT:    andl $-8, %esp
; AVX512_32-NEXT:    subl $8, %esp
; AVX512_32-NEXT:    vcvtusi2sdl 8(%ebp), %xmm0, %xmm0
; AVX512_32-NEXT:    vmovsd %xmm0, (%esp)
; AVX512_32-NEXT:    fldl (%esp)
; AVX512_32-NEXT:    movl %ebp, %esp
; AVX512_32-NEXT:    popl %ebp
; AVX512_32-NEXT:    retl
;
; AVX512_64-LABEL: u32_to_d:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtusi2sdl %edi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; SSE2_32-LABEL: u32_to_d:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $8, %esp
; SSE2_32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2_32-NEXT:    orpd %xmm0, %xmm1
; SSE2_32-NEXT:    subsd %xmm0, %xmm1
; SSE2_32-NEXT:    movsd %xmm1, (%esp)
; SSE2_32-NEXT:    fldl (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: u32_to_d:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    movl %edi, %eax
; SSE2_64-NEXT:    cvtsi2sdq %rax, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: u32_to_d:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %r = uitofp i32 %a to double
  ret double %r
}

define double @s32_to_d(i32 %a) nounwind {
; AVX512_32-LABEL: s32_to_d:
; AVX512_32:       # %bb.0:
; AVX512_32-NEXT:    pushl %ebp
; AVX512_32-NEXT:    movl %esp, %ebp
; AVX512_32-NEXT:    andl $-8, %esp
; AVX512_32-NEXT:    subl $8, %esp
; AVX512_32-NEXT:    vcvtsi2sdl 8(%ebp), %xmm0, %xmm0
; AVX512_32-NEXT:    vmovsd %xmm0, (%esp)
; AVX512_32-NEXT:    fldl (%esp)
; AVX512_32-NEXT:    movl %ebp, %esp
; AVX512_32-NEXT:    popl %ebp
; AVX512_32-NEXT:    retl
;
; AVX512_64-LABEL: s32_to_d:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtsi2sdl %edi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; SSE2_32-LABEL: s32_to_d:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $8, %esp
; SSE2_32-NEXT:    cvtsi2sdl 8(%ebp), %xmm0
; SSE2_32-NEXT:    movsd %xmm0, (%esp)
; SSE2_32-NEXT:    fldl (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: s32_to_d:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    cvtsi2sdl %edi, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: s32_to_d:
; X87:       # %bb.0:
; X87-NEXT:    pushl %eax
; X87-NEXT:    movl {{[0-9]+}}(%esp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    fildl (%esp)
; X87-NEXT:    popl %eax
; X87-NEXT:    retl
  %r = sitofp i32 %a to double
  ret double %r
}

define x86_fp80 @u32_to_x(i32 %a) nounwind {
; AVX512_32-LABEL: u32_to_x:
; AVX512_32:       # %bb.0:
; AVX512_32-NEXT:    pushl %ebp
; AVX512_32-NEXT:    movl %esp, %ebp
; AVX512_32-NEXT:    andl $-8, %esp
; AVX512_32-NEXT:    subl $8, %esp
; AVX512_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512_32-NEXT:    vmovss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; AVX512_32-NEXT:    vorpd %xmm0, %xmm1, %xmm1
; AVX512_32-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
; AVX512_32-NEXT:    vmovsd %xmm0, (%esp)
; AVX512_32-NEXT:    fldl (%esp)
; AVX512_32-NEXT:    movl %ebp, %esp
; AVX512_32-NEXT:    popl %ebp
; AVX512_32-NEXT:    retl
;
; AVX512_64-LABEL: u32_to_x:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vmovq {{.*#+}} xmm0 = mem[0],zero
; AVX512_64-NEXT:    vmovd %edi, %xmm1
; AVX512_64-NEXT:    vpor %xmm0, %xmm1, %xmm1
; AVX512_64-NEXT:    vsubsd %xmm0, %xmm1, %xmm0
; AVX512_64-NEXT:    vmovsd %xmm0, -{{[0-9]+}}(%rsp)
; AVX512_64-NEXT:    fldl -{{[0-9]+}}(%rsp)
; AVX512_64-NEXT:    retq
;
; SSE2_32-LABEL: u32_to_x:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $8, %esp
; SSE2_32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT:    movss {{.*#+}} xmm1 = mem[0],zero,zero,zero
; SSE2_32-NEXT:    orpd %xmm0, %xmm1
; SSE2_32-NEXT:    subsd %xmm0, %xmm1
; SSE2_32-NEXT:    movsd %xmm1, (%esp)
; SSE2_32-NEXT:    fldl (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: u32_to_x:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    movl %edi, %eax
; SSE2_64-NEXT:    movq %rax, -{{[0-9]+}}(%rsp)
; SSE2_64-NEXT:    fildll -{{[0-9]+}}(%rsp)
; SSE2_64-NEXT:    retq
;
; X87-LABEL: u32_to_x:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl $0, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %r = uitofp i32 %a to x86_fp80
  ret x86_fp80 %r
}

define x86_fp80 @s32_to_x(i32 %a) nounwind {
; CHECK32-LABEL: s32_to_x:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    pushl %eax
; CHECK32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; CHECK32-NEXT:    movl %eax, (%esp)
; CHECK32-NEXT:    fildl (%esp)
; CHECK32-NEXT:    popl %eax
; CHECK32-NEXT:    retl
;
; CHECK64-LABEL: s32_to_x:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    movl %edi, -{{[0-9]+}}(%rsp)
; CHECK64-NEXT:    fildl -{{[0-9]+}}(%rsp)
; CHECK64-NEXT:    retq
  %r = sitofp i32 %a to x86_fp80
  ret x86_fp80 %r
}

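; Unsigned i64 -> float. 64-bit AVX512 has vcvtusi2ssq; the 32-bit DQ targets
; use vcvtuqq2ps on a widened vector. AVX512F_32, SSE2_32 and X87 convert with
; the signed fildll and then add a sign-selected correction constant picked by
; setns. SSE2_64 halves negative inputs with a shift/or (keeping the low bit
; so rounding stays correct), converts, and doubles the result.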
define float @u64_to_f(i64 %a) nounwind {
; AVX512DQVL_32-LABEL: u64_to_f:
; AVX512DQVL_32:       # %bb.0:
; AVX512DQVL_32-NEXT:    pushl %eax
; AVX512DQVL_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32-NEXT:    vcvtuqq2ps %ymm0, %xmm0
; AVX512DQVL_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512DQVL_32-NEXT:    flds (%esp)
; AVX512DQVL_32-NEXT:    popl %eax
; AVX512DQVL_32-NEXT:    vzeroupper
; AVX512DQVL_32-NEXT:    retl
;
; AVX512_64-LABEL: u64_to_f:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtusi2ssq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; AVX512DQ_32-LABEL: u64_to_f:
; AVX512DQ_32:       # %bb.0:
; AVX512DQ_32-NEXT:    pushl %eax
; AVX512DQ_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32-NEXT:    vcvtuqq2ps %zmm0, %ymm0
; AVX512DQ_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512DQ_32-NEXT:    flds (%esp)
; AVX512DQ_32-NEXT:    popl %eax
; AVX512DQ_32-NEXT:    vzeroupper
; AVX512DQ_32-NEXT:    retl
;
; AVX512F_32-LABEL: u64_to_f:
; AVX512F_32:       # %bb.0:
; AVX512F_32-NEXT:    pushl %ebp
; AVX512F_32-NEXT:    movl %esp, %ebp
; AVX512F_32-NEXT:    andl $-8, %esp
; AVX512F_32-NEXT:    subl $16, %esp
; AVX512F_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32-NEXT:    vmovlps %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    xorl %eax, %eax
; AVX512F_32-NEXT:    cmpl $0, 12(%ebp)
; AVX512F_32-NEXT:    setns %al
; AVX512F_32-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; AVX512F_32-NEXT:    fstps {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; AVX512F_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512F_32-NEXT:    flds (%esp)
; AVX512F_32-NEXT:    movl %ebp, %esp
; AVX512F_32-NEXT:    popl %ebp
; AVX512F_32-NEXT:    retl
;
; SSE2_32-LABEL: u64_to_f:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $16, %esp
; SSE2_32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT:    movlps %xmm0, {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    xorl %eax, %eax
; SSE2_32-NEXT:    cmpl $0, 12(%ebp)
; SSE2_32-NEXT:    setns %al
; SSE2_32-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; SSE2_32-NEXT:    fstps {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2_32-NEXT:    movss %xmm0, (%esp)
; SSE2_32-NEXT:    flds (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: u64_to_f:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    testq %rdi, %rdi
; SSE2_64-NEXT:    js .LBB6_1
; SSE2_64-NEXT:  # %bb.2:
; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT:    retq
; SSE2_64-NEXT:  .LBB6_1:
; SSE2_64-NEXT:    movq %rdi, %rax
; SSE2_64-NEXT:    shrq %rax
; SSE2_64-NEXT:    andl $1, %edi
; SSE2_64-NEXT:    orq %rax, %rdi
; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT:    addss %xmm0, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: u64_to_f:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $16, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    movl %eax, {{[0-9]+}}(%esp)
; X87-NEXT:    xorl %eax, %eax
; X87-NEXT:    testl %ecx, %ecx
; X87-NEXT:    setns %al
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; X87-NEXT:    fstps {{[0-9]+}}(%esp)
; X87-NEXT:    flds {{[0-9]+}}(%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %r = uitofp i64 %a to float
  ret float %r
}

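; Signed i64 -> float. cvtsi2ssq needs 64-bit mode, so the 32-bit SSE targets
; without DQ convert on the x87 stack with fildll and round to single
; precision through a store/reload.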
define float @s64_to_f(i64 %a) nounwind {
; AVX512DQVL_32-LABEL: s64_to_f:
; AVX512DQVL_32:       # %bb.0:
; AVX512DQVL_32-NEXT:    pushl %eax
; AVX512DQVL_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32-NEXT:    vcvtqq2ps %ymm0, %xmm0
; AVX512DQVL_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512DQVL_32-NEXT:    flds (%esp)
; AVX512DQVL_32-NEXT:    popl %eax
; AVX512DQVL_32-NEXT:    vzeroupper
; AVX512DQVL_32-NEXT:    retl
;
; AVX512_64-LABEL: s64_to_f:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; AVX512DQ_32-LABEL: s64_to_f:
; AVX512DQ_32:       # %bb.0:
; AVX512DQ_32-NEXT:    pushl %eax
; AVX512DQ_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32-NEXT:    vcvtqq2ps %zmm0, %ymm0
; AVX512DQ_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512DQ_32-NEXT:    flds (%esp)
; AVX512DQ_32-NEXT:    popl %eax
; AVX512DQ_32-NEXT:    vzeroupper
; AVX512DQ_32-NEXT:    retl
;
; AVX512F_32-LABEL: s64_to_f:
; AVX512F_32:       # %bb.0:
; AVX512F_32-NEXT:    pushl %eax
; AVX512F_32-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    fstps (%esp)
; AVX512F_32-NEXT:    flds (%esp)
; AVX512F_32-NEXT:    popl %eax
; AVX512F_32-NEXT:    retl
;
; SSE2_32-LABEL: s64_to_f:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %eax
; SSE2_32-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    fstps (%esp)
; SSE2_32-NEXT:    flds (%esp)
; SSE2_32-NEXT:    popl %eax
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: s64_to_f:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: s64_to_f:
; X87:       # %bb.0:
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %r = sitofp i64 %a to float
  ret float %r
}

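; Same conversion, but the add of 5 means the value is no longer sitting in
; its argument slot, so the 32-bit targets rebuild the i64 in an XMM register
; or on the stack before converting.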
define float @s64_to_f_2(i64 %a) nounwind {
; AVX512DQVL_32-LABEL: s64_to_f_2:
; AVX512DQVL_32:       # %bb.0:
; AVX512DQVL_32-NEXT:    pushl %eax
; AVX512DQVL_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; AVX512DQVL_32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; AVX512DQVL_32-NEXT:    addl $5, %eax
; AVX512DQVL_32-NEXT:    adcl $0, %ecx
; AVX512DQVL_32-NEXT:    vmovd %eax, %xmm0
; AVX512DQVL_32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX512DQVL_32-NEXT:    vcvtqq2ps %ymm0, %xmm0
; AVX512DQVL_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512DQVL_32-NEXT:    flds (%esp)
; AVX512DQVL_32-NEXT:    popl %eax
; AVX512DQVL_32-NEXT:    vzeroupper
; AVX512DQVL_32-NEXT:    retl
;
; AVX512_64-LABEL: s64_to_f_2:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    addq $5, %rdi
; AVX512_64-NEXT:    vcvtsi2ssq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; AVX512DQ_32-LABEL: s64_to_f_2:
; AVX512DQ_32:       # %bb.0:
; AVX512DQ_32-NEXT:    pushl %eax
; AVX512DQ_32-NEXT:    movl {{[0-9]+}}(%esp), %eax
; AVX512DQ_32-NEXT:    movl {{[0-9]+}}(%esp), %ecx
; AVX512DQ_32-NEXT:    addl $5, %eax
; AVX512DQ_32-NEXT:    adcl $0, %ecx
; AVX512DQ_32-NEXT:    vmovd %eax, %xmm0
; AVX512DQ_32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX512DQ_32-NEXT:    vcvtqq2ps %zmm0, %ymm0
; AVX512DQ_32-NEXT:    vmovss %xmm0, (%esp)
; AVX512DQ_32-NEXT:    flds (%esp)
; AVX512DQ_32-NEXT:    popl %eax
; AVX512DQ_32-NEXT:    vzeroupper
; AVX512DQ_32-NEXT:    retl
;
; AVX512F_32-LABEL: s64_to_f_2:
; AVX512F_32:       # %bb.0:
; AVX512F_32-NEXT:    pushl %ebp
; AVX512F_32-NEXT:    movl %esp, %ebp
; AVX512F_32-NEXT:    andl $-8, %esp
; AVX512F_32-NEXT:    subl $16, %esp
; AVX512F_32-NEXT:    movl 8(%ebp), %eax
; AVX512F_32-NEXT:    movl 12(%ebp), %ecx
; AVX512F_32-NEXT:    addl $5, %eax
; AVX512F_32-NEXT:    adcl $0, %ecx
; AVX512F_32-NEXT:    vmovd %eax, %xmm0
; AVX512F_32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX512F_32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    fstps {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    flds {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    movl %ebp, %esp
; AVX512F_32-NEXT:    popl %ebp
; AVX512F_32-NEXT:    retl
;
; SSE2_32-LABEL: s64_to_f_2:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $16, %esp
; SSE2_32-NEXT:    movl 8(%ebp), %eax
; SSE2_32-NEXT:    movl 12(%ebp), %ecx
; SSE2_32-NEXT:    addl $5, %eax
; SSE2_32-NEXT:    adcl $0, %ecx
; SSE2_32-NEXT:    movd %ecx, %xmm0
; SSE2_32-NEXT:    movd %eax, %xmm1
; SSE2_32-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2_32-NEXT:    movq %xmm1, {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    fstps {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    flds {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: s64_to_f_2:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    addq $5, %rdi
; SSE2_64-NEXT:    cvtsi2ssq %rdi, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: s64_to_f_2:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    addl $5, %eax
; X87-NEXT:    adcl $0, %ecx
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %a1 = add i64 %a, 5
  %r = sitofp i64 %a1 to float
  ret float %r
}

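; Unsigned i64 -> double. 64-bit AVX512 has vcvtusi2sdq and the 32-bit DQ
; targets use vcvtuqq2pd. The SSE2 and AVX512F_32 targets split the value into
; 32-bit halves, pair each half with a magic exponent word, subtract the
; matching constants, and sum the two partial doubles. X87 uses fildll plus
; the sign-selected correction.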
define double @u64_to_d(i64 %a) nounwind {
; AVX512DQVL_32-LABEL: u64_to_d:
; AVX512DQVL_32:       # %bb.0:
; AVX512DQVL_32-NEXT:    pushl %ebp
; AVX512DQVL_32-NEXT:    movl %esp, %ebp
; AVX512DQVL_32-NEXT:    andl $-8, %esp
; AVX512DQVL_32-NEXT:    subl $8, %esp
; AVX512DQVL_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32-NEXT:    vcvtuqq2pd %ymm0, %ymm0
; AVX512DQVL_32-NEXT:    vmovlps %xmm0, (%esp)
; AVX512DQVL_32-NEXT:    fldl (%esp)
; AVX512DQVL_32-NEXT:    movl %ebp, %esp
; AVX512DQVL_32-NEXT:    popl %ebp
; AVX512DQVL_32-NEXT:    vzeroupper
; AVX512DQVL_32-NEXT:    retl
;
; AVX512_64-LABEL: u64_to_d:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtusi2sdq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; AVX512DQ_32-LABEL: u64_to_d:
; AVX512DQ_32:       # %bb.0:
; AVX512DQ_32-NEXT:    pushl %ebp
; AVX512DQ_32-NEXT:    movl %esp, %ebp
; AVX512DQ_32-NEXT:    andl $-8, %esp
; AVX512DQ_32-NEXT:    subl $8, %esp
; AVX512DQ_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32-NEXT:    vcvtuqq2pd %zmm0, %zmm0
; AVX512DQ_32-NEXT:    vmovlps %xmm0, (%esp)
; AVX512DQ_32-NEXT:    fldl (%esp)
; AVX512DQ_32-NEXT:    movl %ebp, %esp
; AVX512DQ_32-NEXT:    popl %ebp
; AVX512DQ_32-NEXT:    vzeroupper
; AVX512DQ_32-NEXT:    retl
;
; AVX512F_32-LABEL: u64_to_d:
; AVX512F_32:       # %bb.0:
; AVX512F_32-NEXT:    pushl %ebp
; AVX512F_32-NEXT:    movl %esp, %ebp
; AVX512F_32-NEXT:    andl $-8, %esp
; AVX512F_32-NEXT:    subl $8, %esp
; AVX512F_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512F_32-NEXT:    vunpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; AVX512F_32-NEXT:    vsubpd {{\.LCPI.*}}, %xmm0, %xmm0
; AVX512F_32-NEXT:    vhaddpd %xmm0, %xmm0, %xmm0
; AVX512F_32-NEXT:    vmovlpd %xmm0, (%esp)
; AVX512F_32-NEXT:    fldl (%esp)
; AVX512F_32-NEXT:    movl %ebp, %esp
; AVX512F_32-NEXT:    popl %ebp
; AVX512F_32-NEXT:    retl
;
; SSE2_32-LABEL: u64_to_d:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $8, %esp
; SSE2_32-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2_32-NEXT:    unpcklps {{.*#+}} xmm0 = xmm0[0],mem[0],xmm0[1],mem[1]
; SSE2_32-NEXT:    subpd {{\.LCPI.*}}, %xmm0
; SSE2_32-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE2_32-NEXT:    addpd %xmm0, %xmm1
; SSE2_32-NEXT:    movlpd %xmm1, (%esp)
; SSE2_32-NEXT:    fldl (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: u64_to_d:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    movq %rdi, %xmm1
; SSE2_64-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],mem[0],xmm1[1],mem[1]
; SSE2_64-NEXT:    subpd {{.*}}(%rip), %xmm1
; SSE2_64-NEXT:    pshufd {{.*#+}} xmm0 = xmm1[2,3,0,1]
; SSE2_64-NEXT:    addpd %xmm1, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: u64_to_d:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $16, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    xorl %eax, %eax
; X87-NEXT:    testl %ecx, %ecx
; X87-NEXT:    setns %al
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; X87-NEXT:    fstpl {{[0-9]+}}(%esp)
; X87-NEXT:    fldl {{[0-9]+}}(%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %r = uitofp i64 %a to double
  ret double %r
}

define double @s64_to_d(i64 %a) nounwind {
; AVX512DQVL_32-LABEL: s64_to_d:
; AVX512DQVL_32:       # %bb.0:
; AVX512DQVL_32-NEXT:    pushl %ebp
; AVX512DQVL_32-NEXT:    movl %esp, %ebp
; AVX512DQVL_32-NEXT:    andl $-8, %esp
; AVX512DQVL_32-NEXT:    subl $8, %esp
; AVX512DQVL_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQVL_32-NEXT:    vcvtqq2pd %ymm0, %ymm0
; AVX512DQVL_32-NEXT:    vmovlps %xmm0, (%esp)
; AVX512DQVL_32-NEXT:    fldl (%esp)
; AVX512DQVL_32-NEXT:    movl %ebp, %esp
; AVX512DQVL_32-NEXT:    popl %ebp
; AVX512DQVL_32-NEXT:    vzeroupper
; AVX512DQVL_32-NEXT:    retl
;
; AVX512_64-LABEL: s64_to_d:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; AVX512DQ_32-LABEL: s64_to_d:
; AVX512DQ_32:       # %bb.0:
; AVX512DQ_32-NEXT:    pushl %ebp
; AVX512DQ_32-NEXT:    movl %esp, %ebp
; AVX512DQ_32-NEXT:    andl $-8, %esp
; AVX512DQ_32-NEXT:    subl $8, %esp
; AVX512DQ_32-NEXT:    vmovsd {{.*#+}} xmm0 = mem[0],zero
; AVX512DQ_32-NEXT:    vcvtqq2pd %zmm0, %zmm0
; AVX512DQ_32-NEXT:    vmovlps %xmm0, (%esp)
; AVX512DQ_32-NEXT:    fldl (%esp)
; AVX512DQ_32-NEXT:    movl %ebp, %esp
; AVX512DQ_32-NEXT:    popl %ebp
; AVX512DQ_32-NEXT:    vzeroupper
; AVX512DQ_32-NEXT:    retl
;
; AVX512F_32-LABEL: s64_to_d:
; AVX512F_32:       # %bb.0:
; AVX512F_32-NEXT:    pushl %ebp
; AVX512F_32-NEXT:    movl %esp, %ebp
; AVX512F_32-NEXT:    andl $-8, %esp
; AVX512F_32-NEXT:    subl $8, %esp
; AVX512F_32-NEXT:    fildll 8(%ebp)
; AVX512F_32-NEXT:    fstpl (%esp)
; AVX512F_32-NEXT:    fldl (%esp)
; AVX512F_32-NEXT:    movl %ebp, %esp
; AVX512F_32-NEXT:    popl %ebp
; AVX512F_32-NEXT:    retl
;
; SSE2_32-LABEL: s64_to_d:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $8, %esp
; SSE2_32-NEXT:    fildll 8(%ebp)
; SSE2_32-NEXT:    fstpl (%esp)
; SSE2_32-NEXT:    fldl (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: s64_to_d:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    cvtsi2sdq %rdi, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: s64_to_d:
; X87:       # %bb.0:
; X87-NEXT:    fildll {{[0-9]+}}(%esp)
; X87-NEXT:    retl
  %r = sitofp i64 %a to double
  ret double %r
}

define double @s64_to_d_2(i64 %a) nounwind {
; AVX512DQVL_32-LABEL: s64_to_d_2:
; AVX512DQVL_32:       # %bb.0:
; AVX512DQVL_32-NEXT:    pushl %ebp
; AVX512DQVL_32-NEXT:    movl %esp, %ebp
; AVX512DQVL_32-NEXT:    andl $-8, %esp
; AVX512DQVL_32-NEXT:    subl $8, %esp
; AVX512DQVL_32-NEXT:    movl 8(%ebp), %eax
; AVX512DQVL_32-NEXT:    movl 12(%ebp), %ecx
; AVX512DQVL_32-NEXT:    addl $5, %eax
; AVX512DQVL_32-NEXT:    adcl $0, %ecx
; AVX512DQVL_32-NEXT:    vmovd %eax, %xmm0
; AVX512DQVL_32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX512DQVL_32-NEXT:    vcvtqq2pd %ymm0, %ymm0
; AVX512DQVL_32-NEXT:    vmovlps %xmm0, (%esp)
; AVX512DQVL_32-NEXT:    fldl (%esp)
; AVX512DQVL_32-NEXT:    movl %ebp, %esp
; AVX512DQVL_32-NEXT:    popl %ebp
; AVX512DQVL_32-NEXT:    vzeroupper
; AVX512DQVL_32-NEXT:    retl
;
; AVX512_64-LABEL: s64_to_d_2:
; AVX512_64:       # %bb.0:
; AVX512_64-NEXT:    addq $5, %rdi
; AVX512_64-NEXT:    vcvtsi2sdq %rdi, %xmm0, %xmm0
; AVX512_64-NEXT:    retq
;
; AVX512DQ_32-LABEL: s64_to_d_2:
; AVX512DQ_32:       # %bb.0:
; AVX512DQ_32-NEXT:    pushl %ebp
; AVX512DQ_32-NEXT:    movl %esp, %ebp
; AVX512DQ_32-NEXT:    andl $-8, %esp
; AVX512DQ_32-NEXT:    subl $8, %esp
; AVX512DQ_32-NEXT:    movl 8(%ebp), %eax
; AVX512DQ_32-NEXT:    movl 12(%ebp), %ecx
; AVX512DQ_32-NEXT:    addl $5, %eax
; AVX512DQ_32-NEXT:    adcl $0, %ecx
; AVX512DQ_32-NEXT:    vmovd %eax, %xmm0
; AVX512DQ_32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX512DQ_32-NEXT:    vcvtqq2pd %zmm0, %zmm0
; AVX512DQ_32-NEXT:    vmovlps %xmm0, (%esp)
; AVX512DQ_32-NEXT:    fldl (%esp)
; AVX512DQ_32-NEXT:    movl %ebp, %esp
; AVX512DQ_32-NEXT:    popl %ebp
; AVX512DQ_32-NEXT:    vzeroupper
; AVX512DQ_32-NEXT:    retl
;
; AVX512F_32-LABEL: s64_to_d_2:
; AVX512F_32:       # %bb.0:
; AVX512F_32-NEXT:    pushl %ebp
; AVX512F_32-NEXT:    movl %esp, %ebp
; AVX512F_32-NEXT:    andl $-8, %esp
; AVX512F_32-NEXT:    subl $16, %esp
; AVX512F_32-NEXT:    movl 8(%ebp), %eax
; AVX512F_32-NEXT:    movl 12(%ebp), %ecx
; AVX512F_32-NEXT:    addl $5, %eax
; AVX512F_32-NEXT:    adcl $0, %ecx
; AVX512F_32-NEXT:    vmovd %eax, %xmm0
; AVX512F_32-NEXT:    vpinsrd $1, %ecx, %xmm0, %xmm0
; AVX512F_32-NEXT:    vmovq %xmm0, {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    fildll {{[0-9]+}}(%esp)
; AVX512F_32-NEXT:    fstpl (%esp)
; AVX512F_32-NEXT:    fldl (%esp)
; AVX512F_32-NEXT:    movl %ebp, %esp
; AVX512F_32-NEXT:    popl %ebp
; AVX512F_32-NEXT:    retl
;
; SSE2_32-LABEL: s64_to_d_2:
; SSE2_32:       # %bb.0:
; SSE2_32-NEXT:    pushl %ebp
; SSE2_32-NEXT:    movl %esp, %ebp
; SSE2_32-NEXT:    andl $-8, %esp
; SSE2_32-NEXT:    subl $16, %esp
; SSE2_32-NEXT:    movl 8(%ebp), %eax
; SSE2_32-NEXT:    movl 12(%ebp), %ecx
; SSE2_32-NEXT:    addl $5, %eax
; SSE2_32-NEXT:    adcl $0, %ecx
; SSE2_32-NEXT:    movd %ecx, %xmm0
; SSE2_32-NEXT:    movd %eax, %xmm1
; SSE2_32-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2_32-NEXT:    movq %xmm1, {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    fildll {{[0-9]+}}(%esp)
; SSE2_32-NEXT:    fstpl (%esp)
; SSE2_32-NEXT:    fldl (%esp)
; SSE2_32-NEXT:    movl %ebp, %esp
; SSE2_32-NEXT:    popl %ebp
; SSE2_32-NEXT:    retl
;
; SSE2_64-LABEL: s64_to_d_2:
; SSE2_64:       # %bb.0:
; SSE2_64-NEXT:    addq $5, %rdi
; SSE2_64-NEXT:    cvtsi2sdq %rdi, %xmm0
; SSE2_64-NEXT:    retq
;
; X87-LABEL: s64_to_d_2:
; X87:       # %bb.0:
; X87-NEXT:    pushl %ebp
; X87-NEXT:    movl %esp, %ebp
; X87-NEXT:    andl $-8, %esp
; X87-NEXT:    subl $8, %esp
; X87-NEXT:    movl 8(%ebp), %eax
; X87-NEXT:    movl 12(%ebp), %ecx
; X87-NEXT:    addl $5, %eax
; X87-NEXT:    adcl $0, %ecx
; X87-NEXT:    movl %eax, (%esp)
; X87-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; X87-NEXT:    fildll (%esp)
; X87-NEXT:    movl %ebp, %esp
; X87-NEXT:    popl %ebp
; X87-NEXT:    retl
  %b = add i64 %a, 5
  %f = sitofp i64 %b to double
  ret double %f
}

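; Unsigned i64 -> x86_fp80. Every configuration loads the value with the
; signed fildll and adds a sign-selected correction constant to undo the
; signed interpretation.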
define x86_fp80 @u64_to_x(i64 %a) nounwind {
; CHECK32-LABEL: u64_to_x:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    pushl %ebp
; CHECK32-NEXT:    movl %esp, %ebp
; CHECK32-NEXT:    andl $-8, %esp
; CHECK32-NEXT:    subl $8, %esp
; CHECK32-NEXT:    movl 8(%ebp), %eax
; CHECK32-NEXT:    movl 12(%ebp), %ecx
; CHECK32-NEXT:    movl %ecx, {{[0-9]+}}(%esp)
; CHECK32-NEXT:    movl %eax, (%esp)
; CHECK32-NEXT:    xorl %eax, %eax
; CHECK32-NEXT:    testl %ecx, %ecx
; CHECK32-NEXT:    setns %al
; CHECK32-NEXT:    fildll (%esp)
; CHECK32-NEXT:    fadds {{\.LCPI.*}}(,%eax,4)
; CHECK32-NEXT:    movl %ebp, %esp
; CHECK32-NEXT:    popl %ebp
; CHECK32-NEXT:    retl
;
; CHECK64-LABEL: u64_to_x:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK64-NEXT:    xorl %eax, %eax
; CHECK64-NEXT:    testq %rdi, %rdi
; CHECK64-NEXT:    setns %al
; CHECK64-NEXT:    fildll -{{[0-9]+}}(%rsp)
; CHECK64-NEXT:    fadds {{\.LCPI.*}}(,%rax,4)
; CHECK64-NEXT:    retq
  %r = uitofp i64 %a to x86_fp80
  ret x86_fp80 %r
}

define x86_fp80 @s64_to_x(i64 %a) nounwind {
; CHECK32-LABEL: s64_to_x:
; CHECK32:       # %bb.0:
; CHECK32-NEXT:    fildll {{[0-9]+}}(%esp)
; CHECK32-NEXT:    retl
;
; CHECK64-LABEL: s64_to_x:
; CHECK64:       # %bb.0:
; CHECK64-NEXT:    movq %rdi, -{{[0-9]+}}(%rsp)
; CHECK64-NEXT:    fildll -{{[0-9]+}}(%rsp)
; CHECK64-NEXT:    retq
  %r = sitofp i64 %a to x86_fp80
  ret x86_fp80 %r
}