; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=i686-unknown             | FileCheck %s --check-prefix=X86
; RUN: llc < %s -mtriple=i686-unknown -mattr=sse2 | FileCheck %s --check-prefix=SSE2
; RUN: llc < %s -mtriple=x86_64-unknown | FileCheck %s --check-prefix=X64

define i64 @testmsxs(float %x) {
; X86-LABEL: testmsxs:
; X86:       # %bb.0: # %entry
; X86-NEXT:    pushl %eax
; X86-NEXT:    .cfi_def_cfa_offset 8
; X86-NEXT:    flds {{[0-9]+}}(%esp)
; X86-NEXT:    fstps (%esp)
; X86-NEXT:    calll llroundf
; X86-NEXT:    popl %ecx
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
;
; SSE2-LABEL: testmsxs:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    pushl %eax
; SSE2-NEXT:    .cfi_def_cfa_offset 8
; SSE2-NEXT:    movss {{.*#+}} xmm0 = mem[0],zero,zero,zero
; SSE2-NEXT:    movss %xmm0, (%esp)
; SSE2-NEXT:    calll llroundf
; SSE2-NEXT:    popl %ecx
; SSE2-NEXT:    .cfi_def_cfa_offset 4
; SSE2-NEXT:    retl
;
; X64-LABEL: testmsxs:
; X64:       # %bb.0: # %entry
; X64-NEXT:    jmp llroundf@PLT # TAILCALL
entry:
  %0 = tail call i64 @llvm.llround.f32(float %x)
  ret i64 %0
}

define i64 @testmsxd(double %x) {
; X86-LABEL: testmsxd:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $8, %esp
; X86-NEXT:    .cfi_def_cfa_offset 12
; X86-NEXT:    fldl {{[0-9]+}}(%esp)
; X86-NEXT:    fstpl (%esp)
; X86-NEXT:    calll llround
; X86-NEXT:    addl $8, %esp
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
;
; SSE2-LABEL: testmsxd:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    subl $8, %esp
; SSE2-NEXT:    .cfi_def_cfa_offset 12
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = mem[0],zero
; SSE2-NEXT:    movsd %xmm0, (%esp)
; SSE2-NEXT:    calll llround
; SSE2-NEXT:    addl $8, %esp
; SSE2-NEXT:    .cfi_def_cfa_offset 4
; SSE2-NEXT:    retl
;
; X64-LABEL: testmsxd:
; X64:       # %bb.0: # %entry
; X64-NEXT:    jmp llround@PLT # TAILCALL
entry:
  %0 = tail call i64 @llvm.llround.f64(double %x)
  ret i64 %0
}

define i64 @testmsll(x86_fp80 %x) {
; X86-LABEL: testmsll:
; X86:       # %bb.0: # %entry
; X86-NEXT:    subl $12, %esp
; X86-NEXT:    .cfi_def_cfa_offset 16
; X86-NEXT:    fldt {{[0-9]+}}(%esp)
; X86-NEXT:    fstpt (%esp)
; X86-NEXT:    calll llroundl
; X86-NEXT:    addl $12, %esp
; X86-NEXT:    .cfi_def_cfa_offset 4
; X86-NEXT:    retl
;
; SSE2-LABEL: testmsll:
; SSE2:       # %bb.0: # %entry
; SSE2-NEXT:    subl $12, %esp
; SSE2-NEXT:    .cfi_def_cfa_offset 16
; SSE2-NEXT:    fldt {{[0-9]+}}(%esp)
; SSE2-NEXT:    fstpt (%esp)
; SSE2-NEXT:    calll llroundl
; SSE2-NEXT:    addl $12, %esp
; SSE2-NEXT:    .cfi_def_cfa_offset 4
; SSE2-NEXT:    retl
;
; X64-LABEL: testmsll:
; X64:       # %bb.0: # %entry
; X64-NEXT:    jmp llroundl@PLT # TAILCALL
entry:
  %0 = tail call i64 @llvm.llround.f80(x86_fp80 %x)
  ret i64 %0
}

declare i64 @llvm.llround.f32(float) nounwind readnone
declare i64 @llvm.llround.f64(double) nounwind readnone
declare i64 @llvm.llround.f80(x86_fp80) nounwind readnone
