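; Lowering of f128 (IEEE quad) operations on 32-bit SPARC, in both big-endian
; (sparc) and little-endian (sparcel) modes. With -mattr=hard-quad-float the
; quad FP instructions are used directly (HARD); without it, operations are
; lowered to the _Q_* soft-float runtime calls (SOFT).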
; RUN: llc < %s -march=sparc -mattr=hard-quad-float | FileCheck %s --check-prefix=CHECK --check-prefix=HARD --check-prefix=BE
; RUN: llc < %s -march=sparcel -mattr=hard-quad-float | FileCheck %s --check-prefix=CHECK --check-prefix=HARD --check-prefix=EL
; RUN: llc < %s -march=sparc -mattr=-hard-quad-float -verify-machineinstrs | FileCheck %s --check-prefix=CHECK --check-prefix=SOFT --check-prefix=BE
; RUN: llc < %s -march=sparcel -mattr=-hard-quad-float | FileCheck %s --check-prefix=CHECK --check-prefix=SOFT --check-prefix=EL

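; Basic f128 arithmetic (add, sub, mul, div). HARD expects the quad FP
; instructions; SOFT expects _Q_* libcalls, each followed by the SPARC
; struct-return size marker 'unimp 16'.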
; CHECK-LABEL: f128_ops:
; CHECK:      ldd
; CHECK:      ldd
; CHECK:      ldd
; CHECK:      ldd
; HARD:       faddq [[R0:.+]],  [[R1:.+]],  [[R2:.+]]
; HARD:       fsubq [[R2]], [[R3:.+]], [[R4:.+]]
; HARD:       fmulq [[R4]], [[R5:.+]], [[R6:.+]]
; HARD:       fdivq [[R6]], [[R2]]
; SOFT:       call _Q_add
; SOFT:       unimp 16
; SOFT:       call _Q_sub
; SOFT:       unimp 16
; SOFT:       call _Q_mul
; SOFT:       unimp 16
; SOFT:       call _Q_div
; SOFT:       unimp 16
; CHECK:      std
; CHECK:      std

define void @f128_ops(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a, fp128* byval(fp128) %b, fp128* byval(fp128) %c, fp128* byval(fp128) %d) {
entry:
  %0 = load fp128, fp128* %a, align 8
  %1 = load fp128, fp128* %b, align 8
  %2 = load fp128, fp128* %c, align 8
  %3 = load fp128, fp128* %d, align 8
  %4 = fadd fp128 %0, %1
  %5 = fsub fp128 %4, %2
  %6 = fmul fp128 %5, %3
  %7 = fdiv fp128 %6, %4
  store fp128 %7, fp128* %scalar.result, align 8
  ret void
}

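; An f128 value live across an inline-asm clobber of every FP register must be
; spilled and reloaded as two 8-byte std/ldd pairs.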
; CHECK-LABEL: f128_spill:
; CHECK:       std %f{{.+}}, [%[[S0:.+]]]
; CHECK:       std %f{{.+}}, [%[[S1:.+]]]
; CHECK-DAG:   ldd [%[[S0]]], %f{{.+}}
; CHECK-DAG:   ldd [%[[S1]]], %f{{.+}}
; CHECK:       jmp {{%[oi]7}}+12

define void @f128_spill(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a) {
entry:
  %0 = load fp128, fp128* %a, align 8
  call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
  store fp128 %0, fp128* %scalar.result, align 8
  ret void
}

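; Same spill with a frame too large for a simple immediate offset: the spill
; slot address has to be materialized with sethi+add before each std/ldd.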
; CHECK-LABEL: f128_spill_large:
; CHECK:       sethi 4, %g1
; CHECK:       sethi 4, %g1
; CHECK-NEXT:  add %g1, %sp, %g1
; CHECK-NEXT:  std %f{{.+}}, [%g1]
; CHECK:       sethi 4, %g1
; CHECK-NEXT:  add %g1, %sp, %g1
; CHECK-NEXT:  std %f{{.+}}, [%g1+8]
; CHECK:       sethi 4, %g1
; CHECK-NEXT:  add %g1, %sp, %g1
; CHECK-NEXT:  ldd [%g1], %f{{.+}}
; CHECK:       sethi 4, %g1
; CHECK-NEXT:  add %g1, %sp, %g1
; CHECK-NEXT:  ldd [%g1+8], %f{{.+}}

define void @f128_spill_large(<251 x fp128>* noalias sret(<251 x fp128>) %scalar.result, <251 x fp128>* byval(<251 x fp128>) %a) {
entry:
  %0 = load <251 x fp128>, <251 x fp128>* %a, align 8
  call void asm sideeffect "", "~{f0},~{f1},~{f2},~{f3},~{f4},~{f5},~{f6},~{f7},~{f8},~{f9},~{f10},~{f11},~{f12},~{f13},~{f14},~{f15},~{f16},~{f17},~{f18},~{f19},~{f20},~{f21},~{f22},~{f23},~{f24},~{f25},~{f26},~{f27},~{f28},~{f29},~{f30},~{f31}"()
  store <251 x fp128> %0, <251 x fp128>* %scalar.result, align 8
  ret void
}

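; f128 comparison feeding a select: fcmpq with hard quad (the nop fills the
; required slot between fcmpq and the use of the FP condition codes), the
; _Q_cmp libcall otherwise.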
; CHECK-LABEL: f128_compare:
; HARD:       fcmpq
; HARD-NEXT:  nop
; SOFT:       _Q_cmp

define i32 @f128_compare(fp128* byval(fp128) %f0, fp128* byval(fp128) %f1, i32 %a, i32 %b) {
entry:
   %0 = load fp128, fp128* %f0, align 8
   %1 = load fp128, fp128* %f1, align 8
   %cond = fcmp ult fp128 %0, %1
   %ret = select i1 %cond, i32 %a, i32 %b
   ret i32 %ret
}

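; f128 comparison against zero feeding a branch: fcmpq plus a conditional FP
; branch with hard quad, _Q_cmp plus an integer compare otherwise.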
; CHECK-LABEL: f128_compare2:
; HARD:        fcmpq
; HARD:        fb{{ule|g}}
; SOFT:       _Q_cmp
; SOFT:       cmp

define i32 @f128_compare2(fp128* byval(fp128) %f0) {
entry:
  %0 = load fp128, fp128* %f0, align 8
  %1 = fcmp ogt fp128 %0, 0xL00000000000000000000000000000000
  br i1 %1, label %"5", label %"7"

"5":                                              ; preds = %entry
  ret i32 0

"7":                                              ; preds = %entry
  ret i32 1
}

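; fabs on f128 only needs to touch the word holding the sign bit, so the
; big-endian and little-endian runs expect fabss on different registers.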
; CHECK-LABEL: f128_abs:
; CHECK:       ldd [%o0], %f0
; CHECK:       ldd [%o0+8], %f2
; BE:          fabss %f0, %f0
; EL:          fabss %f3, %f3

define void @f128_abs(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a) {
entry:
  %0 = load fp128, fp128* %a, align 8
  %1 = tail call fp128 @llvm.fabs.f128(fp128 %0)
  store fp128 %1, fp128* %scalar.result, align 8
  ret void
}

declare fp128 @llvm.fabs.f128(fp128) nounwind readonly

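; Signed i32 -> f128: fitoq with hard quad, the _Q_itoq libcall otherwise.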
; CHECK-LABEL: int_to_f128:
; HARD:       fitoq
; SOFT:       _Q_itoq
; SOFT:       unimp 16

define void @int_to_f128(fp128* noalias sret(fp128) %scalar.result, i32 %i) {
entry:
  %0 = sitofp i32 %i to fp128
  store fp128 %0, fp128* %scalar.result, align 8
  ret void
}

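; Unaligned (align 1) f128 accesses are expanded to byte loads and stores.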
; CHECK-LABEL: fp128_unaligned:
; CHECK:       ldub
; HARD:        faddq
; SOFT:       call _Q_add
; SOFT:       unimp 16
; CHECK:       stb
; CHECK:       ret

define void @fp128_unaligned(fp128* %a, fp128* %b, fp128* %c) {
entry:
  %0 = load fp128, fp128* %a, align 1
  %1 = load fp128, fp128* %b, align 1
  %2 = fadd fp128 %0, %1
  store fp128 %2, fp128* %c, align 1
  ret void
}

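; Unsigned i32 -> f128: the HARD path converts via a double (fdtoq), the SOFT
; path calls _Q_utoq.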
; CHECK-LABEL: uint_to_f128:
; HARD:       fdtoq
; SOFT:       _Q_utoq
; SOFT:       unimp 16

define void @uint_to_f128(fp128* noalias sret(fp128) %scalar.result, i32 %i) {
entry:
  %0 = uitofp i32 %i to fp128
  store fp128 %0, fp128* %scalar.result, align 8
  ret void
}

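; f128 -> i32, signed and unsigned: fqtoi with hard quad, the _Q_qtoi and
; _Q_qtou libcalls otherwise.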
; CHECK-LABEL: f128_to_i32:
; HARD:       fqtoi
; HARD:       fqtoi
; SOFT:       call _Q_qtou
; SOFT:       call _Q_qtoi

define i32 @f128_to_i32(fp128* %a, fp128* %b) {
entry:
  %0 = load fp128, fp128* %a, align 8
  %1 = load fp128, fp128* %b, align 8
  %2 = fptoui fp128 %0 to i32
  %3 = fptosi fp128 %1 to i32
  %4 = add i32 %2, %3
  ret i32 %4
}

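; Mixed i64/i32 <-> f128 conversions. The 64-bit conversions go through
; libcalls (_Q_lltoq/_Q_qtoll) even with hard quad; only the 32-bit ones use
; fitoq/fqtoi.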
; CHECK-LABEL:   test_itoq_qtoi
; HARD-DAG:      call _Q_lltoq
; HARD-DAG:      call _Q_qtoll
; HARD-DAG:      fitoq
; HARD-DAG:      fqtoi
; SOFT-DAG:      call _Q_lltoq
; SOFT-DAG:      unimp 16
; SOFT-DAG:      call _Q_qtoll
; SOFT-DAG:      call _Q_itoq
; SOFT-DAG:      unimp 16
; SOFT-DAG:      call _Q_qtoi

define void @test_itoq_qtoi(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp128* %ptr1) {
entry:
  %0 = sitofp i64 %a to fp128
  store  fp128 %0, fp128* %ptr1, align 8
  %cval = load fp128, fp128* %c, align 8
  %1 = fptosi fp128 %cval to i64
  store  i64 %1, i64* %ptr0, align 8
  %2 = sitofp i32 %b to fp128
  store  fp128 %2, fp128* %ptr1, align 8
  %dval = load fp128, fp128* %d, align 8
  %3 = fptosi fp128 %dval to i32
  %4 = bitcast i64* %ptr0 to i32*
  store  i32 %3, i32* %4, align 8
  ret void
}

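; Unsigned counterpart: _Q_ulltoq/_Q_qtoull for the 64-bit conversions on both
; paths, fdtoq/fqtoi (HARD) or _Q_utoq/_Q_qtou (SOFT) for the 32-bit ones.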
; CHECK-LABEL:   test_utoq_qtou:
; CHECK-DAG:     call _Q_ulltoq
; CHECK-DAG:     call _Q_qtoull
; HARD-DAG:      fdtoq
; HARD-DAG:      fqtoi
; SOFT-DAG:      call _Q_utoq
; SOFT-DAG:      unimp 16
; SOFT-DAG:      call _Q_qtou

define void @test_utoq_qtou(i64 %a, i32 %b, fp128* %c, fp128* %d, i64* %ptr0, fp128* %ptr1) {
entry:
  %0 = uitofp i64 %a to fp128
  store  fp128 %0, fp128* %ptr1, align 8
  %cval = load fp128, fp128* %c, align 8
  %1 = fptoui fp128 %cval to i64
  store  i64 %1, i64* %ptr0, align 8
  %2 = uitofp i32 %b to fp128
  store  fp128 %2, fp128* %ptr1, align 8
  %dval = load fp128, fp128* %d, align 8
  %3 = fptoui fp128 %dval to i32
  %4 = bitcast i64* %ptr0 to i32*
  store  i32 %3, i32* %4, align 8
  ret void
}

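; Negation written as fsub -0.0, x only flips the sign bit; as with fabs, the
; register checked differs between big- and little-endian.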
; CHECK-LABEL: f128_neg:
; CHECK:       ldd [%o0], %f0
; CHECK:       ldd [%o0+8], %f2
; BE:          fnegs %f0, %f0
; EL:          fnegs %f3, %f3

define void @f128_neg(fp128* noalias sret(fp128) %scalar.result, fp128* byval(fp128) %a) {
entry:
  %0 = load fp128, fp128* %a, align 8
  %1 = fsub fp128 0xL00000000000000008000000000000000, %0
  store fp128 %1, fp128* %scalar.result, align 8
  ret void
}