; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV32IFD %s
; RUN: llc -mtriple=riscv64 -mattr=+d -verify-machineinstrs < %s \
; RUN:   | FileCheck -check-prefix=RV64IFD %s

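; This file checks lowering of double-precision floating-point intrinsics for
; RV32IFD and RV64IFD. As the checks below show, intrinsics with a direct
; D-extension counterpart (sqrt, fma, fmuladd, minnum, maxnum, copysign) are
; selected to single instructions, fabs is lowered to an integer mask of the
; sign bit, and the remaining intrinsics are expanded to libcalls.
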
declare double @llvm.sqrt.f64(double)

define double @sqrt_f64(double %a) nounwind {
; RV32IFD-LABEL: sqrt_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fsqrt.d ft0, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sqrt_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fsqrt.d ft0, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sqrt.f64(double %a)
  ret double %1
}

declare double @llvm.powi.f64(double, i32)

define double @powi_f64(double %a, i32 %b) nounwind {
; RV32IFD-LABEL: powi_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call __powidf2
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: powi_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    sext.w a1, a1
; RV64IFD-NEXT:    call __powidf2
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.powi.f64(double %a, i32 %b)
  ret double %1
}

declare double @llvm.sin.f64(double)

define double @sin_f64(double %a) nounwind {
; RV32IFD-LABEL: sin_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call sin
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sin_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call sin
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sin.f64(double %a)
  ret double %1
}

declare double @llvm.cos.f64(double)

define double @cos_f64(double %a) nounwind {
; RV32IFD-LABEL: cos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call cos
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: cos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call cos
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.cos.f64(double %a)
  ret double %1
}

; The sin+cos combination results in an FSINCOS SelectionDAG node.
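; It is expanded to separate sin and cos libcalls here, with the sin result
; preserved across the cos call.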
define double @sincos_f64(double %a) nounwind {
; RV32IFD-LABEL: sincos_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -32
; RV32IFD-NEXT:    sw ra, 28(sp)
; RV32IFD-NEXT:    sw s0, 24(sp)
; RV32IFD-NEXT:    sw s1, 20(sp)
; RV32IFD-NEXT:    mv s0, a1
; RV32IFD-NEXT:    mv s1, a0
; RV32IFD-NEXT:    call sin
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fsd ft0, 0(sp)
; RV32IFD-NEXT:    mv a0, s1
; RV32IFD-NEXT:    mv a1, s0
; RV32IFD-NEXT:    call cos
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    fld ft1, 0(sp)
; RV32IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    lw s1, 20(sp)
; RV32IFD-NEXT:    lw s0, 24(sp)
; RV32IFD-NEXT:    lw ra, 28(sp)
; RV32IFD-NEXT:    addi sp, sp, 32
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: sincos_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -32
; RV64IFD-NEXT:    sd ra, 24(sp)
; RV64IFD-NEXT:    sd s0, 16(sp)
; RV64IFD-NEXT:    mv s0, a0
; RV64IFD-NEXT:    call sin
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fsd ft0, 8(sp)
; RV64IFD-NEXT:    mv a0, s0
; RV64IFD-NEXT:    call cos
; RV64IFD-NEXT:    fmv.d.x ft0, a0
; RV64IFD-NEXT:    fld ft1, 8(sp)
; RV64IFD-NEXT:    fadd.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ld s0, 16(sp)
; RV64IFD-NEXT:    ld ra, 24(sp)
; RV64IFD-NEXT:    addi sp, sp, 32
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.sin.f64(double %a)
  %2 = call double @llvm.cos.f64(double %a)
  %3 = fadd double %1, %2
  ret double %3
}

declare double @llvm.pow.f64(double, double)

define double @pow_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: pow_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call pow
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: pow_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call pow
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.pow.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.exp.f64(double)

define double @exp_f64(double %a) nounwind {
; RV32IFD-LABEL: exp_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call exp
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call exp
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.exp.f64(double %a)
  ret double %1
}

declare double @llvm.exp2.f64(double)

define double @exp2_f64(double %a) nounwind {
; RV32IFD-LABEL: exp2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call exp2
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: exp2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call exp2
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.exp2.f64(double %a)
  ret double %1
}

declare double @llvm.log.f64(double)

define double @log_f64(double %a) nounwind {
; RV32IFD-LABEL: log_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call log
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call log
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.log.f64(double %a)
  ret double %1
}

declare double @llvm.log10.f64(double)

define double @log10_f64(double %a) nounwind {
; RV32IFD-LABEL: log10_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call log10
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log10_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call log10
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.log10.f64(double %a)
  ret double %1
}

declare double @llvm.log2.f64(double)

define double @log2_f64(double %a) nounwind {
; RV32IFD-LABEL: log2_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call log2
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: log2_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call log2
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.log2.f64(double %a)
  ret double %1
}

declare double @llvm.fma.f64(double, double, double)

define double @fma_f64(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fma_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fma_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a2
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a0
; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fma.f64(double %a, double %b, double %c)
  ret double %1
}

declare double @llvm.fmuladd.f64(double, double, double)

define double @fmuladd_f64(double %a, double %b, double %c) nounwind {
; RV32IFD-LABEL: fmuladd_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a4, 8(sp)
; RV32IFD-NEXT:    sw a5, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft2, 8(sp)
; RV32IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fmuladd_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a2
; RV64IFD-NEXT:    fmv.d.x ft1, a1
; RV64IFD-NEXT:    fmv.d.x ft2, a0
; RV64IFD-NEXT:    fmadd.d ft0, ft2, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fmuladd.f64(double %a, double %b, double %c)
  ret double %1
}

declare double @llvm.fabs.f64(double)

define double @fabs_f64(double %a) nounwind {
; RV32IFD-LABEL: fabs_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    lui a2, 524288
; RV32IFD-NEXT:    addi a2, a2, -1
; RV32IFD-NEXT:    and a1, a1, a2
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: fabs_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi a1, zero, -1
; RV64IFD-NEXT:    slli a1, a1, 63
; RV64IFD-NEXT:    addi a1, a1, -1
; RV64IFD-NEXT:    and a0, a0, a1
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.fabs.f64(double %a)
  ret double %1
}

declare double @llvm.minnum.f64(double, double)

define double @minnum_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: minnum_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmin.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: minnum_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmin.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.minnum.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.maxnum.f64(double, double)

define double @maxnum_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: maxnum_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fmax.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: maxnum_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fmax.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.maxnum.f64(double %a, double %b)
  ret double %1
}

; TODO: FMINNAN and FMAXNAN aren't handled in
; SelectionDAGLegalize::ExpandNode.

; declare double @llvm.minimum.f64(double, double)

; define double @fminimum_f64(double %a, double %b) nounwind {
;   %1 = call double @llvm.minimum.f64(double %a, double %b)
;   ret double %1
; }

; declare double @llvm.maximum.f64(double, double)

; define double @fmaximum_f64(double %a, double %b) nounwind {
;   %1 = call double @llvm.maximum.f64(double %a, double %b)
;   ret double %1
; }

declare double @llvm.copysign.f64(double, double)

define double @copysign_f64(double %a, double %b) nounwind {
; RV32IFD-LABEL: copysign_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw a2, 8(sp)
; RV32IFD-NEXT:    sw a3, 12(sp)
; RV32IFD-NEXT:    fld ft0, 8(sp)
; RV32IFD-NEXT:    sw a0, 8(sp)
; RV32IFD-NEXT:    sw a1, 12(sp)
; RV32IFD-NEXT:    fld ft1, 8(sp)
; RV32IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV32IFD-NEXT:    fsd ft0, 8(sp)
; RV32IFD-NEXT:    lw a0, 8(sp)
; RV32IFD-NEXT:    lw a1, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: copysign_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    fmv.d.x ft0, a1
; RV64IFD-NEXT:    fmv.d.x ft1, a0
; RV64IFD-NEXT:    fsgnj.d ft0, ft1, ft0
; RV64IFD-NEXT:    fmv.x.d a0, ft0
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.copysign.f64(double %a, double %b)
  ret double %1
}

declare double @llvm.floor.f64(double)

define double @floor_f64(double %a) nounwind {
; RV32IFD-LABEL: floor_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call floor
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: floor_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call floor
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.floor.f64(double %a)
  ret double %1
}

declare double @llvm.ceil.f64(double)

define double @ceil_f64(double %a) nounwind {
; RV32IFD-LABEL: ceil_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call ceil
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: ceil_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call ceil
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.ceil.f64(double %a)
  ret double %1
}

declare double @llvm.trunc.f64(double)

define double @trunc_f64(double %a) nounwind {
; RV32IFD-LABEL: trunc_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call trunc
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: trunc_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call trunc
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.trunc.f64(double %a)
  ret double %1
}

declare double @llvm.rint.f64(double)

define double @rint_f64(double %a) nounwind {
; RV32IFD-LABEL: rint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call rint
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: rint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call rint
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.rint.f64(double %a)
  ret double %1
}

declare double @llvm.nearbyint.f64(double)

define double @nearbyint_f64(double %a) nounwind {
; RV32IFD-LABEL: nearbyint_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call nearbyint
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: nearbyint_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call nearbyint
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.nearbyint.f64(double %a)
  ret double %1
}

declare double @llvm.round.f64(double)

define double @round_f64(double %a) nounwind {
; RV32IFD-LABEL: round_f64:
; RV32IFD:       # %bb.0:
; RV32IFD-NEXT:    addi sp, sp, -16
; RV32IFD-NEXT:    sw ra, 12(sp)
; RV32IFD-NEXT:    call round
; RV32IFD-NEXT:    lw ra, 12(sp)
; RV32IFD-NEXT:    addi sp, sp, 16
; RV32IFD-NEXT:    ret
;
; RV64IFD-LABEL: round_f64:
; RV64IFD:       # %bb.0:
; RV64IFD-NEXT:    addi sp, sp, -16
; RV64IFD-NEXT:    sd ra, 8(sp)
; RV64IFD-NEXT:    call round
; RV64IFD-NEXT:    ld ra, 8(sp)
; RV64IFD-NEXT:    addi sp, sp, 16
; RV64IFD-NEXT:    ret
  %1 = call double @llvm.round.f64(double %a)
  ret double %1
}