; NOTE: HTML code-viewer chrome ("Home / Line# / Scopes# / ...") removed; the
; content below is an LLVM IR FileCheck test for PowerPC fp128 intrinsics.
; RUN: llc -verify-machineinstrs -mcpu=pwr9 -enable-ppc-quad-precision \
; RUN:   -mtriple=powerpc64le-unknown-unknown -ppc-vsr-nums-as-vr \
; RUN:   -ppc-asm-full-reg-names < %s | FileCheck %s
4
; Zero-initialized fp128 globals used as store sinks by testFMAOdd so that
; each FMA variant stays live through instruction selection.
@A = common global fp128 0xL00000000000000000000000000000000, align 16
@B = common global fp128 0xL00000000000000000000000000000000, align 16
@C = common global fp128 0xL00000000000000000000000000000000, align 16
@D = common global fp128 0xL00000000000000000000000000000000, align 16
9
; Verify that llvm.ppc.sqrtf128.round.to.odd selects the quad-precision
; square root with round-to-odd (xssqrtqpo), operating in place on v2.
define fp128 @testSqrtOdd(fp128 %a) {
entry:
  %0 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %a)
  ret fp128 %0
; CHECK-LABEL: testSqrtOdd
; CHECK: xssqrtqpo v2, v2
; CHECK: blr
}

declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)
20
; Verify that llvm.ppc.fmaf128.round.to.odd and its sign-flipped variants
; select the four fused quad-precision round-to-odd instructions. The
; `fsub -0.0, x` idiom negates x; the stores to @A/@B/@C/@D keep each
; variant live:
;   a*b + c        -> xsmaddqpo
;   a*b - c        -> xsmsubqpo
;   -(a*b + c)     -> xsnmaddqpo
;   -(a*b - c)     -> xsnmsubqpo
define void @testFMAOdd(fp128 %a, fp128 %b, fp128 %c) {
entry:
  %0 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
  store fp128 %0, fp128* @A, align 16
  %sub = fsub fp128 0xL00000000000000008000000000000000, %c
  %1 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub)
  store fp128 %1, fp128* @B, align 16
  %2 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
  %sub1 = fsub fp128 0xL00000000000000008000000000000000, %2
  store fp128 %sub1, fp128* @C, align 16
  %sub2 = fsub fp128 0xL00000000000000008000000000000000, %c
  %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub2)
  %sub3 = fsub fp128 0xL00000000000000008000000000000000, %3
  store fp128 %sub3, fp128* @D, align 16
  ret void
; CHECK-LABEL: testFMAOdd
; CHECK-DAG: xsmaddqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsmsubqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsnmaddqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsnmsubqpo v{{[0-9]+}}, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)
45
; Verify that llvm.ppc.addf128.round.to.odd selects the quad-precision
; add with round-to-odd (xsaddqpo) on the incoming argument registers.
define fp128 @testAddOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.addf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testAddOdd
; CHECK: xsaddqpo v2, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.addf128.round.to.odd(fp128, fp128)
56
; Verify that llvm.ppc.subf128.round.to.odd selects the quad-precision
; subtract with round-to-odd (xssubqpo).
define fp128 @testSubOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.subf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testSubOdd
; CHECK: xssubqpo v2, v2, v3
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.subf128.round.to.odd(fp128, fp128)
68
; Verify that llvm.ppc.mulf128.round.to.odd selects the quad-precision
; multiply with round-to-odd (xsmulqpo).
; Function Attrs: noinline nounwind optnone
define fp128 @testMulOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.mulf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testMulOdd
; CHECK: xsmulqpo v2, v2, v3
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.mulf128.round.to.odd(fp128, fp128)
81
; Verify that llvm.ppc.divf128.round.to.odd selects the quad-precision
; divide with round-to-odd (xsdivqpo).
define fp128 @testDivOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.divf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testDivOdd
; CHECK: xsdivqpo v2, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.divf128.round.to.odd(fp128, fp128)
92
; Verify that llvm.ppc.truncf128.round.to.odd (fp128 -> double truncation)
; selects xscvqpdpo, then copies the result from the vector register into
; f1 (the double return register) via xxlor.
define double @testTruncOdd(fp128 %a) {
entry:
  %0 = call double @llvm.ppc.truncf128.round.to.odd(fp128 %a)
  ret double %0
; CHECK-LABEL: testTruncOdd
; CHECK: xscvqpdpo v2, v2
; CHECK: xxlor f1, v2, v2
; CHECK: blr
}

declare double @llvm.ppc.truncf128.round.to.odd(fp128)
104