; RUN: llc -verify-machineinstrs -mcpu=pwr9 -mtriple=powerpc64le-unknown-unknown \
; RUN:   -ppc-vsr-nums-as-vr -ppc-asm-full-reg-names < %s | FileCheck %s

; Quad-precision globals used as store targets by the FMA tests below.
@A = common global fp128 0xL00000000000000000000000000000000, align 16
@B = common global fp128 0xL00000000000000000000000000000000, align 16
@C = common global fp128 0xL00000000000000000000000000000000, align 16
@D = common global fp128 0xL00000000000000000000000000000000, align 16

; Check that the sqrt round-to-odd intrinsic selects xssqrtqpo.
define fp128 @testSqrtOdd(fp128 %a) {
entry:
  %0 = call fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128 %a)
  ret fp128 %0
; CHECK-LABEL: testSqrtOdd
; CHECK: xssqrtqpo v2, v2
; CHECK: blr
}

declare fp128 @llvm.ppc.sqrtf128.round.to.odd(fp128)

; Check that the FMA round-to-odd intrinsic, combined with fneg of the addend
; and/or the result, selects xsmaddqpo/xsmsubqpo/xsnmaddqpo/xsnmsubqpo.
define void @testFMAOdd(fp128 %a, fp128 %b, fp128 %c) {
entry:
  %0 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
  store fp128 %0, fp128* @A, align 16
  %sub = fsub fp128 0xL00000000000000008000000000000000, %c
  %1 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub)
  store fp128 %1, fp128* @B, align 16
  %2 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %c)
  %sub1 = fsub fp128 0xL00000000000000008000000000000000, %2
  store fp128 %sub1, fp128* @C, align 16
  %sub2 = fsub fp128 0xL00000000000000008000000000000000, %c
  %3 = call fp128 @llvm.ppc.fmaf128.round.to.odd(fp128 %a, fp128 %b, fp128 %sub2)
  %sub3 = fsub fp128 0xL00000000000000008000000000000000, %3
  store fp128 %sub3, fp128* @D, align 16
  ret void
; CHECK-LABEL: testFMAOdd
; CHECK-DAG: xsmaddqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsmsubqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsnmaddqpo v{{[0-9]+}}, v2, v3
; CHECK-DAG: xsnmsubqpo v{{[0-9]+}}, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.fmaf128.round.to.odd(fp128, fp128, fp128)

; Check that the add round-to-odd intrinsic selects xsaddqpo.
define fp128 @testAddOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.addf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testAddOdd
; CHECK: xsaddqpo v2, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.addf128.round.to.odd(fp128, fp128)

; Check that the subtract round-to-odd intrinsic selects xssubqpo.
define fp128 @testSubOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.subf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testSubOdd
; CHECK: xssubqpo v2, v2, v3
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.subf128.round.to.odd(fp128, fp128)

; Check that the multiply round-to-odd intrinsic selects xsmulqpo.
; Function Attrs: noinline nounwind optnone
define fp128 @testMulOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.mulf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testMulOdd
; CHECK: xsmulqpo v2, v2, v3
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.mulf128.round.to.odd(fp128, fp128)

; Check that the divide round-to-odd intrinsic selects xsdivqpo.
define fp128 @testDivOdd(fp128 %a, fp128 %b) {
entry:
  %0 = call fp128 @llvm.ppc.divf128.round.to.odd(fp128 %a, fp128 %b)
  ret fp128 %0
; CHECK-LABEL: testDivOdd
; CHECK: xsdivqpo v2, v2, v3
; CHECK: blr
}

declare fp128 @llvm.ppc.divf128.round.to.odd(fp128, fp128)

; Check that the f128-to-f64 truncate round-to-odd intrinsic selects
; xscvqpdpo and copies the result into f1 for the double return.
define double @testTruncOdd(fp128 %a) {
entry:
  %0 = call double @llvm.ppc.truncf128.round.to.odd(fp128 %a)
  ret double %0
; CHECK-LABEL: testTruncOdd
; CHECK: xscvqpdpo v2, v2
; CHECK: xscpsgndp f1, v2, v2
; CHECK: blr
}

declare double @llvm.ppc.truncf128.round.to.odd(fp128)

; Check that the quad-precision insert-exponent intrinsic selects xsiexpqp,
; with the i64 exponent moved to an FPR and the fp128 significand loaded from @A.
; Function Attrs: noinline nounwind optnone
define fp128 @insert_exp_qp(i64 %b) {
entry:
  %b.addr = alloca i64, align 8
  store i64 %b, i64* %b.addr, align 8
  %0 = load fp128, fp128* @A, align 16
  %1 = load i64, i64* %b.addr, align 8
  %2 = call fp128 @llvm.ppc.scalar.insert.exp.qp(fp128 %0, i64 %1)
  ret fp128 %2
; CHECK-LABEL: insert_exp_qp
; CHECK-DAG: mtfprd [[FPREG:f[0-9]+]], r3
; CHECK-DAG: lxvx [[VECREG:v[0-9]+]]
; CHECK: xsiexpqp v2, [[VECREG]], [[FPREG]]
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare fp128 @llvm.ppc.scalar.insert.exp.qp(fp128, i64)

; Check that the quad-precision extract-exponent intrinsic selects xsxexpqp,
; with the result moved to r3 for the i64 return.
; Function Attrs: noinline nounwind optnone
define i64 @extract_exp() {
entry:
  %0 = load fp128, fp128* @A, align 16
  %1 = call i64 @llvm.ppc.scalar.extract.expq(fp128 %0)
  ret i64 %1
; CHECK-LABEL: extract_exp
; CHECK: lxvx [[VECIN:v[0-9]+]]
; CHECK: xsxexpqp [[VECOUT:v[0-9]+]], [[VECIN]]
; CHECK: mfvsrd r3, [[VECOUT]]
; CHECK: blr
}

; Function Attrs: nounwind readnone
declare i64 @llvm.ppc.scalar.extract.expq(fp128)
