; Test 64-bit floating-point signaling comparison.  The tests assume a z10
; implementation of select, using conditional branches rather than LOCGR.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z10 \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-SCALAR %s
; RUN: llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -verify-machineinstrs \
; RUN:   | FileCheck -check-prefix=CHECK -check-prefix=CHECK-VECTOR %s

; External call used to force values into spill slots (see @f7).
declare double @foo()

; Check comparison with registers (register-register KDBR).
define i64 @f1(i64 %a, i64 %b, double %f1, double %f2) #0 {
; CHECK-LABEL: f1:
; CHECK: kdbr %f0, %f2
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the low end of the KDB range (displacement 0).
define i64 @f2(i64 %a, i64 %b, double %f1, double *%ptr) #0 {
; CHECK-LABEL: f2:
; CHECK: kdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %f2 = load double, double *%ptr
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the high end of the aligned KDB range (displacement 4088 = 511 * 8).
define i64 @f3(i64 %a, i64 %b, double %f1, double *%base) #0 {
; CHECK-LABEL: f3:
; CHECK: kdb %f0, 4088(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 511
  %f2 = load double, double *%ptr
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f4(i64 %a, i64 %b, double %f1, double *%base) #0 {
; CHECK-LABEL: f4:
; CHECK: aghi %r4, 4096
; CHECK: kdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 512
  %f2 = load double, double *%ptr
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check negative displacements, which also need separate address logic.
define i64 @f5(i64 %a, i64 %b, double %f1, double *%base) #0 {
; CHECK-LABEL: f5:
; CHECK: aghi %r4, -8
; CHECK: kdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr = getelementptr double, double *%base, i64 -1
  %f2 = load double, double *%ptr
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check that KDB allows indices (base + scaled index + displacement).
define i64 @f6(i64 %a, i64 %b, double %f1, double *%base, i64 %index) #0 {
; CHECK-LABEL: f6:
; CHECK: sllg %r1, %r5, 3
; CHECK: kdb %f0, 800(%r1,%r4)
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%base, i64 %index
  %ptr2 = getelementptr double, double *%ptr1, i64 100
  %f2 = load double, double *%ptr2
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check that comparisons of spilled values can use KDB rather than KDBR.
; The call to @foo forces all eleven loaded values to be live across it,
; spilling at least one of them to the stack frame.
define double @f7(double *%ptr0) #0 {
; CHECK-LABEL: f7:
; CHECK: brasl %r14, foo@PLT
; CHECK-SCALAR: kdb {{%f[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %ptr1 = getelementptr double, double *%ptr0, i64 2
  %ptr2 = getelementptr double, double *%ptr0, i64 4
  %ptr3 = getelementptr double, double *%ptr0, i64 6
  %ptr4 = getelementptr double, double *%ptr0, i64 8
  %ptr5 = getelementptr double, double *%ptr0, i64 10
  %ptr6 = getelementptr double, double *%ptr0, i64 12
  %ptr7 = getelementptr double, double *%ptr0, i64 14
  %ptr8 = getelementptr double, double *%ptr0, i64 16
  %ptr9 = getelementptr double, double *%ptr0, i64 18
  %ptr10 = getelementptr double, double *%ptr0, i64 20

  %val0 = load double, double *%ptr0
  %val1 = load double, double *%ptr1
  %val2 = load double, double *%ptr2
  %val3 = load double, double *%ptr3
  %val4 = load double, double *%ptr4
  %val5 = load double, double *%ptr5
  %val6 = load double, double *%ptr6
  %val7 = load double, double *%ptr7
  %val8 = load double, double *%ptr8
  %val9 = load double, double *%ptr9
  %val10 = load double, double *%ptr10

  %ret = call double @foo() #0

  %cmp0 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val0,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp1 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val1,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp2 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val2,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp3 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val3,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp4 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val4,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp5 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val5,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp6 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val6,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp7 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val7,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp8 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val8,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp9 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val9,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0
  %cmp10 = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %ret, double %val10,
                                               metadata !"olt",
                                               metadata !"fpexcept.strict") #0

  %sel0 = select i1 %cmp0, double %ret, double 0.0
  %sel1 = select i1 %cmp1, double %sel0, double 1.0
  %sel2 = select i1 %cmp2, double %sel1, double 2.0
  %sel3 = select i1 %cmp3, double %sel2, double 3.0
  %sel4 = select i1 %cmp4, double %sel3, double 4.0
  %sel5 = select i1 %cmp5, double %sel4, double 5.0
  %sel6 = select i1 %cmp6, double %sel5, double 6.0
  %sel7 = select i1 %cmp7, double %sel6, double 7.0
  %sel8 = select i1 %cmp8, double %sel7, double 8.0
  %sel9 = select i1 %cmp9, double %sel8, double 9.0
  %sel10 = select i1 %cmp10, double %sel9, double 10.0

  ret double %sel10
}

; Check comparison with zero - cannot use LOAD AND TEST for a signaling
; comparison, so a zero register must be materialized with LZDR first.
define i64 @f8(i64 %a, i64 %b, double %f) #0 {
; CHECK-LABEL: f8:
; CHECK: lzdr [[REG:%f[0-9]+]]
; CHECK-NEXT: kdbr %f0, [[REG]]
; CHECK-SCALAR-NEXT: ber %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrne %r2, %r3
; CHECK: br %r14
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f, double 0.0,
                                               metadata !"oeq",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; Check the comparison can be reversed if that allows KDB to be used.
define i64 @f9(i64 %a, i64 %b, double %f2, double *%ptr) #0 {
; CHECK-LABEL: f9:
; CHECK: kdb %f0, 0(%r4)
; CHECK-SCALAR-NEXT: blr %r14
; CHECK-SCALAR: lgr %r2, %r3
; CHECK-VECTOR-NEXT: locgrnl %r2, %r3
; CHECK: br %r14
  %f1 = load double, double *%ptr
  %cond = call i1 @llvm.experimental.constrained.fcmps.f64(
                                               double %f1, double %f2,
                                               metadata !"ogt",
                                               metadata !"fpexcept.strict") #0
  %res = select i1 %cond, i64 %a, i64 %b
  ret i64 %res
}

; strictfp is required so the constrained comparisons are not relaxed.
attributes #0 = { strictfp }

declare i1 @llvm.experimental.constrained.fcmps.f64(double, double, metadata, metadata)