; Test sign extensions from a byte to an i64.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

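; LGBR sign-extends the low byte of a register into a 64-bit register;
; LGB does the same for a byte loaded from memory.
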
; Test register extension, starting with an i32.
define i64 @f1(i32 %a) {
; CHECK-LABEL: f1:
; CHECK: lgbr %r2, %r2
; CHECK: br %r14
  %byte = trunc i32 %a to i8
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; ...and again with an i64.
define i64 @f2(i64 %a) {
; CHECK-LABEL: f2:
; CHECK: lgbr %r2, %r2
; CHECK: br %r14
  %byte = trunc i64 %a to i8
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Check LGB with no displacement.
define i64 @f3(i8 *%src) {
; CHECK-LABEL: f3:
; CHECK: lgb %r2, 0(%r2)
; CHECK: br %r14
  %byte = load i8, i8 *%src
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Check the high end of the LGB range.
define i64 @f4(i8 *%src) {
; CHECK-LABEL: f4:
; CHECK: lgb %r2, 524287(%r2)
; CHECK: br %r14
  %ptr = getelementptr i8, i8 *%src, i64 524287
  %byte = load i8, i8 *%ptr
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

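; LGB is a long-displacement (RXY-format) instruction: its displacement is a
; signed 20-bit field, so only offsets in [-524288, 524287] can be encoded
; directly.  Offsets outside that range need the address computed separately,
; as in f5 and f8 below.
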
; Check the next byte up, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f5(i8 *%src) {
; CHECK-LABEL: f5:
; CHECK: agfi %r2, 524288
; CHECK: lgb %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i8, i8 *%src, i64 524288
  %byte = load i8, i8 *%ptr
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Check the high end of the negative LGB range.
define i64 @f6(i8 *%src) {
; CHECK-LABEL: f6:
; CHECK: lgb %r2, -1(%r2)
; CHECK: br %r14
  %ptr = getelementptr i8, i8 *%src, i64 -1
  %byte = load i8, i8 *%ptr
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Check the low end of the LGB range.
define i64 @f7(i8 *%src) {
; CHECK-LABEL: f7:
; CHECK: lgb %r2, -524288(%r2)
; CHECK: br %r14
  %ptr = getelementptr i8, i8 *%src, i64 -524288
  %byte = load i8, i8 *%ptr
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Check the next byte down, which needs separate address logic.
; Other sequences besides this one would be OK.
define i64 @f8(i8 *%src) {
; CHECK-LABEL: f8:
; CHECK: agfi %r2, -524289
; CHECK: lgb %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i8, i8 *%src, i64 -524289
  %byte = load i8, i8 *%ptr
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Check that LGB allows an index.
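; RXY-format addressing takes a base register, an index register and the
; displacement, so base + index + 524287 should fold into a single LGB.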
define i64 @f9(i64 %src, i64 %index) {
; CHECK-LABEL: f9:
; CHECK: lgb %r2, 524287(%r3,%r2)
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i8 *
  %byte = load i8, i8 *%ptr
  %ext = sext i8 %byte to i64
  ret i64 %ext
}

; Test a case where we spill the source of at least one LGBR.  We want
; to use LGB if possible.
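; With 16 i64 values live at once and %r14 (return address) and %r15 (stack
; pointer) unavailable, at least one value must be spilled; the reload-and-
; extend should then be a single LGB from the spill slot.  167(%r15) is
; presumably the low byte of an 8-byte slot at offset 160, s390x being
; big-endian.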
define void @f10(i64 *%ptr) {
; CHECK-LABEL: f10:
; CHECK: lgb {{%r[0-9]+}}, 167(%r15)
; CHECK: br %r14
  %val0 = load volatile i64, i64 *%ptr
  %val1 = load volatile i64, i64 *%ptr
  %val2 = load volatile i64, i64 *%ptr
  %val3 = load volatile i64, i64 *%ptr
  %val4 = load volatile i64, i64 *%ptr
  %val5 = load volatile i64, i64 *%ptr
  %val6 = load volatile i64, i64 *%ptr
  %val7 = load volatile i64, i64 *%ptr
  %val8 = load volatile i64, i64 *%ptr
  %val9 = load volatile i64, i64 *%ptr
  %val10 = load volatile i64, i64 *%ptr
  %val11 = load volatile i64, i64 *%ptr
  %val12 = load volatile i64, i64 *%ptr
  %val13 = load volatile i64, i64 *%ptr
  %val14 = load volatile i64, i64 *%ptr
  %val15 = load volatile i64, i64 *%ptr

  %trunc0 = trunc i64 %val0 to i8
  %trunc1 = trunc i64 %val1 to i8
  %trunc2 = trunc i64 %val2 to i8
  %trunc3 = trunc i64 %val3 to i8
  %trunc4 = trunc i64 %val4 to i8
  %trunc5 = trunc i64 %val5 to i8
  %trunc6 = trunc i64 %val6 to i8
  %trunc7 = trunc i64 %val7 to i8
  %trunc8 = trunc i64 %val8 to i8
  %trunc9 = trunc i64 %val9 to i8
  %trunc10 = trunc i64 %val10 to i8
  %trunc11 = trunc i64 %val11 to i8
  %trunc12 = trunc i64 %val12 to i8
  %trunc13 = trunc i64 %val13 to i8
  %trunc14 = trunc i64 %val14 to i8
  %trunc15 = trunc i64 %val15 to i8

  %ext0 = sext i8 %trunc0 to i64
  %ext1 = sext i8 %trunc1 to i64
  %ext2 = sext i8 %trunc2 to i64
  %ext3 = sext i8 %trunc3 to i64
  %ext4 = sext i8 %trunc4 to i64
  %ext5 = sext i8 %trunc5 to i64
  %ext6 = sext i8 %trunc6 to i64
  %ext7 = sext i8 %trunc7 to i64
  %ext8 = sext i8 %trunc8 to i64
  %ext9 = sext i8 %trunc9 to i64
  %ext10 = sext i8 %trunc10 to i64
  %ext11 = sext i8 %trunc11 to i64
  %ext12 = sext i8 %trunc12 to i64
  %ext13 = sext i8 %trunc13 to i64
  %ext14 = sext i8 %trunc14 to i64
  %ext15 = sext i8 %trunc15 to i64

  store volatile i64 %val0, i64 *%ptr
  store volatile i64 %val1, i64 *%ptr
  store volatile i64 %val2, i64 *%ptr
  store volatile i64 %val3, i64 *%ptr
  store volatile i64 %val4, i64 *%ptr
  store volatile i64 %val5, i64 *%ptr
  store volatile i64 %val6, i64 *%ptr
  store volatile i64 %val7, i64 *%ptr
  store volatile i64 %val8, i64 *%ptr
  store volatile i64 %val9, i64 *%ptr
  store volatile i64 %val10, i64 *%ptr
  store volatile i64 %val11, i64 *%ptr
  store volatile i64 %val12, i64 *%ptr
  store volatile i64 %val13, i64 *%ptr
  store volatile i64 %val14, i64 *%ptr
  store volatile i64 %val15, i64 *%ptr

  store volatile i64 %ext0, i64 *%ptr
  store volatile i64 %ext1, i64 *%ptr
  store volatile i64 %ext2, i64 *%ptr
  store volatile i64 %ext3, i64 *%ptr
  store volatile i64 %ext4, i64 *%ptr
  store volatile i64 %ext5, i64 *%ptr
  store volatile i64 %ext6, i64 *%ptr
  store volatile i64 %ext7, i64 *%ptr
  store volatile i64 %ext8, i64 *%ptr
  store volatile i64 %ext9, i64 *%ptr
  store volatile i64 %ext10, i64 *%ptr
  store volatile i64 %ext11, i64 *%ptr
  store volatile i64 %ext12, i64 *%ptr
  store volatile i64 %ext13, i64 *%ptr
  store volatile i64 %ext14, i64 *%ptr
  store volatile i64 %ext15, i64 *%ptr

  ret void
}