; Test 64-bit byteswaps from memory to registers.
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @llvm.bswap.i64(i64 %a)

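; LRVG is the z/Architecture LOAD REVERSED (64) instruction, which loads a
; doubleword from storage with its bytes reversed.  It is an RXY-format
; instruction, so its displacement is a signed 20-bit field and the
; reachable offset range is [-524288, 524287].  The functions below probe
; the edges of that range.
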
; Check LRVG with no displacement.
define i64 @f1(i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %a = load i64, i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the aligned LRVG range.
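; (65535 * 8 = 524280, the largest doubleword-aligned offset that fits the
; signed 20-bit displacement, whose maximum is 524287.)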
define i64 @f2(i64 *%src) {
; CHECK-LABEL: f2:
; CHECK: lrvg %r2, 524280(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
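; (65536 * 8 = 524288 exceeds the maximum displacement of 524287, so the
; base must first be adjusted, here with AGFI.)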
define i64 @f3(i64 *%src) {
; CHECK-LABEL: f3:
; CHECK: agfi %r2, 524288
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the negative aligned LRVG range.
define i64 @f4(i64 *%src) {
; CHECK-LABEL: f4:
; CHECK: lrvg %r2, -8(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the low end of the LRVG range.
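; (-65536 * 8 = -524288, the minimum value of the signed 20-bit
; displacement.)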
define i64 @f5(i64 *%src) {
; CHECK-LABEL: f5:
; CHECK: lrvg %r2, -524288(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
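; (-65537 * 8 = -524296 is below the minimum displacement of -524288, so
; the base must first be adjusted, here with AGFI.)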
define i64 @f6(i64 *%src) {
; CHECK-LABEL: f6:
; CHECK: agfi %r2, -524296
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check that LRVG allows an index.
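; The RXY format encodes a base register, an index register and a
; displacement; base and index are interchangeable here, so either
; register order is acceptable.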
define i64 @f7(i64 %src, i64 %index) {
; CHECK-LABEL: f7:
; CHECK: lrvg %r2, 524287({{%r3,%r2|%r2,%r3}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check that volatile accesses do not use LRVG, which might access the
; storage multiple times.
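; Instead, the value should be loaded once with LG and byte-reversed in a
; register with LRVGR, the register-to-register form of LRVG.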
define i64 @f8(i64 *%src) {
; CHECK-LABEL: f8:
; CHECK: lg [[REG:%r[0-5]]], 0(%r2)
; CHECK: lrvgr %r2, [[REG]]
; CHECK: br %r14
  %a = load volatile i64, i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Test a case where we spill the source of at least one LRVGR.  We want
; to use LRVG if possible.
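; Sixteen values live across the swaps exceed the available GPRs, so at
; least one value is spilled; folding the byteswap into an LRVG from the
; spill slot reloads and swaps in a single instruction.  (On the s390x ELF
; ABI, 160(%r15) is the first slot above the 160-byte register save area.)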
define void @f9(i64 *%ptr) {
; CHECK-LABEL: f9:
; CHECK: lrvg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14
  %val0 = load volatile i64, i64 *%ptr
  %val1 = load volatile i64, i64 *%ptr
  %val2 = load volatile i64, i64 *%ptr
  %val3 = load volatile i64, i64 *%ptr
  %val4 = load volatile i64, i64 *%ptr
  %val5 = load volatile i64, i64 *%ptr
  %val6 = load volatile i64, i64 *%ptr
  %val7 = load volatile i64, i64 *%ptr
  %val8 = load volatile i64, i64 *%ptr
  %val9 = load volatile i64, i64 *%ptr
  %val10 = load volatile i64, i64 *%ptr
  %val11 = load volatile i64, i64 *%ptr
  %val12 = load volatile i64, i64 *%ptr
  %val13 = load volatile i64, i64 *%ptr
  %val14 = load volatile i64, i64 *%ptr
  %val15 = load volatile i64, i64 *%ptr

  %swapped0 = call i64 @llvm.bswap.i64(i64 %val0)
  %swapped1 = call i64 @llvm.bswap.i64(i64 %val1)
  %swapped2 = call i64 @llvm.bswap.i64(i64 %val2)
  %swapped3 = call i64 @llvm.bswap.i64(i64 %val3)
  %swapped4 = call i64 @llvm.bswap.i64(i64 %val4)
  %swapped5 = call i64 @llvm.bswap.i64(i64 %val5)
  %swapped6 = call i64 @llvm.bswap.i64(i64 %val6)
  %swapped7 = call i64 @llvm.bswap.i64(i64 %val7)
  %swapped8 = call i64 @llvm.bswap.i64(i64 %val8)
  %swapped9 = call i64 @llvm.bswap.i64(i64 %val9)
  %swapped10 = call i64 @llvm.bswap.i64(i64 %val10)
  %swapped11 = call i64 @llvm.bswap.i64(i64 %val11)
  %swapped12 = call i64 @llvm.bswap.i64(i64 %val12)
  %swapped13 = call i64 @llvm.bswap.i64(i64 %val13)
  %swapped14 = call i64 @llvm.bswap.i64(i64 %val14)
  %swapped15 = call i64 @llvm.bswap.i64(i64 %val15)

  store volatile i64 %val0, i64 *%ptr
  store volatile i64 %val1, i64 *%ptr
  store volatile i64 %val2, i64 *%ptr
  store volatile i64 %val3, i64 *%ptr
  store volatile i64 %val4, i64 *%ptr
  store volatile i64 %val5, i64 *%ptr
  store volatile i64 %val6, i64 *%ptr
  store volatile i64 %val7, i64 *%ptr
  store volatile i64 %val8, i64 *%ptr
  store volatile i64 %val9, i64 *%ptr
  store volatile i64 %val10, i64 *%ptr
  store volatile i64 %val11, i64 *%ptr
  store volatile i64 %val12, i64 *%ptr
  store volatile i64 %val13, i64 *%ptr
  store volatile i64 %val14, i64 *%ptr
  store volatile i64 %val15, i64 *%ptr

  store volatile i64 %swapped0, i64 *%ptr
  store volatile i64 %swapped1, i64 *%ptr
  store volatile i64 %swapped2, i64 *%ptr
  store volatile i64 %swapped3, i64 *%ptr
  store volatile i64 %swapped4, i64 *%ptr
  store volatile i64 %swapped5, i64 *%ptr
  store volatile i64 %swapped6, i64 *%ptr
  store volatile i64 %swapped7, i64 *%ptr
  store volatile i64 %swapped8, i64 *%ptr
  store volatile i64 %swapped9, i64 *%ptr
  store volatile i64 %swapped10, i64 *%ptr
  store volatile i64 %swapped11, i64 *%ptr
  store volatile i64 %swapped12, i64 *%ptr
  store volatile i64 %swapped13, i64 *%ptr
  store volatile i64 %swapped14, i64 *%ptr
  store volatile i64 %swapped15, i64 *%ptr

  ret void
}
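
; For reference only (not part of this test): with clang on s390x, a C
; function such as the illustrative sketch below produces the
; llvm.bswap.i64 pattern tested by @f1, since __builtin_bswap64 lowers to
; that intrinsic.
;
;   unsigned long f(unsigned long *src) { return __builtin_bswap64(*src); }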