; Test 64-bit byteswaps from memory to registers.
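; LRVG is the z/Architecture "load reversed" instruction for 64-bit values, so each
; bswap of a loaded i64 should fold into a single LRVG rather than a load plus LRVGR.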
;
; RUN: llc < %s -mtriple=s390x-linux-gnu | FileCheck %s

declare i64 @llvm.bswap.i64(i64 %a)

; Check LRVG with no displacement.
define i64 @f1(i64 *%src) {
; CHECK-LABEL: f1:
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %a = load i64, i64 *%src
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the aligned LRVG range.
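; 65535 * 8 = 524280, the largest multiple of 8 that fits in LRVG's signed
; 20-bit displacement field (maximum 524287), so no separate addition is needed.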
define i64 @f2(i64 *%src) {
; CHECK-LABEL: f2:
; CHECK: lrvg %r2, 524280(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65535
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the next doubleword up, which needs separate address logic.
; Other sequences besides this one would be OK.
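; 65536 * 8 = 524288, just past the 524287 displacement limit, so the offset
; has to be folded into the base first (here with AGFI).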
define i64 @f3(i64 *%src) {
; CHECK-LABEL: f3:
; CHECK: agfi %r2, 524288
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the high end of the negative aligned LRVG range.
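; -1 * 8 = -8 fits directly in the signed displacement field.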
define i64 @f4(i64 *%src) {
; CHECK-LABEL: f4:
; CHECK: lrvg %r2, -8(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -1
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the low end of the LRVG range.
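; -65536 * 8 = -524288, the most negative value a signed 20-bit displacement can hold.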
define i64 @f5(i64 *%src) {
; CHECK-LABEL: f5:
; CHECK: lrvg %r2, -524288(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65536
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check the next doubleword down, which needs separate address logic.
; Other sequences besides this one would be OK.
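; -65537 * 8 = -524296 is below the -524288 limit, so again the offset is
; materialized separately (here with AGFI) before the LRVG.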
define i64 @f6(i64 *%src) {
; CHECK-LABEL: f6:
; CHECK: agfi %r2, -524296
; CHECK: lrvg %r2, 0(%r2)
; CHECK: br %r14
  %ptr = getelementptr i64, i64 *%src, i64 -65537
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Check that LRVG allows an index.
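; 524287 is still the maximum displacement; the base and index registers are
; interchangeable, hence the regex accepting either operand order.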
define i64 @f7(i64 %src, i64 %index) {
; CHECK-LABEL: f7:
; CHECK: lrvg %r2, 524287({{%r3,%r2|%r2,%r3}})
; CHECK: br %r14
  %add1 = add i64 %src, %index
  %add2 = add i64 %add1, 524287
  %ptr = inttoptr i64 %add2 to i64 *
  %a = load i64, i64 *%ptr
  %swapped = call i64 @llvm.bswap.i64(i64 %a)
  ret i64 %swapped
}

; Test a case where we spill the source of at least one LRVGR.  We want
; to use LRVG if possible.
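; With ten values live across calls, at least one has to be spilled; the reload
; plus byteswap should then fold into an LRVG straight from the spill slot
; (160(%r15) presumably being the first slot above the 160-byte register save area).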
define i64 @f8(i64 *%ptr) {
; CHECK-LABEL: f8:
; CHECK: lrvg {{%r[0-9]+}}, 160(%r15)
; CHECK: br %r14

  %val0 = call i64 @foo()
  %val1 = call i64 @foo()
  %val2 = call i64 @foo()
  %val3 = call i64 @foo()
  %val4 = call i64 @foo()
  %val5 = call i64 @foo()
  %val6 = call i64 @foo()
  %val7 = call i64 @foo()
  %val8 = call i64 @foo()
  %val9 = call i64 @foo()

  %swapped0 = call i64 @llvm.bswap.i64(i64 %val0)
  %swapped1 = call i64 @llvm.bswap.i64(i64 %val1)
  %swapped2 = call i64 @llvm.bswap.i64(i64 %val2)
  %swapped3 = call i64 @llvm.bswap.i64(i64 %val3)
  %swapped4 = call i64 @llvm.bswap.i64(i64 %val4)
  %swapped5 = call i64 @llvm.bswap.i64(i64 %val5)
  %swapped6 = call i64 @llvm.bswap.i64(i64 %val6)
  %swapped7 = call i64 @llvm.bswap.i64(i64 %val7)
  %swapped8 = call i64 @llvm.bswap.i64(i64 %val8)
  %swapped9 = call i64 @llvm.bswap.i64(i64 %val9)

  %ret1 = add i64 %swapped0, %swapped1
  %ret2 = add i64 %ret1, %swapped2
  %ret3 = add i64 %ret2, %swapped3
  %ret4 = add i64 %ret3, %swapped4
  %ret5 = add i64 %ret4, %swapped5
  %ret6 = add i64 %ret5, %swapped6
  %ret7 = add i64 %ret6, %swapped7
  %ret8 = add i64 %ret7, %swapped8
  %ret9 = add i64 %ret8, %swapped9

  ret i64 %ret9
}

declare i64 @foo()