; RUN: llc -verify-machineinstrs < %s -mtriple=aarch64-none-linux-gnu | FileCheck %s

@var32 = global i32 0
@var64 = global i64 0

define void @test_extendb32(i8 %var) {
; CHECK-LABEL: test_extendb32:

  %sxt32 = sext i8 %var to i32
  store volatile i32 %sxt32, i32* @var32
; CHECK: sxtb {{w[0-9]+}}, {{w[0-9]+}}

; N.b. this doesn't actually produce a bitfield instruction at the
; moment, but it's still a good test to have and the semantics are
; correct.
  %uxt32 = zext i8 %var to i32
  store volatile i32 %uxt32, i32* @var32
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xff
  ret void
}
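
; Illustrative sketch (not part of the original test; the function name is
; hypothetical): once the extended value is shifted so the field no longer
; starts at bit 0, the same zext pattern is expected to select a genuine
; bitfield extract, roughly "ubfx w0, w0, #1, #7".
define i32 @example_zext_ubfx(i8 %var) {
  %ext = zext i8 %var to i32
  %shifted = lshr i32 %ext, 1
  ret i32 %shifted
}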

define void @test_extendb64(i8 %var) {
; CHECK-LABEL: test_extendb64:

  %sxt64 = sext i8 %var to i64
  store volatile i64 %sxt64, i64* @var64
; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}

; N.b. this doesn't actually produce a bitfield instruction at the
; moment, but it's still a good test to have and the semantics are
; correct.
  %uxt64 = zext i8 %var to i64
  store volatile i64 %uxt64, i64* @var64
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xff
  ret void
}

define void @test_extendh32(i16 %var) {
; CHECK-LABEL: test_extendh32:

  %sxt32 = sext i16 %var to i32
  store volatile i32 %sxt32, i32* @var32
; CHECK: sxth {{w[0-9]+}}, {{w[0-9]+}}

; N.b. this doesn't actually produce a bitfield instruction at the
; moment, but it's still a good test to have and the semantics are
; correct.
  %uxt32 = zext i16 %var to i32
  store volatile i32 %uxt32, i32* @var32
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xffff
  ret void
}

define void @test_extendh64(i16 %var) {
; CHECK-LABEL: test_extendh64:

  %sxt64 = sext i16 %var to i64
  store volatile i64 %sxt64, i64* @var64
; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}

; N.b. this doesn't actually produce a bitfield instruction at the
; moment, but it's still a good test to have and the semantics are
; correct.
  %uxt64 = zext i16 %var to i64
  store volatile i64 %uxt64, i64* @var64
; CHECK: and {{w[0-9]+}}, {{w[0-9]+}}, #0xffff
  ret void
}

define void @test_extendw(i32 %var) {
; CHECK-LABEL: test_extendw:

  %sxt64 = sext i32 %var to i64
  store volatile i64 %sxt64, i64* @var64
; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}

  %uxt64 = zext i32 %var to i64
  store volatile i64 %uxt64, i64* @var64
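; No explicit mask is needed here: any write to a W register implicitly
; zeroes the upper 32 bits of the corresponding X register, so a plain
; 32-bit mov is enough.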
; CHECK: mov {{w[0-9]+}}, w0
  ret void
}

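; Constant shifts are relevant here because their immediate forms are
; aliases of the bitfield-move instructions (asr == sbfm, lsr/lsl == ubfm).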
define void @test_shifts(i32 %val32, i64 %val64) {
; CHECK-LABEL: test_shifts:

  %shift1 = ashr i32 %val32, 31
  store volatile i32 %shift1, i32* @var32
; CHECK: asr {{w[0-9]+}}, {{w[0-9]+}}, #31

  %shift2 = lshr i32 %val32, 8
  store volatile i32 %shift2, i32* @var32
; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #8

  %shift3 = shl i32 %val32, 1
  store volatile i32 %shift3, i32* @var32
; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #1

  %shift4 = ashr i64 %val64, 31
  store volatile i64 %shift4, i64* @var64
; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #31

  %shift5 = lshr i64 %val64, 8
  store volatile i64 %shift5, i64* @var64
; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #8

  %shift6 = shl i64 %val64, 63
  store volatile i64 %shift6, i64* @var64
; CHECK: lsl {{x[0-9]+}}, {{x[0-9]+}}, #63

  %shift7 = ashr i64 %val64, 63
  store volatile i64 %shift7, i64* @var64
; CHECK: asr {{x[0-9]+}}, {{x[0-9]+}}, #63

  %shift8 = lshr i64 %val64, 63
  store volatile i64 %shift8, i64* @var64
; CHECK: lsr {{x[0-9]+}}, {{x[0-9]+}}, #63

  %shift9 = lshr i32 %val32, 31
  store volatile i32 %shift9, i32* @var32
; CHECK: lsr {{w[0-9]+}}, {{w[0-9]+}}, #31

  %shift10 = shl i32 %val32, 31
  store volatile i32 %shift10, i32* @var32
; CHECK: lsl {{w[0-9]+}}, {{w[0-9]+}}, #31

  ret void
}

; LLVM can also produce in-register extensions that take place entirely
; within 64-bit registers.
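; (sxtb/sxth/sxtw with a 64-bit destination are themselves aliases of sbfm.)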
define void @test_sext_inreg_64(i64 %in) {
; CHECK-LABEL: test_sext_inreg_64:

; i1 doesn't have an official alias, but crops up and is handled by
; the bitfield ops.
  %trunc_i1 = trunc i64 %in to i1
  %sext_i1 = sext i1 %trunc_i1 to i64
  store volatile i64 %sext_i1, i64* @var64
; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #1
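; An sbfx with lsb=0 and width=1 replicates bit 0 across the whole register,
; giving 0 or -1: exactly a sign extension from i1.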

  %trunc_i8 = trunc i64 %in to i8
  %sext_i8 = sext i8 %trunc_i8 to i64
  store volatile i64 %sext_i8, i64* @var64
; CHECK: sxtb {{x[0-9]+}}, {{w[0-9]+}}

  %trunc_i16 = trunc i64 %in to i16
  %sext_i16 = sext i16 %trunc_i16 to i64
  store volatile i64 %sext_i16, i64* @var64
; CHECK: sxth {{x[0-9]+}}, {{w[0-9]+}}

  %trunc_i32 = trunc i64 %in to i32
  %sext_i32 = sext i32 %trunc_i32 to i64
  store volatile i64 %sext_i32, i64* @var64
; CHECK: sxtw {{x[0-9]+}}, {{w[0-9]+}}
  ret void
}

; These instructions don't actually select to official bitfield
; operations, but it's important that we select them somehow:
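; (In practice they become AND with a logical immediate, as the CHECK lines
; below verify.)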
define void @test_zext_inreg_64(i64 %in) {
; CHECK-LABEL: test_zext_inreg_64:

  %trunc_i8 = trunc i64 %in to i8
  %zext_i8 = zext i8 %trunc_i8 to i64
  store volatile i64 %zext_i8, i64* @var64
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xff

  %trunc_i16 = trunc i64 %in to i16
  %zext_i16 = zext i16 %trunc_i16 to i64
  store volatile i64 %zext_i16, i64* @var64
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffff

  %trunc_i32 = trunc i64 %in to i32
  %zext_i32 = zext i32 %trunc_i32 to i64
  store volatile i64 %zext_i32, i64* @var64
; CHECK: and {{x[0-9]+}}, {{x[0-9]+}}, #0xffffffff

  ret void
}

define i64 @test_sext_inreg_from_32(i32 %in) {
; CHECK-LABEL: test_sext_inreg_from_32:

  %small = trunc i32 %in to i1
  %ext = sext i1 %small to i64

  ; Different registers are, of course, possible, though suboptimal. This is
  ; making sure that a 64-bit "(sext_inreg (anyext GPR32), i1)" uses the
  ; 64-bit sbfx rather than just the 32-bit one.
; CHECK: sbfx x0, x0, #0, #1
  ret i64 %ext
}
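
; Illustrative sketch for contrast (not part of the original test; the
; function name is hypothetical): the same i1 extension kept at 32 bits
; should use the w-register form, roughly "sbfx w0, w0, #0, #1".
define i32 @example_sext_inreg_32(i32 %in) {
  %small = trunc i32 %in to i1
  %ext = sext i1 %small to i32
  ret i32 %ext
}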

define i32 @test_ubfx32(i32* %addr) {
; CHECK-LABEL: test_ubfx32:
; CHECK: ubfx {{w[0-9]+}}, {{w[0-9]+}}, #23, #3
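; The lshr/and pair below maps directly onto ubfx: lsb = 23 (the shift
; amount) and width = 3 (the mask 7 covers three bits).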

  %fields = load i32, i32* %addr
  %shifted = lshr i32 %fields, 23
  %masked = and i32 %shifted, 7
  ret i32 %masked
}

define i64 @test_ubfx64(i64* %addr) {
; CHECK-LABEL: test_ubfx64:
; CHECK: ubfx {{x[0-9]+}}, {{x[0-9]+}}, #25, #10
  %fields = load i64, i64* %addr
  %shifted = lshr i64 %fields, 25
  %masked = and i64 %shifted, 1023
  ret i64 %masked
}

define i32 @test_sbfx32(i32* %addr) {
; CHECK-LABEL: test_sbfx32:
; CHECK: sbfx {{w[0-9]+}}, {{w[0-9]+}}, #6, #3
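; The shl/ashr pair below maps onto sbfx: width = 32 - 29 = 3 bits and
; lsb = 29 - 23 = 6.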

  %fields = load i32, i32* %addr
  %shifted = shl i32 %fields, 23
  %extended = ashr i32 %shifted, 29
  ret i32 %extended
}

define i64 @test_sbfx64(i64* %addr) {
; CHECK-LABEL: test_sbfx64:
; CHECK: sbfx {{x[0-9]+}}, {{x[0-9]+}}, #0, #63
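; Here shl 1 followed by ashr 1 sign-extends from bit 62, i.e. an sbfx with
; lsb = 0 and width = 63.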

  %fields = load i64, i64* %addr
  %shifted = shl i64 %fields, 1
  %extended = ashr i64 %shifted, 1
  ret i64 %extended
}