; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM
; RUN: llc < %s -O0 -fast-isel-abort -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB
; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=armv7-apple-ios | FileCheck %s --check-prefix=ARM-STRICT-ALIGN
; RUN: llc < %s -O0 -arm-strict-align -relocation-model=dynamic-no-pic -mtriple=thumbv7-apple-ios | FileCheck %s --check-prefix=THUMB-STRICT-ALIGN

; Very basic fast-isel functionality.
define i32 @add(i32 %a, i32 %b) nounwind {
entry:
  %a.addr = alloca i32, align 4
  %b.addr = alloca i32, align 4
  store i32 %a, i32* %a.addr
  store i32 %b, i32* %b.addr
  %tmp = load i32* %a.addr
  %tmp1 = load i32* %b.addr
  %add = add nsw i32 %tmp, %tmp1
  ret i32 %add
}

; Check truncate to bool
define void @test1(i32 %tmp) nounwind {
entry:
%tobool = trunc i32 %tmp to i1
br i1 %tobool, label %if.then, label %if.end

if.then:                                          ; preds = %entry
call void @test1(i32 0)
br label %if.end

if.end:                                           ; preds = %if.then, %entry
ret void
; ARM: test1:
; ARM: tst r0, #1
; THUMB: test1:
; THUMB: tst.w r0, #1
}

; Check some simple operations with immediates
define void @test2(i32 %tmp, i32* %ptr) nounwind {
; THUMB: test2:
; ARM: test2:

b1:
  %a = add i32 %tmp, 4096
  store i32 %a, i32* %ptr
  br label %b2

; THUMB: add.w {{.*}} #4096
; ARM: add {{.*}} #4096

b2:
  %b = add i32 %tmp, 4095
  store i32 %b, i32* %ptr
  br label %b3
; THUMB: addw {{.*}} #4095
; ARM: movw {{.*}} #4095
; ARM: add

b3:
  %c = or i32 %tmp, 4
  store i32 %c, i32* %ptr
  ret void

; THUMB: orr {{.*}} #4
; ARM: orr {{.*}} #4
}

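; Check truncates and zero/sign extensions combined with loads and stores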
define void @test3(i32 %tmp, i32* %ptr1, i16* %ptr2, i8* %ptr3) nounwind {
; THUMB: test3:
; ARM: test3:

bb1:
  %a1 = trunc i32 %tmp to i16
  %a2 = trunc i16 %a1 to i8
  %a3 = trunc i8 %a2 to i1
  %a4 = zext i1 %a3 to i8
  store i8 %a4, i8* %ptr3
  %a5 = zext i8 %a4 to i16
  store i16 %a5, i16* %ptr2
  %a6 = zext i16 %a5 to i32
  store i32 %a6, i32* %ptr1
  br label %bb2

; THUMB: and
; THUMB: strb
; THUMB: uxtb
; THUMB: strh
; THUMB: uxth
; ARM: and
; ARM: strb
; ARM: uxtb
; ARM: strh
; ARM: uxth

bb2:
  %b1 = trunc i32 %tmp to i16
  %b2 = trunc i16 %b1 to i8
  store i8 %b2, i8* %ptr3
  %b3 = sext i8 %b2 to i16
  store i16 %b3, i16* %ptr2
  %b4 = sext i16 %b3 to i32
  store i32 %b4, i32* %ptr1
  br label %bb3

; THUMB: strb
; THUMB: sxtb
; THUMB: strh
; THUMB: sxth
; ARM: strb
; ARM: sxtb
; ARM: strh
; ARM: sxth

bb3:
  %c1 = load i8* %ptr3
  %c2 = load i16* %ptr2
  %c3 = load i32* %ptr1
  %c4 = zext i8 %c1 to i32
  %c5 = sext i16 %c2 to i32
  %c6 = add i32 %c4, %c5
  %c7 = sub i32 %c3, %c6
  store i32 %c7, i32* %ptr1
  ret void

; THUMB: ldrb
; THUMB: ldrh
; THUMB: uxtb
; THUMB: sxth
; THUMB: add
; THUMB: sub
; ARM: ldrb
; ARM: ldrh
; ARM: uxtb
; ARM: sxth
; ARM: add
; ARM: sub
}

; Check loads/stores with globals
@test4g = external global i32

define void @test4() {
  %a = load i32* @test4g
  %b = add i32 %a, 1
  store i32 %b, i32* @test4g
  ret void

; THUMB: movw r0, :lower16:L_test4g$non_lazy_ptr
; THUMB: movt r0, :upper16:L_test4g$non_lazy_ptr
; THUMB: ldr r0, [r0]
; THUMB: ldr r1, [r0]
; THUMB: adds r1, #1
; THUMB: str r1, [r0]

; ARM: movw r0, :lower16:L_test4g$non_lazy_ptr
; ARM: movt r0, :upper16:L_test4g$non_lazy_ptr
; ARM: ldr r0, [r0]
; ARM: ldr r1, [r0]
; ARM: add r1, r1, #1
; ARM: str r1, [r0]
}

; Check unaligned stores
%struct.anon = type <{ float }>

@a = common global %struct.anon* null, align 4

define void @unaligned_store(float %x, float %y) nounwind {
entry:
; ARM: @unaligned_store
; ARM: vmov r1, s0
; ARM: str r1, [r0]

; THUMB: @unaligned_store
; THUMB: vmov r1, s0
; THUMB: str r1, [r0]

  %add = fadd float %x, %y
  %0 = load %struct.anon** @a, align 4
  %x1 = getelementptr inbounds %struct.anon* %0, i32 0, i32 0
  store float %add, float* %x1, align 1
  ret void
}

; Doublewords require only word-alignment.
; rdar://10528060
%struct.anon.0 = type { double }

@foo_unpacked = common global %struct.anon.0 zeroinitializer, align 4

define void @test5(double %a, double %b) nounwind {
entry:
; ARM: @test5
; THUMB: @test5
  %add = fadd double %a, %b
  store double %add, double* getelementptr inbounds (%struct.anon.0* @foo_unpacked, i32 0, i32 0), align 4
; ARM: vstr d16, [r0]
; THUMB: vstr d16, [r0]
  ret void
}

; Check unaligned loads of floats
%class.TAlignTest = type <{ i16, float }>

define zeroext i1 @test6(%class.TAlignTest* %this) nounwind align 2 {
entry:
; ARM: @test6
; THUMB: @test6
  %0 = alloca %class.TAlignTest*, align 4
  store %class.TAlignTest* %this, %class.TAlignTest** %0, align 4
  %1 = load %class.TAlignTest** %0
  %2 = getelementptr inbounds %class.TAlignTest* %1, i32 0, i32 1
  %3 = load float* %2, align 1
  %4 = fcmp une float %3, 0.000000e+00
; ARM: ldr r0, [r0, #2]
; ARM: vmov s0, r0
; ARM: vcmpe.f32 s0, #0
; THUMB: ldr.w r0, [r0, #2]
; THUMB: vmov s0, r0
; THUMB: vcmpe.f32 s0, #0
  ret i1 %4
}

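; Check that urem by a power-of-2 constant is folded to a mask (and)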
; ARM: @urem_fold
; THUMB: @urem_fold
; ARM: and r0, r0, #31
; THUMB: and r0, r0, #31
define i32 @urem_fold(i32 %a) nounwind {
  %rem = urem i32 %a, 32
  ret i32 %rem
}

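; Check that the llvm.trap intrinsic is selected to a trap instruction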
define i32 @test7() noreturn nounwind  {
entry:
; ARM: @test7
; THUMB: @test7
; ARM: trap
; THUMB: trap
  tail call void @llvm.trap( )
  unreachable
}

declare void @llvm.trap() nounwind

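; Check that unaligned i16/i32 loads and stores are expanded into byte accesses under strict alignment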
define void @unaligned_i16_store(i16 %x, i16* %y) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i16_store
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb

; THUMB-STRICT-ALIGN: @unaligned_i16_store
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb

  store i16 %x, i16* %y, align 1
  ret void
}

define i16 @unaligned_i16_load(i16* %x) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i16_load
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb

; THUMB-STRICT-ALIGN: @unaligned_i16_load
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb

  %0 = load i16* %x, align 1
  ret i16 %0
}

define void @unaligned_i32_store(i32 %x, i32* %y) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i32_store
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb
; ARM-STRICT-ALIGN: strb

; THUMB-STRICT-ALIGN: @unaligned_i32_store
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb
; THUMB-STRICT-ALIGN: strb

  store i32 %x, i32* %y, align 1
  ret void
}

define i32 @unaligned_i32_load(i32* %x) nounwind {
entry:
; ARM-STRICT-ALIGN: @unaligned_i32_load
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb
; ARM-STRICT-ALIGN: ldrb

; THUMB-STRICT-ALIGN: @unaligned_i32_load
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb
; THUMB-STRICT-ALIGN: ldrb

  %0 = load i32* %x, align 1
  ret i32 %0
}