; RUN: llc -mtriple=thumb-eabi < %s -o - | FileCheck %s
; Check that stack addresses are generated using a single ADD
; (each alloca slot is reachable from sp with one add/mov, so no
; extra base register should be materialized).
define void @test1(i8** %p) {
  %x = alloca i8, align 1
  %y = alloca i8, align 1
  %z = alloca i8, align 1
; CHECK: add r1, sp, #8
; CHECK: str r1, [r0]
  store i8* %x, i8** %p, align 4
; CHECK: add r1, sp, #4
; CHECK: str r1, [r0]
  store i8* %y, i8** %p, align 4
; CHECK: mov r1, sp
; CHECK: str r1, [r0]
  store i8* %z, i8** %p, align 4
  ret void
}

; Stack offsets larger than 1020 still need two ADDs
; (Thumb1 "add rd, sp, #imm" encodes at most #1020, so the remainder
; is added with a separate adds).
define void @test2([1024 x i8]** %p) {
  %arr1 = alloca [1024 x i8], align 1
  %arr2 = alloca [1024 x i8], align 1
; CHECK: add r1, sp, #1020
; CHECK: adds r1, #4
; CHECK: str r1, [r0]
  store [1024 x i8]* %arr1, [1024 x i8]** %p, align 4
; CHECK: mov r1, sp
; CHECK: str r1, [r0]
  store [1024 x i8]* %arr2, [1024 x i8]** %p, align 4
  ret void
}

; If possible stack-based ldrb/ldrh are widened to use SP-based addressing
; NOTE(review): attribute group #0 is referenced here but not defined in this
; chunk — assumed to be declared elsewhere in the file; confirm when merging.
define i32 @test3() #0 {
  %x = alloca i8, align 1
  %y = alloca i8, align 1
; CHECK: ldr r0, [sp]
  %1 = load i8, i8* %x, align 1
; CHECK: ldr r1, [sp, #4]
  %2 = load i8, i8* %y, align 1
  %3 = add nsw i8 %1, %2
  %4 = zext i8 %3 to i32
  ret i32 %4
}

; Same widening check as @test3, but for i16 (ldrh) loads.
define i32 @test4() #0 {
  %x = alloca i16, align 2
  %y = alloca i16, align 2
; CHECK: ldr r0, [sp]
  %1 = load i16, i16* %x, align 2
; CHECK: ldr r1, [sp, #4]
  %2 = load i16, i16* %y, align 2
  %3 = add nsw i16 %1, %2
  %4 = zext i16 %3 to i32
  ret i32 %4
}

; Don't widen if the value needs to be zero-extended
; (the zeroext return requires the byte load to stay ldrb).
define zeroext i8 @test5() {
  %x = alloca i8, align 1
; CHECK: mov r0, sp
; CHECK: ldrb r0, [r0]
  %1 = load i8, i8* %x, align 1
  ret i8 %1
}

; Same as @test5, but for a zero-extended i16 (ldrh must be kept).
define zeroext i16 @test6() {
  %x = alloca i16, align 2
; CHECK: mov r0, sp
; CHECK: ldrh r0, [r0]
  %1 = load i16, i16* %x, align 2
  ret i16 %1
}

; Accessing the bottom of a large array shouldn't require materializing a base
; (small offsets into the array are still in range of sp-relative str).
define void @test7() {
  %arr = alloca [200 x i32], align 4

  ; CHECK: movs [[REG:r[0-9]+]], #1
  ; CHECK: str [[REG]], [sp, #4]
  %arrayidx = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 1
  store i32 1, i32* %arrayidx, align 4

  ; CHECK: str [[REG]], [sp, #16]
  %arrayidx1 = getelementptr inbounds [200 x i32], [200 x i32]* %arr, i32 0, i32 4
  store i32 1, i32* %arrayidx1, align 4

  ret void
}

; Check that loads/stores with out-of-range offsets are handled correctly
; (sp-relative str encodes offsets up to #1020; beyond that a scratch
; base register must be materialized).
define void @test8() {
  %arr3 = alloca [224 x i32], align 4
  %arr2 = alloca [224 x i32], align 4
  %arr1 = alloca [224 x i32], align 4

; CHECK: movs [[REG:r[0-9]+]], #1
; CHECK: str [[REG]], [sp]
  %arr1idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr1, i32 0, i32 0
  store i32 1, i32* %arr1idx1, align 4

; Offset in range for sp-based store, but not for non-sp-based store
; CHECK: str [[REG]], [sp, #128]
  %arr1idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr1, i32 0, i32 32
  store i32 1, i32* %arr1idx2, align 4

; CHECK: str [[REG]], [sp, #896]
  %arr2idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr2, i32 0, i32 0
  store i32 1, i32* %arr2idx1, align 4

; %arr2 is in range, but this element of it is not
; CHECK: str [[REG]], [{{r[0-9]+}}]
  %arr2idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr2, i32 0, i32 32
  store i32 1, i32* %arr2idx2, align 4

; %arr3 is not in range
; CHECK: str [[REG]], [{{r[0-9]+}}]
  %arr3idx1 = getelementptr inbounds [224 x i32], [224 x i32]* %arr3, i32 0, i32 0
  store i32 1, i32* %arr3idx1, align 4

; CHECK: str [[REG]], [{{r[0-9]+}}]
  %arr3idx2 = getelementptr inbounds [224 x i32], [224 x i32]* %arr3, i32 0, i32 32
  store i32 1, i32* %arr3idx2, align 4

  ret void
}