; RUN: llc -mtriple=aarch64-linux-gnu -o - %s

; Regression test for a crash in the ShrinkWrap pass not handling targets
; requiring a register scavenger.
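;
; There are no CHECK lines: the test passes as long as llc compiles @func
; below without crashing while choosing prologue/epilogue save and restore
; points (the job of the ShrinkWrap pass).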

%type1 = type { i32, i32, i32 }

@g1 = external unnamed_addr global i32, align 4
@g2 = external unnamed_addr global i1
@g3 = external unnamed_addr global [144 x i32], align 4
@g4 = external unnamed_addr constant [144 x i32], align 4
@g5 = external unnamed_addr constant [144 x i32], align 4
@g6 = external unnamed_addr constant [144 x i32], align 4
@g7 = external unnamed_addr constant [144 x i32], align 4
@g8 = external unnamed_addr constant [144 x i32], align 4
@g9 = external unnamed_addr constant [144 x i32], align 4
@g10 = external unnamed_addr constant [144 x i32], align 4
@g11 = external unnamed_addr global i32, align 4
@g12 = external unnamed_addr global [144 x [144 x i8]], align 1
@g13 = external unnamed_addr global %type1*, align 8
@g14 = external unnamed_addr global [144 x [144 x i8]], align 1
@g15 = external unnamed_addr global [144 x [144 x i8]], align 1
@g16 = external unnamed_addr global [144 x [144 x i8]], align 1
@g17 = external unnamed_addr global [62 x i32], align 4
@g18 = external unnamed_addr global i32, align 4
@g19 = external unnamed_addr constant [144 x i32], align 4
@g20 = external unnamed_addr global [144 x [144 x i8]], align 1
@g21 = external unnamed_addr global i32, align 4

declare fastcc i32 @foo()

declare fastcc i32 @bar()

define internal fastcc i32 @func(i32 %alpha, i32 %beta) {
entry:
  %v1 = alloca [2 x [11 x i32]], align 4
  %v2 = alloca [11 x i32], align 16
  %v3 = alloca [11 x i32], align 16
  switch i32 undef, label %if.end.9 [
    i32 4, label %if.then.6
    i32 3, label %if.then.2
  ]

if.then.2:
  %call3 = tail call fastcc i32 @bar()
  br label %cleanup

if.then.6:
  %call7 = tail call fastcc i32 @foo()
  unreachable

if.end.9:
  %tmp = load i32, i32* @g1, align 4
  %rem.i = urem i32 %tmp, 1000000
  %idxprom.1.i = zext i32 %rem.i to i64
  %tmp1 = load %type1*, %type1** @g13, align 8
  %v4 = getelementptr inbounds %type1, %type1* %tmp1, i64 %idxprom.1.i, i32 0
  %.b = load i1, i1* @g2, align 1
  %v5 = select i1 %.b, i32 2, i32 0
  %tmp2 = load i32, i32* @g18, align 4
  %tmp3 = load i32, i32* @g11, align 4
  %idxprom58 = sext i32 %tmp3 to i64
  %tmp4 = load i32, i32* @g21, align 4
  %idxprom69 = sext i32 %tmp4 to i64
  br label %for.body

for.body:
  %v6 = phi i32 [ 0, %if.end.9 ], [ %v7, %for.inc ]
  %a.0983 = phi i32 [ 1, %if.end.9 ], [ %a.1, %for.inc ]
  %arrayidx = getelementptr inbounds [62 x i32], [62 x i32]* @g17, i64 0, i64 undef
  %tmp5 = load i32, i32* %arrayidx, align 4
  br i1 undef, label %for.inc, label %if.else.51

if.else.51:
  %idxprom53 = sext i32 %tmp5 to i64
  %arrayidx54 = getelementptr inbounds [144 x i32], [144 x i32]* @g3, i64 0, i64 %idxprom53
  %tmp6 = load i32, i32* %arrayidx54, align 4
  switch i32 %tmp6, label %for.inc [
    i32 1, label %block.bb
    i32 10, label %block.bb.159
    i32 7, label %block.bb.75
    i32 8, label %block.bb.87
    i32 9, label %block.bb.147
    i32 12, label %block.bb.111
    i32 3, label %block.bb.123
    i32 4, label %block.bb.135
  ]

block.bb:
  %arrayidx56 = getelementptr inbounds [144 x i32], [144 x i32]* @g6, i64 0, i64 %idxprom53
  %tmp7 = load i32, i32* %arrayidx56, align 4
  %shr = ashr i32 %tmp7, %v5
  %add57 = add nsw i32 %shr, 0
  %arrayidx61 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g14, i64 0, i64 %idxprom53, i64 %idxprom58
  %tmp8 = load i8, i8* %arrayidx61, align 1
  %conv = zext i8 %tmp8 to i32
  %add62 = add nsw i32 %conv, %add57
  br label %for.inc

block.bb.75:
  %arrayidx78 = getelementptr inbounds [144 x i32], [144 x i32]* @g10, i64 0, i64 %idxprom53
  %tmp9 = load i32, i32* %arrayidx78, align 4
  %shr79 = ashr i32 %tmp9, %v5
  %add80 = add nsw i32 %shr79, 0
  %add86 = add nsw i32 0, %add80
  br label %for.inc

block.bb.87:
  %arrayidx90 = getelementptr inbounds [144 x i32], [144 x i32]* @g9, i64 0, i64 %idxprom53
  %tmp10 = load i32, i32* %arrayidx90, align 4
  %shr91 = ashr i32 %tmp10, 0
  %sub92 = sub nsw i32 0, %shr91
  %arrayidx96 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g15, i64 0, i64 %idxprom53, i64 %idxprom69
  %tmp11 = load i8, i8* %arrayidx96, align 1
  %conv97 = zext i8 %tmp11 to i32
  %sub98 = sub nsw i32 %sub92, %conv97
  br label %for.inc

block.bb.111:
  %arrayidx114 = getelementptr inbounds [144 x i32], [144 x i32]* @g19, i64 0, i64 %idxprom53
  %tmp12 = load i32, i32* %arrayidx114, align 4
  %shr115 = ashr i32 %tmp12, 0
  %sub116 = sub nsw i32 0, %shr115
  %arrayidx120 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g12, i64 0, i64 %idxprom53, i64 %idxprom69
  %tmp13 = load i8, i8* %arrayidx120, align 1
  %conv121 = zext i8 %tmp13 to i32
  %sub122 = sub nsw i32 %sub116, %conv121
  br label %for.inc

block.bb.123:
  %arrayidx126 = getelementptr inbounds [144 x i32], [144 x i32]* @g5, i64 0, i64 %idxprom53
  %tmp14 = load i32, i32* %arrayidx126, align 4
  %shr127 = ashr i32 %tmp14, %v5
  %add128 = add nsw i32 %shr127, 0
  %add134 = add nsw i32 0, %add128
  br label %for.inc

block.bb.135:
  %arrayidx138 = getelementptr inbounds [144 x i32], [144 x i32]* @g4, i64 0, i64 %idxprom53
  %tmp15 = load i32, i32* %arrayidx138, align 4
  %shr139 = ashr i32 %tmp15, 0
  %sub140 = sub nsw i32 0, %shr139
  %arrayidx144 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g20, i64 0, i64 %idxprom53, i64 %idxprom69
  %tmp16 = load i8, i8* %arrayidx144, align 1
  %conv145 = zext i8 %tmp16 to i32
  %sub146 = sub nsw i32 %sub140, %conv145
  br label %for.inc

block.bb.147:
  %arrayidx150 = getelementptr inbounds [144 x i32], [144 x i32]* @g8, i64 0, i64 %idxprom53
  %tmp17 = load i32, i32* %arrayidx150, align 4
  %shr151 = ashr i32 %tmp17, %v5
  %add152 = add nsw i32 %shr151, 0
  %arrayidx156 = getelementptr inbounds [144 x [144 x i8]], [144 x [144 x i8]]* @g16, i64 0, i64 %idxprom53, i64 %idxprom58
  %tmp18 = load i8, i8* %arrayidx156, align 1
  %conv157 = zext i8 %tmp18 to i32
  %add158 = add nsw i32 %conv157, %add152
  br label %for.inc

block.bb.159:
  %sub160 = add nsw i32 %v6, -450
  %arrayidx162 = getelementptr inbounds [144 x i32], [144 x i32]* @g7, i64 0, i64 %idxprom53
  %tmp19 = load i32, i32* %arrayidx162, align 4
  %shr163 = ashr i32 %tmp19, 0
  %sub164 = sub nsw i32 %sub160, %shr163
  %sub170 = sub nsw i32 %sub164, 0
  br label %for.inc

for.inc:
  %v7 = phi i32 [ %v6, %for.body ], [ %v6, %if.else.51 ], [ %sub170, %block.bb.159 ], [ %add158, %block.bb.147 ], [ %sub146, %block.bb.135 ], [ %add134, %block.bb.123 ], [ %sub122, %block.bb.111 ], [ %sub98, %block.bb.87 ], [ %add86, %block.bb.75 ], [ %add62, %block.bb ]
  %a.1 = phi i32 [ %a.0983, %for.body ], [ undef, %if.else.51 ], [ undef, %block.bb.159 ], [ undef, %block.bb.147 ], [ undef, %block.bb.135 ], [ undef, %block.bb.123 ], [ undef, %block.bb.111 ], [ undef, %block.bb.87 ], [ undef, %block.bb.75 ], [ undef, %block.bb ]
  %cmp48 = icmp sgt i32 %a.1, %tmp2
  br i1 %cmp48, label %for.end, label %for.body

for.end:
  store i32 %tmp, i32* %v4, align 4
  %hold_hash.i.7 = getelementptr inbounds %type1, %type1* %tmp1, i64 %idxprom.1.i, i32 1
  store i32 0, i32* %hold_hash.i.7, align 4
  br label %cleanup

cleanup:
  %retval.0 = phi i32 [ %call3, %if.then.2 ], [ undef, %for.end ]
  ret i32 %retval.0
}