; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 | FileCheck %s -check-prefix=X32
; RUN: llc < %s -O3 -march=x86-64 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X64
; RUN: llc < %s -O3 -march=x86 -mcpu=core2 -addr-sink-using-gep=1 | FileCheck %s -check-prefix=X32

; @simple is the most basic chain of address induction variables. Chaining
; saves at least one register and avoids complex addressing and setup
; code.
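;
; A rough C equivalent of the loop below (an illustrative sketch only; the
; variable names mirror the IR arguments):
;   int s = 0, *p = a;
;   do {
;     s += p[0] + p[x] + p[2*x] + p[3*x];
;     p += 4*x;
;   } while (p != b);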
;
; X64: @simple
; %x * 4
; X64: shlq $2
; no other address computation in the preheader
; X64-NEXT: xorl
; X64-NEXT: .align
; X64: %loop
; no complex address modes
; X64-NOT: (%{{[^)]+}},%{{[^)]+}},
;
; X32: @simple
; no expensive address computation in the preheader
; X32-NOT: imul
; X32: %loop
; no complex address modes
; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
define i32 @simple(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @user is not currently chained because the IV is live across memory ops.
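;
; Roughly, in C (an illustrative sketch only): the same reduction as
; @simple, plus a store through the current IV, which is what keeps the IV
; live across the memory ops:
;   do {
;     s += p[0] + p[x] + p[2*x] + p[3*x];
;     p[0] = s;
;     p += 4*x;
;   } while (p != b);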
;
; X64: @user
; X64: shlq $4
; X64: lea
; X64: lea
; X64: %loop
; complex address modes
; X64: (%{{[^)]+}},%{{[^)]+}},
;
; X32: @user
; expensive address computation in the preheader
; X32: imul
; X32: %loop
; complex address modes
; X32: (%{{[^)]+}},%{{[^)]+}},
define i32 @user(i32* %a, i32* %b, i32 %x) nounwind {
entry:
  br label %loop
loop:
  %iv = phi i32* [ %a, %entry ], [ %iv4, %loop ]
  %s = phi i32 [ 0, %entry ], [ %s4, %loop ]
  %v = load i32* %iv
  %iv1 = getelementptr inbounds i32* %iv, i32 %x
  %v1 = load i32* %iv1
  %iv2 = getelementptr inbounds i32* %iv1, i32 %x
  %v2 = load i32* %iv2
  %iv3 = getelementptr inbounds i32* %iv2, i32 %x
  %v3 = load i32* %iv3
  %s1 = add i32 %s, %v
  %s2 = add i32 %s1, %v1
  %s3 = add i32 %s2, %v2
  %s4 = add i32 %s3, %v3
  %iv4 = getelementptr inbounds i32* %iv3, i32 %x
  store i32 %s4, i32* %iv
  %cmp = icmp eq i32* %iv4, %b
  br i1 %cmp, label %exit, label %loop
exit:
  ret i32 %s4
}

; @extrastride is a slightly more interesting case of a single
; complete chain with multiple strides. The test case IR is what LSR
; used to do, and exactly what we don't want to do. LSR's new IV
; chaining feature should now undo the damage.
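;
; Roughly, in C (an illustrative sketch only, with s = %main_stride):
;   for (int i = 0; i != z; ++i) {
;     *res = *(int *)main + *(int *)(main + s) + *(int *)(main + 2*s)
;          + *(int *)(main + 3*s) + *(int *)(main + 4*s);
;     main += 5*s + x; /* total IV stride */
;     res += y;
;   }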
;
; X64: extrastride:
; We currently don't handle this on X64 because the sexts cause
; strange increment expressions like this:
; IV + ((sext i32 (2 * %s) to i64) + (-1 * (sext i32 %s to i64)))
;
; X32: extrastride:
; no spills in the preheader
; X32-NOT: mov{{.*}}(%esp){{$}}
; X32: %for.body{{$}}
; no complex address modes
; X32-NOT: (%{{[^)]+}},%{{[^)]+}},
; no reloads
; X32-NOT: (%esp)
define void @extrastride(i8* nocapture %main, i32 %main_stride, i32* nocapture %res, i32 %x, i32 %y, i32 %z) nounwind {
entry:
  %cmp8 = icmp eq i32 %z, 0
  br i1 %cmp8, label %for.end, label %for.body.lr.ph

for.body.lr.ph:                                   ; preds = %entry
  %add.ptr.sum = shl i32 %main_stride, 1 ; s*2
  %add.ptr1.sum = add i32 %add.ptr.sum, %main_stride ; s*3
  %add.ptr2.sum = add i32 %x, %main_stride ; s + x
  %add.ptr4.sum = shl i32 %main_stride, 2 ; s*4
  %add.ptr3.sum = add i32 %add.ptr2.sum, %add.ptr4.sum ; total IV stride = s*5+x
  br label %for.body

for.body:                                         ; preds = %for.body.lr.ph, %for.body
  %main.addr.011 = phi i8* [ %main, %for.body.lr.ph ], [ %add.ptr6, %for.body ]
  %i.010 = phi i32 [ 0, %for.body.lr.ph ], [ %inc, %for.body ]
  %res.addr.09 = phi i32* [ %res, %for.body.lr.ph ], [ %add.ptr7, %for.body ]
  %0 = bitcast i8* %main.addr.011 to i32*
  %1 = load i32* %0, align 4
  %add.ptr = getelementptr inbounds i8* %main.addr.011, i32 %main_stride
  %2 = bitcast i8* %add.ptr to i32*
  %3 = load i32* %2, align 4
  %add.ptr1 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr.sum
  %4 = bitcast i8* %add.ptr1 to i32*
  %5 = load i32* %4, align 4
  %add.ptr2 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr1.sum
  %6 = bitcast i8* %add.ptr2 to i32*
  %7 = load i32* %6, align 4
  %add.ptr3 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr4.sum
  %8 = bitcast i8* %add.ptr3 to i32*
  %9 = load i32* %8, align 4
  %add = add i32 %3, %1
  %add4 = add i32 %add, %5
  %add5 = add i32 %add4, %7
  %add6 = add i32 %add5, %9
  store i32 %add6, i32* %res.addr.09, align 4
  %add.ptr6 = getelementptr inbounds i8* %main.addr.011, i32 %add.ptr3.sum
  %add.ptr7 = getelementptr inbounds i32* %res.addr.09, i32 %y
  %inc = add i32 %i.010, 1
  %cmp = icmp eq i32 %inc, %z
  br i1 %cmp, label %for.end, label %for.body

for.end:                                          ; preds = %for.body, %entry
  ret void
}

; @foldedidx is an unrolled variant of this loop:
;  for (unsigned long i = 0; i < len; i += s) {
;    c[i] = a[i] + b[i];
;  }
; where 's' can be folded into the addressing mode.
; Consequently, we should *not* form any chains.
;
; X64: foldedidx:
; X64: movzbl -3(
;
; X32: foldedidx:
; X32: movzbl -3(
define void @foldedidx(i8* nocapture %a, i8* nocapture %b, i8* nocapture %c) nounwind ssp {
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %i.07 = phi i32 [ 0, %entry ], [ %inc.3, %for.body ]
  %arrayidx = getelementptr inbounds i8* %a, i32 %i.07
  %0 = load i8* %arrayidx, align 1
  %conv5 = zext i8 %0 to i32
  %arrayidx1 = getelementptr inbounds i8* %b, i32 %i.07
  %1 = load i8* %arrayidx1, align 1
  %conv26 = zext i8 %1 to i32
  %add = add nsw i32 %conv26, %conv5
  %conv3 = trunc i32 %add to i8
  %arrayidx4 = getelementptr inbounds i8* %c, i32 %i.07
  store i8 %conv3, i8* %arrayidx4, align 1
  %inc1 = or i32 %i.07, 1
  %arrayidx.1 = getelementptr inbounds i8* %a, i32 %inc1
  %2 = load i8* %arrayidx.1, align 1
  %conv5.1 = zext i8 %2 to i32
  %arrayidx1.1 = getelementptr inbounds i8* %b, i32 %inc1
  %3 = load i8* %arrayidx1.1, align 1
  %conv26.1 = zext i8 %3 to i32
  %add.1 = add nsw i32 %conv26.1, %conv5.1
  %conv3.1 = trunc i32 %add.1 to i8
  %arrayidx4.1 = getelementptr inbounds i8* %c, i32 %inc1
  store i8 %conv3.1, i8* %arrayidx4.1, align 1
  %inc.12 = or i32 %i.07, 2
  %arrayidx.2 = getelementptr inbounds i8* %a, i32 %inc.12
  %4 = load i8* %arrayidx.2, align 1
  %conv5.2 = zext i8 %4 to i32
  %arrayidx1.2 = getelementptr inbounds i8* %b, i32 %inc.12
  %5 = load i8* %arrayidx1.2, align 1
  %conv26.2 = zext i8 %5 to i32
  %add.2 = add nsw i32 %conv26.2, %conv5.2
  %conv3.2 = trunc i32 %add.2 to i8
  %arrayidx4.2 = getelementptr inbounds i8* %c, i32 %inc.12
  store i8 %conv3.2, i8* %arrayidx4.2, align 1
  %inc.23 = or i32 %i.07, 3
  %arrayidx.3 = getelementptr inbounds i8* %a, i32 %inc.23
  %6 = load i8* %arrayidx.3, align 1
  %conv5.3 = zext i8 %6 to i32
  %arrayidx1.3 = getelementptr inbounds i8* %b, i32 %inc.23
  %7 = load i8* %arrayidx1.3, align 1
  %conv26.3 = zext i8 %7 to i32
  %add.3 = add nsw i32 %conv26.3, %conv5.3
  %conv3.3 = trunc i32 %add.3 to i8
  %arrayidx4.3 = getelementptr inbounds i8* %c, i32 %inc.23
  store i8 %conv3.3, i8* %arrayidx4.3, align 1
  %inc.3 = add nsw i32 %i.07, 4
  %exitcond.3 = icmp eq i32 %inc.3, 400
  br i1 %exitcond.3, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; @multioper tests instructions with multiple IV user operands. We
; should be able to chain them independently of each other.
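;
; Roughly, in C (an illustrative sketch only):
;   int i = 0, *p = a;
;   do {
;     p[0] = i; p[1] = i + 1; p[2] = i + 2; p[3] = i + 3;
;     i += 4; p += 4;
;   } while (i < n);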
;
; X64: @multioper
; X64: %for.body
; X64: movl %{{.*}},4)
; X64-NEXT: leal 1(
; X64-NEXT: movl %{{.*}},4)
; X64-NEXT: leal 2(
; X64-NEXT: movl %{{.*}},4)
; X64-NEXT: leal 3(
; X64-NEXT: movl %{{.*}},4)
;
; X32: @multioper
; X32: %for.body
; X32: movl %{{.*}},4)
; X32-NEXT: leal 1(
; X32-NEXT: movl %{{.*}},4)
; X32-NEXT: leal 2(
; X32-NEXT: movl %{{.*}},4)
; X32-NEXT: leal 3(
; X32-NEXT: movl %{{.*}},4)
define void @multioper(i32* %a, i32 %n) nounwind {
entry:
  br label %for.body

for.body:
  %p = phi i32* [ %p.next, %for.body ], [ %a, %entry ]
  %i = phi i32 [ %inc4, %for.body ], [ 0, %entry ]
  store i32 %i, i32* %p, align 4
  %inc1 = or i32 %i, 1
  %add.ptr.i1 = getelementptr inbounds i32* %p, i32 1
  store i32 %inc1, i32* %add.ptr.i1, align 4
  %inc2 = add nsw i32 %i, 2
  %add.ptr.i2 = getelementptr inbounds i32* %p, i32 2
  store i32 %inc2, i32* %add.ptr.i2, align 4
  %inc3 = add nsw i32 %i, 3
  %add.ptr.i3 = getelementptr inbounds i32* %p, i32 3
  store i32 %inc3, i32* %add.ptr.i3, align 4
  %p.next = getelementptr inbounds i32* %p, i32 4
  %inc4 = add nsw i32 %i, 4
  %cmp = icmp slt i32 %inc4, %n
  br i1 %cmp, label %for.body, label %exit

exit:
  ret void
}

; @testCmpZero has an ICmpZero LSR use that should not be hidden from
; LSR. Profitable chains should have more than one nonzero increment
; anyway.
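;
; Roughly, in C (an illustrative sketch only; note that %src/%dst are named
; opposite to the data flow: the loop reads from %dst+%dstidx and writes to
; %src+%srcidx):
;   char *d = src + srcidx, *s = dst + dstidx, *end = src + srcidx + len;
;   do { *d++ = (char)*(int *)s; s += 4; } while (d != end);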
;
; X32: @testCmpZero
; X32: %for.body82.us
; X32: dec
; X32: jne
define void @testCmpZero(i8* %src, i8* %dst, i32 %srcidx, i32 %dstidx, i32 %len) nounwind ssp {
entry:
  %dest0 = getelementptr inbounds i8* %src, i32 %srcidx
  %source0 = getelementptr inbounds i8* %dst, i32 %dstidx
  %add.ptr79.us.sum = add i32 %srcidx, %len
  %lftr.limit = getelementptr i8* %src, i32 %add.ptr79.us.sum
  br label %for.body82.us

for.body82.us:
  %dest = phi i8* [ %dest0, %entry ], [ %incdec.ptr91.us, %for.body82.us ]
  %source = phi i8* [ %source0, %entry ], [ %add.ptr83.us, %for.body82.us ]
  %0 = bitcast i8* %source to i32*
  %1 = load i32* %0, align 4
  %trunc = trunc i32 %1 to i8
  %add.ptr83.us = getelementptr inbounds i8* %source, i32 4
  %incdec.ptr91.us = getelementptr inbounds i8* %dest, i32 1
  store i8 %trunc, i8* %dest, align 1
  %exitcond = icmp eq i8* %incdec.ptr91.us, %lftr.limit
  br i1 %exitcond, label %return, label %for.body82.us

return:
  ret void
}