; XFAIL: *
; RUN: opt < %s -newgvn -S | FileCheck %s

%struct.A = type { i32 (...)** }
@_ZTV1A = available_externally unnamed_addr constant [3 x i8*] [i8* null, i8* bitcast (i8** @_ZTI1A to i8*), i8* bitcast (void (%struct.A*)* @_ZN1A3fooEv to i8*)], align 8
@_ZTI1A = external constant i8*

@unknownPtr = external global i8

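; All three loads below share the !invariant.group of the store, so the value
; loaded through %ptr is known to still be 42 even across the call to @foo.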
; CHECK-LABEL: define i8 @simple() {
define i8 @simple() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    call void @foo(i8* %ptr)

    %a = load i8, i8* %ptr, !invariant.group !0
    %b = load i8, i8* %ptr, !invariant.group !0
    %c = load i8, i8* %ptr, !invariant.group !0
; CHECK: ret i8 42
    ret i8 %a
}

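; Laundering %ptr only affects loads done through the laundered pointer;
; the load through the original %ptr should still fold to 42.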
; CHECK-LABEL: define i8 @optimizable1() {
define i8 @optimizable1() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
    %a = load i8, i8* %ptr, !invariant.group !0

    call void @foo(i8* %ptr2) ; call to use %ptr2
; CHECK: ret i8 42
    ret i8 %a
}

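; The intervening plain store of 13 must not be reused via !invariant.group,
; but the tagged load after the second call still sees the original 42.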
; CHECK-LABEL: define i8 @optimizable2() {
define i8 @optimizable2() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    call void @foo(i8* %ptr)

    store i8 13, i8* %ptr ; can't use this store with invariant.group
    %a = load i8, i8* %ptr
    call void @bar(i8 %a) ; call to use %a

    call void @foo(i8* %ptr)
    %b = load i8, i8* %ptr, !invariant.group !0

; CHECK: ret i8 42
    ret i8 %b
}

; CHECK-LABEL: define i1 @proveEqualityForStrip(
define i1 @proveEqualityForStrip(i8* %a) {
; FIXME: The first call could also be removed by GVN. Right now
; DCE removes it. The second call is CSE'd with the first one.
; CHECK: %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
  %b1 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
; CHECK-NOT: llvm.strip.invariant.group
  %b2 = call i8* @llvm.strip.invariant.group.p0i8(i8* %a)
  %r = icmp eq i8* %b1, %b2
; CHECK: ret i1 true
  ret i1 %r
}

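; Here the store carries no !invariant.group, so the tagged load cannot be
; forwarded from it across the call to @foo.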
; CHECK-LABEL: define i8 @unoptimizable1() {
define i8 @unoptimizable1() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr
    call void @foo(i8* %ptr)
    %a = load i8, i8* %ptr, !invariant.group !0
; CHECK: ret i8 %a
    ret i8 %a
}

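; The vtable loads are tagged with !invariant.group, so all the indirect calls
; below should be devirtualized to direct calls to @_ZN1A3fooEv.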
; CHECK-LABEL: define void @indirectLoads() {
define void @indirectLoads() {
entry:
  %a = alloca %struct.A*, align 8
  %0 = bitcast %struct.A** %a to i8*

  %call = call i8* @getPointer(i8* null)
  %1 = bitcast i8* %call to %struct.A*
  call void @_ZN1AC1Ev(%struct.A* %1)
  %2 = bitcast %struct.A* %1 to i8***

; CHECK: %vtable = load {{.*}} !invariant.group
  %vtable = load i8**, i8*** %2, align 8, !invariant.group !0
  %cmp.vtables = icmp eq i8** %vtable, getelementptr inbounds ([3 x i8*], [3 x i8*]* @_ZTV1A, i64 0, i64 2)
  call void @llvm.assume(i1 %cmp.vtables)

  store %struct.A* %1, %struct.A** %a, align 8
  %3 = load %struct.A*, %struct.A** %a, align 8
  %4 = bitcast %struct.A* %3 to void (%struct.A*)***

; CHECK: call void @_ZN1A3fooEv(
  %vtable1 = load void (%struct.A*)**, void (%struct.A*)*** %4, align 8, !invariant.group !0
  %vfn = getelementptr inbounds void (%struct.A*)*, void (%struct.A*)** %vtable1, i64 0
  %5 = load void (%struct.A*)*, void (%struct.A*)** %vfn, align 8
  call void %5(%struct.A* %3)
  %6 = load %struct.A*, %struct.A** %a, align 8
  %7 = bitcast %struct.A* %6 to void (%struct.A*)***

; CHECK: call void @_ZN1A3fooEv(
  %vtable2 = load void (%struct.A*)**, void (%struct.A*)*** %7, align 8, !invariant.group !0
  %vfn3 = getelementptr inbounds void (%struct.A*)*, void (%struct.A*)** %vtable2, i64 0
  %8 = load void (%struct.A*)*, void (%struct.A*)** %vfn3, align 8

  call void %8(%struct.A* %6)
  %9 = load %struct.A*, %struct.A** %a, align 8
  %10 = bitcast %struct.A* %9 to void (%struct.A*)***

  %vtable4 = load void (%struct.A*)**, void (%struct.A*)*** %10, align 8, !invariant.group !0
  %vfn5 = getelementptr inbounds void (%struct.A*)*, void (%struct.A*)** %vtable4, i64 0
  %11 = load void (%struct.A*)*, void (%struct.A*)** %vfn5, align 8
; CHECK: call void @_ZN1A3fooEv(
  call void %11(%struct.A* %9)

  %vtable5 = load i8**, i8*** %2, align 8, !invariant.group !0
  %vfn6 = getelementptr inbounds i8*, i8** %vtable5, i64 0
  %12 = bitcast i8** %vfn6 to void (%struct.A*)**
  %13 = load void (%struct.A*)*, void (%struct.A*)** %12, align 8
; CHECK: call void @_ZN1A3fooEv(
  call void %13(%struct.A* %9)

  ret void
}

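; When the load is combined with the bitcast, the resulting load must not keep
; the !invariant.group metadata.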
; CHECK-LABEL: define void @combiningBitCastWithLoad() {
define void @combiningBitCastWithLoad() {
entry:
  %a = alloca %struct.A*, align 8
  %0 = bitcast %struct.A** %a to i8*

  %call = call i8* @getPointer(i8* null)
  %1 = bitcast i8* %call to %struct.A*
  call void @_ZN1AC1Ev(%struct.A* %1)
  %2 = bitcast %struct.A* %1 to i8***

; CHECK: %vtable = load {{.*}} !invariant.group
  %vtable = load i8**, i8*** %2, align 8, !invariant.group !0
  %cmp.vtables = icmp eq i8** %vtable, getelementptr inbounds ([3 x i8*], [3 x i8*]* @_ZTV1A, i64 0, i64 2)

  store %struct.A* %1, %struct.A** %a, align 8
; CHECK-NOT: !invariant.group
  %3 = load %struct.A*, %struct.A** %a, align 8
  %4 = bitcast %struct.A* %3 to void (%struct.A*)***

  %vtable1 = load void (%struct.A*)**, void (%struct.A*)*** %4, align 8, !invariant.group !0
  %vfn = getelementptr inbounds void (%struct.A*)*, void (%struct.A*)** %vtable1, i64 0
  %5 = load void (%struct.A*)*, void (%struct.A*)** %vfn, align 8
  call void %5(%struct.A* %3)

  ret void
}

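; The loadCombine* tests below check that two loads from %ptr are combined into
; a single load when at least one of them carries !invariant.group.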
; CHECK-LABEL: define void @loadCombine() {
define void @loadCombine() {
enter:
  %ptr = alloca i8
  store i8 42, i8* %ptr
  call void @foo(i8* %ptr)
; CHECK: %[[A:.*]] = load i8, i8* %ptr, !invariant.group
  %a = load i8, i8* %ptr, !invariant.group !0
; CHECK-NOT: load
  %b = load i8, i8* %ptr, !invariant.group !0
; CHECK: call void @bar(i8 %[[A]])
  call void @bar(i8 %a)
; CHECK: call void @bar(i8 %[[A]])
  call void @bar(i8 %b)
  ret void
}

; CHECK-LABEL: define void @loadCombine1() {
define void @loadCombine1() {
enter:
  %ptr = alloca i8
  store i8 42, i8* %ptr
  call void @foo(i8* %ptr)
; CHECK: %[[D:.*]] = load i8, i8* %ptr, !invariant.group
  %c = load i8, i8* %ptr
; CHECK-NOT: load
  %d = load i8, i8* %ptr, !invariant.group !0
; CHECK: call void @bar(i8 %[[D]])
  call void @bar(i8 %c)
; CHECK: call void @bar(i8 %[[D]])
  call void @bar(i8 %d)
  ret void
}

; CHECK-LABEL: define void @loadCombine2() {
define void @loadCombine2() {
enter:
  %ptr = alloca i8
  store i8 42, i8* %ptr
  call void @foo(i8* %ptr)
; CHECK: %[[E:.*]] = load i8, i8* %ptr, !invariant.group
  %e = load i8, i8* %ptr, !invariant.group !0
; CHECK-NOT: load
  %f = load i8, i8* %ptr
; CHECK: call void @bar(i8 %[[E]])
  call void @bar(i8 %e)
; CHECK: call void @bar(i8 %[[E]])
  call void @bar(i8 %f)
  ret void
}

; CHECK-LABEL: define void @loadCombine3() {
define void @loadCombine3() {
enter:
  %ptr = alloca i8
  store i8 42, i8* %ptr
  call void @foo(i8* %ptr)
; CHECK: %[[E:.*]] = load i8, i8* %ptr, !invariant.group
  %e = load i8, i8* %ptr, !invariant.group !0
; CHECK-NOT: load
  %f = load i8, i8* %ptr, !invariant.group !0
; CHECK: call void @bar(i8 %[[E]])
  call void @bar(i8 %e)
; CHECK: call void @bar(i8 %[[E]])
  call void @bar(i8 %f)
  ret void
}

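; Without !invariant.group on the store, the calls to @foo may change the value
; under %ptr, so the result cannot be folded to 42.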
; CHECK-LABEL: define i8 @unoptimizable2() {
define i8 @unoptimizable2() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr
    call void @foo(i8* %ptr)
    %a = load i8, i8* %ptr
    call void @foo(i8* %ptr)
    %b = load i8, i8* %ptr, !invariant.group !0

; CHECK: ret i8 %a
    ret i8 %a
}

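; As in @fun below, we only have information about %ptr itself; the pointer
; returned by @getPointer need not be tied to the same store.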
; CHECK-LABEL: define i8 @unoptimizable3() {
define i8 @unoptimizable3() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    %ptr2 = call i8* @getPointer(i8* %ptr)
    %a = load i8, i8* %ptr2, !invariant.group !0

; CHECK: ret i8 %a
    ret i8 %a
}

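; Nothing between the store and the load through the laundered pointer can
; modify %ptr, so the load is still expected to fold to 42.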
; CHECK-LABEL: define i8 @optimizable4() {
define i8 @optimizable4() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    %ptr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
    %a = load i8, i8* %ptr2, !invariant.group !0

; CHECK: ret i8 42
    ret i8 %a
}

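; Volatile loads have to stay, but the non-volatile !invariant.group load can
; still be folded to 42.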
; CHECK-LABEL: define i8 @volatile1() {
define i8 @volatile1() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    call void @foo(i8* %ptr)
    %a = load i8, i8* %ptr, !invariant.group !0
    %b = load volatile i8, i8* %ptr
; CHECK: call void @bar(i8 %b)
    call void @bar(i8 %b)

    %c = load volatile i8, i8* %ptr, !invariant.group !0
; FIXME: we could change %c to 42, preserving the volatile load
; CHECK: call void @bar(i8 %c)
    call void @bar(i8 %c)
; CHECK: ret i8 42
    ret i8 %a
}

; CHECK-LABEL: define i8 @volatile2() {
define i8 @volatile2() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    call void @foo(i8* %ptr)
    %a = load i8, i8* %ptr, !invariant.group !0
    %b = load volatile i8, i8* %ptr
; CHECK: call void @bar(i8 %b)
    call void @bar(i8 %b)

    %c = load volatile i8, i8* %ptr, !invariant.group !0
; FIXME: we could change %c to 42, preserving the volatile load
; CHECK: call void @bar(i8 %c)
    call void @bar(i8 %c)
; CHECK: ret i8 42
    ret i8 %a
}

; CHECK-LABEL: define i8 @fun() {
define i8 @fun() {
entry:
    %ptr = alloca i8
    store i8 42, i8* %ptr, !invariant.group !0
    call void @foo(i8* %ptr)

    %a = load i8, i8* %ptr, !invariant.group !0 ; Can assume that the value under %ptr didn't change
; CHECK: call void @bar(i8 42)
    call void @bar(i8 %a)

    %newPtr = call i8* @getPointer(i8* %ptr)
    %c = load i8, i8* %newPtr, !invariant.group !0 ; Can't assume anything, because we only have information about %ptr
; CHECK: call void @bar(i8 %c)
    call void @bar(i8 %c)

    %unknownValue = load i8, i8* @unknownPtr
; FIXME: Can assume that %unknownValue == 42
; CHECK: store i8 %unknownValue, i8* %ptr, !invariant.group !0
    store i8 %unknownValue, i8* %ptr, !invariant.group !0

    %newPtr2 = call i8* @llvm.launder.invariant.group.p0i8(i8* %ptr)
; CHECK-NOT: load
    %d = load i8, i8* %newPtr2, !invariant.group !0
; CHECK: ret i8 %unknownValue
    ret i8 %d
}

; This test checks whether invariant.group understands GEPs with all-zero indices.
; CHECK-LABEL: define void @testGEP0() {
define void @testGEP0() {
  %a = alloca %struct.A, align 8
  %1 = bitcast %struct.A* %a to i8*
  %2 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 0
  store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @_ZTV1A, i64 0, i64 2) to i32 (...)**), i32 (...)*** %2, align 8, !invariant.group !0
; CHECK: call void @_ZN1A3fooEv(%struct.A* nonnull dereferenceable(8) %a)
  call void @_ZN1A3fooEv(%struct.A* nonnull dereferenceable(8) %a) ; This call may change the vptr
  %3 = load i8, i8* @unknownPtr, align 4
  %4 = icmp eq i8 %3, 0
  br i1 %4, label %_Z1gR1A.exit, label %5

; This should be devirtualized by invariant.group
  %6 = bitcast %struct.A* %a to void (%struct.A*)***
  %7 = load void (%struct.A*)**, void (%struct.A*)*** %6, align 8, !invariant.group !0
  %8 = load void (%struct.A*)*, void (%struct.A*)** %7, align 8
; CHECK: call void @_ZN1A3fooEv(%struct.A* nonnull %a)
  call void %8(%struct.A* nonnull %a)
  br label %_Z1gR1A.exit

_Z1gR1A.exit:                                     ; preds = %0, %5
  ret void
}

; Check that no optimizations are performed with global pointers.
; FIXME: we could do the optimizations if we checked whether the dependency comes
; from the same function.
; CHECK-LABEL: define void @testGlobal() {
define void @testGlobal() {
; CHECK:  %a = load i8, i8* @unknownPtr, !invariant.group !0
   %a = load i8, i8* @unknownPtr, !invariant.group !0
   call void @foo2(i8* @unknownPtr, i8 %a)
; CHECK:  %1 = load i8, i8* @unknownPtr, !invariant.group !0
   %1 = load i8, i8* @unknownPtr, !invariant.group !0
   call void @bar(i8 %1)

   %b0 = bitcast i8* @unknownPtr to i1*
   call void @fooBit(i1* %b0, i1 1)
; Using a regex because bitcasts get canonicalized
; CHECK: %2 = load i1, i1* {{.*}}, !invariant.group !0
   %2 = load i1, i1* %b0, !invariant.group !0
   call void @fooBit(i1* %b0, i1 %2)
; CHECK:  %3 = load i1, i1* {{.*}}, !invariant.group !0
   %3 = load i1, i1* %b0, !invariant.group !0
   call void @fooBit(i1* %b0, i1 %3)
   ret void
}
; And check that the optimizations are performed when the pointer is not a global.
; CHECK-LABEL: define void @testNotGlobal() {
define void @testNotGlobal() {
   %a = alloca i8
   call void @foo(i8* %a)
; CHECK:  %b = load i8, i8* %a, !invariant.group !0
   %b = load i8, i8* %a, !invariant.group !0
   call void @foo2(i8* %a, i8 %b)

   %1 = load i8, i8* %a, !invariant.group !0
; CHECK: call void @bar(i8 %b)
   call void @bar(i8 %1)

   %b0 = bitcast i8* %a to i1*
   call void @fooBit(i1* %b0, i1 1)
; CHECK: %1 = trunc i8 %b to i1
   %2 = load i1, i1* %b0, !invariant.group !0
; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %1)
   call void @fooBit(i1* %b0, i1 %2)
   %3 = load i1, i1* %b0, !invariant.group !0
; CHECK-NEXT: call void @fooBit(i1* %b0, i1 %1)
   call void @fooBit(i1* %b0, i1 %3)
   ret void
}

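; The vtable load is loop-invariant thanks to !invariant.group, so the indirect
; call inside the loop should also be devirtualized to @_ZN1A3fooEv.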
; CHECK-LABEL: define void @handling_loops()
define void @handling_loops() {
  %a = alloca %struct.A, align 8
  %1 = bitcast %struct.A* %a to i8*
  %2 = getelementptr inbounds %struct.A, %struct.A* %a, i64 0, i32 0
  store i32 (...)** bitcast (i8** getelementptr inbounds ([3 x i8*], [3 x i8*]* @_ZTV1A, i64 0, i64 2) to i32 (...)**), i32 (...)*** %2, align 8, !invariant.group !0
  %3 = load i8, i8* @unknownPtr, align 4
  %4 = icmp sgt i8 %3, 0
  br i1 %4, label %.lr.ph.i, label %_Z2g2R1A.exit

.lr.ph.i:                                         ; preds = %0
  %5 = bitcast %struct.A* %a to void (%struct.A*)***
  %6 = load i8, i8* @unknownPtr, align 4
  %7 = icmp sgt i8 %6, 1
  br i1 %7, label %._crit_edge.preheader, label %_Z2g2R1A.exit

._crit_edge.preheader:                            ; preds = %.lr.ph.i
  br label %._crit_edge

._crit_edge:                                      ; preds = %._crit_edge.preheader, %._crit_edge
  %8 = phi i8 [ %10, %._crit_edge ], [ 1, %._crit_edge.preheader ]
  %.pre = load void (%struct.A*)**, void (%struct.A*)*** %5, align 8, !invariant.group !0
  %9 = load void (%struct.A*)*, void (%struct.A*)** %.pre, align 8
  ; CHECK: call void @_ZN1A3fooEv(%struct.A* nonnull %a)
  call void %9(%struct.A* nonnull %a) #3
  ; CHECK-NOT: call void %
  %10 = add nuw nsw i8 %8, 1
  %11 = load i8, i8* @unknownPtr, align 4
  %12 = icmp slt i8 %10, %11
  br i1 %12, label %._crit_edge, label %_Z2g2R1A.exit.loopexit

_Z2g2R1A.exit.loopexit:                           ; preds = %._crit_edge
  br label %_Z2g2R1A.exit

_Z2g2R1A.exit:                                    ; preds = %_Z2g2R1A.exit.loopexit, %.lr.ph.i, %0
  ret void
}


declare void @foo(i8*)
declare void @foo2(i8*, i8)
declare void @bar(i8)
declare i8* @getPointer(i8*)
declare void @_ZN1A3fooEv(%struct.A*)
declare void @_ZN1AC1Ev(%struct.A*)
declare void @fooBit(i1*, i1)

declare i8* @llvm.launder.invariant.group.p0i8(i8*)
declare i8* @llvm.strip.invariant.group.p0i8(i8*)

; Function Attrs: nounwind
declare void @llvm.assume(i1 %cmp.vtables) #0


attributes #0 = { nounwind }
!0 = !{}