; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -S -early-cse -earlycse-debug-hash < %s | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt < %s -S -basic-aa -early-cse-memssa | FileCheck %s --check-prefixes=CHECK,NO_ASSUME
; RUN: opt < %s -S -basic-aa -early-cse-memssa --enable-knowledge-retention | FileCheck %s --check-prefixes=CHECK,USE_ASSUME

declare void @llvm.experimental.guard(i1,...)

declare void @llvm.assume(i1)

define i32 @test0(i32* %ptr, i1 %cond) {
; We can do store to load forwarding over a guard, since it does not
; clobber memory
; NO_ASSUME-LABEL: @test0(
; NO_ASSUME-NEXT:    store i32 40, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; NO_ASSUME-NEXT:    ret i32 40
;
; USE_ASSUME-LABEL: @test0(
; USE_ASSUME-NEXT:    store i32 40, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT:    ret i32 40
;

  store i32 40, i32* %ptr
  call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  %rval = load i32, i32* %ptr
  ret i32 %rval
}

define i32 @test1(i32* %val, i1 %cond) {
; We can CSE loads over a guard, since it does not clobber memory
; NO_ASSUME-LABEL: @test1(
; NO_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; NO_ASSUME-NEXT:    ret i32 0
;
; USE_ASSUME-LABEL: @test1(
; USE_ASSUME-NEXT:    [[VAL0:%.*]] = load i32, i32* [[VAL:%.*]], align 4
; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND:%.*]]) [ "deopt"() ]
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[VAL]], i64 4), "nonnull"(i32* [[VAL]]), "align"(i32* [[VAL]], i64 4) ]
; USE_ASSUME-NEXT:    ret i32 0
;

  %val0 = load i32, i32* %val
  call void(i1,...) @llvm.experimental.guard(i1 %cond) [ "deopt"() ]
  %val1 = load i32, i32* %val
  %rval = sub i32 %val0, %val1
  ret i32 %rval
}

define i32 @test2() {
; Guards on "true" get removed
; CHECK-LABEL: @test2(
; CHECK-NEXT:    ret i32 0
;
  call void(i1, ...) @llvm.experimental.guard(i1 true) [ "deopt"() ]
  ret i32 0
}

define i32 @test3(i32 %val) {
; After a guard has executed the condition it was guarding is known to
; be true.
; CHECK-LABEL: @test3(
; CHECK-NEXT:    [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
; CHECK-NEXT:    ret i32 -1
;

  %cond0 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
  %cond1 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]

  %cond2 = icmp slt i32 %val, 40
  %rval = sext i1 %cond2 to i32
  ret i32 %rval
}

define i32 @test3.unhandled(i32 %val) {
; After a guard has executed the condition it was guarding is known to
; be true.
; CHECK-LABEL: @test3.unhandled(
; CHECK-NEXT:    [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
; CHECK-NEXT:    [[COND1:%.*]] = icmp sge i32 [[VAL]], 40
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND1]]) [ "deopt"() ]
; CHECK-NEXT:    ret i32 0
;

; Demonstrates a case we do not yet handle (it is legal to fold %cond2
; to false)
  %cond0 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
  %cond1 = icmp sge i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
  ret i32 0
}

define i32 @test4(i32 %val, i1 %c) {
; Same as test3, but with some control flow involved.
; CHECK-LABEL: @test4(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
; CHECK-NEXT:    br label [[BB0:%.*]]
; CHECK:       bb0:
; CHECK-NEXT:    [[COND2:%.*]] = icmp ult i32 [[VAL]], 200
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND2]]) [ "deopt"() ]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
; CHECK:       left:
; CHECK-NEXT:    ret i32 0
; CHECK:       right:
; CHECK-NEXT:    ret i32 20
;




entry:
  %cond0 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
  %cond1 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
  br label %bb0

bb0:
  %cond2 = icmp ult i32 %val, 200
  call void(i1,...) @llvm.experimental.guard(i1 %cond2) [ "deopt"() ]
  br i1 %c, label %left, label %right

left:
  %cond3 = icmp ult i32 %val, 200
  call void(i1,...) @llvm.experimental.guard(i1 %cond3) [ "deopt"() ]
  ret i32 0

right:
  ret i32 20
}

define i32 @test5(i32 %val, i1 %c) {
; Same as test4, but the %left block has multiple predecessors.
; CHECK-LABEL: @test5(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[COND0:%.*]] = icmp slt i32 [[VAL:%.*]], 40
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND0]]) [ "deopt"() ]
; CHECK-NEXT:    br label [[BB0:%.*]]
; CHECK:       bb0:
; CHECK-NEXT:    [[COND2:%.*]] = icmp ult i32 [[VAL]], 200
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[COND2]]) [ "deopt"() ]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[LEFT:%.*]], label [[RIGHT:%.*]]
; CHECK:       left:
; CHECK-NEXT:    br label [[RIGHT]]
; CHECK:       right:
; CHECK-NEXT:    br label [[LEFT]]
;





entry:
  %cond0 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond0) [ "deopt"() ]
  %cond1 = icmp slt i32 %val, 40
  call void(i1,...) @llvm.experimental.guard(i1 %cond1) [ "deopt"() ]
  br label %bb0

bb0:
  %cond2 = icmp ult i32 %val, 200
  call void(i1,...) @llvm.experimental.guard(i1 %cond2) [ "deopt"() ]
  br i1 %c, label %left, label %right

left:
  %cond3 = icmp ult i32 %val, 200
  call void(i1,...) @llvm.experimental.guard(i1 %cond3) [ "deopt"() ]
  br label %right

right:
  br label %left
}

define void @test6(i1 %c, i32* %ptr) {
; Check that we do not DSE over calls to @llvm.experimental.guard.
; Guard intrinsics do _read_ memory, so the call to guard below needs
; to see the store of 500 to %ptr
; CHECK-LABEL: @test6(
; CHECK-NEXT:    store i32 500, i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[C:%.*]]) [ "deopt"() ]
; CHECK-NEXT:    store i32 600, i32* [[PTR]], align 4
; CHECK-NEXT:    ret void
;


  store i32 500, i32* %ptr
  call void(i1,...) @llvm.experimental.guard(i1 %c) [ "deopt"() ]
  store i32 600, i32* %ptr
  ret void
}

define void @test07(i32 %a, i32 %b) {
; Check that we are able to remove the guards on the same condition even if the
; condition is not being recalculated.
; CHECK-LABEL: @test07(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT:    ret void
;

  %cmp = icmp eq i32 %a, %b
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  ret void
}

define void @test08(i32 %a, i32 %b, i32* %ptr) {
; Check that we deal correctly with stores when removing guards in the same
; block in case when the condition is not recalculated.
; NO_ASSUME-LABEL: @test08(
; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
; NO_ASSUME-NEXT:    ret void
;
; USE_ASSUME-LABEL: @test08(
; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    ret void
;

  %cmp = icmp eq i32 %a, %b
  store i32 100, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 200, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 300, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 400, i32* %ptr
  ret void
}

define void @test09(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; Similar to test08, but with more control flow.
; TODO: Can we get rid of the store in the end of entry given that it is
; post-dominated by other stores?
; NO_ASSUME-LABEL: @test09(
; NO_ASSUME-NEXT:  entry:
; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
; NO_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; NO_ASSUME:       if.true:
; NO_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
; NO_ASSUME-NEXT:    br label [[MERGE:%.*]]
; NO_ASSUME:       if.false:
; NO_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
; NO_ASSUME-NEXT:    br label [[MERGE]]
; NO_ASSUME:       merge:
; NO_ASSUME-NEXT:    ret void
;
; USE_ASSUME-LABEL: @test09(
; USE_ASSUME-NEXT:  entry:
; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
; USE_ASSUME-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; USE_ASSUME:       if.true:
; USE_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    br label [[MERGE:%.*]]
; USE_ASSUME:       if.false:
; USE_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    br label [[MERGE]]
; USE_ASSUME:       merge:
; USE_ASSUME-NEXT:    ret void
;

entry:
  %cmp = icmp eq i32 %a, %b
  store i32 100, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 200, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 300, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 400, i32* %ptr
  br i1 %c, label %if.true, label %if.false

if.true:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 500, i32* %ptr
  br label %merge

if.false:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 600, i32* %ptr
  br label %merge

merge:
  ret void
}

define void @test10(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; Make sure that non-dominating guards do not cause other guards removal.
; CHECK-LABEL: @test10(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK:       if.true:
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    br label [[MERGE:%.*]]
; CHECK:       if.false:
; CHECK-NEXT:    store i32 200, i32* [[PTR]], align 4
; CHECK-NEXT:    br label [[MERGE]]
; CHECK:       merge:
; CHECK-NEXT:    store i32 300, i32* [[PTR]], align 4
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT:    store i32 400, i32* [[PTR]], align 4
; CHECK-NEXT:    ret void
;

entry:
  %cmp = icmp eq i32 %a, %b
  br i1 %c, label %if.true, label %if.false

if.true:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 100, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  br label %merge

if.false:
  store i32 200, i32* %ptr
  br label %merge

merge:
  store i32 300, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 400, i32* %ptr
  ret void
}

define void @test11(i32 %a, i32 %b, i32* %ptr) {
; Make sure that branching condition is applied to guards.
; CHECK-LABEL: @test11(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK:       if.true:
; CHECK-NEXT:    br label [[MERGE:%.*]]
; CHECK:       if.false:
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 false) [ "deopt"() ]
; CHECK-NEXT:    br label [[MERGE]]
; CHECK:       merge:
; CHECK-NEXT:    ret void
;

entry:
  %cmp = icmp eq i32 %a, %b
  br i1 %cmp, label %if.true, label %if.false

if.true:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  br label %merge

if.false:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  br label %merge

merge:
  ret void
}

define void @test12(i32 %a, i32 %b) {
; Check that the assume marks its condition as being true (and thus allows to
; eliminate the dominated guards).
; CHECK-LABEL: @test12(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret void
;

  %cmp = icmp eq i32 %a, %b
  call void @llvm.assume(i1 %cmp)
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  ret void
}

define void @test13(i32 %a, i32 %b, i32* %ptr) {
; Check that we deal correctly with stores when removing guards due to assume.
; NO_ASSUME-LABEL: @test13(
; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT:    ret void
;
; USE_ASSUME-LABEL: @test13(
; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    ret void
;

  %cmp = icmp eq i32 %a, %b
  call void @llvm.assume(i1 %cmp)
  store i32 100, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 200, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 300, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 400, i32* %ptr
  ret void
}

define void @test14(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; Similar to test13, but with more control flow.
; TODO: Can we get rid of the store in the end of entry given that it is
; post-dominated by other stores?
; NO_ASSUME-LABEL: @test14(
; NO_ASSUME-NEXT:  entry:
; NO_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; NO_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
; NO_ASSUME-NEXT:    store i32 400, i32* [[PTR:%.*]], align 4
; NO_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; NO_ASSUME:       if.true:
; NO_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
; NO_ASSUME-NEXT:    br label [[MERGE:%.*]]
; NO_ASSUME:       if.false:
; NO_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
; NO_ASSUME-NEXT:    br label [[MERGE]]
; NO_ASSUME:       merge:
; NO_ASSUME-NEXT:    ret void
;
; USE_ASSUME-LABEL: @test14(
; USE_ASSUME-NEXT:  entry:
; USE_ASSUME-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 [[CMP]])
; USE_ASSUME-NEXT:    call void @llvm.assume(i1 true) [ "dereferenceable"(i32* [[PTR:%.*]], i64 4), "nonnull"(i32* [[PTR]]), "align"(i32* [[PTR]], i64 4) ]
; USE_ASSUME-NEXT:    store i32 400, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; USE_ASSUME:       if.true:
; USE_ASSUME-NEXT:    store i32 500, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    br label [[MERGE:%.*]]
; USE_ASSUME:       if.false:
; USE_ASSUME-NEXT:    store i32 600, i32* [[PTR]], align 4
; USE_ASSUME-NEXT:    br label [[MERGE]]
; USE_ASSUME:       merge:
; USE_ASSUME-NEXT:    ret void
;

entry:
  %cmp = icmp eq i32 %a, %b
  call void @llvm.assume(i1 %cmp)
  store i32 100, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 200, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 300, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 400, i32* %ptr
  br i1 %c, label %if.true, label %if.false

if.true:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 500, i32* %ptr
  br label %merge

if.false:
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 600, i32* %ptr
  br label %merge

merge:
  ret void
}

define void @test15(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; Make sure that non-dominating assumes do not cause guards removal.
; CHECK-LABEL: @test15(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK:       if.true:
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    store i32 100, i32* [[PTR:%.*]], align 4
; CHECK-NEXT:    br label [[MERGE:%.*]]
; CHECK:       if.false:
; CHECK-NEXT:    store i32 200, i32* [[PTR]], align 4
; CHECK-NEXT:    br label [[MERGE]]
; CHECK:       merge:
; CHECK-NEXT:    store i32 300, i32* [[PTR]], align 4
; CHECK-NEXT:    call void (i1, ...) @llvm.experimental.guard(i1 [[CMP]]) [ "deopt"() ]
; CHECK-NEXT:    store i32 400, i32* [[PTR]], align 4
; CHECK-NEXT:    ret void
;

entry:
  %cmp = icmp eq i32 %a, %b
  br i1 %c, label %if.true, label %if.false

if.true:
  call void @llvm.assume(i1 %cmp)
  store i32 100, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  br label %merge

if.false:
  store i32 200, i32* %ptr
  br label %merge

merge:
  store i32 300, i32* %ptr
  call void (i1, ...) @llvm.experimental.guard(i1 %cmp) [ "deopt"() ]
  store i32 400, i32* %ptr
  ret void
}

define void @test16(i32 %a, i32 %b) {
; Check that we don't bother to do anything with assumes even if we know the
; condition being true.
; CHECK-LABEL: @test16(
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    ret void
;

  %cmp = icmp eq i32 %a, %b
  call void @llvm.assume(i1 %cmp)
  call void @llvm.assume(i1 %cmp)
  ret void
}

define void @test17(i32 %a, i32 %b, i1 %c, i32* %ptr) {
; Check that we don't bother to do anything with assumes even if we know the
; condition being true or false (includes some control flow).
; CHECK-LABEL: @test17(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32 [[A:%.*]], [[B:%.*]]
; CHECK-NEXT:    br i1 [[C:%.*]], label [[IF_TRUE:%.*]], label [[IF_FALSE:%.*]]
; CHECK:       if.true:
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    br label [[MERGE:%.*]]
; CHECK:       if.false:
; CHECK-NEXT:    call void @llvm.assume(i1 [[CMP]])
; CHECK-NEXT:    br label [[MERGE]]
; CHECK:       merge:
; CHECK-NEXT:    ret void
;

entry:
  %cmp = icmp eq i32 %a, %b
  br i1 %c, label %if.true, label %if.false

if.true:
  call void @llvm.assume(i1 %cmp)
  br label %merge

if.false:
  call void @llvm.assume(i1 %cmp)
  br label %merge

merge:
  ret void
}

define void @test18(i1 %c) {
; Check that we don't bother to do anything with assumes even if we know the
; condition being true and not being an instruction.
; CHECK-LABEL: @test18(
; CHECK-NEXT:    call void @llvm.assume(i1 [[C:%.*]])
; CHECK-NEXT:    call void @llvm.assume(i1 [[C]])
; CHECK-NEXT:    ret void
;

  call void @llvm.assume(i1 %c)
  call void @llvm.assume(i1 %c)
  ret void
}