; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -instcombine -S < %s | FileCheck %s
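; Tests for folding pointer comparisons against the result of a malloc call
; that has not escaped at the point of the comparison.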

@gp = global i32* null, align 8

declare i8* @malloc(i64) #1

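; The result of an unescaped malloc cannot be equal to a pointer loaded from
; elsewhere, so the equality comparison folds to false.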
define i1 @compare_global_trivialeq() {
; CHECK-LABEL: @compare_global_trivialeq(
; CHECK-NEXT:    ret i1 false
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8
  %cmp = icmp eq i32* %bc, %lgp
  ret i1 %cmp
}

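; Same as above, but the NE comparison folds to true.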
define i1 @compare_global_trivialne() {
; CHECK-LABEL: @compare_global_trivialne(
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8
  %cmp = icmp ne i32* %bc, %lgp
  ret i1 %cmp
}

; Although %m is marked nocapture in the deopt operand of the call to @f, we
; cannot remove the allocation site (the call to malloc). The comparison
; should fold to false regardless of whether the call to malloc can be elided.
declare void @f()
define i1 @compare_and_call_with_deopt() {
; CHECK-LABEL: @compare_and_call_with_deopt(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 false
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp eq i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; Same as the function above, with the deopt operand in the call to @f, but the comparison is NE.
define i1 @compare_ne_and_call_with_deopt() {
; CHECK-LABEL: @compare_ne_and_call_with_deopt(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp ne i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; Same as above, but the load of the global is not marked nonnull. Both %m and
; the loaded pointer may be null, so we cannot fold the comparison.
define i1 @compare_ne_global_maybe_null() {
; CHECK-LABEL: @compare_ne_global_maybe_null(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[M]] to i32*
; CHECK-NEXT:    [[LGP:%.*]] = load i32*, i32** @gp, align 8
; CHECK-NEXT:    [[CMP:%.*]] = icmp ne i32* [[LGP]], [[BC]]
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp
  %cmp = icmp ne i32* %lgp, %bc
  tail call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; FIXME: The comparison should fold to false since %m escapes (via the call to
; @escape) only after the comparison.
declare void @escape(i8*)
define i1 @compare_and_call_after() {
; CHECK-LABEL: @compare_and_call_after(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(24) i8* @malloc(i64 24)
; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[M]] to i32*
; CHECK-NEXT:    [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i32* [[LGP]], [[BC]]
; CHECK-NEXT:    br i1 [[CMP]], label [[ESCAPE_CALL:%.*]], label [[JUST_RETURN:%.*]]
; CHECK:       escape_call:
; CHECK-NEXT:    call void @escape(i8* [[M]])
; CHECK-NEXT:    ret i1 true
; CHECK:       just_return:
; CHECK-NEXT:    ret i1 [[CMP]]
;
  %m = call i8* @malloc(i64 24)
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp = icmp eq i32* %bc, %lgp
  br i1 %cmp, label %escape_call, label %just_return

escape_call:
  call void @escape(i8* %m)
  ret i1 true

just_return:
  ret i1 %cmp
}

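; Two distinct, unescaped allocations can never be equal, so the compare folds
; to false and both calls to malloc are elided as unused.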
define i1 @compare_distinct_mallocs() {
; CHECK-LABEL: @compare_distinct_mallocs(
; CHECK-NEXT:    ret i1 false
;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  %cmp = icmp eq i8* %m, %n
  ret i1 %cmp
}

; The compare folds to true since comparison folding looks through bitcasts.
; The call to malloc and the bitcast instructions are then elided since the malloc has no remaining uses.
define i1 @compare_samepointer_under_bitcast() {
; CHECK-LABEL: @compare_samepointer_under_bitcast(
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %bcback = bitcast i32* %bc to i8*
  %cmp = icmp eq i8* %m, %bcback
  ret i1 %cmp
}

; The compare folds to true since comparison folding looks through bitcasts.
; The call to malloc for %m cannot be elided since %m is used in the call to @f.
define i1 @compare_samepointer_escaped() {
; CHECK-LABEL: @compare_samepointer_escaped(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %bc = bitcast i8* %m to i32*
  %bcback = bitcast i32* %bc to i8*
  %cmp = icmp eq i8* %m, %bcback
  call void @f() [ "deopt"(i8* %m) ]
  ret i1 %cmp
}

; Technically, we can fold the %cmp2 comparison even though %m escapes through
; the ret instruction, since ret terminates the function and the escape cannot
; reach back to the comparison.
; FIXME: Folding %cmp2 when %m escapes through the ret could be a problem for
; cross-thread data dependencies, since capture tracking does not distinguish
; between atomic and non-atomic loads.
define i8* @compare_ret_escape(i8* %c) {
; CHECK-LABEL: @compare_ret_escape(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    [[N:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    [[CMP:%.*]] = icmp eq i8* [[N]], [[C:%.*]]
; CHECK-NEXT:    br i1 [[CMP]], label [[RETST:%.*]], label [[CHK:%.*]]
; CHECK:       retst:
; CHECK-NEXT:    ret i8* [[M]]
; CHECK:       chk:
; CHECK-NEXT:    [[BC:%.*]] = bitcast i8* [[M]] to i32*
; CHECK-NEXT:    [[LGP:%.*]] = load i32*, i32** @gp, align 8, !nonnull !0
; CHECK-NEXT:    [[CMP2:%.*]] = icmp eq i32* [[LGP]], [[BC]]
; CHECK-NEXT:    br i1 [[CMP2]], label [[RETST]], label [[CHK2:%.*]]
; CHECK:       chk2:
; CHECK-NEXT:    ret i8* [[N]]
;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  %cmp = icmp eq i8* %n, %c
  br i1 %cmp, label %retst, label %chk

retst:
  ret i8* %m

chk:
  %bc = bitcast i8* %m to i32*
  %lgp = load i32*, i32** @gp, align 8, !nonnull !0
  %cmp2 = icmp eq i32* %bc, %lgp
  br i1 %cmp2, label %retst, label %chk2

chk2:
  ret i8* %n
}

; The call to malloc for %m cannot be elided since %m is used in the call to @f.
; However, the compare can be folded to true since %n does not escape and %m and %n are distinct allocations.
define i1 @compare_distinct_pointer_escape() {
; CHECK-LABEL: @compare_distinct_pointer_escape(
; CHECK-NEXT:    [[M:%.*]] = call dereferenceable_or_null(4) i8* @malloc(i64 4)
; CHECK-NEXT:    tail call void @f() [ "deopt"(i8* [[M]]) ]
; CHECK-NEXT:    ret i1 true
;
  %m = call i8* @malloc(i64 4)
  %n = call i8* @malloc(i64 4)
  tail call void @f() [ "deopt"(i8* %m) ]
  %cmp = icmp ne i8* %m, %n
  ret i1 %cmp
}

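; !0 is the empty metadata node used by the !nonnull annotations on the loads above.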
!0 = !{}