; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
; RUN: opt -basic-aa -loop-distribute -enable-loop-distribute -S -enable-mem-access-versioning=0 < %s | FileCheck %s

target datalayout = "e-m:o-i64:64-f80:128-n8:16:32:64-S128"

; PredicatedScalarEvolution decides it needs to insert a bounds check
; that is not based on a memory access.
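;
; A rough C-level sketch of the loop below, assumed only for illustration
; (the IR, not this sketch, defines the test). With a 64-bit counter i and a
; 32-bit counter j advancing together:
;
;   for (i = 0, j = 0; ; ++i, ++j) {
;     a[i + 1] = a[2*j] * b[2*j];   // expected to end up in the .ldist1 loop
;     c[2*j]   = d[2*j] * e[2*j];   // expected to stay in the second loop
;     if (i + 1 == N) break;
;   }
;
; The runtime checks in for.body.lver.check below guard against overflow in
; this index arithmetic rather than against conflicting memory accesses;
; loop-distribute versions the loop on those predicates and then splits it
; into the two loops checked for below.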

define void @f(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) {
; CHECK-LABEL: @f(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    [[A2:%.*]] = ptrtoint i32* [[A:%.*]] to i64
; CHECK-NEXT:    br label [[FOR_BODY_LVER_CHECK:%.*]]
; CHECK:       for.body.lver.check:
; CHECK-NEXT:    [[TMP0:%.*]] = add i64 [[N:%.*]], -1
; CHECK-NEXT:    [[TMP1:%.*]] = trunc i64 [[TMP0]] to i32
; CHECK-NEXT:    [[MUL1:%.*]] = call { i32, i1 } @llvm.umul.with.overflow.i32(i32 2, i32 [[TMP1]])
; CHECK-NEXT:    [[MUL_RESULT:%.*]] = extractvalue { i32, i1 } [[MUL1]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW:%.*]] = extractvalue { i32, i1 } [[MUL1]], 1
; CHECK-NEXT:    [[TMP2:%.*]] = add i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP3:%.*]] = sub i32 0, [[MUL_RESULT]]
; CHECK-NEXT:    [[TMP4:%.*]] = icmp ugt i32 [[TMP3]], 0
; CHECK-NEXT:    [[TMP5:%.*]] = icmp ult i32 [[TMP2]], 0
; CHECK-NEXT:    [[TMP6:%.*]] = select i1 false, i1 [[TMP4]], i1 [[TMP5]]
; CHECK-NEXT:    [[TMP7:%.*]] = icmp ugt i64 [[TMP0]], 4294967295
; CHECK-NEXT:    [[TMP8:%.*]] = or i1 [[TMP6]], [[TMP7]]
; CHECK-NEXT:    [[TMP9:%.*]] = or i1 [[TMP8]], [[MUL_OVERFLOW]]
; CHECK-NEXT:    [[TMP10:%.*]] = or i1 false, [[TMP9]]
; CHECK-NEXT:    [[MUL3:%.*]] = call { i64, i1 } @llvm.umul.with.overflow.i64(i64 8, i64 [[TMP0]])
; CHECK-NEXT:    [[MUL_RESULT4:%.*]] = extractvalue { i64, i1 } [[MUL3]], 0
; CHECK-NEXT:    [[MUL_OVERFLOW5:%.*]] = extractvalue { i64, i1 } [[MUL3]], 1
; CHECK-NEXT:    [[TMP11:%.*]] = add i64 [[A2]], [[MUL_RESULT4]]
; CHECK-NEXT:    [[TMP12:%.*]] = sub i64 [[A2]], [[MUL_RESULT4]]
; CHECK-NEXT:    [[TMP13:%.*]] = icmp ugt i64 [[TMP12]], [[A2]]
; CHECK-NEXT:    [[TMP14:%.*]] = icmp ult i64 [[TMP11]], [[A2]]
; CHECK-NEXT:    [[TMP15:%.*]] = select i1 false, i1 [[TMP13]], i1 [[TMP14]]
; CHECK-NEXT:    [[TMP16:%.*]] = or i1 [[TMP15]], [[MUL_OVERFLOW5]]
; CHECK-NEXT:    [[TMP17:%.*]] = or i1 [[TMP10]], [[TMP16]]
; CHECK-NEXT:    br i1 [[TMP17]], label [[FOR_BODY_PH_LVER_ORIG:%.*]], label [[FOR_BODY_PH_LDIST1:%.*]]
; CHECK:       for.body.ph.lver.orig:
; CHECK-NEXT:    br label [[FOR_BODY_LVER_ORIG:%.*]]
; CHECK:       for.body.lver.orig:
; CHECK-NEXT:    [[IND_LVER_ORIG:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[ADD_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[IND1_LVER_ORIG:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LVER_ORIG]] ], [ [[INC1_LVER_ORIG:%.*]], [[FOR_BODY_LVER_ORIG]] ]
; CHECK-NEXT:    [[MUL_LVER_ORIG:%.*]] = mul i32 [[IND1_LVER_ORIG]], 2
; CHECK-NEXT:    [[MUL_EXT_LVER_ORIG:%.*]] = zext i32 [[MUL_LVER_ORIG]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADA_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXA_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXB_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADB_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXB_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULA_LVER_ORIG:%.*]] = mul i32 [[LOADB_LVER_ORIG]], [[LOADA_LVER_ORIG]]
; CHECK-NEXT:    [[ADD_LVER_ORIG]] = add nuw nsw i64 [[IND_LVER_ORIG]], 1
; CHECK-NEXT:    [[INC1_LVER_ORIG]] = add i32 [[IND1_LVER_ORIG]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULA_LVER_ORIG]], i32* [[ARRAYIDXA_PLUS_4_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXD_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADD_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXD_LVER_ORIG]], align 4
; CHECK-NEXT:    [[ARRAYIDXE_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    [[LOADE_LVER_ORIG:%.*]] = load i32, i32* [[ARRAYIDXE_LVER_ORIG]], align 4
; CHECK-NEXT:    [[MULC_LVER_ORIG:%.*]] = mul i32 [[LOADD_LVER_ORIG]], [[LOADE_LVER_ORIG]]
; CHECK-NEXT:    [[ARRAYIDXC_LVER_ORIG:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT_LVER_ORIG]]
; CHECK-NEXT:    store i32 [[MULC_LVER_ORIG]], i32* [[ARRAYIDXC_LVER_ORIG]], align 4
; CHECK-NEXT:    [[EXITCOND_LVER_ORIG:%.*]] = icmp eq i64 [[ADD_LVER_ORIG]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LVER_ORIG]], label %[[FOR_END1:.*]], label [[FOR_BODY_LVER_ORIG]]
; CHECK:       for.body.ph.ldist1:
; CHECK-NEXT:    br label [[FOR_BODY_LDIST1:%.*]]
; CHECK:       for.body.ldist1:
; CHECK-NEXT:    [[IND_LDIST1:%.*]] = phi i64 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[ADD_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[IND1_LDIST1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH_LDIST1]] ], [ [[INC1_LDIST1:%.*]], [[FOR_BODY_LDIST1]] ]
; CHECK-NEXT:    [[MUL_LDIST1:%.*]] = mul i32 [[IND1_LDIST1]], 2
; CHECK-NEXT:    [[MUL_EXT_LDIST1:%.*]] = zext i32 [[MUL_LDIST1]] to i64
; CHECK-NEXT:    [[ARRAYIDXA_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADA_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXA_LDIST1]], align 4
; CHECK-NEXT:    [[ARRAYIDXB_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[B]], i64 [[MUL_EXT_LDIST1]]
; CHECK-NEXT:    [[LOADB_LDIST1:%.*]] = load i32, i32* [[ARRAYIDXB_LDIST1]], align 4
; CHECK-NEXT:    [[MULA_LDIST1:%.*]] = mul i32 [[LOADB_LDIST1]], [[LOADA_LDIST1]]
; CHECK-NEXT:    [[ADD_LDIST1]] = add nuw nsw i64 [[IND_LDIST1]], 1
; CHECK-NEXT:    [[INC1_LDIST1]] = add i32 [[IND1_LDIST1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4_LDIST1:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD_LDIST1]]
; CHECK-NEXT:    store i32 [[MULA_LDIST1]], i32* [[ARRAYIDXA_PLUS_4_LDIST1]], align 4
; CHECK-NEXT:    [[EXITCOND_LDIST1:%.*]] = icmp eq i64 [[ADD_LDIST1]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND_LDIST1]], label [[FOR_BODY_PH:%.*]], label [[FOR_BODY_LDIST1]]
; CHECK:       for.body.ph:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[FOR_BODY_PH]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[FOR_BODY_PH]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[LOADD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label %[[FOR_END2:.*]], label [[FOR_BODY]]
; CHECK:       [[FOR_END1]]:
; CHECK:         br label %for.end
; CHECK:       [[FOR_END2]]:
; CHECK:         br label %for.end
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %mulC = mul i32 %loadD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

; Can't add a control dependency with a convergent call in the loop body.
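; Because a convergent call must not acquire new control dependencies, the
; runtime checks needed for loop versioning cannot be emitted here, so the
; CHECK lines below expect the loop to be left alone: no .lver.check and no
; .ldist1 blocks.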
define void @f_with_convergent(i32* noalias %a, i32* noalias %b, i32* noalias %c, i32* noalias %d, i32* noalias %e, i64 %N) #1 {
; CHECK-LABEL: @f_with_convergent(
; CHECK-NEXT:  entry:
; CHECK-NEXT:    br label [[FOR_BODY:%.*]]
; CHECK:       for.body:
; CHECK-NEXT:    [[IND:%.*]] = phi i64 [ 0, [[ENTRY:%.*]] ], [ [[ADD:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[IND1:%.*]] = phi i32 [ 0, [[ENTRY]] ], [ [[INC1:%.*]], [[FOR_BODY]] ]
; CHECK-NEXT:    [[MUL:%.*]] = mul i32 [[IND1]], 2
; CHECK-NEXT:    [[MUL_EXT:%.*]] = zext i32 [[MUL]] to i64
; CHECK-NEXT:    [[ARRAYIDXA:%.*]] = getelementptr inbounds i32, i32* [[A:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADA:%.*]] = load i32, i32* [[ARRAYIDXA]], align 4
; CHECK-NEXT:    [[ARRAYIDXB:%.*]] = getelementptr inbounds i32, i32* [[B:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADB:%.*]] = load i32, i32* [[ARRAYIDXB]], align 4
; CHECK-NEXT:    [[MULA:%.*]] = mul i32 [[LOADB]], [[LOADA]]
; CHECK-NEXT:    [[ADD]] = add nuw nsw i64 [[IND]], 1
; CHECK-NEXT:    [[INC1]] = add i32 [[IND1]], 1
; CHECK-NEXT:    [[ARRAYIDXA_PLUS_4:%.*]] = getelementptr inbounds i32, i32* [[A]], i64 [[ADD]]
; CHECK-NEXT:    store i32 [[MULA]], i32* [[ARRAYIDXA_PLUS_4]], align 4
; CHECK-NEXT:    [[ARRAYIDXD:%.*]] = getelementptr inbounds i32, i32* [[D:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADD:%.*]] = load i32, i32* [[ARRAYIDXD]], align 4
; CHECK-NEXT:    [[ARRAYIDXE:%.*]] = getelementptr inbounds i32, i32* [[E:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    [[LOADE:%.*]] = load i32, i32* [[ARRAYIDXE]], align 4
; CHECK-NEXT:    [[CONVERGENTD:%.*]] = call i32 @llvm.convergent(i32 [[LOADD]])
; CHECK-NEXT:    [[MULC:%.*]] = mul i32 [[CONVERGENTD]], [[LOADE]]
; CHECK-NEXT:    [[ARRAYIDXC:%.*]] = getelementptr inbounds i32, i32* [[C:%.*]], i64 [[MUL_EXT]]
; CHECK-NEXT:    store i32 [[MULC]], i32* [[ARRAYIDXC]], align 4
; CHECK-NEXT:    [[EXITCOND:%.*]] = icmp eq i64 [[ADD]], [[N:%.*]]
; CHECK-NEXT:    br i1 [[EXITCOND]], label [[FOR_END:%.*]], label [[FOR_BODY]]
; CHECK:       for.end:
; CHECK-NEXT:    ret void
;
entry:
  br label %for.body

for.body:                                         ; preds = %for.body, %entry
  %ind = phi i64 [ 0, %entry ], [ %add, %for.body ]
  %ind1 = phi i32 [ 0, %entry ], [ %inc1, %for.body ]

  %mul = mul i32 %ind1, 2
  %mul_ext = zext i32 %mul to i64


  %arrayidxA = getelementptr inbounds i32, i32* %a, i64 %mul_ext
  %loadA = load i32, i32* %arrayidxA, align 4

  %arrayidxB = getelementptr inbounds i32, i32* %b, i64 %mul_ext
  %loadB = load i32, i32* %arrayidxB, align 4

  %mulA = mul i32 %loadB, %loadA

  %add = add nuw nsw i64 %ind, 1
  %inc1 = add i32 %ind1, 1

  %arrayidxA_plus_4 = getelementptr inbounds i32, i32* %a, i64 %add
  store i32 %mulA, i32* %arrayidxA_plus_4, align 4

  %arrayidxD = getelementptr inbounds i32, i32* %d, i64 %mul_ext
  %loadD = load i32, i32* %arrayidxD, align 4

  %arrayidxE = getelementptr inbounds i32, i32* %e, i64 %mul_ext
  %loadE = load i32, i32* %arrayidxE, align 4

  %convergentD = call i32 @llvm.convergent(i32 %loadD)
  %mulC = mul i32 %convergentD, %loadE

  %arrayidxC = getelementptr inbounds i32, i32* %c, i64 %mul_ext
  store i32 %mulC, i32* %arrayidxC, align 4

  %exitcond = icmp eq i64 %add, %N
  br i1 %exitcond, label %for.end, label %for.body

for.end:                                          ; preds = %for.body
  ret void
}

declare i32 @llvm.convergent(i32) #0

attributes #0 = { nounwind readnone convergent }
attributes #1 = { nounwind convergent }