//=== ScalarEvolutionExpanderTest.cpp - ScalarEvolutionExpander unit tests ===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Utils/ScalarEvolutionExpander.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/AsmParser/Parser.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstIterator.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Verifier.h"
#include "llvm/Support/SourceMgr.h"
#include "gtest/gtest.h"

namespace llvm {

using namespace PatternMatch;
// We use this fixture to ensure that we clean up ScalarEvolution before
// deleting the PassManager.
class ScalarEvolutionExpanderTest : public testing::Test {
protected:
  LLVMContext Context;
  Module M;
  TargetLibraryInfoImpl TLII;
  TargetLibraryInfo TLI;

  std::unique_ptr<AssumptionCache> AC;
  std::unique_ptr<DominatorTree> DT;
  std::unique_ptr<LoopInfo> LI;

  ScalarEvolutionExpanderTest() : M("", Context), TLII(), TLI(TLII) {}

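  // Build fresh instances of the analyses ScalarEvolution needs
  // (AssumptionCache, DominatorTree, LoopInfo) and construct a
  // ScalarEvolution for \p F on top of them.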
  ScalarEvolution buildSE(Function &F) {
    AC.reset(new AssumptionCache(F));
    DT.reset(new DominatorTree(F));
    LI.reset(new LoopInfo(*DT));
    return ScalarEvolution(F, TLI, *AC, *DT, *LI);
  }

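  // Find \p FuncName in \p M, build ScalarEvolution for it, and invoke
  // \p Test with the function, its LoopInfo, and the ScalarEvolution.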
  void runWithSE(
      Module &M, StringRef FuncName,
      function_ref<void(Function &F, LoopInfo &LI, ScalarEvolution &SE)> Test) {
    auto *F = M.getFunction(FuncName);
    ASSERT_NE(F, nullptr) << "Could not find " << FuncName;
    ScalarEvolution SE = buildSE(*F);
    Test(*F, *LI, SE);
  }
};

static Instruction &GetInstByName(Function &F, StringRef Name) {
  for (auto &I : instructions(F))
    if (I.getName() == Name)
      return I;
  llvm_unreachable("Could not find instruction!");
}

TEST_F(ScalarEvolutionExpanderTest, ExpandPtrTypeSCEV) {
  // This tests the fix for PR30213. It exercises the branch in SCEV expansion
  // where the value in ValueOffsetPair is a pointer and the offset is not
  // divisible by the element type size of the value.
  auto *I8Ty = Type::getInt8Ty(Context);
  auto *I8PtrTy = Type::getInt8PtrTy(Context);
  auto *I32Ty = Type::getInt32Ty(Context);
  auto *I32PtrTy = Type::getInt32PtrTy(Context);
  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), std::vector<Type *>(), false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "f", M);
  BasicBlock *EntryBB = BasicBlock::Create(Context, "entry", F);
  BasicBlock *LoopBB = BasicBlock::Create(Context, "loop", F);
  BasicBlock *ExitBB = BasicBlock::Create(Context, "exit", F);
  BranchInst::Create(LoopBB, EntryBB);
  ReturnInst::Create(Context, nullptr, ExitBB);

  // loop:                            ; preds = %loop, %entry
  //   %alloca = alloca i32
  //   %gep0 = getelementptr i32, i32* %alloca, i32 1
  //   %bitcast1 = bitcast i32* %gep0 to i8*
  //   %gep1 = getelementptr i8, i8* %bitcast1, i32 1
  //   %gep2 = getelementptr i8, i8* undef, i32 1
  //   %cmp = icmp ult i8* undef, %bitcast1
  //   %select = select i1 %cmp, i8* %gep1, i8* %gep2
  //   %bitcast2 = bitcast i8* %select to i32*
  //   br i1 undef, label %loop, label %exit

  const DataLayout &DL = F->getParent()->getDataLayout();
  BranchInst *Br = BranchInst::Create(
      LoopBB, ExitBB, UndefValue::get(Type::getInt1Ty(Context)), LoopBB);
  AllocaInst *Alloca =
      new AllocaInst(I32Ty, DL.getAllocaAddrSpace(), "alloca", Br);
  ConstantInt *Ci32 = ConstantInt::get(Context, APInt(32, 1));
  GetElementPtrInst *Gep0 =
      GetElementPtrInst::Create(I32Ty, Alloca, Ci32, "gep0", Br);
  CastInst *CastA =
      CastInst::CreateBitOrPointerCast(Gep0, I8PtrTy, "bitcast1", Br);
  GetElementPtrInst *Gep1 =
      GetElementPtrInst::Create(I8Ty, CastA, Ci32, "gep1", Br);
  GetElementPtrInst *Gep2 = GetElementPtrInst::Create(
      I8Ty, UndefValue::get(I8PtrTy), Ci32, "gep2", Br);
  CmpInst *Cmp = CmpInst::Create(Instruction::ICmp, CmpInst::ICMP_ULT,
                                 UndefValue::get(I8PtrTy), CastA, "cmp", Br);
  SelectInst *Sel = SelectInst::Create(Cmp, Gep1, Gep2, "select", Br);
  CastInst *CastB =
      CastInst::CreateBitOrPointerCast(Sel, I32PtrTy, "bitcast2", Br);

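  // Compute the SCEV for %bitcast2 and expand the second operand of the
  // resulting add expression just before the loop's branch; the byte offset
  // involved is not a multiple of the i32 element size (the PR30213 case).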
  ScalarEvolution SE = buildSE(*F);
  auto *S = SE.getSCEV(CastB);
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Value *V =
      Exp.expandCodeFor(cast<SCEVAddExpr>(S)->getOperand(1), nullptr, Br);

  // Expect the expansion code to contain:
  //   %0 = bitcast i32* %bitcast2 to i8*
  //   %uglygep = getelementptr i8, i8* %0, i64 -1
  //   %1 = bitcast i8* %uglygep to i32*
  EXPECT_TRUE(isa<BitCastInst>(V));
  Instruction *Gep = cast<Instruction>(V)->getPrevNode();
  EXPECT_TRUE(isa<GetElementPtrInst>(Gep));
  EXPECT_TRUE(isa<ConstantInt>(Gep->getOperand(1)));
  EXPECT_EQ(cast<ConstantInt>(Gep->getOperand(1))->getSExtValue(), -1);
  EXPECT_TRUE(isa<BitCastInst>(Gep->getPrevNode()));
}

// Make sure that SCEV doesn't introduce illegal ptrtoint/inttoptr instructions
TEST_F(ScalarEvolutionExpanderTest, SCEVZeroExtendExprNonIntegral) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *   br label %L.ph
   * L.ph:
   *   br label %L
   * L:
   *   %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
   *   %add = add i64 %phi, 1
   *   br i1 undef, label %L, label %post
   * post:
   *   %gepbase = getelementptr i64 addrspace(10)* %arg, i64 1
   *   #= %gep = getelementptr i64 addrspace(10)* %gepbase, i64 %add =#
   *   ret void
   *
   * We will create the appropriate SCEV expression for %gep and expand it,
   * then check that no inttoptr/ptrtoint instructions got inserted.
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int1 = Type::getInt1Ty(Context);
  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  Argument *Arg = &*F->arg_begin();

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  Value *Add = Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add");
  Builder.CreateCondBr(UndefValue::get(T_int1), L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Value *GepBase =
      Builder.CreateGEP(T_int64, Arg, ConstantInt::get(T_int64, 1));
  Instruction *Ret = Builder.CreateRetVoid();

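  // Hand-build the addrec {%gepbase,+,1}<nuw> over %L. Since %arg lives in
  // the non-integral address space 10, the expander must materialize it
  // without resorting to ptrtoint/inttoptr.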
  ScalarEvolution SE = buildSE(*F);
  auto *AddRec =
      SE.getAddRecExpr(SE.getUnknown(GepBase), SE.getConstant(T_int64, 1),
                       LI->getLoopFor(L), SCEV::FlagNUW);

  SCEVExpander Exp(SE, NIM.getDataLayout(), "expander");
  Exp.disableCanonicalMode();
  Exp.expandCodeFor(AddRec, T_pint64, Ret);

  // Make sure none of the instructions inserted were inttoptr/ptrtoint.
  // The verifier will check this.
  EXPECT_FALSE(verifyFunction(*F, &errs()));
}

// Check that we can correctly identify the points at which the SCEV of the
// AddRec can be expanded.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderIsSafeToExpandAt) {
  /*
   * Create the following code:
   * func(i64 addrspace(10)* %arg)
   * top:
   *   br label %L.ph
   * L.ph:
   *   br label %L
   * L:
   *   %phi = phi i64 [ 0, %L.ph ], [ %add, %L ]
   *   %add = add i64 %phi, 1
   *   %cond = icmp slt i64 %add, 1000
   *   br i1 %cond, label %L, label %post
   * post:
   *   ret void
   *
   */

  // Create a module with non-integral pointers in its datalayout.
  Module NIM("nonintegral", Context);
  std::string DataLayout = M.getDataLayoutStr();
  if (!DataLayout.empty())
    DataLayout += "-";
  DataLayout += "ni:10";
  NIM.setDataLayout(DataLayout);

  Type *T_int64 = Type::getInt64Ty(Context);
  Type *T_pint64 = T_int64->getPointerTo(10);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_pint64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "foo", NIM);

  BasicBlock *Top = BasicBlock::Create(Context, "top", F);
  BasicBlock *LPh = BasicBlock::Create(Context, "L.ph", F);
  BasicBlock *L = BasicBlock::Create(Context, "L", F);
  BasicBlock *Post = BasicBlock::Create(Context, "post", F);

  IRBuilder<> Builder(Top);
  Builder.CreateBr(LPh);

  Builder.SetInsertPoint(LPh);
  Builder.CreateBr(L);

  Builder.SetInsertPoint(L);
  PHINode *Phi = Builder.CreatePHI(T_int64, 2);
  auto *Add = cast<Instruction>(
      Builder.CreateAdd(Phi, ConstantInt::get(T_int64, 1), "add"));
  auto *Limit = ConstantInt::get(T_int64, 1000);
  auto *Cond = cast<Instruction>(
      Builder.CreateICmp(ICmpInst::ICMP_SLT, Add, Limit, "cond"));
  Builder.CreateCondBr(Cond, L, Post);
  Phi->addIncoming(ConstantInt::get(T_int64, 0), LPh);
  Phi->addIncoming(Add, L);

  Builder.SetInsertPoint(Post);
  Instruction *Ret = Builder.CreateRetVoid();

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(Phi);
  EXPECT_TRUE(isa<SCEVAddRecExpr>(S));
  const SCEVAddRecExpr *AR = cast<SCEVAddRecExpr>(S);
  EXPECT_TRUE(AR->isAffine());
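  // The addrec is tied to loop %L: it cannot be materialized before the loop
  // is entered, but it can be expanded inside the loop and after it.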
  EXPECT_FALSE(isSafeToExpandAt(AR, Top->getTerminator(), SE));
  EXPECT_FALSE(isSafeToExpandAt(AR, LPh->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, L->getTerminator(), SE));
  EXPECT_TRUE(isSafeToExpandAt(AR, Post->getTerminator(), SE));

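  // Expanding %add's SCEV at the return must leave the loop in LCSSA form.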
  EXPECT_TRUE(LI->getLoopFor(L)->isLCSSAForm(*DT));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  Exp.expandCodeFor(SE.getSCEV(Add), nullptr, Ret);
  EXPECT_TRUE(LI->getLoopFor(L)->isLCSSAForm(*DT));
}

// Check that the SCEV expander does not reuse an existing add that carries
// the nuw flag when expanding.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   br i1 false, label %exit, label %body
   * body:
   *   %s1 = add i64 %a, -1
   *   br label %exit
   * exit:
   *   %s2 = add nuw i64 %a, -1
   *   ret void
   */

  // Create a module.
  Module M("SCEVExpanderNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

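  // Expand %a - 1 at the return. %s1 does not dominate the return and the
  // dominating %s2 carries nuw, so the expander must emit a flag-free add.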
  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that the SCEV expander does not reuse an existing add that carries
// the nsw flag when expanding.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   br i1 false, label %exit, label %body
   * body:
   *   %s1 = add i64 %a, -1
   *   br label %exit
   * exit:
   *   %s2 = add nsw i64 %a, -1
   *   ret void
   */

  // Create a module.
  Module M("SCEVExpanderNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Body = BasicBlock::Create(Context, "body", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  ConstantInt *Cond = ConstantInt::get(Context, APInt(1, 0));
  Builder.CreateCondBr(Cond, Exit, Body);

  Builder.SetInsertPoint(Body);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  const SCEV *S = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(S));
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(S, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}

// Check that the expander does not reuse a cached SCEV -> V mapping when the
// cached value V differs from the SCEV in its nuw flag.
TEST_F(ScalarEvolutionExpanderTest, SCEVCacheNUW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   %s1 = add i64 %a, -1
   *   %s2 = add nuw i64 %a, -1
   *   br label %exit
   * exit:
   *   ret void
   */

  // Create a module.
  Module M("SCEVCacheNUW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoUnsignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Query S2 first so it lands in the cache first.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now query S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expander should reuse S1, not S2, even though S2 was
  // cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoUnsignedWrap());
}

// Check that the expander does not reuse a cached SCEV -> V mapping when the
// cached value V differs from the SCEV in its nsw flag.
TEST_F(ScalarEvolutionExpanderTest, SCEVCacheNSW) {
  /*
   * Create the following code:
   * func(i64 %a)
   * entry:
   *   %s1 = add i64 %a, -1
   *   %s2 = add nsw i64 %a, -1
   *   br label %exit
   * exit:
   *   ret void
   */

  // Create a module.
  Module M("SCEVCacheNSW", Context);

  Type *T_int64 = Type::getInt64Ty(Context);

  FunctionType *FTy =
      FunctionType::get(Type::getVoidTy(Context), {T_int64}, false);
  Function *F = Function::Create(FTy, Function::ExternalLinkage, "func", M);
  Argument *Arg = &*F->arg_begin();
  ConstantInt *C = ConstantInt::get(Context, APInt(64, -1));

  BasicBlock *Entry = BasicBlock::Create(Context, "entry", F);
  BasicBlock *Exit = BasicBlock::Create(Context, "exit", F);

  IRBuilder<> Builder(Entry);
  auto *S1 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  auto *S2 = cast<Instruction>(Builder.CreateAdd(Arg, C, "add"));
  S2->setHasNoSignedWrap(true);
  Builder.CreateBr(Exit);

  Builder.SetInsertPoint(Exit);
  auto *R = cast<Instruction>(Builder.CreateRetVoid());

  ScalarEvolution SE = buildSE(*F);
  // Query S2 first so it lands in the cache first.
  const SCEV *SC2 = SE.getSCEV(S2);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC2));
  // Now query S1.
  const SCEV *SC1 = SE.getSCEV(S1);
  EXPECT_TRUE(isa<SCEVAddExpr>(SC1));
  // Expand SC1; the expander should reuse S1, not S2, even though S2 was
  // cached first.
  SCEVExpander Exp(SE, M.getDataLayout(), "expander");
  auto *I = cast<Instruction>(Exp.expandCodeFor(SC1, nullptr, R));
  EXPECT_FALSE(I->hasNoSignedWrap());
}

TEST_F(ScalarEvolutionExpanderTest, SCEVExpandInsertCanonicalIV) {
  LLVMContext C;
  SMDiagnostic Err;

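  // Three scenarios are exercised below: a loop with no canonical IV at all,
  // a loop whose canonical IV is narrower than the addrec's type, and a loop
  // whose canonical IV width matches the addrec exactly.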
  // Expand the addrec produced by GetAddRec into a loop without a canonical
  // IV. SCEVExpander will insert one.
  auto TestNoCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %i.inc = add nsw i32 %i, 1 "
            "  %cont = icmp slt i32 %i.inc, %limit "
            "  br i1 %cont, label %loop, label %exit "
            "exit: "
            "  ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto *Loop = LI.getLoopFor(I.getParent());
              EXPECT_FALSE(Loop->getCanonicalInductionVariable());

              auto *AR = GetAddRec(SE, Loop);
              unsigned ExpectedCanonicalIVWidth =
                  SE.getTypeSizeInBits(AR->getType());

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);
              PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
              EXPECT_EQ(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);
            });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  // SCEVExpander will insert a canonical IV of a wider type to expand the
  // addrec.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEV *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Exp.expandCodeFor(AR, nullptr, InsertAt);

      // Loop over all of the PHI nodes, looking for the new canonical indvar.
      PHINode *NewCanonicalIV = nullptr;
      for (BasicBlock::iterator i = LoopHeaderBB->begin(); isa<PHINode>(i);
           ++i) {
        PHINode *PN = cast<PHINode>(i);
        if (PN == &I || PN == CanonicalIV)
          continue;
        // We expect that the only PHI added is the new canonical IV.
        EXPECT_FALSE(NewCanonicalIV);
        NewCanonicalIV = PN;
      }

      // Check that NewCanonicalIV is a canonical IV, i.e. {0,+,1}.
      BasicBlock *Incoming = nullptr, *Backedge = nullptr;
      EXPECT_TRUE(Loop->getIncomingAndBackEdge(Incoming, Backedge));
      auto *Start = NewCanonicalIV->getIncomingValueForBlock(Incoming);
      EXPECT_TRUE(isa<ConstantInt>(Start));
      EXPECT_TRUE(dyn_cast<ConstantInt>(Start)->isZero());
      auto *Next = NewCanonicalIV->getIncomingValueForBlock(Backedge);
      EXPECT_TRUE(isa<BinaryOperator>(Next));
      auto *NextBinOp = dyn_cast<BinaryOperator>(Next);
      EXPECT_EQ(NextBinOp->getOpcode(), Instruction::Add);
      EXPECT_EQ(NextBinOp->getOperand(0), NewCanonicalIV);
      auto *Step = NextBinOp->getOperand(1);
      EXPECT_TRUE(isa<ConstantInt>(Step));
      EXPECT_TRUE(dyn_cast<ConstantInt>(Step)->isOne());

      unsigned NewCanonicalIVBitWidth =
          cast<IntegerType>(NewCanonicalIV->getType())->getBitWidth();
      EXPECT_EQ(NewCanonicalIVBitWidth, ExpectedCanonicalIVWidth);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  // To expand the addrec SCEVExpander should use the existing canonical IV.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEV *(ScalarEvolution & SE, Loop * L)> GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                "  %i.inc = add nsw i32 %i, 1 "
                "  %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                "  %cont = icmp slt i32 %i.inc, %limit "
                "  br i1 %cont, label %loop, label %exit "
                "exit: "
                "  ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Exp.expandCodeFor(AR, nullptr, InsertAt);

              // Loop over all of the PHI nodes, checking whether a new
              // canonical indvar was introduced.
              PHINode *NewCanonicalIV = nullptr;
              for (BasicBlock::iterator i = LoopHeaderBB->begin();
                   isa<PHINode>(i); ++i) {
                PHINode *PN = cast<PHINode>(i);
                if (PN == &I || PN == &CanonicalIV)
                  continue;
                NewCanonicalIV = PN;
              }
              EXPECT_FALSE(NewCanonicalIV);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1}.
  auto GetAR2 = [&](ScalarEvolution &SE, Loop *L) -> const SCEV * {
    return SE.getAddRecExpr(SE.getConstant(APInt(ARBitWidth, 5)),
                            SE.getOne(ARType), L, SCEV::FlagAnyWrap);
  };
  TestNoCanonicalIV(GetAR2);
  TestNarrowCanonicalIV(GetAR2);
  TestMatchingCanonicalIV(GetAR2, ARBitWidth);
}

TEST_F(ScalarEvolutionExpanderTest, SCEVExpanderShlNSW) {

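  // SCEV models (and iN %x, <sign-bit mask>) as a multiply carrying the nsw
  // flag; the shl the expander emits for that multiply must not inherit nsw.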
  auto checkOneCase = [this](std::string &&str) {
    LLVMContext C;
    SMDiagnostic Err;
    std::unique_ptr<Module> M = parseAssemblyString(str, Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    Function *F = M->getFunction("f");
    ASSERT_NE(F, nullptr) << "Could not find function 'f'";

    BasicBlock &Entry = F->getEntryBlock();
    LoadInst *Load = cast<LoadInst>(&Entry.front());
    BinaryOperator *And = cast<BinaryOperator>(*Load->user_begin());

    ScalarEvolution SE = buildSE(*F);
    const SCEV *AndSCEV = SE.getSCEV(And);
    EXPECT_TRUE(isa<SCEVMulExpr>(AndSCEV));
    EXPECT_TRUE(cast<SCEVMulExpr>(AndSCEV)->hasNoSignedWrap());

    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
    auto *I = cast<Instruction>(Exp.expandCodeFor(AndSCEV, nullptr, And));
    EXPECT_EQ(I->getOpcode(), Instruction::Shl);
    EXPECT_FALSE(I->hasNoSignedWrap());
  };

  checkOneCase("define void @f(i16* %arrayidx) { "
               "  %1 = load i16, i16* %arrayidx "
               "  %2 = and i16 %1, -32768 "
               "  ret void "
               "} ");

  checkOneCase("define void @f(i8* %arrayidx) { "
               "  %1 = load i8, i8* %arrayidx "
               "  %2 = and i8 %1, -128 "
               "  ret void "
               "} ");
}

// Test expansion of nested addrecs in CanonicalMode.
// Expanding nested addrecs in canonical mode requires a canonical IV of a
// type wider than the type of the addrec itself. Currently, SCEVExpander
// just falls back to literal mode for nested addrecs.
TEST_F(ScalarEvolutionExpanderTest, SCEVExpandNonAffineAddRec) {
  LLVMContext C;
  SMDiagnostic Err;

  // Expand the addrec produced by GetAddRec into a loop without a canonical
  // IV.
  auto TestNoCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec) {
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %i.inc = add nsw i32 %i, 1 "
            "  %cont = icmp slt i32 %i.inc, %limit "
            "  br i1 %cont, label %loop, label %exit "
            "exit: "
            "  ret i32 %i.inc "
            "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(*M, "test",
                  [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
                    auto &I = GetInstByName(F, "i");
                    auto *Loop = LI.getLoopFor(I.getParent());
                    EXPECT_FALSE(Loop->getCanonicalInductionVariable());

                    auto *AR = GetAddRec(SE, Loop);
                    EXPECT_FALSE(AR->isAffine());

                    SCEVExpander Exp(SE, M->getDataLayout(), "expander");
                    auto *InsertAt = I.getNextNode();
                    Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
                    auto *ExpandedAR = SE.getSCEV(V);
                    // Check that the expansion happened literally.
                    EXPECT_EQ(AR, ExpandedAR);
                  });
      };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // which is narrower than the addrec type.
  auto TestNarrowCanonicalIV = [&](std::function<const SCEVAddRecExpr *(
                                       ScalarEvolution & SE, Loop * L)>
                                       GetAddRec) {
    std::unique_ptr<Module> M = parseAssemblyString(
        "define i32 @test(i32 %limit) { "
        "entry: "
        "  br label %loop "
        "loop: "
        "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
        "  %canonical.iv = phi i8 [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
        "  %i.inc = add nsw i32 %i, 1 "
        "  %canonical.iv.inc = add i8 %canonical.iv, 1 "
        "  %cont = icmp slt i32 %i.inc, %limit "
        "  br i1 %cont, label %loop, label %exit "
        "exit: "
        "  ret i32 %i.inc "
        "}",
        Err, C);

    assert(M && "Could not parse module?");
    assert(!verifyModule(*M) && "Must have been well formed!");

    runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
      auto &I = GetInstByName(F, "i");

      auto *LoopHeaderBB = I.getParent();
      auto *Loop = LI.getLoopFor(LoopHeaderBB);
      PHINode *CanonicalIV = Loop->getCanonicalInductionVariable();
      EXPECT_EQ(CanonicalIV, &GetInstByName(F, "canonical.iv"));

      auto *AR = GetAddRec(SE, Loop);
      EXPECT_FALSE(AR->isAffine());

      unsigned ExpectedCanonicalIVWidth = SE.getTypeSizeInBits(AR->getType());
      unsigned CanonicalIVBitWidth =
          cast<IntegerType>(CanonicalIV->getType())->getBitWidth();
      EXPECT_LT(CanonicalIVBitWidth, ExpectedCanonicalIVWidth);

      SCEVExpander Exp(SE, M->getDataLayout(), "expander");
      auto *InsertAt = I.getNextNode();
      Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
      auto *ExpandedAR = SE.getSCEV(V);
      // Check that the expansion happened literally.
      EXPECT_EQ(AR, ExpandedAR);
    });
  };

  // Expand the addrec produced by GetAddRec into a loop with a canonical IV
  // of addrec width.
  auto TestMatchingCanonicalIV =
      [&](std::function<const SCEVAddRecExpr *(ScalarEvolution & SE, Loop * L)>
              GetAddRec,
          unsigned ARBitWidth) {
        auto ARBitWidthTypeStr = "i" + std::to_string(ARBitWidth);
        std::unique_ptr<Module> M = parseAssemblyString(
            "define i32 @test(i32 %limit) { "
            "entry: "
            "  br label %loop "
            "loop: "
            "  %i = phi i32 [ 1, %entry ], [ %i.inc, %loop ] "
            "  %canonical.iv = phi " +
                ARBitWidthTypeStr +
                " [ 0, %entry ], [ %canonical.iv.inc, %loop ] "
                "  %i.inc = add nsw i32 %i, 1 "
                "  %canonical.iv.inc = add " +
                ARBitWidthTypeStr +
                " %canonical.iv, 1 "
                "  %cont = icmp slt i32 %i.inc, %limit "
                "  br i1 %cont, label %loop, label %exit "
                "exit: "
                "  ret i32 %i.inc "
                "}",
            Err, C);

        assert(M && "Could not parse module?");
        assert(!verifyModule(*M) && "Must have been well formed!");

        runWithSE(
            *M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
              auto &I = GetInstByName(F, "i");
              auto &CanonicalIV = GetInstByName(F, "canonical.iv");

              auto *LoopHeaderBB = I.getParent();
              auto *Loop = LI.getLoopFor(LoopHeaderBB);
              EXPECT_EQ(&CanonicalIV, Loop->getCanonicalInductionVariable());
              unsigned CanonicalIVBitWidth =
                  cast<IntegerType>(CanonicalIV.getType())->getBitWidth();

              auto *AR = GetAddRec(SE, Loop);
              EXPECT_FALSE(AR->isAffine());
              EXPECT_EQ(ARBitWidth, SE.getTypeSizeInBits(AR->getType()));
              EXPECT_EQ(CanonicalIVBitWidth, ARBitWidth);

              SCEVExpander Exp(SE, M->getDataLayout(), "expander");
              auto *InsertAt = I.getNextNode();
              Value *V = Exp.expandCodeFor(AR, nullptr, InsertAt);
              auto *ExpandedAR = SE.getSCEV(V);
              // Check that the expansion happened literally.
              EXPECT_EQ(AR, ExpandedAR);
            });
      };

  unsigned ARBitWidth = 16;
  Type *ARType = IntegerType::get(C, ARBitWidth);

  // Expand {5,+,1,+,1}.
  auto GetAR3 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 3> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR3);
  TestNarrowCanonicalIV(GetAR3);
  TestMatchingCanonicalIV(GetAR3, ARBitWidth);

  // Expand {5,+,1,+,1,+,1}.
  auto GetAR4 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 4> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR4);
  TestNarrowCanonicalIV(GetAR4);
  TestMatchingCanonicalIV(GetAR4, ARBitWidth);

  // Expand {5,+,1,+,1,+,1,+,1}.
  auto GetAR5 = [&](ScalarEvolution &SE, Loop *L) -> const SCEVAddRecExpr * {
    SmallVector<const SCEV *, 5> Ops = {SE.getConstant(APInt(ARBitWidth, 5)),
                                        SE.getOne(ARType), SE.getOne(ARType),
                                        SE.getOne(ARType), SE.getOne(ARType)};
    return cast<SCEVAddRecExpr>(SE.getAddRecExpr(Ops, L, SCEV::FlagAnyWrap));
  };
  TestNoCanonicalIV(GetAR5);
  TestNarrowCanonicalIV(GetAR5);
  TestMatchingCanonicalIV(GetAR5, ARBitWidth);
}

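// Check that expanding an offset from a null base pointer in a non-integral
// address space stays in pointer form (a gep from the null base) rather than
// round-tripping through inttoptr/ptrtoint.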
TEST_F(ScalarEvolutionExpanderTest, ExpandNonIntegralPtrWithNullBase) {
  LLVMContext C;
  SMDiagnostic Err;

  std::unique_ptr<Module> M =
      parseAssemblyString("target datalayout = "
                          "\"e-m:e-p270:32:32-p271:32:32-p272:64:64-i64:64-f80:"
                          "128-n8:16:32:64-S128-ni:1-p2:32:8:8:32-ni:2\""
                          "define float addrspace(1)* @test(i64 %offset) { "
                          "  %ptr = getelementptr inbounds float, float "
                          "addrspace(1)* null, i64 %offset"
                          "  ret float addrspace(1)* %ptr"
                          "}",
                          Err, C);

  assert(M && "Could not parse module?");
  assert(!verifyModule(*M) && "Must have been well formed!");

  runWithSE(*M, "test", [&](Function &F, LoopInfo &LI, ScalarEvolution &SE) {
    auto &I = GetInstByName(F, "ptr");
    auto PtrPlus1 =
        SE.getAddExpr(SE.getSCEV(&I), SE.getConstant(I.getType(), 1));
    SCEVExpander Exp(SE, M->getDataLayout(), "expander");

    Value *V = Exp.expandCodeFor(PtrPlus1, I.getType(), &I);
    I.replaceAllUsesWith(V);

    // Check that the expander created a bitcast of (gep i8* null, <index>)
    // in the same non-integral address space.
    auto *Cast = dyn_cast<BitCastInst>(V);
    EXPECT_TRUE(Cast);
    EXPECT_EQ(Cast->getType(), I.getType());
    auto *GEP = dyn_cast<GetElementPtrInst>(Cast->getOperand(0));
    EXPECT_TRUE(GEP);
    EXPECT_TRUE(cast<Constant>(GEP->getPointerOperand())->isNullValue());
    EXPECT_EQ(cast<PointerType>(GEP->getPointerOperand()->getType())
                  ->getAddressSpace(),
              cast<PointerType>(I.getType())->getAddressSpace());

    // Check the expander created the expected index computation:
    // add (shl %offset, 2), 1.
    Value *Arg;
    EXPECT_TRUE(
        match(GEP->getOperand(1),
              m_Add(m_Shl(m_Value(Arg), m_SpecificInt(2)), m_SpecificInt(1))));
    EXPECT_EQ(Arg, &*F.arg_begin());
    EXPECT_FALSE(verifyFunction(F, &errs()));
  });
}

} // end namespace llvm