1 //===- ScopInfo.cpp -------------------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // Create a polyhedral description for a static control flow region.
10 //
// The pass creates a polyhedral description of the SCoPs detected by
// ScopDetection, derived from their LLVM-IR code.
//
// This representation is shared among several tools in the polyhedral
// community, e.g. Cloog, Pluto, Loopo, and Graphite.
16 //
17 //===----------------------------------------------------------------------===//
18
19 #include "polly/ScopInfo.h"
20 #include "polly/LinkAllPasses.h"
21 #include "polly/Options.h"
22 #include "polly/ScopBuilder.h"
23 #include "polly/ScopDetection.h"
24 #include "polly/Support/GICHelper.h"
25 #include "polly/Support/ISLOStream.h"
26 #include "polly/Support/ISLTools.h"
27 #include "polly/Support/SCEVAffinator.h"
28 #include "polly/Support/SCEVValidator.h"
29 #include "polly/Support/ScopHelper.h"
30 #include "llvm/ADT/APInt.h"
31 #include "llvm/ADT/ArrayRef.h"
32 #include "llvm/ADT/PostOrderIterator.h"
33 #include "llvm/ADT/SmallPtrSet.h"
34 #include "llvm/ADT/SmallSet.h"
35 #include "llvm/ADT/Statistic.h"
36 #include "llvm/Analysis/AliasAnalysis.h"
37 #include "llvm/Analysis/AssumptionCache.h"
38 #include "llvm/Analysis/Loads.h"
39 #include "llvm/Analysis/LoopInfo.h"
40 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
41 #include "llvm/Analysis/RegionInfo.h"
42 #include "llvm/Analysis/RegionIterator.h"
43 #include "llvm/Analysis/ScalarEvolution.h"
44 #include "llvm/Analysis/ScalarEvolutionExpressions.h"
45 #include "llvm/IR/BasicBlock.h"
46 #include "llvm/IR/ConstantRange.h"
47 #include "llvm/IR/DataLayout.h"
48 #include "llvm/IR/DebugLoc.h"
49 #include "llvm/IR/Dominators.h"
50 #include "llvm/IR/Function.h"
51 #include "llvm/IR/InstrTypes.h"
52 #include "llvm/IR/Instruction.h"
53 #include "llvm/IR/Instructions.h"
54 #include "llvm/IR/Module.h"
55 #include "llvm/IR/PassManager.h"
56 #include "llvm/IR/Type.h"
57 #include "llvm/IR/Value.h"
58 #include "llvm/InitializePasses.h"
59 #include "llvm/Support/Compiler.h"
60 #include "llvm/Support/Debug.h"
61 #include "llvm/Support/ErrorHandling.h"
62 #include "llvm/Support/raw_ostream.h"
63 #include "isl/aff.h"
64 #include "isl/local_space.h"
65 #include "isl/map.h"
66 #include "isl/options.h"
67 #include "isl/set.h"
68 #include <cassert>
69
70 using namespace llvm;
71 using namespace polly;
72
73 #define DEBUG_TYPE "polly-scops"
74
75 STATISTIC(AssumptionsAliasing, "Number of aliasing assumptions taken.");
76 STATISTIC(AssumptionsInbounds, "Number of inbounds assumptions taken.");
77 STATISTIC(AssumptionsWrapping, "Number of wrapping assumptions taken.");
78 STATISTIC(AssumptionsUnsigned, "Number of unsigned assumptions taken.");
79 STATISTIC(AssumptionsComplexity, "Number of too complex SCoPs.");
80 STATISTIC(AssumptionsUnprofitable, "Number of unprofitable SCoPs.");
81 STATISTIC(AssumptionsErrorBlock, "Number of error block assumptions taken.");
82 STATISTIC(AssumptionsInfiniteLoop, "Number of bounded loop assumptions taken.");
83 STATISTIC(AssumptionsInvariantLoad,
84 "Number of invariant loads assumptions taken.");
85 STATISTIC(AssumptionsDelinearization,
86 "Number of delinearization assumptions taken.");
87
88 STATISTIC(NumScops, "Number of feasible SCoPs after ScopInfo");
89 STATISTIC(NumLoopsInScop, "Number of loops in scops");
90 STATISTIC(NumBoxedLoops, "Number of boxed loops in SCoPs after ScopInfo");
91 STATISTIC(NumAffineLoops, "Number of affine loops in SCoPs after ScopInfo");
92
93 STATISTIC(NumScopsDepthZero, "Number of scops with maximal loop depth 0");
94 STATISTIC(NumScopsDepthOne, "Number of scops with maximal loop depth 1");
95 STATISTIC(NumScopsDepthTwo, "Number of scops with maximal loop depth 2");
96 STATISTIC(NumScopsDepthThree, "Number of scops with maximal loop depth 3");
97 STATISTIC(NumScopsDepthFour, "Number of scops with maximal loop depth 4");
98 STATISTIC(NumScopsDepthFive, "Number of scops with maximal loop depth 5");
99 STATISTIC(NumScopsDepthLarger,
100 "Number of scops with maximal loop depth 6 and larger");
101 STATISTIC(MaxNumLoopsInScop, "Maximal number of loops in scops");
102
103 STATISTIC(NumValueWrites, "Number of scalar value writes after ScopInfo");
104 STATISTIC(
105 NumValueWritesInLoops,
106 "Number of scalar value writes nested in affine loops after ScopInfo");
107 STATISTIC(NumPHIWrites, "Number of scalar phi writes after ScopInfo");
108 STATISTIC(NumPHIWritesInLoops,
109 "Number of scalar phi writes nested in affine loops after ScopInfo");
110 STATISTIC(NumSingletonWrites, "Number of singleton writes after ScopInfo");
111 STATISTIC(NumSingletonWritesInLoops,
112 "Number of singleton writes nested in affine loops after ScopInfo");
113
114 int const polly::MaxDisjunctsInDomain = 20;
115
// The number of disjuncts in the context after which we stop adding more
// disjuncts. This parameter is there to avoid exponential growth in the
// number of disjuncts when adding non-convex sets to the context.
119 static int const MaxDisjunctsInContext = 4;
120
121 static cl::opt<bool> PollyRemarksMinimal(
122 "polly-remarks-minimal",
123 cl::desc("Do not emit remarks about assumptions that are known"),
124 cl::Hidden, cl::ZeroOrMore, cl::init(false), cl::cat(PollyCategory));
125
126 static cl::opt<bool>
127 IslOnErrorAbort("polly-on-isl-error-abort",
128 cl::desc("Abort if an isl error is encountered"),
129 cl::init(true), cl::cat(PollyCategory));
130
131 static cl::opt<bool> PollyPreciseInbounds(
132 "polly-precise-inbounds",
133 cl::desc("Take more precise inbounds assumptions (do not scale well)"),
134 cl::Hidden, cl::init(false), cl::cat(PollyCategory));
135
136 static cl::opt<bool> PollyIgnoreParamBounds(
137 "polly-ignore-parameter-bounds",
138 cl::desc(
        "Do not add parameter bounds and do not gist-simplify sets accordingly"),
140 cl::Hidden, cl::init(false), cl::cat(PollyCategory));
141
142 static cl::opt<bool> PollyPreciseFoldAccesses(
143 "polly-precise-fold-accesses",
144 cl::desc("Fold memory accesses to model more possible delinearizations "
145 "(does not scale well)"),
146 cl::Hidden, cl::init(false), cl::cat(PollyCategory));
147
148 bool polly::UseInstructionNames;
149
150 static cl::opt<bool, true> XUseInstructionNames(
151 "polly-use-llvm-names",
152 cl::desc("Use LLVM-IR names when deriving statement names"),
153 cl::location(UseInstructionNames), cl::Hidden, cl::init(false),
154 cl::ZeroOrMore, cl::cat(PollyCategory));
155
156 static cl::opt<bool> PollyPrintInstructions(
157 "polly-print-instructions", cl::desc("Output instructions per ScopStmt"),
158 cl::Hidden, cl::Optional, cl::init(false), cl::cat(PollyCategory));
159
160 static cl::list<std::string> IslArgs("polly-isl-arg",
161 cl::value_desc("argument"),
162 cl::desc("Option passed to ISL"),
163 cl::ZeroOrMore, cl::cat(PollyCategory));
164
165 //===----------------------------------------------------------------------===//
166
static isl::set addRangeBoundsToSet(isl::set S, const ConstantRange &Range,
                                    int dim, isl::dim type) {
169 isl::val V;
170 isl::ctx Ctx = S.get_ctx();
171
172 // The upper and lower bound for a parameter value is derived either from
173 // the data type of the parameter or from the - possibly more restrictive -
174 // range metadata.
175 V = valFromAPInt(Ctx.get(), Range.getSignedMin(), true);
176 S = S.lower_bound_val(type, dim, V);
177 V = valFromAPInt(Ctx.get(), Range.getSignedMax(), true);
178 S = S.upper_bound_val(type, dim, V);
179
180 if (Range.isFullSet())
181 return S;
182
183 if (S.n_basic_set() > MaxDisjunctsInContext)
184 return S;
185
186 // In case of signed wrapping, we can refine the set of valid values by
187 // excluding the part not covered by the wrapping range.
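  // For example, assuming an 8-bit range [100, -100) (which is sign-wrapped),
  // the admissible values are [100, 127] and [-128, -101]. The bounds added
  // above only restrict the dimension to [-128, 127]; the union of the two
  // half-lines built below additionally excludes the uncovered gap
  // (-101, 100).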
188 if (Range.isSignWrappedSet()) {
189 V = valFromAPInt(Ctx.get(), Range.getLower(), true);
190 isl::set SLB = S.lower_bound_val(type, dim, V);
191
192 V = valFromAPInt(Ctx.get(), Range.getUpper(), true);
193 V = V.sub_ui(1);
194 isl::set SUB = S.upper_bound_val(type, dim, V);
195 S = SLB.unite(SUB);
196 }
197
198 return S;
199 }
200
static const ScopArrayInfo *identifyBasePtrOriginSAI(Scop *S, Value *BasePtr) {
202 LoadInst *BasePtrLI = dyn_cast<LoadInst>(BasePtr);
203 if (!BasePtrLI)
204 return nullptr;
205
206 if (!S->contains(BasePtrLI))
207 return nullptr;
208
209 ScalarEvolution &SE = *S->getSE();
210
211 auto *OriginBaseSCEV =
212 SE.getPointerBase(SE.getSCEV(BasePtrLI->getPointerOperand()));
213 if (!OriginBaseSCEV)
214 return nullptr;
215
216 auto *OriginBaseSCEVUnknown = dyn_cast<SCEVUnknown>(OriginBaseSCEV);
217 if (!OriginBaseSCEVUnknown)
218 return nullptr;
219
220 return S->getScopArrayInfo(OriginBaseSCEVUnknown->getValue(),
221 MemoryKind::Array);
222 }
223
ScopArrayInfo::ScopArrayInfo(Value *BasePtr, Type *ElementType, isl::ctx Ctx,
                             ArrayRef<const SCEV *> Sizes, MemoryKind Kind,
                             const DataLayout &DL, Scop *S,
                             const char *BaseName)
228 : BasePtr(BasePtr), ElementType(ElementType), Kind(Kind), DL(DL), S(*S) {
229 std::string BasePtrName =
230 BaseName ? BaseName
231 : getIslCompatibleName("MemRef", BasePtr, S->getNextArrayIdx(),
232 Kind == MemoryKind::PHI ? "__phi" : "",
233 UseInstructionNames);
234 Id = isl::id::alloc(Ctx, BasePtrName, this);
235
236 updateSizes(Sizes);
237
238 if (!BasePtr || Kind != MemoryKind::Array) {
239 BasePtrOriginSAI = nullptr;
240 return;
241 }
242
243 BasePtrOriginSAI = identifyBasePtrOriginSAI(S, BasePtr);
244 if (BasePtrOriginSAI)
245 const_cast<ScopArrayInfo *>(BasePtrOriginSAI)->addDerivedSAI(this);
246 }
247
248 ScopArrayInfo::~ScopArrayInfo() = default;
249
isl::space ScopArrayInfo::getSpace() const {
251 auto Space = isl::space(Id.get_ctx(), 0, getNumberOfDimensions());
252 Space = Space.set_tuple_id(isl::dim::set, Id);
253 return Space;
254 }
255
bool ScopArrayInfo::isReadOnly() {
257 isl::union_set WriteSet = S.getWrites().range();
258 isl::space Space = getSpace();
259 WriteSet = WriteSet.extract_set(Space);
260
261 return bool(WriteSet.is_empty());
262 }
263
bool ScopArrayInfo::isCompatibleWith(const ScopArrayInfo *Array) const {
265 if (Array->getElementType() != getElementType())
266 return false;
267
268 if (Array->getNumberOfDimensions() != getNumberOfDimensions())
269 return false;
270
271 for (unsigned i = 0; i < getNumberOfDimensions(); i++)
272 if (Array->getDimensionSize(i) != getDimensionSize(i))
273 return false;
274
275 return true;
276 }
277
void ScopArrayInfo::updateElementType(Type *NewElementType) {
279 if (NewElementType == ElementType)
280 return;
281
282 auto OldElementSize = DL.getTypeAllocSizeInBits(ElementType);
283 auto NewElementSize = DL.getTypeAllocSizeInBits(NewElementType);
284
285 if (NewElementSize == OldElementSize || NewElementSize == 0)
286 return;
287
288 if (NewElementSize % OldElementSize == 0 && NewElementSize < OldElementSize) {
289 ElementType = NewElementType;
290 } else {
291 auto GCD = GreatestCommonDivisor64(NewElementSize, OldElementSize);
292 ElementType = IntegerType::get(ElementType->getContext(), GCD);
293 }
294 }
295
296 /// Make the ScopArrayInfo model a Fortran Array
void ScopArrayInfo::applyAndSetFAD(Value *FAD) {
298 assert(FAD && "got invalid Fortran array descriptor");
299 if (this->FAD) {
300 assert(this->FAD == FAD &&
301 "receiving different array descriptors for same array");
302 return;
303 }
304
305 assert(DimensionSizesPw.size() > 0 && !DimensionSizesPw[0]);
306 assert(!this->FAD);
307 this->FAD = FAD;
308
309 isl::space Space(S.getIslCtx(), 1, 0);
310
311 std::string param_name = getName();
312 param_name += "_fortranarr_size";
313 isl::id IdPwAff = isl::id::alloc(S.getIslCtx(), param_name, this);
314
315 Space = Space.set_dim_id(isl::dim::param, 0, IdPwAff);
316 isl::pw_aff PwAff =
317 isl::aff::var_on_domain(isl::local_space(Space), isl::dim::param, 0);
318
319 DimensionSizesPw[0] = PwAff;
320 }
321
bool ScopArrayInfo::updateSizes(ArrayRef<const SCEV *> NewSizes,
                                bool CheckConsistency) {
324 int SharedDims = std::min(NewSizes.size(), DimensionSizes.size());
325 int ExtraDimsNew = NewSizes.size() - SharedDims;
326 int ExtraDimsOld = DimensionSizes.size() - SharedDims;
327
328 if (CheckConsistency) {
329 for (int i = 0; i < SharedDims; i++) {
330 auto *NewSize = NewSizes[i + ExtraDimsNew];
331 auto *KnownSize = DimensionSizes[i + ExtraDimsOld];
332 if (NewSize && KnownSize && NewSize != KnownSize)
333 return false;
334 }
335
336 if (DimensionSizes.size() >= NewSizes.size())
337 return true;
338 }
339
340 DimensionSizes.clear();
341 DimensionSizes.insert(DimensionSizes.begin(), NewSizes.begin(),
342 NewSizes.end());
343 DimensionSizesPw.clear();
344 for (const SCEV *Expr : DimensionSizes) {
345 if (!Expr) {
346 DimensionSizesPw.push_back(nullptr);
347 continue;
348 }
349 isl::pw_aff Size = S.getPwAffOnly(Expr);
350 DimensionSizesPw.push_back(Size);
351 }
352 return true;
353 }
354
std::string ScopArrayInfo::getName() const { return Id.get_name(); }

int ScopArrayInfo::getElemSizeInBytes() const {
  return DL.getTypeAllocSize(ElementType);
}

isl::id ScopArrayInfo::getBasePtrId() const { return Id; }

#if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopArrayInfo::dump() const { print(errs()); }
#endif

void ScopArrayInfo::print(raw_ostream &OS, bool SizeAsPwAff) const {
368 OS.indent(8) << *getElementType() << " " << getName();
369 unsigned u = 0;
370 // If this is a Fortran array, then we can print the outermost dimension
  // as an isl_pw_aff even though there is no SCEV information.
372 bool IsOutermostSizeKnown = SizeAsPwAff && FAD;
373
374 if (!IsOutermostSizeKnown && getNumberOfDimensions() > 0 &&
375 !getDimensionSize(0)) {
376 OS << "[*]";
377 u++;
378 }
379 for (; u < getNumberOfDimensions(); u++) {
380 OS << "[";
381
382 if (SizeAsPwAff) {
383 isl::pw_aff Size = getDimensionSizePw(u);
384 OS << " " << Size << " ";
385 } else {
386 OS << *getDimensionSize(u);
387 }
388
389 OS << "]";
390 }
391
392 OS << ";";
393
394 if (BasePtrOriginSAI)
395 OS << " [BasePtrOrigin: " << BasePtrOriginSAI->getName() << "]";
396
397 OS << " // Element size " << getElemSizeInBytes() << "\n";
398 }
399
400 const ScopArrayInfo *
ScopArrayInfo::getFromAccessFunction(isl::pw_multi_aff PMA) {
402 isl::id Id = PMA.get_tuple_id(isl::dim::out);
403 assert(!Id.is_null() && "Output dimension didn't have an ID");
404 return getFromId(Id);
405 }
406
const ScopArrayInfo *ScopArrayInfo::getFromId(isl::id Id) {
408 void *User = Id.get_user();
409 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
410 return SAI;
411 }
412
void MemoryAccess::wrapConstantDimensions() {
414 auto *SAI = getScopArrayInfo();
415 isl::space ArraySpace = SAI->getSpace();
416 isl::ctx Ctx = ArraySpace.get_ctx();
417 unsigned DimsArray = SAI->getNumberOfDimensions();
418
419 isl::multi_aff DivModAff = isl::multi_aff::identity(
420 ArraySpace.map_from_domain_and_range(ArraySpace));
421 isl::local_space LArraySpace = isl::local_space(ArraySpace);
422
423 // Begin with last dimension, to iteratively carry into higher dimensions.
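  // For example, assuming the innermost dimension has constant size 8, a
  // subscript pair (i, j) with j = 17 is rewritten to (i + 2, 1), since
  // 17 = 2 * 8 + 1; the quotient is carried into the next outer dimension.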
424 for (int i = DimsArray - 1; i > 0; i--) {
425 auto *DimSize = SAI->getDimensionSize(i);
426 auto *DimSizeCst = dyn_cast<SCEVConstant>(DimSize);
427
428 // This transformation is not applicable to dimensions with dynamic size.
429 if (!DimSizeCst)
430 continue;
431
432 // This transformation is not applicable to dimensions of size zero.
433 if (DimSize->isZero())
434 continue;
435
436 isl::val DimSizeVal =
437 valFromAPInt(Ctx.get(), DimSizeCst->getAPInt(), false);
438 isl::aff Var = isl::aff::var_on_domain(LArraySpace, isl::dim::set, i);
439 isl::aff PrevVar =
440 isl::aff::var_on_domain(LArraySpace, isl::dim::set, i - 1);
441
442 // Compute: index % size
443 // Modulo must apply in the divide of the previous iteration, if any.
444 isl::aff Modulo = Var.mod(DimSizeVal);
445 Modulo = Modulo.pullback(DivModAff);
446
447 // Compute: floor(index / size)
448 isl::aff Divide = Var.div(isl::aff(LArraySpace, DimSizeVal));
449 Divide = Divide.floor();
450 Divide = Divide.add(PrevVar);
451 Divide = Divide.pullback(DivModAff);
452
453 // Apply Modulo and Divide.
454 DivModAff = DivModAff.set_aff(i, Modulo);
455 DivModAff = DivModAff.set_aff(i - 1, Divide);
456 }
457
458 // Apply all modulo/divides on the accesses.
459 isl::map Relation = AccessRelation;
460 Relation = Relation.apply_range(isl::map::from_multi_aff(DivModAff));
461 Relation = Relation.detect_equalities();
462 AccessRelation = Relation;
463 }
464
void MemoryAccess::updateDimensionality() {
466 auto *SAI = getScopArrayInfo();
467 isl::space ArraySpace = SAI->getSpace();
468 isl::space AccessSpace = AccessRelation.get_space().range();
469 isl::ctx Ctx = ArraySpace.get_ctx();
470
471 auto DimsArray = ArraySpace.dim(isl::dim::set);
472 auto DimsAccess = AccessSpace.dim(isl::dim::set);
473 auto DimsMissing = DimsArray - DimsAccess;
474
475 auto *BB = getStatement()->getEntryBlock();
476 auto &DL = BB->getModule()->getDataLayout();
477 unsigned ArrayElemSize = SAI->getElemSizeInBytes();
478 unsigned ElemBytes = DL.getTypeAllocSize(getElementType());
479
480 isl::map Map = isl::map::from_domain_and_range(
481 isl::set::universe(AccessSpace), isl::set::universe(ArraySpace));
482
483 for (unsigned i = 0; i < DimsMissing; i++)
484 Map = Map.fix_si(isl::dim::out, i, 0);
485
486 for (unsigned i = DimsMissing; i < DimsArray; i++)
487 Map = Map.equate(isl::dim::in, i - DimsMissing, isl::dim::out, i);
488
489 AccessRelation = AccessRelation.apply_range(Map);
490
  // For non-delinearized arrays, divide the access function of the last
  // subscript by the size of the elements in the array.
  //
  // A stride one array access in C expressed as A[i] is expressed in
  // LLVM-IR as something like A[i * elementsize]. This hides the fact that
  // two subsequent values of 'i' index two values that are stored next to
  // each other in memory. By this division we make this characteristic
  // obvious again. If the base pointer was accessed with offsets not divisible
  // by the access's element size, we will have chosen a smaller ArrayElemSize
  // that divides the offsets of all accesses to this base pointer.
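  // For example, assuming 4-byte elements, an LLVM-IR access function 4 * i
  // is rewritten to i, so consecutive values of i again index consecutive
  // array elements.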
501 if (DimsAccess == 1) {
502 isl::val V = isl::val(Ctx, ArrayElemSize);
503 AccessRelation = AccessRelation.floordiv_val(V);
504 }
505
506 // We currently do this only if we added at least one dimension, which means
507 // some dimension's indices have not been specified, an indicator that some
508 // index values have been added together.
509 // TODO: Investigate general usefulness; Effect on unit tests is to make index
510 // expressions more complicated.
511 if (DimsMissing)
512 wrapConstantDimensions();
513
514 if (!isAffine())
515 computeBoundsOnAccessRelation(ArrayElemSize);
516
517 // Introduce multi-element accesses in case the type loaded by this memory
518 // access is larger than the canonical element type of the array.
519 //
520 // An access ((float *)A)[i] to an array char *A is modeled as
  //    {[i] -> A[o] : 4 i <= o <= 4 i + 3 }
522 if (ElemBytes > ArrayElemSize) {
523 assert(ElemBytes % ArrayElemSize == 0 &&
524 "Loaded element size should be multiple of canonical element size");
525 isl::map Map = isl::map::from_domain_and_range(
526 isl::set::universe(ArraySpace), isl::set::universe(ArraySpace));
527 for (unsigned i = 0; i < DimsArray - 1; i++)
528 Map = Map.equate(isl::dim::in, i, isl::dim::out, i);
529
530 isl::constraint C;
531 isl::local_space LS;
532
533 LS = isl::local_space(Map.get_space());
534 int Num = ElemBytes / getScopArrayInfo()->getElemSizeInBytes();
535
536 C = isl::constraint::alloc_inequality(LS);
537 C = C.set_constant_val(isl::val(Ctx, Num - 1));
538 C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, 1);
539 C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, -1);
540 Map = Map.add_constraint(C);
541
542 C = isl::constraint::alloc_inequality(LS);
543 C = C.set_coefficient_si(isl::dim::in, DimsArray - 1, -1);
544 C = C.set_coefficient_si(isl::dim::out, DimsArray - 1, 1);
545 C = C.set_constant_val(isl::val(Ctx, 0));
546 Map = Map.add_constraint(C);
547 AccessRelation = AccessRelation.apply_range(Map);
548 }
549 }
550
551 const std::string
MemoryAccess::getReductionOperatorStr(MemoryAccess::ReductionType RT) {
553 switch (RT) {
554 case MemoryAccess::RT_NONE:
555 llvm_unreachable("Requested a reduction operator string for a memory "
556 "access which isn't a reduction");
557 case MemoryAccess::RT_ADD:
558 return "+";
559 case MemoryAccess::RT_MUL:
560 return "*";
561 case MemoryAccess::RT_BOR:
562 return "|";
563 case MemoryAccess::RT_BXOR:
564 return "^";
565 case MemoryAccess::RT_BAND:
566 return "&";
567 }
568 llvm_unreachable("Unknown reduction type");
569 }
570
const ScopArrayInfo *MemoryAccess::getOriginalScopArrayInfo() const {
572 isl::id ArrayId = getArrayId();
573 void *User = ArrayId.get_user();
574 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
575 return SAI;
576 }
577
const ScopArrayInfo *MemoryAccess::getLatestScopArrayInfo() const {
579 isl::id ArrayId = getLatestArrayId();
580 void *User = ArrayId.get_user();
581 const ScopArrayInfo *SAI = static_cast<ScopArrayInfo *>(User);
582 return SAI;
583 }
584
isl::id MemoryAccess::getOriginalArrayId() const {
586 return AccessRelation.get_tuple_id(isl::dim::out);
587 }
588
isl::id MemoryAccess::getLatestArrayId() const {
590 if (!hasNewAccessRelation())
591 return getOriginalArrayId();
592 return NewAccessRelation.get_tuple_id(isl::dim::out);
593 }
594
isl::map MemoryAccess::getAddressFunction() const {
596 return getAccessRelation().lexmin();
597 }
598
599 isl::pw_multi_aff
MemoryAccess::applyScheduleToAccessRelation(isl::union_map USchedule) const {
601 isl::map Schedule, ScheduledAccRel;
602 isl::union_set UDomain;
603
604 UDomain = getStatement()->getDomain();
605 USchedule = USchedule.intersect_domain(UDomain);
606 Schedule = isl::map::from_union_map(USchedule);
607 ScheduledAccRel = getAddressFunction().apply_domain(Schedule);
608 return isl::pw_multi_aff::from_map(ScheduledAccRel);
609 }
610
isl::map MemoryAccess::getOriginalAccessRelation() const {
612 return AccessRelation;
613 }
614
std::string MemoryAccess::getOriginalAccessRelationStr() const {
616 return AccessRelation.to_str();
617 }
618
isl::space MemoryAccess::getOriginalAccessRelationSpace() const {
620 return AccessRelation.get_space();
621 }
622
isl::map MemoryAccess::getNewAccessRelation() const {
624 return NewAccessRelation;
625 }
626
std::string MemoryAccess::getNewAccessRelationStr() const {
628 return NewAccessRelation.to_str();
629 }
630
std::string MemoryAccess::getAccessRelationStr() const {
632 return getAccessRelation().to_str();
633 }
634
isl::basic_map MemoryAccess::createBasicAccessMap(ScopStmt *Statement) {
636 isl::space Space = isl::space(Statement->getIslCtx(), 0, 1);
637 Space = Space.align_params(Statement->getDomainSpace());
638
639 return isl::basic_map::from_domain_and_range(
640 isl::basic_set::universe(Statement->getDomainSpace()),
641 isl::basic_set::universe(Space));
642 }
643
644 // Formalize no out-of-bound access assumption
645 //
// When delinearizing array accesses we optimistically assume that the
// delinearized accesses do not access out-of-bound locations (i.e., for each
// executed statement instance, the subscript expression in each dimension
// evaluates to a value that is at least zero and strictly smaller than the
// size of the corresponding dimension). The only exception is the outermost
// dimension, for which we do not need to assume any bounds. At this point
// we formalize this assumption to ensure that at code generation time the
// relevant run-time checks can be generated.
654 //
655 // To find the set of constraints necessary to avoid out of bound accesses, we
656 // first build the set of data locations that are not within array bounds. We
657 // then apply the reverse access relation to obtain the set of iterations that
658 // may contain invalid accesses and reduce this set of iterations to the ones
659 // that are actually executed by intersecting them with the domain of the
660 // statement. If we now project out all loop dimensions, we obtain a set of
661 // parameters that may cause statement instances to be executed that may
662 // possibly yield out of bound memory accesses. The complement of these
663 // constraints is the set of constraints that needs to be assumed to ensure such
664 // statement instances are never executed.
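//
// As an illustration, consider an access A[i][j] to an array whose innermost
// dimension has size m: the out-of-bounds locations are
// { A[a0][a1] : a1 < 0 or a1 >= m }; applying the reversed access relation and
// intersecting with the statement domain yields the offending iterations, and
// projecting out the loop dimensions leaves the parameter values that must be
// assumed not to occur.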
isl::set MemoryAccess::assumeNoOutOfBound() {
666 auto *SAI = getScopArrayInfo();
667 isl::space Space = getOriginalAccessRelationSpace().range();
668 isl::set Outside = isl::set::empty(Space);
669 for (int i = 1, Size = Space.dim(isl::dim::set); i < Size; ++i) {
670 isl::local_space LS(Space);
671 isl::pw_aff Var = isl::pw_aff::var_on_domain(LS, isl::dim::set, i);
672 isl::pw_aff Zero = isl::pw_aff(LS);
673
674 isl::set DimOutside = Var.lt_set(Zero);
675 isl::pw_aff SizeE = SAI->getDimensionSizePw(i);
676 SizeE = SizeE.add_dims(isl::dim::in, Space.dim(isl::dim::set));
677 SizeE = SizeE.set_tuple_id(isl::dim::in, Space.get_tuple_id(isl::dim::set));
678 DimOutside = DimOutside.unite(SizeE.le_set(Var));
679
680 Outside = Outside.unite(DimOutside);
681 }
682
683 Outside = Outside.apply(getAccessRelation().reverse());
684 Outside = Outside.intersect(Statement->getDomain());
685 Outside = Outside.params();
686
687 // Remove divs to avoid the construction of overly complicated assumptions.
688 // Doing so increases the set of parameter combinations that are assumed to
  // not appear. This is always safe, but may make the resulting run-time check
690 // bail out more often than strictly necessary.
691 Outside = Outside.remove_divs();
692 Outside = Outside.complement();
693
694 if (!PollyPreciseInbounds)
695 Outside = Outside.gist_params(Statement->getDomain().params());
696 return Outside;
697 }
698
void MemoryAccess::buildMemIntrinsicAccessRelation() {
700 assert(isMemoryIntrinsic());
701 assert(Subscripts.size() == 2 && Sizes.size() == 1);
702
703 isl::pw_aff SubscriptPWA = getPwAff(Subscripts[0]);
704 isl::map SubscriptMap = isl::map::from_pw_aff(SubscriptPWA);
705
706 isl::map LengthMap;
707 if (Subscripts[1] == nullptr) {
708 LengthMap = isl::map::universe(SubscriptMap.get_space());
709 } else {
710 isl::pw_aff LengthPWA = getPwAff(Subscripts[1]);
711 LengthMap = isl::map::from_pw_aff(LengthPWA);
712 isl::space RangeSpace = LengthMap.get_space().range();
713 LengthMap = LengthMap.apply_range(isl::map::lex_gt(RangeSpace));
714 }
715 LengthMap = LengthMap.lower_bound_si(isl::dim::out, 0, 0);
716 LengthMap = LengthMap.align_params(SubscriptMap.get_space());
717 SubscriptMap = SubscriptMap.align_params(LengthMap.get_space());
718 LengthMap = LengthMap.sum(SubscriptMap);
719 AccessRelation =
720 LengthMap.set_tuple_id(isl::dim::in, getStatement()->getDomainId());
721 }
722
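// Conservatively bound a non-affine access relation: derive the signed range
// of the pointer offset (relative to the base pointer) from ScalarEvolution,
// divide it by the element size, and intersect the access relation's range
// with the resulting interval.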
void MemoryAccess::computeBoundsOnAccessRelation(unsigned ElementSize) {
724 ScalarEvolution *SE = Statement->getParent()->getSE();
725
726 auto MAI = MemAccInst(getAccessInstruction());
727 if (isa<MemIntrinsic>(MAI))
728 return;
729
730 Value *Ptr = MAI.getPointerOperand();
731 if (!Ptr || !SE->isSCEVable(Ptr->getType()))
732 return;
733
734 auto *PtrSCEV = SE->getSCEV(Ptr);
735 if (isa<SCEVCouldNotCompute>(PtrSCEV))
736 return;
737
738 auto *BasePtrSCEV = SE->getPointerBase(PtrSCEV);
739 if (BasePtrSCEV && !isa<SCEVCouldNotCompute>(BasePtrSCEV))
740 PtrSCEV = SE->getMinusSCEV(PtrSCEV, BasePtrSCEV);
741
742 const ConstantRange &Range = SE->getSignedRange(PtrSCEV);
743 if (Range.isFullSet())
744 return;
745
746 if (Range.isUpperWrapped() || Range.isSignWrappedSet())
747 return;
748
749 bool isWrapping = Range.isSignWrappedSet();
750
751 unsigned BW = Range.getBitWidth();
752 const auto One = APInt(BW, 1);
753 const auto LB = isWrapping ? Range.getLower() : Range.getSignedMin();
754 const auto UB = isWrapping ? (Range.getUpper() - One) : Range.getSignedMax();
755
756 auto Min = LB.sdiv(APInt(BW, ElementSize));
757 auto Max = UB.sdiv(APInt(BW, ElementSize)) + One;
758
759 assert(Min.sle(Max) && "Minimum expected to be less or equal than max");
760
761 isl::map Relation = AccessRelation;
762 isl::set AccessRange = Relation.range();
763 AccessRange = addRangeBoundsToSet(AccessRange, ConstantRange(Min, Max), 0,
764 isl::dim::set);
765 AccessRelation = Relation.intersect_range(AccessRange);
766 }
767
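// Fold negative subscripts into the next outer dimension: under the
// assumption that Sizes[i + 1] is the size of dimension i + 1, a pair of
// adjacent subscripts (a, b) in dimensions i and i + 1 with b < 0 is
// rewritten to (a - 1, b + Sizes[i + 1]); subscripts with b >= 0 are kept.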
void MemoryAccess::foldAccessRelation() {
769 if (Sizes.size() < 2 || isa<SCEVConstant>(Sizes[1]))
770 return;
771
772 int Size = Subscripts.size();
773
774 isl::map NewAccessRelation = AccessRelation;
775
776 for (int i = Size - 2; i >= 0; --i) {
777 isl::space Space;
778 isl::map MapOne, MapTwo;
779 isl::pw_aff DimSize = getPwAff(Sizes[i + 1]);
780
781 isl::space SpaceSize = DimSize.get_space();
782 isl::id ParamId = SpaceSize.get_dim_id(isl::dim::param, 0);
783
784 Space = AccessRelation.get_space();
785 Space = Space.range().map_from_set();
786 Space = Space.align_params(SpaceSize);
787
788 int ParamLocation = Space.find_dim_by_id(isl::dim::param, ParamId);
789
790 MapOne = isl::map::universe(Space);
791 for (int j = 0; j < Size; ++j)
792 MapOne = MapOne.equate(isl::dim::in, j, isl::dim::out, j);
793 MapOne = MapOne.lower_bound_si(isl::dim::in, i + 1, 0);
794
795 MapTwo = isl::map::universe(Space);
796 for (int j = 0; j < Size; ++j)
797 if (j < i || j > i + 1)
798 MapTwo = MapTwo.equate(isl::dim::in, j, isl::dim::out, j);
799
800 isl::local_space LS(Space);
801 isl::constraint C;
802 C = isl::constraint::alloc_equality(LS);
803 C = C.set_constant_si(-1);
804 C = C.set_coefficient_si(isl::dim::in, i, 1);
805 C = C.set_coefficient_si(isl::dim::out, i, -1);
806 MapTwo = MapTwo.add_constraint(C);
807 C = isl::constraint::alloc_equality(LS);
808 C = C.set_coefficient_si(isl::dim::in, i + 1, 1);
809 C = C.set_coefficient_si(isl::dim::out, i + 1, -1);
810 C = C.set_coefficient_si(isl::dim::param, ParamLocation, 1);
811 MapTwo = MapTwo.add_constraint(C);
812 MapTwo = MapTwo.upper_bound_si(isl::dim::in, i + 1, -1);
813
814 MapOne = MapOne.unite(MapTwo);
815 NewAccessRelation = NewAccessRelation.apply_range(MapOne);
816 }
817
818 isl::id BaseAddrId = getScopArrayInfo()->getBasePtrId();
819 isl::space Space = Statement->getDomainSpace();
820 NewAccessRelation = NewAccessRelation.set_tuple_id(
821 isl::dim::in, Space.get_tuple_id(isl::dim::set));
822 NewAccessRelation = NewAccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
823 NewAccessRelation = NewAccessRelation.gist_domain(Statement->getDomain());
824
825 // Access dimension folding might in certain cases increase the number of
826 // disjuncts in the memory access, which can possibly complicate the generated
827 // run-time checks and can lead to costly compilation.
828 if (!PollyPreciseFoldAccesses &&
829 NewAccessRelation.n_basic_map() > AccessRelation.n_basic_map()) {
830 } else {
831 AccessRelation = NewAccessRelation;
832 }
833 }
834
void MemoryAccess::buildAccessRelation(const ScopArrayInfo *SAI) {
836 assert(AccessRelation.is_null() && "AccessRelation already built");
837
838 // Initialize the invalid domain which describes all iterations for which the
839 // access relation is not modeled correctly.
840 isl::set StmtInvalidDomain = getStatement()->getInvalidDomain();
841 InvalidDomain = isl::set::empty(StmtInvalidDomain.get_space());
842
843 isl::ctx Ctx = Id.get_ctx();
844 isl::id BaseAddrId = SAI->getBasePtrId();
845
846 if (getAccessInstruction() && isa<MemIntrinsic>(getAccessInstruction())) {
847 buildMemIntrinsicAccessRelation();
848 AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
849 return;
850 }
851
852 if (!isAffine()) {
853 // We overapproximate non-affine accesses with a possible access to the
    // whole array. For read accesses it does not make a difference if an
855 // access must or may happen. However, for write accesses it is important to
856 // differentiate between writes that must happen and writes that may happen.
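    // The relation built by createBasicAccessMap below is unconstrained: it
    // maps every statement instance to all points of a one-dimensional range,
    // which is then tagged with the accessed array's id.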
857 if (AccessRelation.is_null())
858 AccessRelation = createBasicAccessMap(Statement);
859
860 AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
861 return;
862 }
863
864 isl::space Space = isl::space(Ctx, 0, Statement->getNumIterators(), 0);
865 AccessRelation = isl::map::universe(Space);
866
867 for (int i = 0, Size = Subscripts.size(); i < Size; ++i) {
868 isl::pw_aff Affine = getPwAff(Subscripts[i]);
869 isl::map SubscriptMap = isl::map::from_pw_aff(Affine);
870 AccessRelation = AccessRelation.flat_range_product(SubscriptMap);
871 }
872
873 Space = Statement->getDomainSpace();
874 AccessRelation = AccessRelation.set_tuple_id(
875 isl::dim::in, Space.get_tuple_id(isl::dim::set));
876 AccessRelation = AccessRelation.set_tuple_id(isl::dim::out, BaseAddrId);
877
878 AccessRelation = AccessRelation.gist_domain(Statement->getDomain());
879 }
880
MemoryAccess::MemoryAccess(ScopStmt *Stmt, Instruction *AccessInst,
                           AccessType AccType, Value *BaseAddress,
                           Type *ElementType, bool Affine,
                           ArrayRef<const SCEV *> Subscripts,
                           ArrayRef<const SCEV *> Sizes, Value *AccessValue,
                           MemoryKind Kind)
887 : Kind(Kind), AccType(AccType), Statement(Stmt), InvalidDomain(nullptr),
888 BaseAddr(BaseAddress), ElementType(ElementType),
889 Sizes(Sizes.begin(), Sizes.end()), AccessInstruction(AccessInst),
890 AccessValue(AccessValue), IsAffine(Affine),
891 Subscripts(Subscripts.begin(), Subscripts.end()), AccessRelation(nullptr),
892 NewAccessRelation(nullptr), FAD(nullptr) {
893 static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
894 const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());
895
896 std::string IdName = Stmt->getBaseName() + Access;
897 Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
898 }
899
MemoryAccess::MemoryAccess(ScopStmt *Stmt, AccessType AccType, isl::map AccRel)
901 : Kind(MemoryKind::Array), AccType(AccType), Statement(Stmt),
902 InvalidDomain(nullptr), AccessRelation(nullptr),
903 NewAccessRelation(AccRel), FAD(nullptr) {
904 isl::id ArrayInfoId = NewAccessRelation.get_tuple_id(isl::dim::out);
905 auto *SAI = ScopArrayInfo::getFromId(ArrayInfoId);
906 Sizes.push_back(nullptr);
907 for (unsigned i = 1; i < SAI->getNumberOfDimensions(); i++)
908 Sizes.push_back(SAI->getDimensionSize(i));
909 ElementType = SAI->getElementType();
910 BaseAddr = SAI->getBasePtr();
911 static const std::string TypeStrings[] = {"", "_Read", "_Write", "_MayWrite"};
912 const std::string Access = TypeStrings[AccType] + utostr(Stmt->size());
913
914 std::string IdName = Stmt->getBaseName() + Access;
915 Id = isl::id::alloc(Stmt->getParent()->getIslCtx(), IdName, this);
916 }
917
918 MemoryAccess::~MemoryAccess() = default;
919
void MemoryAccess::realignParams() {
921 isl::set Ctx = Statement->getParent()->getContext();
922 InvalidDomain = InvalidDomain.gist_params(Ctx);
923 AccessRelation = AccessRelation.gist_params(Ctx);
924
925 // Predictable parameter order is required for JSON imports. Ensure alignment
926 // by explicitly calling align_params.
927 isl::space CtxSpace = Ctx.get_space();
928 InvalidDomain = InvalidDomain.align_params(CtxSpace);
929 AccessRelation = AccessRelation.align_params(CtxSpace);
930 }
931
const std::string MemoryAccess::getReductionOperatorStr() const {
933 return MemoryAccess::getReductionOperatorStr(getReductionType());
934 }
935
isl::id MemoryAccess::getId() const { return Id; }
937
raw_ostream &polly::operator<<(raw_ostream &OS,
939 MemoryAccess::ReductionType RT) {
940 if (RT == MemoryAccess::RT_NONE)
941 OS << "NONE";
942 else
943 OS << MemoryAccess::getReductionOperatorStr(RT);
944 return OS;
945 }
946
void MemoryAccess::setFortranArrayDescriptor(Value *FAD) { this->FAD = FAD; }
948
void MemoryAccess::print(raw_ostream &OS) const {
950 switch (AccType) {
951 case READ:
952 OS.indent(12) << "ReadAccess :=\t";
953 break;
954 case MUST_WRITE:
955 OS.indent(12) << "MustWriteAccess :=\t";
956 break;
957 case MAY_WRITE:
958 OS.indent(12) << "MayWriteAccess :=\t";
959 break;
960 }
961
962 OS << "[Reduction Type: " << getReductionType() << "] ";
963
964 if (FAD) {
965 OS << "[Fortran array descriptor: " << FAD->getName();
966 OS << "] ";
  }
968
969 OS << "[Scalar: " << isScalarKind() << "]\n";
970 OS.indent(16) << getOriginalAccessRelationStr() << ";\n";
971 if (hasNewAccessRelation())
972 OS.indent(11) << "new: " << getNewAccessRelationStr() << ";\n";
973 }
974
975 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void MemoryAccess::dump() const { print(errs()); }
977 #endif
978
isl::pw_aff MemoryAccess::getPwAff(const SCEV *E) {
980 auto *Stmt = getStatement();
981 PWACtx PWAC = Stmt->getParent()->getPwAff(E, Stmt->getEntryBlock());
982 isl::set StmtDom = getStatement()->getDomain();
983 StmtDom = StmtDom.reset_tuple_id();
984 isl::set NewInvalidDom = StmtDom.intersect(PWAC.second);
985 InvalidDomain = InvalidDomain.unite(NewInvalidDom);
986 return PWAC.first;
987 }
988
// Create a map on the provided set domain that maps from one element of the
// domain to another element of the same domain.
// The mapping is limited to all points that are equal in all but the last
// dimension and for which the last dimension of the input is strictly smaller
// than the last dimension of the output.
995 //
996 // getEqualAndLarger(set[i0, i1, ..., iX]):
997 //
998 // set[i0, i1, ..., iX] -> set[o0, o1, ..., oX]
999 // : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1), iX < oX
1000 //
static isl::map getEqualAndLarger(isl::space SetDomain) {
1002 isl::space Space = SetDomain.map_from_set();
1003 isl::map Map = isl::map::universe(Space);
1004 unsigned lastDimension = Map.dim(isl::dim::in) - 1;
1005
1006 // Set all but the last dimension to be equal for the input and output
1007 //
1008 // input[i0, i1, ..., iX] -> output[o0, o1, ..., oX]
1009 // : i0 = o0, i1 = o1, ..., i(X-1) = o(X-1)
1010 for (unsigned i = 0; i < lastDimension; ++i)
1011 Map = Map.equate(isl::dim::in, i, isl::dim::out, i);
1012
  // Set the last dimension of the input to be strictly smaller than the
1014 // last dimension of the output.
1015 //
1016 // input[?,?,?,...,iX] -> output[?,?,?,...,oX] : iX < oX
1017 Map = Map.order_lt(isl::dim::in, lastDimension, isl::dim::out, lastDimension);
1018 return Map;
1019 }
1020
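// Relate each accessed array element to the element accessed at the next
// schedule time point (the point that agrees in all but the innermost
// schedule dimension and is lexicographically minimal among the larger ones)
// and return the set of differences between them.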
isl::set MemoryAccess::getStride(isl::map Schedule) const {
1022 isl::map AccessRelation = getAccessRelation();
1023 isl::space Space = Schedule.get_space().range();
1024 isl::map NextScatt = getEqualAndLarger(Space);
1025
1026 Schedule = Schedule.reverse();
1027 NextScatt = NextScatt.lexmin();
1028
1029 NextScatt = NextScatt.apply_range(Schedule);
1030 NextScatt = NextScatt.apply_range(AccessRelation);
1031 NextScatt = NextScatt.apply_domain(Schedule);
1032 NextScatt = NextScatt.apply_domain(AccessRelation);
1033
1034 isl::set Deltas = NextScatt.deltas();
1035 return Deltas;
1036 }
1037
bool MemoryAccess::isStrideX(isl::map Schedule, int StrideWidth) const {
1039 isl::set Stride, StrideX;
1040 bool IsStrideX;
1041
1042 Stride = getStride(Schedule);
1043 StrideX = isl::set::universe(Stride.get_space());
1044 for (unsigned i = 0; i < StrideX.dim(isl::dim::set) - 1; i++)
1045 StrideX = StrideX.fix_si(isl::dim::set, i, 0);
1046 StrideX = StrideX.fix_si(isl::dim::set, StrideX.dim(isl::dim::set) - 1,
1047 StrideWidth);
1048 IsStrideX = Stride.is_subset(StrideX);
1049
1050 return IsStrideX;
1051 }
1052
bool MemoryAccess::isStrideZero(isl::map Schedule) const {
1054 return isStrideX(Schedule, 0);
1055 }
1056
bool MemoryAccess::isStrideOne(isl::map Schedule) const {
1058 return isStrideX(Schedule, 1);
1059 }
1060
void MemoryAccess::setAccessRelation(isl::map NewAccess) {
1062 AccessRelation = NewAccess;
1063 }
1064
void MemoryAccess::setNewAccessRelation(isl::map NewAccess) {
1066 assert(NewAccess);
1067
1068 #ifndef NDEBUG
1069 // Check domain space compatibility.
1070 isl::space NewSpace = NewAccess.get_space();
1071 isl::space NewDomainSpace = NewSpace.domain();
1072 isl::space OriginalDomainSpace = getStatement()->getDomainSpace();
1073 assert(OriginalDomainSpace.has_equal_tuples(NewDomainSpace));
1074
1075 // Reads must be executed unconditionally. Writes might be executed in a
1076 // subdomain only.
1077 if (isRead()) {
1078 // Check whether there is an access for every statement instance.
1079 isl::set StmtDomain = getStatement()->getDomain();
1080 StmtDomain =
1081 StmtDomain.intersect_params(getStatement()->getParent()->getContext());
1082 isl::set NewDomain = NewAccess.domain();
1083 assert(StmtDomain.is_subset(NewDomain) &&
1084 "Partial READ accesses not supported");
1085 }
1086
1087 isl::space NewAccessSpace = NewAccess.get_space();
1088 assert(NewAccessSpace.has_tuple_id(isl::dim::set) &&
1089 "Must specify the array that is accessed");
1090 isl::id NewArrayId = NewAccessSpace.get_tuple_id(isl::dim::set);
1091 auto *SAI = static_cast<ScopArrayInfo *>(NewArrayId.get_user());
1092 assert(SAI && "Must set a ScopArrayInfo");
1093
1094 if (SAI->isArrayKind() && SAI->getBasePtrOriginSAI()) {
1095 InvariantEquivClassTy *EqClass =
1096 getStatement()->getParent()->lookupInvariantEquivClass(
1097 SAI->getBasePtr());
1098 assert(EqClass &&
1099 "Access functions to indirect arrays must have an invariant and "
1100 "hoisted base pointer");
1101 }
1102
  // Check whether the access dimensions correspond to the number of
  // dimensions of the accessed array.
1105 auto Dims = SAI->getNumberOfDimensions();
1106 assert(NewAccessSpace.dim(isl::dim::set) == Dims &&
1107 "Access dims must match array dims");
1108 #endif
1109
1110 NewAccess = NewAccess.gist_domain(getStatement()->getDomain());
1111 NewAccessRelation = NewAccess;
1112 }
1113
bool MemoryAccess::isLatestPartialAccess() const {
1115 isl::set StmtDom = getStatement()->getDomain();
1116 isl::set AccDom = getLatestAccessRelation().domain();
1117
1118 return !StmtDom.is_subset(AccDom);
1119 }
1120
1121 //===----------------------------------------------------------------------===//
1122
isl::map ScopStmt::getSchedule() const {
1124 isl::set Domain = getDomain();
1125 if (Domain.is_empty())
1126 return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
1127 auto Schedule = getParent()->getSchedule();
1128 if (!Schedule)
1129 return nullptr;
1130 Schedule = Schedule.intersect_domain(isl::union_set(Domain));
1131 if (Schedule.is_empty())
1132 return isl::map::from_aff(isl::aff(isl::local_space(getDomainSpace())));
  isl::map M = isl::map::from_union_map(Schedule);
1134 M = M.coalesce();
1135 M = M.gist_domain(Domain);
1136 M = M.coalesce();
1137 return M;
1138 }
1139
void ScopStmt::restrictDomain(isl::set NewDomain) {
1141 assert(NewDomain.is_subset(Domain) &&
1142 "New domain is not a subset of old domain!");
1143 Domain = NewDomain;
1144 }
1145
void ScopStmt::addAccess(MemoryAccess *Access, bool Prepend) {
1147 Instruction *AccessInst = Access->getAccessInstruction();
1148
1149 if (Access->isArrayKind()) {
1150 MemoryAccessList &MAL = InstructionToAccess[AccessInst];
1151 MAL.emplace_front(Access);
1152 } else if (Access->isValueKind() && Access->isWrite()) {
1153 Instruction *AccessVal = cast<Instruction>(Access->getAccessValue());
1154 assert(!ValueWrites.lookup(AccessVal));
1155
1156 ValueWrites[AccessVal] = Access;
1157 } else if (Access->isValueKind() && Access->isRead()) {
1158 Value *AccessVal = Access->getAccessValue();
1159 assert(!ValueReads.lookup(AccessVal));
1160
1161 ValueReads[AccessVal] = Access;
1162 } else if (Access->isAnyPHIKind() && Access->isWrite()) {
1163 PHINode *PHI = cast<PHINode>(Access->getAccessValue());
1164 assert(!PHIWrites.lookup(PHI));
1165
1166 PHIWrites[PHI] = Access;
1167 } else if (Access->isAnyPHIKind() && Access->isRead()) {
1168 PHINode *PHI = cast<PHINode>(Access->getAccessValue());
1169 assert(!PHIReads.lookup(PHI));
1170
1171 PHIReads[PHI] = Access;
1172 }
1173
1174 if (Prepend) {
1175 MemAccs.insert(MemAccs.begin(), Access);
1176 return;
1177 }
1178 MemAccs.push_back(Access);
1179 }
1180
void ScopStmt::realignParams() {
1182 for (MemoryAccess *MA : *this)
1183 MA->realignParams();
1184
1185 isl::set Ctx = Parent.getContext();
1186 InvalidDomain = InvalidDomain.gist_params(Ctx);
1187 Domain = Domain.gist_params(Ctx);
1188
1189 // Predictable parameter order is required for JSON imports. Ensure alignment
1190 // by explicitly calling align_params.
1191 isl::space CtxSpace = Ctx.get_space();
1192 InvalidDomain = InvalidDomain.align_params(CtxSpace);
1193 Domain = Domain.align_params(CtxSpace);
1194 }
1195
ScopStmt::ScopStmt(Scop &parent, Region &R, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> EntryBlockInstructions)
1199 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), R(&R),
1200 Build(nullptr), BaseName(Name), SurroundingLoop(SurroundingLoop),
1201 Instructions(EntryBlockInstructions) {}
1202
ScopStmt::ScopStmt(Scop &parent, BasicBlock &bb, StringRef Name,
                   Loop *SurroundingLoop,
                   std::vector<Instruction *> Instructions)
1206 : Parent(parent), InvalidDomain(nullptr), Domain(nullptr), BB(&bb),
1207 Build(nullptr), BaseName(Name), SurroundingLoop(SurroundingLoop),
1208 Instructions(Instructions) {}
1209
ScopStmt::ScopStmt(Scop &parent, isl::map SourceRel, isl::map TargetRel,
                   isl::set NewDomain)
1212 : Parent(parent), InvalidDomain(nullptr), Domain(NewDomain),
1213 Build(nullptr) {
1214 BaseName = getIslCompatibleName("CopyStmt_", "",
1215 std::to_string(parent.getCopyStmtsNum()));
1216 isl::id Id = isl::id::alloc(getIslCtx(), getBaseName(), this);
1217 Domain = Domain.set_tuple_id(Id);
1218 TargetRel = TargetRel.set_tuple_id(isl::dim::in, Id);
1219 auto *Access =
1220 new MemoryAccess(this, MemoryAccess::AccessType::MUST_WRITE, TargetRel);
1221 parent.addAccessFunction(Access);
1222 addAccess(Access);
1223 SourceRel = SourceRel.set_tuple_id(isl::dim::in, Id);
1224 Access = new MemoryAccess(this, MemoryAccess::AccessType::READ, SourceRel);
1225 parent.addAccessFunction(Access);
1226 addAccess(Access);
1227 }
1228
1229 ScopStmt::~ScopStmt() = default;
1230
std::string ScopStmt::getDomainStr() const { return Domain.to_str(); }

std::string ScopStmt::getScheduleStr() const {
  auto *S = getSchedule().release();
  if (!S)
    return {};
  auto Str = stringFromIslObj(S);
  isl_map_free(S);
  return Str;
}

void ScopStmt::setInvalidDomain(isl::set ID) { InvalidDomain = ID; }

BasicBlock *ScopStmt::getEntryBlock() const {
  if (isBlockStmt())
    return getBasicBlock();
  return getRegion()->getEntry();
}

unsigned ScopStmt::getNumIterators() const { return NestLoops.size(); }

const char *ScopStmt::getBaseName() const { return BaseName.c_str(); }

Loop *ScopStmt::getLoopForDimension(unsigned Dimension) const {
  return NestLoops[Dimension];
}

isl::ctx ScopStmt::getIslCtx() const { return Parent.getIslCtx(); }

isl::set ScopStmt::getDomain() const { return Domain; }

isl::space ScopStmt::getDomainSpace() const { return Domain.get_space(); }

isl::id ScopStmt::getDomainId() const { return Domain.get_tuple_id(); }
1265
void ScopStmt::printInstructions(raw_ostream &OS) const {
1267 OS << "Instructions {\n";
1268
1269 for (Instruction *Inst : Instructions)
1270 OS.indent(16) << *Inst << "\n";
1271
1272 OS.indent(12) << "}\n";
1273 }
1274
void ScopStmt::print(raw_ostream &OS, bool PrintInstructions) const {
1276 OS << "\t" << getBaseName() << "\n";
1277 OS.indent(12) << "Domain :=\n";
1278
1279 if (Domain) {
1280 OS.indent(16) << getDomainStr() << ";\n";
1281 } else
1282 OS.indent(16) << "n/a\n";
1283
1284 OS.indent(12) << "Schedule :=\n";
1285
1286 if (Domain) {
1287 OS.indent(16) << getScheduleStr() << ";\n";
1288 } else
1289 OS.indent(16) << "n/a\n";
1290
1291 for (MemoryAccess *Access : MemAccs)
1292 Access->print(OS);
1293
1294 if (PrintInstructions)
1295 printInstructions(OS.indent(12));
1296 }
1297
1298 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
LLVM_DUMP_METHOD void ScopStmt::dump() const { print(dbgs(), true); }
1300 #endif
1301
void ScopStmt::removeAccessData(MemoryAccess *MA) {
1303 if (MA->isRead() && MA->isOriginalValueKind()) {
1304 bool Found = ValueReads.erase(MA->getAccessValue());
1305 (void)Found;
1306 assert(Found && "Expected access data not found");
1307 }
1308 if (MA->isWrite() && MA->isOriginalValueKind()) {
1309 bool Found = ValueWrites.erase(cast<Instruction>(MA->getAccessValue()));
1310 (void)Found;
1311 assert(Found && "Expected access data not found");
1312 }
1313 if (MA->isWrite() && MA->isOriginalAnyPHIKind()) {
1314 bool Found = PHIWrites.erase(cast<PHINode>(MA->getAccessInstruction()));
1315 (void)Found;
1316 assert(Found && "Expected access data not found");
1317 }
1318 if (MA->isRead() && MA->isOriginalAnyPHIKind()) {
1319 bool Found = PHIReads.erase(cast<PHINode>(MA->getAccessInstruction()));
1320 (void)Found;
1321 assert(Found && "Expected access data not found");
1322 }
1323 }
1324
void ScopStmt::removeMemoryAccess(MemoryAccess *MA) {
1326 // Remove the memory accesses from this statement together with all scalar
1327 // accesses that were caused by it. MemoryKind::Value READs have no access
  // instruction, hence would not be removed by this function. However, this
  // function is only used for invariant LoadInst accesses, whose arguments are
  // always affine and hence synthesizable, so there are no MemoryKind::Value
  // READ accesses to be removed.
1332 auto Predicate = [&](MemoryAccess *Acc) {
1333 return Acc->getAccessInstruction() == MA->getAccessInstruction();
1334 };
1335 for (auto *MA : MemAccs) {
1336 if (Predicate(MA)) {
1337 removeAccessData(MA);
1338 Parent.removeAccessData(MA);
1339 }
1340 }
1341 MemAccs.erase(std::remove_if(MemAccs.begin(), MemAccs.end(), Predicate),
1342 MemAccs.end());
1343 InstructionToAccess.erase(MA->getAccessInstruction());
1344 }
1345
void ScopStmt::removeSingleMemoryAccess(MemoryAccess *MA, bool AfterHoisting) {
1347 if (AfterHoisting) {
1348 auto MAIt = std::find(MemAccs.begin(), MemAccs.end(), MA);
1349 assert(MAIt != MemAccs.end());
1350 MemAccs.erase(MAIt);
1351
1352 removeAccessData(MA);
1353 Parent.removeAccessData(MA);
1354 }
1355
1356 auto It = InstructionToAccess.find(MA->getAccessInstruction());
1357 if (It != InstructionToAccess.end()) {
1358 It->second.remove(MA);
1359 if (It->second.empty())
1360 InstructionToAccess.erase(MA->getAccessInstruction());
1361 }
1362 }
1363
MemoryAccess *ScopStmt::ensureValueRead(Value *V) {
1365 MemoryAccess *Access = lookupInputAccessOf(V);
1366 if (Access)
1367 return Access;
1368
1369 ScopArrayInfo *SAI =
1370 Parent.getOrCreateScopArrayInfo(V, V->getType(), {}, MemoryKind::Value);
1371 Access = new MemoryAccess(this, nullptr, MemoryAccess::READ, V, V->getType(),
1372 true, {}, {}, V, MemoryKind::Value);
1373 Parent.addAccessFunction(Access);
1374 Access->buildAccessRelation(SAI);
1375 addAccess(Access);
1376 Parent.addAccessData(Access);
1377 return Access;
1378 }
1379
raw_ostream &polly::operator<<(raw_ostream &OS, const ScopStmt &S) {
1381 S.print(OS, PollyPrintInstructions);
1382 return OS;
1383 }
1384
1385 //===----------------------------------------------------------------------===//
/// Scop class implementation
1387
void Scop::setContext(isl::set NewContext) {
1389 Context = NewContext.align_params(Context.get_space());
1390 }
1391
1392 namespace {
1393
1394 /// Remap parameter values but keep AddRecs valid wrt. invariant loads.
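///
/// For example (using hypothetical value names): if the invariant load %val is
/// mapped to its preloaded copy %val.preload, the AddRec {%val,+,1}<%L> is
/// rewritten to (%val.preload + {0,+,1}<%L>) instead of to a new AddRec, so
/// the expression stays anchored to the original loop %L.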
1395 struct SCEVSensitiveParameterRewriter
1396 : public SCEVRewriteVisitor<SCEVSensitiveParameterRewriter> {
1397 const ValueToValueMap &VMap;
1398
1399 public:
  SCEVSensitiveParameterRewriter(const ValueToValueMap &VMap,
1401 ScalarEvolution &SE)
1402 : SCEVRewriteVisitor(SE), VMap(VMap) {}
1403
  static const SCEV *rewrite(const SCEV *E, ScalarEvolution &SE,
1405 const ValueToValueMap &VMap) {
1406 SCEVSensitiveParameterRewriter SSPR(VMap, SE);
1407 return SSPR.visit(E);
1408 }
1409
  const SCEV *visitAddRecExpr(const SCEVAddRecExpr *E) {
1411 auto *Start = visit(E->getStart());
1412 auto *AddRec = SE.getAddRecExpr(SE.getConstant(E->getType(), 0),
1413 visit(E->getStepRecurrence(SE)),
1414 E->getLoop(), SCEV::FlagAnyWrap);
1415 return SE.getAddExpr(Start, AddRec);
1416 }
1417
  const SCEV *visitUnknown(const SCEVUnknown *E) {
1419 if (auto *NewValue = VMap.lookup(E->getValue()))
1420 return SE.getUnknown(NewValue);
1421 return E;
1422 }
1423 };
1424
1425 /// Check whether we should remap a SCEV expression.
1426 struct SCEVFindInsideScop : public SCEVTraversal<SCEVFindInsideScop> {
1427 const ValueToValueMap &VMap;
1428 bool FoundInside = false;
1429 const Scop *S;
1430
1431 public:
1432   SCEVFindInsideScop(const ValueToValueMap &VMap, ScalarEvolution &SE,
1433 const Scop *S)
1434 : SCEVTraversal(*this), VMap(VMap), S(S) {}
1435
1436   static bool hasVariant(const SCEV *E, ScalarEvolution &SE,
1437 const ValueToValueMap &VMap, const Scop *S) {
1438 SCEVFindInsideScop SFIS(VMap, SE, S);
1439 SFIS.visitAll(E);
1440 return SFIS.FoundInside;
1441 }
1442
1443   bool follow(const SCEV *E) {
1444 if (auto *AddRec = dyn_cast<SCEVAddRecExpr>(E)) {
1445 FoundInside |= S->getRegion().contains(AddRec->getLoop());
1446 } else if (auto *Unknown = dyn_cast<SCEVUnknown>(E)) {
1447 if (Instruction *I = dyn_cast<Instruction>(Unknown->getValue()))
1448 FoundInside |= S->getRegion().contains(I) && !VMap.count(I);
1449 }
1450 return !FoundInside;
1451 }
1452
1453   bool isDone() { return FoundInside; }
1454 };
1455 } // end anonymous namespace
1456
1457 const SCEV *Scop::getRepresentingInvariantLoadSCEV(const SCEV *E) const {
1458 // Check whether it makes sense to rewrite the SCEV. (ScalarEvolution
1459 // doesn't like addition between an AddRec and an expression that
1460 // doesn't have a dominance relationship with it.)
1461 if (SCEVFindInsideScop::hasVariant(E, *SE, InvEquivClassVMap, this))
1462 return E;
1463
1464 // Rewrite SCEV.
1465 return SCEVSensitiveParameterRewriter::rewrite(E, *SE, InvEquivClassVMap);
1466 }
1467
1468 // This table of function names is used to translate parameter names into
1469 // more human-readable names. This makes it easier to interpret Polly
1470 // analysis results.
1471 StringMap<std::string> KnownNames = {
1472 {"_Z13get_global_idj", "global_id"},
1473 {"_Z12get_local_idj", "local_id"},
1474 {"_Z15get_global_sizej", "global_size"},
1475 {"_Z14get_local_sizej", "local_size"},
1476 {"_Z12get_work_dimv", "work_dim"},
1477 {"_Z17get_global_offsetj", "global_offset"},
1478 {"_Z12get_group_idj", "group_id"},
1479 {"_Z14get_num_groupsj", "num_groups"},
1480 };
1481
1482 static std::string getCallParamName(CallInst *Call) {
1483 std::string Result;
1484 raw_string_ostream OS(Result);
1485 std::string Name = Call->getCalledFunction()->getName().str();
1486
1487 auto Iterator = KnownNames.find(Name);
1488 if (Iterator != KnownNames.end())
1489 Name = "__" + Iterator->getValue();
1490 OS << Name;
1491 for (auto &Operand : Call->arg_operands()) {
1492 ConstantInt *Op = cast<ConstantInt>(&Operand);
1493 OS << "_" << Op->getValue();
1494 }
1495 OS.flush();
1496 return Result;
1497 }
1498
1499 void Scop::createParameterId(const SCEV *Parameter) {
1500 assert(Parameters.count(Parameter));
1501 assert(!ParameterIds.count(Parameter));
1502
1503 std::string ParameterName = "p_" + std::to_string(getNumParams() - 1);
1504
1505 if (const SCEVUnknown *ValueParameter = dyn_cast<SCEVUnknown>(Parameter)) {
1506 Value *Val = ValueParameter->getValue();
1507 CallInst *Call = dyn_cast<CallInst>(Val);
1508
1509 if (Call && isConstCall(Call)) {
1510 ParameterName = getCallParamName(Call);
1511 } else if (UseInstructionNames) {
1512       // If this parameter references a specific Value and this value has a
1513       // name, we use this name as it is likely to be unique and more useful
1514       // than just a number.
1515 if (Val->hasName())
1516 ParameterName = Val->getName().str();
1517 else if (LoadInst *LI = dyn_cast<LoadInst>(Val)) {
1518 auto *LoadOrigin = LI->getPointerOperand()->stripInBoundsOffsets();
1519 if (LoadOrigin->hasName()) {
1520 ParameterName += "_loaded_from_";
1521 ParameterName +=
1522 LI->getPointerOperand()->stripInBoundsOffsets()->getName();
1523 }
1524 }
1525 }
1526
1527 ParameterName = getIslCompatibleName("", ParameterName, "");
1528 }
1529
1530 isl::id Id = isl::id::alloc(getIslCtx(), ParameterName,
1531 const_cast<void *>((const void *)Parameter));
1532 ParameterIds[Parameter] = Id;
1533 }
1534
1535 void Scop::addParams(const ParameterSetTy &NewParameters) {
1536 for (const SCEV *Parameter : NewParameters) {
1537 // Normalize the SCEV to get the representing element for an invariant load.
1538 Parameter = extractConstantFactor(Parameter, *SE).second;
1539 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
1540
1541 if (Parameters.insert(Parameter))
1542 createParameterId(Parameter);
1543 }
1544 }
1545
1546 isl::id Scop::getIdForParam(const SCEV *Parameter) const {
1547 // Normalize the SCEV to get the representing element for an invariant load.
1548 Parameter = getRepresentingInvariantLoadSCEV(Parameter);
1549 return ParameterIds.lookup(Parameter);
1550 }
1551
1552 bool Scop::isDominatedBy(const DominatorTree &DT, BasicBlock *BB) const {
1553 return DT.dominates(BB, getEntry());
1554 }
1555
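// Initialize Context and AssumedContext to the universe set and InvalidContext
// to the empty set, all over a zero-dimensional parameter space.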
1556 void Scop::buildContext() {
1557 isl::space Space = isl::space::params_alloc(getIslCtx(), 0);
1558 Context = isl::set::universe(Space);
1559 InvalidContext = isl::set::empty(Space);
1560 AssumedContext = isl::set::universe(Space);
1561 }
1562
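// Constrain each parameter dimension of the context with the signed range
// that ScalarEvolution computes for the corresponding SCEV expression.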
1563 void Scop::addParameterBounds() {
1564 unsigned PDim = 0;
1565 for (auto *Parameter : Parameters) {
1566 ConstantRange SRange = SE->getSignedRange(Parameter);
1567 Context = addRangeBoundsToSet(Context, SRange, PDim++, isl::dim::param);
1568 }
1569 }
1570
1571 static std::vector<isl::id> getFortranArrayIds(Scop::array_range Arrays) {
1572 std::vector<isl::id> OutermostSizeIds;
1573 for (auto Array : Arrays) {
1574     // To check if an array is a Fortran array, we check if it has an isl_pw_aff
1575     // for its outermost dimension. Fortran arrays will have this since the
1576     // outermost dimension size can be picked up from their runtime description.
1577 // TODO: actually need to check if it has a FAD, but for now this works.
1578 if (Array->getNumberOfDimensions() > 0) {
1579 isl::pw_aff PwAff = Array->getDimensionSizePw(0);
1580 if (!PwAff)
1581 continue;
1582
1583 isl::id Id = PwAff.get_dim_id(isl::dim::param, 0);
1584 assert(!Id.is_null() &&
1585 "Invalid Id for PwAff expression in Fortran array");
1586 OutermostSizeIds.push_back(Id);
1587 }
1588 }
1589 return OutermostSizeIds;
1590 }
1591
1592 // The FORTRAN array size parameters are known to be non-negative.
1593 static isl::set boundFortranArrayParams(isl::set Context,
1594 Scop::array_range Arrays) {
1595 std::vector<isl::id> OutermostSizeIds;
1596 OutermostSizeIds = getFortranArrayIds(Arrays);
1597
1598 for (isl::id Id : OutermostSizeIds) {
1599 int dim = Context.find_dim_by_id(isl::dim::param, Id);
1600 Context = Context.lower_bound_si(isl::dim::param, dim, 0);
1601 }
1602
1603 return Context;
1604 }
1605
1606 void Scop::realignParams() {
1607 if (PollyIgnoreParamBounds)
1608 return;
1609
1610 // Add all parameters into a common model.
1611 isl::space Space = getFullParamSpace();
1612
1613 // Align the parameters of all data structures to the model.
1614 Context = Context.align_params(Space);
1615 AssumedContext = AssumedContext.align_params(Space);
1616 InvalidContext = InvalidContext.align_params(Space);
1617
1618 // Bound the size of the fortran array dimensions.
1619 Context = boundFortranArrayParams(Context, arrays());
1620
1621   // As all parameters are known, add bounds to them.
1622 addParameterBounds();
1623
1624 for (ScopStmt &Stmt : *this)
1625 Stmt.realignParams();
1626 // Simplify the schedule according to the context too.
1627 Schedule = Schedule.gist_domain_params(getContext());
1628
1629 // Predictable parameter order is required for JSON imports. Ensure alignment
1630 // by explicitly calling align_params.
1631 Schedule = Schedule.align_params(Space);
1632 }
1633
1634 static isl::set simplifyAssumptionContext(isl::set AssumptionContext,
1635 const Scop &S) {
1636 // If we have modeled all blocks in the SCoP that have side effects we can
1637 // simplify the context with the constraints that are needed for anything to
1638 // be executed at all. However, if we have error blocks in the SCoP we already
1639 // assumed some parameter combinations cannot occur and removed them from the
1640 // domains, thus we cannot use the remaining domain to simplify the
1641 // assumptions.
1642 if (!S.hasErrorBlock()) {
1643 auto DomainParameters = S.getDomains().params();
1644 AssumptionContext = AssumptionContext.gist_params(DomainParameters);
1645 }
1646
1647 AssumptionContext = AssumptionContext.gist_params(S.getContext());
1648 return AssumptionContext;
1649 }
1650
1651 void Scop::simplifyContexts() {
1652 // The parameter constraints of the iteration domains give us a set of
1653 // constraints that need to hold for all cases where at least a single
1654 // statement iteration is executed in the whole scop. We now simplify the
1655 // assumed context under the assumption that such constraints hold and at
1656 // least a single statement iteration is executed. For cases where no
1657 // statement instances are executed, the assumptions we have taken about
1658 // the executed code do not matter and can be changed.
1659 //
1660 // WARNING: This only holds if the assumptions we have taken do not reduce
1661 // the set of statement instances that are executed. Otherwise we
1662 // may run into a case where the iteration domains suggest that
1663 // for a certain set of parameter constraints no code is executed,
1664 // but in the original program some computation would have been
1665 // performed. In such a case, modifying the run-time conditions and
1666 // possibly influencing the run-time check may cause certain scops
1667 // to not be executed.
1668 //
1669 // Example:
1670 //
1671 // When delinearizing the following code:
1672 //
1673 // for (long i = 0; i < 100; i++)
1674 // for (long j = 0; j < m; j++)
1675 // A[i+p][j] = 1.0;
1676 //
1677 // we assume that the condition m <= 0 or (m >= 1 and p >= 0) holds as
1678 // otherwise we would access out of bound data. Now, knowing that code is
1679 // only executed for the case m >= 0, it is sufficient to assume p >= 0.
1680 AssumedContext = simplifyAssumptionContext(AssumedContext, *this);
1681 InvalidContext = InvalidContext.align_params(getParamSpace());
1682 }
1683
1684 isl::set Scop::getDomainConditions(const ScopStmt *Stmt) const {
1685 return getDomainConditions(Stmt->getEntryBlock());
1686 }
1687
1688 isl::set Scop::getDomainConditions(BasicBlock *BB) const {
1689 auto DIt = DomainMap.find(BB);
1690 if (DIt != DomainMap.end())
1691 return DIt->getSecond();
1692
1693 auto &RI = *R.getRegionInfo();
1694 auto *BBR = RI.getRegionFor(BB);
1695 while (BBR->getEntry() == BB)
1696 BBR = BBR->getParent();
1697 return getDomainConditions(BBR->getEntry());
1698 }
1699
1700 Scop::Scop(Region &R, ScalarEvolution &ScalarEvolution, LoopInfo &LI,
1701 DominatorTree &DT, ScopDetection::DetectionContext &DC,
1702 OptimizationRemarkEmitter &ORE, int ID)
1703 : IslCtx(isl_ctx_alloc(), isl_ctx_free), SE(&ScalarEvolution), DT(&DT),
1704 R(R), name(None), HasSingleExitEdge(R.getExitingBlock()), DC(DC),
1705 ORE(ORE), Affinator(this, LI), ID(ID) {
1706 SmallVector<char *, 8> IslArgv;
1707 IslArgv.reserve(1 + IslArgs.size());
1708
1709 // Substitute for program name.
1710 IslArgv.push_back(const_cast<char *>("-polly-isl-arg"));
1711
1712 for (std::string &Arg : IslArgs)
1713 IslArgv.push_back(const_cast<char *>(Arg.c_str()));
1714
1715 // Abort if unknown argument is passed.
1716 // Note that "-V" (print isl version) will always call exit(0), so we cannot
1717 // avoid ISL aborting the program at this point.
1718 unsigned IslParseFlags = ISL_ARG_ALL;
1719
1720 isl_ctx_parse_options(IslCtx.get(), IslArgv.size(), IslArgv.data(),
1721 IslParseFlags);
1722
1723 if (IslOnErrorAbort)
1724 isl_options_set_on_error(getIslCtx().get(), ISL_ON_ERROR_ABORT);
1725 buildContext();
1726 }
1727
1728 Scop::~Scop() = default;
1729
1730 void Scop::removeFromStmtMap(ScopStmt &Stmt) {
1731 for (Instruction *Inst : Stmt.getInstructions())
1732 InstStmtMap.erase(Inst);
1733
1734 if (Stmt.isRegionStmt()) {
1735 for (BasicBlock *BB : Stmt.getRegion()->blocks()) {
1736 StmtMap.erase(BB);
1737 // Skip entry basic block, as its instructions are already deleted as
1738 // part of the statement's instruction list.
1739 if (BB == Stmt.getEntryBlock())
1740 continue;
1741 for (Instruction &Inst : *BB)
1742 InstStmtMap.erase(&Inst);
1743 }
1744 } else {
1745 auto StmtMapIt = StmtMap.find(Stmt.getBasicBlock());
1746 if (StmtMapIt != StmtMap.end())
1747 StmtMapIt->second.erase(std::remove(StmtMapIt->second.begin(),
1748 StmtMapIt->second.end(), &Stmt),
1749 StmtMapIt->second.end());
1750 for (Instruction *Inst : Stmt.getInstructions())
1751 InstStmtMap.erase(Inst);
1752 }
1753 }
1754
1755 void Scop::removeStmts(function_ref<bool(ScopStmt &)> ShouldDelete,
1756 bool AfterHoisting) {
1757 for (auto StmtIt = Stmts.begin(), StmtEnd = Stmts.end(); StmtIt != StmtEnd;) {
1758 if (!ShouldDelete(*StmtIt)) {
1759 StmtIt++;
1760 continue;
1761 }
1762
1763 // Start with removing all of the statement's accesses including erasing it
1764 // from all maps that are pointing to them.
1765 // Make a temporary copy because removing MAs invalidates the iterator.
1766 SmallVector<MemoryAccess *, 16> MAList(StmtIt->begin(), StmtIt->end());
1767 for (MemoryAccess *MA : MAList)
1768 StmtIt->removeSingleMemoryAccess(MA, AfterHoisting);
1769
1770 removeFromStmtMap(*StmtIt);
1771 StmtIt = Stmts.erase(StmtIt);
1772 }
1773 }
1774
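// Remove all statements for which no domain was computed or whose computed
// domain is empty.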
1775 void Scop::removeStmtNotInDomainMap() {
1776 removeStmts([this](ScopStmt &Stmt) -> bool {
1777 isl::set Domain = DomainMap.lookup(Stmt.getEntryBlock());
1778 if (!Domain)
1779 return true;
1780 return Domain.is_empty();
1781 });
1782 }
1783
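// Remove trivial statements: statements without accesses are always removed,
// read-only statements only after invariant load hoisting. Statements that
// contain calls to debug functions are never removed.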
1784 void Scop::simplifySCoP(bool AfterHoisting) {
1785 removeStmts(
1786 [AfterHoisting](ScopStmt &Stmt) -> bool {
1787 // Never delete statements that contain calls to debug functions.
1788 if (hasDebugCall(&Stmt))
1789 return false;
1790
1791 bool RemoveStmt = Stmt.isEmpty();
1792
1793 // Remove read only statements only after invariant load hoisting.
1794 if (!RemoveStmt && AfterHoisting) {
1795 bool OnlyRead = true;
1796 for (MemoryAccess *MA : Stmt) {
1797 if (MA->isRead())
1798 continue;
1799
1800 OnlyRead = false;
1801 break;
1802 }
1803
1804 RemoveStmt = OnlyRead;
1805 }
1806 return RemoveStmt;
1807 },
1808 AfterHoisting);
1809 }
1810
1811 InvariantEquivClassTy *Scop::lookupInvariantEquivClass(Value *Val) {
1812 LoadInst *LInst = dyn_cast<LoadInst>(Val);
1813 if (!LInst)
1814 return nullptr;
1815
1816 if (Value *Rep = InvEquivClassVMap.lookup(LInst))
1817 LInst = cast<LoadInst>(Rep);
1818
1819 Type *Ty = LInst->getType();
1820 const SCEV *PointerSCEV = SE->getSCEV(LInst->getPointerOperand());
1821 for (auto &IAClass : InvariantEquivClasses) {
1822 if (PointerSCEV != IAClass.IdentifyingPointer || Ty != IAClass.AccessType)
1823 continue;
1824
1825 auto &MAs = IAClass.InvariantAccesses;
1826 for (auto *MA : MAs)
1827 if (MA->getAccessInstruction() == Val)
1828 return &IAClass;
1829 }
1830
1831 return nullptr;
1832 }
1833
1834 ScopArrayInfo *Scop::getOrCreateScopArrayInfo(Value *BasePtr, Type *ElementType,
1835 ArrayRef<const SCEV *> Sizes,
1836 MemoryKind Kind,
1837 const char *BaseName) {
1838 assert((BasePtr || BaseName) &&
1839 "BasePtr and BaseName can not be nullptr at the same time.");
1840 assert(!(BasePtr && BaseName) && "BaseName is redundant.");
1841 auto &SAI = BasePtr ? ScopArrayInfoMap[std::make_pair(BasePtr, Kind)]
1842 : ScopArrayNameMap[BaseName];
1843 if (!SAI) {
1844 auto &DL = getFunction().getParent()->getDataLayout();
1845 SAI.reset(new ScopArrayInfo(BasePtr, ElementType, getIslCtx(), Sizes, Kind,
1846 DL, this, BaseName));
1847 ScopArrayInfoSet.insert(SAI.get());
1848 } else {
1849 SAI->updateElementType(ElementType);
1850 // In case of mismatching array sizes, we bail out by setting the run-time
1851 // context to false.
1852 if (!SAI->updateSizes(Sizes))
1853 invalidate(DELINEARIZATION, DebugLoc());
1854 }
1855 return SAI.get();
1856 }
1857
1858 ScopArrayInfo *Scop::createScopArrayInfo(Type *ElementType,
1859 const std::string &BaseName,
1860 const std::vector<unsigned> &Sizes) {
1861 auto *DimSizeType = Type::getInt64Ty(getSE()->getContext());
1862 std::vector<const SCEV *> SCEVSizes;
1863
1864 for (auto size : Sizes)
1865 if (size)
1866 SCEVSizes.push_back(getSE()->getConstant(DimSizeType, size, false));
1867 else
1868 SCEVSizes.push_back(nullptr);
1869
1870 auto *SAI = getOrCreateScopArrayInfo(nullptr, ElementType, SCEVSizes,
1871 MemoryKind::Array, BaseName.c_str());
1872 return SAI;
1873 }
1874
1875 ScopArrayInfo *Scop::getScopArrayInfoOrNull(Value *BasePtr, MemoryKind Kind) {
1876 auto *SAI = ScopArrayInfoMap[std::make_pair(BasePtr, Kind)].get();
1877 return SAI;
1878 }
1879
1880 ScopArrayInfo *Scop::getScopArrayInfo(Value *BasePtr, MemoryKind Kind) {
1881 auto *SAI = getScopArrayInfoOrNull(BasePtr, Kind);
1882 assert(SAI && "No ScopArrayInfo available for this base pointer");
1883 return SAI;
1884 }
1885
1886 std::string Scop::getContextStr() const { return getContext().to_str(); }
1887
1888 std::string Scop::getAssumedContextStr() const {
1889 assert(AssumedContext && "Assumed context not yet built");
1890 return AssumedContext.to_str();
1891 }
1892
1893 std::string Scop::getInvalidContextStr() const {
1894 return InvalidContext.to_str();
1895 }
1896
1897 std::string Scop::getNameStr() const {
1898 std::string ExitName, EntryName;
1899 std::tie(EntryName, ExitName) = getEntryExitStr();
1900 return EntryName + "---" + ExitName;
1901 }
1902
1903 std::pair<std::string, std::string> Scop::getEntryExitStr() const {
1904 std::string ExitName, EntryName;
1905 raw_string_ostream ExitStr(ExitName);
1906 raw_string_ostream EntryStr(EntryName);
1907
1908 R.getEntry()->printAsOperand(EntryStr, false);
1909 EntryStr.str();
1910
1911 if (R.getExit()) {
1912 R.getExit()->printAsOperand(ExitStr, false);
1913 ExitStr.str();
1914 } else
1915 ExitName = "FunctionExit";
1916
1917 return std::make_pair(EntryName, ExitName);
1918 }
1919
1920 isl::set Scop::getContext() const { return Context; }
1921
1922 isl::space Scop::getParamSpace() const { return getContext().get_space(); }
1923
1924 isl::space Scop::getFullParamSpace() const {
1925 std::vector<isl::id> FortranIDs;
1926 FortranIDs = getFortranArrayIds(arrays());
1927
1928 isl::space Space = isl::space::params_alloc(
1929 getIslCtx(), ParameterIds.size() + FortranIDs.size());
1930
1931 unsigned PDim = 0;
1932 for (const SCEV *Parameter : Parameters) {
1933 isl::id Id = getIdForParam(Parameter);
1934 Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1935 }
1936
1937 for (isl::id Id : FortranIDs)
1938 Space = Space.set_dim_id(isl::dim::param, PDim++, Id);
1939
1940 return Space;
1941 }
1942
1943 isl::set Scop::getAssumedContext() const {
1944 assert(AssumedContext && "Assumed context not yet built");
1945 return AssumedContext;
1946 }
1947
1948 bool Scop::isProfitable(bool ScalarsAreUnprofitable) const {
1949 if (PollyProcessUnprofitable)
1950 return true;
1951
1952 if (isEmpty())
1953 return false;
1954
1955 unsigned OptimizableStmtsOrLoops = 0;
1956 for (auto &Stmt : *this) {
1957 if (Stmt.getNumIterators() == 0)
1958 continue;
1959
1960 bool ContainsArrayAccs = false;
1961 bool ContainsScalarAccs = false;
1962 for (auto *MA : Stmt) {
1963 if (MA->isRead())
1964 continue;
1965 ContainsArrayAccs |= MA->isLatestArrayKind();
1966 ContainsScalarAccs |= MA->isLatestScalarKind();
1967 }
1968
1969 if (!ScalarsAreUnprofitable || (ContainsArrayAccs && !ContainsScalarAccs))
1970 OptimizableStmtsOrLoops += Stmt.getNumIterators();
1971 }
1972
1973 return OptimizableStmtsOrLoops > 1;
1974 }
1975
1976 bool Scop::hasFeasibleRuntimeContext() const {
1977 auto PositiveContext = getAssumedContext();
1978 auto NegativeContext = getInvalidContext();
1979 PositiveContext = addNonEmptyDomainConstraints(PositiveContext);
1980 // addNonEmptyDomainConstraints returns null if ScopStmts have a null domain
1981 if (!PositiveContext)
1982 return false;
1983
1984 bool IsFeasible = !(PositiveContext.is_empty() ||
1985 PositiveContext.is_subset(NegativeContext));
1986 if (!IsFeasible)
1987 return false;
1988
1989 auto DomainContext = getDomains().params();
1990 IsFeasible = !DomainContext.is_subset(NegativeContext);
1991 IsFeasible &= !getContext().is_subset(NegativeContext);
1992
1993 return IsFeasible;
1994 }
1995
1996 isl::set Scop::addNonEmptyDomainConstraints(isl::set C) const {
1997 isl::set DomainContext = getDomains().params();
1998 return C.intersect_params(DomainContext);
1999 }
2000
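// Return the array access associated with the instruction that defines MA's
// base pointer, or nullptr if that instruction is not modeled in this SCoP.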
2001 MemoryAccess *Scop::lookupBasePtrAccess(MemoryAccess *MA) {
2002 Value *PointerBase = MA->getOriginalBaseAddr();
2003
2004 auto *PointerBaseInst = dyn_cast<Instruction>(PointerBase);
2005 if (!PointerBaseInst)
2006 return nullptr;
2007
2008 auto *BasePtrStmt = getStmtFor(PointerBaseInst);
2009 if (!BasePtrStmt)
2010 return nullptr;
2011
2012 return BasePtrStmt->getArrayAccessOrNULLFor(PointerBaseInst);
2013 }
2014
2015 static std::string toString(AssumptionKind Kind) {
2016 switch (Kind) {
2017 case ALIASING:
2018 return "No-aliasing";
2019 case INBOUNDS:
2020 return "Inbounds";
2021 case WRAPPING:
2022 return "No-overflows";
2023 case UNSIGNED:
2024 return "Signed-unsigned";
2025 case COMPLEXITY:
2026 return "Low complexity";
2027 case PROFITABLE:
2028 return "Profitable";
2029 case ERRORBLOCK:
2030 return "No-error";
2031 case INFINITELOOP:
2032 return "Finite loop";
2033 case INVARIANTLOAD:
2034 return "Invariant load";
2035 case DELINEARIZATION:
2036 return "Delinearization";
2037 }
2038 llvm_unreachable("Unknown AssumptionKind!");
2039 }
2040
2041 bool Scop::isEffectiveAssumption(isl::set Set, AssumptionSign Sign) {
2042 if (Sign == AS_ASSUMPTION) {
2043 if (Context.is_subset(Set))
2044 return false;
2045
2046 if (AssumedContext.is_subset(Set))
2047 return false;
2048 } else {
2049 if (Set.is_disjoint(Context))
2050 return false;
2051
2052 if (Set.is_subset(InvalidContext))
2053 return false;
2054 }
2055 return true;
2056 }
2057
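// Update the assumption statistics and emit an optimization remark for the
// assumption/restriction. Returns false if the assumption is trivial (or, when
// PollyRemarksMinimal is set, has no effect) and hence should not be added.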
2058 bool Scop::trackAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
2059 AssumptionSign Sign, BasicBlock *BB) {
2060 if (PollyRemarksMinimal && !isEffectiveAssumption(Set, Sign))
2061 return false;
2062
2063   // Never emit trivial assumptions as they only clutter the output.
2064 if (!PollyRemarksMinimal) {
2065 isl::set Univ;
2066 if (Sign == AS_ASSUMPTION)
2067 Univ = isl::set::universe(Set.get_space());
2068
2069 bool IsTrivial = (Sign == AS_RESTRICTION && Set.is_empty()) ||
2070 (Sign == AS_ASSUMPTION && Univ.is_equal(Set));
2071
2072 if (IsTrivial)
2073 return false;
2074 }
2075
2076 switch (Kind) {
2077 case ALIASING:
2078 AssumptionsAliasing++;
2079 break;
2080 case INBOUNDS:
2081 AssumptionsInbounds++;
2082 break;
2083 case WRAPPING:
2084 AssumptionsWrapping++;
2085 break;
2086 case UNSIGNED:
2087 AssumptionsUnsigned++;
2088 break;
2089 case COMPLEXITY:
2090 AssumptionsComplexity++;
2091 break;
2092 case PROFITABLE:
2093 AssumptionsUnprofitable++;
2094 break;
2095 case ERRORBLOCK:
2096 AssumptionsErrorBlock++;
2097 break;
2098 case INFINITELOOP:
2099 AssumptionsInfiniteLoop++;
2100 break;
2101 case INVARIANTLOAD:
2102 AssumptionsInvariantLoad++;
2103 break;
2104 case DELINEARIZATION:
2105 AssumptionsDelinearization++;
2106 break;
2107 }
2108
2109 auto Suffix = Sign == AS_ASSUMPTION ? " assumption:\t" : " restriction:\t";
2110 std::string Msg = toString(Kind) + Suffix + Set.to_str();
2111 if (BB)
2112 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc, BB)
2113 << Msg);
2114 else
2115 ORE.emit(OptimizationRemarkAnalysis(DEBUG_TYPE, "AssumpRestrict", Loc,
2116 R.getEntry())
2117 << Msg);
2118 return true;
2119 }
2120
2121 void Scop::addAssumption(AssumptionKind Kind, isl::set Set, DebugLoc Loc,
2122 AssumptionSign Sign, BasicBlock *BB) {
2123 // Simplify the assumptions/restrictions first.
2124 Set = Set.gist_params(getContext());
2125
2126 if (!trackAssumption(Kind, Set, Loc, Sign, BB))
2127 return;
2128
2129 if (Sign == AS_ASSUMPTION)
2130 AssumedContext = AssumedContext.intersect(Set).coalesce();
2131 else
2132 InvalidContext = InvalidContext.unite(Set).coalesce();
2133 }
2134
2135 void Scop::invalidate(AssumptionKind Kind, DebugLoc Loc, BasicBlock *BB) {
2136 LLVM_DEBUG(dbgs() << "Invalidate SCoP because of reason " << Kind << "\n");
2137 addAssumption(Kind, isl::set::empty(getParamSpace()), Loc, AS_ASSUMPTION, BB);
2138 }
2139
2140 isl::set Scop::getInvalidContext() const { return InvalidContext; }
2141
2142 void Scop::printContext(raw_ostream &OS) const {
2143 OS << "Context:\n";
2144 OS.indent(4) << Context << "\n";
2145
2146 OS.indent(4) << "Assumed Context:\n";
2147 OS.indent(4) << AssumedContext << "\n";
2148
2149 OS.indent(4) << "Invalid Context:\n";
2150 OS.indent(4) << InvalidContext << "\n";
2151
2152 unsigned Dim = 0;
2153 for (const SCEV *Parameter : Parameters)
2154 OS.indent(4) << "p" << Dim++ << ": " << *Parameter << "\n";
2155 }
2156
2157 void Scop::printAliasAssumptions(raw_ostream &OS) const {
2158 int noOfGroups = 0;
2159 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
2160 if (Pair.second.size() == 0)
2161 noOfGroups += 1;
2162 else
2163 noOfGroups += Pair.second.size();
2164 }
2165
2166 OS.indent(4) << "Alias Groups (" << noOfGroups << "):\n";
2167 if (MinMaxAliasGroups.empty()) {
2168 OS.indent(8) << "n/a\n";
2169 return;
2170 }
2171
2172 for (const MinMaxVectorPairTy &Pair : MinMaxAliasGroups) {
2173
2174 // If the group has no read only accesses print the write accesses.
2175 if (Pair.second.empty()) {
2176 OS.indent(8) << "[[";
2177 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
2178 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
2179 << ">";
2180 }
2181 OS << " ]]\n";
2182 }
2183
2184 for (const MinMaxAccessTy &MMAReadOnly : Pair.second) {
2185 OS.indent(8) << "[[";
2186 OS << " <" << MMAReadOnly.first << ", " << MMAReadOnly.second << ">";
2187 for (const MinMaxAccessTy &MMANonReadOnly : Pair.first) {
2188 OS << " <" << MMANonReadOnly.first << ", " << MMANonReadOnly.second
2189 << ">";
2190 }
2191 OS << " ]]\n";
2192 }
2193 }
2194 }
2195
2196 void Scop::printStatements(raw_ostream &OS, bool PrintInstructions) const {
2197 OS << "Statements {\n";
2198
2199 for (const ScopStmt &Stmt : *this) {
2200 OS.indent(4);
2201 Stmt.print(OS, PrintInstructions);
2202 }
2203
2204 OS.indent(4) << "}\n";
2205 }
2206
2207 void Scop::printArrayInfo(raw_ostream &OS) const {
2208 OS << "Arrays {\n";
2209
2210 for (auto &Array : arrays())
2211 Array->print(OS);
2212
2213 OS.indent(4) << "}\n";
2214
2215 OS.indent(4) << "Arrays (Bounds as pw_affs) {\n";
2216
2217 for (auto &Array : arrays())
2218 Array->print(OS, /* SizeAsPwAff */ true);
2219
2220 OS.indent(4) << "}\n";
2221 }
2222
2223 void Scop::print(raw_ostream &OS, bool PrintInstructions) const {
2224 OS.indent(4) << "Function: " << getFunction().getName() << "\n";
2225 OS.indent(4) << "Region: " << getNameStr() << "\n";
2226 OS.indent(4) << "Max Loop Depth: " << getMaxLoopDepth() << "\n";
2227 OS.indent(4) << "Invariant Accesses: {\n";
2228 for (const auto &IAClass : InvariantEquivClasses) {
2229 const auto &MAs = IAClass.InvariantAccesses;
2230 if (MAs.empty()) {
2231 OS.indent(12) << "Class Pointer: " << *IAClass.IdentifyingPointer << "\n";
2232 } else {
2233 MAs.front()->print(OS);
2234 OS.indent(12) << "Execution Context: " << IAClass.ExecutionContext
2235 << "\n";
2236 }
2237 }
2238 OS.indent(4) << "}\n";
2239 printContext(OS.indent(4));
2240 printArrayInfo(OS.indent(4));
2241 printAliasAssumptions(OS);
2242 printStatements(OS.indent(4), PrintInstructions);
2243 }
2244
2245 #if !defined(NDEBUG) || defined(LLVM_ENABLE_DUMP)
2246 LLVM_DUMP_METHOD void Scop::dump() const { print(dbgs(), true); }
2247 #endif
2248
2249 isl::ctx Scop::getIslCtx() const { return IslCtx.get(); }
2250
2251 __isl_give PWACtx Scop::getPwAff(const SCEV *E, BasicBlock *BB,
2252 bool NonNegative,
2253 RecordedAssumptionsTy *RecordedAssumptions) {
2254 // First try to use the SCEVAffinator to generate a piecewise defined
2255   // affine function from @p E in the context of @p BB. If that task becomes too
2256   // complex, the affinator might return a nullptr. In such a case we invalidate
2257 // the SCoP and return a dummy value. This way we do not need to add error
2258 // handling code to all users of this function.
2259 auto PWAC = Affinator.getPwAff(E, BB, RecordedAssumptions);
2260 if (PWAC.first) {
2261 // TODO: We could use a heuristic and either use:
2262 // SCEVAffinator::takeNonNegativeAssumption
2263 // or
2264 // SCEVAffinator::interpretAsUnsigned
2265 // to deal with unsigned or "NonNegative" SCEVs.
2266 if (NonNegative)
2267 Affinator.takeNonNegativeAssumption(PWAC, RecordedAssumptions);
2268 return PWAC;
2269 }
2270
2271 auto DL = BB ? BB->getTerminator()->getDebugLoc() : DebugLoc();
2272 invalidate(COMPLEXITY, DL, BB);
2273 return Affinator.getPwAff(SE->getZero(E->getType()), BB, RecordedAssumptions);
2274 }
2275
2276 isl::union_set Scop::getDomains() const {
2277 isl_space *EmptySpace = isl_space_params_alloc(getIslCtx().get(), 0);
2278 isl_union_set *Domain = isl_union_set_empty(EmptySpace);
2279
2280 for (const ScopStmt &Stmt : *this)
2281 Domain = isl_union_set_add_set(Domain, Stmt.getDomain().release());
2282
2283 return isl::manage(Domain);
2284 }
2285
2286 isl::pw_aff Scop::getPwAffOnly(const SCEV *E, BasicBlock *BB,
2287 RecordedAssumptionsTy *RecordedAssumptions) {
2288 PWACtx PWAC = getPwAff(E, BB, RecordedAssumptions);
2289 return PWAC.first;
2290 }
2291
2292 isl::union_map
2293 Scop::getAccessesOfType(std::function<bool(MemoryAccess &)> Predicate) {
2294 isl::union_map Accesses = isl::union_map::empty(getParamSpace());
2295
2296 for (ScopStmt &Stmt : *this) {
2297 for (MemoryAccess *MA : Stmt) {
2298 if (!Predicate(*MA))
2299 continue;
2300
2301 isl::set Domain = Stmt.getDomain();
2302 isl::map AccessDomain = MA->getAccessRelation();
2303 AccessDomain = AccessDomain.intersect_domain(Domain);
2304 Accesses = Accesses.add_map(AccessDomain);
2305 }
2306 }
2307
2308 return Accesses.coalesce();
2309 }
2310
2311 isl::union_map Scop::getMustWrites() {
2312 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMustWrite(); });
2313 }
2314
2315 isl::union_map Scop::getMayWrites() {
2316 return getAccessesOfType([](MemoryAccess &MA) { return MA.isMayWrite(); });
2317 }
2318
2319 isl::union_map Scop::getWrites() {
2320 return getAccessesOfType([](MemoryAccess &MA) { return MA.isWrite(); });
2321 }
2322
2323 isl::union_map Scop::getReads() {
2324 return getAccessesOfType([](MemoryAccess &MA) { return MA.isRead(); });
2325 }
2326
2327 isl::union_map Scop::getAccesses() {
2328 return getAccessesOfType([](MemoryAccess &MA) { return true; });
2329 }
2330
2331 isl::union_map Scop::getAccesses(ScopArrayInfo *Array) {
2332 return getAccessesOfType(
2333 [Array](MemoryAccess &MA) { return MA.getScopArrayInfo() == Array; });
2334 }
2335
2336 isl::union_map Scop::getSchedule() const {
2337 auto Tree = getScheduleTree();
2338 return Tree.get_map();
2339 }
2340
2341 isl::schedule Scop::getScheduleTree() const {
2342 return Schedule.intersect_domain(getDomains());
2343 }
2344
2345 void Scop::setSchedule(isl::union_map NewSchedule) {
2346 auto S = isl::schedule::from_domain(getDomains());
2347 Schedule = S.insert_partial_schedule(
2348 isl::multi_union_pw_aff::from_union_map(NewSchedule));
2349 ScheduleModified = true;
2350 }
2351
2352 void Scop::setScheduleTree(isl::schedule NewSchedule) {
2353 Schedule = NewSchedule;
2354 ScheduleModified = true;
2355 }
2356
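// Intersect the domain of every statement with Domain. Returns true if at
// least one statement domain was actually restricted.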
2357 bool Scop::restrictDomains(isl::union_set Domain) {
2358 bool Changed = false;
2359 for (ScopStmt &Stmt : *this) {
2360 isl::union_set StmtDomain = isl::union_set(Stmt.getDomain());
2361 isl::union_set NewStmtDomain = StmtDomain.intersect(Domain);
2362
2363 if (StmtDomain.is_subset(NewStmtDomain))
2364 continue;
2365
2366 Changed = true;
2367
2368 NewStmtDomain = NewStmtDomain.coalesce();
2369
2370 if (NewStmtDomain.is_empty())
2371 Stmt.restrictDomain(isl::set::empty(Stmt.getDomainSpace()));
2372 else
2373 Stmt.restrictDomain(isl::set(NewStmtDomain));
2374 }
2375 return Changed;
2376 }
2377
2378 ScalarEvolution *Scop::getSE() const { return SE; }
2379
2380 void Scop::addScopStmt(BasicBlock *BB, StringRef Name, Loop *SurroundingLoop,
2381 std::vector<Instruction *> Instructions) {
2382 assert(BB && "Unexpected nullptr!");
2383 Stmts.emplace_back(*this, *BB, Name, SurroundingLoop, Instructions);
2384 auto *Stmt = &Stmts.back();
2385 StmtMap[BB].push_back(Stmt);
2386 for (Instruction *Inst : Instructions) {
2387 assert(!InstStmtMap.count(Inst) &&
2388 "Unexpected statement corresponding to the instruction.");
2389 InstStmtMap[Inst] = Stmt;
2390 }
2391 }
2392
2393 void Scop::addScopStmt(Region *R, StringRef Name, Loop *SurroundingLoop,
2394 std::vector<Instruction *> Instructions) {
2395 assert(R && "Unexpected nullptr!");
2396 Stmts.emplace_back(*this, *R, Name, SurroundingLoop, Instructions);
2397 auto *Stmt = &Stmts.back();
2398
2399 for (Instruction *Inst : Instructions) {
2400 assert(!InstStmtMap.count(Inst) &&
2401 "Unexpected statement corresponding to the instruction.");
2402 InstStmtMap[Inst] = Stmt;
2403 }
2404
2405 for (BasicBlock *BB : R->blocks()) {
2406 StmtMap[BB].push_back(Stmt);
2407 if (BB == R->getEntry())
2408 continue;
2409 for (Instruction &Inst : *BB) {
2410 assert(!InstStmtMap.count(&Inst) &&
2411 "Unexpected statement corresponding to the instruction.");
2412 InstStmtMap[&Inst] = Stmt;
2413 }
2414 }
2415 }
2416
2417 ScopStmt *Scop::addScopStmt(isl::map SourceRel, isl::map TargetRel,
2418 isl::set Domain) {
2419 #ifndef NDEBUG
2420 isl::set SourceDomain = SourceRel.domain();
2421 isl::set TargetDomain = TargetRel.domain();
2422 assert(Domain.is_subset(TargetDomain) &&
2423 "Target access not defined for complete statement domain");
2424 assert(Domain.is_subset(SourceDomain) &&
2425 "Source access not defined for complete statement domain");
2426 #endif
2427 Stmts.emplace_back(*this, SourceRel, TargetRel, Domain);
2428 CopyStmtsNum++;
2429 return &(Stmts.back());
2430 }
2431
2432 ArrayRef<ScopStmt *> Scop::getStmtListFor(BasicBlock *BB) const {
2433 auto StmtMapIt = StmtMap.find(BB);
2434 if (StmtMapIt == StmtMap.end())
2435 return {};
2436 return StmtMapIt->second;
2437 }
2438
2439 ScopStmt *Scop::getIncomingStmtFor(const Use &U) const {
2440 auto *PHI = cast<PHINode>(U.getUser());
2441 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
2442
2443   // If the value is non-synthesizable from the incoming block, use the
2444 // statement that contains it as user statement.
2445 if (auto *IncomingInst = dyn_cast<Instruction>(U.get())) {
2446 if (IncomingInst->getParent() == IncomingBB) {
2447 if (ScopStmt *IncomingStmt = getStmtFor(IncomingInst))
2448 return IncomingStmt;
2449 }
2450 }
2451
2452 // Otherwise, use the epilogue/last statement.
2453 return getLastStmtFor(IncomingBB);
2454 }
2455
2456 ScopStmt *Scop::getLastStmtFor(BasicBlock *BB) const {
2457 ArrayRef<ScopStmt *> StmtList = getStmtListFor(BB);
2458 if (!StmtList.empty())
2459 return StmtList.back();
2460 return nullptr;
2461 }
2462
2463 ArrayRef<ScopStmt *> Scop::getStmtListFor(RegionNode *RN) const {
2464 if (RN->isSubRegion())
2465 return getStmtListFor(RN->getNodeAs<Region>());
2466 return getStmtListFor(RN->getNodeAs<BasicBlock>());
2467 }
2468
2469 ArrayRef<ScopStmt *> Scop::getStmtListFor(Region *R) const {
2470 return getStmtListFor(R->getEntry());
2471 }
2472
2473 int Scop::getRelativeLoopDepth(const Loop *L) const {
2474 if (!L || !R.contains(L))
2475 return -1;
2476 // outermostLoopInRegion always returns nullptr for top level regions
2477 if (R.isTopLevelRegion()) {
2478 // LoopInfo's depths start at 1, we start at 0
2479 return L->getLoopDepth() - 1;
2480 } else {
2481 Loop *OuterLoop = R.outermostLoopInRegion(const_cast<Loop *>(L));
2482 assert(OuterLoop);
2483 return L->getLoopDepth() - OuterLoop->getLoopDepth();
2484 }
2485 }
2486
2487 ScopArrayInfo *Scop::getArrayInfoByName(const std::string BaseName) {
2488 for (auto &SAI : arrays()) {
2489 if (SAI->getName() == BaseName)
2490 return SAI;
2491 }
2492 return nullptr;
2493 }
2494
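// Register value-read and PHI-write accesses in the per-array lists returned
// by getValueUses() and getPHIIncomings().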
2495 void Scop::addAccessData(MemoryAccess *Access) {
2496 const ScopArrayInfo *SAI = Access->getOriginalScopArrayInfo();
2497 assert(SAI && "can only use after access relations have been constructed");
2498
2499 if (Access->isOriginalValueKind() && Access->isRead())
2500 ValueUseAccs[SAI].push_back(Access);
2501 else if (Access->isOriginalAnyPHIKind() && Access->isWrite())
2502 PHIIncomingAccs[SAI].push_back(Access);
2503 }
2504
2505 void Scop::removeAccessData(MemoryAccess *Access) {
2506 if (Access->isOriginalValueKind() && Access->isWrite()) {
2507 ValueDefAccs.erase(Access->getAccessValue());
2508 } else if (Access->isOriginalValueKind() && Access->isRead()) {
2509 auto &Uses = ValueUseAccs[Access->getScopArrayInfo()];
2510 auto NewEnd = std::remove(Uses.begin(), Uses.end(), Access);
2511 Uses.erase(NewEnd, Uses.end());
2512 } else if (Access->isOriginalPHIKind() && Access->isRead()) {
2513 PHINode *PHI = cast<PHINode>(Access->getAccessInstruction());
2514 PHIReadAccs.erase(PHI);
2515 } else if (Access->isOriginalAnyPHIKind() && Access->isWrite()) {
2516 auto &Incomings = PHIIncomingAccs[Access->getScopArrayInfo()];
2517 auto NewEnd = std::remove(Incomings.begin(), Incomings.end(), Access);
2518 Incomings.erase(NewEnd, Incomings.end());
2519 }
2520 }
2521
2522 MemoryAccess *Scop::getValueDef(const ScopArrayInfo *SAI) const {
2523 assert(SAI->isValueKind());
2524
2525 Instruction *Val = dyn_cast<Instruction>(SAI->getBasePtr());
2526 if (!Val)
2527 return nullptr;
2528
2529 return ValueDefAccs.lookup(Val);
2530 }
2531
2532 ArrayRef<MemoryAccess *> Scop::getValueUses(const ScopArrayInfo *SAI) const {
2533 assert(SAI->isValueKind());
2534 auto It = ValueUseAccs.find(SAI);
2535 if (It == ValueUseAccs.end())
2536 return {};
2537 return It->second;
2538 }
2539
2540 MemoryAccess *Scop::getPHIRead(const ScopArrayInfo *SAI) const {
2541 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2542
2543 if (SAI->isExitPHIKind())
2544 return nullptr;
2545
2546 PHINode *PHI = cast<PHINode>(SAI->getBasePtr());
2547 return PHIReadAccs.lookup(PHI);
2548 }
2549
2550 ArrayRef<MemoryAccess *> Scop::getPHIIncomings(const ScopArrayInfo *SAI) const {
2551 assert(SAI->isPHIKind() || SAI->isExitPHIKind());
2552 auto It = PHIIncomingAccs.find(SAI);
2553 if (It == PHIIncomingAccs.end())
2554 return {};
2555 return It->second;
2556 }
2557
2558 bool Scop::isEscaping(Instruction *Inst) {
2559 assert(contains(Inst) && "The concept of escaping makes only sense for "
2560 "values defined inside the SCoP");
2561
2562 for (Use &Use : Inst->uses()) {
2563 BasicBlock *UserBB = getUseBlock(Use);
2564 if (!contains(UserBB))
2565 return true;
2566
2567 // When the SCoP region exit needs to be simplified, PHIs in the region exit
2568 // move to a new basic block such that its incoming blocks are not in the
2569 // SCoP anymore.
2570 if (hasSingleExitEdge() && isa<PHINode>(Use.getUser()) &&
2571 isExit(cast<PHINode>(Use.getUser())->getParent()))
2572 return true;
2573 }
2574 return false;
2575 }
2576
2577 void Scop::incrementNumberOfAliasingAssumptions(unsigned step) {
2578 AssumptionsAliasing += step;
2579 }
2580
2581 Scop::ScopStatistics Scop::getStatistics() const {
2582 ScopStatistics Result;
2583 #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2584 auto LoopStat = ScopDetection::countBeneficialLoops(&R, *SE, *getLI(), 0);
2585
2586 int NumTotalLoops = LoopStat.NumLoops;
2587 Result.NumBoxedLoops = getBoxedLoops().size();
2588 Result.NumAffineLoops = NumTotalLoops - Result.NumBoxedLoops;
2589
2590 for (const ScopStmt &Stmt : *this) {
2591 isl::set Domain = Stmt.getDomain().intersect_params(getContext());
2592 bool IsInLoop = Stmt.getNumIterators() >= 1;
2593 for (MemoryAccess *MA : Stmt) {
2594 if (!MA->isWrite())
2595 continue;
2596
2597 if (MA->isLatestValueKind()) {
2598 Result.NumValueWrites += 1;
2599 if (IsInLoop)
2600 Result.NumValueWritesInLoops += 1;
2601 }
2602
2603 if (MA->isLatestAnyPHIKind()) {
2604 Result.NumPHIWrites += 1;
2605 if (IsInLoop)
2606 Result.NumPHIWritesInLoops += 1;
2607 }
2608
2609 isl::set AccSet =
2610 MA->getAccessRelation().intersect_domain(Domain).range();
2611 if (AccSet.is_singleton()) {
2612 Result.NumSingletonWrites += 1;
2613 if (IsInLoop)
2614 Result.NumSingletonWritesInLoops += 1;
2615 }
2616 }
2617 }
2618 #endif
2619 return Result;
2620 }
2621
2622 raw_ostream &polly::operator<<(raw_ostream &OS, const Scop &scop) {
2623 scop.print(OS, PollyPrintInstructions);
2624 return OS;
2625 }
2626
2627 //===----------------------------------------------------------------------===//
2628 void ScopInfoRegionPass::getAnalysisUsage(AnalysisUsage &AU) const {
2629 AU.addRequired<LoopInfoWrapperPass>();
2630 AU.addRequired<RegionInfoPass>();
2631 AU.addRequired<DominatorTreeWrapperPass>();
2632 AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2633 AU.addRequiredTransitive<ScopDetectionWrapperPass>();
2634 AU.addRequired<AAResultsWrapperPass>();
2635 AU.addRequired<AssumptionCacheTracker>();
2636 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2637 AU.setPreservesAll();
2638 }
2639
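// Fold the per-SCoP loop and write counts into the global pass statistics.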
2640 void updateLoopCountStatistic(ScopDetection::LoopStats Stats,
2641 Scop::ScopStatistics ScopStats) {
2642 assert(Stats.NumLoops == ScopStats.NumAffineLoops + ScopStats.NumBoxedLoops);
2643
2644 NumScops++;
2645 NumLoopsInScop += Stats.NumLoops;
2646 MaxNumLoopsInScop =
2647 std::max(MaxNumLoopsInScop.getValue(), (unsigned)Stats.NumLoops);
2648
2649 if (Stats.MaxDepth == 0)
2650 NumScopsDepthZero++;
2651 else if (Stats.MaxDepth == 1)
2652 NumScopsDepthOne++;
2653 else if (Stats.MaxDepth == 2)
2654 NumScopsDepthTwo++;
2655 else if (Stats.MaxDepth == 3)
2656 NumScopsDepthThree++;
2657 else if (Stats.MaxDepth == 4)
2658 NumScopsDepthFour++;
2659 else if (Stats.MaxDepth == 5)
2660 NumScopsDepthFive++;
2661 else
2662 NumScopsDepthLarger++;
2663
2664 NumAffineLoops += ScopStats.NumAffineLoops;
2665 NumBoxedLoops += ScopStats.NumBoxedLoops;
2666
2667 NumValueWrites += ScopStats.NumValueWrites;
2668 NumValueWritesInLoops += ScopStats.NumValueWritesInLoops;
2669 NumPHIWrites += ScopStats.NumPHIWrites;
2670 NumPHIWritesInLoops += ScopStats.NumPHIWritesInLoops;
2671 NumSingletonWrites += ScopStats.NumSingletonWrites;
2672 NumSingletonWritesInLoops += ScopStats.NumSingletonWritesInLoops;
2673 }
2674
2675 bool ScopInfoRegionPass::runOnRegion(Region *R, RGPassManager &RGM) {
2676 auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2677
2678 if (!SD.isMaxRegionInScop(*R))
2679 return false;
2680
2681 Function *F = R->getEntry()->getParent();
2682 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2683 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2684 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2685 auto const &DL = F->getParent()->getDataLayout();
2686 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2687 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(*F);
2688 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2689
2690 ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2691 S = SB.getScop(); // take ownership of scop object
2692
2693 #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2694 if (S) {
2695 ScopDetection::LoopStats Stats =
2696 ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
2697 updateLoopCountStatistic(Stats, S->getStatistics());
2698 }
2699 #endif
2700
2701 return false;
2702 }
2703
2704 void ScopInfoRegionPass::print(raw_ostream &OS, const Module *) const {
2705 if (S)
2706 S->print(OS, PollyPrintInstructions);
2707 else
2708 OS << "Invalid Scop!\n";
2709 }
2710
2711 char ScopInfoRegionPass::ID = 0;
2712
2713 Pass *polly::createScopInfoRegionPassPass() { return new ScopInfoRegionPass(); }
2714
2715 INITIALIZE_PASS_BEGIN(ScopInfoRegionPass, "polly-scops",
2716 "Polly - Create polyhedral description of Scops", false,
2717 false);
2718 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
2719 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
2720 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
2721 INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
2722 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
2723 INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
2724 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
2725 INITIALIZE_PASS_END(ScopInfoRegionPass, "polly-scops",
2726 "Polly - Create polyhedral description of Scops", false,
2727 false)
2728
2729 //===----------------------------------------------------------------------===//
2730 ScopInfo::ScopInfo(const DataLayout &DL, ScopDetection &SD, ScalarEvolution &SE,
2731 LoopInfo &LI, AliasAnalysis &AA, DominatorTree &DT,
2732 AssumptionCache &AC, OptimizationRemarkEmitter &ORE)
2733 : DL(DL), SD(SD), SE(SE), LI(LI), AA(AA), DT(DT), AC(AC), ORE(ORE) {
2734 recompute();
2735 }
2736
2737 void ScopInfo::recompute() {
2738 RegionToScopMap.clear();
2739 /// Create polyhedral description of scops for all the valid regions of a
2740 /// function.
2741 for (auto &It : SD) {
2742 Region *R = const_cast<Region *>(It);
2743 if (!SD.isMaxRegionInScop(*R))
2744 continue;
2745
2746 ScopBuilder SB(R, AC, AA, DL, DT, LI, SD, SE, ORE);
2747 std::unique_ptr<Scop> S = SB.getScop();
2748 if (!S)
2749 continue;
2750 #if !defined(NDEBUG) || defined(LLVM_ENABLE_STATS)
2751 ScopDetection::LoopStats Stats =
2752 ScopDetection::countBeneficialLoops(&S->getRegion(), SE, LI, 0);
2753 updateLoopCountStatistic(Stats, S->getStatistics());
2754 #endif
2755 bool Inserted = RegionToScopMap.insert({R, std::move(S)}).second;
2756 assert(Inserted && "Building Scop for the same region twice!");
2757 (void)Inserted;
2758 }
2759 }
2760
2761 bool ScopInfo::invalidate(Function &F, const PreservedAnalyses &PA,
2762 FunctionAnalysisManager::Invalidator &Inv) {
2763   // Check whether the analysis itself or all analyses on functions have been
2764   // preserved, or whether anything we're holding references to is invalidated.
2765 auto PAC = PA.getChecker<ScopInfoAnalysis>();
2766 return !(PAC.preserved() || PAC.preservedSet<AllAnalysesOn<Function>>()) ||
2767 Inv.invalidate<ScopAnalysis>(F, PA) ||
2768 Inv.invalidate<ScalarEvolutionAnalysis>(F, PA) ||
2769 Inv.invalidate<LoopAnalysis>(F, PA) ||
2770 Inv.invalidate<AAManager>(F, PA) ||
2771 Inv.invalidate<DominatorTreeAnalysis>(F, PA) ||
2772 Inv.invalidate<AssumptionAnalysis>(F, PA);
2773 }
2774
2775 AnalysisKey ScopInfoAnalysis::Key;
2776
2777 ScopInfoAnalysis::Result ScopInfoAnalysis::run(Function &F,
2778 FunctionAnalysisManager &FAM) {
2779 auto &SD = FAM.getResult<ScopAnalysis>(F);
2780 auto &SE = FAM.getResult<ScalarEvolutionAnalysis>(F);
2781 auto &LI = FAM.getResult<LoopAnalysis>(F);
2782 auto &AA = FAM.getResult<AAManager>(F);
2783 auto &DT = FAM.getResult<DominatorTreeAnalysis>(F);
2784 auto &AC = FAM.getResult<AssumptionAnalysis>(F);
2785 auto &DL = F.getParent()->getDataLayout();
2786 auto &ORE = FAM.getResult<OptimizationRemarkEmitterAnalysis>(F);
2787 return {DL, SD, SE, LI, AA, DT, AC, ORE};
2788 }
2789
2790 PreservedAnalyses ScopInfoPrinterPass::run(Function &F,
2791 FunctionAnalysisManager &FAM) {
2792 auto &SI = FAM.getResult<ScopInfoAnalysis>(F);
2793   // Since the legacy PM processes Scops bottom-up, we print them in reverse
2794   // order here to keep the output consistent.
2795 for (auto &It : reverse(SI)) {
2796 if (It.second)
2797 It.second->print(Stream, PollyPrintInstructions);
2798 else
2799 Stream << "Invalid Scop!\n";
2800 }
2801 return PreservedAnalyses::all();
2802 }
2803
2804 void ScopInfoWrapperPass::getAnalysisUsage(AnalysisUsage &AU) const {
2805 AU.addRequired<LoopInfoWrapperPass>();
2806 AU.addRequired<RegionInfoPass>();
2807 AU.addRequired<DominatorTreeWrapperPass>();
2808 AU.addRequiredTransitive<ScalarEvolutionWrapperPass>();
2809 AU.addRequiredTransitive<ScopDetectionWrapperPass>();
2810 AU.addRequired<AAResultsWrapperPass>();
2811 AU.addRequired<AssumptionCacheTracker>();
2812 AU.addRequired<OptimizationRemarkEmitterWrapperPass>();
2813 AU.setPreservesAll();
2814 }
2815
2816 bool ScopInfoWrapperPass::runOnFunction(Function &F) {
2817 auto &SD = getAnalysis<ScopDetectionWrapperPass>().getSD();
2818 auto &SE = getAnalysis<ScalarEvolutionWrapperPass>().getSE();
2819 auto &LI = getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
2820 auto &AA = getAnalysis<AAResultsWrapperPass>().getAAResults();
2821 auto const &DL = F.getParent()->getDataLayout();
2822 auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
2823 auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
2824 auto &ORE = getAnalysis<OptimizationRemarkEmitterWrapperPass>().getORE();
2825
2826 Result.reset(new ScopInfo{DL, SD, SE, LI, AA, DT, AC, ORE});
2827 return false;
2828 }
2829
2830 void ScopInfoWrapperPass::print(raw_ostream &OS, const Module *) const {
2831 for (auto &It : *Result) {
2832 if (It.second)
2833 It.second->print(OS, PollyPrintInstructions);
2834 else
2835 OS << "Invalid Scop!\n";
2836 }
2837 }
2838
2839 char ScopInfoWrapperPass::ID = 0;
2840
2841 Pass *polly::createScopInfoWrapperPassPass() {
2842 return new ScopInfoWrapperPass();
2843 }
2844
2845 INITIALIZE_PASS_BEGIN(
2846 ScopInfoWrapperPass, "polly-function-scops",
2847 "Polly - Create polyhedral description of all Scops of a function", false,
2848 false);
2849 INITIALIZE_PASS_DEPENDENCY(AAResultsWrapperPass);
2850 INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker);
2851 INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass);
2852 INITIALIZE_PASS_DEPENDENCY(RegionInfoPass);
2853 INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass);
2854 INITIALIZE_PASS_DEPENDENCY(ScopDetectionWrapperPass);
2855 INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass);
2856 INITIALIZE_PASS_END(
2857 ScopInfoWrapperPass, "polly-function-scops",
2858 "Polly - Create polyhedral description of all Scops of a function", false,
2859 false)
2860