//===------ ZoneAlgo.cpp ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Derive information about array elements between statements ("Zones").
//
// The algorithms here work on the scatter space - the image space of the
// schedule returned by Scop::getSchedule(). We call an element in that space a
// "timepoint". Timepoints are lexicographically ordered such that we can
// define ranges in the scatter space. We use two flavors of such ranges:
// Timepoint sets and zones. A timepoint set is simply a subset of the scatter
// space and is directly stored as isl_set.
//
// Zones are used to describe the space between timepoints as open sets, i.e.
// they do not contain the extrema. Using isl rational sets to express these
// would be overkill. We also cannot store them as the integer timepoints they
// contain; the (nonempty) zone between 1 and 2 would be empty and
// indistinguishable from e.g. the zone between 3 and 4. Also, we cannot store
// the integer set including the extrema; the set ]1,2[ + ]3,4[ could be
// coalesced to ]1,3[, although we defined the range [2,3] not to be in the set.
// Instead, we store the "half-open" integer extrema, including the lower bound,
// but excluding the upper bound. Examples:
//
// * The set { [i] : 1 <= i <= 3 } represents the zone ]0,3[ (which contains the
//   integer points 1 and 2, but not 0 or 3)
//
// * { [1] } represents the zone ]0,1[
//
// * { [i] : i = 1 or i = 3 } represents the zone ]0,1[ + ]2,3[
//
// Therefore, an integer i in the set represents the zone ]i-1,i[, i.e. strictly
// speaking the integer points never belong to the zone. However, depending on
// the interpretation, one might want to include them. Part of the
// interpretation may not be known when the zone is constructed.
//
// Reads are assumed to always take place before writes, hence we can think of
// reads taking place at the beginning of a timepoint and writes at the end.
//
// Let's assume that the zone represents the lifetime of a variable. That is,
// the zone begins with a write that defines the value during its lifetime and
// ends with the last read of that value. In the following we consider whether a
// read/write at the beginning/ending of the lifetime zone should be within the
// zone or outside of it.
//
// * A read at the timepoint that starts the live-range loads the previous
//   value. Hence, exclude the timepoint starting the zone.
//
// * For a write at the timepoint that starts the live-range, it is undefined
//   whether it occurs before or after the write that starts the lifetime. We
//   do not allow this situation to occur. Hence, we include the timepoint
//   starting the zone when determining whether two writes conflict.
//
// * A read at the timepoint that ends the live-range reads the same variable.
//   We include the timepoint at the end of the zone to include that read into
//   the live-range. Doing otherwise would mean that the two reads access
//   different values, which would mean that the values they read are both
//   alive at the same time but occupy the same variable.
//
// * A write at the timepoint that ends the live-range starts a new live-range.
//   It must not be included in the live-range of the previous definition.
//
// All combinations of reads and writes at the endpoints are possible, but most
// of the time only the write->read (for instance, a live-range from definition
// to last use) and read->write (for instance, an unused range from last use to
// overwrite) combinations are interesting (half-open ranges). write->write
// zones might be useful as well in some contexts to represent output
// dependencies.
//
// @see convertZoneToTimepoints
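//
// As a hedged illustration of that conversion (a sketch based on the
// representation defined above, not a statement about the exact API): the
// zone { [i] : i = 1 or i = 3 }, i.e. ]0,1[ + ]2,3[, becomes the timepoint
// set { [1]; [3] } when the upper extrema are included and the lower ones
// excluded, and { [0]; [2] } when the lower extrema are included instead.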
//
//
// The code makes use of maps and sets in many different spaces. To not lose
// track of which space a set or map is expected to be in, variables holding an
// isl reference are usually annotated in the comments. They roughly follow isl
// syntax for spaces, but only the tuples, not the dimensions. The tuples have a
// meaning as follows:
//
// * Space[] - An unspecified tuple. Used for function parameters such that the
//             function caller can use it for anything they like.
//
// * Domain[] - A statement instance as returned by ScopStmt::getDomain()
//     isl_id_get_name: Stmt_<NameOfBasicBlock>
//     isl_id_get_user: Pointer to ScopStmt
//
// * Element[] - An array element as in the range part of
//               MemoryAccess::getAccessRelation()
//     isl_id_get_name: MemRef_<NameOfArrayVariable>
//     isl_id_get_user: Pointer to ScopArrayInfo
//
// * Scatter[] - Scatter space or space of timepoints
//     Has no tuple id
//
// * Zone[] - Range between timepoints as described above
//     Has no tuple id
//
// * ValInst[] - An llvm::Value as defined at a specific timepoint.
//
//     A ValInst[] itself can be structured as one of:
//
//     * [] - An unknown value.
//         Always zero dimensions
//         Has no tuple id
//
//     * Value[] - An llvm::Value that is read-only in the SCoP, i.e. its
//                 runtime content does not depend on the timepoint.
//         Always zero dimensions
//         isl_id_get_name: Val_<NameOfValue>
//         isl_id_get_user: A pointer to an llvm::Value
//
//     * SCEV[...] - A synthesizable llvm::SCEV Expression.
//         In contrast to a Value[], it has at least one dimension per
//         SCEVAddRecExpr in the SCEV.
//
//     * [Domain[] -> Value[]] - An llvm::Value that may change during the
//                               Scop's execution.
//         The tuple itself has no id, but it wraps a map space holding a
//         statement instance which defines the llvm::Value as the map's domain
//         and llvm::Value itself as range.
//
// @see makeValInst()
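//
// For illustration (the statement and value names are made up): the value of
// a load %x as seen by instance [i] of a statement Stmt_body is written as
// the wrapped ValInst [Stmt_body[i] -> Val_x[]], a read-only value %n is just
// Val_n[], and an unknown value is the zero-dimensional, unnamed tuple [].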
//
// An annotation "{ Domain[] -> Scatter[] }" therefore means: A map from a
// statement instance to a timepoint, aka a schedule. There is only one scatter
// space, but most of the time multiple statements are processed in one set.
// This is why most of the time isl_union_map has to be used.
//
// The basic algorithm works as follows:
// At first we verify that the SCoP is compatible with this technique. For
// instance, two writes cannot write to the same location at the same statement
// instance because we cannot determine within the polyhedral model which one
// comes first. Once this is verified, we compute zones in which an array
// element is unused. This computation can fail if it takes too long. Then the
// main algorithm is executed. Because every store potentially trails an unused
// zone, we start at stores. We search for a scalar (MemoryKind::Value or
// MemoryKind::PHI) that we can map to the array element overwritten by the
// store, preferably one that is used by the store or at least the ScopStmt.
// If it does not conflict with the lifetime of the values in the array
// element, the mapping is applied and the unused zone is updated as it is now
// used. We continue to try to map scalars to the array element until there are
// no more candidates to map. The algorithm is greedy in the sense that the
// first scalar not conflicting will be mapped. Other scalars processed later
// that could have fit the same unused zone will be rejected. As such the
// result depends on the processing order.
//
//===----------------------------------------------------------------------===//

#include "polly/ZoneAlgo.h"
#include "polly/ScopInfo.h"
#include "polly/Support/GICHelper.h"
#include "polly/Support/ISLTools.h"
#include "polly/Support/VirtualInstruction.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Support/raw_ostream.h"

#define DEBUG_TYPE "polly-zone"

STATISTIC(NumIncompatibleArrays, "Number of not zone-analyzable arrays");
STATISTIC(NumCompatibleArrays, "Number of zone-analyzable arrays");
STATISTIC(NumRecursivePHIs, "Number of recursive PHIs");
STATISTIC(NumNormalizablePHIs, "Number of normalizable PHIs");
STATISTIC(NumPHINormialization, "Number of PHI executed normalizations");

using namespace polly;
using namespace llvm;

static isl::union_map computeReachingDefinition(isl::union_map Schedule,
                                                isl::union_map Writes,
                                                bool InclDef, bool InclRedef) {
  return computeReachingWrite(Schedule, Writes, false, InclDef, InclRedef);
}

/// Compute the reaching definition of a scalar.
///
/// Compared to computeReachingDefinition, there is just one element which is
/// accessed and therefore only a set of instances that access that element is
/// required.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] }
/// @param InclDef   Include the timepoint of the definition in the result.
/// @param InclRedef Include the timepoint of the overwrite in the result.
///
/// @return { Scatter[] -> DomainWrite[] }
static isl::union_map computeScalarReachingDefinition(isl::union_map Schedule,
                                                      isl::union_set Writes,
                                                      bool InclDef,
                                                      bool InclRedef) {
  // { DomainWrite[] -> Element[] }
  isl::union_map Defs = isl::union_map::from_domain(Writes);

  // { [Element[] -> Scatter[]] -> DomainWrite[] }
  auto ReachDefs =
      computeReachingDefinition(Schedule, Defs, InclDef, InclRedef);

  // { Scatter[] -> DomainWrite[] }
  return ReachDefs.curry().range().unwrap();
}

/// Compute the reaching definition of a scalar.
///
/// This overload accepts only a single writing statement as an isl_map,
/// consequently the result also is only a single isl_map.
///
/// @param Schedule  { DomainWrite[] -> Scatter[] }
/// @param Writes    { DomainWrite[] }
/// @param InclDef   Include the timepoint of the definition in the result.
/// @param InclRedef Include the timepoint of the overwrite in the result.
///
/// @return { Scatter[] -> DomainWrite[] }
static isl::map computeScalarReachingDefinition(isl::union_map Schedule,
                                                isl::set Writes, bool InclDef,
                                                bool InclRedef) {
  isl::space DomainSpace = Writes.get_space();
  isl::space ScatterSpace = getScatterSpace(Schedule);

  //  { Scatter[] -> DomainWrite[] }
  isl::union_map UMap = computeScalarReachingDefinition(
      Schedule, isl::union_set(Writes), InclDef, InclRedef);

  isl::space ResultSpace = ScatterSpace.map_from_domain_and_range(DomainSpace);
  return singleton(UMap, ResultSpace);
}

isl::union_map polly::makeUnknownForDomain(isl::union_set Domain) {
  return isl::union_map::from_domain(Domain);
}

/// Create a domain-to-unknown value mapping.
///
/// @see makeUnknownForDomain(isl::union_set)
///
/// @param Domain { Domain[] }
///
/// @return { Domain[] -> ValInst[] }
static isl::map makeUnknownForDomain(isl::set Domain) {
  return isl::map::from_domain(Domain);
}

/// Return whether @p Map maps to an unknown value.
///
/// @param Map { [] -> ValInst[] }
static bool isMapToUnknown(const isl::map &Map) {
  isl::space Space = Map.get_space().range();
  return Space.has_tuple_id(isl::dim::set).is_false() &&
         Space.is_wrapping().is_false() && Space.dim(isl::dim::set) == 0;
}

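/// Return only those maps of @p UMap that map to a known ValInst[], i.e. drop
/// every map whose range is the unknown value (a zero-dimensional, unnamed
/// tuple, as recognized by isMapToUnknown above).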
isl::union_map polly::filterKnownValInst(const isl::union_map &UMap) {
  isl::union_map Result = isl::union_map::empty(UMap.get_space());
  for (isl::map Map : UMap.get_map_list()) {
    if (!isMapToUnknown(Map))
      Result = Result.add_map(Map);
  }
  return Result;
}

ZoneAlgorithm::ZoneAlgorithm(const char *PassName, Scop *S, LoopInfo *LI)
    : PassName(PassName), IslCtx(S->getSharedIslCtx()), S(S), LI(LI),
      Schedule(S->getSchedule()) {
  auto Domains = S->getDomains();

  Schedule = Schedule.intersect_domain(Domains);
  ParamSpace = Schedule.get_space();
  ScatterSpace = getScatterSpace(Schedule);
}

/// Check if all stores in @p Stmt store the very same value.
///
/// This covers a special situation occurring in Polybench's
/// covariance/correlation (which is typical for algorithms that cover symmetric
/// matrices):
///
/// for (int i = 0; i < n; i += 1)
/// 	for (int j = 0; j <= i; j += 1) {
/// 		double x = ...;
/// 		C[i][j] = x;
/// 		C[j][i] = x;
/// 	}
///
/// For i == j, the same value is written twice to the same element. Double
/// writes to the same element are not allowed in DeLICM because its algorithm
/// does not see which of the writes is effective. But if it's the same value
/// anyway, it doesn't matter.
///
/// LLVM passes, however, cannot simplify this because the write is necessary
/// for i != j (unless it would add a condition for one of the writes to occur
/// only if i != j).
///
/// TODO: In the future we may want to extend this to make the checks
///       specific to different memory locations.
static bool onlySameValueWrites(ScopStmt *Stmt) {
  Value *V = nullptr;

  for (auto *MA : *Stmt) {
    if (!MA->isLatestArrayKind() || !MA->isMustWrite() ||
        !MA->isOriginalArrayKind())
      continue;

    if (!V) {
      V = MA->getAccessValue();
      continue;
    }

    if (V != MA->getAccessValue())
      return false;
  }
  return true;
}

/// Is @p InnerLoop nested inside @p OuterLoop?
static bool isInsideLoop(Loop *OuterLoop, Loop *InnerLoop) {
  // If OuterLoop is nullptr, we cannot call its contains() method. In this case
  // OuterLoop represents the 'top level' and therefore contains all loops.
  return !OuterLoop || OuterLoop->contains(InnerLoop);
}

void ZoneAlgorithm::collectIncompatibleElts(ScopStmt *Stmt,
                                            isl::union_set &IncompatibleElts,
                                            isl::union_set &AllElts) {
  auto Stores = makeEmptyUnionMap();
  auto Loads = makeEmptyUnionMap();

  // This assumes that the MemoryKind::Array MemoryAccesses are iterated in
  // order.
  for (auto *MA : *Stmt) {
    if (!MA->isOriginalArrayKind())
      continue;

    isl::map AccRelMap = getAccessRelationFor(MA);
    isl::union_map AccRel = AccRelMap;

    // To avoid solving any ILP problems, always add entire arrays instead of
    // just the elements that are accessed.
    auto ArrayElts = isl::set::universe(AccRelMap.get_space().range());
    AllElts = AllElts.add_set(ArrayElts);

    if (MA->isRead()) {
      // Reject load after store to same location.
      if (!Stores.is_disjoint(AccRel)) {
        LLVM_DEBUG(
            dbgs() << "Load after store of same element in same statement\n");
        OptimizationRemarkMissed R(PassName, "LoadAfterStore",
                                   MA->getAccessInstruction());
        R << "load after store of same element in same statement";
        R << " (previous stores: " << Stores;
        R << ", loading: " << AccRel << ")";
        S->getFunction().getContext().diagnose(R);

        IncompatibleElts = IncompatibleElts.add_set(ArrayElts);
      }

      Loads = Loads.unite(AccRel);

      continue;
    }

    // In region statements the order is less clear, e.g. the load and store
    // might be in a boxed loop.
    if (Stmt->isRegionStmt() && !Loads.is_disjoint(AccRel)) {
      LLVM_DEBUG(dbgs() << "WRITE in non-affine subregion not supported\n");
      OptimizationRemarkMissed R(PassName, "StoreInSubregion",
                                 MA->getAccessInstruction());
      R << "store is in a non-affine subregion";
      S->getFunction().getContext().diagnose(R);

      IncompatibleElts = IncompatibleElts.add_set(ArrayElts);
    }

    // Do not allow more than one store to the same location.
    if (!Stores.is_disjoint(AccRel) && !onlySameValueWrites(Stmt)) {
      LLVM_DEBUG(dbgs() << "WRITE after WRITE to same element\n");
      OptimizationRemarkMissed R(PassName, "StoreAfterStore",
                                 MA->getAccessInstruction());
      R << "store after store of same element in same statement";
      R << " (previous stores: " << Stores;
      R << ", storing: " << AccRel << ")";
      S->getFunction().getContext().diagnose(R);

      IncompatibleElts = IncompatibleElts.add_set(ArrayElts);
    }

    Stores = Stores.unite(AccRel);
  }
}

void ZoneAlgorithm::addArrayReadAccess(MemoryAccess *MA) {
  assert(MA->isLatestArrayKind());
  assert(MA->isRead());
  ScopStmt *Stmt = MA->getStatement();

  // { DomainRead[] -> Element[] }
  auto AccRel = intersectRange(getAccessRelationFor(MA), CompatibleElts);
  AllReads = AllReads.add_map(AccRel);

  if (LoadInst *Load = dyn_cast_or_null<LoadInst>(MA->getAccessInstruction())) {
    // { DomainRead[] -> ValInst[] }
    isl::map LoadValInst = makeValInst(
        Load, Stmt, LI->getLoopFor(Load->getParent()), Stmt->isBlockStmt());

    // { DomainRead[] -> [Element[] -> DomainRead[]] }
    isl::map IncludeElement = AccRel.domain_map().curry();

    // { [Element[] -> DomainRead[]] -> ValInst[] }
    isl::map EltLoadValInst = LoadValInst.apply_domain(IncludeElement);

    AllReadValInst = AllReadValInst.add_map(EltLoadValInst);
  }
}

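/// Determine the written ValInst of @p MA, if possible: either the stored
/// value of a must-write that writes a single element of matching type, or
/// the null value for a memset with a zero constant. Otherwise, a null
/// isl::union_map is returned.
///
/// @param MA     The write access to inspect.
/// @param AccRel { Domain[] -> Element[] } (access relation of @p MA)
///
/// @return { Domain[] -> ValInst[] } or a null map if the value is unknown.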
isl::union_map ZoneAlgorithm::getWrittenValue(MemoryAccess *MA,
                                              isl::map AccRel) {
  if (!MA->isMustWrite())
    return {};

  Value *AccVal = MA->getAccessValue();
  ScopStmt *Stmt = MA->getStatement();
  Instruction *AccInst = MA->getAccessInstruction();

  // Write a value to a single element.
  auto L = MA->isOriginalArrayKind() ? LI->getLoopFor(AccInst->getParent())
                                     : Stmt->getSurroundingLoop();
  if (AccVal &&
      AccVal->getType() == MA->getLatestScopArrayInfo()->getElementType() &&
      AccRel.is_single_valued().is_true())
    return makeNormalizedValInst(AccVal, Stmt, L);

  // memset(_, '0', ) is equivalent to writing the null value to all touched
  // elements. isMustWrite() ensures that all of an element's bytes are
  // overwritten.
  if (auto *Memset = dyn_cast<MemSetInst>(AccInst)) {
    auto *WrittenConstant = dyn_cast<Constant>(Memset->getValue());
    Type *Ty = MA->getLatestScopArrayInfo()->getElementType();
    if (WrittenConstant && WrittenConstant->isZeroValue()) {
      Constant *Zero = Constant::getNullValue(Ty);
      return makeNormalizedValInst(Zero, Stmt, L);
    }
  }

  return {};
}

void ZoneAlgorithm::addArrayWriteAccess(MemoryAccess *MA) {
  assert(MA->isLatestArrayKind());
  assert(MA->isWrite());
  auto *Stmt = MA->getStatement();

  // { Domain[] -> Element[] }
  isl::map AccRel = intersectRange(getAccessRelationFor(MA), CompatibleElts);

  if (MA->isMustWrite())
    AllMustWrites = AllMustWrites.add_map(AccRel);

  if (MA->isMayWrite())
    AllMayWrites = AllMayWrites.add_map(AccRel);

  // { Domain[] -> ValInst[] }
  isl::union_map WriteValInstance = getWrittenValue(MA, AccRel);
  if (!WriteValInstance)
    WriteValInstance = makeUnknownForDomain(Stmt);

  // { Domain[] -> [Element[] -> Domain[]] }
  isl::map IncludeElement = AccRel.domain_map().curry();

  // { [Element[] -> DomainWrite[]] -> ValInst[] }
  isl::union_map EltWriteValInst =
      WriteValInstance.apply_domain(IncludeElement);

  AllWriteValInst = AllWriteValInst.unite(EltWriteValInst);
}

/// For an llvm::Value defined in @p DefStmt, compute the RAW dependency for a
/// use in every instance of @p UseStmt.
///
/// @param UseStmt Statement a scalar is used in.
/// @param DefStmt Statement a scalar is defined in.
///
/// @return { DomainUse[] -> DomainDef[] }
isl::map ZoneAlgorithm::computeUseToDefFlowDependency(ScopStmt *UseStmt,
                                                      ScopStmt *DefStmt) {
  // { DomainUse[] -> Scatter[] }
  isl::map UseScatter = getScatterFor(UseStmt);

  // { Zone[] -> DomainDef[] }
  isl::map ReachDefZone = getScalarReachingDefinition(DefStmt);

  // { Scatter[] -> DomainDef[] }
  isl::map ReachDefTimepoints =
      convertZoneToTimepoints(ReachDefZone, isl::dim::in, false, true);

  // { DomainUse[] -> DomainDef[] }
  return UseScatter.apply_range(ReachDefTimepoints);
}

/// Return whether @p PHI refers (also transitively through other PHIs) to
/// itself.
///
/// loop:
///   %phi1 = phi [0, %preheader], [%phi1, %loop]
///   br i1 %c, label %loop, label %exit
///
/// exit:
///   %phi2 = phi [%phi1, %bb]
///
/// In this example, %phi1 is recursive, but %phi2 is not.
static bool isRecursivePHI(const PHINode *PHI) {
  SmallVector<const PHINode *, 8> Worklist;
  SmallPtrSet<const PHINode *, 8> Visited;
  Worklist.push_back(PHI);

  while (!Worklist.empty()) {
    const PHINode *Cur = Worklist.pop_back_val();

    if (Visited.count(Cur))
      continue;
    Visited.insert(Cur);

    for (const Use &Incoming : Cur->incoming_values()) {
      Value *IncomingVal = Incoming.get();
      auto *IncomingPHI = dyn_cast<PHINode>(IncomingVal);
      if (!IncomingPHI)
        continue;

      if (IncomingPHI == PHI)
        return true;
      Worklist.push_back(IncomingPHI);
    }
  }
  return false;
}

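/// For the PHI represented by @p SAI, compute which incoming-block write
/// provides the value read by each instance of the PHI read access, i.e. the
/// last PHI write executed before that read.
///
/// Illustration with hypothetical statement names: for a loop-header PHI read
/// in Stmt_header[i], the result would typically relate Stmt_header[i] to the
/// latch write Stmt_latch[i - 1] for i > 0.
///
/// @return { DomainPHIRead[] -> DomainPHIWrite[] }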
isl::union_map ZoneAlgorithm::computePerPHI(const ScopArrayInfo *SAI) {
  // TODO: If the PHI has an incoming block from before the SCoP, it is not
  // represented in any ScopStmt.

  auto *PHI = cast<PHINode>(SAI->getBasePtr());
  auto It = PerPHIMaps.find(PHI);
  if (It != PerPHIMaps.end())
    return It->second;

  assert(SAI->isPHIKind());

  // { DomainPHIWrite[] -> Scatter[] }
  isl::union_map PHIWriteScatter = makeEmptyUnionMap();

  // Collect all incoming block timepoints.
  for (MemoryAccess *MA : S->getPHIIncomings(SAI)) {
    isl::map Scatter = getScatterFor(MA);
    PHIWriteScatter = PHIWriteScatter.add_map(Scatter);
  }

  // { DomainPHIRead[] -> Scatter[] }
  isl::map PHIReadScatter = getScatterFor(S->getPHIRead(SAI));

  // { DomainPHIRead[] -> Scatter[] }
  isl::map BeforeRead = beforeScatter(PHIReadScatter, true);

  // { Scatter[] }
  isl::set WriteTimes = singleton(PHIWriteScatter.range(), ScatterSpace);

  // { DomainPHIRead[] -> Scatter[] }
  isl::map PHIWriteTimes = BeforeRead.intersect_range(WriteTimes);

  // Remove instances outside the context.
  PHIWriteTimes = PHIWriteTimes.intersect_params(S->getAssumedContext());
  PHIWriteTimes = subtractParams(PHIWriteTimes, S->getInvalidContext());

  isl::map LastPerPHIWrites = PHIWriteTimes.lexmax();

  // { DomainPHIRead[] -> DomainPHIWrite[] }
  isl::union_map Result =
      isl::union_map(LastPerPHIWrites).apply_range(PHIWriteScatter.reverse());
  assert(!Result.is_single_valued().is_false());
  assert(!Result.is_injective().is_false());

  PerPHIMaps.insert({PHI, Result});
  return Result;
}

isl::union_set ZoneAlgorithm::makeEmptyUnionSet() const {
  return isl::union_set::empty(ParamSpace);
}

isl::union_map ZoneAlgorithm::makeEmptyUnionMap() const {
  return isl::union_map::empty(ParamSpace);
}

void ZoneAlgorithm::collectCompatibleElts() {
  // First find all the incompatible elements, then take the complement.
  // We compile the list of compatible (rather than incompatible) elements so
  // users can intersect with the list, not requiring a subtract operation. It
  // also allows us to define a 'universe' of all elements and makes it more
  // explicit which array elements can be used.
  isl::union_set AllElts = makeEmptyUnionSet();
  isl::union_set IncompatibleElts = makeEmptyUnionSet();

  for (auto &Stmt : *S)
    collectIncompatibleElts(&Stmt, IncompatibleElts, AllElts);

  NumIncompatibleArrays += isl_union_set_n_set(IncompatibleElts.get());
  CompatibleElts = AllElts.subtract(IncompatibleElts);
  NumCompatibleArrays += isl_union_set_n_set(CompatibleElts.get());
}

isl::map ZoneAlgorithm::getScatterFor(ScopStmt *Stmt) const {
  isl::space ResultSpace =
      Stmt->getDomainSpace().map_from_domain_and_range(ScatterSpace);
  return Schedule.extract_map(ResultSpace);
}

isl::map ZoneAlgorithm::getScatterFor(MemoryAccess *MA) const {
  return getScatterFor(MA->getStatement());
}

isl::union_map ZoneAlgorithm::getScatterFor(isl::union_set Domain) const {
  return Schedule.intersect_domain(Domain);
}

isl::map ZoneAlgorithm::getScatterFor(isl::set Domain) const {
  auto ResultSpace = Domain.get_space().map_from_domain_and_range(ScatterSpace);
  auto UDomain = isl::union_set(Domain);
  auto UResult = getScatterFor(std::move(UDomain));
  auto Result = singleton(std::move(UResult), std::move(ResultSpace));
  assert(!Result || Result.domain().is_equal(Domain) == isl_bool_true);
  return Result;
}

isl::set ZoneAlgorithm::getDomainFor(ScopStmt *Stmt) const {
  return Stmt->getDomain().remove_redundancies();
}

isl::set ZoneAlgorithm::getDomainFor(MemoryAccess *MA) const {
  return getDomainFor(MA->getStatement());
}

isl::map ZoneAlgorithm::getAccessRelationFor(MemoryAccess *MA) const {
  auto Domain = getDomainFor(MA);
  auto AccRel = MA->getLatestAccessRelation();
  return AccRel.intersect_domain(Domain);
}

isl::map ZoneAlgorithm::getDefToTarget(ScopStmt *DefStmt,
                                       ScopStmt *TargetStmt) {
  // No translation required if the definition is already at the target.
  if (TargetStmt == DefStmt)
    return isl::map::identity(
        getDomainFor(TargetStmt).get_space().map_from_set());

  isl::map &Result = DefToTargetCache[std::make_pair(TargetStmt, DefStmt)];

  // This is a shortcut in case the schedule is still the original and
  // TargetStmt is in the same loop as DefStmt or nested inside it. With the
  // additional assumption that operand trees do not cross DefStmt's loop
  // header, the coordinates a TargetStmt instance shares with DefStmt are the
  // same as the corresponding DefStmt instance's coordinates. All TargetStmt
  // instances with this prefix share the same DefStmt instance.
  // Model:
  //
  //   for (int i = 0; i < N; i+=1) {
  // DefStmt:
  //    D = ...;
  //    for (int j = 0; j < N; j+=1) {
  // TargetStmt:
  //      use(D);
  //    }
  //  }
  //
  // Here, the value used in TargetStmt is defined in the corresponding
  // DefStmt, i.e.
  //
  //   { DefStmt[i] -> TargetStmt[i,j] }
  //
  // In practice, this should cover the majority of cases.
  if (!Result && S->isOriginalSchedule() &&
      isInsideLoop(DefStmt->getSurroundingLoop(),
                   TargetStmt->getSurroundingLoop())) {
    isl::set DefDomain = getDomainFor(DefStmt);
    isl::set TargetDomain = getDomainFor(TargetStmt);
    assert(DefDomain.dim(isl::dim::set) <= TargetDomain.dim(isl::dim::set));

    Result = isl::map::from_domain_and_range(DefDomain, TargetDomain);
    for (unsigned i = 0, DefDims = DefDomain.dim(isl::dim::set); i < DefDims;
         i += 1)
      Result = Result.equate(isl::dim::in, i, isl::dim::out, i);
  }

  if (!Result) {
    // { DomainDef[] -> DomainTarget[] }
    Result = computeUseToDefFlowDependency(TargetStmt, DefStmt).reverse();
    simplify(Result);
  }

  return Result;
}

isl::map ZoneAlgorithm::getScalarReachingDefinition(ScopStmt *Stmt) {
  auto &Result = ScalarReachDefZone[Stmt];
  if (Result)
    return Result;

  auto Domain = getDomainFor(Stmt);
  Result = computeScalarReachingDefinition(Schedule, Domain, false, true);
  simplify(Result);

  return Result;
}

isl::map ZoneAlgorithm::getScalarReachingDefinition(isl::set DomainDef) {
  auto DomId = DomainDef.get_tuple_id();
  auto *Stmt = static_cast<ScopStmt *>(isl_id_get_user(DomId.get()));

  auto StmtResult = getScalarReachingDefinition(Stmt);

  return StmtResult.intersect_range(DomainDef);
}

isl::map ZoneAlgorithm::makeUnknownForDomain(ScopStmt *Stmt) const {
  return ::makeUnknownForDomain(getDomainFor(Stmt));
}

isl::id ZoneAlgorithm::makeValueId(Value *V) {
  if (!V)
    return nullptr;

  auto &Id = ValueIds[V];
  if (Id.is_null()) {
    auto Name = getIslCompatibleName("Val_", V, ValueIds.size() - 1,
                                     std::string(), UseInstructionNames);
    Id = isl::id::alloc(IslCtx.get(), Name.c_str(), V);
  }
  return Id;
}

isl::space ZoneAlgorithm::makeValueSpace(Value *V) {
  auto Result = ParamSpace.set_from_params();
  return Result.set_tuple_id(isl::dim::set, makeValueId(V));
}

isl::set ZoneAlgorithm::makeValueSet(Value *V) {
  auto Space = makeValueSpace(V);
  return isl::set::universe(Space);
}

isl::map ZoneAlgorithm::makeValInst(Value *Val, ScopStmt *UserStmt, Loop *Scope,
                                    bool IsCertain) {
  // If the definition/write is conditional, the value at the location could
  // be either the written value or the old value. Since we cannot know which
  // one, consider the value to be unknown.
  if (!IsCertain)
    return makeUnknownForDomain(UserStmt);

  auto DomainUse = getDomainFor(UserStmt);
  auto VUse = VirtualUse::create(S, UserStmt, Scope, Val, true);
  switch (VUse.getKind()) {
  case VirtualUse::Constant:
  case VirtualUse::Block:
  case VirtualUse::Hoisted:
  case VirtualUse::ReadOnly: {
    // The definition does not depend on the statement which uses it.
    auto ValSet = makeValueSet(Val);
    return isl::map::from_domain_and_range(DomainUse, ValSet);
  }

  case VirtualUse::Synthesizable: {
    auto *ScevExpr = VUse.getScevExpr();
    auto UseDomainSpace = DomainUse.get_space();

    // Construct the SCEV space.
    // TODO: Add only the induction variables referenced in SCEVAddRecExpr
    // expressions, not just all of them.
    auto ScevId = isl::manage(isl_id_alloc(
        UseDomainSpace.get_ctx().get(), nullptr, const_cast<SCEV *>(ScevExpr)));

    auto ScevSpace = UseDomainSpace.drop_dims(isl::dim::set, 0, 0);
    ScevSpace = ScevSpace.set_tuple_id(isl::dim::set, ScevId);

    // { DomainUse[] -> ScevExpr[] }
    auto ValInst =
        isl::map::identity(UseDomainSpace.map_from_domain_and_range(ScevSpace));
    return ValInst;
  }

  case VirtualUse::Intra: {
    // Definition and use are in the same statement. We do not need to compute
    // a reaching definition.

    // { llvm::Value }
    auto ValSet = makeValueSet(Val);

    // { UserDomain[] -> llvm::Value }
    auto ValInstSet = isl::map::from_domain_and_range(DomainUse, ValSet);

    // { UserDomain[] -> [UserDomain[] -> llvm::Value] }
    auto Result = ValInstSet.domain_map().reverse();
    simplify(Result);
    return Result;
  }

  case VirtualUse::Inter: {
    // The value is defined in a different statement.

    auto *Inst = cast<Instruction>(Val);
    auto *ValStmt = S->getStmtFor(Inst);

    // If the llvm::Value is defined in a removed Stmt, we cannot derive its
    // domain. We could use an arbitrary statement, but this could result in
    // different ValInst[] for the same llvm::Value.
    if (!ValStmt)
      return ::makeUnknownForDomain(DomainUse);

    // { DomainUse[] -> DomainDef[] }
    auto UsedInstance = getDefToTarget(ValStmt, UserStmt).reverse();

    // { llvm::Value }
    auto ValSet = makeValueSet(Val);

    // { DomainUse[] -> llvm::Value[] }
    auto ValInstSet = isl::map::from_domain_and_range(DomainUse, ValSet);

    // { DomainUse[] -> [DomainDef[] -> llvm::Value] }
    auto Result = UsedInstance.range_product(ValInstSet);

    simplify(Result);
    return Result;
  }
  }
  llvm_unreachable("Unhandled use type");
}


/// Remove all computed PHIs from @p Input and replace them by their incoming
/// values.
///
/// @param Input        { [] -> ValInst[] }
/// @param ComputedPHIs Set of PHIs that are replaced. Their ValInsts must
///                     appear on the LHS of @p NormalizeMap.
/// @param NormalizeMap { ValInst[] -> ValInst[] }
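///
/// Illustration with made-up names: if %phi is a computed PHI whose single
/// incoming value in Stmt_body is %add, then a ValInst such as
/// [Stmt_next[i] -> Val_phi[]] appearing in @p Input is replaced by the
/// incoming ValInst [Stmt_body[i] -> Val_add[]] taken from @p NormalizeMap.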
static isl::union_map normalizeValInst(isl::union_map Input,
                                       const DenseSet<PHINode *> &ComputedPHIs,
                                       isl::union_map NormalizeMap) {
  isl::union_map Result = isl::union_map::empty(Input.get_space());
  for (isl::map Map : Input.get_map_list()) {
    isl::space Space = Map.get_space();
    isl::space RangeSpace = Space.range();

    // Instructions within the SCoP are always wrapped. Non-wrapped tuples
    // are therefore invariant in the SCoP and don't need normalization.
    if (!RangeSpace.is_wrapping()) {
      Result = Result.add_map(Map);
      continue;
    }

    auto *PHI = dyn_cast<PHINode>(static_cast<Value *>(
        RangeSpace.unwrap().get_tuple_id(isl::dim::out).get_user()));

    // If no normalization is necessary, then the ValInst stands for itself.
    if (!ComputedPHIs.count(PHI)) {
      Result = Result.add_map(Map);
      continue;
    }

    // Otherwise, apply the normalization.
    isl::union_map Mapped = isl::union_map(Map).apply_range(NormalizeMap);
    Result = Result.unite(Mapped);
    NumPHINormialization++;
  }
  return Result;
}

isl::union_map ZoneAlgorithm::makeNormalizedValInst(llvm::Value *Val,
                                                    ScopStmt *UserStmt,
                                                    llvm::Loop *Scope,
                                                    bool IsCertain) {
  isl::map ValInst = makeValInst(Val, UserStmt, Scope, IsCertain);
  isl::union_map Normalized =
      normalizeValInst(ValInst, ComputedPHIs, NormalizeMap);
  return Normalized;
}

bool ZoneAlgorithm::isCompatibleAccess(MemoryAccess *MA) {
  if (!MA)
    return false;
  if (!MA->isLatestArrayKind())
    return false;
  Instruction *AccInst = MA->getAccessInstruction();
  return isa<StoreInst>(AccInst) || isa<LoadInst>(AccInst);
}

bool ZoneAlgorithm::isNormalizable(MemoryAccess *MA) {
  assert(MA->isRead());

  // Exclude ExitPHIs; we are assuming that a normalizable PHI has a READ
  // MemoryAccess.
  if (!MA->isOriginalPHIKind())
    return false;

  // Exclude recursive PHIs; normalizing them would require a transitive
  // closure.
  auto *PHI = cast<PHINode>(MA->getAccessInstruction());
  if (RecursivePHIs.count(PHI))
    return false;

  // Ensure that each incoming value can be represented by a ValInst[]. We
  // represent values from statements associated with multiple incoming values
  // by the PHI itself, but we do not handle this case yet (especially in
  // isNormalized()) when normalizing.
  const ScopArrayInfo *SAI = MA->getOriginalScopArrayInfo();
  auto Incomings = S->getPHIIncomings(SAI);
  for (MemoryAccess *Incoming : Incomings) {
    if (Incoming->getIncoming().size() != 1)
      return false;
  }

  return true;
}

isl::boolean ZoneAlgorithm::isNormalized(isl::map Map) {
  isl::space Space = Map.get_space();
  isl::space RangeSpace = Space.range();

  isl::boolean IsWrapping = RangeSpace.is_wrapping();
  if (!IsWrapping.is_true())
    return !IsWrapping;
  isl::space Unwrapped = RangeSpace.unwrap();

  isl::id OutTupleId = Unwrapped.get_tuple_id(isl::dim::out);
  if (OutTupleId.is_null())
    return isl::boolean();
  auto *PHI = dyn_cast<PHINode>(static_cast<Value *>(OutTupleId.get_user()));
  if (!PHI)
    return true;

  isl::id InTupleId = Unwrapped.get_tuple_id(isl::dim::in);
  if (InTupleId.is_null())
    return isl::boolean();
  auto *IncomingStmt = static_cast<ScopStmt *>(InTupleId.get_user());
  MemoryAccess *PHIRead = IncomingStmt->lookupPHIReadOf(PHI);
  if (!isNormalizable(PHIRead))
    return true;

  return false;
}

isl::boolean ZoneAlgorithm::isNormalized(isl::union_map UMap) {
  isl::boolean Result = true;
  for (isl::map Map : UMap.get_map_list()) {
    Result = isNormalized(Map);
    if (Result.is_true())
      continue;
    break;
  }
  return Result;
}

void ZoneAlgorithm::computeCommon() {
  AllReads = makeEmptyUnionMap();
  AllMayWrites = makeEmptyUnionMap();
  AllMustWrites = makeEmptyUnionMap();
  AllWriteValInst = makeEmptyUnionMap();
  AllReadValInst = makeEmptyUnionMap();

  // Default to empty, i.e. no normalization/replacement is taking place. Call
  // computeNormalizedPHIs() to initialize.
  NormalizeMap = makeEmptyUnionMap();
  ComputedPHIs.clear();

  for (auto &Stmt : *S) {
    for (auto *MA : Stmt) {
      if (!MA->isLatestArrayKind())
        continue;

      if (MA->isRead())
        addArrayReadAccess(MA);

      if (MA->isWrite())
        addArrayWriteAccess(MA);
    }
  }

  // { DomainWrite[] -> Element[] }
  AllWrites = AllMustWrites.unite(AllMayWrites);

  // { [Element[] -> Zone[]] -> DomainWrite[] }
  WriteReachDefZone =
      computeReachingDefinition(Schedule, AllWrites, false, true);
  simplify(WriteReachDefZone);
}

void ZoneAlgorithm::computeNormalizedPHIs() {
  // Determine which PHIs can reference themselves. They are excluded from
  // normalization to avoid problems with transitive closures.
  for (ScopStmt &Stmt : *S) {
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isPHIKind())
        continue;
      if (!MA->isRead())
        continue;

      // TODO: Can be more efficient since isRecursivePHI can theoretically
      // determine recursiveness for multiple values and/or cache results.
      auto *PHI = cast<PHINode>(MA->getAccessInstruction());
      if (isRecursivePHI(PHI)) {
        NumRecursivePHIs++;
        RecursivePHIs.insert(PHI);
      }
    }
  }

  // { PHIValInst[] -> IncomingValInst[] }
  isl::union_map AllPHIMaps = makeEmptyUnionMap();

  // Discover new PHIs and try to normalize them.
  DenseSet<PHINode *> AllPHIs;
  for (ScopStmt &Stmt : *S) {
    for (MemoryAccess *MA : Stmt) {
      if (!MA->isOriginalPHIKind())
        continue;
      if (!MA->isRead())
        continue;
      if (!isNormalizable(MA))
        continue;

      auto *PHI = cast<PHINode>(MA->getAccessInstruction());
      const ScopArrayInfo *SAI = MA->getOriginalScopArrayInfo();

      // { PHIDomain[] -> PHIValInst[] }
      isl::map PHIValInst = makeValInst(PHI, &Stmt, Stmt.getSurroundingLoop());

      // { IncomingDomain[] -> IncomingValInst[] }
      isl::union_map IncomingValInsts = makeEmptyUnionMap();

      // Get all incoming values.
      for (MemoryAccess *MA : S->getPHIIncomings(SAI)) {
        ScopStmt *IncomingStmt = MA->getStatement();

        auto Incoming = MA->getIncoming();
        assert(Incoming.size() == 1 && "The incoming value must be "
                                       "representable by something else than "
                                       "the PHI itself");
        Value *IncomingVal = Incoming[0].second;

        // { IncomingDomain[] -> IncomingValInst[] }
        isl::map IncomingValInst = makeValInst(
            IncomingVal, IncomingStmt, IncomingStmt->getSurroundingLoop());

        IncomingValInsts = IncomingValInsts.add_map(IncomingValInst);
      }

      // Determine which instance of the PHI statement corresponds to which
      // incoming value.
      // { PHIDomain[] -> IncomingDomain[] }
      isl::union_map PerPHI = computePerPHI(SAI);

      // { PHIValInst[] -> IncomingValInst[] }
      isl::union_map PHIMap =
          PerPHI.apply_domain(PHIValInst).apply_range(IncomingValInsts);
      assert(!PHIMap.is_single_valued().is_false());

      // Resolve transitiveness: The incoming value of the newly discovered PHI
      // may reference a previously normalized PHI. At the same time, already
      // normalized PHIs might be normalized to the new PHI. At the end, none of
      // the PHIs may appear on the right-hand side of the normalization map.
      PHIMap = normalizeValInst(PHIMap, AllPHIs, AllPHIMaps);
      AllPHIs.insert(PHI);
      AllPHIMaps = normalizeValInst(AllPHIMaps, AllPHIs, PHIMap);

      AllPHIMaps = AllPHIMaps.unite(PHIMap);
      NumNormalizablePHIs++;
    }
  }
  simplify(AllPHIMaps);

  // Apply the normalization.
  ComputedPHIs = AllPHIs;
  NormalizeMap = AllPHIMaps;

  assert(!NormalizeMap || isNormalized(NormalizeMap));
}

void ZoneAlgorithm::printAccesses(llvm::raw_ostream &OS, int Indent) const {
  OS.indent(Indent) << "After accesses {\n";
  for (auto &Stmt : *S) {
    OS.indent(Indent + 4) << Stmt.getBaseName() << "\n";
    for (auto *MA : Stmt)
      MA->print(OS);
  }
  OS.indent(Indent) << "}\n";
}

isl::union_map ZoneAlgorithm::computeKnownFromMustWrites() const {
  // { [Element[] -> Zone[]] -> [Element[] -> DomainWrite[]] }
  isl::union_map EltReachdDef = distributeDomain(WriteReachDefZone.curry());

  // { [Element[] -> DomainWrite[]] -> ValInst[] }
  isl::union_map AllKnownWriteValInst = filterKnownValInst(AllWriteValInst);

  // { [Element[] -> Zone[]] -> ValInst[] }
  return EltReachdDef.apply_range(AllKnownWriteValInst);
}

isl::union_map ZoneAlgorithm::computeKnownFromLoad() const {
  // { Element[] }
  isl::union_set AllAccessedElts = AllReads.range().unite(AllWrites.range());

  // { Element[] -> Scatter[] }
  isl::union_map EltZoneUniverse = isl::union_map::from_domain_and_range(
      AllAccessedElts, isl::set::universe(ScatterSpace));

  // This assumes there are no "holes" in
  // isl_union_map_domain(WriteReachDefZone); alternatively, compute the zones
  // before the first write or of elements that are not written at all.
  // { Element[] -> Scatter[] }
  isl::union_set NonReachDef =
      EltZoneUniverse.wrap().subtract(WriteReachDefZone.domain());

  // { [Element[] -> Zone[]] -> ReachDefId[] }
  isl::union_map DefZone =
      WriteReachDefZone.unite(isl::union_map::from_domain(NonReachDef));

  // { [Element[] -> Scatter[]] -> Element[] }
  isl::union_map EltZoneElt = EltZoneUniverse.domain_map();

  // { [Element[] -> Zone[]] -> [Element[] -> ReachDefId[]] }
  isl::union_map DefZoneEltDefId = EltZoneElt.range_product(DefZone);

  // { Element[] -> [Zone[] -> ReachDefId[]] }
  isl::union_map EltDefZone = DefZone.curry();

  // { [Element[] -> Zone[]] -> [Element[] -> ReachDefId[]] }
  isl::union_map EltZoneEltDefid = distributeDomain(EltDefZone);

  // { [Element[] -> Scatter[]] -> DomainRead[] }
  isl::union_map Reads = AllReads.range_product(Schedule).reverse();

  // { [Element[] -> Scatter[]] -> [Element[] -> DomainRead[]] }
  isl::union_map ReadsElt = EltZoneElt.range_product(Reads);

  // { [Element[] -> Scatter[]] -> ValInst[] }
  isl::union_map ScatterKnown = ReadsElt.apply_range(AllReadValInst);

  // { [Element[] -> ReachDefId[]] -> ValInst[] }
  isl::union_map DefidKnown =
      DefZoneEltDefId.apply_domain(ScatterKnown).reverse();

  // { [Element[] -> Zone[]] -> ValInst[] }
  return DefZoneEltDefId.apply_range(DefidKnown);
}

isl::union_map ZoneAlgorithm::computeKnown(bool FromWrite,
                                           bool FromRead) const {
  isl::union_map Result = makeEmptyUnionMap();

  if (FromWrite)
    Result = Result.unite(computeKnownFromMustWrites());

  if (FromRead)
    Result = Result.unite(computeKnownFromLoad());

  simplify(Result);
  return Result;
}
1164