1 /*
2  * Copyright 2011      INRIA Saclay
3  * Copyright 2012-2014 Ecole Normale Superieure
4  * Copyright 2015-2016 Sven Verdoolaege
5  * Copyright 2016      INRIA Paris
6  * Copyright 2017      Sven Verdoolaege
7  *
8  * Use of this software is governed by the MIT license
9  *
10  * Written by Sven Verdoolaege, INRIA Saclay - Ile-de-France,
11  * Parc Club Orsay Universite, ZAC des vignes, 4 rue Jacques Monod,
12  * 91893 Orsay, France
13  * and Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
14  * and Centre de Recherche Inria de Paris, 2 rue Simone Iff - Voie DQ12,
15  * CS 42112, 75589 Paris Cedex 12, France
16  */
17 
18 #include <isl_ctx_private.h>
19 #include <isl_map_private.h>
20 #include <isl_space_private.h>
21 #include <isl_aff_private.h>
22 #include <isl/hash.h>
23 #include <isl/id.h>
24 #include <isl/constraint.h>
25 #include <isl/schedule.h>
26 #include <isl_schedule_constraints.h>
27 #include <isl/schedule_node.h>
28 #include <isl_mat_private.h>
29 #include <isl_vec_private.h>
30 #include <isl/set.h>
31 #include <isl_union_set_private.h>
32 #include <isl_seq.h>
33 #include <isl_tab.h>
34 #include <isl_dim_map.h>
35 #include <isl/map_to_basic_set.h>
36 #include <isl_sort.h>
37 #include <isl_options_private.h>
38 #include <isl_tarjan.h>
39 #include <isl_morph.h>
40 #include <isl/ilp.h>
41 #include <isl_val_private.h>
42 
43 /*
44  * The scheduling algorithm implemented in this file was inspired by
45  * Bondhugula et al., "Automatic Transformations for Communication-Minimized
46  * Parallelization and Locality Optimization in the Polyhedral Model".
47  *
48  * For a detailed description of the variant implemented in isl,
49  * see Verdoolaege and Janssens, "Scheduling for PPCG" (2017).
50  */
51 
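/* A minimal usage sketch of the scheduler implemented in this file,
 * going through the public schedule constraints API.  The domain and
 * the validity dependences below are made-up examples and this helper
 * is not called from anywhere; it only illustrates how the code below
 * gets exercised.
 */
static __isl_give isl_schedule *example_compute_schedule(isl_ctx *ctx)
{
	isl_union_set *domain;
	isl_union_map *validity;
	isl_schedule_constraints *sc;

	domain = isl_union_set_read_from_str(ctx,
		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
	validity = isl_union_map_read_from_str(ctx, "{ S[i] -> T[i] }");
	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);

	/* Internally, this constructs an isl_sched_graph (see below),
	 * with one node per element set in the domain and
	 * one edge per dependence relation.
	 */
	return isl_schedule_constraints_compute_schedule(sc);
}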
52 
53 /* Internal information about a node that is used during the construction
54  * of a schedule.
55  * space represents the original space in which the domain lives;
56  *	that is, the space is not affected by compression
57  * sched is a matrix representation of the schedule being constructed
58  *	for this node; if compressed is set, then this schedule is
59  *	defined over the compressed domain space
60  * sched_map is an isl_map representation of the same (partial) schedule
61  *	sched_map may be NULL; if compressed is set, then this map
62  *	is defined over the uncompressed domain space
63  * rank is the number of linearly independent rows in the linear part
64  *	of sched
65  * the rows of "vmap" represent a change of basis for the node
66  *	variables; the first rank rows span the linear part of
67  *	the schedule rows; the remaining rows are linearly independent
68  * the rows of "indep" represent linear combinations of the schedule
69  * coefficients that are non-zero when the schedule coefficients are
70  * linearly independent of previously computed schedule rows.
71  * start is the position in the LP problem of the first variable
72  *	in the sequence that represents the schedule coefficients of this node
73  * nvar is the dimension of the (compressed) domain
74  * nparam is the number of parameters or 0 if we are not constructing
75  *	a parametric schedule
76  *
77  * If compressed is set, then hull represents the constraints
78  * that were used to derive the compression, while compress and
79  * decompress map the original space to the compressed space and
80  * vice versa.
81  *
82  * scc is the index of the SCC (or WCC) this node belongs to
83  *
84  * "cluster" is only used inside extract_clusters and identifies
85  * the cluster of SCCs that the node belongs to.
86  *
87  * coincident contains a boolean for each of the rows of the schedule,
88  * indicating whether the corresponding scheduling dimension satisfies
89  * the coincidence constraints in the sense that the corresponding
90  * dependence distances are zero.
91  *
92  * If the schedule_treat_coalescing option is set, then
93  * "sizes" contains the sizes of the (compressed) instance set
94  * in each direction.  If there is no fixed size in a given direction,
95  * then the corresponding size value is set to infinity.
96  * If the schedule_treat_coalescing option or the schedule_max_coefficient
97  * option is set, then "max" contains the maximal values for
98  * schedule coefficients of the (compressed) variables.  If no bound
99  * needs to be imposed on a particular variable, then the corresponding
100  * value is negative.
101  * If not NULL, then "bounds" contains a non-parametric set
102  * in the compressed space that is bounded by the size in each direction.
103  */
104 struct isl_sched_node {
105 	isl_space *space;
106 	int	compressed;
107 	isl_set	*hull;
108 	isl_multi_aff *compress;
109 	isl_pw_multi_aff *decompress;
110 	isl_mat *sched;
111 	isl_map *sched_map;
112 	int	 rank;
113 	isl_mat *indep;
114 	isl_mat *vmap;
115 	int	 start;
116 	int	 nvar;
117 	int	 nparam;
118 
119 	int	 scc;
120 	int	 cluster;
121 
122 	int	*coincident;
123 
124 	isl_multi_val *sizes;
125 	isl_basic_set *bounds;
126 	isl_vec *max;
127 };
128 
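/* Does the space of the node at "entry" have the same tuples
 * as the space "val"?
 * This function is used as a comparison function by the node hash table.
 */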
129 static isl_bool node_has_tuples(const void *entry, const void *val)
130 {
131 	struct isl_sched_node *node = (struct isl_sched_node *)entry;
132 	isl_space *space = (isl_space *) val;
133 
134 	return isl_space_has_equal_tuples(node->space, space);
135 }
136 
137 static int node_scc_exactly(struct isl_sched_node *node, int scc)
138 {
139 	return node->scc == scc;
140 }
141 
142 static int node_scc_at_most(struct isl_sched_node *node, int scc)
143 {
144 	return node->scc <= scc;
145 }
146 
147 static int node_scc_at_least(struct isl_sched_node *node, int scc)
148 {
149 	return node->scc >= scc;
150 }
151 
152 /* An edge in the dependence graph.  An edge may be used to
153  * ensure validity of the generated schedule, to minimize the dependence
154  * distance, or both.
155  *
156  * map is the dependence relation, with i -> j in the map if j depends on i
157  * tagged_condition and tagged_validity contain the union of all tagged
158  *	condition or conditional validity dependence relations that
159  *	specialize the dependence relation "map"; that is,
160  *	if (i -> a) -> (j -> b) is an element of "tagged_condition"
161  *	or "tagged_validity", then i -> j is an element of "map".
162  *	If these fields are NULL, then they represent the empty relation.
163  * src is the source node
164  * dst is the sink node
165  *
166  * types is a bit vector containing the types of this edge.
167  * validity is set if the edge is used to ensure correctness
168  * coincidence is used to enforce zero dependence distances
169  * proximity is set if the edge is used to minimize dependence distances
170  * condition is set if the edge represents a condition
171  *	for a conditional validity schedule constraint
172  * local can only be set for condition edges and indicates that
173  *	the dependence distance over the edge should be zero
174  * conditional_validity is set if the edge is used to conditionally
175  *	ensure correctness
176  *
177  * For validity edges, start and end mark the sequence of inequality
178  * constraints in the LP problem that encode the validity constraint
179  * corresponding to this edge.
180  *
181  * During clustering, an edge may be marked "no_merge" if it should
182  * not be used to merge clusters.
183  * The weight is also only used during clustering and it is
184  * an indication of how many schedule dimensions on either side
185  * of the schedule constraints can be aligned.
186  * If the weight is negative, then this means that this edge was postponed
187  * by has_bounded_distances or any_no_merge.  The original weight can
188  * be retrieved by adding 1 + graph->max_weight, with "graph"
189  * the graph containing this edge.
190  */
191 struct isl_sched_edge {
192 	isl_map *map;
193 	isl_union_map *tagged_condition;
194 	isl_union_map *tagged_validity;
195 
196 	struct isl_sched_node *src;
197 	struct isl_sched_node *dst;
198 
199 	unsigned types;
200 
201 	int start;
202 	int end;
203 
204 	int no_merge;
205 	int weight;
206 };
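/* Example of the weight encoding described above (illustration only):
 * if graph->max_weight is 5 and an edge of weight 3 gets postponed,
 * its weight field becomes 3 - (1 + 5) = -3, from which the original
 * weight is recovered as -3 + 1 + 5 = 3.
 */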
207 
208 /* Is "edge" marked as being of type "type"?
209  */
210 static int is_type(struct isl_sched_edge *edge, enum isl_edge_type type)
211 {
212 	return ISL_FL_ISSET(edge->types, 1 << type);
213 }
214 
215 /* Mark "edge" as being of type "type".
216  */
217 static void set_type(struct isl_sched_edge *edge, enum isl_edge_type type)
218 {
219 	ISL_FL_SET(edge->types, 1 << type);
220 }
221 
222 /* No longer mark "edge" as being of type "type".
223  */
224 static void clear_type(struct isl_sched_edge *edge, enum isl_edge_type type)
225 {
226 	ISL_FL_CLR(edge->types, 1 << type);
227 }
228 
229 /* Is "edge" marked as a validity edge?
230  */
231 static int is_validity(struct isl_sched_edge *edge)
232 {
233 	return is_type(edge, isl_edge_validity);
234 }
235 
236 /* Mark "edge" as a validity edge.
237  */
238 static void set_validity(struct isl_sched_edge *edge)
239 {
240 	set_type(edge, isl_edge_validity);
241 }
242 
243 /* Is "edge" marked as a proximity edge?
244  */
245 static int is_proximity(struct isl_sched_edge *edge)
246 {
247 	return is_type(edge, isl_edge_proximity);
248 }
249 
250 /* Is "edge" marked as a local edge?
251  */
252 static int is_local(struct isl_sched_edge *edge)
253 {
254 	return is_type(edge, isl_edge_local);
255 }
256 
257 /* Mark "edge" as a local edge.
258  */
259 static void set_local(struct isl_sched_edge *edge)
260 {
261 	set_type(edge, isl_edge_local);
262 }
263 
264 /* No longer mark "edge" as a local edge.
265  */
266 static void clear_local(struct isl_sched_edge *edge)
267 {
268 	clear_type(edge, isl_edge_local);
269 }
270 
271 /* Is "edge" marked as a coincidence edge?
272  */
273 static int is_coincidence(struct isl_sched_edge *edge)
274 {
275 	return is_type(edge, isl_edge_coincidence);
276 }
277 
278 /* Is "edge" marked as a condition edge?
279  */
280 static int is_condition(struct isl_sched_edge *edge)
281 {
282 	return is_type(edge, isl_edge_condition);
283 }
284 
285 /* Is "edge" marked as a conditional validity edge?
286  */
287 static int is_conditional_validity(struct isl_sched_edge *edge)
288 {
289 	return is_type(edge, isl_edge_conditional_validity);
290 }
291 
292 /* Is "edge" of a type that can appear multiple times between
293  * the same pair of nodes?
294  *
295  * Condition edges and conditional validity edges may have tagged
296  * dependence relations, in which case an edge is added for each
297  * pair of tags.
298  */
299 static int is_multi_edge_type(struct isl_sched_edge *edge)
300 {
301 	return is_condition(edge) || is_conditional_validity(edge);
302 }
303 
304 /* Internal information about the dependence graph used during
305  * the construction of the schedule.
306  *
307  * intra_hmap is a cache, mapping dependence relations to their dual,
308  *	for dependences from a node to itself, possibly without
309  *	coefficients for the parameters
310  * intra_hmap_param is a cache, mapping dependence relations to their dual,
311  *	for dependences from a node to itself, including coefficients
312  *	for the parameters
313  * inter_hmap is a cache, mapping dependence relations to their dual,
314  *	for dependences between distinct nodes
315  * if compression is involved then the key for these maps
316  * is the original, uncompressed dependence relation, while
317  * the value is the dual of the compressed dependence relation.
318  *
319  * n is the number of nodes
320  * node is the list of nodes
321  * maxvar is the maximal number of variables over all nodes
322  * max_row is the allocated number of rows in the schedule
323  * n_row is the current (maximal) number of linearly independent
324  *	rows in the node schedules
325  * n_total_row is the current number of rows in the node schedules
326  * band_start is the starting row in the node schedules of the current band
327  * root is set to the original dependence graph from which this graph
328  *	is derived through splitting.  If this graph is not the result of
329  *	splitting, then the root field points to the graph itself.
330  *
331  * sorted contains a list of node indices sorted according to the
332  *	SCC to which a node belongs
333  *
334  * n_edge is the number of edges
335  * edge is the list of edges
336  * max_edge contains the maximal number of edges of each type;
337  *	in particular, it contains the number of edges in the initial graph.
338  * edge_table contains pointers into the edge array, hashed on the source
339  *	and sink spaces; there is one such table for each type;
340  *	a given edge may be referenced from more than one table
341  *	if the corresponding relation appears in more than one of the
342  *	sets of dependences; however, for each type there is only
343  *	a single edge between a given pair of source and sink space
344  *	in the entire graph
345  *
346  * node_table contains pointers into the node array, hashed on the space tuples
347  *
348  * region contains a list of variable sequences that should be non-trivial
349  *
350  * lp contains the (I)LP problem used to obtain new schedule rows
351  *
352  * src_scc and dst_scc are the source and sink SCCs of an edge with
353  *	conflicting constraints
354  *
355  * scc represents the number of components
356  * weak is set if the components are weakly connected
357  *
358  * max_weight is used during clustering and represents the maximal
359  * weight of the relevant proximity edges.
360  */
361 struct isl_sched_graph {
362 	isl_map_to_basic_set *intra_hmap;
363 	isl_map_to_basic_set *intra_hmap_param;
364 	isl_map_to_basic_set *inter_hmap;
365 
366 	struct isl_sched_node *node;
367 	int n;
368 	int maxvar;
369 	int max_row;
370 	int n_row;
371 
372 	int *sorted;
373 
374 	int n_total_row;
375 	int band_start;
376 
377 	struct isl_sched_graph *root;
378 
379 	struct isl_sched_edge *edge;
380 	int n_edge;
381 	int max_edge[isl_edge_last + 1];
382 	struct isl_hash_table *edge_table[isl_edge_last + 1];
383 
384 	struct isl_hash_table *node_table;
385 	struct isl_trivial_region *region;
386 
387 	isl_basic_set *lp;
388 
389 	int src_scc;
390 	int dst_scc;
391 
392 	int scc;
393 	int weak;
394 
395 	int max_weight;
396 };
397 
398 /* Initialize node_table based on the list of nodes.
399  */
400 static int graph_init_table(isl_ctx *ctx, struct isl_sched_graph *graph)
401 {
402 	int i;
403 
404 	graph->node_table = isl_hash_table_alloc(ctx, graph->n);
405 	if (!graph->node_table)
406 		return -1;
407 
408 	for (i = 0; i < graph->n; ++i) {
409 		struct isl_hash_table_entry *entry;
410 		uint32_t hash;
411 
412 		hash = isl_space_get_tuple_hash(graph->node[i].space);
413 		entry = isl_hash_table_find(ctx, graph->node_table, hash,
414 					    &node_has_tuples,
415 					    graph->node[i].space, 1);
416 		if (!entry)
417 			return -1;
418 		entry->data = &graph->node[i];
419 	}
420 
421 	return 0;
422 }
423 
424 /* Return a pointer to the node that lives within the given space,
425  * an invalid node if there is no such node, or NULL in case of error.
426  */
427 static struct isl_sched_node *graph_find_node(isl_ctx *ctx,
428 	struct isl_sched_graph *graph, __isl_keep isl_space *space)
429 {
430 	struct isl_hash_table_entry *entry;
431 	uint32_t hash;
432 
433 	if (!space)
434 		return NULL;
435 
436 	hash = isl_space_get_tuple_hash(space);
437 	entry = isl_hash_table_find(ctx, graph->node_table, hash,
438 				    &node_has_tuples, space, 0);
439 	if (!entry)
440 		return NULL;
441 	if (entry == isl_hash_table_entry_none)
442 		return graph->node + graph->n;
443 
444 	return entry->data;
445 }
446 
447 /* Is "node" a node in "graph"?
448  */
449 static int is_node(struct isl_sched_graph *graph,
450 	struct isl_sched_node *node)
451 {
452 	return node && node >= &graph->node[0] && node < &graph->node[graph->n];
453 }
454 
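/* Do the source and sink nodes of the edge at "entry" match those of
 * the (template) edge at "val"?
 * This function is used as a comparison function by the edge hash tables.
 */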
455 static isl_bool edge_has_src_and_dst(const void *entry, const void *val)
456 {
457 	const struct isl_sched_edge *edge = entry;
458 	const struct isl_sched_edge *temp = val;
459 
460 	return isl_bool_ok(edge->src == temp->src && edge->dst == temp->dst);
461 }
462 
463 /* Add the given edge to graph->edge_table[type].
464  */
465 static isl_stat graph_edge_table_add(isl_ctx *ctx,
466 	struct isl_sched_graph *graph, enum isl_edge_type type,
467 	struct isl_sched_edge *edge)
468 {
469 	struct isl_hash_table_entry *entry;
470 	uint32_t hash;
471 
472 	hash = isl_hash_init();
473 	hash = isl_hash_builtin(hash, edge->src);
474 	hash = isl_hash_builtin(hash, edge->dst);
475 	entry = isl_hash_table_find(ctx, graph->edge_table[type], hash,
476 				    &edge_has_src_and_dst, edge, 1);
477 	if (!entry)
478 		return isl_stat_error;
479 	entry->data = edge;
480 
481 	return isl_stat_ok;
482 }
483 
484 /* Add "edge" to all relevant edge tables.
485  * That is, for every type of the edge, add it to the corresponding table.
486  */
487 static isl_stat graph_edge_tables_add(isl_ctx *ctx,
488 	struct isl_sched_graph *graph, struct isl_sched_edge *edge)
489 {
490 	enum isl_edge_type t;
491 
492 	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
493 		if (!is_type(edge, t))
494 			continue;
495 		if (graph_edge_table_add(ctx, graph, t, edge) < 0)
496 			return isl_stat_error;
497 	}
498 
499 	return isl_stat_ok;
500 }
501 
502 /* Allocate the edge_tables based on the maximal number of edges of
503  * each type.
504  */
505 static int graph_init_edge_tables(isl_ctx *ctx, struct isl_sched_graph *graph)
506 {
507 	int i;
508 
509 	for (i = 0; i <= isl_edge_last; ++i) {
510 		graph->edge_table[i] = isl_hash_table_alloc(ctx,
511 							    graph->max_edge[i]);
512 		if (!graph->edge_table[i])
513 			return -1;
514 	}
515 
516 	return 0;
517 }
518 
519 /* If graph->edge_table[type] contains an edge from the given source
520  * to the given destination, then return the hash table entry of this edge.
521  * Otherwise, return NULL.
522  */
523 static struct isl_hash_table_entry *graph_find_edge_entry(
524 	struct isl_sched_graph *graph,
525 	enum isl_edge_type type,
526 	struct isl_sched_node *src, struct isl_sched_node *dst)
527 {
528 	isl_ctx *ctx = isl_space_get_ctx(src->space);
529 	uint32_t hash;
530 	struct isl_sched_edge temp = { .src = src, .dst = dst };
531 
532 	hash = isl_hash_init();
533 	hash = isl_hash_builtin(hash, temp.src);
534 	hash = isl_hash_builtin(hash, temp.dst);
535 	return isl_hash_table_find(ctx, graph->edge_table[type], hash,
536 				    &edge_has_src_and_dst, &temp, 0);
537 }
538 
539 
540 /* If graph->edge_table[type] contains an edge from the given source
541  * to the given destination, then return this edge.
542  * Return "none" if no such edge can be found.
543  * Return NULL on error.
544  */
545 static struct isl_sched_edge *graph_find_edge(struct isl_sched_graph *graph,
546 	enum isl_edge_type type,
547 	struct isl_sched_node *src, struct isl_sched_node *dst,
548 	struct isl_sched_edge *none)
549 {
550 	struct isl_hash_table_entry *entry;
551 
552 	entry = graph_find_edge_entry(graph, type, src, dst);
553 	if (!entry)
554 		return NULL;
555 	if (entry == isl_hash_table_entry_none)
556 		return none;
557 
558 	return entry->data;
559 }
560 
561 /* Check whether the dependence graph has an edge of the given type
562  * between the given two nodes.
563  */
564 static isl_bool graph_has_edge(struct isl_sched_graph *graph,
565 	enum isl_edge_type type,
566 	struct isl_sched_node *src, struct isl_sched_node *dst)
567 {
568 	struct isl_sched_edge dummy;
569 	struct isl_sched_edge *edge;
570 	isl_bool empty;
571 
572 	edge = graph_find_edge(graph, type, src, dst, &dummy);
573 	if (!edge)
574 		return isl_bool_error;
575 	if (edge == &dummy)
576 		return isl_bool_false;
577 
578 	empty = isl_map_plain_is_empty(edge->map);
579 
580 	return isl_bool_not(empty);
581 }
582 
583 /* Look for any edge with the same src, dst and map fields as "model".
584  *
585  * Return the matching edge if one can be found.
586  * Return "model" if no matching edge is found.
587  * Return NULL on error.
588  */
589 static struct isl_sched_edge *graph_find_matching_edge(
590 	struct isl_sched_graph *graph, struct isl_sched_edge *model)
591 {
592 	enum isl_edge_type i;
593 	struct isl_sched_edge *edge;
594 
595 	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
596 		int is_equal;
597 
598 		edge = graph_find_edge(graph, i, model->src, model->dst, model);
599 		if (!edge)
600 			return NULL;
601 		if (edge == model)
602 			continue;
603 		is_equal = isl_map_plain_is_equal(model->map, edge->map);
604 		if (is_equal < 0)
605 			return NULL;
606 		if (is_equal)
607 			return edge;
608 	}
609 
610 	return model;
611 }
612 
613 /* Remove the given edge from all the edge_tables that refer to it.
614  */
615 static isl_stat graph_remove_edge(struct isl_sched_graph *graph,
616 	struct isl_sched_edge *edge)
617 {
618 	isl_ctx *ctx = isl_map_get_ctx(edge->map);
619 	enum isl_edge_type i;
620 
621 	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
622 		struct isl_hash_table_entry *entry;
623 
624 		entry = graph_find_edge_entry(graph, i, edge->src, edge->dst);
625 		if (!entry)
626 			return isl_stat_error;
627 		if (entry == isl_hash_table_entry_none)
628 			continue;
629 		if (entry->data != edge)
630 			continue;
631 		isl_hash_table_remove(ctx, graph->edge_table[i], entry);
632 	}
633 
634 	return isl_stat_ok;
635 }
636 
637 /* Check whether the dependence graph has any edge
638  * between the given two nodes.
639  */
640 static isl_bool graph_has_any_edge(struct isl_sched_graph *graph,
641 	struct isl_sched_node *src, struct isl_sched_node *dst)
642 {
643 	enum isl_edge_type i;
644 	isl_bool r;
645 
646 	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
647 		r = graph_has_edge(graph, i, src, dst);
648 		if (r < 0 || r)
649 			return r;
650 	}
651 
652 	return r;
653 }
654 
655 /* Check whether the dependence graph has a validity edge
656  * between the given two nodes.
657  *
658  * Conditional validity edges are essentially validity edges that
659  * can be ignored if the corresponding condition edges are iteration private.
660  * Here, we are only checking for the presence of validity
661  * edges, so we need to consider the conditional validity edges too.
662  * In particular, this function is used during the detection
663  * of strongly connected components and we cannot ignore
664  * conditional validity edges during this detection.
665  */
666 static isl_bool graph_has_validity_edge(struct isl_sched_graph *graph,
667 	struct isl_sched_node *src, struct isl_sched_node *dst)
668 {
669 	isl_bool r;
670 
671 	r = graph_has_edge(graph, isl_edge_validity, src, dst);
672 	if (r < 0 || r)
673 		return r;
674 
675 	return graph_has_edge(graph, isl_edge_conditional_validity, src, dst);
676 }
677 
678 /* Perform all the required memory allocations for a schedule graph "graph"
679  * with "n_node" nodes and "n_edge" edge and initialize the corresponding
680  * fields.
681  */
682 static isl_stat graph_alloc(isl_ctx *ctx, struct isl_sched_graph *graph,
683 	int n_node, int n_edge)
684 {
685 	int i;
686 
687 	graph->n = n_node;
688 	graph->n_edge = n_edge;
689 	graph->node = isl_calloc_array(ctx, struct isl_sched_node, graph->n);
690 	graph->sorted = isl_calloc_array(ctx, int, graph->n);
691 	graph->region = isl_alloc_array(ctx,
692 					struct isl_trivial_region, graph->n);
693 	graph->edge = isl_calloc_array(ctx,
694 					struct isl_sched_edge, graph->n_edge);
695 
696 	graph->intra_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
697 	graph->intra_hmap_param = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
698 	graph->inter_hmap = isl_map_to_basic_set_alloc(ctx, 2 * n_edge);
699 
700 	if (!graph->node || !graph->region || (graph->n_edge && !graph->edge) ||
701 	    !graph->sorted)
702 		return isl_stat_error;
703 
704 	for(i = 0; i < graph->n; ++i)
705 		graph->sorted[i] = i;
706 
707 	return isl_stat_ok;
708 }
709 
710 /* Free the memory associated to node "node" in "graph".
711  * The "coincident" field is shared by nodes in a graph and its subgraph.
712  * It therefore only needs to be freed for the original dependence graph,
713  * i.e., one that is not the result of splitting.
714  */
715 static void clear_node(struct isl_sched_graph *graph,
716 	struct isl_sched_node *node)
717 {
718 	isl_space_free(node->space);
719 	isl_set_free(node->hull);
720 	isl_multi_aff_free(node->compress);
721 	isl_pw_multi_aff_free(node->decompress);
722 	isl_mat_free(node->sched);
723 	isl_map_free(node->sched_map);
724 	isl_mat_free(node->indep);
725 	isl_mat_free(node->vmap);
726 	if (graph->root == graph)
727 		free(node->coincident);
728 	isl_multi_val_free(node->sizes);
729 	isl_basic_set_free(node->bounds);
730 	isl_vec_free(node->max);
731 }
732 
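/* Free all memory associated to the fields of "graph",
 * but not the structure itself.
 */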
733 static void graph_free(isl_ctx *ctx, struct isl_sched_graph *graph)
734 {
735 	int i;
736 
737 	isl_map_to_basic_set_free(graph->intra_hmap);
738 	isl_map_to_basic_set_free(graph->intra_hmap_param);
739 	isl_map_to_basic_set_free(graph->inter_hmap);
740 
741 	if (graph->node)
742 		for (i = 0; i < graph->n; ++i)
743 			clear_node(graph, &graph->node[i]);
744 	free(graph->node);
745 	free(graph->sorted);
746 	if (graph->edge)
747 		for (i = 0; i < graph->n_edge; ++i) {
748 			isl_map_free(graph->edge[i].map);
749 			isl_union_map_free(graph->edge[i].tagged_condition);
750 			isl_union_map_free(graph->edge[i].tagged_validity);
751 		}
752 	free(graph->edge);
753 	free(graph->region);
754 	for (i = 0; i <= isl_edge_last; ++i)
755 		isl_hash_table_free(ctx, graph->edge_table[i]);
756 	isl_hash_table_free(ctx, graph->node_table);
757 	isl_basic_set_free(graph->lp);
758 }
759 
760 /* For each "set" on which this function is called, increment
761  * graph->n by one and update graph->maxvar.
762  */
763 static isl_stat init_n_maxvar(__isl_take isl_set *set, void *user)
764 {
765 	struct isl_sched_graph *graph = user;
766 	isl_size nvar = isl_set_dim(set, isl_dim_set);
767 
768 	graph->n++;
769 	if (nvar > graph->maxvar)
770 		graph->maxvar = nvar;
771 
772 	isl_set_free(set);
773 
774 	if (nvar < 0)
775 		return isl_stat_error;
776 	return isl_stat_ok;
777 }
778 
779 /* Compute the number of rows that should be allocated for the schedule.
780  * In particular, we need one row for each variable or one row
781  * for each basic map in the dependences.
782  * Note that it is practically impossible to exhaust both
783  * the number of dependences and the number of variables.
784  */
785 static isl_stat compute_max_row(struct isl_sched_graph *graph,
786 	__isl_keep isl_schedule_constraints *sc)
787 {
788 	int n_edge;
789 	isl_stat r;
790 	isl_union_set *domain;
791 
792 	graph->n = 0;
793 	graph->maxvar = 0;
794 	domain = isl_schedule_constraints_get_domain(sc);
795 	r = isl_union_set_foreach_set(domain, &init_n_maxvar, graph);
796 	isl_union_set_free(domain);
797 	if (r < 0)
798 		return isl_stat_error;
799 	n_edge = isl_schedule_constraints_n_basic_map(sc);
800 	if (n_edge < 0)
801 		return isl_stat_error;
802 	graph->max_row = n_edge + graph->maxvar;
803 
804 	return isl_stat_ok;
805 }
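/* For instance (illustration only): with two statements of 2 and 3
 * iterators and a total of 4 basic maps in the dependences,
 * max_row = 4 + 3 = 7.
 */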
806 
807 /* Does "bset" have any defining equalities for its set variables?
808  */
809 static isl_bool has_any_defining_equality(__isl_keep isl_basic_set *bset)
810 {
811 	int i;
812 	isl_size n;
813 
814 	n = isl_basic_set_dim(bset, isl_dim_set);
815 	if (n < 0)
816 		return isl_bool_error;
817 
818 	for (i = 0; i < n; ++i) {
819 		isl_bool has;
820 
821 		has = isl_basic_set_has_defining_equality(bset, isl_dim_set, i,
822 							NULL);
823 		if (has < 0 || has)
824 			return has;
825 	}
826 
827 	return isl_bool_false;
828 }
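/* For example (illustration only): the affine hull of
 * { [i, j] : j = 2 i and 0 <= i < 10 } has a defining equality
 * for dimension 1, so this function returns isl_bool_true on it and
 * extract_node below compresses the corresponding domain.
 */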
829 
830 /* Set the entries of node->max to the value of the schedule_max_coefficient
831  * option, if set.
832  */
833 static isl_stat set_max_coefficient(isl_ctx *ctx, struct isl_sched_node *node)
834 {
835 	int max;
836 
837 	max = isl_options_get_schedule_max_coefficient(ctx);
838 	if (max == -1)
839 		return isl_stat_ok;
840 
841 	node->max = isl_vec_alloc(ctx, node->nvar);
842 	node->max = isl_vec_set_si(node->max, max);
843 	if (!node->max)
844 		return isl_stat_error;
845 
846 	return isl_stat_ok;
847 }
848 
849 /* Set the entries of node->max to the minimum of the schedule_max_coefficient
850  * option (if set) and half of the minimum of the sizes in the other
851  * dimensions.  Round up when computing the half such that
852  * if the minimum of the sizes is one, half of the size is taken to be one
853  * rather than zero.
854  * If the global minimum is unbounded (i.e., if both
855  * the schedule_max_coefficient is not set and the sizes in the other
856  * dimensions are unbounded), then store a negative value.
857  * If the schedule coefficient is close to the size of the instance set
858  * in another dimension, then the schedule may represent a loop
859  * coalescing transformation (especially if the coefficient
860  * in that other dimension is one).  Forcing the coefficient to be
861  * smaller than or equal to half the minimal size should avoid this
862  * situation.
863  */
864 static isl_stat compute_max_coefficient(isl_ctx *ctx,
865 	struct isl_sched_node *node)
866 {
867 	int max;
868 	int i, j;
869 	isl_vec *v;
870 
871 	max = isl_options_get_schedule_max_coefficient(ctx);
872 	v = isl_vec_alloc(ctx, node->nvar);
873 	if (!v)
874 		return isl_stat_error;
875 
876 	for (i = 0; i < node->nvar; ++i) {
877 		isl_int_set_si(v->el[i], max);
878 		isl_int_mul_si(v->el[i], v->el[i], 2);
879 	}
880 
881 	for (i = 0; i < node->nvar; ++i) {
882 		isl_val *size;
883 
884 		size = isl_multi_val_get_val(node->sizes, i);
885 		if (!size)
886 			goto error;
887 		if (!isl_val_is_int(size)) {
888 			isl_val_free(size);
889 			continue;
890 		}
891 		for (j = 0; j < node->nvar; ++j) {
892 			if (j == i)
893 				continue;
894 			if (isl_int_is_neg(v->el[j]) ||
895 			    isl_int_gt(v->el[j], size->n))
896 				isl_int_set(v->el[j], size->n);
897 		}
898 		isl_val_free(size);
899 	}
900 
901 	for (i = 0; i < node->nvar; ++i)
902 		isl_int_cdiv_q_ui(v->el[i], v->el[i], 2);
903 
904 	node->max = v;
905 	return isl_stat_ok;
906 error:
907 	isl_vec_free(v);
908 	return isl_stat_error;
909 }
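/* Worked example (illustration only): if the schedule_max_coefficient
 * option is unset and node->sizes is (8, 3, infinity), then the bounds
 * computed above are (2, 4, 2), i.e.,
 * (ceil(3/2), ceil(8/2), ceil(min(8,3)/2)),
 * since each variable is bounded by half the minimal integer size
 * of the other dimensions, rounded up.
 */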
910 
911 /* Construct an identifier for node "node", which will represent "set".
912  * The name of the identifier is either "compressed" or
913  * "compressed_<name>", with <name> the name of the space of "set".
914  * The user pointer of the identifier points to "node".
915  */
916 static __isl_give isl_id *construct_compressed_id(__isl_keep isl_set *set,
917 	struct isl_sched_node *node)
918 {
919 	isl_bool has_name;
920 	isl_ctx *ctx;
921 	isl_id *id;
922 	isl_printer *p;
923 	const char *name;
924 	char *id_name;
925 
926 	has_name = isl_set_has_tuple_name(set);
927 	if (has_name < 0)
928 		return NULL;
929 
930 	ctx = isl_set_get_ctx(set);
931 	if (!has_name)
932 		return isl_id_alloc(ctx, "compressed", node);
933 
934 	p = isl_printer_to_str(ctx);
935 	name = isl_set_get_tuple_name(set);
936 	p = isl_printer_print_str(p, "compressed_");
937 	p = isl_printer_print_str(p, name);
938 	id_name = isl_printer_get_str(p);
939 	isl_printer_free(p);
940 
941 	id = isl_id_alloc(ctx, id_name, node);
942 	free(id_name);
943 
944 	return id;
945 }
946 
947 /* Construct a map that isolates the variable in position "pos" in "set".
948  *
949  * That is, construct
950  *
951  *	[i_0, ..., i_pos-1, i_pos+1, ...] -> [i_pos]
952  */
953 static __isl_give isl_map *isolate(__isl_take isl_set *set, int pos)
954 {
955 	isl_map *map;
956 
957 	map = isl_set_project_onto_map(set, isl_dim_set, pos, 1);
958 	map = isl_map_project_out(map, isl_dim_in, pos, 1);
959 	return map;
960 }
961 
962 /* Compute and return the size of "set" in dimension "dim".
963  * The size is taken to be the difference in values for that variable
964  * for fixed values of the other variables.
965  * This assumes that "set" is convex.
966  * In particular, the variable is first isolated from the other variables
967  * in the range of a map
968  *
969  *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [i_dim]
970  *
971  * and then duplicated
972  *
973  *	[i_0, ..., i_dim-1, i_dim+1, ...] -> [[i_dim] -> [i_dim']]
974  *
975  * The shared variables are then projected out and the maximal value
976  * of i_dim' - i_dim is computed.
977  */
978 static __isl_give isl_val *compute_size(__isl_take isl_set *set, int dim)
979 {
980 	isl_map *map;
981 	isl_local_space *ls;
982 	isl_aff *obj;
983 	isl_val *v;
984 
985 	map = isolate(set, dim);
986 	map = isl_map_range_product(map, isl_map_copy(map));
987 	map = isl_set_unwrap(isl_map_range(map));
988 	set = isl_map_deltas(map);
989 	ls = isl_local_space_from_space(isl_set_get_space(set));
990 	obj = isl_aff_var_on_domain(ls, isl_dim_set, 0);
991 	v = isl_set_max_val(set, obj);
992 	isl_aff_free(obj);
993 	isl_set_free(set);
994 
995 	return v;
996 }
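/* Illustrative sketch (not part of the original code): compute_size
 * applied to the set below with dim = 1 returns the value 3,
 * since for fixed i, the difference between two values of j
 * is at most 3.
 */
static __isl_give isl_val *example_compute_size(isl_ctx *ctx)
{
	isl_set *set;

	set = isl_set_read_from_str(ctx,
		"{ [i, j] : 0 <= i < 10 and i <= j < i + 4 }");
	return compute_size(set, 1);
}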
997 
998 /* Perform a compression on "node" where "hull" represents the constraints
999  * that were used to derive the compression, while "compress" and
1000  * "decompress" map the original space to the compressed space and
1001  * vice versa.
1002  *
1003  * If "node" was not compressed already, then simply store
1004  * the compression information.
1005  * Otherwise the "original" space is actually the result
1006  * of a previous compression, which is then combined
1007  * with the present compression.
1008  *
1009  * The dimensionality of the compressed domain is also adjusted.
1010  * Other information, such as the sizes and the maximal coefficient values,
1011  * has not been computed yet and therefore does not need to be adjusted.
1012  */
1013 static isl_stat compress_node(struct isl_sched_node *node,
1014 	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
1015 	__isl_take isl_pw_multi_aff *decompress)
1016 {
1017 	node->nvar = isl_multi_aff_dim(compress, isl_dim_out);
1018 	if (!node->compressed) {
1019 		node->compressed = 1;
1020 		node->hull = hull;
1021 		node->compress = compress;
1022 		node->decompress = decompress;
1023 	} else {
1024 		hull = isl_set_preimage_multi_aff(hull,
1025 					    isl_multi_aff_copy(node->compress));
1026 		node->hull = isl_set_intersect(node->hull, hull);
1027 		node->compress = isl_multi_aff_pullback_multi_aff(
1028 						compress, node->compress);
1029 		node->decompress = isl_pw_multi_aff_pullback_pw_multi_aff(
1030 						node->decompress, decompress);
1031 	}
1032 
1033 	if (!node->hull || !node->compress || !node->decompress)
1034 		return isl_stat_error;
1035 
1036 	return isl_stat_ok;
1037 }
1038 
1039 /* Given that dimension "pos" in "set" has a fixed value
1040  * in terms of the other dimensions, (further) compress "node"
1041  * by projecting out this dimension.
1042  * "set" may be the result of a previous compression.
1043  * "uncompressed" is the original domain (without compression).
1044  *
1045  * The compression function simply projects out the dimension.
1046  * The decompression function adds back the dimension
1047  * in the right position as an expression of the other dimensions
1048  * derived from "set".
1049  * As in extract_node, the compressed space has an identifier
1050  * that references "node" such that each compressed space is unique and
1051  * such that the node can be recovered from the compressed space.
1052  *
1053  * The constraint removed through the compression is added to the "hull"
1054  * such that only edges that relate to the original domains
1055  * are taken into account.
1056  * In particular, it is obtained by composing compression and decompression and
1057  * taking the relation among the variables in the range.
1058  */
1059 static isl_stat project_out_fixed(struct isl_sched_node *node,
1060 	__isl_keep isl_set *uncompressed, __isl_take isl_set *set, int pos)
1061 {
1062 	isl_id *id;
1063 	isl_space *space;
1064 	isl_set *domain;
1065 	isl_map *map;
1066 	isl_multi_aff *compress;
1067 	isl_pw_multi_aff *decompress, *pma;
1068 	isl_multi_pw_aff *mpa;
1069 	isl_set *hull;
1070 
1071 	map = isolate(isl_set_copy(set), pos);
1072 	pma = isl_pw_multi_aff_from_map(map);
1073 	domain = isl_pw_multi_aff_domain(isl_pw_multi_aff_copy(pma));
1074 	pma = isl_pw_multi_aff_gist(pma, domain);
1075 	space = isl_pw_multi_aff_get_domain_space(pma);
1076 	mpa = isl_multi_pw_aff_identity(isl_space_map_from_set(space));
1077 	mpa = isl_multi_pw_aff_range_splice(mpa, pos,
1078 				    isl_multi_pw_aff_from_pw_multi_aff(pma));
1079 	decompress = isl_pw_multi_aff_from_multi_pw_aff(mpa);
1080 	space = isl_set_get_space(set);
1081 	compress = isl_multi_aff_project_out_map(space, isl_dim_set, pos, 1);
1082 	id = construct_compressed_id(uncompressed, node);
1083 	compress = isl_multi_aff_set_tuple_id(compress, isl_dim_out, id);
1084 	space = isl_space_reverse(isl_multi_aff_get_space(compress));
1085 	decompress = isl_pw_multi_aff_reset_space(decompress, space);
1086 	pma = isl_pw_multi_aff_pullback_multi_aff(
1087 	    isl_pw_multi_aff_copy(decompress), isl_multi_aff_copy(compress));
1088 	hull = isl_map_range(isl_map_from_pw_multi_aff(pma));
1089 
1090 	isl_set_free(set);
1091 
1092 	return compress_node(node, hull, compress, decompress);
1093 }
1094 
1095 /* Compute the size of the compressed domain in each dimension and
1096  * store the results in node->sizes.
1097  * "uncompressed" is the original domain (without compression).
1098  *
1099  * First compress the domain if needed and then compute the size
1100  * in each direction.
1101  * If the domain is not convex, then the sizes are computed
1102  * on a convex superset in order to avoid picking up sizes
1103  * that are valid for the individual disjuncts, but not for
1104  * the domain as a whole.
1105  *
1106  * If any of the sizes turns out to be zero, then this means
1107  * that this dimension has a fixed value in terms of
1108  * the other dimensions.  Perform an (extra) compression
1110  * to remove this dimension.
1110  */
1111 static isl_stat compute_sizes(struct isl_sched_node *node,
1112 	__isl_keep isl_set *uncompressed)
1113 {
1114 	int j;
1115 	isl_size n;
1116 	isl_multi_val *mv;
1117 	isl_set *set = isl_set_copy(uncompressed);
1118 
1119 	if (node->compressed)
1120 		set = isl_set_preimage_pw_multi_aff(set,
1121 				    isl_pw_multi_aff_copy(node->decompress));
1122 	set = isl_set_from_basic_set(isl_set_simple_hull(set));
1123 	mv = isl_multi_val_zero(isl_set_get_space(set));
1124 	n = isl_set_dim(set, isl_dim_set);
1125 	if (n < 0)
1126 		mv = isl_multi_val_free(mv);
1127 	for (j = 0; j < n; ++j) {
1128 		isl_bool is_zero;
1129 		isl_val *v;
1130 
1131 		v = compute_size(isl_set_copy(set), j);
1132 		is_zero = isl_val_is_zero(v);
1133 		mv = isl_multi_val_set_val(mv, j, v);
1134 		if (is_zero >= 0 && is_zero) {
1135 			isl_multi_val_free(mv);
1136 			if (project_out_fixed(node, uncompressed, set, j) < 0)
1137 				return isl_stat_error;
1138 			return compute_sizes(node, uncompressed);
1139 		}
1140 	}
1141 	node->sizes = mv;
1142 	isl_set_free(set);
1143 	if (!node->sizes)
1144 		return isl_stat_error;
1145 	return isl_stat_ok;
1146 }
1147 
1148 /* Compute the size of the instance set "set" of "node", after compression,
1149  * as well as bounds on the corresponding coefficients, if needed.
1150  *
1151  * The sizes are needed when the schedule_treat_coalescing option is set.
1152  * The bounds are needed when the schedule_treat_coalescing option or
1153  * the schedule_max_coefficient option is set.
1154  *
1155  * If the schedule_treat_coalescing option is not set, then at most
1156  * the bounds need to be set and this is done in set_max_coefficient.
1157  * Otherwise, compute the size of the compressed domain
1158  * in each direction and store the results in node->sizes.
1159  * Finally, set the bounds on the coefficients based on the sizes
1160  * and the schedule_max_coefficient option in compute_max_coefficient.
1161  */
1162 static isl_stat compute_sizes_and_max(isl_ctx *ctx, struct isl_sched_node *node,
1163 	__isl_take isl_set *set)
1164 {
1165 	isl_stat r;
1166 
1167 	if (!isl_options_get_schedule_treat_coalescing(ctx)) {
1168 		isl_set_free(set);
1169 		return set_max_coefficient(ctx, node);
1170 	}
1171 
1172 	r = compute_sizes(node, set);
1173 	isl_set_free(set);
1174 	if (r < 0)
1175 		return isl_stat_error;
1176 	return compute_max_coefficient(ctx, node);
1177 }
1178 
1179 /* Add a new node to the graph representing the given instance set.
1180  * "nvar" is the (possibly compressed) number of variables and
1181  * may be smaller than the number of set variables in "set"
1182  * if "compressed" is set.
1183  * If "compressed" is set, then "hull" represents the constraints
1184  * that were used to derive the compression, while "compress" and
1185  * "decompress" map the original space to the compressed space and
1186  * vice versa.
1187  * If "compressed" is not set, then "hull", "compress" and "decompress"
1188  * should be NULL.
1189  *
1190  * Compute the size of the instance set and bounds on the coefficients,
1191  * if needed.
1192  */
1193 static isl_stat add_node(struct isl_sched_graph *graph,
1194 	__isl_take isl_set *set, int nvar, int compressed,
1195 	__isl_take isl_set *hull, __isl_take isl_multi_aff *compress,
1196 	__isl_take isl_pw_multi_aff *decompress)
1197 {
1198 	isl_size nparam;
1199 	isl_ctx *ctx;
1200 	isl_mat *sched;
1201 	isl_space *space;
1202 	int *coincident;
1203 	struct isl_sched_node *node;
1204 
1205 	nparam = isl_set_dim(set, isl_dim_param);
1206 	if (nparam < 0)
1207 		goto error;
1208 
1209 	ctx = isl_set_get_ctx(set);
1210 	if (!ctx->opt->schedule_parametric)
1211 		nparam = 0;
1212 	sched = isl_mat_alloc(ctx, 0, 1 + nparam + nvar);
1213 	node = &graph->node[graph->n];
1214 	graph->n++;
1215 	space = isl_set_get_space(set);
1216 	node->space = space;
1217 	node->nvar = nvar;
1218 	node->nparam = nparam;
1219 	node->sched = sched;
1220 	node->sched_map = NULL;
1221 	coincident = isl_calloc_array(ctx, int, graph->max_row);
1222 	node->coincident = coincident;
1223 	node->compressed = compressed;
1224 	node->hull = hull;
1225 	node->compress = compress;
1226 	node->decompress = decompress;
1227 	if (compute_sizes_and_max(ctx, node, set) < 0)
1228 		return isl_stat_error;
1229 
1230 	if (!space || !sched || (graph->max_row && !coincident))
1231 		return isl_stat_error;
1232 	if (compressed && (!hull || !compress || !decompress))
1233 		return isl_stat_error;
1234 
1235 	return isl_stat_ok;
1236 error:
1237 	isl_set_free(set);
1238 	isl_set_free(hull);
1239 	isl_multi_aff_free(compress);
1240 	isl_pw_multi_aff_free(decompress);
1241 	return isl_stat_error;
1242 }
1243 
1244 /* Add a new node to the graph representing the given set.
1245  *
1246  * If any of the set variables is defined by an equality, then
1247  * we perform variable compression such that we can perform
1248  * the scheduling on the compressed domain.
1249  * In this case, an identifier is used that references the new node
1250  * such that each compressed space is unique and
1251  * such that the node can be recovered from the compressed space.
1252  */
1253 static isl_stat extract_node(__isl_take isl_set *set, void *user)
1254 {
1255 	isl_size nvar;
1256 	isl_bool has_equality;
1257 	isl_id *id;
1258 	isl_basic_set *hull;
1259 	isl_set *hull_set;
1260 	isl_morph *morph;
1261 	isl_multi_aff *compress, *decompress_ma;
1262 	isl_pw_multi_aff *decompress;
1263 	struct isl_sched_graph *graph = user;
1264 
1265 	hull = isl_set_affine_hull(isl_set_copy(set));
1266 	hull = isl_basic_set_remove_divs(hull);
1267 	nvar = isl_set_dim(set, isl_dim_set);
1268 	has_equality = has_any_defining_equality(hull);
1269 
1270 	if (nvar < 0 || has_equality < 0)
1271 		goto error;
1272 	if (!has_equality) {
1273 		isl_basic_set_free(hull);
1274 		return add_node(graph, set, nvar, 0, NULL, NULL, NULL);
1275 	}
1276 
1277 	id = construct_compressed_id(set, &graph->node[graph->n]);
1278 	morph = isl_basic_set_variable_compression_with_id(hull, id);
1279 	isl_id_free(id);
1280 	nvar = isl_morph_ran_dim(morph, isl_dim_set);
1281 	if (nvar < 0)
1282 		set = isl_set_free(set);
1283 	compress = isl_morph_get_var_multi_aff(morph);
1284 	morph = isl_morph_inverse(morph);
1285 	decompress_ma = isl_morph_get_var_multi_aff(morph);
1286 	decompress = isl_pw_multi_aff_from_multi_aff(decompress_ma);
1287 	isl_morph_free(morph);
1288 
1289 	hull_set = isl_set_from_basic_set(hull);
1290 	return add_node(graph, set, nvar, 1, hull_set, compress, decompress);
1291 error:
1292 	isl_basic_set_free(hull);
1293 	isl_set_free(set);
1294 	return isl_stat_error;
1295 }
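/* As an example (illustration only): for a domain { S[i, j] : j = i + 1 },
 * extract_node detects the defining equality for j and adds a compressed
 * one-dimensional node, with a compressed space called "compressed_S" and
 * with compression and decompression functions mapping between
 * the original and the compressed space.
 */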
1296 
1297 struct isl_extract_edge_data {
1298 	enum isl_edge_type type;
1299 	struct isl_sched_graph *graph;
1300 };
1301 
1302 /* Merge edge2 into edge1, freeing the contents of edge2.
1303  * Return 0 on success and -1 on failure.
1304  *
1305  * edge1 and edge2 are assumed to have the same value for the map field.
1306  */
1307 static int merge_edge(struct isl_sched_edge *edge1,
1308 	struct isl_sched_edge *edge2)
1309 {
1310 	edge1->types |= edge2->types;
1311 	isl_map_free(edge2->map);
1312 
1313 	if (is_condition(edge2)) {
1314 		if (!edge1->tagged_condition)
1315 			edge1->tagged_condition = edge2->tagged_condition;
1316 		else
1317 			edge1->tagged_condition =
1318 				isl_union_map_union(edge1->tagged_condition,
1319 						    edge2->tagged_condition);
1320 	}
1321 
1322 	if (is_conditional_validity(edge2)) {
1323 		if (!edge1->tagged_validity)
1324 			edge1->tagged_validity = edge2->tagged_validity;
1325 		else
1326 			edge1->tagged_validity =
1327 				isl_union_map_union(edge1->tagged_validity,
1328 						    edge2->tagged_validity);
1329 	}
1330 
1331 	if (is_condition(edge2) && !edge1->tagged_condition)
1332 		return -1;
1333 	if (is_conditional_validity(edge2) && !edge1->tagged_validity)
1334 		return -1;
1335 
1336 	return 0;
1337 }
1338 
1339 /* Insert dummy tags in domain and range of "map".
1340  *
1341  * In particular, if "map" is of the form
1342  *
1343  *	A -> B
1344  *
1345  * then return
1346  *
1347  *	[A -> dummy_tag] -> [B -> dummy_tag]
1348  *
1349  * where the dummy_tags are identical and equal to any dummy tags
1350  * introduced by any other call to this function.
1351  */
1352 static __isl_give isl_map *insert_dummy_tags(__isl_take isl_map *map)
1353 {
1354 	static char dummy;
1355 	isl_ctx *ctx;
1356 	isl_id *id;
1357 	isl_space *space;
1358 	isl_set *domain, *range;
1359 
1360 	ctx = isl_map_get_ctx(map);
1361 
1362 	id = isl_id_alloc(ctx, NULL, &dummy);
1363 	space = isl_space_params(isl_map_get_space(map));
1364 	space = isl_space_set_from_params(space);
1365 	space = isl_space_set_tuple_id(space, isl_dim_set, id);
1366 	space = isl_space_map_from_set(space);
1367 
1368 	domain = isl_map_wrap(map);
1369 	range = isl_map_wrap(isl_map_universe(space));
1370 	map = isl_map_from_domain_and_range(domain, range);
1371 	map = isl_map_zip(map);
1372 
1373 	return map;
1374 }
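/* For example (illustration only): applied to { S[i] -> T[i] },
 * the result is conceptually { [S[i] -> dummy] -> [T[i] -> dummy] },
 * where "dummy" stands for the same zero-dimensional tuple
 * in every call to this function.
 */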
1375 
1376 /* Given that at least one of "src" or "dst" is compressed, return
1377  * a map between the spaces of these nodes restricted to the affine
1378  * hull that was used in the compression.
1379  */
1380 static __isl_give isl_map *extract_hull(struct isl_sched_node *src,
1381 	struct isl_sched_node *dst)
1382 {
1383 	isl_set *dom, *ran;
1384 
1385 	if (src->compressed)
1386 		dom = isl_set_copy(src->hull);
1387 	else
1388 		dom = isl_set_universe(isl_space_copy(src->space));
1389 	if (dst->compressed)
1390 		ran = isl_set_copy(dst->hull);
1391 	else
1392 		ran = isl_set_universe(isl_space_copy(dst->space));
1393 
1394 	return isl_map_from_domain_and_range(dom, ran);
1395 }
1396 
1397 /* Intersect the domains of the nested relations in domain and range
1398  * of "tagged" with "map".
1399  */
1400 static __isl_give isl_map *map_intersect_domains(__isl_take isl_map *tagged,
1401 	__isl_keep isl_map *map)
1402 {
1403 	isl_set *set;
1404 
1405 	tagged = isl_map_zip(tagged);
1406 	set = isl_map_wrap(isl_map_copy(map));
1407 	tagged = isl_map_intersect_domain(tagged, set);
1408 	tagged = isl_map_zip(tagged);
1409 	return tagged;
1410 }
1411 
1412 /* Return a pointer to the node that lives in the domain space of "map",
1413  * an invalid node if there is no such node, or NULL in case of error.
1414  */
1415 static struct isl_sched_node *find_domain_node(isl_ctx *ctx,
1416 	struct isl_sched_graph *graph, __isl_keep isl_map *map)
1417 {
1418 	struct isl_sched_node *node;
1419 	isl_space *space;
1420 
1421 	space = isl_space_domain(isl_map_get_space(map));
1422 	node = graph_find_node(ctx, graph, space);
1423 	isl_space_free(space);
1424 
1425 	return node;
1426 }
1427 
1428 /* Return a pointer to the node that lives in the range space of "map",
1429  * an invalid node if there is no such node, or NULL in case of error.
1430  */
1431 static struct isl_sched_node *find_range_node(isl_ctx *ctx,
1432 	struct isl_sched_graph *graph, __isl_keep isl_map *map)
1433 {
1434 	struct isl_sched_node *node;
1435 	isl_space *space;
1436 
1437 	space = isl_space_range(isl_map_get_space(map));
1438 	node = graph_find_node(ctx, graph, space);
1439 	isl_space_free(space);
1440 
1441 	return node;
1442 }
1443 
1444 /* Refrain from adding a new edge based on "map".
1445  * Instead, just free the map.
1446  * "tagged" is either a copy of "map" with additional tags or NULL.
1447  */
1448 static isl_stat skip_edge(__isl_take isl_map *map, __isl_take isl_map *tagged)
1449 {
1450 	isl_map_free(map);
1451 	isl_map_free(tagged);
1452 
1453 	return isl_stat_ok;
1454 }
1455 
1456 /* Add a new edge to the graph based on the given map
1457  * and add it to data->graph->edge_table[data->type].
1458  * If a dependence relation of a given type happens to be identical
1459  * to one of the dependence relations of a type that was added before,
1460  * then we don't create a new edge, but instead mark the original edge
1461  * as also representing a dependence of the current type.
1462  *
1463  * Edges of type isl_edge_condition or isl_edge_conditional_validity
1464  * may be specified as "tagged" dependence relations.  That is, "map"
1465  * may contain elements (i -> a) -> (j -> b), where i -> j denotes
1466  * the dependence on iterations and a and b are tags.
1467  * edge->map is set to the relation containing the elements i -> j,
1468  * while edge->tagged_condition and edge->tagged_validity contain
1469  * the union of all the "map" relations
1470  * for which extract_edge is called that result in the same edge->map.
1471  *
1472  * If the source or the destination node is compressed, then
1473  * intersect both "map" and "tagged" with the constraints that
1474  * were used to construct the compression.
1475  * This ensures that there are no schedule constraints defined
1476  * outside of these domains, while the scheduler no longer has
1477  * any control over those outside parts.
1478  */
1479 static isl_stat extract_edge(__isl_take isl_map *map, void *user)
1480 {
1481 	isl_bool empty;
1482 	isl_ctx *ctx = isl_map_get_ctx(map);
1483 	struct isl_extract_edge_data *data = user;
1484 	struct isl_sched_graph *graph = data->graph;
1485 	struct isl_sched_node *src, *dst;
1486 	struct isl_sched_edge *edge;
1487 	isl_map *tagged = NULL;
1488 
1489 	if (data->type == isl_edge_condition ||
1490 	    data->type == isl_edge_conditional_validity) {
1491 		if (isl_map_can_zip(map)) {
1492 			tagged = isl_map_copy(map);
1493 			map = isl_set_unwrap(isl_map_domain(isl_map_zip(map)));
1494 		} else {
1495 			tagged = insert_dummy_tags(isl_map_copy(map));
1496 		}
1497 	}
1498 
1499 	src = find_domain_node(ctx, graph, map);
1500 	dst = find_range_node(ctx, graph, map);
1501 
1502 	if (!src || !dst)
1503 		goto error;
1504 	if (!is_node(graph, src) || !is_node(graph, dst))
1505 		return skip_edge(map, tagged);
1506 
1507 	if (src->compressed || dst->compressed) {
1508 		isl_map *hull;
1509 		hull = extract_hull(src, dst);
1510 		if (tagged)
1511 			tagged = map_intersect_domains(tagged, hull);
1512 		map = isl_map_intersect(map, hull);
1513 	}
1514 
1515 	empty = isl_map_plain_is_empty(map);
1516 	if (empty < 0)
1517 		goto error;
1518 	if (empty)
1519 		return skip_edge(map, tagged);
1520 
1521 	graph->edge[graph->n_edge].src = src;
1522 	graph->edge[graph->n_edge].dst = dst;
1523 	graph->edge[graph->n_edge].map = map;
1524 	graph->edge[graph->n_edge].types = 0;
1525 	graph->edge[graph->n_edge].tagged_condition = NULL;
1526 	graph->edge[graph->n_edge].tagged_validity = NULL;
1527 	set_type(&graph->edge[graph->n_edge], data->type);
1528 	if (data->type == isl_edge_condition)
1529 		graph->edge[graph->n_edge].tagged_condition =
1530 					isl_union_map_from_map(tagged);
1531 	if (data->type == isl_edge_conditional_validity)
1532 		graph->edge[graph->n_edge].tagged_validity =
1533 					isl_union_map_from_map(tagged);
1534 
1535 	edge = graph_find_matching_edge(graph, &graph->edge[graph->n_edge]);
1536 	if (!edge) {
1537 		graph->n_edge++;
1538 		return isl_stat_error;
1539 	}
1540 	if (edge == &graph->edge[graph->n_edge])
1541 		return graph_edge_table_add(ctx, graph, data->type,
1542 				    &graph->edge[graph->n_edge++]);
1543 
1544 	if (merge_edge(edge, &graph->edge[graph->n_edge]) < 0)
1545 		return isl_stat_error;
1546 
1547 	return graph_edge_table_add(ctx, graph, data->type, edge);
1548 error:
1549 	isl_map_free(map);
1550 	isl_map_free(tagged);
1551 	return isl_stat_error;
1552 }
1553 
1554 /* Initialize the schedule graph "graph" from the schedule constraints "sc".
1555  *
1556  * The context is included in the domain before the nodes of
1557  * the graph are extracted in order to be able to exploit
1558  * any possible additional equalities.
1559  * Note that this intersection is only performed locally here.
1560  */
1561 static isl_stat graph_init(struct isl_sched_graph *graph,
1562 	__isl_keep isl_schedule_constraints *sc)
1563 {
1564 	isl_ctx *ctx;
1565 	isl_union_set *domain;
1566 	isl_union_map *c;
1567 	struct isl_extract_edge_data data;
1568 	enum isl_edge_type i;
1569 	isl_stat r;
1570 	isl_size n;
1571 
1572 	if (!sc)
1573 		return isl_stat_error;
1574 
1575 	ctx = isl_schedule_constraints_get_ctx(sc);
1576 
1577 	domain = isl_schedule_constraints_get_domain(sc);
1578 	n = isl_union_set_n_set(domain);
1579 	graph->n = n;
1580 	isl_union_set_free(domain);
1581 	if (n < 0)
1582 		return isl_stat_error;
1583 
1584 	n = isl_schedule_constraints_n_map(sc);
1585 	if (n < 0 || graph_alloc(ctx, graph, graph->n, n) < 0)
1586 		return isl_stat_error;
1587 
1588 	if (compute_max_row(graph, sc) < 0)
1589 		return isl_stat_error;
1590 	graph->root = graph;
1591 	graph->n = 0;
1592 	domain = isl_schedule_constraints_get_domain(sc);
1593 	domain = isl_union_set_intersect_params(domain,
1594 				    isl_schedule_constraints_get_context(sc));
1595 	r = isl_union_set_foreach_set(domain, &extract_node, graph);
1596 	isl_union_set_free(domain);
1597 	if (r < 0)
1598 		return isl_stat_error;
1599 	if (graph_init_table(ctx, graph) < 0)
1600 		return isl_stat_error;
1601 	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1602 		isl_size n;
1603 
1604 		c = isl_schedule_constraints_get(sc, i);
1605 		n = isl_union_map_n_map(c);
1606 		graph->max_edge[i] = n;
1607 		isl_union_map_free(c);
1608 		if (n < 0)
1609 			return isl_stat_error;
1610 	}
1611 	if (graph_init_edge_tables(ctx, graph) < 0)
1612 		return isl_stat_error;
1613 	graph->n_edge = 0;
1614 	data.graph = graph;
1615 	for (i = isl_edge_first; i <= isl_edge_last; ++i) {
1616 		isl_stat r;
1617 
1618 		data.type = i;
1619 		c = isl_schedule_constraints_get(sc, i);
1620 		r = isl_union_map_foreach_map(c, &extract_edge, &data);
1621 		isl_union_map_free(c);
1622 		if (r < 0)
1623 			return isl_stat_error;
1624 	}
1625 
1626 	return isl_stat_ok;
1627 }
1628 
1629 /* Check whether there is any dependence from node[j] to node[i]
1630  * or from node[i] to node[j].
1631  */
1632 static isl_bool node_follows_weak(int i, int j, void *user)
1633 {
1634 	isl_bool f;
1635 	struct isl_sched_graph *graph = user;
1636 
1637 	f = graph_has_any_edge(graph, &graph->node[j], &graph->node[i]);
1638 	if (f < 0 || f)
1639 		return f;
1640 	return graph_has_any_edge(graph, &graph->node[i], &graph->node[j]);
1641 }
1642 
1643 /* Check whether there is a (conditional) validity dependence from node[j]
1644  * to node[i], forcing node[i] to follow node[j].
1645  */
1646 static isl_bool node_follows_strong(int i, int j, void *user)
1647 {
1648 	struct isl_sched_graph *graph = user;
1649 
1650 	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
1651 }
1652 
1653 /* Use Tarjan's algorithm for computing the strongly connected components
1654  * in the dependence graph only considering those edges defined by "follows".
1655  */
1656 static isl_stat detect_ccs(isl_ctx *ctx, struct isl_sched_graph *graph,
1657 	isl_bool (*follows)(int i, int j, void *user))
1658 {
1659 	int i, n;
1660 	struct isl_tarjan_graph *g = NULL;
1661 
1662 	g = isl_tarjan_graph_init(ctx, graph->n, follows, graph);
1663 	if (!g)
1664 		return isl_stat_error;
1665 
1666 	graph->scc = 0;
1667 	i = 0;
1668 	n = graph->n;
1669 	while (n) {
1670 		while (g->order[i] != -1) {
1671 			graph->node[g->order[i]].scc = graph->scc;
1672 			--n;
1673 			++i;
1674 		}
1675 		++i;
1676 		graph->scc++;
1677 	}
1678 
1679 	isl_tarjan_graph_free(g);
1680 
1681 	return isl_stat_ok;
1682 }
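/* For illustration: the loop above relies on g->order listing the node
 * indices of each component, with each component terminated by -1.
 * If, say, g->order = { 2, 0, -1, 1, -1 } for a graph with three nodes,
 * then nodes 2 and 0 are assigned component index 0, node 1 is assigned
 * component index 1 and graph->scc ends up equal to 2,
 * the number of components.
 */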
1683 
1684 /* Apply Tarjan's algorithm to detect the strongly connected components
1685  * in the dependence graph.
1686  * Only consider the (conditional) validity dependences and clear "weak".
1687  */
1688 static isl_stat detect_sccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1689 {
1690 	graph->weak = 0;
1691 	return detect_ccs(ctx, graph, &node_follows_strong);
1692 }
1693 
1694 /* Apply Tarjan's algorithm to detect the (weakly) connected components
1695  * in the dependence graph.
1696  * Consider all dependences and set "weak".
1697  */
1698 static isl_stat detect_wccs(isl_ctx *ctx, struct isl_sched_graph *graph)
1699 {
1700 	graph->weak = 1;
1701 	return detect_ccs(ctx, graph, &node_follows_weak);
1702 }
1703 
1704 static int cmp_scc(const void *a, const void *b, void *data)
1705 {
1706 	struct isl_sched_graph *graph = data;
1707 	const int *i1 = a;
1708 	const int *i2 = b;
1709 
1710 	return graph->node[*i1].scc - graph->node[*i2].scc;
1711 }
1712 
1713 /* Sort the elements of graph->sorted according to the corresponding SCCs.
1714  */
1715 static int sort_sccs(struct isl_sched_graph *graph)
1716 {
1717 	return isl_sort(graph->sorted, graph->n, sizeof(int), &cmp_scc, graph);
1718 }
1719 
1720 /* Return a non-parametric set in the compressed space of "node" that is
1721  * bounded by the size in each direction
1722  *
1723  *	{ [x] : -S_i <= x_i <= S_i }
1724  *
1725  * If S_i is infinity in direction i, then there are no constraints
1726  * in that direction.
1727  *
1728  * Cache the result in node->bounds.
1729  */
1730 static __isl_give isl_basic_set *get_size_bounds(struct isl_sched_node *node)
1731 {
1732 	isl_space *space;
1733 	isl_basic_set *bounds;
1734 	int i;
1735 
1736 	if (node->bounds)
1737 		return isl_basic_set_copy(node->bounds);
1738 
1739 	if (node->compressed)
1740 		space = isl_pw_multi_aff_get_domain_space(node->decompress);
1741 	else
1742 		space = isl_space_copy(node->space);
1743 	space = isl_space_drop_all_params(space);
1744 	bounds = isl_basic_set_universe(space);
1745 
1746 	for (i = 0; i < node->nvar; ++i) {
1747 		isl_val *size;
1748 
1749 		size = isl_multi_val_get_val(node->sizes, i);
1750 		if (!size)
1751 			return isl_basic_set_free(bounds);
1752 		if (!isl_val_is_int(size)) {
1753 			isl_val_free(size);
1754 			continue;
1755 		}
1756 		bounds = isl_basic_set_upper_bound_val(bounds, isl_dim_set, i,
1757 							isl_val_copy(size));
1758 		bounds = isl_basic_set_lower_bound_val(bounds, isl_dim_set, i,
1759 							isl_val_neg(size));
1760 	}
1761 
1762 	node->bounds = isl_basic_set_copy(bounds);
1763 	return bounds;
1764 }
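/* For example, if node->sizes is (4, infinity) for a two-dimensional
 * (compressed) domain, then the cached bounds are
 *
 *	{ [x0, x1] : -4 <= x0 <= 4 }
 *
 * since the infinite size in the second direction is not an integer value
 * and therefore contributes no constraints.
 */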
1765 
1766 /* Compress the dependence relation "map", if needed, i.e.,
1767  * when the source node "src" and/or the destination node "dst"
1768  * has been compressed.
1769  */
1770 static __isl_give isl_map *compress(__isl_take isl_map *map,
1771 	struct isl_sched_node *src, struct isl_sched_node *dst)
1772 {
1773 	if (src->compressed)
1774 		map = isl_map_preimage_domain_pw_multi_aff(map,
1775 					isl_pw_multi_aff_copy(src->decompress));
1776 	if (dst->compressed)
1777 		map = isl_map_preimage_range_pw_multi_aff(map,
1778 					isl_pw_multi_aff_copy(dst->decompress));
1779 	return map;
1780 }
1781 
1782 /* Drop some constraints from "delta" that could be exploited
1783  * to construct loop coalescing schedules.
1784  * In particular, drop those constraints that bound the difference
1785  * to the size of the domain.
1786  * First project out the parameters to improve the effectiveness.
1787  */
1788 static __isl_give isl_set *drop_coalescing_constraints(
1789 	__isl_take isl_set *delta, struct isl_sched_node *node)
1790 {
1791 	isl_size nparam;
1792 	isl_basic_set *bounds;
1793 
1794 	nparam = isl_set_dim(delta, isl_dim_param);
1795 	if (nparam < 0)
1796 		return isl_set_free(delta);
1797 
1798 	bounds = get_size_bounds(node);
1799 
1800 	delta = isl_set_project_out(delta, isl_dim_param, 0, nparam);
1801 	delta = isl_set_remove_divs(delta);
1802 	delta = isl_set_plain_gist_basic_set(delta, bounds);
1803 	return delta;
1804 }
1805 
1806 /* Given a dependence relation R from "node" to itself,
1807  * construct the set of coefficients of valid constraints for elements
1808  * in that dependence relation.
1809  * In particular, the result contains tuples of coefficients
1810  * c_0, c_n, c_x such that
1811  *
1812  *	c_0 + c_n n + c_x y - c_x x >= 0 for each (x,y) in R
1813  *
1814  * or, equivalently,
1815  *
1816  *	c_0 + c_n n + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
1817  *
1818  * We choose here to compute the dual of delta R.
1819  * Alternatively, we could have computed the dual of R, resulting
1820  * in a set of tuples c_0, c_n, c_x, c_y, and then
1821  * plugged in (c_0, c_n, c_x, -c_x).
1822  *
1823  * If "need_param" is set, then the resulting coefficients effectively
1824  * include coefficients for the parameters c_n.  Otherwise, they may
1825  * have been projected out already.
1826  * Since the constraints may be different for these two cases,
1827  * they are stored in separate caches.
1828  * In particular, if no parameter coefficients are required and
1829  * the schedule_treat_coalescing option is set, then the parameters
1830  * are projected out and some constraints that could be exploited
1831  * to construct coalescing schedules are removed before the dual
1832  * is computed.
1833  *
1834  * If "node" has been compressed, then the dependence relation
1835  * is also compressed before the set of coefficients is computed.
1836  */
1837 static __isl_give isl_basic_set *intra_coefficients(
1838 	struct isl_sched_graph *graph, struct isl_sched_node *node,
1839 	__isl_take isl_map *map, int need_param)
1840 {
1841 	isl_ctx *ctx;
1842 	isl_set *delta;
1843 	isl_map *key;
1844 	isl_basic_set *coef;
1845 	isl_maybe_isl_basic_set m;
1846 	isl_map_to_basic_set **hmap = &graph->intra_hmap;
1847 	int treat;
1848 
1849 	if (!map)
1850 		return NULL;
1851 
1852 	ctx = isl_map_get_ctx(map);
1853 	treat = !need_param && isl_options_get_schedule_treat_coalescing(ctx);
1854 	if (!treat)
1855 		hmap = &graph->intra_hmap_param;
1856 	m = isl_map_to_basic_set_try_get(*hmap, map);
1857 	if (m.valid < 0 || m.valid) {
1858 		isl_map_free(map);
1859 		return m.value;
1860 	}
1861 
1862 	key = isl_map_copy(map);
1863 	map = compress(map, node, node);
1864 	delta = isl_map_deltas(map);
1865 	if (treat)
1866 		delta = drop_coalescing_constraints(delta, node);
1867 	delta = isl_set_remove_divs(delta);
1868 	coef = isl_set_coefficients(delta);
1869 	*hmap = isl_map_to_basic_set_set(*hmap, key, isl_basic_set_copy(coef));
1870 
1871 	return coef;
1872 }
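/* A small illustration of the dual computation above (ignoring parameters
 * and the coalescing treatment): for a self-dependence
 * R = { S[i] -> S[i + 1] }, delta R = { [1] }, so the valid constraints
 * c_0 + c_x d >= 0 over delta R are exactly those with c_0 + c_x >= 0,
 * and this single inequality describes the returned set of coefficients.
 */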
1873 
1874 /* Given a dependence relation R, construct the set of coefficients
1875  * of valid constraints for elements in that dependence relation.
1876  * In particular, the result contains tuples of coefficients
1877  * c_0, c_n, c_x, c_y such that
1878  *
1879  *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
1880  *
1881  * If the source or destination nodes of "edge" have been compressed,
1882  * then the dependence relation is also compressed before
1883  * the set of coefficients is computed.
1884  */
1885 static __isl_give isl_basic_set *inter_coefficients(
1886 	struct isl_sched_graph *graph, struct isl_sched_edge *edge,
1887 	__isl_take isl_map *map)
1888 {
1889 	isl_set *set;
1890 	isl_map *key;
1891 	isl_basic_set *coef;
1892 	isl_maybe_isl_basic_set m;
1893 
1894 	m = isl_map_to_basic_set_try_get(graph->inter_hmap, map);
1895 	if (m.valid < 0 || m.valid) {
1896 		isl_map_free(map);
1897 		return m.value;
1898 	}
1899 
1900 	key = isl_map_copy(map);
1901 	map = compress(map, edge->src, edge->dst);
1902 	set = isl_map_wrap(isl_map_remove_divs(map));
1903 	coef = isl_set_coefficients(set);
1904 	graph->inter_hmap = isl_map_to_basic_set_set(graph->inter_hmap, key,
1905 					isl_basic_set_copy(coef));
1906 
1907 	return coef;
1908 }
1909 
1910 /* Return the position of the coefficients of the variables in
1911  * the coefficients constraints "coef".
1912  *
1913  * The space of "coef" is of the form
1914  *
1915  *	{ coefficients[[cst, params] -> S] }
1916  *
1917  * Return the position of S.
1918  */
1919 static isl_size coef_var_offset(__isl_keep isl_basic_set *coef)
1920 {
1921 	isl_size offset;
1922 	isl_space *space;
1923 
1924 	space = isl_space_unwrap(isl_basic_set_get_space(coef));
1925 	offset = isl_space_dim(space, isl_dim_in);
1926 	isl_space_free(space);
1927 
1928 	return offset;
1929 }
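/* For instance, for a (compressed) node with 2 parameters, the space of
 * "coef" is of the form { coefficients[[cst, n1, n2] -> S] }, so the
 * coefficients of the variables start at position 3
 * (after c_0 and the two c_n).
 */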
1930 
1931 /* Return the offset of the coefficient of the constant term of "node"
1932  * within the (I)LP.
1933  *
1934  * Within each node, the coefficients have the following order:
1935  *	- positive and negative parts of c_i_x
1936  *	- c_i_n (if parametric)
1937  *	- c_i_0
1938  */
1939 static int node_cst_coef_offset(struct isl_sched_node *node)
1940 {
1941 	return node->start + 2 * node->nvar + node->nparam;
1942 }
1943 
1944 /* Return the offset of the coefficients of the parameters of "node"
1945  * within the (I)LP.
1946  *
1947  * Within each node, the coefficients have the following order:
1948  *	- positive and negative parts of c_i_x
1949  *	- c_i_n (if parametric)
1950  *	- c_i_0
1951  */
1952 static int node_par_coef_offset(struct isl_sched_node *node)
1953 {
1954 	return node->start + 2 * node->nvar;
1955 }
1956 
1957 /* Return the offset of the coefficients of the variables of "node"
1958  * within the (I)LP.
1959  *
1960  * Within each node, the coefficients have the following order:
1961  *	- positive and negative parts of c_i_x
1962  *	- c_i_n (if parametric)
1963  *	- c_i_0
1964  */
1965 static int node_var_coef_offset(struct isl_sched_node *node)
1966 {
1967 	return node->start;
1968 }
1969 
1970 /* Return the position of the pair of variables encoding
1971  * coefficient "i" of "node".
1972  *
1973  * The order of these variable pairs is the opposite of
1974  * that of the coefficients, with 2 variables per coefficient.
1975  */
1976 static int node_var_coef_pos(struct isl_sched_node *node, int i)
1977 {
1978 	return node_var_coef_offset(node) + 2 * (node->nvar - 1 - i);
1979 }
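/* Worked example of the layout implied by the offsets above:
 * for a node with start = s, nvar = 3 and nparam = 2, the LP variables are
 *
 *	s    , s + 1 : c_i_x_2^-, c_i_x_2^+
 *	s + 2, s + 3 : c_i_x_1^-, c_i_x_1^+
 *	s + 4, s + 5 : c_i_x_0^-, c_i_x_0^+
 *	s + 6, s + 7 : c_i_n_0, c_i_n_1
 *	s + 8        : c_i_0
 *
 * so that node_var_coef_pos(node, 0) = s + 4,
 * node_par_coef_offset(node) = s + 6 and
 * node_cst_coef_offset(node) = s + 8.
 */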
1980 
1981 /* Construct an isl_dim_map for mapping constraints on coefficients
1982  * for "node" to the corresponding positions in graph->lp.
1983  * "offset" is the offset of the coefficients for the variables
1984  * in the input constraints.
1985  * "s" is the sign of the mapping.
1986  *
1987  * The input constraints are given in terms of the coefficients
1988  * (c_0, c_x) or (c_0, c_n, c_x).
1989  * The mapping produced by this function essentially plugs in
1990  * (0, c_i_x^+ - c_i_x^-) if s = 1 and
1991  * (0, -c_i_x^+ + c_i_x^-) if s = -1 or
1992  * (0, 0, c_i_x^+ - c_i_x^-) if s = 1 and
1993  * (0, 0, -c_i_x^+ + c_i_x^-) if s = -1.
1994  * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
1995  * Furthermore, the order of these pairs is the opposite of that
1996  * of the corresponding coefficients.
1997  *
1998  * The caller can extend the mapping to also map the other coefficients
1999  * (and therefore not plug in 0).
2000  */
2001 static __isl_give isl_dim_map *intra_dim_map(isl_ctx *ctx,
2002 	struct isl_sched_graph *graph, struct isl_sched_node *node,
2003 	int offset, int s)
2004 {
2005 	int pos;
2006 	isl_size total;
2007 	isl_dim_map *dim_map;
2008 
2009 	total = isl_basic_set_dim(graph->lp, isl_dim_all);
2010 	if (!node || total < 0)
2011 		return NULL;
2012 
2013 	pos = node_var_coef_pos(node, 0);
2014 	dim_map = isl_dim_map_alloc(ctx, total);
2015 	isl_dim_map_range(dim_map, pos, -2, offset, 1, node->nvar, -s);
2016 	isl_dim_map_range(dim_map, pos + 1, -2, offset, 1, node->nvar, s);
2017 
2018 	return dim_map;
2019 }
2020 
2021 /* Construct an isl_dim_map for mapping constraints on coefficients
2022  * for "src" (node i) and "dst" (node j) to the corresponding positions
2023  * in graph->lp.
2024  * "offset" is the offset of the coefficients for the variables of "src"
2025  * in the input constraints.
2026  * "s" is the sign of the mapping.
2027  *
2028  * The input constraints are given in terms of the coefficients
2029  * (c_0, c_n, c_x, c_y).
2030  * The mapping produced by this function essentially plugs in
2031  * (c_j_0 - c_i_0, c_j_n - c_i_n,
2032  *  -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-) if s = 1 and
2033  * (-c_j_0 + c_i_0, -c_j_n + c_i_n,
2034  *  c_i_x^+ - c_i_x^-, -(c_j_x^+ - c_j_x^-)) if s = -1.
2035  * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
2036  * Furthermore, the order of these pairs is the opposite of that
2037  * of the corresponding coefficients.
2038  *
2039  * The caller can further extend the mapping.
2040  */
2041 static __isl_give isl_dim_map *inter_dim_map(isl_ctx *ctx,
2042 	struct isl_sched_graph *graph, struct isl_sched_node *src,
2043 	struct isl_sched_node *dst, int offset, int s)
2044 {
2045 	int pos;
2046 	isl_size total;
2047 	isl_dim_map *dim_map;
2048 
2049 	total = isl_basic_set_dim(graph->lp, isl_dim_all);
2050 	if (!src || !dst || total < 0)
2051 		return NULL;
2052 
2053 	dim_map = isl_dim_map_alloc(ctx, total);
2054 
2055 	pos = node_cst_coef_offset(dst);
2056 	isl_dim_map_range(dim_map, pos, 0, 0, 0, 1, s);
2057 	pos = node_par_coef_offset(dst);
2058 	isl_dim_map_range(dim_map, pos, 1, 1, 1, dst->nparam, s);
2059 	pos = node_var_coef_pos(dst, 0);
2060 	isl_dim_map_range(dim_map, pos, -2, offset + src->nvar, 1,
2061 			  dst->nvar, -s);
2062 	isl_dim_map_range(dim_map, pos + 1, -2, offset + src->nvar, 1,
2063 			  dst->nvar, s);
2064 
2065 	pos = node_cst_coef_offset(src);
2066 	isl_dim_map_range(dim_map, pos, 0, 0, 0, 1, -s);
2067 	pos = node_par_coef_offset(src);
2068 	isl_dim_map_range(dim_map, pos, 1, 1, 1, src->nparam, -s);
2069 	pos = node_var_coef_pos(src, 0);
2070 	isl_dim_map_range(dim_map, pos, -2, offset, 1, src->nvar, s);
2071 	isl_dim_map_range(dim_map, pos + 1, -2, offset, 1, src->nvar, -s);
2072 
2073 	return dim_map;
2074 }
2075 
2076 /* Add the constraints from "src" to "dst" using "dim_map",
2077  * after making sure there is enough room in "dst" for the extra constraints.
2078  */
2079 static __isl_give isl_basic_set *add_constraints_dim_map(
2080 	__isl_take isl_basic_set *dst, __isl_take isl_basic_set *src,
2081 	__isl_take isl_dim_map *dim_map)
2082 {
2083 	isl_size n_eq, n_ineq;
2084 
2085 	n_eq = isl_basic_set_n_equality(src);
2086 	n_ineq = isl_basic_set_n_inequality(src);
2087 	if (n_eq < 0 || n_ineq < 0)
2088 		dst = isl_basic_set_free(dst);
2089 	dst = isl_basic_set_extend_constraints(dst, n_eq, n_ineq);
2090 	dst = isl_basic_set_add_constraints_dim_map(dst, src, dim_map);
2091 	return dst;
2092 }
2093 
2094 /* Add constraints to graph->lp that force validity for the given
2095  * dependence from a node i to itself.
2096  * That is, add constraints that enforce
2097  *
2098  *	(c_i_0 + c_i_n n + c_i_x y) - (c_i_0 + c_i_n n + c_i_x x)
2099  *	= c_i_x (y - x) >= 0
2100  *
2101  * for each (x,y) in R.
2102  * We obtain general constraints on coefficients (c_0, c_x)
2103  * of valid constraints for (y - x) and then plug in (0, c_i_x^+ - c_i_x^-),
2104  * where c_i_x = c_i_x^+ - c_i_x^-, with c_i_x^+ and c_i_x^- non-negative.
2105  * In graph->lp, the c_i_x^- appear before their c_i_x^+ counterpart.
2106  * Note that the result of intra_coefficients may also contain
2107  * parameter coefficients c_n, in which case 0 is plugged in for them as well.
2108  */
2109 static isl_stat add_intra_validity_constraints(struct isl_sched_graph *graph,
2110 	struct isl_sched_edge *edge)
2111 {
2112 	isl_size offset;
2113 	isl_map *map = isl_map_copy(edge->map);
2114 	isl_ctx *ctx = isl_map_get_ctx(map);
2115 	isl_dim_map *dim_map;
2116 	isl_basic_set *coef;
2117 	struct isl_sched_node *node = edge->src;
2118 
2119 	coef = intra_coefficients(graph, node, map, 0);
2120 
2121 	offset = coef_var_offset(coef);
2122 	if (offset < 0)
2123 		coef = isl_basic_set_free(coef);
2124 	if (!coef)
2125 		return isl_stat_error;
2126 
2127 	dim_map = intra_dim_map(ctx, graph, node, offset, 1);
2128 	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2129 
2130 	return isl_stat_ok;
2131 }
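/* Continuing the illustration from intra_coefficients: for the dependence
 * R = { S[i] -> S[i + 1] }, the coefficient constraint c_0 + c_x >= 0
 * turns, after plugging in (0, c_i_x^+ - c_i_x^-), into the LP inequality
 *
 *	c_i_x^+ - c_i_x^- >= 0
 *
 * which is precisely c_i_x (y - x) >= 0 for the distance y - x = 1.
 */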
2132 
2133 /* Add constraints to graph->lp that force validity for the given
2134  * dependence from node i to node j.
2135  * That is, add constraints that enforce
2136  *
2137  *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) >= 0
2138  *
2139  * for each (x,y) in R.
2140  * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2141  * of valid constraints for R and then plug in
2142  * (c_j_0 - c_i_0, c_j_n - c_i_n, -(c_i_x^+ - c_i_x^-), c_j_x^+ - c_j_x^-),
2143  * where c_* = c_*^+ - c_*^-, with c_*^+ and c_*^- non-negative.
2144  * In graph->lp, the c_*^- appear before their c_*^+ counterpart.
2145  */
2146 static isl_stat add_inter_validity_constraints(struct isl_sched_graph *graph,
2147 	struct isl_sched_edge *edge)
2148 {
2149 	isl_size offset;
2150 	isl_map *map;
2151 	isl_ctx *ctx;
2152 	isl_dim_map *dim_map;
2153 	isl_basic_set *coef;
2154 	struct isl_sched_node *src = edge->src;
2155 	struct isl_sched_node *dst = edge->dst;
2156 
2157 	if (!graph->lp)
2158 		return isl_stat_error;
2159 
2160 	map = isl_map_copy(edge->map);
2161 	ctx = isl_map_get_ctx(map);
2162 	coef = inter_coefficients(graph, edge, map);
2163 
2164 	offset = coef_var_offset(coef);
2165 	if (offset < 0)
2166 		coef = isl_basic_set_free(coef);
2167 	if (!coef)
2168 		return isl_stat_error;
2169 
2170 	dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
2171 
2172 	edge->start = graph->lp->n_ineq;
2173 	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2174 	if (!graph->lp)
2175 		return isl_stat_error;
2176 	edge->end = graph->lp->n_ineq;
2177 
2178 	return isl_stat_ok;
2179 }
2180 
2181 /* Add constraints to graph->lp that bound the dependence distance for the given
2182  * dependence from a node i to itself.
2183  * If s = 1, we add the constraint
2184  *
2185  *	c_i_x (y - x) <= m_0 + m_n n
2186  *
2187  * or
2188  *
2189  *	-c_i_x (y - x) + m_0 + m_n n >= 0
2190  *
2191  * for each (x,y) in R.
2192  * If s = -1, we add the constraint
2193  *
2194  *	-c_i_x (y - x) <= m_0 + m_n n
2195  *
2196  * or
2197  *
2198  *	c_i_x (y - x) + m_0 + m_n n >= 0
2199  *
2200  * for each (x,y) in R.
2201  * We obtain general constraints on coefficients (c_0, c_n, c_x)
2202  * of valid constraints for (y - x) and then plug in (m_0, m_n, -s * c_i_x),
2203  * with each coefficient (except m_0) represented as a pair of non-negative
2204  * coefficients.
2205  *
2206  *
2207  * If "local" is set, then we add constraints
2208  *
2209  *	c_i_x (y - x) <= 0
2210  *
2211  * or
2212  *
2213  *	-c_i_x (y - x) <= 0
2214  *
2215  * instead, forcing the dependence distance to be (less than or) equal to 0.
2216  * That is, we plug in (0, 0, -s * c_i_x),
2217  * intra_coefficients is not required to have c_n in its result when
2218  * "local" is set.  If they are missing, then (0, -s * c_i_x) is plugged in.
2219  * Note that dependences marked local are treated as validity constraints
2220  * by add_all_validity_constraints and therefore also have
2221  * their distances bounded by 0 from below.
2222  */
2223 static isl_stat add_intra_proximity_constraints(struct isl_sched_graph *graph,
2224 	struct isl_sched_edge *edge, int s, int local)
2225 {
2226 	isl_size offset;
2227 	isl_size nparam;
2228 	isl_map *map = isl_map_copy(edge->map);
2229 	isl_ctx *ctx = isl_map_get_ctx(map);
2230 	isl_dim_map *dim_map;
2231 	isl_basic_set *coef;
2232 	struct isl_sched_node *node = edge->src;
2233 
2234 	coef = intra_coefficients(graph, node, map, !local);
2235 	nparam = isl_space_dim(node->space, isl_dim_param);
2236 
2237 	offset = coef_var_offset(coef);
2238 	if (nparam < 0 || offset < 0)
2239 		coef = isl_basic_set_free(coef);
2240 	if (!coef)
2241 		return isl_stat_error;
2242 
2243 	dim_map = intra_dim_map(ctx, graph, node, offset, -s);
2244 
2245 	if (!local) {
2246 		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2247 		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2248 		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2249 	}
2250 	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2251 
2252 	return isl_stat_ok;
2253 }
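/* Continuing the same illustration for s = 1 with "local" not set:
 * for R = { S[i] -> S[i + 1] }, the coefficient constraint c_0 + c_x >= 0
 * becomes, after plugging in (m_0, m_n, -c_i_x), the LP inequality
 *
 *	m_0 - (c_i_x^+ - c_i_x^-) >= 0
 *
 * i.e., the dependence distance c_i_x * 1 is bounded by m_0
 * (plus m_n n, if any parameters were involved).
 */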
2254 
2255 /* Add constraints to graph->lp that bound the dependence distance for the given
2256  * dependence from node i to node j.
2257  * If s = 1, we add the constraint
2258  *
2259  *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x)
2260  *		<= m_0 + m_n n
2261  *
2262  * or
2263  *
2264  *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) +
2265  *		m_0 + m_n n >= 0
2266  *
2267  * for each (x,y) in R.
2268  * If s = -1, we add the constraint
2269  *
2270  *	-((c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x))
2271  *		<= m_0 + m_n n
2272  *
2273  * or
2274  *
2275  *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) +
2276  *		m_0 + m_n n >= 0
2277  *
2278  * for each (x,y) in R.
2279  * We obtain general constraints on coefficients (c_0, c_n, c_x, c_y)
2280  * of valid constraints for R and then plug in
2281  * (m_0 - s*c_j_0 + s*c_i_0, m_n - s*c_j_n + s*c_i_n,
2282  *  s*c_i_x, -s*c_j_x)
2283  * with each coefficient (except m_0, c_*_0 and c_*_n)
2284  * represented as a pair of non-negative coefficients.
2285  *
2286  *
2287  * If "local" is set (and s = 1), then we add constraints
2288  *
2289  *	(c_j_0 + c_j_n n + c_j_x y) - (c_i_0 + c_i_n n + c_i_x x) <= 0
2290  *
2291  * or
2292  *
2293  *	-(c_j_0 + c_j_n n + c_j_x y) + (c_i_0 + c_i_n n + c_i_x x) >= 0
2294  *
2295  * instead, forcing the dependence distance to be (less than or) equal to 0.
2296  * That is, we plug in
2297  * (-s*c_j_0 + s*c_i_0, -s*c_j_n + s*c_i_n, s*c_i_x, -s*c_j_x).
2298  * Note that dependences marked local are treated as validity constraints
2299  * by add_all_validity_constraints and therefore also have
2300  * their distances bounded by 0 from below.
2301  */
2302 static isl_stat add_inter_proximity_constraints(struct isl_sched_graph *graph,
2303 	struct isl_sched_edge *edge, int s, int local)
2304 {
2305 	isl_size offset;
2306 	isl_size nparam;
2307 	isl_map *map = isl_map_copy(edge->map);
2308 	isl_ctx *ctx = isl_map_get_ctx(map);
2309 	isl_dim_map *dim_map;
2310 	isl_basic_set *coef;
2311 	struct isl_sched_node *src = edge->src;
2312 	struct isl_sched_node *dst = edge->dst;
2313 
2314 	coef = inter_coefficients(graph, edge, map);
2315 	nparam = isl_space_dim(src->space, isl_dim_param);
2316 
2317 	offset = coef_var_offset(coef);
2318 	if (nparam < 0 || offset < 0)
2319 		coef = isl_basic_set_free(coef);
2320 	if (!coef)
2321 		return isl_stat_error;
2322 
2323 	dim_map = inter_dim_map(ctx, graph, src, dst, offset, -s);
2324 
2325 	if (!local) {
2326 		isl_dim_map_range(dim_map, 1, 0, 0, 0, 1, 1);
2327 		isl_dim_map_range(dim_map, 4, 2, 1, 1, nparam, -1);
2328 		isl_dim_map_range(dim_map, 5, 2, 1, 1, nparam, 1);
2329 	}
2330 
2331 	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
2332 
2333 	return isl_stat_ok;
2334 }
2335 
2336 /* Should the distance over "edge" be forced to zero?
2337  * That is, is it marked as a local edge?
2338  * If "use_coincidence" is set, then coincidence edges are treated
2339  * as local edges.
2340  */
2341 static int force_zero(struct isl_sched_edge *edge, int use_coincidence)
2342 {
2343 	return is_local(edge) || (use_coincidence && is_coincidence(edge));
2344 }
2345 
2346 /* Add all validity constraints to graph->lp.
2347  *
2348  * An edge that is forced to be local needs to have its dependence
2349  * distances equal to zero.  We take care of bounding them by 0 from below
2350  * here.  add_all_proximity_constraints takes care of bounding them by 0
2351  * from above.
2352  *
2353  * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2354  * Otherwise, we ignore them.
2355  */
2356 static int add_all_validity_constraints(struct isl_sched_graph *graph,
2357 	int use_coincidence)
2358 {
2359 	int i;
2360 
2361 	for (i = 0; i < graph->n_edge; ++i) {
2362 		struct isl_sched_edge *edge = &graph->edge[i];
2363 		int zero;
2364 
2365 		zero = force_zero(edge, use_coincidence);
2366 		if (!is_validity(edge) && !zero)
2367 			continue;
2368 		if (edge->src != edge->dst)
2369 			continue;
2370 		if (add_intra_validity_constraints(graph, edge) < 0)
2371 			return -1;
2372 	}
2373 
2374 	for (i = 0; i < graph->n_edge; ++i) {
2375 		struct isl_sched_edge *edge = &graph->edge[i];
2376 		int zero;
2377 
2378 		zero = force_zero(edge, use_coincidence);
2379 		if (!is_validity(edge) && !zero)
2380 			continue;
2381 		if (edge->src == edge->dst)
2382 			continue;
2383 		if (add_inter_validity_constraints(graph, edge) < 0)
2384 			return -1;
2385 	}
2386 
2387 	return 0;
2388 }
2389 
2390 /* Add constraints to graph->lp that bound the dependence distance
2391  * for all dependence relations.
2392  * If a given proximity dependence is identical to a validity
2393  * dependence, then the dependence distance is already bounded
2394  * from below (by zero), so we only need to bound the distance
2395  * from above.  (This includes the case of "local" dependences
2396  * which are treated as validity dependence by add_all_validity_constraints.)
2397  * Otherwise, we need to bound the distance both from above and from below.
2398  *
2399  * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2400  * Otherwise, we ignore them.
2401  */
2402 static int add_all_proximity_constraints(struct isl_sched_graph *graph,
2403 	int use_coincidence)
2404 {
2405 	int i;
2406 
2407 	for (i = 0; i < graph->n_edge; ++i) {
2408 		struct isl_sched_edge *edge = &graph->edge[i];
2409 		int zero;
2410 
2411 		zero = force_zero(edge, use_coincidence);
2412 		if (!is_proximity(edge) && !zero)
2413 			continue;
2414 		if (edge->src == edge->dst &&
2415 		    add_intra_proximity_constraints(graph, edge, 1, zero) < 0)
2416 			return -1;
2417 		if (edge->src != edge->dst &&
2418 		    add_inter_proximity_constraints(graph, edge, 1, zero) < 0)
2419 			return -1;
2420 		if (is_validity(edge) || zero)
2421 			continue;
2422 		if (edge->src == edge->dst &&
2423 		    add_intra_proximity_constraints(graph, edge, -1, 0) < 0)
2424 			return -1;
2425 		if (edge->src != edge->dst &&
2426 		    add_inter_proximity_constraints(graph, edge, -1, 0) < 0)
2427 			return -1;
2428 	}
2429 
2430 	return 0;
2431 }
2432 
2433 /* Normalize the rows of "indep" such that all rows are lexicographically
2434  * positive and such that each row contains as many final zeros as possible,
2435  * given the choice for the previous rows.
2436  * Do this by performing elementary row operations.
2437  */
2438 static __isl_give isl_mat *normalize_independent(__isl_take isl_mat *indep)
2439 {
2440 	indep = isl_mat_reverse_gauss(indep);
2441 	indep = isl_mat_lexnonneg_rows(indep);
2442 	return indep;
2443 }
2444 
2445 /* Extract the linear part of the current schedule for node "node".
2446  */
2447 static __isl_give isl_mat *extract_linear_schedule(struct isl_sched_node *node)
2448 {
2449 	isl_size n_row = isl_mat_rows(node->sched);
2450 
2451 	if (n_row < 0)
2452 		return NULL;
2453 	return isl_mat_sub_alloc(node->sched, 0, n_row,
2454 			      1 + node->nparam, node->nvar);
2455 }
2456 
2457 /* Compute a basis for the rows in the linear part of the schedule
2458  * and extend this basis to a full basis.  The remaining rows
2459  * can then be used to force linear independence from the rows
2460  * in the schedule.
2461  *
2462  * In particular, given the schedule rows S, we compute
2463  *
2464  *	S   = H Q
2465  *	S U = H
2466  *
2467  * with H the Hermite normal form of S.  That is, all but the
2468  * first rank columns of H are zero and so each row in S is
2469  * a linear combination of the first rank rows of Q.
2470  * The matrix Q can be used as a variable transformation
2471  * that isolates the directions of S in the first rank rows.
2472  * Transposing S U = H yields
2473  *
2474  *	U^T S^T = H^T
2475  *
2476  * with all but the first rank rows of H^T zero.
2477  * The last rows of U^T are therefore linear combinations
2478  * of schedule coefficients that are all zero on schedule
2479  * coefficients that are linearly dependent on the rows of S.
2480  * At least one of these combinations is non-zero on
2481  * linearly independent schedule coefficients.
2482  * The rows are normalized to involve as few of the last
2483  * coefficients as possible and to have a positive initial value.
2484  */
2485 static int node_update_vmap(struct isl_sched_node *node)
2486 {
2487 	isl_mat *H, *U, *Q;
2488 
2489 	H = extract_linear_schedule(node);
2490 
2491 	H = isl_mat_left_hermite(H, 0, &U, &Q);
2492 	isl_mat_free(node->indep);
2493 	isl_mat_free(node->vmap);
2494 	node->vmap = Q;
2495 	node->indep = isl_mat_transpose(U);
2496 	node->rank = isl_mat_initial_non_zero_cols(H);
2497 	node->indep = isl_mat_drop_rows(node->indep, 0, node->rank);
2498 	node->indep = normalize_independent(node->indep);
2499 	isl_mat_free(H);
2500 
2501 	if (!node->indep || !node->vmap || node->rank < 0)
2502 		return -1;
2503 	return 0;
2504 }
2505 
2506 /* Is "edge" marked as a validity or a conditional validity edge?
2507  */
2508 static int is_any_validity(struct isl_sched_edge *edge)
2509 {
2510 	return is_validity(edge) || is_conditional_validity(edge);
2511 }
2512 
2513 /* How many times should we count the constraints in "edge"?
2514  *
2515  * We count as follows
2516  * validity		-> 1 (>= 0)
2517  * validity+proximity	-> 2 (>= 0 and upper bound)
2518  * proximity		-> 2 (lower and upper bound)
2519  * local(+any)		-> 2 (>= 0 and <= 0)
2520  *
2521  * If an edge is only marked conditional_validity then it counts
2522  * as zero since it is only checked afterwards.
2523  *
2524  * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2525  * Otherwise, we ignore them.
2526  */
2527 static int edge_multiplicity(struct isl_sched_edge *edge, int use_coincidence)
2528 {
2529 	if (is_proximity(edge) || force_zero(edge, use_coincidence))
2530 		return 2;
2531 	if (is_validity(edge))
2532 		return 1;
2533 	return 0;
2534 }
2535 
2536 /* How many times should the constraints in "edge" be counted
2537  * as a parametric intra-node constraint?
2538  *
2539  * Only proximity edges that are not forced zero need
2540  * coefficient constraints that include coefficients for parameters.
2541  * If the edge is also a validity edge, then only
2542  * an upper bound is introduced.  Otherwise, both lower and upper bounds
2543  * are introduced.
2544  */
2545 static int parametric_intra_edge_multiplicity(struct isl_sched_edge *edge,
2546 	int use_coincidence)
2547 {
2548 	if (edge->src != edge->dst)
2549 		return 0;
2550 	if (!is_proximity(edge))
2551 		return 0;
2552 	if (force_zero(edge, use_coincidence))
2553 		return 0;
2554 	if (is_validity(edge))
2555 		return 1;
2556 	else
2557 		return 2;
2558 }
2559 
2560 /* Add "f" times the number of equality and inequality constraints of "bset"
2561  * to "n_eq" and "n_ineq" and free "bset".
2562  */
2563 static isl_stat update_count(__isl_take isl_basic_set *bset,
2564 	int f, int *n_eq, int *n_ineq)
2565 {
2566 	isl_size eq, ineq;
2567 
2568 	eq = isl_basic_set_n_equality(bset);
2569 	ineq = isl_basic_set_n_inequality(bset);
2570 	isl_basic_set_free(bset);
2571 
2572 	if (eq < 0 || ineq < 0)
2573 		return isl_stat_error;
2574 
2575 	*n_eq += eq;
2576 	*n_ineq += ineq;
2577 
2578 	return isl_stat_ok;
2579 }
2580 
2581 /* Count the number of equality and inequality constraints
2582  * that will be added for the given map.
2583  *
2584  * The edges that require parameter coefficients are counted separately.
2585  *
2586  * "use_coincidence" is set if we should take into account coincidence edges.
2587  */
2588 static isl_stat count_map_constraints(struct isl_sched_graph *graph,
2589 	struct isl_sched_edge *edge, __isl_take isl_map *map,
2590 	int *n_eq, int *n_ineq, int use_coincidence)
2591 {
2592 	isl_map *copy;
2593 	isl_basic_set *coef;
2594 	int f = edge_multiplicity(edge, use_coincidence);
2595 	int fp = parametric_intra_edge_multiplicity(edge, use_coincidence);
2596 
2597 	if (f == 0) {
2598 		isl_map_free(map);
2599 		return isl_stat_ok;
2600 	}
2601 
2602 	if (edge->src != edge->dst) {
2603 		coef = inter_coefficients(graph, edge, map);
2604 		return update_count(coef, f, n_eq, n_ineq);
2605 	}
2606 
2607 	if (fp > 0) {
2608 		copy = isl_map_copy(map);
2609 		coef = intra_coefficients(graph, edge->src, copy, 1);
2610 		if (update_count(coef, fp, n_eq, n_ineq) < 0)
2611 			goto error;
2612 	}
2613 
2614 	if (f > fp) {
2615 		copy = isl_map_copy(map);
2616 		coef = intra_coefficients(graph, edge->src, copy, 0);
2617 		if (update_count(coef, f - fp, n_eq, n_ineq) < 0)
2618 			goto error;
2619 	}
2620 
2621 	isl_map_free(map);
2622 	return isl_stat_ok;
2623 error:
2624 	isl_map_free(map);
2625 	return isl_stat_error;
2626 }
2627 
2628 /* Count the number of equality and inequality constraints
2629  * that will be added to the main lp problem.
2630  * We count as follows
2631  * validity		-> 1 (>= 0)
2632  * validity+proximity	-> 2 (>= 0 and upper bound)
2633  * proximity		-> 2 (lower and upper bound)
2634  * local(+any)		-> 2 (>= 0 and <= 0)
2635  *
2636  * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2637  * Otherwise, we ignore them.
2638  */
2639 static int count_constraints(struct isl_sched_graph *graph,
2640 	int *n_eq, int *n_ineq, int use_coincidence)
2641 {
2642 	int i;
2643 
2644 	*n_eq = *n_ineq = 0;
2645 	for (i = 0; i < graph->n_edge; ++i) {
2646 		struct isl_sched_edge *edge = &graph->edge[i];
2647 		isl_map *map = isl_map_copy(edge->map);
2648 
2649 		if (count_map_constraints(graph, edge, map, n_eq, n_ineq,
2650 					    use_coincidence) < 0)
2651 			return -1;
2652 	}
2653 
2654 	return 0;
2655 }
2656 
2657 /* Count the number of constraints that will be added by
2658  * add_bound_constant_constraints to bound the values of the constant terms
2659  * and increment *n_eq and *n_ineq accordingly.
2660  *
2661  * In practice, add_bound_constant_constraints only adds inequalities.
2662  */
2663 static isl_stat count_bound_constant_constraints(isl_ctx *ctx,
2664 	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2665 {
2666 	if (isl_options_get_schedule_max_constant_term(ctx) == -1)
2667 		return isl_stat_ok;
2668 
2669 	*n_ineq += graph->n;
2670 
2671 	return isl_stat_ok;
2672 }
2673 
2674 /* Add constraints to bound the values of the constant terms in the schedule,
2675  * if requested by the user.
2676  *
2677  * The maximal value of the constant terms is defined by the option
2678  * "schedule_max_constant_term".
2679  */
2680 static isl_stat add_bound_constant_constraints(isl_ctx *ctx,
2681 	struct isl_sched_graph *graph)
2682 {
2683 	int i, k;
2684 	int max;
2685 	isl_size total;
2686 
2687 	max = isl_options_get_schedule_max_constant_term(ctx);
2688 	if (max == -1)
2689 		return isl_stat_ok;
2690 
2691 	total = isl_basic_set_dim(graph->lp, isl_dim_set);
2692 	if (total < 0)
2693 		return isl_stat_error;
2694 
2695 	for (i = 0; i < graph->n; ++i) {
2696 		struct isl_sched_node *node = &graph->node[i];
2697 		int pos;
2698 
2699 		k = isl_basic_set_alloc_inequality(graph->lp);
2700 		if (k < 0)
2701 			return isl_stat_error;
2702 		isl_seq_clr(graph->lp->ineq[k], 1 + total);
2703 		pos = node_cst_coef_offset(node);
2704 		isl_int_set_si(graph->lp->ineq[k][1 + pos], -1);
2705 		isl_int_set_si(graph->lp->ineq[k][0], max);
2706 	}
2707 
2708 	return isl_stat_ok;
2709 }
2710 
2711 /* Count the number of constraints that will be added by
2712  * add_bound_coefficient_constraints and increment *n_eq and *n_ineq
2713  * accordingly.
2714  *
2715  * In practice, add_bound_coefficient_constraints only adds inequalities.
2716  */
2717 static int count_bound_coefficient_constraints(isl_ctx *ctx,
2718 	struct isl_sched_graph *graph, int *n_eq, int *n_ineq)
2719 {
2720 	int i;
2721 
2722 	if (isl_options_get_schedule_max_coefficient(ctx) == -1 &&
2723 	    !isl_options_get_schedule_treat_coalescing(ctx))
2724 		return 0;
2725 
2726 	for (i = 0; i < graph->n; ++i)
2727 		*n_ineq += graph->node[i].nparam + 2 * graph->node[i].nvar;
2728 
2729 	return 0;
2730 }
2731 
2732 /* Add constraints to graph->lp that bound the values of
2733  * the parameter schedule coefficients of "node" to "max" and
2734  * the variable schedule coefficients to the corresponding entry
2735  * in node->max.
2736  * In either case, a negative value means that no bound needs to be imposed.
2737  *
2738  * For parameter coefficients, this amounts to adding a constraint
2739  *
2740  *	c_n <= max
2741  *
2742  * i.e.,
2743  *
2744  *	-c_n + max >= 0
2745  *
2746  * The variable coefficients are, however, not represented directly.
2747  * Instead, the variable coefficients c_x are written as differences
2748  * c_x = c_x^+ - c_x^-.
2749  * That is,
2750  *
2751  *	-max_i <= c_x_i <= max_i
2752  *
2753  * is encoded as
2754  *
2755  *	-max_i <= c_x_i^+ - c_x_i^- <= max_i
2756  *
2757  * or
2758  *
2759  *	-(c_x_i^+ - c_x_i^-) + max_i >= 0
2760  *	c_x_i^+ - c_x_i^- + max_i >= 0
2761  */
2762 static isl_stat node_add_coefficient_constraints(isl_ctx *ctx,
2763 	struct isl_sched_graph *graph, struct isl_sched_node *node, int max)
2764 {
2765 	int i, j, k;
2766 	isl_size total;
2767 	isl_vec *ineq;
2768 
2769 	total = isl_basic_set_dim(graph->lp, isl_dim_set);
2770 	if (total < 0)
2771 		return isl_stat_error;
2772 
2773 	for (j = 0; j < node->nparam; ++j) {
2774 		int dim;
2775 
2776 		if (max < 0)
2777 			continue;
2778 
2779 		k = isl_basic_set_alloc_inequality(graph->lp);
2780 		if (k < 0)
2781 			return isl_stat_error;
2782 		dim = 1 + node_par_coef_offset(node) + j;
2783 		isl_seq_clr(graph->lp->ineq[k], 1 + total);
2784 		isl_int_set_si(graph->lp->ineq[k][dim], -1);
2785 		isl_int_set_si(graph->lp->ineq[k][0], max);
2786 	}
2787 
2788 	ineq = isl_vec_alloc(ctx, 1 + total);
2789 	ineq = isl_vec_clr(ineq);
2790 	if (!ineq)
2791 		return isl_stat_error;
2792 	for (i = 0; i < node->nvar; ++i) {
2793 		int pos = 1 + node_var_coef_pos(node, i);
2794 
2795 		if (isl_int_is_neg(node->max->el[i]))
2796 			continue;
2797 
2798 		isl_int_set_si(ineq->el[pos], 1);
2799 		isl_int_set_si(ineq->el[pos + 1], -1);
2800 		isl_int_set(ineq->el[0], node->max->el[i]);
2801 
2802 		k = isl_basic_set_alloc_inequality(graph->lp);
2803 		if (k < 0)
2804 			goto error;
2805 		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
2806 
2807 		isl_seq_neg(ineq->el + pos, ineq->el + pos, 2);
2808 		k = isl_basic_set_alloc_inequality(graph->lp);
2809 		if (k < 0)
2810 			goto error;
2811 		isl_seq_cpy(graph->lp->ineq[k], ineq->el, 1 + total);
2812 
2813 		isl_seq_clr(ineq->el + pos, 2);
2814 	}
2815 	isl_vec_free(ineq);
2816 
2817 	return isl_stat_ok;
2818 error:
2819 	isl_vec_free(ineq);
2820 	return isl_stat_error;
2821 }
2822 
2823 /* Add constraints that bound the values of the variable and parameter
2824  * coefficients of the schedule.
2825  *
2826  * The maximal value of the coefficients is defined by the option
2827  * 'schedule_max_coefficient' and the entries in node->max.
2828  * These latter entries are only set if either the schedule_max_coefficient
2829  * option or the schedule_treat_coalescing option is set.
2830  */
2831 static isl_stat add_bound_coefficient_constraints(isl_ctx *ctx,
2832 	struct isl_sched_graph *graph)
2833 {
2834 	int i;
2835 	int max;
2836 
2837 	max = isl_options_get_schedule_max_coefficient(ctx);
2838 
2839 	if (max == -1 && !isl_options_get_schedule_treat_coalescing(ctx))
2840 		return isl_stat_ok;
2841 
2842 	for (i = 0; i < graph->n; ++i) {
2843 		struct isl_sched_node *node = &graph->node[i];
2844 
2845 		if (node_add_coefficient_constraints(ctx, graph, node, max) < 0)
2846 			return isl_stat_error;
2847 	}
2848 
2849 	return isl_stat_ok;
2850 }
2851 
2852 /* Add a constraint to graph->lp that equates the value at position
2853  * "sum_pos" to the sum of the "n" values starting at "first".
2854  */
2855 static isl_stat add_sum_constraint(struct isl_sched_graph *graph,
2856 	int sum_pos, int first, int n)
2857 {
2858 	int i, k;
2859 	isl_size total;
2860 
2861 	total = isl_basic_set_dim(graph->lp, isl_dim_set);
2862 	if (total < 0)
2863 		return isl_stat_error;
2864 
2865 	k = isl_basic_set_alloc_equality(graph->lp);
2866 	if (k < 0)
2867 		return isl_stat_error;
2868 	isl_seq_clr(graph->lp->eq[k], 1 + total);
2869 	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2870 	for (i = 0; i < n; ++i)
2871 		isl_int_set_si(graph->lp->eq[k][1 + first + i], 1);
2872 
2873 	return isl_stat_ok;
2874 }
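/* For example, add_sum_constraint(graph, 0, 4, 2 * nparam)
 * (as called from setup_lp below) adds the equality
 *
 *	-x_0 + x_4 + x_5 + ... + x_{3 + 2 * nparam} = 0
 *
 * equating LP variable 0 to the sum of the m_n parts.
 */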
2875 
2876 /* Add a constraint to graph->lp that equates the value at position
2877  * "sum_pos" to the sum of the parameter coefficients of all nodes.
2878  */
2879 static isl_stat add_param_sum_constraint(struct isl_sched_graph *graph,
2880 	int sum_pos)
2881 {
2882 	int i, j, k;
2883 	isl_size total;
2884 
2885 	total = isl_basic_set_dim(graph->lp, isl_dim_set);
2886 	if (total < 0)
2887 		return isl_stat_error;
2888 
2889 	k = isl_basic_set_alloc_equality(graph->lp);
2890 	if (k < 0)
2891 		return isl_stat_error;
2892 	isl_seq_clr(graph->lp->eq[k], 1 + total);
2893 	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2894 	for (i = 0; i < graph->n; ++i) {
2895 		int pos = 1 + node_par_coef_offset(&graph->node[i]);
2896 
2897 		for (j = 0; j < graph->node[i].nparam; ++j)
2898 			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2899 	}
2900 
2901 	return isl_stat_ok;
2902 }
2903 
2904 /* Add a constraint to graph->lp that equates the value at position
2905  * "sum_pos" to the sum of the variable coefficients of all nodes.
2906  */
2907 static isl_stat add_var_sum_constraint(struct isl_sched_graph *graph,
2908 	int sum_pos)
2909 {
2910 	int i, j, k;
2911 	isl_size total;
2912 
2913 	total = isl_basic_set_dim(graph->lp, isl_dim_set);
2914 	if (total < 0)
2915 		return isl_stat_error;
2916 
2917 	k = isl_basic_set_alloc_equality(graph->lp);
2918 	if (k < 0)
2919 		return isl_stat_error;
2920 	isl_seq_clr(graph->lp->eq[k], 1 + total);
2921 	isl_int_set_si(graph->lp->eq[k][1 + sum_pos], -1);
2922 	for (i = 0; i < graph->n; ++i) {
2923 		struct isl_sched_node *node = &graph->node[i];
2924 		int pos = 1 + node_var_coef_offset(node);
2925 
2926 		for (j = 0; j < 2 * node->nvar; ++j)
2927 			isl_int_set_si(graph->lp->eq[k][pos + j], 1);
2928 	}
2929 
2930 	return isl_stat_ok;
2931 }
2932 
2933 /* Construct an ILP problem for finding schedule coefficients
2934  * that result in non-negative, but small dependence distances
2935  * over all dependences.
2936  * In particular, the dependence distances over proximity edges
2937  * are bounded by m_0 + m_n n and we compute schedule coefficients
2938  * with small values (preferably zero) of m_n and m_0.
2939  *
2940  * All variables of the ILP are non-negative.  The actual coefficients
2941  * may be negative, so each coefficient is represented as the difference
2942  * of two non-negative variables.  The negative part always appears
2943  * immediately before the positive part.
2944  * Other than that, the variables have the following order
2945  *
2946  *	- sum of positive and negative parts of m_n coefficients
2947  *	- m_0
2948  *	- sum of all c_n coefficients
2949  *		(unconstrained when computing non-parametric schedules)
2950  *	- sum of positive and negative parts of all c_x coefficients
2951  *	- positive and negative parts of m_n coefficients
2952  *	- for each node
2953  *		- positive and negative parts of c_i_x, in opposite order
2954  *		- c_i_n (if parametric)
2955  *		- c_i_0
2956  *
2957  * The constraints are those from the edges plus two or three equalities
2958  * to express the sums.
2959  *
2960  * If "use_coincidence" is set, then we treat coincidence edges as local edges.
2961  * Otherwise, we ignore them.
2962  */
2963 static isl_stat setup_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
2964 	int use_coincidence)
2965 {
2966 	int i;
2967 	isl_size nparam;
2968 	unsigned total;
2969 	isl_space *space;
2970 	int parametric;
2971 	int param_pos;
2972 	int n_eq, n_ineq;
2973 
2974 	parametric = ctx->opt->schedule_parametric;
2975 	nparam = isl_space_dim(graph->node[0].space, isl_dim_param);
2976 	if (nparam < 0)
2977 		return isl_stat_error;
2978 	param_pos = 4;
2979 	total = param_pos + 2 * nparam;
2980 	for (i = 0; i < graph->n; ++i) {
2981 		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
2982 		if (node_update_vmap(node) < 0)
2983 			return isl_stat_error;
2984 		node->start = total;
2985 		total += 1 + node->nparam + 2 * node->nvar;
2986 	}
2987 
2988 	if (count_constraints(graph, &n_eq, &n_ineq, use_coincidence) < 0)
2989 		return isl_stat_error;
2990 	if (count_bound_constant_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2991 		return isl_stat_error;
2992 	if (count_bound_coefficient_constraints(ctx, graph, &n_eq, &n_ineq) < 0)
2993 		return isl_stat_error;
2994 
2995 	space = isl_space_set_alloc(ctx, 0, total);
2996 	isl_basic_set_free(graph->lp);
2997 	n_eq += 2 + parametric;
2998 
2999 	graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
3000 
3001 	if (add_sum_constraint(graph, 0, param_pos, 2 * nparam) < 0)
3002 		return isl_stat_error;
3003 	if (parametric && add_param_sum_constraint(graph, 2) < 0)
3004 		return isl_stat_error;
3005 	if (add_var_sum_constraint(graph, 3) < 0)
3006 		return isl_stat_error;
3007 	if (add_bound_constant_constraints(ctx, graph) < 0)
3008 		return isl_stat_error;
3009 	if (add_bound_coefficient_constraints(ctx, graph) < 0)
3010 		return isl_stat_error;
3011 	if (add_all_validity_constraints(graph, use_coincidence) < 0)
3012 		return isl_stat_error;
3013 	if (add_all_proximity_constraints(graph, use_coincidence) < 0)
3014 		return isl_stat_error;
3015 
3016 	return isl_stat_ok;
3017 }
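/* A worked instance of the variable layout described above: with one
 * parameter and two nodes with 2 and 1 domain variables respectively,
 * the LP has
 *
 *	0:       sum of the m_n parts
 *	1:       m_0
 *	2:       sum of the c_n coefficients
 *	3:       sum of the c_x parts
 *	4, 5:    m_n^-, m_n^+
 *	6 - 11:  first node (c_x pairs, c_n, c_0)
 *	12 - 15: second node (c_x pair, c_n, c_0)
 *
 * for a total of 16 variables, and n_eq is incremented by 3
 * (or by 2 for a non-parametric schedule, in which case variable 2
 * is left unconstrained).
 */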
3018 
3019 /* Analyze the conflicting constraint found by
3020  * isl_tab_basic_set_non_trivial_lexmin.  If it corresponds to the validity
3021  * constraint of one of the edges between distinct nodes that, moreover,
3022  * lie in distinct SCCs, then record the source and sink SCC as this may
3023  * be a good place to cut between SCCs.
3024  */
3025 static int check_conflict(int con, void *user)
3026 {
3027 	int i;
3028 	struct isl_sched_graph *graph = user;
3029 
3030 	if (graph->src_scc >= 0)
3031 		return 0;
3032 
3033 	con -= graph->lp->n_eq;
3034 
3035 	if (con >= graph->lp->n_ineq)
3036 		return 0;
3037 
3038 	for (i = 0; i < graph->n_edge; ++i) {
3039 		if (!is_validity(&graph->edge[i]))
3040 			continue;
3041 		if (graph->edge[i].src == graph->edge[i].dst)
3042 			continue;
3043 		if (graph->edge[i].src->scc == graph->edge[i].dst->scc)
3044 			continue;
3045 		if (graph->edge[i].start > con)
3046 			continue;
3047 		if (graph->edge[i].end <= con)
3048 			continue;
3049 		graph->src_scc = graph->edge[i].src->scc;
3050 		graph->dst_scc = graph->edge[i].dst->scc;
3051 	}
3052 
3053 	return 0;
3054 }
3055 
3056 /* Check whether the next schedule row of the given node needs to be
3057  * non-trivial.  Lower-dimensional domains may have some trivial rows,
3058  * but as soon as the number of remaining required non-trivial rows
3059  * is as large as the number of remaining rows to be computed,
3060  * all remaining rows need to be non-trivial.
3061  */
3062 static int needs_row(struct isl_sched_graph *graph, struct isl_sched_node *node)
3063 {
3064 	return node->nvar - node->rank >= graph->maxvar - graph->n_row;
3065 }
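/* For example, if a node has nvar = 3 and rank = 1 while graph->maxvar = 3
 * and graph->n_row = 1, then 3 - 1 >= 3 - 1 holds and the next row
 * for this node is required to be non-trivial.
 */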
3066 
3067 /* Construct a non-triviality region with triviality directions
3068  * corresponding to the rows of "indep".
3069  * The rows of "indep" are expressed in terms of the schedule coefficients c_i,
3070  * while the triviality directions are expressed in terms of
3071  * pairs of non-negative variables c^+_i - c^-_i, with c^-_i appearing
3072  * before c^+_i.  Furthermore,
3073  * the pairs of non-negative variables representing the coefficients
3074  * are stored in the opposite order.
3075  */
3076 static __isl_give isl_mat *construct_trivial(__isl_keep isl_mat *indep)
3077 {
3078 	isl_ctx *ctx;
3079 	isl_mat *mat;
3080 	int i, j;
3081 	isl_size n, n_var;
3082 
3083 	n = isl_mat_rows(indep);
3084 	n_var = isl_mat_cols(indep);
3085 	if (n < 0 || n_var < 0)
3086 		return NULL;
3087 
3088 	ctx = isl_mat_get_ctx(indep);
3089 	mat = isl_mat_alloc(ctx, n, 2 * n_var);
3090 	if (!mat)
3091 		return NULL;
3092 	for (i = 0; i < n; ++i) {
3093 		for (j = 0; j < n_var; ++j) {
3094 			int nj = n_var - 1 - j;
3095 			isl_int_neg(mat->row[i][2 * nj], indep->row[i][j]);
3096 			isl_int_set(mat->row[i][2 * nj + 1], indep->row[i][j]);
3097 		}
3098 	}
3099 
3100 	return mat;
3101 }
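
/* Worked example (purely illustrative): for a single row of "indep" with
 * coefficients (a, b) in terms of (c_1, c_2), the loop above produces the
 * triviality direction
 *
 *	(-b, b, -a, a)
 *
 * in terms of (c_2^-, c_2^+, c_1^-, c_1^+), i.e., the pairs appear in
 * reverse coefficient order and evaluating the direction on a solution
 * yields b c_2 + a c_1, the original linear combination.
 */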
3102 
3103 /* Solve the ILP problem constructed in setup_lp.
3104  * For each node such that all the remaining rows of its schedule
3105  * need to be non-trivial, we construct a non-triviality region.
3106  * This region imposes that the next row is independent of previous rows.
3107  * In particular, the non-triviality region enforces that at least
3108  * one of the linear combinations in the rows of node->indep is non-zero.
3109  */
3110 static __isl_give isl_vec *solve_lp(isl_ctx *ctx, struct isl_sched_graph *graph)
3111 {
3112 	int i;
3113 	isl_vec *sol;
3114 	isl_basic_set *lp;
3115 
3116 	for (i = 0; i < graph->n; ++i) {
3117 		struct isl_sched_node *node = &graph->node[i];
3118 		isl_mat *trivial;
3119 
3120 		graph->region[i].pos = node_var_coef_offset(node);
3121 		if (needs_row(graph, node))
3122 			trivial = construct_trivial(node->indep);
3123 		else
3124 			trivial = isl_mat_zero(ctx, 0, 0);
3125 		graph->region[i].trivial = trivial;
3126 	}
3127 	lp = isl_basic_set_copy(graph->lp);
3128 	sol = isl_tab_basic_set_non_trivial_lexmin(lp, 2, graph->n,
3129 				       graph->region, &check_conflict, graph);
3130 	for (i = 0; i < graph->n; ++i)
3131 		isl_mat_free(graph->region[i].trivial);
3132 	return sol;
3133 }
3134 
3135 /* Extract the coefficients for the variables of "node" from "sol".
3136  *
3137  * Each schedule coefficient c_i_x is represented as the difference
3138  * between two non-negative variables c_i_x^+ - c_i_x^-.
3139  * The c_i_x^- appear before their c_i_x^+ counterpart.
3140  * Furthermore, the order of these pairs is the opposite of that
3141  * of the corresponding coefficients.
3142  *
3143  * Return c_i_x = c_i_x^+ - c_i_x^-
3144  */
3145 static __isl_give isl_vec *extract_var_coef(struct isl_sched_node *node,
3146 	__isl_keep isl_vec *sol)
3147 {
3148 	int i;
3149 	int pos;
3150 	isl_vec *csol;
3151 
3152 	if (!sol)
3153 		return NULL;
3154 	csol = isl_vec_alloc(isl_vec_get_ctx(sol), node->nvar);
3155 	if (!csol)
3156 		return NULL;
3157 
3158 	pos = 1 + node_var_coef_offset(node);
3159 	for (i = 0; i < node->nvar; ++i)
3160 		isl_int_sub(csol->el[node->nvar - 1 - i],
3161 			    sol->el[pos + 2 * i + 1], sol->el[pos + 2 * i]);
3162 
3163 	return csol;
3164 }
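
/* A minimal stand-alone sketch of the decoding above, using plain C
 * arrays instead of isl types (the helper name "decode_coef" is
 * hypothetical and not part of isl).  "pairs" holds the (c^-, c^+)
 * pairs in reverse coefficient order:
 *
 *	static void decode_coef(int nvar, const long *pairs, long *coef)
 *	{
 *		int i;
 *
 *		for (i = 0; i < nvar; ++i)
 *			coef[nvar - 1 - i] = pairs[2 * i + 1] - pairs[2 * i];
 *	}
 *
 * For nvar = 2 and pairs = (0, 3, 5, 0), this yields coef = (-5, 3),
 * i.e., c_1 = -5 and c_2 = 3.
 */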
3165 
3166 /* Update the schedules of all nodes based on the given solution
3167  * of the LP problem.
3168  * The new row is added to the current band.
3169  * All possibly negative coefficients are encoded as a difference
3170  * of two non-negative variables, so we need to perform the subtraction
3171  * here.
3172  *
3173  * If coincident is set, then the caller guarantees that the new
3174  * row satisfies the coincidence constraints.
3175  */
3176 static int update_schedule(struct isl_sched_graph *graph,
3177 	__isl_take isl_vec *sol, int coincident)
3178 {
3179 	int i, j;
3180 	isl_vec *csol = NULL;
3181 
3182 	if (!sol)
3183 		goto error;
3184 	if (sol->size == 0)
3185 		isl_die(sol->ctx, isl_error_internal,
3186 			"no solution found", goto error);
3187 	if (graph->n_total_row >= graph->max_row)
3188 		isl_die(sol->ctx, isl_error_internal,
3189 			"too many schedule rows", goto error);
3190 
3191 	for (i = 0; i < graph->n; ++i) {
3192 		struct isl_sched_node *node = &graph->node[i];
3193 		int pos;
3194 		isl_size row = isl_mat_rows(node->sched);
3195 
3196 		isl_vec_free(csol);
3197 		csol = extract_var_coef(node, sol);
3198 		if (row < 0 || !csol)
3199 			goto error;
3200 
3201 		isl_map_free(node->sched_map);
3202 		node->sched_map = NULL;
3203 		node->sched = isl_mat_add_rows(node->sched, 1);
3204 		if (!node->sched)
3205 			goto error;
3206 		pos = node_cst_coef_offset(node);
3207 		node->sched = isl_mat_set_element(node->sched,
3208 					row, 0, sol->el[1 + pos]);
3209 		pos = node_par_coef_offset(node);
3210 		for (j = 0; j < node->nparam; ++j)
3211 			node->sched = isl_mat_set_element(node->sched,
3212 					row, 1 + j, sol->el[1 + pos + j]);
3213 		for (j = 0; j < node->nvar; ++j)
3214 			node->sched = isl_mat_set_element(node->sched,
3215 					row, 1 + node->nparam + j, csol->el[j]);
3216 		node->coincident[graph->n_total_row] = coincident;
3217 	}
3218 	isl_vec_free(sol);
3219 	isl_vec_free(csol);
3220 
3221 	graph->n_row++;
3222 	graph->n_total_row++;
3223 
3224 	return 0;
3225 error:
3226 	isl_vec_free(sol);
3227 	isl_vec_free(csol);
3228 	return -1;
3229 }
3230 
3231 /* Convert row "row" of node->sched into an isl_aff living in "ls"
3232  * and return this isl_aff.
3233  */
3234 static __isl_give isl_aff *extract_schedule_row(__isl_take isl_local_space *ls,
3235 	struct isl_sched_node *node, int row)
3236 {
3237 	int j;
3238 	isl_int v;
3239 	isl_aff *aff;
3240 
3241 	isl_int_init(v);
3242 
3243 	aff = isl_aff_zero_on_domain(ls);
3244 	if (isl_mat_get_element(node->sched, row, 0, &v) < 0)
3245 		goto error;
3246 	aff = isl_aff_set_constant(aff, v);
3247 	for (j = 0; j < node->nparam; ++j) {
3248 		if (isl_mat_get_element(node->sched, row, 1 + j, &v) < 0)
3249 			goto error;
3250 		aff = isl_aff_set_coefficient(aff, isl_dim_param, j, v);
3251 	}
3252 	for (j = 0; j < node->nvar; ++j) {
3253 		if (isl_mat_get_element(node->sched, row,
3254 					1 + node->nparam + j, &v) < 0)
3255 			goto error;
3256 		aff = isl_aff_set_coefficient(aff, isl_dim_in, j, v);
3257 	}
3258 
3259 	isl_int_clear(v);
3260 
3261 	return aff;
3262 error:
3263 	isl_int_clear(v);
3264 	isl_aff_free(aff);
3265 	return NULL;
3266 }
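
/* Illustrative example (not part of isl): for a node with nparam = 1 and
 * nvar = 2, a schedule row with entries (2, 1, 0, 3) is converted into
 * the affine expression 2 + n + 3 i_1 on the node domain, with n the
 * parameter and (i_0, i_1) the domain variables.
 */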
3267 
3268 /* Convert the "n" rows starting at "first" of node->sched into a multi_aff
3269  * and return this multi_aff.
3270  *
3271  * The result is defined over the uncompressed node domain.
3272  */
3273 static __isl_give isl_multi_aff *node_extract_partial_schedule_multi_aff(
3274 	struct isl_sched_node *node, int first, int n)
3275 {
3276 	int i;
3277 	isl_space *space;
3278 	isl_local_space *ls;
3279 	isl_aff *aff;
3280 	isl_multi_aff *ma;
3281 	isl_size nrow;
3282 
3283 	if (!node)
3284 		return NULL;
3285 	nrow = isl_mat_rows(node->sched);
3286 	if (nrow < 0)
3287 		return NULL;
3288 	if (node->compressed)
3289 		space = isl_pw_multi_aff_get_domain_space(node->decompress);
3290 	else
3291 		space = isl_space_copy(node->space);
3292 	ls = isl_local_space_from_space(isl_space_copy(space));
3293 	space = isl_space_from_domain(space);
3294 	space = isl_space_add_dims(space, isl_dim_out, n);
3295 	ma = isl_multi_aff_zero(space);
3296 
3297 	for (i = first; i < first + n; ++i) {
3298 		aff = extract_schedule_row(isl_local_space_copy(ls), node, i);
3299 		ma = isl_multi_aff_set_aff(ma, i - first, aff);
3300 	}
3301 
3302 	isl_local_space_free(ls);
3303 
3304 	if (node->compressed)
3305 		ma = isl_multi_aff_pullback_multi_aff(ma,
3306 					isl_multi_aff_copy(node->compress));
3307 
3308 	return ma;
3309 }
3310 
3311 /* Convert node->sched into a multi_aff and return this multi_aff.
3312  *
3313  * The result is defined over the uncompressed node domain.
3314  */
3315 static __isl_give isl_multi_aff *node_extract_schedule_multi_aff(
3316 	struct isl_sched_node *node)
3317 {
3318 	isl_size nrow;
3319 
3320 	nrow = isl_mat_rows(node->sched);
3321 	if (nrow < 0)
3322 		return NULL;
3323 	return node_extract_partial_schedule_multi_aff(node, 0, nrow);
3324 }
3325 
3326 /* Convert node->sched into a map and return this map.
3327  *
3328  * The result is cached in node->sched_map, which needs to be released
3329  * whenever node->sched is updated.
3330  * It is defined over the uncompressed node domain.
3331  */
3332 static __isl_give isl_map *node_extract_schedule(struct isl_sched_node *node)
3333 {
3334 	if (!node->sched_map) {
3335 		isl_multi_aff *ma;
3336 
3337 		ma = node_extract_schedule_multi_aff(node);
3338 		node->sched_map = isl_map_from_multi_aff(ma);
3339 	}
3340 
3341 	return isl_map_copy(node->sched_map);
3342 }
3343 
3344 /* Construct a map that can be used to update a dependence relation
3345  * based on the current schedule.
3346  * That is, construct a map expressing that source and sink
3347  * are executed within the same iteration of the current schedule.
3348  * This map can then be intersected with the dependence relation.
3349  * This is not the most efficient way, but this shouldn't be a critical
3350  * operation.
3351  */
3352 static __isl_give isl_map *specializer(struct isl_sched_node *src,
3353 	struct isl_sched_node *dst)
3354 {
3355 	isl_map *src_sched, *dst_sched;
3356 
3357 	src_sched = node_extract_schedule(src);
3358 	dst_sched = node_extract_schedule(dst);
3359 	return isl_map_apply_range(src_sched, isl_map_reverse(dst_sched));
3360 }
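
/* Illustrative example (not part of isl): if the current schedules map
 * a source instance x to f(x) and a sink instance y to g(y), then the
 * composition above yields { x -> y : f(x) = g(y) }, i.e., the pairs of
 * instances that have so far been assigned the same execution.
 */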
3361 
3362 /* Intersect the domains of the nested relations in domain and range
3363  * of "umap" with "map".
3364  */
3365 static __isl_give isl_union_map *intersect_domains(
3366 	__isl_take isl_union_map *umap, __isl_keep isl_map *map)
3367 {
3368 	isl_union_set *uset;
3369 
3370 	umap = isl_union_map_zip(umap);
3371 	uset = isl_union_set_from_set(isl_map_wrap(isl_map_copy(map)));
3372 	umap = isl_union_map_intersect_domain(umap, uset);
3373 	umap = isl_union_map_zip(umap);
3374 	return umap;
3375 }
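
/* Illustrative example (not part of isl): a tagged dependence relation
 * has elements of the form [S -> s] -> [T -> t], with s and t reference
 * tags.  Zipping turns these into [S -> T] -> [s -> t], so intersecting
 * the domain with the wrapped "map" (a subset of S -> T) restricts the
 * relation to the selected pairs of instances, after which the second
 * zip restores the original tagged form.
 */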
3376 
3377 /* Update the dependence relation of the given edge based
3378  * on the current schedule.
3379  * If the dependence is carried completely by the current schedule, then
3380  * it is removed from the edge_tables.  It is kept in the list of edges
3381  * as otherwise all edge_tables would have to be recomputed.
3382  *
3383  * If the edge is of a type that can appear multiple times
3384  * between the same pair of nodes, then it is added to
3385  * the edge table (again).  This prevents the situation
3386  * where none of these edges is referenced from the edge table
3387  * because the one that was referenced turned out to be empty and
3388  * was therefore removed from the table.
3389  */
3390 static isl_stat update_edge(isl_ctx *ctx, struct isl_sched_graph *graph,
3391 	struct isl_sched_edge *edge)
3392 {
3393 	int empty;
3394 	isl_map *id;
3395 
3396 	id = specializer(edge->src, edge->dst);
3397 	edge->map = isl_map_intersect(edge->map, isl_map_copy(id));
3398 	if (!edge->map)
3399 		goto error;
3400 
3401 	if (edge->tagged_condition) {
3402 		edge->tagged_condition =
3403 			intersect_domains(edge->tagged_condition, id);
3404 		if (!edge->tagged_condition)
3405 			goto error;
3406 	}
3407 	if (edge->tagged_validity) {
3408 		edge->tagged_validity =
3409 			intersect_domains(edge->tagged_validity, id);
3410 		if (!edge->tagged_validity)
3411 			goto error;
3412 	}
3413 
3414 	empty = isl_map_plain_is_empty(edge->map);
3415 	if (empty < 0)
3416 		goto error;
3417 	if (empty) {
3418 		if (graph_remove_edge(graph, edge) < 0)
3419 			goto error;
3420 	} else if (is_multi_edge_type(edge)) {
3421 		if (graph_edge_tables_add(ctx, graph, edge) < 0)
3422 			goto error;
3423 	}
3424 
3425 	isl_map_free(id);
3426 	return isl_stat_ok;
3427 error:
3428 	isl_map_free(id);
3429 	return isl_stat_error;
3430 }
3431 
3432 /* Does the domain of "umap" intersect "uset"?
3433  */
3434 static int domain_intersects(__isl_keep isl_union_map *umap,
3435 	__isl_keep isl_union_set *uset)
3436 {
3437 	int empty;
3438 
3439 	umap = isl_union_map_copy(umap);
3440 	umap = isl_union_map_intersect_domain(umap, isl_union_set_copy(uset));
3441 	empty = isl_union_map_is_empty(umap);
3442 	isl_union_map_free(umap);
3443 
3444 	return empty < 0 ? -1 : !empty;
3445 }
3446 
3447 /* Does the range of "umap" intersect "uset"?
3448  */
3449 static int range_intersects(__isl_keep isl_union_map *umap,
3450 	__isl_keep isl_union_set *uset)
3451 {
3452 	int empty;
3453 
3454 	umap = isl_union_map_copy(umap);
3455 	umap = isl_union_map_intersect_range(umap, isl_union_set_copy(uset));
3456 	empty = isl_union_map_is_empty(umap);
3457 	isl_union_map_free(umap);
3458 
3459 	return empty < 0 ? -1 : !empty;
3460 }
3461 
3462 /* Are the condition dependences of "edge" local with respect to
3463  * the current schedule?
3464  *
3465  * That is, are domain and range of the condition dependences mapped
3466  * to the same point?
3467  *
3468  * In other words, is the condition false?
3469  */
3470 static int is_condition_false(struct isl_sched_edge *edge)
3471 {
3472 	isl_union_map *umap;
3473 	isl_map *map, *sched, *test;
3474 	int empty, local;
3475 
3476 	empty = isl_union_map_is_empty(edge->tagged_condition);
3477 	if (empty < 0 || empty)
3478 		return empty;
3479 
3480 	umap = isl_union_map_copy(edge->tagged_condition);
3481 	umap = isl_union_map_zip(umap);
3482 	umap = isl_union_set_unwrap(isl_union_map_domain(umap));
3483 	map = isl_map_from_union_map(umap);
3484 
3485 	sched = node_extract_schedule(edge->src);
3486 	map = isl_map_apply_domain(map, sched);
3487 	sched = node_extract_schedule(edge->dst);
3488 	map = isl_map_apply_range(map, sched);
3489 
3490 	test = isl_map_identity(isl_map_get_space(map));
3491 	local = isl_map_is_subset(map, test);
3492 	isl_map_free(map);
3493 	isl_map_free(test);
3494 
3495 	return local;
3496 }
3497 
3498 /* For each conditional validity constraint that is adjacent
3499  * to a condition with domain in condition_source or range in condition_sink,
3500  * turn it into an unconditional validity constraint.
3501  */
3502 static int unconditionalize_adjacent_validity(struct isl_sched_graph *graph,
3503 	__isl_take isl_union_set *condition_source,
3504 	__isl_take isl_union_set *condition_sink)
3505 {
3506 	int i;
3507 
3508 	condition_source = isl_union_set_coalesce(condition_source);
3509 	condition_sink = isl_union_set_coalesce(condition_sink);
3510 
3511 	for (i = 0; i < graph->n_edge; ++i) {
3512 		int adjacent;
3513 		isl_union_map *validity;
3514 
3515 		if (!is_conditional_validity(&graph->edge[i]))
3516 			continue;
3517 		if (is_validity(&graph->edge[i]))
3518 			continue;
3519 
3520 		validity = graph->edge[i].tagged_validity;
3521 		adjacent = domain_intersects(validity, condition_sink);
3522 		if (adjacent >= 0 && !adjacent)
3523 			adjacent = range_intersects(validity, condition_source);
3524 		if (adjacent < 0)
3525 			goto error;
3526 		if (!adjacent)
3527 			continue;
3528 
3529 		set_validity(&graph->edge[i]);
3530 	}
3531 
3532 	isl_union_set_free(condition_source);
3533 	isl_union_set_free(condition_sink);
3534 	return 0;
3535 error:
3536 	isl_union_set_free(condition_source);
3537 	isl_union_set_free(condition_sink);
3538 	return -1;
3539 }
3540 
3541 /* Update the dependence relations of all edges based on the current schedule
3542  * and enforce conditional validity constraints that are adjacent
3543  * to satisfied condition constraints.
3544  *
3545  * First check if any of the condition constraints are satisfied
3546  * (i.e., not local to the outer schedule) and keep track of
3547  * their domain and range.
3548  * Then update all dependence relations (which removes the non-local
3549  * constraints).
3550  * Finally, if any condition constraints turned out to be satisfied,
3551  * then turn all adjacent conditional validity constraints into
3552  * unconditional validity constraints.
3553  */
3554 static int update_edges(isl_ctx *ctx, struct isl_sched_graph *graph)
3555 {
3556 	int i;
3557 	int any = 0;
3558 	isl_union_set *source, *sink;
3559 
3560 	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3561 	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
3562 	for (i = 0; i < graph->n_edge; ++i) {
3563 		int local;
3564 		isl_union_set *uset;
3565 		isl_union_map *umap;
3566 
3567 		if (!is_condition(&graph->edge[i]))
3568 			continue;
3569 		if (is_local(&graph->edge[i]))
3570 			continue;
3571 		local = is_condition_false(&graph->edge[i]);
3572 		if (local < 0)
3573 			goto error;
3574 		if (local)
3575 			continue;
3576 
3577 		any = 1;
3578 
3579 		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3580 		uset = isl_union_map_domain(umap);
3581 		source = isl_union_set_union(source, uset);
3582 
3583 		umap = isl_union_map_copy(graph->edge[i].tagged_condition);
3584 		uset = isl_union_map_range(umap);
3585 		sink = isl_union_set_union(sink, uset);
3586 	}
3587 
3588 	for (i = 0; i < graph->n_edge; ++i) {
3589 		if (update_edge(ctx, graph, &graph->edge[i]) < 0)
3590 			goto error;
3591 	}
3592 
3593 	if (any)
3594 		return unconditionalize_adjacent_validity(graph, source, sink);
3595 
3596 	isl_union_set_free(source);
3597 	isl_union_set_free(sink);
3598 	return 0;
3599 error:
3600 	isl_union_set_free(source);
3601 	isl_union_set_free(sink);
3602 	return -1;
3603 }
3604 
3605 static void next_band(struct isl_sched_graph *graph)
3606 {
3607 	graph->band_start = graph->n_total_row;
3608 }
3609 
3610 /* Return the union of the universe domains of the nodes in "graph"
3611  * that satisfy "pred".
3612  */
3613 static __isl_give isl_union_set *isl_sched_graph_domain(isl_ctx *ctx,
3614 	struct isl_sched_graph *graph,
3615 	int (*pred)(struct isl_sched_node *node, int data), int data)
3616 {
3617 	int i;
3618 	isl_set *set;
3619 	isl_union_set *dom;
3620 
3621 	for (i = 0; i < graph->n; ++i)
3622 		if (pred(&graph->node[i], data))
3623 			break;
3624 
3625 	if (i >= graph->n)
3626 		isl_die(ctx, isl_error_internal,
3627 			"empty component", return NULL);
3628 
3629 	set = isl_set_universe(isl_space_copy(graph->node[i].space));
3630 	dom = isl_union_set_from_set(set);
3631 
3632 	for (i = i + 1; i < graph->n; ++i) {
3633 		if (!pred(&graph->node[i], data))
3634 			continue;
3635 		set = isl_set_universe(isl_space_copy(graph->node[i].space));
3636 		dom = isl_union_set_union(dom, isl_union_set_from_set(set));
3637 	}
3638 
3639 	return dom;
3640 }
3641 
3642 /* Return a list of unions of universe domains, where each element
3643  * in the list corresponds to an SCC (or WCC) indexed by node->scc.
3644  */
3645 static __isl_give isl_union_set_list *extract_sccs(isl_ctx *ctx,
3646 	struct isl_sched_graph *graph)
3647 {
3648 	int i;
3649 	isl_union_set_list *filters;
3650 
3651 	filters = isl_union_set_list_alloc(ctx, graph->scc);
3652 	for (i = 0; i < graph->scc; ++i) {
3653 		isl_union_set *dom;
3654 
3655 		dom = isl_sched_graph_domain(ctx, graph, &node_scc_exactly, i);
3656 		filters = isl_union_set_list_add(filters, dom);
3657 	}
3658 
3659 	return filters;
3660 }
3661 
3662 /* Return a list of two unions of universe domains, one for the SCCs up
3663  * to and including graph->src_scc and another for the other SCCs.
3664  */
3665 static __isl_give isl_union_set_list *extract_split(isl_ctx *ctx,
3666 	struct isl_sched_graph *graph)
3667 {
3668 	isl_union_set *dom;
3669 	isl_union_set_list *filters;
3670 
3671 	filters = isl_union_set_list_alloc(ctx, 2);
3672 	dom = isl_sched_graph_domain(ctx, graph,
3673 					&node_scc_at_most, graph->src_scc);
3674 	filters = isl_union_set_list_add(filters, dom);
3675 	dom = isl_sched_graph_domain(ctx, graph,
3676 					&node_scc_at_least, graph->src_scc + 1);
3677 	filters = isl_union_set_list_add(filters, dom);
3678 
3679 	return filters;
3680 }
3681 
3682 /* Copy nodes that satisfy node_pred from the src dependence graph
3683  * to the dst dependence graph.
3684  */
3685 static isl_stat copy_nodes(struct isl_sched_graph *dst,
3686 	struct isl_sched_graph *src,
3687 	int (*node_pred)(struct isl_sched_node *node, int data), int data)
3688 {
3689 	int i;
3690 
3691 	dst->n = 0;
3692 	for (i = 0; i < src->n; ++i) {
3693 		int j;
3694 
3695 		if (!node_pred(&src->node[i], data))
3696 			continue;
3697 
3698 		j = dst->n;
3699 		dst->node[j].space = isl_space_copy(src->node[i].space);
3700 		dst->node[j].compressed = src->node[i].compressed;
3701 		dst->node[j].hull = isl_set_copy(src->node[i].hull);
3702 		dst->node[j].compress =
3703 			isl_multi_aff_copy(src->node[i].compress);
3704 		dst->node[j].decompress =
3705 			isl_pw_multi_aff_copy(src->node[i].decompress);
3706 		dst->node[j].nvar = src->node[i].nvar;
3707 		dst->node[j].nparam = src->node[i].nparam;
3708 		dst->node[j].sched = isl_mat_copy(src->node[i].sched);
3709 		dst->node[j].sched_map = isl_map_copy(src->node[i].sched_map);
3710 		dst->node[j].coincident = src->node[i].coincident;
3711 		dst->node[j].sizes = isl_multi_val_copy(src->node[i].sizes);
3712 		dst->node[j].bounds = isl_basic_set_copy(src->node[i].bounds);
3713 		dst->node[j].max = isl_vec_copy(src->node[i].max);
3714 		dst->n++;
3715 
3716 		if (!dst->node[j].space || !dst->node[j].sched)
3717 			return isl_stat_error;
3718 		if (dst->node[j].compressed &&
3719 		    (!dst->node[j].hull || !dst->node[j].compress ||
3720 		     !dst->node[j].decompress))
3721 			return isl_stat_error;
3722 	}
3723 
3724 	return isl_stat_ok;
3725 }
3726 
3727 /* Copy non-empty edges that satisfy edge_pred from the src dependence graph
3728  * to the dst dependence graph.
3729  * If the source or destination node of the edge is not in the destination
3730  * graph, then it must be a backward proximity edge and it should simply
3731  * be ignored.
3732  */
3733 static isl_stat copy_edges(isl_ctx *ctx, struct isl_sched_graph *dst,
3734 	struct isl_sched_graph *src,
3735 	int (*edge_pred)(struct isl_sched_edge *edge, int data), int data)
3736 {
3737 	int i;
3738 
3739 	dst->n_edge = 0;
3740 	for (i = 0; i < src->n_edge; ++i) {
3741 		struct isl_sched_edge *edge = &src->edge[i];
3742 		isl_map *map;
3743 		isl_union_map *tagged_condition;
3744 		isl_union_map *tagged_validity;
3745 		struct isl_sched_node *dst_src, *dst_dst;
3746 
3747 		if (!edge_pred(edge, data))
3748 			continue;
3749 
3750 		if (isl_map_plain_is_empty(edge->map))
3751 			continue;
3752 
3753 		dst_src = graph_find_node(ctx, dst, edge->src->space);
3754 		dst_dst = graph_find_node(ctx, dst, edge->dst->space);
3755 		if (!dst_src || !dst_dst)
3756 			return isl_stat_error;
3757 		if (!is_node(dst, dst_src) || !is_node(dst, dst_dst)) {
3758 			if (is_validity(edge) || is_conditional_validity(edge))
3759 				isl_die(ctx, isl_error_internal,
3760 					"backward (conditional) validity edge",
3761 					return isl_stat_error);
3762 			continue;
3763 		}
3764 
3765 		map = isl_map_copy(edge->map);
3766 		tagged_condition = isl_union_map_copy(edge->tagged_condition);
3767 		tagged_validity = isl_union_map_copy(edge->tagged_validity);
3768 
3769 		dst->edge[dst->n_edge].src = dst_src;
3770 		dst->edge[dst->n_edge].dst = dst_dst;
3771 		dst->edge[dst->n_edge].map = map;
3772 		dst->edge[dst->n_edge].tagged_condition = tagged_condition;
3773 		dst->edge[dst->n_edge].tagged_validity = tagged_validity;
3774 		dst->edge[dst->n_edge].types = edge->types;
3775 		dst->n_edge++;
3776 
3777 		if (edge->tagged_condition && !tagged_condition)
3778 			return isl_stat_error;
3779 		if (edge->tagged_validity && !tagged_validity)
3780 			return isl_stat_error;
3781 
3782 		if (graph_edge_tables_add(ctx, dst,
3783 					    &dst->edge[dst->n_edge - 1]) < 0)
3784 			return isl_stat_error;
3785 	}
3786 
3787 	return isl_stat_ok;
3788 }
3789 
3790 /* Compute the maximal number of variables over all nodes.
3791  * This is the maximal number of linearly independent schedule
3792  * rows that we need to compute.
3793  * Just in case we end up in a part of the dependence graph
3794  * with only lower-dimensional domains, we make sure we will
3795  * compute the required amount of extra linearly independent rows.
3796  */
3797 static int compute_maxvar(struct isl_sched_graph *graph)
3798 {
3799 	int i;
3800 
3801 	graph->maxvar = 0;
3802 	for (i = 0; i < graph->n; ++i) {
3803 		struct isl_sched_node *node = &graph->node[i];
3804 		int nvar;
3805 
3806 		if (node_update_vmap(node) < 0)
3807 			return -1;
3808 		nvar = node->nvar + graph->n_row - node->rank;
3809 		if (nvar > graph->maxvar)
3810 			graph->maxvar = nvar;
3811 	}
3812 
3813 	return 0;
3814 }
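
/* Illustrative example (not part of isl): with graph->n_row = 2 rows
 * already computed, a node with nvar = 3 of which rank = 2 are covered
 * by its schedule contributes 3 + 2 - 2 = 3, while a node with nvar = 1
 * and rank = 1 contributes 1 + 2 - 1 = 2, so graph->maxvar is set to 3.
 */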
3815 
3816 /* Extract the subgraph of "graph" that consists of the nodes satisfying
3817  * "node_pred" and the edges satisfying "edge_pred" and store
3818  * the result in "sub".
3819  */
3820 static isl_stat extract_sub_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
3821 	int (*node_pred)(struct isl_sched_node *node, int data),
3822 	int (*edge_pred)(struct isl_sched_edge *edge, int data),
3823 	int data, struct isl_sched_graph *sub)
3824 {
3825 	int i, n = 0, n_edge = 0;
3826 	int t;
3827 
3828 	for (i = 0; i < graph->n; ++i)
3829 		if (node_pred(&graph->node[i], data))
3830 			++n;
3831 	for (i = 0; i < graph->n_edge; ++i)
3832 		if (edge_pred(&graph->edge[i], data))
3833 			++n_edge;
3834 	if (graph_alloc(ctx, sub, n, n_edge) < 0)
3835 		return isl_stat_error;
3836 	sub->root = graph->root;
3837 	if (copy_nodes(sub, graph, node_pred, data) < 0)
3838 		return isl_stat_error;
3839 	if (graph_init_table(ctx, sub) < 0)
3840 		return isl_stat_error;
3841 	for (t = 0; t <= isl_edge_last; ++t)
3842 		sub->max_edge[t] = graph->max_edge[t];
3843 	if (graph_init_edge_tables(ctx, sub) < 0)
3844 		return isl_stat_error;
3845 	if (copy_edges(ctx, sub, graph, edge_pred, data) < 0)
3846 		return isl_stat_error;
3847 	sub->n_row = graph->n_row;
3848 	sub->max_row = graph->max_row;
3849 	sub->n_total_row = graph->n_total_row;
3850 	sub->band_start = graph->band_start;
3851 
3852 	return isl_stat_ok;
3853 }
3854 
3855 static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
3856 	struct isl_sched_graph *graph);
3857 static __isl_give isl_schedule_node *compute_schedule_wcc(
3858 	isl_schedule_node *node, struct isl_sched_graph *graph);
3859 
3860 /* Compute a schedule for a subgraph of "graph".  In particular, for
3861  * the graph composed of nodes that satisfy node_pred and edges
3862  * that satisfy edge_pred.
3863  * If the subgraph is known to consist of a single component, then wcc should
3864  * be set and then we call compute_schedule_wcc on the constructed subgraph.
3865  * Otherwise, we call compute_schedule, which will check whether the subgraph
3866  * is connected.
3867  *
3868  * The schedule is inserted at "node" and the updated schedule node
3869  * is returned.
3870  */
3871 static __isl_give isl_schedule_node *compute_sub_schedule(
3872 	__isl_take isl_schedule_node *node, isl_ctx *ctx,
3873 	struct isl_sched_graph *graph,
3874 	int (*node_pred)(struct isl_sched_node *node, int data),
3875 	int (*edge_pred)(struct isl_sched_edge *edge, int data),
3876 	int data, int wcc)
3877 {
3878 	struct isl_sched_graph split = { 0 };
3879 
3880 	if (extract_sub_graph(ctx, graph, node_pred, edge_pred, data,
3881 				&split) < 0)
3882 		goto error;
3883 
3884 	if (wcc)
3885 		node = compute_schedule_wcc(node, &split);
3886 	else
3887 		node = compute_schedule(node, &split);
3888 
3889 	graph_free(ctx, &split);
3890 	return node;
3891 error:
3892 	graph_free(ctx, &split);
3893 	return isl_schedule_node_free(node);
3894 }
3895 
3896 static int edge_scc_exactly(struct isl_sched_edge *edge, int scc)
3897 {
3898 	return edge->src->scc == scc && edge->dst->scc == scc;
3899 }
3900 
3901 static int edge_dst_scc_at_most(struct isl_sched_edge *edge, int scc)
3902 {
3903 	return edge->dst->scc <= scc;
3904 }
3905 
3906 static int edge_src_scc_at_least(struct isl_sched_edge *edge, int scc)
3907 {
3908 	return edge->src->scc >= scc;
3909 }
3910 
3911 /* Reset the current band by dropping all its schedule rows.
3912  */
3913 static isl_stat reset_band(struct isl_sched_graph *graph)
3914 {
3915 	int i;
3916 	int drop;
3917 
3918 	drop = graph->n_total_row - graph->band_start;
3919 	graph->n_total_row -= drop;
3920 	graph->n_row -= drop;
3921 
3922 	for (i = 0; i < graph->n; ++i) {
3923 		struct isl_sched_node *node = &graph->node[i];
3924 
3925 		isl_map_free(node->sched_map);
3926 		node->sched_map = NULL;
3927 
3928 		node->sched = isl_mat_drop_rows(node->sched,
3929 						graph->band_start, drop);
3930 
3931 		if (!node->sched)
3932 			return isl_stat_error;
3933 	}
3934 
3935 	return isl_stat_ok;
3936 }
3937 
3938 /* Split the current graph into two parts and compute a schedule for each
3939  * part individually.  In particular, one part consists of all SCCs up
3940  * to and including graph->src_scc, while the other part contains the other
3941  * SCCs.  The split is enforced by a sequence node inserted at position "node"
3942  * in the schedule tree.  Return the updated schedule node.
3943  * If either of these two parts consists of a sequence, then it is spliced
3944  * into the sequence containing the two parts.
3945  *
3946  * The current band is reset. It would be possible to reuse
3947  * the previously computed rows as the first rows in the next
3948  * band, but recomputing them may result in better rows as we are looking
3949  * at a smaller part of the dependence graph.
3950  */
3951 static __isl_give isl_schedule_node *compute_split_schedule(
3952 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
3953 {
3954 	int is_seq;
3955 	isl_ctx *ctx;
3956 	isl_union_set_list *filters;
3957 
3958 	if (!node)
3959 		return NULL;
3960 
3961 	if (reset_band(graph) < 0)
3962 		return isl_schedule_node_free(node);
3963 
3964 	next_band(graph);
3965 
3966 	ctx = isl_schedule_node_get_ctx(node);
3967 	filters = extract_split(ctx, graph);
3968 	node = isl_schedule_node_insert_sequence(node, filters);
3969 	node = isl_schedule_node_child(node, 1);
3970 	node = isl_schedule_node_child(node, 0);
3971 
3972 	node = compute_sub_schedule(node, ctx, graph,
3973 				&node_scc_at_least, &edge_src_scc_at_least,
3974 				graph->src_scc + 1, 0);
3975 	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3976 	node = isl_schedule_node_parent(node);
3977 	node = isl_schedule_node_parent(node);
3978 	if (is_seq)
3979 		node = isl_schedule_node_sequence_splice_child(node, 1);
3980 	node = isl_schedule_node_child(node, 0);
3981 	node = isl_schedule_node_child(node, 0);
3982 	node = compute_sub_schedule(node, ctx, graph,
3983 				&node_scc_at_most, &edge_dst_scc_at_most,
3984 				graph->src_scc, 0);
3985 	is_seq = isl_schedule_node_get_type(node) == isl_schedule_node_sequence;
3986 	node = isl_schedule_node_parent(node);
3987 	node = isl_schedule_node_parent(node);
3988 	if (is_seq)
3989 		node = isl_schedule_node_sequence_splice_child(node, 0);
3990 
3991 	return node;
3992 }
3993 
3994 /* Insert a band node at position "node" in the schedule tree corresponding
3995  * to the current band in "graph".  Mark the band node permutable
3996  * if "permutable" is set.
3997  * The partial schedules and the coincidence property are extracted
3998  * from the graph nodes.
3999  * Return the updated schedule node.
4000  */
4001 static __isl_give isl_schedule_node *insert_current_band(
4002 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4003 	int permutable)
4004 {
4005 	int i;
4006 	int start, end, n;
4007 	isl_multi_aff *ma;
4008 	isl_multi_pw_aff *mpa;
4009 	isl_multi_union_pw_aff *mupa;
4010 
4011 	if (!node)
4012 		return NULL;
4013 
4014 	if (graph->n < 1)
4015 		isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
4016 			"graph should have at least one node",
4017 			return isl_schedule_node_free(node));
4018 
4019 	start = graph->band_start;
4020 	end = graph->n_total_row;
4021 	n = end - start;
4022 
4023 	ma = node_extract_partial_schedule_multi_aff(&graph->node[0], start, n);
4024 	mpa = isl_multi_pw_aff_from_multi_aff(ma);
4025 	mupa = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
4026 
4027 	for (i = 1; i < graph->n; ++i) {
4028 		isl_multi_union_pw_aff *mupa_i;
4029 
4030 		ma = node_extract_partial_schedule_multi_aff(&graph->node[i],
4031 								start, n);
4032 		mpa = isl_multi_pw_aff_from_multi_aff(ma);
4033 		mupa_i = isl_multi_union_pw_aff_from_multi_pw_aff(mpa);
4034 		mupa = isl_multi_union_pw_aff_union_add(mupa, mupa_i);
4035 	}
4036 	node = isl_schedule_node_insert_partial_schedule(node, mupa);
4037 
4038 	for (i = 0; i < n; ++i)
4039 		node = isl_schedule_node_band_member_set_coincident(node, i,
4040 					graph->node[0].coincident[start + i]);
4041 	node = isl_schedule_node_band_set_permutable(node, permutable);
4042 
4043 	return node;
4044 }
4045 
4046 /* Update the dependence relations based on the current schedule,
4047  * add the current band to "node" and then continue with the computation
4048  * of the next band.
4049  * Return the updated schedule node.
4050  */
4051 static __isl_give isl_schedule_node *compute_next_band(
4052 	__isl_take isl_schedule_node *node,
4053 	struct isl_sched_graph *graph, int permutable)
4054 {
4055 	isl_ctx *ctx;
4056 
4057 	if (!node)
4058 		return NULL;
4059 
4060 	ctx = isl_schedule_node_get_ctx(node);
4061 	if (update_edges(ctx, graph) < 0)
4062 		return isl_schedule_node_free(node);
4063 	node = insert_current_band(node, graph, permutable);
4064 	next_band(graph);
4065 
4066 	node = isl_schedule_node_child(node, 0);
4067 	node = compute_schedule(node, graph);
4068 	node = isl_schedule_node_parent(node);
4069 
4070 	return node;
4071 }
4072 
4073 /* Add the constraints "coef" derived from an edge from "node" to itself
4074  * to graph->lp in order to respect the dependences and to try and carry them.
4075  * "pos" is the sequence number of the edge that needs to be carried.
4076  * "coef" represents general constraints on coefficients (c_0, c_x)
4077  * of valid constraints for (y - x) with x and y instances of the node.
4078  *
4079  * The constraints added to graph->lp need to enforce
4080  *
4081  *	(c_j_0 + c_j_x y) - (c_j_0 + c_j_x x)
4082  *	= c_j_x (y - x) >= e_i
4083  *
4084  * for each (x,y) in the dependence relation of the edge.
4085  * That is, (-e_i, c_j_x) needs to be plugged in for (c_0, c_x),
4086  * taking into account that each coefficient in c_j_x is represented
4087  * as a pair of non-negative coefficients.
4088  */
4089 static isl_stat add_intra_constraints(struct isl_sched_graph *graph,
4090 	struct isl_sched_node *node, __isl_take isl_basic_set *coef, int pos)
4091 {
4092 	isl_size offset;
4093 	isl_ctx *ctx;
4094 	isl_dim_map *dim_map;
4095 
4096 	offset = coef_var_offset(coef);
4097 	if (offset < 0)
4098 		coef = isl_basic_set_free(coef);
4099 	if (!coef)
4100 		return isl_stat_error;
4101 
4102 	ctx = isl_basic_set_get_ctx(coef);
4103 	dim_map = intra_dim_map(ctx, graph, node, offset, 1);
4104 	isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
4105 	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
4106 
4107 	return isl_stat_ok;
4108 }
4109 
4110 /* Add the constraints "coef" derived from an edge from "src" to "dst"
4111  * to graph->lp in order to respect the dependences and to try and carry them.
4112  * "pos" is the sequence number of the edge that needs to be carried or
4113  * -1 if no attempt should be made to carry the dependences.
4114  * "coef" represents general constraints on coefficients (c_0, c_n, c_x, c_y)
4115  * of valid constraints for (x, y) with x and y instances of "src" and "dst".
4116  *
4117  * The constraints added to graph->lp need to enforce
4118  *
4119  *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= e_i
4120  *
4121  * for each (x,y) in the dependence relation of the edge or
4122  *
4123  *	(c_k_0 + c_k_n n + c_k_x y) - (c_j_0 + c_j_n n + c_j_x x) >= 0
4124  *
4125  * if pos is -1.
4126  * That is,
4127  * (-e_i + c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
4128  * or
4129  * (c_k_0 - c_j_0, c_k_n - c_j_n, -c_j_x, c_k_x)
4130  * needs to be plugged in for (c_0, c_n, c_x, c_y),
4131  * taking into account that each coefficient in c_j_x and c_k_x is represented
4132  * as a pair of non-negative coefficients.
4133  */
4134 static isl_stat add_inter_constraints(struct isl_sched_graph *graph,
4135 	struct isl_sched_node *src, struct isl_sched_node *dst,
4136 	__isl_take isl_basic_set *coef, int pos)
4137 {
4138 	isl_size offset;
4139 	isl_ctx *ctx;
4140 	isl_dim_map *dim_map;
4141 
4142 	offset = coef_var_offset(coef);
4143 	if (offset < 0)
4144 		coef = isl_basic_set_free(coef);
4145 	if (!coef)
4146 		return isl_stat_error;
4147 
4148 	ctx = isl_basic_set_get_ctx(coef);
4149 	dim_map = inter_dim_map(ctx, graph, src, dst, offset, 1);
4150 	if (pos >= 0)
4151 		isl_dim_map_range(dim_map, 3 + pos, 0, 0, 0, 1, -1);
4152 	graph->lp = add_constraints_dim_map(graph->lp, coef, dim_map);
4153 
4154 	return isl_stat_ok;
4155 }
4156 
4157 /* Data structure for keeping track of the data needed
4158  * to exploit non-trivial lineality spaces.
4159  *
4160  * "any_non_trivial" is true if there are any non-trivial lineality spaces.
4161  * If "any_non_trivial" is not true, then "equivalent" and "mask" may be NULL.
4162  * "equivalent" connects instances to other instances on the same line(s).
4163  * "mask" contains the domain spaces of "equivalent".
4164  * Any instance set not in "mask" does not have a non-trivial lineality space.
4165  */
4166 struct isl_exploit_lineality_data {
4167 	isl_bool any_non_trivial;
4168 	isl_union_map *equivalent;
4169 	isl_union_set *mask;
4170 };
4171 
4172 /* Data structure collecting information used during the construction
4173  * of an LP for carrying dependences.
4174  *
4175  * "intra" is a sequence of coefficient constraints for intra-node edges.
4176  * "inter" is a sequence of coefficient constraints for inter-node edges.
4177  * "lineality" contains data used to exploit non-trivial lineality spaces.
4178  */
4179 struct isl_carry {
4180 	isl_basic_set_list *intra;
4181 	isl_basic_set_list *inter;
4182 	struct isl_exploit_lineality_data lineality;
4183 };
4184 
4185 /* Free all the data stored in "carry".
4186  */
4187 static void isl_carry_clear(struct isl_carry *carry)
4188 {
4189 	isl_basic_set_list_free(carry->intra);
4190 	isl_basic_set_list_free(carry->inter);
4191 	isl_union_map_free(carry->lineality.equivalent);
4192 	isl_union_set_free(carry->lineality.mask);
4193 }
4194 
4195 /* Return a pointer to the node in "graph" that lives in "space".
4196  * If the requested node has been compressed, then "space"
4197  * corresponds to the compressed space.
4198  * The graph is assumed to have such a node.
4199  * Return NULL in case of error.
4200  *
4201  * First try and see if "space" is the space of an uncompressed node.
4202  * If so, return that node.
4203  * Otherwise, "space" was constructed by construct_compressed_id and
4204  * contains a user pointer pointing to the node in the tuple id.
4205  * However, this node belongs to the original dependence graph.
4206  * If "graph" is a subgraph of this original dependence graph,
4207  * then the node with the same space still needs to be looked up
4208  * in the current graph.
4209  */
4210 static struct isl_sched_node *graph_find_compressed_node(isl_ctx *ctx,
4211 	struct isl_sched_graph *graph, __isl_keep isl_space *space)
4212 {
4213 	isl_id *id;
4214 	struct isl_sched_node *node;
4215 
4216 	if (!space)
4217 		return NULL;
4218 
4219 	node = graph_find_node(ctx, graph, space);
4220 	if (!node)
4221 		return NULL;
4222 	if (is_node(graph, node))
4223 		return node;
4224 
4225 	id = isl_space_get_tuple_id(space, isl_dim_set);
4226 	node = isl_id_get_user(id);
4227 	isl_id_free(id);
4228 
4229 	if (!node)
4230 		return NULL;
4231 
4232 	if (!is_node(graph->root, node))
4233 		isl_die(ctx, isl_error_internal,
4234 			"space points to invalid node", return NULL);
4235 	if (graph != graph->root)
4236 		node = graph_find_node(ctx, graph, node->space);
4237 	if (!is_node(graph, node))
4238 		isl_die(ctx, isl_error_internal,
4239 			"unable to find node", return NULL);
4240 
4241 	return node;
4242 }
4243 
4244 /* Internal data structure for add_all_constraints.
4245  *
4246  * "graph" is the schedule constraint graph for which an LP problem
4247  * is being constructed.
4248  * "carry_inter" indicates whether inter-node edges should be carried.
4249  * "pos" is the position of the next edge that needs to be carried.
4250  */
4251 struct isl_add_all_constraints_data {
4252 	isl_ctx *ctx;
4253 	struct isl_sched_graph *graph;
4254 	int carry_inter;
4255 	int pos;
4256 };
4257 
4258 /* Add the constraints "coef" derived from an edge from a node to itself
4259  * to data->graph->lp in order to respect the dependences and
4260  * to try and carry them.
4261  *
4262  * The space of "coef" is of the form
4263  *
4264  *	coefficients[[c_cst] -> S[c_x]]
4265  *
4266  * with S[c_x] the (compressed) space of the node.
4267  * Extract the node from the space and call add_intra_constraints.
4268  */
4269 static isl_stat lp_add_intra(__isl_take isl_basic_set *coef, void *user)
4270 {
4271 	struct isl_add_all_constraints_data *data = user;
4272 	isl_space *space;
4273 	struct isl_sched_node *node;
4274 
4275 	space = isl_basic_set_get_space(coef);
4276 	space = isl_space_range(isl_space_unwrap(space));
4277 	node = graph_find_compressed_node(data->ctx, data->graph, space);
4278 	isl_space_free(space);
4279 	return add_intra_constraints(data->graph, node, coef, data->pos++);
4280 }
4281 
4282 /* Add the constraints "coef" derived from an edge from a node j
4283  * to a node k to data->graph->lp in order to respect the dependences and
4284  * to try and carry them (provided data->carry_inter is set).
4285  *
4286  * The space of "coef" is of the form
4287  *
4288  *	coefficients[[c_cst, c_n] -> [S_j[c_x] -> S_k[c_y]]]
4289  *
4290  * with S_j[c_x] and S_k[c_y] the (compressed) spaces of the nodes.
4291  * Extract the nodes from the space and call add_inter_constraints.
4292  */
4293 static isl_stat lp_add_inter(__isl_take isl_basic_set *coef, void *user)
4294 {
4295 	struct isl_add_all_constraints_data *data = user;
4296 	isl_space *space, *dom;
4297 	struct isl_sched_node *src, *dst;
4298 	int pos;
4299 
4300 	space = isl_basic_set_get_space(coef);
4301 	space = isl_space_unwrap(isl_space_range(isl_space_unwrap(space)));
4302 	dom = isl_space_domain(isl_space_copy(space));
4303 	src = graph_find_compressed_node(data->ctx, data->graph, dom);
4304 	isl_space_free(dom);
4305 	space = isl_space_range(space);
4306 	dst = graph_find_compressed_node(data->ctx, data->graph, space);
4307 	isl_space_free(space);
4308 
4309 	pos = data->carry_inter ? data->pos++ : -1;
4310 	return add_inter_constraints(data->graph, src, dst, coef, pos);
4311 }
4312 
4313 /* Add constraints to graph->lp that force all (conditional) validity
4314  * dependences to be respected and attempt to carry them.
4315  * "intra" is the sequence of coefficient constraints for intra-node edges.
4316  * "inter" is the sequence of coefficient constraints for inter-node edges.
4317  * "carry_inter" indicates whether inter-node edges should be carried or
4318  * only respected.
4319  */
4320 static isl_stat add_all_constraints(isl_ctx *ctx, struct isl_sched_graph *graph,
4321 	__isl_keep isl_basic_set_list *intra,
4322 	__isl_keep isl_basic_set_list *inter, int carry_inter)
4323 {
4324 	struct isl_add_all_constraints_data data = { ctx, graph, carry_inter };
4325 
4326 	data.pos = 0;
4327 	if (isl_basic_set_list_foreach(intra, &lp_add_intra, &data) < 0)
4328 		return isl_stat_error;
4329 	if (isl_basic_set_list_foreach(inter, &lp_add_inter, &data) < 0)
4330 		return isl_stat_error;
4331 	return isl_stat_ok;
4332 }
4333 
4334 /* Internal data structure for count_all_constraints
4335  * for keeping track of the number of equality and inequality constraints.
4336  */
4337 struct isl_sched_count {
4338 	int n_eq;
4339 	int n_ineq;
4340 };
4341 
4342 /* Add the number of equality and inequality constraints of "bset"
4343  * to data->n_eq and data->n_ineq.
4344  */
4345 static isl_stat bset_update_count(__isl_take isl_basic_set *bset, void *user)
4346 {
4347 	struct isl_sched_count *data = user;
4348 
4349 	return update_count(bset, 1, &data->n_eq, &data->n_ineq);
4350 }
4351 
4352 /* Count the number of equality and inequality constraints
4353  * that will be added to the carry_lp problem.
4354  * We count each edge exactly once.
4355  * "intra" is the sequence of coefficient constraints for intra-node edges.
4356  * "inter" is the sequence of coefficient constraints for inter-node edges.
4357  */
4358 static isl_stat count_all_constraints(__isl_keep isl_basic_set_list *intra,
4359 	__isl_keep isl_basic_set_list *inter, int *n_eq, int *n_ineq)
4360 {
4361 	struct isl_sched_count data;
4362 
4363 	data.n_eq = data.n_ineq = 0;
4364 	if (isl_basic_set_list_foreach(inter, &bset_update_count, &data) < 0)
4365 		return isl_stat_error;
4366 	if (isl_basic_set_list_foreach(intra, &bset_update_count, &data) < 0)
4367 		return isl_stat_error;
4368 
4369 	*n_eq = data.n_eq;
4370 	*n_ineq = data.n_ineq;
4371 
4372 	return isl_stat_ok;
4373 }
4374 
4375 /* Construct an LP problem for finding schedule coefficients
4376  * such that the schedule carries as many validity dependences as possible.
4377  * In particular, for each dependence i, we bound the dependence distance
4378  * from below by e_i, with 0 <= e_i <= 1 and then maximize the sum
4379  * of all e_i's.  Dependences with e_i = 0 in the solution are simply
4380  * respected, while those with e_i > 0 (in practice e_i = 1) are carried.
4381  * "intra" is the sequence of coefficient constraints for intra-node edges.
4382  * "inter" is the sequence of coefficient constraints for inter-node edges.
4383  * "n_edge" is the total number of edges.
4384  * "carry_inter" indicates whether inter-node edges should be carried or
4385  * only respected.  That is, if "carry_inter" is not set, then
4386  * no e_i variables are introduced for the inter-node edges.
4387  *
4388  * All variables of the LP are non-negative.  The actual coefficients
4389  * may be negative, so each coefficient is represented as the difference
4390  * of two non-negative variables.  The negative part always appears
4391  * immediately before the positive part.
4392  * Other than that, the variables have the following order
4393  *
4394  *	- sum of (1 - e_i) over all edges
4395  *	- sum of all c_n coefficients
4396  *		(unconstrained when computing non-parametric schedules)
4397  *	- sum of positive and negative parts of all c_x coefficients
4398  *	- for each edge
4399  *		- e_i
4400  *	- for each node
4401  *		- positive and negative parts of c_i_x, in opposite order
4402  *		- c_i_n (if parametric)
4403  *		- c_i_0
4404  *
4405  * The constraints are those from the (validity) edges plus three equalities
4406  * to express the sums and n_edge inequalities to express e_i <= 1.
4407  */
4408 static isl_stat setup_carry_lp(isl_ctx *ctx, struct isl_sched_graph *graph,
4409 	int n_edge, __isl_keep isl_basic_set_list *intra,
4410 	__isl_keep isl_basic_set_list *inter, int carry_inter)
4411 {
4412 	int i;
4413 	int k;
4414 	isl_space *space;
4415 	unsigned total;
4416 	int n_eq, n_ineq;
4417 
4418 	total = 3 + n_edge;
4419 	for (i = 0; i < graph->n; ++i) {
4420 		struct isl_sched_node *node = &graph->node[graph->sorted[i]];
4421 		node->start = total;
4422 		total += 1 + node->nparam + 2 * node->nvar;
4423 	}
4424 
4425 	if (count_all_constraints(intra, inter, &n_eq, &n_ineq) < 0)
4426 		return isl_stat_error;
4427 
4428 	space = isl_space_set_alloc(ctx, 0, total);
4429 	isl_basic_set_free(graph->lp);
4430 	n_eq += 3;
4431 	n_ineq += n_edge;
4432 	graph->lp = isl_basic_set_alloc_space(space, 0, n_eq, n_ineq);
4433 	graph->lp = isl_basic_set_set_rational(graph->lp);
4434 
4435 	k = isl_basic_set_alloc_equality(graph->lp);
4436 	if (k < 0)
4437 		return isl_stat_error;
4438 	isl_seq_clr(graph->lp->eq[k], 1 + total);
4439 	isl_int_set_si(graph->lp->eq[k][0], -n_edge);
4440 	isl_int_set_si(graph->lp->eq[k][1], 1);
4441 	for (i = 0; i < n_edge; ++i)
4442 		isl_int_set_si(graph->lp->eq[k][4 + i], 1);
4443 
4444 	if (add_param_sum_constraint(graph, 1) < 0)
4445 		return isl_stat_error;
4446 	if (add_var_sum_constraint(graph, 2) < 0)
4447 		return isl_stat_error;
4448 
4449 	for (i = 0; i < n_edge; ++i) {
4450 		k = isl_basic_set_alloc_inequality(graph->lp);
4451 		if (k < 0)
4452 			return isl_stat_error;
4453 		isl_seq_clr(graph->lp->ineq[k], 1 + total);
4454 		isl_int_set_si(graph->lp->ineq[k][4 + i], -1);
4455 		isl_int_set_si(graph->lp->ineq[k][0], 1);
4456 	}
4457 
4458 	if (add_all_constraints(ctx, graph, intra, inter, carry_inter) < 0)
4459 		return isl_stat_error;
4460 
4461 	return isl_stat_ok;
4462 }
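
/* Illustrative example (not part of isl): for n_edge = 2 and a single
 * node with nparam = 1 and nvar = 2, the variables of the LP above are
 * laid out as
 *
 *	(sum (1 - e_i), sum c_n, sum c_x, e_0, e_1,
 *	 c_x_1^-, c_x_1^+, c_x_0^-, c_x_0^+, c_n, c_0)
 *
 * so node->start = 3 + n_edge = 5 and the total dimension is
 * 5 + 2 * 2 + 1 + 1 = 11.
 */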
4463 
4464 static __isl_give isl_schedule_node *compute_component_schedule(
4465 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
4466 	int wcc);
4467 
4468 /* If the schedule_split_scaled option is set and if the linear
4469  * parts of the scheduling rows for all nodes in the graphs have
4470  * a non-trivial common divisor, then remove this
4471  * common divisor from the linear part.
4472  * Otherwise, insert a band node directly and continue with
4473  * the construction of the schedule.
4474  *
4475  * If a non-trivial common divisor is found, then
4476  * the linear part is reduced and the remainder is ignored.
4477  * The pieces of the graph that are assigned different remainders
4478  * form (groups of) strongly connected components within
4479  * the scaled down band.  If needed, they can therefore
4480  * be ordered along this remainder in a sequence node.
4481  * However, this ordering is not enforced here in order to allow
4482  * the scheduler to combine some of the strongly connected components.
4483  */
4484 static __isl_give isl_schedule_node *split_scaled(
4485 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
4486 {
4487 	int i;
4488 	int row;
4489 	isl_ctx *ctx;
4490 	isl_int gcd, gcd_i;
4491 	isl_size n_row;
4492 
4493 	if (!node)
4494 		return NULL;
4495 
4496 	ctx = isl_schedule_node_get_ctx(node);
4497 	if (!ctx->opt->schedule_split_scaled)
4498 		return compute_next_band(node, graph, 0);
4499 	if (graph->n <= 1)
4500 		return compute_next_band(node, graph, 0);
4501 	n_row = isl_mat_rows(graph->node[0].sched);
4502 	if (n_row < 0)
4503 		return isl_schedule_node_free(node);
4504 
4505 	isl_int_init(gcd);
4506 	isl_int_init(gcd_i);
4507 
4508 	isl_int_set_si(gcd, 0);
4509 
4510 	row = n_row - 1;
4511 
4512 	for (i = 0; i < graph->n; ++i) {
4513 		struct isl_sched_node *node = &graph->node[i];
4514 		isl_size cols = isl_mat_cols(node->sched);
4515 
4516 		if (cols < 0)
4517 			break;
4518 		isl_seq_gcd(node->sched->row[row] + 1, cols - 1, &gcd_i);
4519 		isl_int_gcd(gcd, gcd, gcd_i);
4520 	}
4521 
4522 	isl_int_clear(gcd_i);
4523 	if (i < graph->n)
4524 		goto error;
4525 
4526 	if (isl_int_cmp_si(gcd, 1) <= 0) {
4527 		isl_int_clear(gcd);
4528 		return compute_next_band(node, graph, 0);
4529 	}
4530 
4531 	for (i = 0; i < graph->n; ++i) {
4532 		struct isl_sched_node *node = &graph->node[i];
4533 
4534 		isl_int_fdiv_q(node->sched->row[row][0],
4535 			       node->sched->row[row][0], gcd);
4536 		isl_int_mul(node->sched->row[row][0],
4537 			    node->sched->row[row][0], gcd);
4538 		node->sched = isl_mat_scale_down_row(node->sched, row, gcd);
4539 		if (!node->sched)
4540 			goto error;
4541 	}
4542 
4543 	isl_int_clear(gcd);
4544 
4545 	return compute_next_band(node, graph, 0);
4546 error:
4547 	isl_int_clear(gcd);
4548 	return isl_schedule_node_free(node);
4549 }
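
/* Illustrative example (not part of isl): if the last computed row is
 * 6 i_0 + 2 for one node and 4 i_0 for another, then the common divisor
 * of the linear parts is gcd(6, 4) = 2, the constant terms are rounded
 * down to multiples of 2 and the rows are scaled down to 3 i_0 + 1 and
 * 2 i_0, respectively.
 */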
4550 
4551 /* Is the schedule row "sol" trivial on node "node"?
4552  * That is, is the solution zero on the dimensions linearly independent of
4553  * the previously found solutions?
4554  * Return 1 if the solution is trivial, 0 if it is not and -1 on error.
4555  *
4556  * Each coefficient is represented as the difference between
4557  * two non-negative values in "sol".
4558  * We construct the schedule row s and check if it is linearly
4559  * independent of previously computed schedule rows
4560  * by computing T s, with T the linear combinations that are zero
4561  * on linearly dependent schedule rows.
4562  * If the result consists of all zeros, then the solution is trivial.
4563  */
4564 static int is_trivial(struct isl_sched_node *node, __isl_keep isl_vec *sol)
4565 {
4566 	int trivial;
4567 	isl_vec *node_sol;
4568 
4569 	if (!sol)
4570 		return -1;
4571 	if (node->nvar == node->rank)
4572 		return 0;
4573 
4574 	node_sol = extract_var_coef(node, sol);
4575 	node_sol = isl_mat_vec_product(isl_mat_copy(node->indep), node_sol);
4576 	if (!node_sol)
4577 		return -1;
4578 
4579 	trivial = isl_seq_first_non_zero(node_sol->el,
4580 					node->nvar - node->rank) == -1;
4581 
4582 	isl_vec_free(node_sol);
4583 
4584 	return trivial;
4585 }
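
/* Illustrative example (not part of isl): if node->indep consists of the
 * single row (0, 1), then a solution with coefficients (5, 0) is trivial
 * since the product with indep is zero, while a solution with
 * coefficients (5, 1) is not.
 */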
4586 
4587 /* Is the schedule row "sol" trivial on any node where it should
4588  * not be trivial?
4589  * Return 1 if the solution is trivial on some such node, 0 if not and -1 on error.
4590  */
4591 static int is_any_trivial(struct isl_sched_graph *graph,
4592 	__isl_keep isl_vec *sol)
4593 {
4594 	int i;
4595 
4596 	for (i = 0; i < graph->n; ++i) {
4597 		struct isl_sched_node *node = &graph->node[i];
4598 		int trivial;
4599 
4600 		if (!needs_row(graph, node))
4601 			continue;
4602 		trivial = is_trivial(node, sol);
4603 		if (trivial < 0 || trivial)
4604 			return trivial;
4605 	}
4606 
4607 	return 0;
4608 }
4609 
4610 /* Does the schedule represented by "sol" perform loop coalescing on "node"?
4611  * If so, return the position of the coalesced dimension.
4612  * Otherwise, return node->nvar.  Return -1 on error.
4613  *
4614  * In particular, look for pairs of coefficients c_i and c_j such that
4615  * |c_j/c_i| > ceil(size_i/2), i.e., |c_j| > |c_i * ceil(size_i/2)|.
4616  * If any such pair is found, then return i.
4617  * If size_i is infinity, then no check on c_i needs to be performed.
4618  */
4619 static int find_node_coalescing(struct isl_sched_node *node,
4620 	__isl_keep isl_vec *sol)
4621 {
4622 	int i, j;
4623 	isl_int max;
4624 	isl_vec *csol;
4625 
4626 	if (node->nvar <= 1)
4627 		return node->nvar;
4628 
4629 	csol = extract_var_coef(node, sol);
4630 	if (!csol)
4631 		return -1;
4632 	isl_int_init(max);
4633 	for (i = 0; i < node->nvar; ++i) {
4634 		isl_val *v;
4635 
4636 		if (isl_int_is_zero(csol->el[i]))
4637 			continue;
4638 		v = isl_multi_val_get_val(node->sizes, i);
4639 		if (!v)
4640 			goto error;
4641 		if (!isl_val_is_int(v)) {
4642 			isl_val_free(v);
4643 			continue;
4644 		}
4645 		v = isl_val_div_ui(v, 2);
4646 		v = isl_val_ceil(v);
4647 		if (!v)
4648 			goto error;
4649 		isl_int_mul(max, v->n, csol->el[i]);
4650 		isl_val_free(v);
4651 
4652 		for (j = 0; j < node->nvar; ++j) {
4653 			if (j == i)
4654 				continue;
4655 			if (isl_int_abs_gt(csol->el[j], max))
4656 				break;
4657 		}
4658 		if (j < node->nvar)
4659 			break;
4660 	}
4661 
4662 	isl_int_clear(max);
4663 	isl_vec_free(csol);
4664 	return i;
4665 error:
4666 	isl_int_clear(max);
4667 	isl_vec_free(csol);
4668 	return -1;
4669 }
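
/* For illustration (hypothetical size and coefficients): if dimension i
 * of the node has size 10, then ceil(size_i/2) = 5, so a schedule row
 * such as i + 6 j is flagged as coalescing (|6| > |1 * 5|) and
 * position i is returned, while i + 4 j passes the check.
 */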
4670 
4671 /* Force the schedule coefficient at position "pos" of "node" to be zero
4672  * in "tl".
4673  * The coefficient is encoded as the difference between two non-negative
4674  * variables.  Force these two variables to have the same value.
4675  */
4676 static __isl_give isl_tab_lexmin *zero_out_node_coef(
4677 	__isl_take isl_tab_lexmin *tl, struct isl_sched_node *node, int pos)
4678 {
4679 	int dim;
4680 	isl_ctx *ctx;
4681 	isl_vec *eq;
4682 
4683 	ctx = isl_space_get_ctx(node->space);
4684 	dim = isl_tab_lexmin_dim(tl);
4685 	if (dim < 0)
4686 		return isl_tab_lexmin_free(tl);
4687 	eq = isl_vec_alloc(ctx, 1 + dim);
4688 	eq = isl_vec_clr(eq);
4689 	if (!eq)
4690 		return isl_tab_lexmin_free(tl);
4691 
4692 	pos = 1 + node_var_coef_pos(node, pos);
4693 	isl_int_set_si(eq->el[pos], 1);
4694 	isl_int_set_si(eq->el[pos + 1], -1);
4695 	tl = isl_tab_lexmin_add_eq(tl, eq->el);
4696 	isl_vec_free(eq);
4697 
4698 	return tl;
4699 }
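
/* In terms of the encoding used above: if the coefficient at position
 * "pos" corresponds to the pair of non-negative LP variables at
 * positions p and p + 1 of the solution vector, then the equality
 * added to "tl" is x_p - x_{p+1} = 0, forcing the encoded difference,
 * and hence the schedule coefficient itself, to be zero.
 */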
4700 
4701 /* Return the lexicographically smallest rational point in the basic set
4702  * from which "tl" was constructed, double checking that this input set
4703  * was not empty.
4704  */
4705 static __isl_give isl_vec *non_empty_solution(__isl_keep isl_tab_lexmin *tl)
4706 {
4707 	isl_vec *sol;
4708 
4709 	sol = isl_tab_lexmin_get_solution(tl);
4710 	if (!sol)
4711 		return NULL;
4712 	if (sol->size == 0)
4713 		isl_die(isl_vec_get_ctx(sol), isl_error_internal,
4714 			"error in schedule construction",
4715 			return isl_vec_free(sol));
4716 	return sol;
4717 }
4718 
4719 /* Does the solution "sol" of the LP problem constructed by setup_carry_lp
4720  * carry any of the "n_edge" groups of dependences?
4721  * The value in the first position is the sum of (1 - e_i) over all "n_edge"
4722  * edges, with 0 <= e_i <= 1 equal to 1 when the dependences represented
4723  * by the edge are carried by the solution.
4724  * If the sum of the (1 - e_i) is smaller than "n_edge" then at least
4725  * one of those is carried.
4726  *
4727  * Note that despite the fact that the problem is solved using a rational
4728  * solver, the solution is guaranteed to be integral.
4729  * Specifically, the dependence distance lower bounds e_i (and therefore
4730  * also their sum) are integers.  See Lemma 5 of [1].
4731  *
4732  * Any potential denominator of the sum is cleared by this function.
4733  * The denominator is not relevant for any of the other elements
4734  * in the solution.
4735  *
4736  * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
4737  *     Problem, Part II: Multi-Dimensional Time.
4738  *     In Intl. Journal of Parallel Programming, 1992.
4739  */
4740 static int carries_dependences(__isl_keep isl_vec *sol, int n_edge)
4741 {
4742 	isl_int_divexact(sol->el[1], sol->el[1], sol->el[0]);
4743 	isl_int_set_si(sol->el[0], 1);
4744 	return isl_int_cmp_si(sol->el[1], n_edge) < 0;
4745 }
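
/* A hypothetical numeric example: if the rational solution has
 * denominator 3 in sol->el[0] and value 3 * (n_edge - 1) in sol->el[1],
 * then the sum is normalized to n_edge - 1, which is smaller than
 * "n_edge", so at least one group of dependences is carried.
 */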
4746 
4747 /* Return the lexicographically smallest rational point in "lp",
4748  * assuming that all variables are non-negative and performing some
4749  * additional sanity checks.
4750  * If "want_integral" is set, then compute the lexicographically smallest
4751  * integer point instead.
4752  * In particular, "lp" should not be empty by construction.
4753  * Double check that this is the case.
4754  * If dependences are not carried for any of the "n_edge" edges,
4755  * then return an empty vector.
4756  *
4757  * If the schedule_treat_coalescing option is set and
4758  * if the computed schedule performs loop coalescing on a given node,
4759  * i.e., if it is of the form
4760  *
4761  *	c_i i + c_j j + ...
4762  *
4763  * with |c_j/c_i| >= size_i, then force the coefficient c_i to be zero
4764  * to cut out this solution.  Repeat this process until no more loop
4765  * coalescing occurs or until no more dependences can be carried.
4766  * In the latter case, revert to the previously computed solution.
4767  *
4768  * If the caller requests an integral solution and if coalescing should
4769  * be treated, then perform the coalescing treatment first, since
4770  * an integral solution computed before the coalescing treatment
4771  * would carry the same number of edges and would therefore probably
4772  * also be coalescing.
4773  *
4774  * To allow the coalescing treatment to be performed first,
4775  * the initial solution is allowed to be rational and it is only
4776  * cut out (if needed) in the next iteration, if no coalescing measures
4777  * were taken.
4778  */
4779 static __isl_give isl_vec *non_neg_lexmin(struct isl_sched_graph *graph,
4780 	__isl_take isl_basic_set *lp, int n_edge, int want_integral)
4781 {
4782 	int i, pos, cut;
4783 	isl_ctx *ctx;
4784 	isl_tab_lexmin *tl;
4785 	isl_vec *sol = NULL, *prev;
4786 	int treat_coalescing;
4787 	int try_again;
4788 
4789 	if (!lp)
4790 		return NULL;
4791 	ctx = isl_basic_set_get_ctx(lp);
4792 	treat_coalescing = isl_options_get_schedule_treat_coalescing(ctx);
4793 	tl = isl_tab_lexmin_from_basic_set(lp);
4794 
4795 	cut = 0;
4796 	do {
4797 		int integral;
4798 
4799 		try_again = 0;
4800 		if (cut)
4801 			tl = isl_tab_lexmin_cut_to_integer(tl);
4802 		prev = sol;
4803 		sol = non_empty_solution(tl);
4804 		if (!sol)
4805 			goto error;
4806 
4807 		integral = isl_int_is_one(sol->el[0]);
4808 		if (!carries_dependences(sol, n_edge)) {
4809 			if (!prev)
4810 				prev = isl_vec_alloc(ctx, 0);
4811 			isl_vec_free(sol);
4812 			sol = prev;
4813 			break;
4814 		}
4815 		prev = isl_vec_free(prev);
4816 		cut = want_integral && !integral;
4817 		if (cut)
4818 			try_again = 1;
4819 		if (!treat_coalescing)
4820 			continue;
4821 		for (i = 0; i < graph->n; ++i) {
4822 			struct isl_sched_node *node = &graph->node[i];
4823 
4824 			pos = find_node_coalescing(node, sol);
4825 			if (pos < 0)
4826 				goto error;
4827 			if (pos < node->nvar)
4828 				break;
4829 		}
4830 		if (i < graph->n) {
4831 			try_again = 1;
4832 			tl = zero_out_node_coef(tl, &graph->node[i], pos);
4833 			cut = 0;
4834 		}
4835 	} while (try_again);
4836 
4837 	isl_tab_lexmin_free(tl);
4838 
4839 	return sol;
4840 error:
4841 	isl_tab_lexmin_free(tl);
4842 	isl_vec_free(prev);
4843 	isl_vec_free(sol);
4844 	return NULL;
4845 }
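
/* In other words, the loop above alternates between two refinements:
 * zeroing out a coefficient that would lead to loop coalescing
 * (which also resets "cut") and, once no further coalescing is
 * detected, cutting the solution down to an integer point
 * whenever the caller asked for an integral solution.
 */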
4846 
4847 /* If "edge" is an edge from a node to itself, then add the corresponding
4848  * dependence relation to "umap".
4849  * If "node" has been compressed, then the dependence relation
4850  * is also compressed first.
4851  */
4852 static __isl_give isl_union_map *add_intra(__isl_take isl_union_map *umap,
4853 	struct isl_sched_edge *edge)
4854 {
4855 	isl_map *map;
4856 	struct isl_sched_node *node = edge->src;
4857 
4858 	if (edge->src != edge->dst)
4859 		return umap;
4860 
4861 	map = isl_map_copy(edge->map);
4862 	map = compress(map, node, node);
4863 	umap = isl_union_map_add_map(umap, map);
4864 	return umap;
4865 }
4866 
4867 /* If "edge" is an edge from a node to another node, then add the corresponding
4868  * dependence relation to "umap".
4869  * If the source or destination nodes of "edge" have been compressed,
4870  * then the dependence relation is also compressed first.
4871  */
4872 static __isl_give isl_union_map *add_inter(__isl_take isl_union_map *umap,
4873 	struct isl_sched_edge *edge)
4874 {
4875 	isl_map *map;
4876 
4877 	if (edge->src == edge->dst)
4878 		return umap;
4879 
4880 	map = isl_map_copy(edge->map);
4881 	map = compress(map, edge->src, edge->dst);
4882 	umap = isl_union_map_add_map(umap, map);
4883 	return umap;
4884 }
4885 
4886 /* Internal data structure used by union_drop_coalescing_constraints
4887  * to collect bounds on all relevant statements.
4888  *
4889  * "graph" is the schedule constraint graph for which an LP problem
4890  * is being constructed.
4891  * "bounds" collects the bounds.
4892  */
4893 struct isl_collect_bounds_data {
4894 	isl_ctx *ctx;
4895 	struct isl_sched_graph *graph;
4896 	isl_union_set *bounds;
4897 };
4898 
4899 /* Add the size bounds for the node with instance deltas in "set"
4900  * to data->bounds.
4901  */
4902 static isl_stat collect_bounds(__isl_take isl_set *set, void *user)
4903 {
4904 	struct isl_collect_bounds_data *data = user;
4905 	struct isl_sched_node *node;
4906 	isl_space *space;
4907 	isl_set *bounds;
4908 
4909 	space = isl_set_get_space(set);
4910 	isl_set_free(set);
4911 
4912 	node = graph_find_compressed_node(data->ctx, data->graph, space);
4913 	isl_space_free(space);
4914 
4915 	bounds = isl_set_from_basic_set(get_size_bounds(node));
4916 	data->bounds = isl_union_set_add_set(data->bounds, bounds);
4917 
4918 	return isl_stat_ok;
4919 }
4920 
4921 /* Drop some constraints from "delta" that could be exploited
4922  * to construct loop coalescing schedules.
4923  * In particular, drop those constraints that bound the difference
4924  * to the size of the domain.
4925  * Do this for each set/node in "delta" separately.
4926  * The parameters are assumed to have been projected out by the caller.
4927  */
4928 static __isl_give isl_union_set *union_drop_coalescing_constraints(isl_ctx *ctx,
4929 	struct isl_sched_graph *graph, __isl_take isl_union_set *delta)
4930 {
4931 	struct isl_collect_bounds_data data = { ctx, graph };
4932 
4933 	data.bounds = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
4934 	if (isl_union_set_foreach_set(delta, &collect_bounds, &data) < 0)
4935 		data.bounds = isl_union_set_free(data.bounds);
4936 	delta = isl_union_set_plain_gist(delta, data.bounds);
4937 
4938 	return delta;
4939 }
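
/* As a hypothetical example of the gist computation above: for a node
 * with a single dimension of size 10, the difference set may contain
 * a constraint such as d <= 9 that only holds because of the size of
 * the domain.  Since this constraint is implied by the size bounds
 * collected in data.bounds, it is removed by the gist operation and
 * can no longer be exploited to construct a coalescing schedule.
 */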
4940 
4941 /* Given a non-trivial lineality space "lineality", add the corresponding
4942  * universe set to data->mask and add a map from elements to
4943  * other elements along the lines in "lineality" to data->equivalent.
4944  * If this is the first time this function gets called
4945  * (data->any_non_trivial is still false), then set data->any_non_trivial and
4946  * initialize data->mask and data->equivalent.
4947  *
4948  * In particular, if the lineality space is defined by equality constraints
4949  *
4950  *	E x = 0
4951  *
4952  * then construct an affine mapping
4953  *
4954  *	f : x -> E x
4955  *
4956  * and compute the equivalence relation of having the same image under f:
4957  *
4958  *	{ x -> x' : E x = E x' }
4959  */
4960 static isl_stat add_non_trivial_lineality(__isl_take isl_basic_set *lineality,
4961 	struct isl_exploit_lineality_data *data)
4962 {
4963 	isl_mat *eq;
4964 	isl_space *space;
4965 	isl_set *univ;
4966 	isl_multi_aff *ma;
4967 	isl_multi_pw_aff *mpa;
4968 	isl_map *map;
4969 	isl_size n;
4970 
4971 	if (isl_basic_set_check_no_locals(lineality) < 0)
4972 		goto error;
4973 
4974 	space = isl_basic_set_get_space(lineality);
4975 	if (!data->any_non_trivial) {
4976 		data->equivalent = isl_union_map_empty(isl_space_copy(space));
4977 		data->mask = isl_union_set_empty(isl_space_copy(space));
4978 	}
4979 	data->any_non_trivial = isl_bool_true;
4980 
4981 	univ = isl_set_universe(isl_space_copy(space));
4982 	data->mask = isl_union_set_add_set(data->mask, univ);
4983 
4984 	eq = isl_basic_set_extract_equalities(lineality);
4985 	n = isl_mat_rows(eq);
4986 	if (n < 0)
4987 		space = isl_space_free(space);
4988 	eq = isl_mat_insert_zero_rows(eq, 0, 1);
4989 	eq = isl_mat_set_element_si(eq, 0, 0, 1);
4990 	space = isl_space_from_domain(space);
4991 	space = isl_space_add_dims(space, isl_dim_out, n);
4992 	ma = isl_multi_aff_from_aff_mat(space, eq);
4993 	mpa = isl_multi_pw_aff_from_multi_aff(ma);
4994 	map = isl_multi_pw_aff_eq_map(mpa, isl_multi_pw_aff_copy(mpa));
4995 	data->equivalent = isl_union_map_add_map(data->equivalent, map);
4996 
4997 	isl_basic_set_free(lineality);
4998 	return isl_stat_ok;
4999 error:
5000 	isl_basic_set_free(lineality);
5001 	return isl_stat_error;
5002 }
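
/* A hypothetical two-dimensional example: if the lineality space is
 *
 *	{ [x, y] : x - y = 0 }
 *
 * then E = [ 1 -1 ], f maps [x, y] to x - y, and the equivalence
 * relation added to data->equivalent is
 *
 *	{ [x, y] -> [x', y'] : x - y = x' - y' }
 */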
5003 
5004 /* Check if the lineality space "set" is non-trivial (i.e., is not just
5005  * the origin or, in other words, satisfies a number of equality constraints
5006  * that is smaller than the dimension of the set).
5007  * If so, extend data->mask and data->equivalent accordingly.
5008  *
5009  * The input is not expected to have any local variables, but
5010  * isl_set_remove_divs is called to make sure it does not.
5011  */
5012 static isl_stat add_lineality(__isl_take isl_set *set, void *user)
5013 {
5014 	struct isl_exploit_lineality_data *data = user;
5015 	isl_basic_set *hull;
5016 	isl_size dim;
5017 	isl_size n_eq;
5018 
5019 	set = isl_set_remove_divs(set);
5020 	hull = isl_set_unshifted_simple_hull(set);
5021 	dim = isl_basic_set_dim(hull, isl_dim_set);
5022 	n_eq = isl_basic_set_n_equality(hull);
5023 	if (dim < 0 || n_eq < 0)
5024 		goto error;
5025 	if (dim != n_eq)
5026 		return add_non_trivial_lineality(hull, data);
5027 	isl_basic_set_free(hull);
5028 	return isl_stat_ok;
5029 error:
5030 	isl_basic_set_free(hull);
5031 	return isl_stat_error;
5032 }
5033 
5034 /* Check if the difference set on intra-node schedule constraints "intra"
5035  * has any non-trivial lineality space.
5036  * If so, then extend the difference set to a difference set
5037  * on equivalent elements.  That is, if "intra" is
5038  *
5039  *	{ y - x : (x,y) \in V }
5040  *
5041  * and elements are equivalent if they have the same image under f,
5042  * then return
5043  *
5044  *	{ y' - x' : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
5045  *
5046  * or, since f is linear,
5047  *
5048  *	{ y' - x' : (x,y) \in V and f(y - x) = f(y' - x') }
5049  *
5050  * The results of the search for non-trivial lineality spaces are stored
5051  * in "data".
5052  */
5053 static __isl_give isl_union_set *exploit_intra_lineality(
5054 	__isl_take isl_union_set *intra,
5055 	struct isl_exploit_lineality_data *data)
5056 {
5057 	isl_union_set *lineality;
5058 	isl_union_set *uset;
5059 
5060 	data->any_non_trivial = isl_bool_false;
5061 	lineality = isl_union_set_copy(intra);
5062 	lineality = isl_union_set_combined_lineality_space(lineality);
5063 	if (isl_union_set_foreach_set(lineality, &add_lineality, data) < 0)
5064 		data->any_non_trivial = isl_bool_error;
5065 	isl_union_set_free(lineality);
5066 
5067 	if (data->any_non_trivial < 0)
5068 		return isl_union_set_free(intra);
5069 	if (!data->any_non_trivial)
5070 		return intra;
5071 
5072 	uset = isl_union_set_copy(intra);
5073 	intra = isl_union_set_subtract(intra, isl_union_set_copy(data->mask));
5074 	uset = isl_union_set_apply(uset, isl_union_map_copy(data->equivalent));
5075 	intra = isl_union_set_union(intra, uset);
5076 
5077 	intra = isl_union_set_remove_divs(intra);
5078 
5079 	return intra;
5080 }
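
/* Continuing the hypothetical example above: if "intra" contains the
 * difference [1, 0] on a space whose lineality space is x = y, then,
 * since f([1, 0]) = 1, the extension also includes every difference
 * [a, b] with a - b = 1, such as [2, 1] and [0, -1].
 */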
5081 
5082 /* If the difference set on intra-node schedule constraints was found to have
5083  * any non-trivial lineality space by exploit_intra_lineality,
5084  * as recorded in "data", then extend the inter-node
5085  * schedule constraints "inter" to schedule constraints on equivalent elements.
5086  * That is, if "inter" is V and
5087  * elements are equivalent if they have the same image under f, then return
5088  *
5089  *	{ (x', y') : (x,y) \in V and f(x) = f(x') and f(y) = f(y') }
5090  */
5091 static __isl_give isl_union_map *exploit_inter_lineality(
5092 	__isl_take isl_union_map *inter,
5093 	struct isl_exploit_lineality_data *data)
5094 {
5095 	isl_union_map *umap;
5096 
5097 	if (data->any_non_trivial < 0)
5098 		return isl_union_map_free(inter);
5099 	if (!data->any_non_trivial)
5100 		return inter;
5101 
5102 	umap = isl_union_map_copy(inter);
5103 	inter = isl_union_map_subtract_range(inter,
5104 				isl_union_set_copy(data->mask));
5105 	umap = isl_union_map_apply_range(umap,
5106 				isl_union_map_copy(data->equivalent));
5107 	inter = isl_union_map_union(inter, umap);
5108 	umap = isl_union_map_copy(inter);
5109 	inter = isl_union_map_subtract_domain(inter,
5110 				isl_union_set_copy(data->mask));
5111 	umap = isl_union_map_apply_range(isl_union_map_copy(data->equivalent),
5112 				umap);
5113 	inter = isl_union_map_union(inter, umap);
5114 
5115 	inter = isl_union_map_remove_divs(inter);
5116 
5117 	return inter;
5118 }
5119 
5120 /* For each (conditional) validity edge in "graph",
5121  * add the corresponding dependence relation using "add"
5122  * to a collection of dependence relations and return the result.
5123  * If "coincidence" is set, then coincidence edges are considered as well.
5124  */
5125 static __isl_give isl_union_map *collect_validity(struct isl_sched_graph *graph,
5126 	__isl_give isl_union_map *(*add)(__isl_take isl_union_map *umap,
5127 		struct isl_sched_edge *edge), int coincidence)
5128 {
5129 	int i;
5130 	isl_space *space;
5131 	isl_union_map *umap;
5132 
5133 	space = isl_space_copy(graph->node[0].space);
5134 	umap = isl_union_map_empty(space);
5135 
5136 	for (i = 0; i < graph->n_edge; ++i) {
5137 		struct isl_sched_edge *edge = &graph->edge[i];
5138 
5139 		if (!is_any_validity(edge) &&
5140 		    (!coincidence || !is_coincidence(edge)))
5141 			continue;
5142 
5143 		umap = add(umap, edge);
5144 	}
5145 
5146 	return umap;
5147 }
5148 
5149 /* For each dependence relation on a (conditional) validity edge
5150  * from a node to itself,
5151  * construct the set of coefficients of valid constraints for elements
5152  * in that dependence relation and collect the results.
5153  * If "coincidence" is set, then coincidence edges are considered as well.
5154  *
5155  * In particular, for each dependence relation R, constraints
5156  * on coefficients (c_0, c_x) are constructed such that
5157  *
5158  *	c_0 + c_x d >= 0 for each d in delta R = { y - x | (x,y) in R }
5159  *
5160  * If the schedule_treat_coalescing option is set, then some constraints
5161  * that could be exploited to construct coalescing schedules
5162  * are removed before the dual is computed, but after the parameters
5163  * have been projected out.
5164  * The entire computation is essentially the same as that performed
5165  * by intra_coefficients, except that it operates on multiple
5166  * edges together and that the parameters are always projected out.
5167  *
5168  * Additionally, exploit any non-trivial lineality space
5169  * in the difference set after removing coalescing constraints and
5170  * store the results of the non-trivial lineality space detection in "data".
5171  * The procedure is currently run unconditionally, but it is unlikely
5172  * to find any non-trivial lineality spaces if no coalescing constraints
5173  * have been removed.
5174  *
5175  * Note that if a dependence relation is a union of basic maps,
5176  * then each basic map needs to be treated individually as it may only
5177  * be possible to carry the dependences expressed by some of those
5178  * basic maps and not all of them.
5179  * The collected validity constraints are therefore not coalesced and
5180  * it is assumed that they are not coalesced automatically.
5181  * Duplicate basic maps can be removed, however.
5182  * In particular, if the same basic map appears as a disjunct
5183  * in multiple edges, then it only needs to be carried once.
5184  */
5185 static __isl_give isl_basic_set_list *collect_intra_validity(isl_ctx *ctx,
5186 	struct isl_sched_graph *graph, int coincidence,
5187 	struct isl_exploit_lineality_data *data)
5188 {
5189 	isl_union_map *intra;
5190 	isl_union_set *delta;
5191 	isl_basic_set_list *list;
5192 
5193 	intra = collect_validity(graph, &add_intra, coincidence);
5194 	delta = isl_union_map_deltas(intra);
5195 	delta = isl_union_set_project_out_all_params(delta);
5196 	delta = isl_union_set_remove_divs(delta);
5197 	if (isl_options_get_schedule_treat_coalescing(ctx))
5198 		delta = union_drop_coalescing_constraints(ctx, graph, delta);
5199 	delta = exploit_intra_lineality(delta, data);
5200 	list = isl_union_set_get_basic_set_list(delta);
5201 	isl_union_set_free(delta);
5202 
5203 	return isl_basic_set_list_coefficients(list);
5204 }
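
/* For a single hypothetical intra-node dependence relation
 *
 *	{ [i] -> [i + 1] }
 *
 * the difference set is { [1] } and the coefficient constraints
 * collected above reduce to c_0 + c_x >= 0.
 */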
5205 
5206 /* For each dependence relation on a (conditional) validity edge
5207  * from a node to some other node,
5208  * construct the set of coefficients of valid constraints for elements
5209  * in that dependence relation and collect the results.
5210  * If "coincidence" is set, then coincidence edges are considered as well.
5211  *
5212  * In particular, for each dependence relation R, constraints
5213  * on coefficients (c_0, c_n, c_x, c_y) are constructed such that
5214  *
5215  *	c_0 + c_n n + c_x x + c_y y >= 0 for each (x,y) in R
5216  *
5217  * This computation is essentially the same as that performed
5218  * by inter_coefficients, except that it operates on multiple
5219  * edges together.
5220  *
5221  * Additionally, exploit any non-trivial lineality space
5222  * that may have been discovered by collect_intra_validity
5223  * (as stored in "data").
5224  *
5225  * Note that if a dependence relation is a union of basic maps,
5226  * then each basic map needs to be treated individually as it may only
5227  * be possible to carry the dependences expressed by some of those
5228  * basic maps and not all of them.
5229  * The collected validity constraints are therefore not coalesced and
5230  * it is assumed that they are not coalesced automatically.
5231  * Duplicate basic maps can be removed, however.
5232  * In particular, if the same basic map appears as a disjunct
5233  * in multiple edges, then it only needs to be carried once.
5234  */
5235 static __isl_give isl_basic_set_list *collect_inter_validity(
5236 	struct isl_sched_graph *graph, int coincidence,
5237 	struct isl_exploit_lineality_data *data)
5238 {
5239 	isl_union_map *inter;
5240 	isl_union_set *wrap;
5241 	isl_basic_set_list *list;
5242 
5243 	inter = collect_validity(graph, &add_inter, coincidence);
5244 	inter = exploit_inter_lineality(inter, data);
5245 	inter = isl_union_map_remove_divs(inter);
5246 	wrap = isl_union_map_wrap(inter);
5247 	list = isl_union_set_get_basic_set_list(wrap);
5248 	isl_union_set_free(wrap);
5249 	return isl_basic_set_list_coefficients(list);
5250 }
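
/* Analogously, for a hypothetical inter-node dependence
 *
 *	{ A[i] -> B[i] }
 *
 * the coefficient constraints collected above are of the form
 *
 *	c_0 + c_n n + c_x i + c_y i >= 0
 *
 * for all values of i (and of the parameters n) for which
 * the dependence holds.
 */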
5251 
5252 /* Construct an LP problem for finding schedule coefficients
5253  * such that the schedule carries as many of the "n_edge" groups of
5254  * dependences as possible based on the corresponding coefficient
5255  * constraints and return the lexicographically smallest non-trivial solution.
5256  * "intra" is the sequence of coefficient constraints for intra-node edges.
5257  * "inter" is the sequence of coefficient constraints for inter-node edges.
5258  * If "want_integral" is set, then compute an integral solution
5259  * for the coefficients rather than using the numerators
5260  * of a rational solution.
5261  * "carry_inter" indicates whether inter-node edges should be carried or
5262  * only respected.
5263  *
5264  * If none of the "n_edge" groups can be carried
5265  * then return an empty vector.
5266  */
5267 static __isl_give isl_vec *compute_carrying_sol_coef(isl_ctx *ctx,
5268 	struct isl_sched_graph *graph, int n_edge,
5269 	__isl_keep isl_basic_set_list *intra,
5270 	__isl_keep isl_basic_set_list *inter, int want_integral,
5271 	int carry_inter)
5272 {
5273 	isl_basic_set *lp;
5274 
5275 	if (setup_carry_lp(ctx, graph, n_edge, intra, inter, carry_inter) < 0)
5276 		return NULL;
5277 
5278 	lp = isl_basic_set_copy(graph->lp);
5279 	return non_neg_lexmin(graph, lp, n_edge, want_integral);
5280 }
5281 
5282 /* Construct an LP problem for finding schedule coefficients
5283  * such that the schedule carries as many of the validity dependences
5284  * as possible and
5285  * return the lexicographically smallest non-trivial solution.
5286  * If "fallback" is set, then the carrying is performed as a fallback
5287  * for the Pluto-like scheduler.
5288  * If "coincidence" is set, then try and carry coincidence edges as well.
5289  *
5290  * The variable "n_edge" stores the number of groups that should be carried.
5291  * If none of the "n_edge" groups can be carried
5292  * then return an empty vector.
5293  * If, moreover, "n_edge" is zero, then the LP problem does not even
5294  * need to be constructed.
5295  *
5296  * If a fallback solution is being computed, then compute an integral solution
5297  * for the coefficients rather than using the numerators
5298  * of a rational solution.
5299  *
5300  * If a fallback solution is being computed, if there are any intra-node
5301  * dependences, and if requested by the user, then first try
5302  * to only carry those intra-node dependences.
5303  * If this fails to carry any dependences, then try again
5304  * with the inter-node dependences included.
5305  */
5306 static __isl_give isl_vec *compute_carrying_sol(isl_ctx *ctx,
5307 	struct isl_sched_graph *graph, int fallback, int coincidence)
5308 {
5309 	isl_size n_intra, n_inter;
5310 	int n_edge;
5311 	struct isl_carry carry = { 0 };
5312 	isl_vec *sol;
5313 
5314 	carry.intra = collect_intra_validity(ctx, graph, coincidence,
5315 						&carry.lineality);
5316 	carry.inter = collect_inter_validity(graph, coincidence,
5317 						&carry.lineality);
5318 	n_intra = isl_basic_set_list_n_basic_set(carry.intra);
5319 	n_inter = isl_basic_set_list_n_basic_set(carry.inter);
5320 	if (n_intra < 0 || n_inter < 0)
5321 		goto error;
5322 
5323 	if (fallback && n_intra > 0 &&
5324 	    isl_options_get_schedule_carry_self_first(ctx)) {
5325 		sol = compute_carrying_sol_coef(ctx, graph, n_intra,
5326 				carry.intra, carry.inter, fallback, 0);
5327 		if (!sol || sol->size != 0 || n_inter == 0) {
5328 			isl_carry_clear(&carry);
5329 			return sol;
5330 		}
5331 		isl_vec_free(sol);
5332 	}
5333 
5334 	n_edge = n_intra + n_inter;
5335 	if (n_edge == 0) {
5336 		isl_carry_clear(&carry);
5337 		return isl_vec_alloc(ctx, 0);
5338 	}
5339 
5340 	sol = compute_carrying_sol_coef(ctx, graph, n_edge,
5341 				carry.intra, carry.inter, fallback, 1);
5342 	isl_carry_clear(&carry);
5343 	return sol;
5344 error:
5345 	isl_carry_clear(&carry);
5346 	return NULL;
5347 }
5348 
5349 /* Construct a schedule row for each node such that as many validity dependences
5350  * as possible are carried and then continue with the next band.
5351  * If "fallback" is set, then the carrying is performed as a fallback
5352  * for the Pluto-like scheduler.
5353  * If "coincidence" is set, then try and carry coincidence edges as well.
5354  *
5355  * If there are no validity dependences, then no dependence can be carried and
5356  * the procedure is guaranteed to fail.  If there is more than one component,
5357  * then try computing a schedule on each component separately
5358  * to prevent or at least postpone this failure.
5359  *
5360  * If a schedule row is computed, then check that dependences are carried
5361  * for at least one of the edges.
5362  *
5363  * If the computed schedule row turns out to be trivial on one or
5364  * more nodes where it should not be trivial, then we throw it away
5365  * and try again on each component separately.
5366  *
5367  * If there is only one component, then we accept the schedule row anyway,
5368  * but we do not consider it as a complete row and therefore do not
5369  * increment graph->n_row.  Note that the ranks of the nodes that
5370  * do get a non-trivial schedule part will get updated regardless and
5371  * graph->maxvar is computed based on these ranks.  The test for
5372  * whether more schedule rows are required in compute_schedule_wcc
5373  * is therefore not affected.
5374  *
5375  * Insert a band corresponding to the schedule row at position "node"
5376  * of the schedule tree and continue with the construction of the schedule.
5377  * This insertion and the continued construction is performed by split_scaled
5378  * after optionally checking for non-trivial common divisors.
5379  */
5380 static __isl_give isl_schedule_node *carry(__isl_take isl_schedule_node *node,
5381 	struct isl_sched_graph *graph, int fallback, int coincidence)
5382 {
5383 	int trivial;
5384 	isl_ctx *ctx;
5385 	isl_vec *sol;
5386 
5387 	if (!node)
5388 		return NULL;
5389 
5390 	ctx = isl_schedule_node_get_ctx(node);
5391 	sol = compute_carrying_sol(ctx, graph, fallback, coincidence);
5392 	if (!sol)
5393 		return isl_schedule_node_free(node);
5394 	if (sol->size == 0) {
5395 		isl_vec_free(sol);
5396 		if (graph->scc > 1)
5397 			return compute_component_schedule(node, graph, 1);
5398 		isl_die(ctx, isl_error_unknown, "unable to carry dependences",
5399 			return isl_schedule_node_free(node));
5400 	}
5401 
5402 	trivial = is_any_trivial(graph, sol);
5403 	if (trivial < 0) {
5404 		sol = isl_vec_free(sol);
5405 	} else if (trivial && graph->scc > 1) {
5406 		isl_vec_free(sol);
5407 		return compute_component_schedule(node, graph, 1);
5408 	}
5409 
5410 	if (update_schedule(graph, sol, 0) < 0)
5411 		return isl_schedule_node_free(node);
5412 	if (trivial)
5413 		graph->n_row--;
5414 
5415 	return split_scaled(node, graph);
5416 }
5417 
5418 /* Construct a schedule row for each node such that as many validity dependences
5419  * as possible are carried and then continue with the next band.
5420  * Do so as a fallback for the Pluto-like scheduler.
5421  * If "coincidence" is set, then try and carry coincidence edges as well.
5422  */
5423 static __isl_give isl_schedule_node *carry_fallback(
5424 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5425 	int coincidence)
5426 {
5427 	return carry(node, graph, 1, coincidence);
5428 }
5429 
5430 /* Construct a schedule row for each node such that as many validity dependences
5431  * as possible are carried and then continue with the next band.
5432  * Do so for the case where the Feautrier scheduler was selected
5433  * by the user.
5434  */
5435 static __isl_give isl_schedule_node *carry_feautrier(
5436 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5437 {
5438 	return carry(node, graph, 0, 0);
5439 }
5440 
5441 /* Construct a schedule row for each node such that as many validity dependences
5442  * as possible are carried and then continue with the next band.
5443  * Do so as a fallback for the Pluto-like scheduler.
5444  */
5445 static __isl_give isl_schedule_node *carry_dependences(
5446 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5447 {
5448 	return carry_fallback(node, graph, 0);
5449 }
5450 
5451 /* Construct a schedule row for each node such that as many validity or
5452  * coincidence dependences as possible are carried and
5453  * then continue with the next band.
5454  * Do so as a fallback for the Pluto-like scheduler.
5455  */
5456 static __isl_give isl_schedule_node *carry_coincidence(
5457 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5458 {
5459 	return carry_fallback(node, graph, 1);
5460 }
5461 
5462 /* Topologically sort statements mapped to the same schedule iteration
5463  * and insert a sequence node in front of "node"
5464  * corresponding to this order.
5465  * If "initialized" is set, then it may be assumed that compute_maxvar
5466  * has been called on the current band.  Otherwise, call
5467  * compute_maxvar if and before carry_dependences gets called.
5468  *
5469  * If it turns out to be impossible to sort the statements apart,
5470  * because different dependences impose different orderings
5471  * on the statements, then we extend the schedule such that
5472  * it carries at least one more dependence.
5473  */
5474 static __isl_give isl_schedule_node *sort_statements(
5475 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5476 	int initialized)
5477 {
5478 	isl_ctx *ctx;
5479 	isl_union_set_list *filters;
5480 
5481 	if (!node)
5482 		return NULL;
5483 
5484 	ctx = isl_schedule_node_get_ctx(node);
5485 	if (graph->n < 1)
5486 		isl_die(ctx, isl_error_internal,
5487 			"graph should have at least one node",
5488 			return isl_schedule_node_free(node));
5489 
5490 	if (graph->n == 1)
5491 		return node;
5492 
5493 	if (update_edges(ctx, graph) < 0)
5494 		return isl_schedule_node_free(node);
5495 
5496 	if (graph->n_edge == 0)
5497 		return node;
5498 
5499 	if (detect_sccs(ctx, graph) < 0)
5500 		return isl_schedule_node_free(node);
5501 
5502 	next_band(graph);
5503 	if (graph->scc < graph->n) {
5504 		if (!initialized && compute_maxvar(graph) < 0)
5505 			return isl_schedule_node_free(node);
5506 		return carry_dependences(node, graph);
5507 	}
5508 
5509 	filters = extract_sccs(ctx, graph);
5510 	node = isl_schedule_node_insert_sequence(node, filters);
5511 
5512 	return node;
5513 }
5514 
5515 /* Are there any (non-empty) (conditional) validity edges in the graph?
5516  */
5517 static int has_validity_edges(struct isl_sched_graph *graph)
5518 {
5519 	int i;
5520 
5521 	for (i = 0; i < graph->n_edge; ++i) {
5522 		int empty;
5523 
5524 		empty = isl_map_plain_is_empty(graph->edge[i].map);
5525 		if (empty < 0)
5526 			return -1;
5527 		if (empty)
5528 			continue;
5529 		if (is_any_validity(&graph->edge[i]))
5530 			return 1;
5531 	}
5532 
5533 	return 0;
5534 }
5535 
5536 /* Should we apply a Feautrier step?
5537  * That is, did the user request the Feautrier algorithm and are
5538  * there any validity dependences (left)?
5539  */
5540 static int need_feautrier_step(isl_ctx *ctx, struct isl_sched_graph *graph)
5541 {
5542 	if (ctx->opt->schedule_algorithm != ISL_SCHEDULE_ALGORITHM_FEAUTRIER)
5543 		return 0;
5544 
5545 	return has_validity_edges(graph);
5546 }
5547 
5548 /* Compute a schedule for a connected dependence graph using Feautrier's
5549  * multi-dimensional scheduling algorithm and return the updated schedule node.
5550  *
5551  * The original algorithm is described in [1].
5552  * The main idea is to minimize the number of scheduling dimensions, by
5553  * trying to satisfy as many dependences as possible per scheduling dimension.
5554  *
5555  * [1] P. Feautrier, Some Efficient Solutions to the Affine Scheduling
5556  *     Problem, Part II: Multi-Dimensional Time.
5557  *     In Intl. Journal of Parallel Programming, 1992.
5558  */
5559 static __isl_give isl_schedule_node *compute_schedule_wcc_feautrier(
5560 	isl_schedule_node *node, struct isl_sched_graph *graph)
5561 {
5562 	return carry_feautrier(node, graph);
5563 }
5564 
5565 /* Turn off the "local" bit on all (condition) edges.
5566  */
5567 static void clear_local_edges(struct isl_sched_graph *graph)
5568 {
5569 	int i;
5570 
5571 	for (i = 0; i < graph->n_edge; ++i)
5572 		if (is_condition(&graph->edge[i]))
5573 			clear_local(&graph->edge[i]);
5574 }
5575 
5576 /* Does "graph" have both condition and conditional validity edges?
5577  */
5578 static int need_condition_check(struct isl_sched_graph *graph)
5579 {
5580 	int i;
5581 	int any_condition = 0;
5582 	int any_conditional_validity = 0;
5583 
5584 	for (i = 0; i < graph->n_edge; ++i) {
5585 		if (is_condition(&graph->edge[i]))
5586 			any_condition = 1;
5587 		if (is_conditional_validity(&graph->edge[i]))
5588 			any_conditional_validity = 1;
5589 	}
5590 
5591 	return any_condition && any_conditional_validity;
5592 }
5593 
5594 /* Does "graph" contain any coincidence edge?
5595  */
5596 static int has_any_coincidence(struct isl_sched_graph *graph)
5597 {
5598 	int i;
5599 
5600 	for (i = 0; i < graph->n_edge; ++i)
5601 		if (is_coincidence(&graph->edge[i]))
5602 			return 1;
5603 
5604 	return 0;
5605 }
5606 
5607 /* Extract the final schedule row as a map with the iteration domain
5608  * of "node" as domain.
5609  */
5610 static __isl_give isl_map *final_row(struct isl_sched_node *node)
5611 {
5612 	isl_multi_aff *ma;
5613 	isl_size n_row;
5614 
5615 	n_row = isl_mat_rows(node->sched);
5616 	if (n_row < 0)
5617 		return NULL;
5618 	ma = node_extract_partial_schedule_multi_aff(node, n_row - 1, 1);
5619 	return isl_map_from_multi_aff(ma);
5620 }
5621 
5622 /* Is the conditional validity dependence in the edge with index "edge_index"
5623  * violated by the latest (i.e., final) row of the schedule?
5624  * That is, is i scheduled after j
5625  * for any conditional validity dependence i -> j?
5626  */
5627 static int is_violated(struct isl_sched_graph *graph, int edge_index)
5628 {
5629 	isl_map *src_sched, *dst_sched, *map;
5630 	struct isl_sched_edge *edge = &graph->edge[edge_index];
5631 	int empty;
5632 
5633 	src_sched = final_row(edge->src);
5634 	dst_sched = final_row(edge->dst);
5635 	map = isl_map_copy(edge->map);
5636 	map = isl_map_apply_domain(map, src_sched);
5637 	map = isl_map_apply_range(map, dst_sched);
5638 	map = isl_map_order_gt(map, isl_dim_in, 0, isl_dim_out, 0);
5639 	empty = isl_map_is_empty(map);
5640 	isl_map_free(map);
5641 
5642 	if (empty < 0)
5643 		return -1;
5644 
5645 	return !empty;
5646 }
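
/* For instance (hypothetical schedules): if the final schedule row maps
 * the source of a conditional validity dependence to 2 i and its sink
 * to j, then the dependence { [1] -> [1] } is violated because
 * 2 > 1, while { [0] -> [1] } is not.
 */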
5647 
5648 /* Does "graph" have any satisfied condition edges that
5649  * are adjacent to the conditional validity constraint with
5650  * domain "conditional_source" and range "conditional_sink"?
5651  *
5652  * A satisfied condition is one that is not local.
5653  * If a condition was forced to be local already (i.e., marked as local)
5654  * then there is no need to check if it is in fact local.
5655  *
5656  * Additionally, mark all adjacent condition edges found as local.
5657  */
5658 static int has_adjacent_true_conditions(struct isl_sched_graph *graph,
5659 	__isl_keep isl_union_set *conditional_source,
5660 	__isl_keep isl_union_set *conditional_sink)
5661 {
5662 	int i;
5663 	int any = 0;
5664 
5665 	for (i = 0; i < graph->n_edge; ++i) {
5666 		int adjacent, local;
5667 		isl_union_map *condition;
5668 
5669 		if (!is_condition(&graph->edge[i]))
5670 			continue;
5671 		if (is_local(&graph->edge[i]))
5672 			continue;
5673 
5674 		condition = graph->edge[i].tagged_condition;
5675 		adjacent = domain_intersects(condition, conditional_sink);
5676 		if (adjacent >= 0 && !adjacent)
5677 			adjacent = range_intersects(condition,
5678 							conditional_source);
5679 		if (adjacent < 0)
5680 			return -1;
5681 		if (!adjacent)
5682 			continue;
5683 
5684 		set_local(&graph->edge[i]);
5685 
5686 		local = is_condition_false(&graph->edge[i]);
5687 		if (local < 0)
5688 			return -1;
5689 		if (!local)
5690 			any = 1;
5691 	}
5692 
5693 	return any;
5694 }
5695 
5696 /* Are there any violated conditional validity dependences with
5697  * adjacent condition dependences that are not local with respect
5698  * to the current schedule?
5699  * That is, is the conditional validity constraint violated?
5700  *
5701  * Additionally, mark all those adjacent condition dependences as local.
5702  * We also mark those adjacent condition dependences that were not marked
5703  * as local before, but just happened to be local already.  This ensures
5704  * that they remain local if the schedule is recomputed.
5705  *
5706  * We first collect domain and range of all violated conditional validity
5707  * dependences and then check if there are any adjacent non-local
5708  * condition dependences.
5709  */
5710 static int has_violated_conditional_constraint(isl_ctx *ctx,
5711 	struct isl_sched_graph *graph)
5712 {
5713 	int i;
5714 	int any = 0;
5715 	isl_union_set *source, *sink;
5716 
5717 	source = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
5718 	sink = isl_union_set_empty(isl_space_params_alloc(ctx, 0));
5719 	for (i = 0; i < graph->n_edge; ++i) {
5720 		isl_union_set *uset;
5721 		isl_union_map *umap;
5722 		int violated;
5723 
5724 		if (!is_conditional_validity(&graph->edge[i]))
5725 			continue;
5726 
5727 		violated = is_violated(graph, i);
5728 		if (violated < 0)
5729 			goto error;
5730 		if (!violated)
5731 			continue;
5732 
5733 		any = 1;
5734 
5735 		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
5736 		uset = isl_union_map_domain(umap);
5737 		source = isl_union_set_union(source, uset);
5738 		source = isl_union_set_coalesce(source);
5739 
5740 		umap = isl_union_map_copy(graph->edge[i].tagged_validity);
5741 		uset = isl_union_map_range(umap);
5742 		sink = isl_union_set_union(sink, uset);
5743 		sink = isl_union_set_coalesce(sink);
5744 	}
5745 
5746 	if (any)
5747 		any = has_adjacent_true_conditions(graph, source, sink);
5748 
5749 	isl_union_set_free(source);
5750 	isl_union_set_free(sink);
5751 	return any;
5752 error:
5753 	isl_union_set_free(source);
5754 	isl_union_set_free(sink);
5755 	return -1;
5756 }
5757 
5758 /* Examine the current band (the rows between graph->band_start and
5759  * graph->n_total_row), deciding whether to drop it or add it to "node"
5760  * and then continue with the computation of the next band, if any.
5761  * If "initialized" is set, then it may be assumed that compute_maxvar
5762  * has been called on the current band.  Otherwise, call
5763  * compute_maxvar if and before carry_dependences gets called.
5764  *
5765  * The caller keeps looking for a new row as long as
5766  * graph->n_row < graph->maxvar.  If the latest attempt to find
5767  * such a row failed (i.e., we still have graph->n_row < graph->maxvar),
5768  * then we either
5769  * - split between SCCs and start over (assuming we found an interesting
5770  *	pair of SCCs between which to split)
5771  * - continue with the next band (assuming the current band has at least
5772  *	one row)
5773  * - if there is more than one SCC left, then split along all SCCs
5774  * - if outer coincidence needs to be enforced, then try to carry as many
5775  *	validity or coincidence dependences as possible and
5776  *	continue with the next band
5777  * - try to carry as many validity dependences as possible and
5778  *	continue with the next band
5779  * In each case, we first insert a band node in the schedule tree
5780  * if any rows have been computed.
5781  *
5782  * If the caller managed to complete the schedule and the current band
5783  * is empty, then finish off by topologically
5784  * sorting the statements based on the remaining dependences.
5785  * If, on the other hand, the current band has at least one row,
5786  * then continue with the next band.  Note that this next band
5787  * will necessarily be empty, but the graph may still be split up
5788  * into weakly connected components before arriving back here.
5789  */
5790 static __isl_give isl_schedule_node *compute_schedule_finish_band(
5791 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
5792 	int initialized)
5793 {
5794 	int empty;
5795 
5796 	if (!node)
5797 		return NULL;
5798 
5799 	empty = graph->n_total_row == graph->band_start;
5800 	if (graph->n_row < graph->maxvar) {
5801 		isl_ctx *ctx;
5802 
5803 		ctx = isl_schedule_node_get_ctx(node);
5804 		if (!ctx->opt->schedule_maximize_band_depth && !empty)
5805 			return compute_next_band(node, graph, 1);
5806 		if (graph->src_scc >= 0)
5807 			return compute_split_schedule(node, graph);
5808 		if (!empty)
5809 			return compute_next_band(node, graph, 1);
5810 		if (graph->scc > 1)
5811 			return compute_component_schedule(node, graph, 1);
5812 		if (!initialized && compute_maxvar(graph) < 0)
5813 			return isl_schedule_node_free(node);
5814 		if (isl_options_get_schedule_outer_coincidence(ctx))
5815 			return carry_coincidence(node, graph);
5816 		return carry_dependences(node, graph);
5817 	}
5818 
5819 	if (!empty)
5820 		return compute_next_band(node, graph, 1);
5821 	return sort_statements(node, graph, initialized);
5822 }
5823 
5824 /* Construct a band of schedule rows for a connected dependence graph.
5825  * The caller is responsible for determining the strongly connected
5826  * components and calling compute_maxvar first.
5827  *
5828  * We try to find a sequence of as many schedule rows as possible that result
5829  * in non-negative dependence distances (independent of the previous rows
5830  * in the sequence, i.e., such that the sequence is tilable), with as
5831  * many of the initial rows as possible satisfying the coincidence constraints.
5832  * The computation stops if we can't find any more rows or if we have found
5833  * all the rows we wanted to find.
5834  *
5835  * If ctx->opt->schedule_outer_coincidence is set, then we force the
5836  * outermost dimension to satisfy the coincidence constraints.  If this
5837  * turns out to be impossible, we fall back on the general scheme above
5838  * and try to carry as many dependences as possible.
5839  *
5840  * If "graph" contains both condition and conditional validity dependences,
5841  * then we need to check that the conditional schedule constraint
5842  * is satisfied, i.e., there are no violated conditional validity dependences
5843  * that are adjacent to any non-local condition dependences.
5844  * If there are, then we mark all those adjacent condition dependences
5845  * as local and recompute the current band.  Those dependences that
5846  * are marked local will then be forced to be local.
5847  * The initial computation is performed with no dependences marked as local.
5848  * If we are lucky, then there will be no violated conditional validity
5849  * dependences adjacent to any non-local condition dependences.
5850  * Otherwise, we mark some additional condition dependences as local and
5851  * recompute.  We continue this process until there are no violations left or
5852  * until we are no longer able to compute a schedule.
5853  * Since there are only a finite number of dependences,
5854  * there will only be a finite number of iterations.
5855  */
5856 static isl_stat compute_schedule_wcc_band(isl_ctx *ctx,
5857 	struct isl_sched_graph *graph)
5858 {
5859 	int has_coincidence;
5860 	int use_coincidence;
5861 	int force_coincidence = 0;
5862 	int check_conditional;
5863 
5864 	if (sort_sccs(graph) < 0)
5865 		return isl_stat_error;
5866 
5867 	clear_local_edges(graph);
5868 	check_conditional = need_condition_check(graph);
5869 	has_coincidence = has_any_coincidence(graph);
5870 
5871 	if (ctx->opt->schedule_outer_coincidence)
5872 		force_coincidence = 1;
5873 
5874 	use_coincidence = has_coincidence;
5875 	while (graph->n_row < graph->maxvar) {
5876 		isl_vec *sol;
5877 		int violated;
5878 		int coincident;
5879 
5880 		graph->src_scc = -1;
5881 		graph->dst_scc = -1;
5882 
5883 		if (setup_lp(ctx, graph, use_coincidence) < 0)
5884 			return isl_stat_error;
5885 		sol = solve_lp(ctx, graph);
5886 		if (!sol)
5887 			return isl_stat_error;
5888 		if (sol->size == 0) {
5889 			int empty = graph->n_total_row == graph->band_start;
5890 
5891 			isl_vec_free(sol);
5892 			if (use_coincidence && (!force_coincidence || !empty)) {
5893 				use_coincidence = 0;
5894 				continue;
5895 			}
5896 			return isl_stat_ok;
5897 		}
5898 		coincident = !has_coincidence || use_coincidence;
5899 		if (update_schedule(graph, sol, coincident) < 0)
5900 			return isl_stat_error;
5901 
5902 		if (!check_conditional)
5903 			continue;
5904 		violated = has_violated_conditional_constraint(ctx, graph);
5905 		if (violated < 0)
5906 			return isl_stat_error;
5907 		if (!violated)
5908 			continue;
5909 		if (reset_band(graph) < 0)
5910 			return isl_stat_error;
5911 		use_coincidence = has_coincidence;
5912 	}
5913 
5914 	return isl_stat_ok;
5915 }
5916 
5917 /* Compute a schedule for a connected dependence graph by considering
5918  * the graph as a whole and return the updated schedule node.
5919  *
5920  * The actual schedule rows of the current band are computed by
5921  * compute_schedule_wcc_band.  compute_schedule_finish_band takes
5922  * care of integrating the band into "node" and continuing
5923  * the computation.
5924  */
5925 static __isl_give isl_schedule_node *compute_schedule_wcc_whole(
5926 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
5927 {
5928 	isl_ctx *ctx;
5929 
5930 	if (!node)
5931 		return NULL;
5932 
5933 	ctx = isl_schedule_node_get_ctx(node);
5934 	if (compute_schedule_wcc_band(ctx, graph) < 0)
5935 		return isl_schedule_node_free(node);
5936 
5937 	return compute_schedule_finish_band(node, graph, 1);
5938 }
5939 
5940 /* Clustering information used by compute_schedule_wcc_clustering.
5941  *
5942  * "n" is the number of SCCs in the original dependence graph
5943  * "scc" is an array of "n" elements, each representing an SCC
5944  * of the original dependence graph.  All entries in the same cluster
5945  * have the same number of schedule rows.
5946  * "scc_cluster" maps each SCC index to the cluster to which it belongs,
5947  * where each cluster is represented by the index of the first SCC
5948  * in the cluster.  Initially, each SCC belongs to a cluster containing
5949  * only that SCC.
5950  *
5951  * "scc_in_merge" is used by merge_clusters_along_edge to keep
5952  * track of which SCCs need to be merged.
5953  *
5954  * "cluster" contains the merged clusters of SCCs after the clustering
5955  * has completed.
5956  *
5957  * "scc_node" is a temporary data structure used inside copy_partial.
5958  * For each SCC, it keeps track of the number of nodes in the SCC
5959  * that have already been copied.
5960  */
5961 struct isl_clustering {
5962 	int n;
5963 	struct isl_sched_graph *scc;
5964 	struct isl_sched_graph *cluster;
5965 	int *scc_cluster;
5966 	int *scc_node;
5967 	int *scc_in_merge;
5968 };
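
/* As a hypothetical example of the "scc_cluster" encoding: if the graph
 * has three SCCs 0, 1 and 2, and SCCs 1 and 2 have been merged into
 * a single cluster, then scc_cluster maps 0 to 0 and both 1 and 2 to 1,
 * the index of the first SCC in their cluster.
 */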
5969 
5970 /* Initialize the clustering data structure "c" from "graph".
5971  *
5972  * In particular, allocate memory, extract the SCCs from "graph"
5973  * into c->scc, initialize scc_cluster and construct
5974  * a band of schedule rows for each SCC.
5975  * Within each SCC, there is only one SCC by definition.
5976  * Each SCC initially belongs to a cluster containing only that SCC.
5977  */
5978 static isl_stat clustering_init(isl_ctx *ctx, struct isl_clustering *c,
5979 	struct isl_sched_graph *graph)
5980 {
5981 	int i;
5982 
5983 	c->n = graph->scc;
5984 	c->scc = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
5985 	c->cluster = isl_calloc_array(ctx, struct isl_sched_graph, c->n);
5986 	c->scc_cluster = isl_calloc_array(ctx, int, c->n);
5987 	c->scc_node = isl_calloc_array(ctx, int, c->n);
5988 	c->scc_in_merge = isl_calloc_array(ctx, int, c->n);
5989 	if (!c->scc || !c->cluster ||
5990 	    !c->scc_cluster || !c->scc_node || !c->scc_in_merge)
5991 		return isl_stat_error;
5992 
5993 	for (i = 0; i < c->n; ++i) {
5994 		if (extract_sub_graph(ctx, graph, &node_scc_exactly,
5995 					&edge_scc_exactly, i, &c->scc[i]) < 0)
5996 			return isl_stat_error;
5997 		c->scc[i].scc = 1;
5998 		if (compute_maxvar(&c->scc[i]) < 0)
5999 			return isl_stat_error;
6000 		if (compute_schedule_wcc_band(ctx, &c->scc[i]) < 0)
6001 			return isl_stat_error;
6002 		c->scc_cluster[i] = i;
6003 	}
6004 
6005 	return isl_stat_ok;
6006 }
6007 
6008 /* Free all memory allocated for "c".
6009  */
6010 static void clustering_free(isl_ctx *ctx, struct isl_clustering *c)
6011 {
6012 	int i;
6013 
6014 	if (c->scc)
6015 		for (i = 0; i < c->n; ++i)
6016 			graph_free(ctx, &c->scc[i]);
6017 	free(c->scc);
6018 	if (c->cluster)
6019 		for (i = 0; i < c->n; ++i)
6020 			graph_free(ctx, &c->cluster[i]);
6021 	free(c->cluster);
6022 	free(c->scc_cluster);
6023 	free(c->scc_node);
6024 	free(c->scc_in_merge);
6025 }
6026 
6027 /* Should we refrain from merging the cluster in "graph" with
6028  * any other cluster?
6029  * In particular, is its current schedule band empty and incomplete?
6030  */
6031 static int bad_cluster(struct isl_sched_graph *graph)
6032 {
6033 	return graph->n_row < graph->maxvar &&
6034 		graph->n_total_row == graph->band_start;
6035 }
6036 
6037 /* Is "edge" a proximity edge with a non-empty dependence relation?
6038  */
6039 static isl_bool is_non_empty_proximity(struct isl_sched_edge *edge)
6040 {
6041 	if (!is_proximity(edge))
6042 		return isl_bool_false;
6043 	return isl_bool_not(isl_map_plain_is_empty(edge->map));
6044 }
6045 
6046 /* Return the index of an edge in "graph" that can be used to merge
6047  * two clusters in "c".
6048  * Return graph->n_edge if no such edge can be found.
6049  * Return -1 on error.
6050  *
6051  * In particular, return a proximity edge between two clusters
6052  * that is not marked "no_merge" and such that neither of the
6053  * two clusters has an incomplete, empty band.
6054  *
6055  * If there are multiple such edges, then try and find the most
6056  * appropriate edge to use for merging.  In particular, pick the edge
6057  * with the greatest weight.  If there are multiple of those,
6058  * then pick one with the shortest distance between
6059  * the two cluster representatives.
6060  */
6061 static int find_proximity(struct isl_sched_graph *graph,
6062 	struct isl_clustering *c)
6063 {
6064 	int i, best = graph->n_edge, best_dist, best_weight;
6065 
6066 	for (i = 0; i < graph->n_edge; ++i) {
6067 		struct isl_sched_edge *edge = &graph->edge[i];
6068 		int dist, weight;
6069 		isl_bool prox;
6070 
6071 		prox = is_non_empty_proximity(edge);
6072 		if (prox < 0)
6073 			return -1;
6074 		if (!prox)
6075 			continue;
6076 		if (edge->no_merge)
6077 			continue;
6078 		if (bad_cluster(&c->scc[edge->src->scc]) ||
6079 		    bad_cluster(&c->scc[edge->dst->scc]))
6080 			continue;
6081 		dist = c->scc_cluster[edge->dst->scc] -
6082 			c->scc_cluster[edge->src->scc];
6083 		if (dist == 0)
6084 			continue;
6085 		weight = edge->weight;
6086 		if (best < graph->n_edge) {
6087 			if (best_weight > weight)
6088 				continue;
6089 			if (best_weight == weight && best_dist <= dist)
6090 				continue;
6091 		}
6092 		best = i;
6093 		best_dist = dist;
6094 		best_weight = weight;
6095 	}
6096 
6097 	return best;
6098 }
6099 
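/* Illustrative note (not part of the original code): suppose there are
 * two candidate proximity edges left after the filtering above,
 * both with weight 2, one connecting clusters 0 and 3 and
 * one connecting clusters 1 and 2.  Since the weights are equal,
 * the second edge is selected because the distance between
 * its cluster representatives (1) is smaller than that of
 * the first edge (3).  A hypothetical third candidate with weight 3
 * would have been preferred over both, regardless of distance.
 */
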
6100 /* Internal data structure used in mark_merge_sccs.
6101  *
6102  * "graph" is the dependence graph in which a strongly connected
6103  * component is constructed.
6104  * "scc_cluster" maps each SCC index to the cluster to which it belongs.
6105  * "src" and "dst" are the indices of the nodes that are being merged.
6106  */
6107 struct isl_mark_merge_sccs_data {
6108 	struct isl_sched_graph *graph;
6109 	int *scc_cluster;
6110 	int src;
6111 	int dst;
6112 };
6113 
6114 /* Check whether the cluster containing node "i" depends on the cluster
6115  * containing node "j".  If "i" and "j" belong to the same cluster,
6116  * then they are taken to depend on each other to ensure that
6117  * the resulting strongly connected component consists of complete
6118  * clusters.  Furthermore, if "i" and "j" are the two nodes that
6119  * are being merged, then they are taken to depend on each other as well.
6120  * Otherwise, check if there is a (conditional) validity dependence
6121  * from node[j] to node[i], forcing node[i] to follow node[j].
6122  */
6123 static isl_bool cluster_follows(int i, int j, void *user)
6124 {
6125 	struct isl_mark_merge_sccs_data *data = user;
6126 	struct isl_sched_graph *graph = data->graph;
6127 	int *scc_cluster = data->scc_cluster;
6128 
6129 	if (data->src == i && data->dst == j)
6130 		return isl_bool_true;
6131 	if (data->src == j && data->dst == i)
6132 		return isl_bool_true;
6133 	if (scc_cluster[graph->node[i].scc] == scc_cluster[graph->node[j].scc])
6134 		return isl_bool_true;
6135 
6136 	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
6137 }
6138 
6139 /* Mark all SCCs that belong to either of the two clusters in "c"
6140  * connected by the edge in "graph" with index "edge", or to any
6141  * of the intermediate clusters.
6142  * The marking is recorded in c->scc_in_merge.
6143  *
6144  * The given edge has been selected for merging two clusters,
6145  * meaning that there is at least one proximity edge between the two nodes.
6146  * However, there may also be (indirect) validity dependences
6147  * between the two nodes.  When merging the two clusters, all clusters
6148  * containing one or more of the intermediate nodes along the
6149  * indirect validity dependences need to be merged in as well.
6150  *
6151  * First collect all such nodes by computing the strongly connected
6152  * component (SCC) containing the two nodes connected by the edge, where
6153  * the two nodes are considered to depend on each other to make
6154  * sure they end up in the same SCC.  Similarly, each node is considered
6155  * to depend on every other node in the same cluster to ensure
6156  * that the SCC consists of complete clusters.
6157  *
6158  * Then the original SCCs that contain any of these nodes are marked
6159  * in c->scc_in_merge.
6160  */
6161 static isl_stat mark_merge_sccs(isl_ctx *ctx, struct isl_sched_graph *graph,
6162 	int edge, struct isl_clustering *c)
6163 {
6164 	struct isl_mark_merge_sccs_data data;
6165 	struct isl_tarjan_graph *g;
6166 	int i;
6167 
6168 	for (i = 0; i < c->n; ++i)
6169 		c->scc_in_merge[i] = 0;
6170 
6171 	data.graph = graph;
6172 	data.scc_cluster = c->scc_cluster;
6173 	data.src = graph->edge[edge].src - graph->node;
6174 	data.dst = graph->edge[edge].dst - graph->node;
6175 
6176 	g = isl_tarjan_graph_component(ctx, graph->n, data.dst,
6177 					&cluster_follows, &data);
6178 	if (!g)
6179 		goto error;
6180 
6181 	i = g->op;
6182 	if (i < 3)
6183 		isl_die(ctx, isl_error_internal,
6184 			"expecting at least two nodes in component",
6185 			goto error);
6186 	if (g->order[--i] != -1)
6187 		isl_die(ctx, isl_error_internal,
6188 			"expecting end of component marker", goto error);
6189 
6190 	for (--i; i >= 0 && g->order[i] != -1; --i) {
6191 		int scc = graph->node[g->order[i]].scc;
6192 		c->scc_in_merge[scc] = 1;
6193 	}
6194 
6195 	isl_tarjan_graph_free(g);
6196 	return isl_stat_ok;
6197 error:
6198 	isl_tarjan_graph_free(g);
6199 	return isl_stat_error;
6200 }
6201 
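/* Illustrative note (not part of the original code): if there are
 * (indirect) validity dependences cluster_0 -> cluster_1 -> cluster_2 and
 * the selected proximity edge connects a node in cluster_0
 * to a node in cluster_2, then the component computed above
 * also pulls in the nodes of cluster_1, so the SCCs of
 * all three clusters end up being marked in c->scc_in_merge.
 */
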
6202 /* Construct the identifier "cluster_i".
6203  */
6204 static __isl_give isl_id *cluster_id(isl_ctx *ctx, int i)
6205 {
6206 	char name[40];
6207 
6208 	snprintf(name, sizeof(name), "cluster_%d", i);
6209 	return isl_id_alloc(ctx, name, NULL);
6210 }
6211 
6212 /* Construct the space of the cluster with index "i" containing
6213  * the strongly connected component "scc".
6214  *
6215  * In particular, construct a space called cluster_i with dimension equal
6216  * to the number of schedule rows in the current band of "scc".
6217  */
6218 static __isl_give isl_space *cluster_space(struct isl_sched_graph *scc, int i)
6219 {
6220 	int nvar;
6221 	isl_space *space;
6222 	isl_id *id;
6223 
6224 	nvar = scc->n_total_row - scc->band_start;
6225 	space = isl_space_copy(scc->node[0].space);
6226 	space = isl_space_params(space);
6227 	space = isl_space_set_from_params(space);
6228 	space = isl_space_add_dims(space, isl_dim_set, nvar);
6229 	id = cluster_id(isl_space_get_ctx(space), i);
6230 	space = isl_space_set_tuple_id(space, isl_dim_set, id);
6231 
6232 	return space;
6233 }
6234 
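/* Illustrative note (not part of the original code): for a hypothetical
 * SCC with cluster index 5 whose current band consists of two
 * schedule rows, the space constructed above is that of the set
 *
 *	{ cluster_5[i0, i1] }
 *
 * with the parameters inherited from the space of the first node.
 */
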
6235 /* Collect the domain of the graph for merging clusters.
6236  *
6237  * In particular, for each cluster with first SCC "i", construct
6238  * a set in the space called cluster_i with dimension equal
6239  * to the number of schedule rows in the current band of the cluster.
6240  */
6241 static __isl_give isl_union_set *collect_domain(isl_ctx *ctx,
6242 	struct isl_sched_graph *graph, struct isl_clustering *c)
6243 {
6244 	int i;
6245 	isl_space *space;
6246 	isl_union_set *domain;
6247 
6248 	space = isl_space_params_alloc(ctx, 0);
6249 	domain = isl_union_set_empty(space);
6250 
6251 	for (i = 0; i < graph->scc; ++i) {
6252 		isl_space *space;
6253 
6254 		if (!c->scc_in_merge[i])
6255 			continue;
6256 		if (c->scc_cluster[i] != i)
6257 			continue;
6258 		space = cluster_space(&c->scc[i], i);
6259 		domain = isl_union_set_add_set(domain, isl_set_universe(space));
6260 	}
6261 
6262 	return domain;
6263 }
6264 
6265 /* Construct a map from the original instances to the corresponding
6266  * cluster instance in the current bands of the clusters in "c".
6267  */
6268 static __isl_give isl_union_map *collect_cluster_map(isl_ctx *ctx,
6269 	struct isl_sched_graph *graph, struct isl_clustering *c)
6270 {
6271 	int i, j;
6272 	isl_space *space;
6273 	isl_union_map *cluster_map;
6274 
6275 	space = isl_space_params_alloc(ctx, 0);
6276 	cluster_map = isl_union_map_empty(space);
6277 	for (i = 0; i < graph->scc; ++i) {
6278 		int start, n;
6279 		isl_id *id;
6280 
6281 		if (!c->scc_in_merge[i])
6282 			continue;
6283 
6284 		id = cluster_id(ctx, c->scc_cluster[i]);
6285 		start = c->scc[i].band_start;
6286 		n = c->scc[i].n_total_row - start;
6287 		for (j = 0; j < c->scc[i].n; ++j) {
6288 			isl_multi_aff *ma;
6289 			isl_map *map;
6290 			struct isl_sched_node *node = &c->scc[i].node[j];
6291 
6292 			ma = node_extract_partial_schedule_multi_aff(node,
6293 								    start, n);
6294 			ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out,
6295 							    isl_id_copy(id));
6296 			map = isl_map_from_multi_aff(ma);
6297 			cluster_map = isl_union_map_add_map(cluster_map, map);
6298 		}
6299 		isl_id_free(id);
6300 	}
6301 
6302 	return cluster_map;
6303 }
6304 
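/* Illustrative note (not part of the original code): for a hypothetical
 * node S[i, j] that belongs to an SCC in cluster 0 and whose current
 * band schedule is (i, i + j), the constructed union map contains
 *
 *	{ S[i, j] -> cluster_0[i, i + j] }
 */
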
6305 /* Add "umap" to the schedule constraints "sc" of all types of "edge"
6306  * that are not isl_edge_condition or isl_edge_conditional_validity.
6307  */
6308 static __isl_give isl_schedule_constraints *add_non_conditional_constraints(
6309 	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
6310 	__isl_take isl_schedule_constraints *sc)
6311 {
6312 	enum isl_edge_type t;
6313 
6314 	if (!sc)
6315 		return NULL;
6316 
6317 	for (t = isl_edge_first; t <= isl_edge_last; ++t) {
6318 		if (t == isl_edge_condition ||
6319 		    t == isl_edge_conditional_validity)
6320 			continue;
6321 		if (!is_type(edge, t))
6322 			continue;
6323 		sc = isl_schedule_constraints_add(sc, t,
6324 						    isl_union_map_copy(umap));
6325 	}
6326 
6327 	return sc;
6328 }
6329 
6330 /* Add schedule constraints of types isl_edge_condition and
6331  * isl_edge_conditional_validity to "sc" by applying "umap" to
6332  * the domains of the wrapped relations in domain and range
6333  * of the corresponding tagged constraints of "edge".
6334  */
6335 static __isl_give isl_schedule_constraints *add_conditional_constraints(
6336 	struct isl_sched_edge *edge, __isl_keep isl_union_map *umap,
6337 	__isl_take isl_schedule_constraints *sc)
6338 {
6339 	enum isl_edge_type t;
6340 	isl_union_map *tagged;
6341 
6342 	for (t = isl_edge_condition; t <= isl_edge_conditional_validity; ++t) {
6343 		if (!is_type(edge, t))
6344 			continue;
6345 		if (t == isl_edge_condition)
6346 			tagged = isl_union_map_copy(edge->tagged_condition);
6347 		else
6348 			tagged = isl_union_map_copy(edge->tagged_validity);
6349 		tagged = isl_union_map_zip(tagged);
6350 		tagged = isl_union_map_apply_domain(tagged,
6351 					isl_union_map_copy(umap));
6352 		tagged = isl_union_map_zip(tagged);
6353 		sc = isl_schedule_constraints_add(sc, t, tagged);
6354 		if (!sc)
6355 			return NULL;
6356 	}
6357 
6358 	return sc;
6359 }
6360 
6361 /* Given a mapping "cluster_map" from the original instances to
6362  * the cluster instances, add schedule constraints on the clusters
6363  * to "sc" corresponding to the original constraints represented by "edge".
6364  *
6365  * For non-tagged dependence constraints, the cluster constraints
6366  * are obtained by applying "cluster_map" to the edge->map.
6367  *
6368  * For tagged dependence constraints, "cluster_map" needs to be applied
6369  * to the domains of the wrapped relations in domain and range
6370  * of the tagged dependence constraints.  Pick out the mappings
6371  * from these domains from "cluster_map" and construct their product.
6372  * This mapping can then be applied to the pair of domains.
6373  */
6374 static __isl_give isl_schedule_constraints *collect_edge_constraints(
6375 	struct isl_sched_edge *edge, __isl_keep isl_union_map *cluster_map,
6376 	__isl_take isl_schedule_constraints *sc)
6377 {
6378 	isl_union_map *umap;
6379 	isl_space *space;
6380 	isl_union_set *uset;
6381 	isl_union_map *umap1, *umap2;
6382 
6383 	if (!sc)
6384 		return NULL;
6385 
6386 	umap = isl_union_map_from_map(isl_map_copy(edge->map));
6387 	umap = isl_union_map_apply_domain(umap,
6388 				isl_union_map_copy(cluster_map));
6389 	umap = isl_union_map_apply_range(umap,
6390 				isl_union_map_copy(cluster_map));
6391 	sc = add_non_conditional_constraints(edge, umap, sc);
6392 	isl_union_map_free(umap);
6393 
6394 	if (!sc || (!is_condition(edge) && !is_conditional_validity(edge)))
6395 		return sc;
6396 
6397 	space = isl_space_domain(isl_map_get_space(edge->map));
6398 	uset = isl_union_set_from_set(isl_set_universe(space));
6399 	umap1 = isl_union_map_copy(cluster_map);
6400 	umap1 = isl_union_map_intersect_domain(umap1, uset);
6401 	space = isl_space_range(isl_map_get_space(edge->map));
6402 	uset = isl_union_set_from_set(isl_set_universe(space));
6403 	umap2 = isl_union_map_copy(cluster_map);
6404 	umap2 = isl_union_map_intersect_domain(umap2, uset);
6405 	umap = isl_union_map_product(umap1, umap2);
6406 
6407 	sc = add_conditional_constraints(edge, umap, sc);
6408 
6409 	isl_union_map_free(umap);
6410 	return sc;
6411 }
6412 
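/* Illustrative note (not part of the original code): for a hypothetical
 * tagged validity constraint
 *
 *	{ [S[i] -> refS[]] -> [T[i] -> refT[]] }
 *
 * the first zip in add_conditional_constraints turns this into
 *
 *	{ [S[i] -> T[i]] -> [refS[] -> refT[]] }
 *
 * applying the product of the two restricted copies of "cluster_map"
 * to the domain replaces S[i] and T[i] by the corresponding
 * cluster instances, say cluster_0[...] and cluster_1[...], and
 * the second zip then produces a constraint of the form
 *
 *	{ [cluster_0[...] -> refS[]] -> [cluster_1[...] -> refT[]] }
 *
 * i.e., a tagged constraint on the cluster instances.
 */
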
6413 /* Given a mapping "cluster_map" from the original instances to
6414  * the cluster instances, add schedule constraints on the clusters
6415  * to "sc" corresponding to all edges in "graph" between nodes that
6416  * belong to SCCs that are marked for merging in "scc_in_merge".
6417  */
6418 static __isl_give isl_schedule_constraints *collect_constraints(
6419 	struct isl_sched_graph *graph, int *scc_in_merge,
6420 	__isl_keep isl_union_map *cluster_map,
6421 	__isl_take isl_schedule_constraints *sc)
6422 {
6423 	int i;
6424 
6425 	for (i = 0; i < graph->n_edge; ++i) {
6426 		struct isl_sched_edge *edge = &graph->edge[i];
6427 
6428 		if (!scc_in_merge[edge->src->scc])
6429 			continue;
6430 		if (!scc_in_merge[edge->dst->scc])
6431 			continue;
6432 		sc = collect_edge_constraints(edge, cluster_map, sc);
6433 	}
6434 
6435 	return sc;
6436 }
6437 
6438 /* Construct a dependence graph for scheduling clusters with respect
6439  * to each other and store the result in "merge_graph".
6440  * In particular, the nodes of the graph correspond to the schedule
6441  * dimensions of the current bands of those clusters that have been
6442  * marked for merging in "c".
6443  *
6444  * First construct an isl_schedule_constraints object for this domain
6445  * by transforming the edges in "graph" to the domain.
6446  * Then initialize a dependence graph for scheduling from these
6447  * constraints.
6448  */
6449 static isl_stat init_merge_graph(isl_ctx *ctx, struct isl_sched_graph *graph,
6450 	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
6451 {
6452 	isl_union_set *domain;
6453 	isl_union_map *cluster_map;
6454 	isl_schedule_constraints *sc;
6455 	isl_stat r;
6456 
6457 	domain = collect_domain(ctx, graph, c);
6458 	sc = isl_schedule_constraints_on_domain(domain);
6459 	if (!sc)
6460 		return isl_stat_error;
6461 	cluster_map = collect_cluster_map(ctx, graph, c);
6462 	sc = collect_constraints(graph, c->scc_in_merge, cluster_map, sc);
6463 	isl_union_map_free(cluster_map);
6464 
6465 	r = graph_init(merge_graph, sc);
6466 
6467 	isl_schedule_constraints_free(sc);
6468 
6469 	return r;
6470 }
6471 
6472 /* Compute the maximal number of remaining schedule rows that still need
6473  * to be computed for the nodes that belong to clusters with the maximal
6474  * dimension for the current band (i.e., the band that is to be merged).
6475  * Only clusters that are about to be merged are considered.
6476  * "maxvar" is the maximal dimension for the current band.
6477  * "c" contains information about the clusters.
6478  *
6479  * Return the maximal number of remaining schedule rows or -1 on error.
6480  */
6481 static int compute_maxvar_max_slack(int maxvar, struct isl_clustering *c)
6482 {
6483 	int i, j;
6484 	int max_slack;
6485 
6486 	max_slack = 0;
6487 	for (i = 0; i < c->n; ++i) {
6488 		int nvar;
6489 		struct isl_sched_graph *scc;
6490 
6491 		if (!c->scc_in_merge[i])
6492 			continue;
6493 		scc = &c->scc[i];
6494 		nvar = scc->n_total_row - scc->band_start;
6495 		if (nvar != maxvar)
6496 			continue;
6497 		for (j = 0; j < scc->n; ++j) {
6498 			struct isl_sched_node *node = &scc->node[j];
6499 			int slack;
6500 
6501 			if (node_update_vmap(node) < 0)
6502 				return -1;
6503 			slack = node->nvar - node->rank;
6504 			if (slack > max_slack)
6505 				max_slack = slack;
6506 		}
6507 	}
6508 
6509 	return max_slack;
6510 }
6511 
6512 /* If there are any clusters where the dimension of the current band
6513  * (i.e., the band that is to be merged) is smaller than "maxvar" and
6514  * if there are any nodes in such a cluster where the number
6515  * of remaining schedule rows that still need to be computed
6516  * is greater than "max_slack", then return the smallest current band
6517  * dimension of all these clusters.  Otherwise return the original value
6518  * of "maxvar".  Return -1 in case of any error.
6519  * Only clusters that are about to be merged are considered.
6520  * "c" contains information about the clusters.
6521  */
6522 static int limit_maxvar_to_slack(int maxvar, int max_slack,
6523 	struct isl_clustering *c)
6524 {
6525 	int i, j;
6526 
6527 	for (i = 0; i < c->n; ++i) {
6528 		int nvar;
6529 		struct isl_sched_graph *scc;
6530 
6531 		if (!c->scc_in_merge[i])
6532 			continue;
6533 		scc = &c->scc[i];
6534 		nvar = scc->n_total_row - scc->band_start;
6535 		if (nvar >= maxvar)
6536 			continue;
6537 		for (j = 0; j < scc->n; ++j) {
6538 			struct isl_sched_node *node = &scc->node[j];
6539 			int slack;
6540 
6541 			if (node_update_vmap(node) < 0)
6542 				return -1;
6543 			slack = node->nvar - node->rank;
6544 			if (slack > max_slack) {
6545 				maxvar = nvar;
6546 				break;
6547 			}
6548 		}
6549 	}
6550 
6551 	return maxvar;
6552 }
6553 
6554 /* Adjust merge_graph->maxvar based on the number of remaining schedule rows
6555  * that still need to be computed.  In particular, if there is a node
6556  * in a cluster where the dimension of the current band is smaller
6557  * than merge_graph->maxvar, but the number of remaining schedule rows
6558  * is greater than that of any node in a cluster with the maximal
6559  * dimension for the current band (i.e., merge_graph->maxvar),
6560  * then adjust merge_graph->maxvar to the (smallest) current band dimension
6561  * of those clusters.  Without this adjustment, the total number of
6562  * schedule dimensions would be increased, resulting in a skewed view
6563  * of the number of coincident dimensions.
6564  * "c" contains information about the clusters.
6565  *
6566  * If the maximize_band_depth option is set and merge_graph->maxvar is reduced,
6567  * then there is no point in attempting any merge since it will be rejected
6568  * anyway.  Set merge_graph->maxvar to zero in such cases.
6569  */
6570 static isl_stat adjust_maxvar_to_slack(isl_ctx *ctx,
6571 	struct isl_sched_graph *merge_graph, struct isl_clustering *c)
6572 {
6573 	int max_slack, maxvar;
6574 
6575 	max_slack = compute_maxvar_max_slack(merge_graph->maxvar, c);
6576 	if (max_slack < 0)
6577 		return isl_stat_error;
6578 	maxvar = limit_maxvar_to_slack(merge_graph->maxvar, max_slack, c);
6579 	if (maxvar < 0)
6580 		return isl_stat_error;
6581 
6582 	if (maxvar < merge_graph->maxvar) {
6583 		if (isl_options_get_schedule_maximize_band_depth(ctx))
6584 			merge_graph->maxvar = 0;
6585 		else
6586 			merge_graph->maxvar = maxvar;
6587 	}
6588 
6589 	return isl_stat_ok;
6590 }
6591 
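/* Illustrative note (not part of the original code): suppose
 * merge_graph->maxvar is 3 because some clusters in the merge have
 * three rows in their current band and their nodes need at most
 * one more schedule row (max_slack = 1), while another cluster
 * in the merge has only a single row in its current band and
 * contains a node that still needs two more rows.
 * Then merge_graph->maxvar is reduced to 1, or set to 0
 * if the maximize_band_depth option is set, since the merge
 * would be rejected anyway in that case.
 */
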
6592 /* Return the number of coincident dimensions in the current band of "graph",
6593  * where the nodes of "graph" are assumed to be scheduled by a single band.
6594  */
6595 static int get_n_coincident(struct isl_sched_graph *graph)
6596 {
6597 	int i;
6598 
6599 	for (i = graph->band_start; i < graph->n_total_row; ++i)
6600 		if (!graph->node[0].coincident[i])
6601 			break;
6602 
6603 	return i - graph->band_start;
6604 }
6605 
6606 /* Should the clusters be merged based on the cluster schedule
6607  * in the current (and only) band of "merge_graph", given that
6608  * coincidence should be maximized?
6609  *
6610  * If the number of coincident schedule dimensions in the merged band
6611  * would be less than the maximal number of coincident schedule dimensions
6612  * in any of the merged clusters, then the clusters should not be merged.
6613  */
6614 static isl_bool ok_to_merge_coincident(struct isl_clustering *c,
6615 	struct isl_sched_graph *merge_graph)
6616 {
6617 	int i;
6618 	int n_coincident;
6619 	int max_coincident;
6620 
6621 	max_coincident = 0;
6622 	for (i = 0; i < c->n; ++i) {
6623 		if (!c->scc_in_merge[i])
6624 			continue;
6625 		n_coincident = get_n_coincident(&c->scc[i]);
6626 		if (n_coincident > max_coincident)
6627 			max_coincident = n_coincident;
6628 	}
6629 
6630 	n_coincident = get_n_coincident(merge_graph);
6631 
6632 	return isl_bool_ok(n_coincident >= max_coincident);
6633 }
6634 
6635 /* Return the transformation on "node" expressed by the current (and only)
6636  * band of "merge_graph" applied to the clusters in "c".
6637  *
6638  * First find the representation of "node" in its SCC in "c" and
6639  * extract the transformation expressed by the current band.
6640  * Then extract the transformation applied by "merge_graph"
6641  * to the cluster to which this SCC belongs.
6642  * Combine the two to obtain the complete transformation on the node.
6643  *
6644  * Note that the range of the first transformation is an anonymous space,
6645  * while the domain of the second is named "cluster_X".  The range
6646  * of the former therefore needs to be adjusted before the two
6647  * can be combined.
6648  */
6649 static __isl_give isl_map *extract_node_transformation(isl_ctx *ctx,
6650 	struct isl_sched_node *node, struct isl_clustering *c,
6651 	struct isl_sched_graph *merge_graph)
6652 {
6653 	struct isl_sched_node *scc_node, *cluster_node;
6654 	int start, n;
6655 	isl_id *id;
6656 	isl_space *space;
6657 	isl_multi_aff *ma, *ma2;
6658 
6659 	scc_node = graph_find_node(ctx, &c->scc[node->scc], node->space);
6660 	if (scc_node && !is_node(&c->scc[node->scc], scc_node))
6661 		isl_die(ctx, isl_error_internal, "unable to find node",
6662 			return NULL);
6663 	start = c->scc[node->scc].band_start;
6664 	n = c->scc[node->scc].n_total_row - start;
6665 	ma = node_extract_partial_schedule_multi_aff(scc_node, start, n);
6666 	space = cluster_space(&c->scc[node->scc], c->scc_cluster[node->scc]);
6667 	cluster_node = graph_find_node(ctx, merge_graph, space);
6668 	if (cluster_node && !is_node(merge_graph, cluster_node))
6669 		isl_die(ctx, isl_error_internal, "unable to find cluster",
6670 			space = isl_space_free(space));
6671 	id = isl_space_get_tuple_id(space, isl_dim_set);
6672 	ma = isl_multi_aff_set_tuple_id(ma, isl_dim_out, id);
6673 	isl_space_free(space);
6674 	n = merge_graph->n_total_row;
6675 	ma2 = node_extract_partial_schedule_multi_aff(cluster_node, 0, n);
6676 	ma = isl_multi_aff_pullback_multi_aff(ma2, ma);
6677 
6678 	return isl_map_from_multi_aff(ma);
6679 }
6680 
6681 /* Given a set of distances "set", are they bounded by a small constant
6682  * in direction "pos"?
6683  * In practice, check if they are bounded by 2 by checking that there
6684  * are no elements with a value greater than or equal to 3 or
6685  * smaller than or equal to -3.
6686  */
6687 static isl_bool distance_is_bounded(__isl_keep isl_set *set, int pos)
6688 {
6689 	isl_bool bounded;
6690 	isl_set *test;
6691 
6692 	if (!set)
6693 		return isl_bool_error;
6694 
6695 	test = isl_set_copy(set);
6696 	test = isl_set_lower_bound_si(test, isl_dim_set, pos, 3);
6697 	bounded = isl_set_is_empty(test);
6698 	isl_set_free(test);
6699 
6700 	if (bounded < 0 || !bounded)
6701 		return bounded;
6702 
6703 	test = isl_set_copy(set);
6704 	test = isl_set_upper_bound_si(test, isl_dim_set, pos, -3);
6705 	bounded = isl_set_is_empty(test);
6706 	isl_set_free(test);
6707 
6708 	return bounded;
6709 }
6710 
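/* Illustrative note (not part of the original code): with this test,
 * the distance set { [d] : -2 <= d <= 2 } is considered bounded
 * in direction 0, while { [d] : d >= 0 } is not, since the latter
 * contains elements with a value greater than or equal to 3.
 */
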
6711 /* Does the set "set" have a fixed (but possibly parametric) value
6712  * at dimension "pos"?
6713  */
6714 static isl_bool has_single_value(__isl_keep isl_set *set, int pos)
6715 {
6716 	isl_size n;
6717 	isl_bool single;
6718 
6719 	n = isl_set_dim(set, isl_dim_set);
6720 	if (n < 0)
6721 		return isl_bool_error;
6722 	set = isl_set_copy(set);
6723 	set = isl_set_project_out(set, isl_dim_set, pos + 1, n - (pos + 1));
6724 	set = isl_set_project_out(set, isl_dim_set, 0, pos);
6725 	single = isl_set_is_singleton(set);
6726 	isl_set_free(set);
6727 
6728 	return single;
6729 }
6730 
6731 /* Does "map" have a fixed (but possibly parametric) value
6732  * at dimension "pos" of either its domain or its range?
6733  */
6734 static isl_bool has_singular_src_or_dst(__isl_keep isl_map *map, int pos)
6735 {
6736 	isl_set *set;
6737 	isl_bool single;
6738 
6739 	set = isl_map_domain(isl_map_copy(map));
6740 	single = has_single_value(set, pos);
6741 	isl_set_free(set);
6742 
6743 	if (single < 0 || single)
6744 		return single;
6745 
6746 	set = isl_map_range(isl_map_copy(map));
6747 	single = has_single_value(set, pos);
6748 	isl_set_free(set);
6749 
6750 	return single;
6751 }
6752 
6753 /* Does the edge "edge" from "graph" have bounded dependence distances
6754  * in the merged graph "merge_graph" of a selection of clusters in "c"?
6755  *
6756  * Extract the complete transformations of the source and destination
6757  * nodes of the edge, apply them to the edge constraints and
6758  * compute the differences.  Finally, check if these differences are bounded
6759  * in each direction.
6760  *
6761  * If the dimension of the band is greater than the number of
6762  * dimensions that can be expected to be optimized by the edge
6763  * (based on its weight), then also allow the differences to be unbounded
6764  * in the remaining dimensions, but only if either the source or
6765  * the destination has a fixed value in that direction.
6766  * This allows a statement that produces values that are used by
6767  * several instances of another statement to be merged with that
6768  * other statement.
6769  * However, merging such clusters will introduce an inherently
6770  * large proximity distance inside the merged cluster, meaning
6771  * that proximity distances will no longer be optimized in
6772  * subsequent merges.  These merges are therefore only allowed
6773  * after all other possible merges have been tried.
6774  * The first time such a merge is encountered, the weight of the edge
6775  * is replaced by a negative weight.  The second time (i.e., after
6776  * all merges over edges with a non-negative weight have been tried),
6777  * the merge is allowed.
6778  */
6779 static isl_bool has_bounded_distances(isl_ctx *ctx, struct isl_sched_edge *edge,
6780 	struct isl_sched_graph *graph, struct isl_clustering *c,
6781 	struct isl_sched_graph *merge_graph)
6782 {
6783 	int i, n_slack;
6784 	isl_size n;
6785 	isl_bool bounded;
6786 	isl_map *map, *t;
6787 	isl_set *dist;
6788 
6789 	map = isl_map_copy(edge->map);
6790 	t = extract_node_transformation(ctx, edge->src, c, merge_graph);
6791 	map = isl_map_apply_domain(map, t);
6792 	t = extract_node_transformation(ctx, edge->dst, c, merge_graph);
6793 	map = isl_map_apply_range(map, t);
6794 	dist = isl_map_deltas(isl_map_copy(map));
6795 
6796 	bounded = isl_bool_true;
6797 	n = isl_set_dim(dist, isl_dim_set);
6798 	if (n < 0)
6799 		goto error;
6800 	n_slack = n - edge->weight;
6801 	if (edge->weight < 0)
6802 		n_slack -= graph->max_weight + 1;
6803 	for (i = 0; i < n; ++i) {
6804 		isl_bool bounded_i, singular_i;
6805 
6806 		bounded_i = distance_is_bounded(dist, i);
6807 		if (bounded_i < 0)
6808 			goto error;
6809 		if (bounded_i)
6810 			continue;
6811 		if (edge->weight >= 0)
6812 			bounded = isl_bool_false;
6813 		n_slack--;
6814 		if (n_slack < 0)
6815 			break;
6816 		singular_i = has_singular_src_or_dst(map, i);
6817 		if (singular_i < 0)
6818 			goto error;
6819 		if (singular_i)
6820 			continue;
6821 		bounded = isl_bool_false;
6822 		break;
6823 	}
6824 	if (!bounded && i >= n && edge->weight >= 0)
6825 		edge->weight -= graph->max_weight + 1;
6826 	isl_map_free(map);
6827 	isl_set_free(dist);
6828 
6829 	return bounded;
6830 error:
6831 	isl_map_free(map);
6832 	isl_set_free(dist);
6833 	return isl_bool_error;
6834 }
6835 
6836 /* Should the clusters be merged based on the cluster schedule
6837  * in the current (and only) band of "merge_graph"?
6838  * "graph" is the original dependence graph, while "c" records
6839  * which SCCs are involved in the latest merge.
6840  *
6841  * In particular, is there at least one proximity constraint
6842  * that is optimized by the merge?
6843  *
6844  * A proximity constraint is considered to be optimized
6845  * if the dependence distances are small.
6846  */
6847 static isl_bool ok_to_merge_proximity(isl_ctx *ctx,
6848 	struct isl_sched_graph *graph, struct isl_clustering *c,
6849 	struct isl_sched_graph *merge_graph)
6850 {
6851 	int i;
6852 
6853 	for (i = 0; i < graph->n_edge; ++i) {
6854 		struct isl_sched_edge *edge = &graph->edge[i];
6855 		isl_bool bounded;
6856 
6857 		if (!is_proximity(edge))
6858 			continue;
6859 		if (!c->scc_in_merge[edge->src->scc])
6860 			continue;
6861 		if (!c->scc_in_merge[edge->dst->scc])
6862 			continue;
6863 		if (c->scc_cluster[edge->dst->scc] ==
6864 		    c->scc_cluster[edge->src->scc])
6865 			continue;
6866 		bounded = has_bounded_distances(ctx, edge, graph, c,
6867 						merge_graph);
6868 		if (bounded < 0 || bounded)
6869 			return bounded;
6870 	}
6871 
6872 	return isl_bool_false;
6873 }
6874 
6875 /* Should the clusters be merged based on the cluster schedule
6876  * in the current (and only) band of "merge_graph"?
6877  * "graph" is the original dependence graph, while "c" records
6878  * which SCCs are involved in the latest merge.
6879  *
6880  * If the current band is empty, then the clusters should not be merged.
6881  *
6882  * If the band depth should be maximized and the merge schedule
6883  * is incomplete (meaning that the dimension of some of the schedule
6884  * bands in the original schedule will be reduced), then the clusters
6885  * should not be merged.
6886  *
6887  * If the schedule_maximize_coincidence option is set, then check that
6888  * the number of coincident schedule dimensions is not reduced.
6889  *
6890  * Finally, only allow the merge if at least one proximity
6891  * constraint is optimized.
6892  */
6893 static isl_bool ok_to_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
6894 	struct isl_clustering *c, struct isl_sched_graph *merge_graph)
6895 {
6896 	if (merge_graph->n_total_row == merge_graph->band_start)
6897 		return isl_bool_false;
6898 
6899 	if (isl_options_get_schedule_maximize_band_depth(ctx) &&
6900 	    merge_graph->n_total_row < merge_graph->maxvar)
6901 		return isl_bool_false;
6902 
6903 	if (isl_options_get_schedule_maximize_coincidence(ctx)) {
6904 		isl_bool ok;
6905 
6906 		ok = ok_to_merge_coincident(c, merge_graph);
6907 		if (ok < 0 || !ok)
6908 			return ok;
6909 	}
6910 
6911 	return ok_to_merge_proximity(ctx, graph, c, merge_graph);
6912 }
6913 
6914 /* Apply the schedule in "t_node" to the "n" rows starting at "first"
6915  * of the schedule in "node" and return the result.
6916  *
6917  * That is, essentially compute
6918  *
6919  *	T * N(first:first+n-1)
6920  *
6921  * taking into account the constant term and the parameter coefficients
6922  * in "t_node".
6923  */
6924 static __isl_give isl_mat *node_transformation(isl_ctx *ctx,
6925 	struct isl_sched_node *t_node, struct isl_sched_node *node,
6926 	int first, int n)
6927 {
6928 	int i, j;
6929 	isl_mat *t;
6930 	isl_size n_row, n_col;
6931 	int n_param, n_var;
6932 
6933 	n_param = node->nparam;
6934 	n_var = node->nvar;
6935 	n_row = isl_mat_rows(t_node->sched);
6936 	n_col = isl_mat_cols(node->sched);
6937 	if (n_row < 0 || n_col < 0)
6938 		return NULL;
6939 	t = isl_mat_alloc(ctx, n_row, n_col);
6940 	if (!t)
6941 		return NULL;
6942 	for (i = 0; i < n_row; ++i) {
6943 		isl_seq_cpy(t->row[i], t_node->sched->row[i], 1 + n_param);
6944 		isl_seq_clr(t->row[i] + 1 + n_param, n_var);
6945 		for (j = 0; j < n; ++j)
6946 			isl_seq_addmul(t->row[i],
6947 					t_node->sched->row[i][1 + n_param + j],
6948 					node->sched->row[first + j],
6949 					1 + n_param + n_var);
6950 	}
6951 	return t;
6952 }
6953 
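/* Illustrative note (not part of the original code): if the rows
 * "first" and "first + 1" of the schedule of a hypothetical node express
 * (i, j) and the cluster schedule in "t_node" consists of the single row
 *
 *	c0 + c1 + 1
 *
 * then the matrix computed above expresses the single row
 *
 *	i + j + 1
 *
 * that is, the cluster schedule applied to the current band of the node,
 * including the constant term.
 */
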
6954 /* Apply the cluster schedule in "t_node" to the current band
6955  * schedule of the nodes in "graph".
6956  *
6957  * In particular, replace the rows starting at band_start
6958  * by the result of applying the cluster schedule in "t_node"
6959  * to the original rows.
6960  *
6961  * The coincidence of the schedule is determined by the coincidence
6962  * of the cluster schedule.
6963  */
6964 static isl_stat transform(isl_ctx *ctx, struct isl_sched_graph *graph,
6965 	struct isl_sched_node *t_node)
6966 {
6967 	int i, j;
6968 	isl_size n_new;
6969 	int start, n;
6970 
6971 	start = graph->band_start;
6972 	n = graph->n_total_row - start;
6973 
6974 	n_new = isl_mat_rows(t_node->sched);
6975 	if (n_new < 0)
6976 		return isl_stat_error;
6977 	for (i = 0; i < graph->n; ++i) {
6978 		struct isl_sched_node *node = &graph->node[i];
6979 		isl_mat *t;
6980 
6981 		t = node_transformation(ctx, t_node, node, start, n);
6982 		node->sched = isl_mat_drop_rows(node->sched, start, n);
6983 		node->sched = isl_mat_concat(node->sched, t);
6984 		node->sched_map = isl_map_free(node->sched_map);
6985 		if (!node->sched)
6986 			return isl_stat_error;
6987 		for (j = 0; j < n_new; ++j)
6988 			node->coincident[start + j] = t_node->coincident[j];
6989 	}
6990 	graph->n_total_row -= n;
6991 	graph->n_row -= n;
6992 	graph->n_total_row += n_new;
6993 	graph->n_row += n_new;
6994 
6995 	return isl_stat_ok;
6996 }
6997 
6998 /* Merge the clusters marked for merging in "c" into a single
6999  * cluster using the cluster schedule in the current band of "merge_graph".
7000  * The representative SCC for the new cluster is the SCC with
7001  * the smallest index.
7002  *
7003  * The current band schedule of each SCC in the new cluster is obtained
7004  * by applying the schedule of the corresponding original cluster
7005  * to the original band schedule.
7006  * All SCCs in the new cluster have the same number of schedule rows.
7007  */
7008 static isl_stat merge(isl_ctx *ctx, struct isl_clustering *c,
7009 	struct isl_sched_graph *merge_graph)
7010 {
7011 	int i;
7012 	int cluster = -1;
7013 	isl_space *space;
7014 
7015 	for (i = 0; i < c->n; ++i) {
7016 		struct isl_sched_node *node;
7017 
7018 		if (!c->scc_in_merge[i])
7019 			continue;
7020 		if (cluster < 0)
7021 			cluster = i;
7022 		space = cluster_space(&c->scc[i], c->scc_cluster[i]);
7023 		node = graph_find_node(ctx, merge_graph, space);
7024 		isl_space_free(space);
7025 		if (!node)
7026 			return isl_stat_error;
7027 		if (!is_node(merge_graph, node))
7028 			isl_die(ctx, isl_error_internal,
7029 				"unable to find cluster",
7030 				return isl_stat_error);
7031 		if (transform(ctx, &c->scc[i], node) < 0)
7032 			return isl_stat_error;
7033 		c->scc_cluster[i] = cluster;
7034 	}
7035 
7036 	return isl_stat_ok;
7037 }
7038 
7039 /* Try and merge the clusters of SCCs marked in c->scc_in_merge
7040  * by scheduling the current cluster bands with respect to each other.
7041  *
7042  * Construct a dependence graph with a space for each cluster and
7043  * with the coordinates of each space corresponding to the schedule
7044  * dimensions of the current band of that cluster.
7045  * Construct a cluster schedule in this cluster dependence graph and
7046  * apply it to the current cluster bands if it is applicable
7047  * according to ok_to_merge.
7048  *
7049  * If the number of remaining schedule dimensions in a cluster
7050  * with a non-maximal current schedule dimension is greater than
7051  * the number of remaining schedule dimensions in clusters
7052  * with a maximal current schedule dimension, then restrict
7053  * the number of rows to be computed in the cluster schedule
7054  * to the minimal such non-maximal current schedule dimension.
7055  * Do this by adjusting merge_graph.maxvar.
7056  *
7057  * Return isl_bool_true if the clusters have effectively been merged
7058  * into a single cluster.
7059  *
7060  * Note that since the standard scheduling algorithm minimizes the maximal
7061  * distance over proximity constraints, the proximity constraints between
7062  * the merged clusters may not be optimized any further than what is
7063  * sufficient to bring the distances within the limits of the internal
7064  * proximity constraints inside the individual clusters.
7065  * It may therefore make sense to perform an additional translation step
7066  * to bring the clusters closer to each other, while maintaining
7067  * the linear part of the merging schedule found using the standard
7068  * scheduling algorithm.
7069  */
7070 static isl_bool try_merge(isl_ctx *ctx, struct isl_sched_graph *graph,
7071 	struct isl_clustering *c)
7072 {
7073 	struct isl_sched_graph merge_graph = { 0 };
7074 	isl_bool merged;
7075 
7076 	if (init_merge_graph(ctx, graph, c, &merge_graph) < 0)
7077 		goto error;
7078 
7079 	if (compute_maxvar(&merge_graph) < 0)
7080 		goto error;
7081 	if (adjust_maxvar_to_slack(ctx, &merge_graph, c) < 0)
7082 		goto error;
7083 	if (compute_schedule_wcc_band(ctx, &merge_graph) < 0)
7084 		goto error;
7085 	merged = ok_to_merge(ctx, graph, c, &merge_graph);
7086 	if (merged && merge(ctx, c, &merge_graph) < 0)
7087 		goto error;
7088 
7089 	graph_free(ctx, &merge_graph);
7090 	return merged;
7091 error:
7092 	graph_free(ctx, &merge_graph);
7093 	return isl_bool_error;
7094 }
7095 
7096 /* Is there any edge marked "no_merge" between two SCCs that are
7097  * about to be merged (i.e., that are set in "scc_in_merge")?
7098  * "merge_edge" is the proximity edge along which the clusters of SCCs
7099  * are going to be merged.
7100  *
7101  * If there is any edge between two SCCs with a negative weight,
7102  * while the weight of "merge_edge" is non-negative, then this
7103  * means that the edge was postponed.  "merge_edge" should then
7104  * also be postponed since merging along the edge with negative weight should
7105  * be postponed until all edges with non-negative weight have been tried.
7106  * Replace the weight of "merge_edge" by a negative weight as well and
7107  * tell the caller not to attempt a merge.
7108  */
7109 static int any_no_merge(struct isl_sched_graph *graph, int *scc_in_merge,
7110 	struct isl_sched_edge *merge_edge)
7111 {
7112 	int i;
7113 
7114 	for (i = 0; i < graph->n_edge; ++i) {
7115 		struct isl_sched_edge *edge = &graph->edge[i];
7116 
7117 		if (!scc_in_merge[edge->src->scc])
7118 			continue;
7119 		if (!scc_in_merge[edge->dst->scc])
7120 			continue;
7121 		if (edge->no_merge)
7122 			return 1;
7123 		if (merge_edge->weight >= 0 && edge->weight < 0) {
7124 			merge_edge->weight -= graph->max_weight + 1;
7125 			return 1;
7126 		}
7127 	}
7128 
7129 	return 0;
7130 }
7131 
7132 /* Merge the two clusters in "c" connected by the edge in "graph"
7133  * with index "edge" into a single cluster.
7134  * If it turns out to be impossible to merge these two clusters,
7135  * then mark the edge as "no_merge" such that it will not be
7136  * considered again.
7137  *
7138  * First mark all SCCs that need to be merged.  This includes the SCCs
7139  * in the two clusters, but it may also include the SCCs
7140  * of intermediate clusters.
7141  * If there is already a no_merge edge between any pair of such SCCs,
7142  * then simply mark the current edge as no_merge as well.
7143  * Likewise, if any of those edges was postponed by has_bounded_distances,
7144  * then postpone the current edge as well.
7145  * Otherwise, try and merge the clusters and mark "edge" as "no_merge"
7146  * if the clusters did not end up getting merged, unless the non-merge
7147  * is due to the fact that the edge was postponed.  This postponement
7148  * can be recognized by a change in weight (from non-negative to negative).
7149  */
7150 static isl_stat merge_clusters_along_edge(isl_ctx *ctx,
7151 	struct isl_sched_graph *graph, int edge, struct isl_clustering *c)
7152 {
7153 	isl_bool merged;
7154 	int edge_weight = graph->edge[edge].weight;
7155 
7156 	if (mark_merge_sccs(ctx, graph, edge, c) < 0)
7157 		return isl_stat_error;
7158 
7159 	if (any_no_merge(graph, c->scc_in_merge, &graph->edge[edge]))
7160 		merged = isl_bool_false;
7161 	else
7162 		merged = try_merge(ctx, graph, c);
7163 	if (merged < 0)
7164 		return isl_stat_error;
7165 	if (!merged && edge_weight == graph->edge[edge].weight)
7166 		graph->edge[edge].no_merge = 1;
7167 
7168 	return isl_stat_ok;
7169 }
7170 
7171 /* Does "node" belong to the cluster identified by "cluster"?
7172  */
7173 static int node_cluster_exactly(struct isl_sched_node *node, int cluster)
7174 {
7175 	return node->cluster == cluster;
7176 }
7177 
7178 /* Does "edge" connect two nodes belonging to the cluster
7179  * identified by "cluster"?
7180  */
7181 static int edge_cluster_exactly(struct isl_sched_edge *edge, int cluster)
7182 {
7183 	return edge->src->cluster == cluster && edge->dst->cluster == cluster;
7184 }
7185 
7186 /* Swap the schedule of "node1" and "node2".
7187  * Both nodes have been derived from the same node in a common parent graph.
7188  * Since the "coincident" field is shared with that node
7189  * in the parent graph, there is no need to also swap this field.
7190  */
7191 static void swap_sched(struct isl_sched_node *node1,
7192 	struct isl_sched_node *node2)
7193 {
7194 	isl_mat *sched;
7195 	isl_map *sched_map;
7196 
7197 	sched = node1->sched;
7198 	node1->sched = node2->sched;
7199 	node2->sched = sched;
7200 
7201 	sched_map = node1->sched_map;
7202 	node1->sched_map = node2->sched_map;
7203 	node2->sched_map = sched_map;
7204 }
7205 
7206 /* Copy the current band schedule from the SCCs that form the cluster
7207  * with index "pos" to the actual cluster at position "pos".
7208  * By construction, the index of the first SCC that belongs to the cluster
7209  * is also "pos".
7210  *
7211  * The order of the nodes inside both the SCCs and the cluster
7212  * is assumed to be the same as the order in the original "graph".
7213  *
7214  * Since the SCC graphs will no longer be used after this function,
7215  * the schedules are actually swapped rather than copied.
7216  */
7217 static isl_stat copy_partial(struct isl_sched_graph *graph,
7218 	struct isl_clustering *c, int pos)
7219 {
7220 	int i, j;
7221 
7222 	c->cluster[pos].n_total_row = c->scc[pos].n_total_row;
7223 	c->cluster[pos].n_row = c->scc[pos].n_row;
7224 	c->cluster[pos].maxvar = c->scc[pos].maxvar;
7225 	j = 0;
7226 	for (i = 0; i < graph->n; ++i) {
7227 		int k;
7228 		int s;
7229 
7230 		if (graph->node[i].cluster != pos)
7231 			continue;
7232 		s = graph->node[i].scc;
7233 		k = c->scc_node[s]++;
7234 		swap_sched(&c->cluster[pos].node[j], &c->scc[s].node[k]);
7235 		if (c->scc[s].maxvar > c->cluster[pos].maxvar)
7236 			c->cluster[pos].maxvar = c->scc[s].maxvar;
7237 		++j;
7238 	}
7239 
7240 	return isl_stat_ok;
7241 }
7242 
7243 /* Is there a (conditional) validity dependence from node[j] to node[i],
7244  * forcing node[i] to follow node[j], or do the nodes belong to the same
7245  * cluster?
7246  */
7247 static isl_bool node_follows_strong_or_same_cluster(int i, int j, void *user)
7248 {
7249 	struct isl_sched_graph *graph = user;
7250 
7251 	if (graph->node[i].cluster == graph->node[j].cluster)
7252 		return isl_bool_true;
7253 	return graph_has_validity_edge(graph, &graph->node[j], &graph->node[i]);
7254 }
7255 
7256 /* Extract the merged clusters of SCCs in "graph", sort them, and
7257  * store them in c->cluster.  Update c->scc_cluster accordingly.
7258  *
7259  * First keep track of the cluster containing the SCC to which a node
7260  * belongs in the node itself.
7261  * Then extract the clusters into c->cluster, copying the current
7262  * band schedule from the SCCs that belong to the cluster.
7263  * Do this only once per cluster.
7264  *
7265  * Finally, topologically sort the clusters and update c->scc_cluster
7266  * to match the new scc numbering.  While the SCCs were originally
7267  * sorted already, some SCCs that depend on some other SCCs may
7268  * have been merged with SCCs that appear before these other SCCs.
7269  * A reordering may therefore be required.
7270  */
7271 static isl_stat extract_clusters(isl_ctx *ctx, struct isl_sched_graph *graph,
7272 	struct isl_clustering *c)
7273 {
7274 	int i;
7275 
7276 	for (i = 0; i < graph->n; ++i)
7277 		graph->node[i].cluster = c->scc_cluster[graph->node[i].scc];
7278 
7279 	for (i = 0; i < graph->scc; ++i) {
7280 		if (c->scc_cluster[i] != i)
7281 			continue;
7282 		if (extract_sub_graph(ctx, graph, &node_cluster_exactly,
7283 				&edge_cluster_exactly, i, &c->cluster[i]) < 0)
7284 			return isl_stat_error;
7285 		c->cluster[i].src_scc = -1;
7286 		c->cluster[i].dst_scc = -1;
7287 		if (copy_partial(graph, c, i) < 0)
7288 			return isl_stat_error;
7289 	}
7290 
7291 	if (detect_ccs(ctx, graph, &node_follows_strong_or_same_cluster) < 0)
7292 		return isl_stat_error;
7293 	for (i = 0; i < graph->n; ++i)
7294 		c->scc_cluster[graph->node[i].scc] = graph->node[i].cluster;
7295 
7296 	return isl_stat_ok;
7297 }
7298 
7299 /* Compute weights on the proximity edges of "graph" that can
7300  * be used by find_proximity to find the most appropriate
7301  * proximity edge to use to merge two clusters in "c".
7302  * The weights are also used by has_bounded_distances to determine
7303  * whether the merge should be allowed.
7304  * Store the maximum of the computed weights in graph->max_weight.
7305  *
7306  * The computed weight is a measure for the number of remaining schedule
7307  * dimensions that can still be completely aligned.
7308  * In particular, compute the number of equalities between
7309  * input dimensions and output dimensions in the proximity constraints.
7310  * The directions that are already handled by outer schedule bands
7311  * are projected out prior to determining this number.
7312  *
7313  * Edges that will never be considered by find_proximity are ignored.
7314  */
7315 static isl_stat compute_weights(struct isl_sched_graph *graph,
7316 	struct isl_clustering *c)
7317 {
7318 	int i;
7319 
7320 	graph->max_weight = 0;
7321 
7322 	for (i = 0; i < graph->n_edge; ++i) {
7323 		struct isl_sched_edge *edge = &graph->edge[i];
7324 		struct isl_sched_node *src = edge->src;
7325 		struct isl_sched_node *dst = edge->dst;
7326 		isl_basic_map *hull;
7327 		isl_bool prox;
7328 		isl_size n_in, n_out, n;
7329 
7330 		prox = is_non_empty_proximity(edge);
7331 		if (prox < 0)
7332 			return isl_stat_error;
7333 		if (!prox)
7334 			continue;
7335 		if (bad_cluster(&c->scc[edge->src->scc]) ||
7336 		    bad_cluster(&c->scc[edge->dst->scc]))
7337 			continue;
7338 		if (c->scc_cluster[edge->dst->scc] ==
7339 		    c->scc_cluster[edge->src->scc])
7340 			continue;
7341 
7342 		hull = isl_map_affine_hull(isl_map_copy(edge->map));
7343 		hull = isl_basic_map_transform_dims(hull, isl_dim_in, 0,
7344 						    isl_mat_copy(src->vmap));
7345 		hull = isl_basic_map_transform_dims(hull, isl_dim_out, 0,
7346 						    isl_mat_copy(dst->vmap));
7347 		hull = isl_basic_map_project_out(hull,
7348 						isl_dim_in, 0, src->rank);
7349 		hull = isl_basic_map_project_out(hull,
7350 						isl_dim_out, 0, dst->rank);
7351 		hull = isl_basic_map_remove_divs(hull);
7352 		n_in = isl_basic_map_dim(hull, isl_dim_in);
7353 		n_out = isl_basic_map_dim(hull, isl_dim_out);
7354 		if (n_in < 0 || n_out < 0)
7355 			hull = isl_basic_map_free(hull);
7356 		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
7357 							isl_dim_in, 0, n_in);
7358 		hull = isl_basic_map_drop_constraints_not_involving_dims(hull,
7359 							isl_dim_out, 0, n_out);
7360 		n = isl_basic_map_n_equality(hull);
7361 		isl_basic_map_free(hull);
7362 		if (n < 0)
7363 			return isl_stat_error;
7364 		edge->weight = n;
7365 
7366 		if (edge->weight > graph->max_weight)
7367 			graph->max_weight = edge->weight;
7368 	}
7369 
7370 	return isl_stat_ok;
7371 }
7372 
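/* Illustrative note (not part of the original code): for a hypothetical
 * proximity edge with relation { S[i, j] -> T[i', j'] : i' = i and j' = j }
 * where the outer bands have already aligned the first dimension
 * of both nodes (so that it gets projected out above), a single
 * equality relating the remaining input and output dimensions
 * is left and the edge is assigned weight 1.
 */
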
7373 /* Call compute_schedule_finish_band on each of the clusters in "c"
7374  * in their topological order.  This order is determined by the scc
7375  * fields of the nodes in "graph".
7376  * Combine the results in a sequence expressing the topological order.
7377  *
7378  * If there is only one cluster left, then there is no need to introduce
7379  * a sequence node.  Also, in this case, the cluster necessarily contains
7380  * the SCC at position 0 in the original graph and is therefore also
7381  * stored in the first cluster of "c".
7382  */
7383 static __isl_give isl_schedule_node *finish_bands_clustering(
7384 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
7385 	struct isl_clustering *c)
7386 {
7387 	int i;
7388 	isl_ctx *ctx;
7389 	isl_union_set_list *filters;
7390 
7391 	if (graph->scc == 1)
7392 		return compute_schedule_finish_band(node, &c->cluster[0], 0);
7393 
7394 	ctx = isl_schedule_node_get_ctx(node);
7395 
7396 	filters = extract_sccs(ctx, graph);
7397 	node = isl_schedule_node_insert_sequence(node, filters);
7398 
7399 	for (i = 0; i < graph->scc; ++i) {
7400 		int j = c->scc_cluster[i];
7401 		node = isl_schedule_node_child(node, i);
7402 		node = isl_schedule_node_child(node, 0);
7403 		node = compute_schedule_finish_band(node, &c->cluster[j], 0);
7404 		node = isl_schedule_node_parent(node);
7405 		node = isl_schedule_node_parent(node);
7406 	}
7407 
7408 	return node;
7409 }
7410 
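/* Illustrative note (not part of the original code): for a graph with
 * three SCCs where SCC 1 and SCC 2 have been merged into a single
 * cluster, the subtree constructed above has roughly the following shape,
 * with each filter selecting the statement instances of one SCC and
 * each child band taken from the cluster containing that SCC:
 *
 *	sequence
 *	|- filter (SCC 0) - band of c->cluster[0]
 *	|- filter (SCC 1) - band of c->cluster[1]
 *	|- filter (SCC 2) - band of c->cluster[1]
 */
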
7411 /* Compute a schedule for a connected dependence graph by first considering
7412  * each strongly connected component (SCC) in the graph separately and then
7413  * incrementally combining them into clusters.
7414  * Return the updated schedule node.
7415  *
7416  * Initially, each cluster consists of a single SCC, each with its
7417  * own band schedule.  The algorithm then tries to merge pairs
7418  * of clusters along a proximity edge until no more suitable
7419  * proximity edges can be found.  During this merging, the schedule
7420  * is maintained in the individual SCCs.
7421  * After the merging is completed, the full resulting clusters
7422  * are extracted and in finish_bands_clustering,
7423  * compute_schedule_finish_band is called on each of them to integrate
7424  * the band into "node" and to continue the computation.
7425  *
7426  * compute_weights initializes the weights that are used by find_proximity.
7427  */
7428 static __isl_give isl_schedule_node *compute_schedule_wcc_clustering(
7429 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
7430 {
7431 	isl_ctx *ctx;
7432 	struct isl_clustering c;
7433 	int i;
7434 
7435 	ctx = isl_schedule_node_get_ctx(node);
7436 
7437 	if (clustering_init(ctx, &c, graph) < 0)
7438 		goto error;
7439 
7440 	if (compute_weights(graph, &c) < 0)
7441 		goto error;
7442 
7443 	for (;;) {
7444 		i = find_proximity(graph, &c);
7445 		if (i < 0)
7446 			goto error;
7447 		if (i >= graph->n_edge)
7448 			break;
7449 		if (merge_clusters_along_edge(ctx, graph, i, &c) < 0)
7450 			goto error;
7451 	}
7452 
7453 	if (extract_clusters(ctx, graph, &c) < 0)
7454 		goto error;
7455 
7456 	node = finish_bands_clustering(node, graph, &c);
7457 
7458 	clustering_free(ctx, &c);
7459 	return node;
7460 error:
7461 	clustering_free(ctx, &c);
7462 	return isl_schedule_node_free(node);
7463 }
7464 
7465 /* Compute a schedule for a connected dependence graph and return
7466  * the updated schedule node.
7467  *
7468  * If Feautrier's algorithm is selected, we first recursively try to satisfy
7469  * as many validity dependences as possible. When all validity dependences
7470  * are satisfied, we extend the schedule to a full-dimensional schedule.
7471  *
7472  * Call compute_schedule_wcc_whole or compute_schedule_wcc_clustering
7473  * depending on whether the user has selected the option to try and
7474  * compute a schedule for the entire (weakly connected) component first.
7475  * If there is only a single strongly connected component (SCC), then
7476  * there is no point in trying to combine SCCs
7477  * in compute_schedule_wcc_clustering, so compute_schedule_wcc_whole
7478  * is called instead.
7479  */
7480 static __isl_give isl_schedule_node *compute_schedule_wcc(
7481 	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph)
7482 {
7483 	isl_ctx *ctx;
7484 
7485 	if (!node)
7486 		return NULL;
7487 
7488 	ctx = isl_schedule_node_get_ctx(node);
7489 	if (detect_sccs(ctx, graph) < 0)
7490 		return isl_schedule_node_free(node);
7491 
7492 	if (compute_maxvar(graph) < 0)
7493 		return isl_schedule_node_free(node);
7494 
7495 	if (need_feautrier_step(ctx, graph))
7496 		return compute_schedule_wcc_feautrier(node, graph);
7497 
7498 	if (graph->scc <= 1 || isl_options_get_schedule_whole_component(ctx))
7499 		return compute_schedule_wcc_whole(node, graph);
7500 	else
7501 		return compute_schedule_wcc_clustering(node, graph);
7502 }
7503 
/* Compute a schedule for each group of nodes identified by node->scc
 * separately and then combine them in a sequence node (or a set node
 * if graph->weak is set) inserted at position "node" of the schedule tree.
 * Return the updated schedule node.
 *
 * If "wcc" is set, then each of the groups belongs to a single
 * weakly connected component in the dependence graph so that
 * there is no need for compute_sub_schedule to look for weakly
 * connected components.
 *
 * If a set node would be introduced and if the number of components
 * is equal to the number of nodes, then check if the schedule
 * is already complete.  If so, a redundant set node would be introduced
 * (without any further descendants) stating that the statements
 * can be executed in arbitrary order, which is also expressed
 * by the absence of any node.  Refrain from inserting any nodes
 * in this case and simply return.
 */
static __isl_give isl_schedule_node *compute_component_schedule(
	__isl_take isl_schedule_node *node, struct isl_sched_graph *graph,
	int wcc)
{
	int component;
	isl_ctx *ctx;
	isl_union_set_list *filters;

	if (!node)
		return NULL;

	if (graph->weak && graph->scc == graph->n) {
		if (compute_maxvar(graph) < 0)
			return isl_schedule_node_free(node);
		if (graph->n_row >= graph->maxvar)
			return node;
	}

	ctx = isl_schedule_node_get_ctx(node);
	filters = extract_sccs(ctx, graph);
	if (graph->weak)
		node = isl_schedule_node_insert_set(node, filters);
	else
		node = isl_schedule_node_insert_sequence(node, filters);

	for (component = 0; component < graph->scc; ++component) {
		node = isl_schedule_node_child(node, component);
		node = isl_schedule_node_child(node, 0);
		node = compute_sub_schedule(node, ctx, graph,
				    &node_scc_exactly,
				    &edge_scc_exactly, component, wcc);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_parent(node);
	}

	return node;
}

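/* From the outside, the effect of compute_component_schedule is visible
 * in the type of the node inserted at this position of the schedule tree.
 * A minimal sketch (not part of isl; assumes <stdio.h> and a valid "node"
 * positioned at the inserted node) of inspecting that type:
 *
 *	enum isl_schedule_node_type type;
 *
 *	type = isl_schedule_node_get_type(node);
 *	if (type == isl_schedule_node_set)
 *		printf("components may be executed in any order\n");
 *	else if (type == isl_schedule_node_sequence)
 *		printf("components are executed in the given order\n");
 */
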
/* Compute a schedule for the given dependence graph and insert it at "node".
 * Return the updated schedule node.
 *
 * We first check if the graph is connected (through validity and conditional
 * validity dependences) and, if not, compute a schedule
 * for each component separately.
 * If the schedule_serialize_sccs option is set, then we check for strongly
 * connected components instead and compute a separate schedule for
 * each such strongly connected component.
 */
static __isl_give isl_schedule_node *compute_schedule(isl_schedule_node *node,
	struct isl_sched_graph *graph)
{
	isl_ctx *ctx;

	if (!node)
		return NULL;

	ctx = isl_schedule_node_get_ctx(node);
	if (isl_options_get_schedule_serialize_sccs(ctx)) {
		if (detect_sccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	} else {
		if (detect_wccs(ctx, graph) < 0)
			return isl_schedule_node_free(node);
	}

	if (graph->scc > 1)
		return compute_component_schedule(node, graph, 1);

	return compute_schedule_wcc(node, graph);
}

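/* The check for strongly rather than weakly connected components above
 * is triggered by the schedule_serialize_sccs option.  As an illustrative
 * sketch (assuming the usual isl_options_set_* counterpart of the getter
 * used above), a client could request this behavior with
 *
 *	isl_options_set_schedule_serialize_sccs(ctx, 1);
 *
 * which causes a separate schedule to be computed for each strongly
 * connected component.
 */
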
/* Compute a schedule on sc->domain that respects the given schedule
 * constraints.
 *
 * In particular, the schedule respects all the validity dependences.
 * If the default isl scheduling algorithm is used, it tries to minimize
 * the dependence distances over the proximity dependences.
 * If Feautrier's scheduling algorithm is used, the proximity dependence
 * distances are only minimized during the extension to a full-dimensional
 * schedule.
 *
 * If there are any condition and conditional validity dependences,
 * then the conditional validity dependences may be violated inside
 * a tilable band, provided they have no adjacent non-local
 * condition dependences.
 */
__isl_give isl_schedule *isl_schedule_constraints_compute_schedule(
	__isl_take isl_schedule_constraints *sc)
{
	isl_ctx *ctx = isl_schedule_constraints_get_ctx(sc);
	struct isl_sched_graph graph = { 0 };
	isl_schedule *sched;
	isl_schedule_node *node;
	isl_union_set *domain;
	isl_size n;

	sc = isl_schedule_constraints_align_params(sc);

	domain = isl_schedule_constraints_get_domain(sc);
	n = isl_union_set_n_set(domain);
	if (n == 0) {
		isl_schedule_constraints_free(sc);
		return isl_schedule_from_domain(domain);
	}

	if (n < 0 || graph_init(&graph, sc) < 0)
		domain = isl_union_set_free(domain);

	node = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_child(node, 0);
	if (graph.n > 0)
		node = compute_schedule(node, &graph);
	sched = isl_schedule_node_get_schedule(node);
	isl_schedule_node_free(node);

	graph_free(ctx, &graph);
	isl_schedule_constraints_free(sc);

	return sched;
}

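/* Illustrative usage sketch of the exported entry point above (not part
 * of isl itself; the statement names and bounds are made up):
 *
 *	#include <isl/ctx.h>
 *	#include <isl/union_set.h>
 *	#include <isl/union_map.h>
 *	#include <isl/schedule.h>
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain = isl_union_set_read_from_str(ctx,
 *		"{ S[i] : 0 <= i < 100; T[i] : 0 <= i < 100 }");
 *	isl_union_map *dep = isl_union_map_read_from_str(ctx,
 *		"{ S[i] -> T[i] : 0 <= i < 100 }");
 *	isl_schedule_constraints *sc =
 *		isl_schedule_constraints_on_domain(domain);
 *	sc = isl_schedule_constraints_set_validity(sc,
 *		isl_union_map_copy(dep));
 *	sc = isl_schedule_constraints_set_proximity(sc, dep);
 *	isl_schedule *schedule =
 *		isl_schedule_constraints_compute_schedule(sc);
 *
 *	(use or print the schedule here)
 *
 *	isl_schedule_free(schedule);
 *	isl_ctx_free(ctx);
 */
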
/* Compute a schedule for the given union of domains that respects
 * all the validity dependences and minimizes
 * the dependence distances over the proximity dependences.
 *
 * This function is kept for backward compatibility.
 */
__isl_give isl_schedule *isl_union_set_compute_schedule(
	__isl_take isl_union_set *domain,
	__isl_take isl_union_map *validity,
	__isl_take isl_union_map *proximity)
{
	isl_schedule_constraints *sc;

	sc = isl_schedule_constraints_on_domain(domain);
	sc = isl_schedule_constraints_set_validity(sc, validity);
	sc = isl_schedule_constraints_set_proximity(sc, proximity);

	return isl_schedule_constraints_compute_schedule(sc);
}

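/* Illustrative call of the compatibility wrapper above (not part of isl;
 * "domain", "validity" and "proximity" are assumed to have been built
 * as in the sketch following isl_schedule_constraints_compute_schedule):
 *
 *	isl_schedule *schedule = isl_union_set_compute_schedule(domain,
 *		validity, proximity);
 *
 * New code would typically construct an isl_schedule_constraints object
 * directly, since this wrapper only passes validity and proximity
 * constraints and therefore cannot express coincidence or
 * conditional validity constraints.
 */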