/*
 * Copyright 2013 Ecole Normale Superieure
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 */

#include <stdio.h>
#include <string.h>

#include <isl/set.h>
#include <isl/union_set.h>
#include <isl/space.h>

#include "gpu_tree.h"

/* The functions in this file are used to navigate part of a schedule tree
 * that is mapped to blocks. Initially, this part consists of a linear
 * branch segment with a mark node with name "kernel" on the outer end
 * and a mark node with name "thread" on the inner end.
 * During the mapping to blocks, branching may be introduced, but only
 * one of the elements in each sequence contains the "thread" mark.
 * The filter of this element (and only this filter) contains
 * domain elements identified by the "core" argument of the functions
 * that move down this tree.
 *
 * Synchronization statements have a name that starts with "sync" and
 * a user pointer pointing to the kernel that contains the synchronization.
 * The functions inserting or detecting synchronizations take a ppcg_kernel
 * argument to be able to create or identify such statements.
 * They may also use two fields in this structure, the "core" field
 * to move around in the tree and the "n_sync" field to make sure that
 * each synchronization has a different name (within the kernel).
 */
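
/* For illustration only (not part of the original code): one possible
 * shape of the subtree navigated by these functions after the mapping
 * to blocks has introduced a sequence.  Only one filter contains
 * elements of "core" and only that branch leads to the "thread" mark.
 *
 *	mark "kernel"
 *	 band
 *	  sequence
 *	   filter		(no "core" elements, no "thread" mark below)
 *	   filter		(contains the "core" elements)
 *	    mark "shared"
 *	     band
 *	      mark "thread"
 *	       ...
 */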

/* Is "node" a mark node with an identifier called "name"?
 */
static int is_marked(__isl_keep isl_schedule_node *node, const char *name)
{
	isl_id *mark;
	int has_name;

	if (!node)
		return -1;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_mark)
		return 0;

	mark = isl_schedule_node_mark_get_id(node);
	if (!mark)
		return -1;

	has_name = !strcmp(isl_id_get_name(mark), name);
	isl_id_free(mark);

	return has_name;
}

/* Is "node" a mark node with an identifier called "kernel"?
 */
int gpu_tree_node_is_kernel(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "kernel");
}

/* Is "node" a mark node with an identifier called "shared"?
 */
static int node_is_shared(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "shared");
}

/* Is "node" a mark node with an identifier called "thread"?
 */
static int node_is_thread(__isl_keep isl_schedule_node *node)
{
	return is_marked(node, "thread");
}

/* Insert a mark node with identifier "shared" in front of "node".
 */
static __isl_give isl_schedule_node *insert_shared(
	__isl_take isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_id *id;

	ctx = isl_schedule_node_get_ctx(node);
	id = isl_id_alloc(ctx, "shared", NULL);
	node = isl_schedule_node_insert_mark(node, id);

	return node;
}

/* Insert a "shared" mark in front of the "thread" mark
 * provided the linear branch between "node" and the "thread" mark
 * does not contain such a "shared" mark already.
 *
 * As a side effect, this function checks that the subtree at "node"
 * actually contains a "thread" mark and that there is no branching
 * in between "node" and this "thread" mark.
 */
__isl_give isl_schedule_node *gpu_tree_insert_shared_before_thread(
	__isl_take isl_schedule_node *node)
{
	int depth0, depth;
	int any_shared = 0;

	if (!node)
		return NULL;

	depth0 = isl_schedule_node_get_tree_depth(node);

	for (;;) {
		int is_thread;
		int n;

		if (!any_shared) {
			any_shared = node_is_shared(node);
			if (any_shared < 0)
				return isl_schedule_node_free(node);
		}
		is_thread = node_is_thread(node);
		if (is_thread < 0)
			return isl_schedule_node_free(node);
		if (is_thread)
			break;
		n = isl_schedule_node_n_children(node);
		if (n == 0)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"no thread marker found",
				return isl_schedule_node_free(node));
		if (n > 1)
			isl_die(isl_schedule_node_get_ctx(node),
				isl_error_invalid,
				"expecting single thread marker",
				return isl_schedule_node_free(node));

		node = isl_schedule_node_child(node, 0);
	}

	if (!any_shared)
		node = insert_shared(node);
	depth = isl_schedule_node_get_tree_depth(node);
	node = isl_schedule_node_ancestor(node, depth - depth0);

	return node;
}

/* Assuming "node" is a filter node, does it correspond to the branch
 * that contains the "thread" mark, i.e., does it contain any elements
 * in "core"?
 */
static int node_is_core(__isl_keep isl_schedule_node *node,
	__isl_keep isl_union_set *core)
{
	int disjoint;
	isl_union_set *filter;

	filter = isl_schedule_node_filter_get_filter(node);
	disjoint = isl_union_set_is_disjoint(filter, core);
	isl_union_set_free(filter);
	if (disjoint < 0)
		return -1;

	return !disjoint;
}

/* Move to the only child of "node" that has the "thread" mark as descendant,
 * where the branch containing this mark is identified by the domain elements
 * in "core".
 *
 * If "node" is not a sequence, then it only has one child and we move
 * to that single child.
 * Otherwise, we check each of the filters in the children, pick
 * the one that corresponds to "core" and return a pointer to the child
 * of the filter node.
 */
static __isl_give isl_schedule_node *core_child(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int i, n;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence)
		return isl_schedule_node_child(node, 0);

	n = isl_schedule_node_n_children(node);
	for (i = 0; i < n; ++i) {
		int is_core;

		node = isl_schedule_node_child(node, i);
		is_core = node_is_core(node, core);

		if (is_core < 0)
			return isl_schedule_node_free(node);
		if (is_core)
			return isl_schedule_node_child(node, 0);

		node = isl_schedule_node_parent(node);
	}

	isl_die(isl_schedule_node_get_ctx(node), isl_error_internal,
		"core child not found", return isl_schedule_node_free(node));
}

/* Move down the branch between "kernel" and "thread" until
 * the "shared" mark is reached, where the branch containing the "shared"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_shared(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_shared;

	while ((is_shared = node_is_shared(node)) == 0)
		node = core_child(node, core);
	if (is_shared < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down the branch between "kernel" and "thread" until
 * the "thread" mark is reached, where the branch containing the "thread"
 * mark is identified by the domain elements in "core".
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_thread(
	__isl_take isl_schedule_node *node, __isl_keep isl_union_set *core)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = core_child(node, core);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}
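
/* Illustration (not part of the original code): a hypothetical caller
 * holding a pointer "node" to the "kernel" mark of a kernel "kernel"
 * could descend to the "thread" mark and come back up as follows,
 * using only functions defined in this file and the "core" field
 * described in the comment at the top of the file:
 *
 *	node = gpu_tree_move_down_to_thread(node, kernel->core);
 *	... inspect or modify the tree at the "thread" mark ...
 *	node = gpu_tree_move_up_to_kernel(node);
 */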

/* Move up the tree underneath the "thread" mark until
 * the "thread" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_thread(
	__isl_take isl_schedule_node *node)
{
	int is_thread;

	while ((is_thread = node_is_thread(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move up the tree underneath the "kernel" mark until
 * the "kernel" mark is reached.
 */
__isl_give isl_schedule_node *gpu_tree_move_up_to_kernel(
	__isl_take isl_schedule_node *node)
{
	int is_kernel;

	while ((is_kernel = gpu_tree_node_is_kernel(node)) == 0)
		node = isl_schedule_node_parent(node);
	if (is_kernel < 0)
		node = isl_schedule_node_free(node);

	return node;
}

/* Move down from the "kernel" mark (or at least a node with schedule
 * depth smaller than or equal to "depth") to a band node at schedule
 * depth "depth". The "thread" mark is assumed to have a schedule
 * depth greater than or equal to "depth". The branch containing the
 * "thread" mark is identified by the domain elements in "core".
 *
 * If the desired schedule depth is in the middle of a band node,
 * then the band node is split into two pieces, the second piece
 * at the desired schedule depth.
 */
__isl_give isl_schedule_node *gpu_tree_move_down_to_depth(
	__isl_take isl_schedule_node *node, int depth,
	__isl_keep isl_union_set *core)
{
	int is_shared;
	int is_thread = 0;

	while (node && isl_schedule_node_get_schedule_depth(node) < depth) {
		if (isl_schedule_node_get_type(node) ==
						isl_schedule_node_band) {
			int node_depth, node_dim;
			node_depth = isl_schedule_node_get_schedule_depth(node);
			node_dim = isl_schedule_node_band_n_member(node);
			if (node_depth + node_dim > depth)
				node = isl_schedule_node_band_split(node,
							depth - node_depth);
		}
		node = core_child(node, core);
	}
	while ((is_shared = node_is_shared(node)) == 0 &&
	    (is_thread = node_is_thread(node)) == 0 &&
	    isl_schedule_node_get_type(node) != isl_schedule_node_band)
		node = core_child(node, core);
	if (is_shared < 0 || is_thread < 0)
		node = isl_schedule_node_free(node);

	return node;
}
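
/* Worked example (hypothetical numbers): if gpu_tree_move_down_to_depth
 * is called with depth 2 and encounters a band node with 3 members at
 * schedule depth 0, then node_depth + node_dim = 3 > 2, so the band is
 * split after its first 2 members.  The walk then continues into the
 * second part, which starts exactly at schedule depth 2, and that band
 * node is where the second loop stops and the function returns.
 */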

/* Create a union set containing a single set with a tuple identifier
 * called "syncX" and user pointer equal to "kernel".
 */
static __isl_give isl_union_set *create_sync_domain(struct ppcg_kernel *kernel)
{
	isl_space *space;
	isl_id *id;
	char name[40];

	space = isl_space_set_alloc(kernel->ctx, 0, 0);
	snprintf(name, sizeof(name), "sync%d", kernel->n_sync++);
	id = isl_id_alloc(kernel->ctx, name, kernel);
	space = isl_space_set_tuple_id(space, isl_dim_set, id);
	return isl_union_set_from_set(isl_set_universe(space));
}
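
/* Illustration: with kernel->n_sync equal to 2, create_sync_domain
 * constructs the union set { sync2[] }, where the tuple identifier
 * "sync2" carries "kernel" as its user pointer, and leaves n_sync
 * equal to 3 for the next synchronization.
 */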

/* Is "id" the identifier of a synchronization statement inside "kernel"?
 * That is, does its name start with "sync" and does it point to "kernel"?
 */
int gpu_tree_id_is_sync(__isl_keep isl_id *id, struct ppcg_kernel *kernel)
{
	const char *name;

	name = isl_id_get_name(id);
	if (!name)
		return 0;
	else if (strncmp(name, "sync", 4))
		return 0;
	return isl_id_get_user(id) == kernel;
}

/* Does "domain" consist of a single set with a tuple identifier
 * corresponding to a synchronization for "kernel"?
 */
static int domain_is_sync(__isl_keep isl_union_set *domain,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	isl_id *id;
	isl_set *set;

	if (isl_union_set_n_set(domain) != 1)
		return 0;
	set = isl_set_from_union_set(isl_union_set_copy(domain));
	id = isl_set_get_tuple_id(set);
	is_sync = gpu_tree_id_is_sync(id, kernel);
	isl_id_free(id);
	isl_set_free(set);

	return is_sync;
}

/* Does "node" point to a filter selecting a synchronization statement
 * for "kernel"?
 */
static int node_is_sync_filter(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int is_sync;
	enum isl_schedule_node_type type;
	isl_union_set *domain;

	if (!node)
		return -1;
	type = isl_schedule_node_get_type(node);
	if (type != isl_schedule_node_filter)
		return 0;
	domain = isl_schedule_node_filter_get_filter(node);
	is_sync = domain_is_sync(domain, kernel);
	isl_union_set_free(domain);

	return is_sync;
}

/* Is "node" part of a sequence with a previous synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a previous filter that picks out exactly such a synchronization statement?
 */
static int has_preceding_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_previous_sibling(node)) {
		node = isl_schedule_node_previous_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Is "node" part of a sequence with a subsequent synchronization statement
 * for "kernel"?
 * That is, is the parent of "node" a filter such that there is
 * a subsequent filter that picks out exactly such a synchronization statement?
 */
static int has_following_sync(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int found = 0;

	node = isl_schedule_node_copy(node);
	node = isl_schedule_node_parent(node);
	while (!found && isl_schedule_node_has_next_sibling(node)) {
		node = isl_schedule_node_next_sibling(node);
		if (!node)
			break;
		found = node_is_sync_filter(node, kernel);
	}
	if (!node)
		found = -1;
	isl_schedule_node_free(node);

	return found;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that precedes
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_before_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_preceding_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Does the subtree rooted at "node" (which is a band node) contain
 * any synchronization statement for "kernel" that follows
 * the core computation of "kernel" (identified by the elements
 * in kernel->core)?
 */
static int has_sync_after_core(__isl_keep isl_schedule_node *node,
	struct ppcg_kernel *kernel)
{
	int has_sync = 0;
	int is_thread;

	node = isl_schedule_node_copy(node);
	while ((is_thread = node_is_thread(node)) == 0) {
		node = core_child(node, kernel->core);
		has_sync = has_following_sync(node, kernel);
		if (has_sync < 0 || has_sync)
			break;
	}
	if (is_thread < 0 || !node)
		has_sync = -1;
	isl_schedule_node_free(node);

	return has_sync;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" before "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_before(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_before(node, graft);

	return node;
}

/* Insert (or extend) an extension on top of "node" that puts
 * a synchronization node for "kernel" after "node".
 * Return a pointer to the original node in the updated schedule tree.
 */
static __isl_give isl_schedule_node *insert_sync_after(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	isl_union_set *domain;
	isl_schedule_node *graft;

	if (!node)
		return NULL;

	domain = create_sync_domain(kernel);
	graft = isl_schedule_node_from_domain(domain);
	node = isl_schedule_node_graft_after(node, graft);

	return node;
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" before "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_preceding_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_preceding_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_before(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is
 * such a synchronization node.
 */
__isl_give isl_schedule_node *gpu_tree_ensure_following_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Insert an extension on top of "node" that puts a synchronization node
 * for "kernel" after "node" unless there already is such a sync node or
 * "node" itself already contains a synchronization node following
 * the core computation of "kernel".
 */
__isl_give isl_schedule_node *gpu_tree_ensure_sync_after_core(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	has_sync = has_following_sync(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	return insert_sync_after(node, kernel);
}

/* Move left in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node preceding
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a preceding synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_left_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_before_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_preceding_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_previous_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}

/* Move right in the sequence on top of "node" to a synchronization node
 * for "kernel".
 * If "node" itself contains a synchronization node following
 * the core computation of "kernel", then return "node" itself.
 * Otherwise, if "node" does not have a following synchronization node,
 * then create one first.
 */
__isl_give isl_schedule_node *gpu_tree_move_right_to_sync(
	__isl_take isl_schedule_node *node, struct ppcg_kernel *kernel)
{
	int has_sync;
	int is_sync;

	has_sync = has_sync_after_core(node, kernel);
	if (has_sync < 0)
		return isl_schedule_node_free(node);
	if (has_sync)
		return node;
	node = gpu_tree_ensure_following_sync(node, kernel);
	node = isl_schedule_node_parent(node);
	while ((is_sync = node_is_sync_filter(node, kernel)) == 0)
		node = isl_schedule_node_next_sibling(node);
	if (is_sync < 0)
		node = isl_schedule_node_free(node);
	node = isl_schedule_node_child(node, 0);

	return node;
}
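
/* Illustration (hypothetical caller, not part of the original code):
 * to make sure that a synchronization statement for "kernel" follows
 * "node" in the enclosing sequence, a caller could write
 *
 *	node = gpu_tree_ensure_following_sync(node, kernel);
 *
 * while gpu_tree_move_right_to_sync(node, kernel) additionally moves
 * "node" to the child of the filter selecting that synchronization
 * statement, creating the statement first if it does not exist yet.
 */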