/*
 * Copyright 2013-2014 Ecole Normale Superieure
 * Copyright 2014 INRIA Rocquencourt
 * Copyright 2016 Sven Verdoolaege
 *
 * Use of this software is governed by the MIT license
 *
 * Written by Sven Verdoolaege,
 * Ecole Normale Superieure, 45 rue d'Ulm, 75230 Paris, France
 * and Inria Paris - Rocquencourt, Domaine de Voluceau - Rocquencourt,
 * B.P. 105 - 78153 Le Chesnay, France
 */

#include <isl/id.h>
#include <isl/val.h>
#include <isl/space.h>
#include <isl/set.h>
#include <isl_schedule_band.h>
#include <isl_schedule_private.h>
#include <isl_schedule_node_private.h>

/* Create a new schedule node in the given schedule, pointing at the given
 * tree with the given ancestors and child positions.
 * "child_pos" may be NULL if there are no ancestors.
 */
__isl_give isl_schedule_node *isl_schedule_node_alloc(
	__isl_take isl_schedule *schedule, __isl_take isl_schedule_tree *tree,
	__isl_take isl_schedule_tree_list *ancestors, int *child_pos)
{
	isl_ctx *ctx;
	isl_schedule_node *node;
	int i;
	isl_size n;

	n = isl_schedule_tree_list_n_schedule_tree(ancestors);
	if (!schedule || !tree || n < 0)
		goto error;
	if (n > 0 && !child_pos)
		goto error;
	ctx = isl_schedule_get_ctx(schedule);
	node = isl_calloc_type(ctx, isl_schedule_node);
	if (!node)
		goto error;
	node->ref = 1;
	node->schedule = schedule;
	node->tree = tree;
	node->ancestors = ancestors;
	node->child_pos = isl_alloc_array(ctx, int, n);
	if (n && !node->child_pos)
		return isl_schedule_node_free(node);
	for (i = 0; i < n; ++i)
		node->child_pos[i] = child_pos[i];

	return node;
error:
	isl_schedule_free(schedule);
	isl_schedule_tree_free(tree);
	isl_schedule_tree_list_free(ancestors);
	return NULL;
}

/* Return a pointer to the root of a schedule tree whose single node
 * is a domain node with the given domain.
 */
__isl_give isl_schedule_node *isl_schedule_node_from_domain(
	__isl_take isl_union_set *domain)
{
	isl_schedule *schedule;
	isl_schedule_node *node;

	schedule = isl_schedule_from_domain(domain);
	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);

	return node;
}

/* Return a pointer to the root of a schedule tree whose single node
 * is an extension node with the given extension.
 */
__isl_give isl_schedule_node *isl_schedule_node_from_extension(
	__isl_take isl_union_map *extension)
{
	isl_ctx *ctx;
	isl_schedule *schedule;
	isl_schedule_tree *tree;
	isl_schedule_node *node;

	if (!extension)
		return NULL;

	ctx = isl_union_map_get_ctx(extension);
	tree = isl_schedule_tree_from_extension(extension);
	schedule = isl_schedule_from_schedule_tree(ctx, tree);
	node = isl_schedule_get_root(schedule);
	isl_schedule_free(schedule);

	return node;
}

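/* Example (illustrative sketch, not part of the library): construct
 * a schedule node from a domain read from a string and check that it
 * points at a domain node.  The set description is made up for
 * illustration purposes.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_union_set *domain =
 *		isl_union_set_read_from_str(ctx, "{ S[i] : 0 <= i < 10 }");
 *	isl_schedule_node *node = isl_schedule_node_from_domain(domain);
 *	assert(isl_schedule_node_get_type(node) == isl_schedule_node_domain);
 *	isl_schedule_node_free(node);
 *	isl_ctx_free(ctx);
 */
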
/* Return the isl_ctx to which "node" belongs.
 */
isl_ctx *isl_schedule_node_get_ctx(__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_get_ctx(node->schedule) : NULL;
}

/* Return a pointer to the leaf of the schedule into which "node" points.
 */
__isl_keep isl_schedule_tree *isl_schedule_node_peek_leaf(
	__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_peek_leaf(node->schedule) : NULL;
}

/* Return a copy of the leaf of the schedule into which "node" points.
 */
__isl_give isl_schedule_tree *isl_schedule_node_get_leaf(
	__isl_keep isl_schedule_node *node)
{
	return isl_schedule_tree_copy(isl_schedule_node_peek_leaf(node));
}

/* Return the type of the node or isl_schedule_node_error on error.
 */
enum isl_schedule_node_type isl_schedule_node_get_type(
	__isl_keep isl_schedule_node *node)
{
	return node ? isl_schedule_tree_get_type(node->tree)
		    : isl_schedule_node_error;
}

/* Return the type of the parent of "node" or isl_schedule_node_error on error.
 */
enum isl_schedule_node_type isl_schedule_node_get_parent_type(
	__isl_keep isl_schedule_node *node)
{
	isl_size n;
	int pos;
	int has_parent;
	isl_schedule_tree *parent;
	enum isl_schedule_node_type type;

	if (!node)
		return isl_schedule_node_error;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0)
		return isl_schedule_node_error;
	if (!has_parent)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no parent", return isl_schedule_node_error);
	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_schedule_node_error;

	pos = n - 1;
	parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors, pos);
	type = isl_schedule_tree_get_type(parent);
	isl_schedule_tree_free(parent);

	return type;
}

/* Return a copy of the subtree that this node points to.
 */
__isl_give isl_schedule_tree *isl_schedule_node_get_tree(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_tree_copy(node->tree);
}

/* Return a copy of the schedule into which "node" points.
 */
__isl_give isl_schedule *isl_schedule_node_get_schedule(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;
	return isl_schedule_copy(node->schedule);
}

/* Return a fresh copy of "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_dup(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	return isl_schedule_node_alloc(isl_schedule_copy(node->schedule),
				isl_schedule_tree_copy(node->tree),
				isl_schedule_tree_list_copy(node->ancestors),
				node->child_pos);
}

/* Return an isl_schedule_node that is equal to "node" and that has only
 * a single reference.
 */
__isl_give isl_schedule_node *isl_schedule_node_cow(
	__isl_take isl_schedule_node *node)
{
	if (!node)
		return NULL;

	if (node->ref == 1)
		return node;
	node->ref--;
	return isl_schedule_node_dup(node);
}

/* Return a new reference to "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_copy(
	__isl_keep isl_schedule_node *node)
{
	if (!node)
		return NULL;

	node->ref++;
	return node;
}

/* Free "node" and return NULL.
 */
__isl_null isl_schedule_node *isl_schedule_node_free(
	__isl_take isl_schedule_node *node)
{
	if (!node)
		return NULL;
	if (--node->ref > 0)
		return NULL;

	isl_schedule_tree_list_free(node->ancestors);
	free(node->child_pos);
	isl_schedule_tree_free(node->tree);
	isl_schedule_free(node->schedule);
	free(node);

	return NULL;
}

/* Do "node1" and "node2" point to the same position in the same
 * schedule?
 */
isl_bool isl_schedule_node_is_equal(__isl_keep isl_schedule_node *node1,
	__isl_keep isl_schedule_node *node2)
{
	int i;
	isl_size n1, n2;

	if (!node1 || !node2)
		return isl_bool_error;
	if (node1 == node2)
		return isl_bool_true;
	if (node1->schedule != node2->schedule)
		return isl_bool_false;

	n1 = isl_schedule_node_get_tree_depth(node1);
	n2 = isl_schedule_node_get_tree_depth(node2);
	if (n1 < 0 || n2 < 0)
		return isl_bool_error;
	if (n1 != n2)
		return isl_bool_false;
	for (i = 0; i < n1; ++i)
		if (node1->child_pos[i] != node2->child_pos[i])
			return isl_bool_false;

	return isl_bool_true;
}

/* Return the number of outer schedule dimensions of "node"
 * in its schedule tree.
 *
 * Return isl_size_error on error.
 */
isl_size isl_schedule_node_get_schedule_depth(
	__isl_keep isl_schedule_node *node)
{
	int i;
	isl_size n;
	int depth = 0;

	if (!node)
		return isl_size_error;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_size_error;
	for (i = n - 1; i >= 0; --i) {
		isl_schedule_tree *tree;
		isl_size n;

		tree = isl_schedule_tree_list_get_schedule_tree(
						node->ancestors, i);
		if (!tree)
			return isl_size_error;
		n = 0;
		if (tree->type == isl_schedule_node_band)
			n = isl_schedule_tree_band_n_member(tree);
		depth += n;
		isl_schedule_tree_free(tree);
		if (n < 0)
			return isl_size_error;
	}

	return depth;
}
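
/* Example (illustrative sketch, not part of the library): the schedule
 * depth counts the members of the outer band nodes, not the number of
 * ancestors.  With the (made up) schedule below, the band node has
 * schedule depth 0 and tree depth 1, while the leaf below it has
 * schedule depth 2 and tree depth 2.
 *
 *	isl_ctx *ctx = isl_ctx_alloc();
 *	isl_schedule *schedule = isl_schedule_read_from_str(ctx,
 *		"{ domain: \"{ S[i,j] : 0 <= i,j < 10 }\", "
 *		"child: { schedule: \"[{ S[i,j] -> [(i)] }, "
 *		"{ S[i,j] -> [(j)] }]\" } }");
 *	isl_schedule_node *node = isl_schedule_get_root(schedule);
 *	node = isl_schedule_node_child(node, 0);
 *	isl_size band_depth = isl_schedule_node_get_schedule_depth(node);
 *	node = isl_schedule_node_child(node, 0);
 *	isl_size leaf_depth = isl_schedule_node_get_schedule_depth(node);
 *	isl_schedule_node_free(node);
 *	isl_schedule_free(schedule);
 *	isl_ctx_free(ctx);
 */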

/* Internal data structure for
 * isl_schedule_node_get_prefix_schedule_union_pw_multi_aff
 *
 * "initialized" is set if the filter field has been initialized.
 * If "universe_domain" is not set, then the collected filter is intersected
 * with the domain of the root domain node.
 * "universe_filter" is set if we are only collecting the universes of filters.
 * "collect_prefix" is set if we are collecting prefixes.
 * "filter" collects all outer filters and is NULL until "initialized" is set.
 * "prefix" collects all outer band partial schedules (if "collect_prefix"
 * is set).  If it is used, then it is initialized by the caller
 * of collect_filter_prefix to a zero-dimensional function.
 */
struct isl_schedule_node_get_filter_prefix_data {
	int initialized;
	int universe_domain;
	int universe_filter;
	int collect_prefix;
	isl_union_set *filter;
	isl_multi_union_pw_aff *prefix;
};

static isl_stat collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
	int n, struct isl_schedule_node_get_filter_prefix_data *data);

/* Update the filter and prefix information in "data" based on the first "n"
 * elements in "list" and the expansion tree root "tree".
 *
 * We first collect the information from the elements in "list",
 * initializing the filter based on the domain of the expansion.
 * Then we map the results to the expanded space and combine them
 * with the results already in "data".
 */
static isl_stat collect_filter_prefix_expansion(
	__isl_take isl_schedule_tree *tree,
	__isl_keep isl_schedule_tree_list *list, int n,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	struct isl_schedule_node_get_filter_prefix_data contracted;
	isl_union_pw_multi_aff *c;
	isl_union_map *exp, *universe;
	isl_union_set *filter;

	c = isl_schedule_tree_expansion_get_contraction(tree);
	exp = isl_schedule_tree_expansion_get_expansion(tree);

	contracted.initialized = 1;
	contracted.universe_domain = data->universe_domain;
	contracted.universe_filter = data->universe_filter;
	contracted.collect_prefix = data->collect_prefix;
	universe = isl_union_map_universe(isl_union_map_copy(exp));
	filter = isl_union_map_domain(universe);
	if (data->collect_prefix) {
		isl_space *space = isl_union_set_get_space(filter);
		space = isl_space_set_from_params(space);
		contracted.prefix = isl_multi_union_pw_aff_zero(space);
	}
	contracted.filter = filter;

	if (collect_filter_prefix(list, n, &contracted) < 0)
		contracted.filter = isl_union_set_free(contracted.filter);
	if (data->collect_prefix) {
		isl_multi_union_pw_aff *prefix;

		prefix = contracted.prefix;
		prefix =
		    isl_multi_union_pw_aff_pullback_union_pw_multi_aff(prefix,
					isl_union_pw_multi_aff_copy(c));
		data->prefix = isl_multi_union_pw_aff_flat_range_product(
					prefix, data->prefix);
	}
	filter = contracted.filter;
	if (data->universe_domain)
		filter = isl_union_set_preimage_union_pw_multi_aff(filter,
					isl_union_pw_multi_aff_copy(c));
	else
		filter = isl_union_set_apply(filter, isl_union_map_copy(exp));
	if (!data->initialized)
		data->filter = filter;
	else
		data->filter = isl_union_set_intersect(filter, data->filter);
	data->initialized = 1;

	isl_union_pw_multi_aff_free(c);
	isl_union_map_free(exp);
	isl_schedule_tree_free(tree);

	return isl_stat_ok;
}

/* Update the filter information in "data" based on the first "n"
 * elements in "list" and the extension tree root "tree", in case
 * data->universe_domain is set and data->collect_prefix is not.
 *
 * We collect the universe domain of the elements in "list" and
 * add it to the universe range of the extension (intersected
 * with the already collected filter, if any).
 */
static isl_stat collect_universe_domain_extension(
	__isl_take isl_schedule_tree *tree,
	__isl_keep isl_schedule_tree_list *list, int n,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	struct isl_schedule_node_get_filter_prefix_data data_outer;
	isl_union_map *extension;
	isl_union_set *filter;

	data_outer.initialized = 0;
	data_outer.universe_domain = 1;
	data_outer.universe_filter = data->universe_filter;
	data_outer.collect_prefix = 0;
	data_outer.filter = NULL;
	data_outer.prefix = NULL;

	if (collect_filter_prefix(list, n, &data_outer) < 0)
		data_outer.filter = isl_union_set_free(data_outer.filter);

	extension = isl_schedule_tree_extension_get_extension(tree);
	extension = isl_union_map_universe(extension);
	filter = isl_union_map_range(extension);
	if (data_outer.initialized)
		filter = isl_union_set_union(filter, data_outer.filter);
	if (data->initialized)
		filter = isl_union_set_intersect(filter, data->filter);

	data->filter = filter;

	isl_schedule_tree_free(tree);

	return isl_stat_ok;
}

/* Update "data" based on the tree node "tree" in case "data" has
 * not been initialized yet.
 *
 * Return 0 on success and -1 on error.
 *
 * If "tree" is a filter, then we set data->filter to this filter
 * (or its universe).
 * If "tree" is a domain, then this means we have reached the root
 * of the schedule tree without being able to extract any information.
 * We therefore initialize data->filter to the universe of the domain,
 * or the domain itself if data->universe_domain is not set.
 * If "tree" is a band with at least one member, then we set data->filter
 * to the universe of the schedule domain and replace the zero-dimensional
 * data->prefix by the band schedule (if data->collect_prefix is set).
 */
static isl_stat collect_filter_prefix_init(__isl_keep isl_schedule_tree *tree,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	enum isl_schedule_node_type type;
	isl_multi_union_pw_aff *mupa;
	isl_union_set *filter;
	isl_size n;

	type = isl_schedule_tree_get_type(tree);
	switch (type) {
	case isl_schedule_node_error:
		return isl_stat_error;
	case isl_schedule_node_expansion:
		isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
			"should be handled by caller", return isl_stat_error);
	case isl_schedule_node_extension:
		isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
			"cannot handle extension nodes", return isl_stat_error);
	case isl_schedule_node_context:
	case isl_schedule_node_leaf:
	case isl_schedule_node_guard:
	case isl_schedule_node_mark:
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		return isl_stat_ok;
	case isl_schedule_node_domain:
		filter = isl_schedule_tree_domain_get_domain(tree);
		if (data->universe_domain)
			filter = isl_union_set_universe(filter);
		data->filter = filter;
		break;
	case isl_schedule_node_band:
		n = isl_schedule_tree_band_n_member(tree);
		if (n < 0)
			return isl_stat_error;
		if (n == 0)
			return isl_stat_ok;
		mupa = isl_schedule_tree_band_get_partial_schedule(tree);
		if (data->collect_prefix) {
			isl_multi_union_pw_aff_free(data->prefix);
			mupa = isl_multi_union_pw_aff_reset_tuple_id(mupa,
								isl_dim_set);
			data->prefix = isl_multi_union_pw_aff_copy(mupa);
		}
		filter = isl_multi_union_pw_aff_domain(mupa);
		filter = isl_union_set_universe(filter);
		data->filter = filter;
		break;
	case isl_schedule_node_filter:
		filter = isl_schedule_tree_filter_get_filter(tree);
		if (data->universe_filter)
			filter = isl_union_set_universe(filter);
		data->filter = filter;
		break;
	}

	if ((data->collect_prefix && !data->prefix) || !data->filter)
		return isl_stat_error;

	data->initialized = 1;

	return isl_stat_ok;
}

/* Update "data" based on the tree node "tree" in case "data" has
 * already been initialized.
 *
 * Return 0 on success and -1 on error.
 *
 * If "tree" is a domain and data->universe_domain is not set, then
 * intersect data->filter with the domain.
 * If "tree" is a filter, then we intersect data->filter with this filter
 * (or its universe).
 * If "tree" is a band with at least one member and data->collect_prefix
 * is set, then we extend data->prefix with the band schedule.
 * If "tree" is an extension, then we make sure that we are not collecting
 * information on any extended domain elements.
 */
static isl_stat collect_filter_prefix_update(__isl_keep isl_schedule_tree *tree,
	struct isl_schedule_node_get_filter_prefix_data *data)
{
	enum isl_schedule_node_type type;
	isl_multi_union_pw_aff *mupa;
	isl_union_set *filter;
	isl_union_map *extension;
	isl_bool empty;
	isl_size n;

	type = isl_schedule_tree_get_type(tree);
	switch (type) {
	case isl_schedule_node_error:
		return isl_stat_error;
	case isl_schedule_node_expansion:
		isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal,
			"should be handled by caller", return isl_stat_error);
	case isl_schedule_node_extension:
		extension = isl_schedule_tree_extension_get_extension(tree);
		extension = isl_union_map_intersect_range(extension,
					isl_union_set_copy(data->filter));
		empty = isl_union_map_is_empty(extension);
		isl_union_map_free(extension);
		if (empty < 0)
			return isl_stat_error;
		if (empty)
			break;
		isl_die(isl_schedule_tree_get_ctx(tree), isl_error_invalid,
			"cannot handle extension nodes", return isl_stat_error);
	case isl_schedule_node_context:
	case isl_schedule_node_leaf:
	case isl_schedule_node_guard:
	case isl_schedule_node_mark:
	case isl_schedule_node_sequence:
	case isl_schedule_node_set:
		break;
	case isl_schedule_node_domain:
		if (data->universe_domain)
			break;
		filter = isl_schedule_tree_domain_get_domain(tree);
		data->filter = isl_union_set_intersect(data->filter, filter);
		break;
	case isl_schedule_node_band:
		n = isl_schedule_tree_band_n_member(tree);
		if (n < 0)
			return isl_stat_error;
		if (n == 0)
			break;
		if (!data->collect_prefix)
			break;
		mupa = isl_schedule_tree_band_get_partial_schedule(tree);
		data->prefix = isl_multi_union_pw_aff_flat_range_product(mupa,
							data->prefix);
		if (!data->prefix)
			return isl_stat_error;
		break;
	case isl_schedule_node_filter:
		filter = isl_schedule_tree_filter_get_filter(tree);
		if (data->universe_filter)
			filter = isl_union_set_universe(filter);
		data->filter = isl_union_set_intersect(data->filter, filter);
		if (!data->filter)
			return isl_stat_error;
		break;
	}

	return isl_stat_ok;
}

/* Collect filter and/or prefix information from the first "n"
 * elements in "list" (which represent the ancestors of a node).
 * Store the results in "data".
 *
 * Extension nodes are only supported if they do not affect the outcome,
 * i.e., if we are collecting information on non-extended domain elements,
 * or if we are collecting the universe domain (without prefix).
 *
 * Return 0 on success and -1 on error.
 *
 * We traverse the list from innermost ancestor (last element)
 * to outermost ancestor (first element), calling collect_filter_prefix_init
 * on each node as long as we have not been able to extract any information
 * yet and collect_filter_prefix_update afterwards.
 * If we come across an expansion node, then we interrupt the traversal
 * and call collect_filter_prefix_expansion to restart the traversal
 * over the remaining ancestors and to combine the results with those
 * that have already been collected.
 * If we come across an extension node and we are only computing
 * the universe domain, then we interrupt the traversal and call
 * collect_universe_domain_extension to restart the traversal
 * over the remaining ancestors and to combine the results with those
 * that have already been collected.
 * On successful return, data->initialized will be set since the outermost
 * ancestor is a domain node, which always results in an initialization.
 */
static isl_stat collect_filter_prefix(__isl_keep isl_schedule_tree_list *list,
	int n, struct isl_schedule_node_get_filter_prefix_data *data)
{
	int i;

	if (!list)
		return isl_stat_error;

	for (i = n - 1; i >= 0; --i) {
		isl_schedule_tree *tree;
		enum isl_schedule_node_type type;
		isl_stat r;

		tree = isl_schedule_tree_list_get_schedule_tree(list, i);
		if (!tree)
			return isl_stat_error;
		type = isl_schedule_tree_get_type(tree);
		if (type == isl_schedule_node_expansion)
			return collect_filter_prefix_expansion(tree, list, i,
								data);
		if (type == isl_schedule_node_extension &&
		    data->universe_domain && !data->collect_prefix)
			return collect_universe_domain_extension(tree, list, i,
								data);
		if (!data->initialized)
			r = collect_filter_prefix_init(tree, data);
		else
			r = collect_filter_prefix_update(tree, data);
		isl_schedule_tree_free(tree);
		if (r < 0)
			return isl_stat_error;
	}

	return isl_stat_ok;
}

/* Return the concatenation of the partial schedules of all outer band
 * nodes of "node" intersected with all outer filters
 * as an isl_multi_union_pw_aff.
 * None of the ancestors of "node" may be an extension node, unless
 * there is also a filter ancestor that filters out all the extended
 * domain elements.
 *
 * If "node" is pointing at the root of the schedule tree, then
 * there are no domain elements reaching the current node, so
 * we return an empty result.
 *
 * We collect all the filters and partial schedules in collect_filter_prefix
 * and intersect the domain of the combined schedule with the combined filter.
 */
__isl_give isl_multi_union_pw_aff *
isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(
	__isl_keep isl_schedule_node *node)
{
	isl_size n;
	isl_space *space;
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	space = isl_schedule_get_space(node->schedule);
	space = isl_space_set_from_params(space);
	if (node->tree == node->schedule->root)
		return isl_multi_union_pw_aff_zero(space);

	data.initialized = 0;
	data.universe_domain = 1;
	data.universe_filter = 0;
	data.collect_prefix = 1;
	data.filter = NULL;
	data.prefix = isl_multi_union_pw_aff_zero(space);

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0 || collect_filter_prefix(node->ancestors, n, &data) < 0)
		data.prefix = isl_multi_union_pw_aff_free(data.prefix);

	data.prefix = isl_multi_union_pw_aff_intersect_domain(data.prefix,
								data.filter);

	return data.prefix;
}

/* Return the concatenation of the partial schedules of all outer band
 * nodes of "node" intersected with all outer filters
 * as an isl_union_pw_multi_aff.
 * None of the ancestors of "node" may be an extension node, unless
 * there is also a filter ancestor that filters out all the extended
 * domain elements.
 *
 * If "node" is pointing at the root of the schedule tree, then
 * there are no domain elements reaching the current node, so
 * we return an empty result.
 *
 * We collect all the filters and partial schedules in collect_filter_prefix.
 * The partial schedules are collected as an isl_multi_union_pw_aff.
 * If this isl_multi_union_pw_aff is zero-dimensional, then it does not
 * contain any domain information, so we construct the isl_union_pw_multi_aff
 * result as a zero-dimensional function on the collected filter.
 * Otherwise, we convert the isl_multi_union_pw_aff to
 * an isl_union_pw_multi_aff and intersect the domain with the filter.
 */
__isl_give isl_union_pw_multi_aff *
isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(
	__isl_keep isl_schedule_node *node)
{
	isl_size n, dim;
	isl_space *space;
	isl_union_pw_multi_aff *prefix;
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	space = isl_schedule_get_space(node->schedule);
	if (node->tree == node->schedule->root)
		return isl_union_pw_multi_aff_empty(space);

	space = isl_space_set_from_params(space);
	data.initialized = 0;
	data.universe_domain = 1;
	data.universe_filter = 0;
	data.collect_prefix = 1;
	data.filter = NULL;
	data.prefix = isl_multi_union_pw_aff_zero(space);

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0 || collect_filter_prefix(node->ancestors, n, &data) < 0)
		data.prefix = isl_multi_union_pw_aff_free(data.prefix);

	dim = isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set);
	if (dim < 0)
		data.prefix = isl_multi_union_pw_aff_free(data.prefix);
	if (data.prefix && dim == 0) {
		isl_multi_union_pw_aff_free(data.prefix);
		prefix = isl_union_pw_multi_aff_from_domain(data.filter);
	} else {
		prefix =
		    isl_union_pw_multi_aff_from_multi_union_pw_aff(data.prefix);
		prefix = isl_union_pw_multi_aff_intersect_domain(prefix,
								data.filter);
	}

	return prefix;
}

/* Return the concatenation of the partial schedules of all outer band
 * nodes of "node" intersected with all outer filters
 * as an isl_union_map.
 */
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_union_map(
	__isl_keep isl_schedule_node *node)
{
	isl_union_pw_multi_aff *upma;

	upma = isl_schedule_node_get_prefix_schedule_union_pw_multi_aff(node);
	return isl_union_map_from_union_pw_multi_aff(upma);
}
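
/* Example (illustrative sketch, not part of the library): for a node
 * directly below a band node with partial schedule "[{ S[i] -> [(i)] }]"
 * and root domain "{ S[i] : 0 <= i < 10 }" (names made up for
 * illustration purposes), the call below would yield "{ S[i] -> [i] }"
 * on the universe of the domain elements, while
 * isl_schedule_node_get_prefix_schedule_relation (defined next) would
 * additionally restrict the domain to "0 <= i < 10".
 *
 *	isl_union_map *prefix;
 *	prefix = isl_schedule_node_get_prefix_schedule_union_map(node);
 *	isl_union_map_free(prefix);
 */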

/* Return the concatenation of the partial schedules of all outer band
 * nodes of "node" intersected with all outer domain constraints.
 * None of the ancestors of "node" may be an extension node, unless
 * there is also a filter ancestor that filters out all the extended
 * domain elements.
 *
 * Essentially, this function intersects the domain of the output
 * of isl_schedule_node_get_prefix_schedule_union_map with the output
 * of isl_schedule_node_get_domain, except that it only traverses
 * the ancestors of "node" once.
 */
__isl_give isl_union_map *isl_schedule_node_get_prefix_schedule_relation(
	__isl_keep isl_schedule_node *node)
{
	isl_size n, dim;
	isl_space *space;
	isl_union_map *prefix;
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	space = isl_schedule_get_space(node->schedule);
	if (node->tree == node->schedule->root)
		return isl_union_map_empty(space);

	space = isl_space_set_from_params(space);
	data.initialized = 0;
	data.universe_domain = 0;
	data.universe_filter = 0;
	data.collect_prefix = 1;
	data.filter = NULL;
	data.prefix = isl_multi_union_pw_aff_zero(space);

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0 || collect_filter_prefix(node->ancestors, n, &data) < 0)
		data.prefix = isl_multi_union_pw_aff_free(data.prefix);

	dim = isl_multi_union_pw_aff_dim(data.prefix, isl_dim_set);
	if (dim < 0)
		data.prefix = isl_multi_union_pw_aff_free(data.prefix);
	if (data.prefix && dim == 0) {
		isl_multi_union_pw_aff_free(data.prefix);
		prefix = isl_union_map_from_domain(data.filter);
	} else {
		prefix = isl_union_map_from_multi_union_pw_aff(data.prefix);
		prefix = isl_union_map_intersect_domain(prefix, data.filter);
	}

	return prefix;
}

/* Return the domain elements that reach "node".
 *
 * If "node" is pointing at the root of the schedule tree, then
 * there are no domain elements reaching the current node, so
 * we return an empty result.
 * None of the ancestors of "node" may be an extension node, unless
 * there is also a filter ancestor that filters out all the extended
 * domain elements.
 *
 * Otherwise, we collect all filters reaching the node,
 * intersected with the root domain in collect_filter_prefix.
 */
__isl_give isl_union_set *isl_schedule_node_get_domain(
	__isl_keep isl_schedule_node *node)
{
	isl_size n;
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	if (node->tree == node->schedule->root) {
		isl_space *space;

		space = isl_schedule_get_space(node->schedule);
		return isl_union_set_empty(space);
	}

	data.initialized = 0;
	data.universe_domain = 0;
	data.universe_filter = 0;
	data.collect_prefix = 0;
	data.filter = NULL;
	data.prefix = NULL;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0 || collect_filter_prefix(node->ancestors, n, &data) < 0)
		data.filter = isl_union_set_free(data.filter);

	return data.filter;
}

/* Return the union of universe sets of the domain elements that reach "node".
 *
 * If "node" is pointing at the root of the schedule tree, then
 * there are no domain elements reaching the current node, so
 * we return an empty result.
 *
 * Otherwise, we collect the universes of all filters reaching the node
 * in collect_filter_prefix.
 */
__isl_give isl_union_set *isl_schedule_node_get_universe_domain(
	__isl_keep isl_schedule_node *node)
{
	isl_size n;
	struct isl_schedule_node_get_filter_prefix_data data;

	if (!node)
		return NULL;

	if (node->tree == node->schedule->root) {
		isl_space *space;

		space = isl_schedule_get_space(node->schedule);
		return isl_union_set_empty(space);
	}

	data.initialized = 0;
	data.universe_domain = 1;
	data.universe_filter = 1;
	data.collect_prefix = 0;
	data.filter = NULL;
	data.prefix = NULL;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0 || collect_filter_prefix(node->ancestors, n, &data) < 0)
		data.filter = isl_union_set_free(data.filter);

	return data.filter;
}
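
/* Example (illustrative sketch, not part of the library): with a root
 * domain node "{ S[i] : 0 <= i < 10 }", a node below it would see
 * "{ S[i] : 0 <= i < 10 }" as its domain and the unconstrained set
 * "{ S[i] }" as its universe domain (the names are made up for
 * illustration purposes).
 *
 *	isl_union_set *dom, *univ;
 *	dom = isl_schedule_node_get_domain(node);
 *	univ = isl_schedule_node_get_universe_domain(node);
 *	isl_union_set_free(dom);
 *	isl_union_set_free(univ);
 */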

/* Return the subtree schedule of "node".
 *
 * Since isl_schedule_tree_get_subtree_schedule_union_map does not handle
 * trees that do not contain any schedule information, we first
 * move down to the first relevant descendant and handle leaves ourselves.
 *
 * If the subtree rooted at "node" contains any expansion nodes, then
 * the returned subtree schedule is formulated in terms of the expanded
 * domains.
 * The subtree is not allowed to contain any extension nodes.
 */
__isl_give isl_union_map *isl_schedule_node_get_subtree_schedule_union_map(
	__isl_keep isl_schedule_node *node)
{
	isl_schedule_tree *tree, *leaf;
	isl_union_map *umap;

	tree = isl_schedule_node_get_tree(node);
	leaf = isl_schedule_node_peek_leaf(node);
	tree = isl_schedule_tree_first_schedule_descendant(tree, leaf);
	if (!tree)
		return NULL;
	if (tree == leaf) {
		isl_union_set *domain;
		domain = isl_schedule_node_get_universe_domain(node);
		isl_schedule_tree_free(tree);
		return isl_union_map_from_domain(domain);
	}

	umap = isl_schedule_tree_get_subtree_schedule_union_map(tree);
	isl_schedule_tree_free(tree);
	return umap;
}

/* Return the number of ancestors of "node" in its schedule tree.
 */
isl_size isl_schedule_node_get_tree_depth(__isl_keep isl_schedule_node *node)
{
	if (!node)
		return isl_size_error;
	return isl_schedule_tree_list_n_schedule_tree(node->ancestors);
}

/* Does "node" have a parent?
 *
 * That is, does it point to any node of the schedule other than the root?
 */
isl_bool isl_schedule_node_has_parent(__isl_keep isl_schedule_node *node)
{
	isl_size depth;

	depth = isl_schedule_node_get_tree_depth(node);
	if (depth < 0)
		return isl_bool_error;
	return isl_bool_ok(depth != 0);
}

/* Return the position of "node" among the children of its parent.
 */
isl_size isl_schedule_node_get_child_position(
	__isl_keep isl_schedule_node *node)
{
	isl_size n;
	isl_bool has_parent;

	if (!node)
		return isl_size_error;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0)
		return isl_size_error;
	if (!has_parent)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no parent", return isl_size_error);

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	return n < 0 ? isl_size_error : node->child_pos[n - 1];
}

/* Does the parent (if any) of "node" have any children with a smaller child
 * position than this one?
 */
isl_bool isl_schedule_node_has_previous_sibling(
	__isl_keep isl_schedule_node *node)
{
	isl_size n;
	isl_bool has_parent;

	if (!node)
		return isl_bool_error;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0 || !has_parent)
		return has_parent;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_bool_error;

	return isl_bool_ok(node->child_pos[n - 1] > 0);
}

/* Does the parent (if any) of "node" have any children with a greater child
 * position than this one?
 */
isl_bool isl_schedule_node_has_next_sibling(__isl_keep isl_schedule_node *node)
{
	isl_size n, n_child;
	isl_bool has_parent;
	isl_schedule_tree *tree;

	if (!node)
		return isl_bool_error;
	has_parent = isl_schedule_node_has_parent(node);
	if (has_parent < 0 || !has_parent)
		return has_parent;

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_bool_error;
	tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n - 1);
	n_child = isl_schedule_tree_n_children(tree);
	isl_schedule_tree_free(tree);
	if (n_child < 0)
		return isl_bool_error;

	return isl_bool_ok(node->child_pos[n - 1] + 1 < n_child);
}

/* Does "node" have any children?
 *
 * Any node other than the leaf nodes is considered to have at least
 * one child, even if the corresponding isl_schedule_tree does not
 * have any children.
 */
isl_bool isl_schedule_node_has_children(__isl_keep isl_schedule_node *node)
{
	if (!node)
		return isl_bool_error;
	return isl_bool_ok(!isl_schedule_tree_is_leaf(node->tree));
}

/* Return the number of children of "node".
 *
 * Any node other than the leaf nodes is considered to have at least
 * one child, even if the corresponding isl_schedule_tree does not
 * have any children.  That is, the number of children of "node" is
 * only zero if its tree is the explicit empty tree.  Otherwise,
 * if the isl_schedule_tree has any children, then it is equal
 * to the number of children of "node".  If it has zero children,
 * then "node" still has a leaf node as child.
 */
isl_size isl_schedule_node_n_children(__isl_keep isl_schedule_node *node)
{
	isl_size n;

	if (!node)
		return isl_size_error;

	if (isl_schedule_tree_is_leaf(node->tree))
		return 0;

	n = isl_schedule_tree_n_children(node->tree);
	if (n < 0)
		return isl_size_error;
	if (n == 0)
		return 1;

	return n;
}
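
/* Example (illustrative sketch, not part of the library): a band node
 * "band" (a hypothetical isl_schedule_node) whose isl_schedule_tree has
 * no explicit children still reports a single child, which turns out to
 * be a leaf node.
 *
 *	isl_size n = isl_schedule_node_n_children(band);
 *	isl_schedule_node *leaf = isl_schedule_node_get_child(band, 0);
 *	enum isl_schedule_node_type type = isl_schedule_node_get_type(leaf);
 *	isl_schedule_node_free(leaf);
 *
 * Here "n" is 1 and "type" is isl_schedule_node_leaf.
 */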

/* Move the "node" pointer to the ancestor of the given generation
 * of the node it currently points to, where generation 0 is the node
 * itself and generation 1 is its parent.
 */
__isl_give isl_schedule_node *isl_schedule_node_ancestor(
	__isl_take isl_schedule_node *node, int generation)
{
	isl_size n;
	isl_schedule_tree *tree;

	if (!node)
		return NULL;
	if (generation == 0)
		return node;
	n = isl_schedule_node_get_tree_depth(node);
	if (n < 0)
		return isl_schedule_node_free(node);
	if (generation < 0 || generation > n)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"generation out of bounds",
			return isl_schedule_node_free(node));
	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;

	tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
							n - generation);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;
	node->ancestors = isl_schedule_tree_list_drop(node->ancestors,
						n - generation, generation);
	if (!node->ancestors || !node->tree)
		return isl_schedule_node_free(node);

	return node;
}

/* Move the "node" pointer to the parent of the node it currently points to.
 */
__isl_give isl_schedule_node *isl_schedule_node_parent(
	__isl_take isl_schedule_node *node)
{
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_parent(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no parent",
			return isl_schedule_node_free(node));
	return isl_schedule_node_ancestor(node, 1);
}

/* Move the "node" pointer to the parent of its parent.
 */
__isl_give isl_schedule_node *isl_schedule_node_grandparent(
	__isl_take isl_schedule_node *node)
{
	return isl_schedule_node_ancestor(node, 2);
}

/* Move the "node" pointer to the root of its schedule tree.
 */
__isl_give isl_schedule_node *isl_schedule_node_root(
	__isl_take isl_schedule_node *node)
{
	isl_size n;

	if (!node)
		return NULL;
	n = isl_schedule_node_get_tree_depth(node);
	if (n < 0)
		return isl_schedule_node_free(node);
	return isl_schedule_node_ancestor(node, n);
}

/* Move the "node" pointer to the child at position "pos" of the node
 * it currently points to.
 */
__isl_give isl_schedule_node *isl_schedule_node_child(
	__isl_take isl_schedule_node *node, int pos)
{
	isl_size n;
	isl_ctx *ctx;
	isl_schedule_tree *tree;
	int *child_pos;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_children(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no children",
			return isl_schedule_node_free(node));

	ctx = isl_schedule_node_get_ctx(node);
	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_schedule_node_free(node);
	child_pos = isl_realloc_array(ctx, node->child_pos, int, n + 1);
	if (!child_pos)
		return isl_schedule_node_free(node);
	node->child_pos = child_pos;
	node->child_pos[n] = pos;

	node->ancestors = isl_schedule_tree_list_add(node->ancestors,
				isl_schedule_tree_copy(node->tree));
	tree = node->tree;
	if (isl_schedule_tree_has_children(tree))
		tree = isl_schedule_tree_get_child(tree, pos);
	else
		tree = isl_schedule_node_get_leaf(node);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;

	if (!node->tree || !node->ancestors)
		return isl_schedule_node_free(node);

	return node;
}

/* Move the "node" pointer to the child at position "pos2" of the child
 * at position "pos1".
 */
__isl_give isl_schedule_node *isl_schedule_node_grandchild(
	__isl_take isl_schedule_node *node, int pos1, int pos2)
{
	node = isl_schedule_node_child(node, pos1);
	node = isl_schedule_node_child(node, pos2);
	return node;
}

/* Move the "node" pointer to the first child of the node
 * it currently points to.
 */
__isl_give isl_schedule_node *isl_schedule_node_first_child(
	__isl_take isl_schedule_node *node)
{
	return isl_schedule_node_child(node, 0);
}
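
/* Example (illustrative sketch, not part of the library): the movement
 * functions consume their argument and return the repositioned node,
 * so navigation is written as a chain of reassignments.
 *
 *	node = isl_schedule_node_first_child(node);
 *	node = isl_schedule_node_first_child(node);
 *	node = isl_schedule_node_parent(node);
 *	node = isl_schedule_node_root(node);
 *
 * Any failure along the way results in a NULL pointer being propagated.
 */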

/* Move the "node" pointer to the child of this node's parent in
 * the previous child position.
 */
__isl_give isl_schedule_node *isl_schedule_node_previous_sibling(
	__isl_take isl_schedule_node *node)
{
	isl_size n;
	isl_schedule_tree *parent, *tree;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_previous_sibling(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no previous sibling",
			return isl_schedule_node_free(node));

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_schedule_node_free(node);
	parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
								n - 1);
	if (!parent)
		return isl_schedule_node_free(node);
	node->child_pos[n - 1]--;
	tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
						node->child_pos[n - 1]);
	isl_schedule_tree_free(parent);
	if (!tree)
		return isl_schedule_node_free(node);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;

	return node;
}

/* Move the "node" pointer to the child of this node's parent in
 * the next child position.
 */
__isl_give isl_schedule_node *isl_schedule_node_next_sibling(
	__isl_take isl_schedule_node *node)
{
	isl_size n;
	isl_schedule_tree *parent, *tree;

	node = isl_schedule_node_cow(node);
	if (!node)
		return NULL;
	if (!isl_schedule_node_has_next_sibling(node))
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"node has no next sibling",
			return isl_schedule_node_free(node));

	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_schedule_node_free(node);
	parent = isl_schedule_tree_list_get_schedule_tree(node->ancestors,
								n - 1);
	if (!parent)
		return isl_schedule_node_free(node);
	node->child_pos[n - 1]++;
	tree = isl_schedule_tree_list_get_schedule_tree(parent->children,
						node->child_pos[n - 1]);
	isl_schedule_tree_free(parent);
	if (!tree)
		return isl_schedule_node_free(node);
	isl_schedule_tree_free(node->tree);
	node->tree = tree;

	return node;
}

/* Return a copy of the child at position "pos" of "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_get_child(
	__isl_keep isl_schedule_node *node, int pos)
{
	return isl_schedule_node_child(isl_schedule_node_copy(node), pos);
}

/* Traverse the descendants of "node" in depth-first order, including
 * "node" itself.  Call "enter" whenever a node is entered and "leave"
 * whenever a node is left.  The callback "enter" is responsible
 * for moving to the deepest initial subtree of its argument that
 * should be traversed.
 */
static __isl_give isl_schedule_node *traverse(
	__isl_take isl_schedule_node *node,
	__isl_give isl_schedule_node *(*enter)(
		__isl_take isl_schedule_node *node, void *user),
	__isl_give isl_schedule_node *(*leave)(
		__isl_take isl_schedule_node *node, void *user),
	void *user)
{
	isl_size depth;
	isl_size node_depth;

	depth = isl_schedule_node_get_tree_depth(node);
	if (depth < 0)
		return isl_schedule_node_free(node);

	do {
		node = enter(node, user);
		node = leave(node, user);
		while ((node_depth = isl_schedule_node_get_tree_depth(node)) >
				depth &&
		    !isl_schedule_node_has_next_sibling(node)) {
			node = isl_schedule_node_parent(node);
			node = leave(node, user);
		}
		if (node_depth < 0)
			return isl_schedule_node_free(node);
		if (node_depth > depth)
			node = isl_schedule_node_next_sibling(node);
	} while (node_depth > depth);

	return node;
}

/* Internal data structure for isl_schedule_node_foreach_descendant_top_down.
 *
 * "fn" is the user-specified callback function.
 * "user" is the user-specified argument for the callback.
 */
struct isl_schedule_node_preorder_data {
	isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user);
	void *user;
};

/* Callback for "traverse" to enter a node and to move
 * to the deepest initial subtree that should be traversed
 * for use in a preorder visit.
 *
 * If the user callback returns a negative value, then we abort
 * the traversal.  If this callback returns zero, then we skip
 * the subtree rooted at the current node.  Otherwise, we move
 * down to the first child and repeat the process until a leaf
 * is reached.
 */
static __isl_give isl_schedule_node *preorder_enter(
	__isl_take isl_schedule_node *node, void *user)
{
	struct isl_schedule_node_preorder_data *data = user;

	if (!node)
		return NULL;

	do {
		isl_bool r;

		r = data->fn(node, data->user);
		if (r < 0)
			return isl_schedule_node_free(node);
		if (r == isl_bool_false)
			return node;
	} while (isl_schedule_node_has_children(node) &&
		(node = isl_schedule_node_first_child(node)) != NULL);

	return node;
}

/* Callback for "traverse" to leave a node
 * for use in a preorder visit.
 * Since we already visited the node when we entered it,
 * we do not need to do anything here.
 */
static __isl_give isl_schedule_node *preorder_leave(
	__isl_take isl_schedule_node *node, void *user)
{
	return node;
}

/* Traverse the descendants of "node" (including the node itself)
 * in depth first preorder.
 *
 * If "fn" returns isl_bool_error on any of the nodes,
 * then the traversal is aborted.
 * If "fn" returns isl_bool_false on any of the nodes, then the subtree rooted
 * at that node is skipped.
 *
 * Return isl_stat_ok on success and isl_stat_error on failure.
 */
isl_stat isl_schedule_node_foreach_descendant_top_down(
	__isl_keep isl_schedule_node *node,
	isl_bool (*fn)(__isl_keep isl_schedule_node *node, void *user),
	void *user)
{
	struct isl_schedule_node_preorder_data data = { fn, user };

	node = isl_schedule_node_copy(node);
	node = traverse(node, &preorder_enter, &preorder_leave, &data);
	isl_schedule_node_free(node);

	return node ? isl_stat_ok : isl_stat_error;
}
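
/* Example (illustrative sketch, not part of the library): count the
 * band nodes in a schedule tree using a preorder visit.  The callback
 * name and counter are made up for illustration purposes.
 *
 *	static isl_bool count_bands(__isl_keep isl_schedule_node *node,
 *		void *user)
 *	{
 *		int *n = user;
 *
 *		if (isl_schedule_node_get_type(node) == isl_schedule_node_band)
 *			(*n)++;
 *		return isl_bool_true;
 *	}
 *
 *	int n = 0;
 *	if (isl_schedule_node_foreach_descendant_top_down(node,
 *						&count_bands, &n) < 0)
 *		n = -1;
 */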

/* Internal data structure for isl_schedule_node_every_descendant.
 *
 * "test" is the user-specified callback function.
 * "user" is the user-specified callback function argument.
 *
 * "failed" is initialized to 0 and set to 1 if "test" fails
 * on any node.
 */
struct isl_union_map_every_data {
	isl_bool (*test)(__isl_keep isl_schedule_node *node, void *user);
	void *user;
	int failed;
};

/* isl_schedule_node_foreach_descendant_top_down callback
 * that sets data->failed if data->test returns false and
 * subsequently aborts the traversal.
 */
static isl_bool call_every(__isl_keep isl_schedule_node *node, void *user)
{
	struct isl_union_map_every_data *data = user;
	isl_bool r;

	r = data->test(node, data->user);
	if (r < 0)
		return isl_bool_error;
	if (r)
		return isl_bool_true;
	data->failed = 1;
	return isl_bool_error;
}

/* Does "test" succeed on every descendant of "node" (including "node" itself)?
 */
isl_bool isl_schedule_node_every_descendant(__isl_keep isl_schedule_node *node,
	isl_bool (*test)(__isl_keep isl_schedule_node *node, void *user),
	void *user)
{
	struct isl_union_map_every_data data = { test, user, 0 };
	isl_stat r;

	r = isl_schedule_node_foreach_descendant_top_down(node, &call_every,
							&data);
	if (r >= 0)
		return isl_bool_true;
	if (data.failed)
		return isl_bool_false;
	return isl_bool_error;
}
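
/* Example (illustrative sketch, not part of the library): check that
 * no descendant is an extension node.  The callback name is made up
 * for illustration purposes.
 *
 *	static isl_bool not_extension(__isl_keep isl_schedule_node *node,
 *		void *user)
 *	{
 *		enum isl_schedule_node_type type;
 *
 *		type = isl_schedule_node_get_type(node);
 *		return isl_bool_ok(type != isl_schedule_node_extension);
 *	}
 *
 *	isl_bool clean;
 *	clean = isl_schedule_node_every_descendant(node, &not_extension, NULL);
 */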
1477 | |
1478 | /* Internal data structure for isl_schedule_node_map_descendant_bottom_up. |
1479 | * |
1480 | * "fn" is the user-specified callback function. |
1481 | * "user" is the user-specified argument for the callback. |
1482 | */ |
1483 | struct isl_schedule_node_postorder_data { |
1484 | __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node, |
1485 | void *user); |
1486 | void *user; |
1487 | }; |
1488 | |
1489 | /* Callback for "traverse" to enter a node and to move |
1490 | * to the deepest initial subtree that should be traversed |
1491 | * for use in a postorder visit. |
1492 | * |
1493 | * Since we are performing a postorder visit, we only need |
1494 | * to move to the deepest initial leaf here. |
1495 | */ |
1496 | static __isl_give isl_schedule_node *postorder_enter( |
1497 | __isl_take isl_schedule_node *node, void *user) |
1498 | { |
1499 | while (node && isl_schedule_node_has_children(node)) |
1500 | node = isl_schedule_node_first_child(node); |
1501 | |
1502 | return node; |
1503 | } |
1504 | |
1505 | /* Callback for "traverse" to leave a node |
1506 | * for use in a postorder visit. |
1507 | * |
1508 | * Since we are performing a postorder visit, we need |
1509 | * to call the user callback here. |
1510 | */ |
1511 | static __isl_give isl_schedule_node *postorder_leave( |
1512 | __isl_take isl_schedule_node *node, void *user) |
1513 | { |
1514 | struct isl_schedule_node_postorder_data *data = user; |
1515 | |
1516 | return data->fn(node, data->user); |
1517 | } |
1518 | |
1519 | /* Traverse the descendants of "node" (including the node itself) |
1520 | * in depth first postorder, allowing the user to modify the visited node. |
1521 | * The traversal continues from the node returned by the callback function. |
1522 | * It is the responsibility of the user to ensure that this does not |
1523 | * lead to an infinite loop. It is safest to always return a pointer |
1524 | * to the same position (same ancestors and child positions) as the input node. |
1525 | */ |
1526 | __isl_give isl_schedule_node *isl_schedule_node_map_descendant_bottom_up( |
1527 | __isl_take isl_schedule_node *node, |
1528 | __isl_give isl_schedule_node *(*fn)(__isl_take isl_schedule_node *node, |
1529 | void *user), void *user) |
1530 | { |
1531 | struct isl_schedule_node_postorder_data data = { fn, user }; |
1532 | |
1533 | return traverse(node, enter: &postorder_enter, leave: &postorder_leave, user: &data); |
1534 | } |
1535 | |
1536 | /* Traverse the ancestors of "node" from the root down to and including |
1537 | * the parent of "node", calling "fn" on each of them. |
1538 | * |
1539 | * If "fn" returns -1 on any of the nodes, then the traversal is aborted. |
1540 | * |
1541 | * Return 0 on success and -1 on failure. |
1542 | */ |
1543 | isl_stat isl_schedule_node_foreach_ancestor_top_down( |
1544 | __isl_keep isl_schedule_node *node, |
1545 | isl_stat (*fn)(__isl_keep isl_schedule_node *node, void *user), |
1546 | void *user) |
1547 | { |
1548 | int i; |
1549 | isl_size n; |
1550 | |
1551 | n = isl_schedule_node_get_tree_depth(node); |
1552 | if (n < 0) |
1553 | return isl_stat_error; |
1554 | |
1555 | for (i = 0; i < n; ++i) { |
1556 | isl_schedule_node *ancestor; |
1557 | isl_stat r; |
1558 | |
1559 | ancestor = isl_schedule_node_copy(node); |
1560 | ancestor = isl_schedule_node_ancestor(node: ancestor, generation: n - i); |
1561 | r = fn(ancestor, user); |
1562 | isl_schedule_node_free(node: ancestor); |
1563 | if (r < 0) |
1564 | return isl_stat_error; |
1565 | } |
1566 | |
1567 | return isl_stat_ok; |
1568 | } |
1569 | |
1570 | /* Is any node in the subtree rooted at "node" anchored? |
1571 | * That is, do any of these nodes reference the outer band nodes? |
1572 | */ |
1573 | isl_bool isl_schedule_node_is_subtree_anchored( |
1574 | __isl_keep isl_schedule_node *node) |
1575 | { |
1576 | if (!node) |
1577 | return isl_bool_error; |
1578 | return isl_schedule_tree_is_subtree_anchored(tree: node->tree); |
1579 | } |
1580 | |
1581 | /* Return the number of members in the given band node. |
1582 | */ |
1583 | isl_size isl_schedule_node_band_n_member(__isl_keep isl_schedule_node *node) |
1584 | { |
1585 | if (!node) |
1586 | return isl_size_error; |
1587 | return isl_schedule_tree_band_n_member(tree: node->tree); |
1588 | } |
1589 | |
1590 | /* Is the band member at position "pos" of the band node "node" |
1591 | * marked coincident? |
1592 | */ |
1593 | isl_bool isl_schedule_node_band_member_get_coincident( |
1594 | __isl_keep isl_schedule_node *node, int pos) |
1595 | { |
1596 | if (!node) |
1597 | return isl_bool_error; |
1598 | return isl_schedule_tree_band_member_get_coincident(tree: node->tree, pos); |
1599 | } |
1600 | |
/* Mark the band member at position "pos" of the band node "node"
1602 | * as being coincident or not according to "coincident". |
1603 | */ |
1604 | __isl_give isl_schedule_node *isl_schedule_node_band_member_set_coincident( |
1605 | __isl_take isl_schedule_node *node, int pos, int coincident) |
1606 | { |
1607 | int c; |
1608 | isl_schedule_tree *tree; |
1609 | |
1610 | if (!node) |
1611 | return NULL; |
1612 | c = isl_schedule_node_band_member_get_coincident(node, pos); |
1613 | if (c == coincident) |
1614 | return node; |
1615 | |
1616 | tree = isl_schedule_tree_copy(tree: node->tree); |
1617 | tree = isl_schedule_tree_band_member_set_coincident(tree, pos, |
1618 | coincident); |
1619 | node = isl_schedule_node_graft_tree(pos: node, tree); |
1620 | |
1621 | return node; |
1622 | } |
1623 | |
1624 | /* Is the band node "node" marked permutable? |
1625 | */ |
1626 | isl_bool isl_schedule_node_band_get_permutable( |
1627 | __isl_keep isl_schedule_node *node) |
1628 | { |
1629 | if (!node) |
1630 | return isl_bool_error; |
1631 | |
1632 | return isl_schedule_tree_band_get_permutable(tree: node->tree); |
1633 | } |
1634 | |
/* Mark the band node "node" permutable or not according to "permutable".
1636 | */ |
1637 | __isl_give isl_schedule_node *isl_schedule_node_band_set_permutable( |
1638 | __isl_take isl_schedule_node *node, int permutable) |
1639 | { |
1640 | isl_schedule_tree *tree; |
1641 | |
1642 | if (!node) |
1643 | return NULL; |
1644 | if (isl_schedule_node_band_get_permutable(node) == permutable) |
1645 | return node; |
1646 | |
1647 | tree = isl_schedule_tree_copy(tree: node->tree); |
1648 | tree = isl_schedule_tree_band_set_permutable(tree, permutable); |
1649 | node = isl_schedule_node_graft_tree(pos: node, tree); |
1650 | |
1651 | return node; |
1652 | } |
1653 | |
1654 | /* Return the schedule space of the band node. |
1655 | */ |
1656 | __isl_give isl_space *isl_schedule_node_band_get_space( |
1657 | __isl_keep isl_schedule_node *node) |
1658 | { |
1659 | if (!node) |
1660 | return NULL; |
1661 | |
1662 | return isl_schedule_tree_band_get_space(tree: node->tree); |
1663 | } |
1664 | |
1665 | /* Return the schedule of the band node in isolation. |
1666 | */ |
1667 | __isl_give isl_multi_union_pw_aff *isl_schedule_node_band_get_partial_schedule( |
1668 | __isl_keep isl_schedule_node *node) |
1669 | { |
1670 | if (!node) |
1671 | return NULL; |
1672 | |
1673 | return isl_schedule_tree_band_get_partial_schedule(tree: node->tree); |
1674 | } |
1675 | |
1676 | /* Return the schedule of the band node in isolation in the form of |
1677 | * an isl_union_map. |
1678 | * |
1679 | * If the band does not have any members, then we construct a universe map |
1680 | * with the universe of the domain elements reaching the node as domain. |
1681 | * Otherwise, we extract an isl_multi_union_pw_aff representation and |
1682 | * convert that to an isl_union_map. |
1683 | */ |
1684 | __isl_give isl_union_map *isl_schedule_node_band_get_partial_schedule_union_map( |
1685 | __isl_keep isl_schedule_node *node) |
1686 | { |
1687 | isl_size n; |
1688 | isl_multi_union_pw_aff *mupa; |
1689 | |
1690 | if (!node) |
1691 | return NULL; |
1692 | |
1693 | if (isl_schedule_node_get_type(node) != isl_schedule_node_band) |
1694 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1695 | "not a band node" , return NULL); |
1696 | n = isl_schedule_node_band_n_member(node); |
1697 | if (n < 0) |
1698 | return NULL; |
1699 | if (n == 0) { |
1700 | isl_union_set *domain; |
1701 | |
1702 | domain = isl_schedule_node_get_universe_domain(node); |
1703 | return isl_union_map_from_domain(uset: domain); |
1704 | } |
1705 | |
1706 | mupa = isl_schedule_node_band_get_partial_schedule(node); |
1707 | return isl_union_map_from_multi_union_pw_aff(mupa); |
1708 | } |
1709 | |
1710 | /* Return the loop AST generation type for the band member of band node "node" |
1711 | * at position "pos". |
1712 | */ |
1713 | enum isl_ast_loop_type isl_schedule_node_band_member_get_ast_loop_type( |
1714 | __isl_keep isl_schedule_node *node, int pos) |
1715 | { |
1716 | if (!node) |
1717 | return isl_ast_loop_error; |
1718 | |
1719 | return isl_schedule_tree_band_member_get_ast_loop_type(tree: node->tree, pos); |
1720 | } |
1721 | |
1722 | /* Set the loop AST generation type for the band member of band node "node" |
1723 | * at position "pos" to "type". |
1724 | */ |
1725 | __isl_give isl_schedule_node *isl_schedule_node_band_member_set_ast_loop_type( |
1726 | __isl_take isl_schedule_node *node, int pos, |
1727 | enum isl_ast_loop_type type) |
1728 | { |
1729 | isl_schedule_tree *tree; |
1730 | |
1731 | if (!node) |
1732 | return NULL; |
1733 | |
1734 | tree = isl_schedule_tree_copy(tree: node->tree); |
1735 | tree = isl_schedule_tree_band_member_set_ast_loop_type(tree, pos, type); |
1736 | return isl_schedule_node_graft_tree(pos: node, tree); |
1737 | } |
1738 | |
1739 | /* Return the loop AST generation type for the band member of band node "node" |
1740 | * at position "pos" for the isolated part. |
1741 | */ |
1742 | enum isl_ast_loop_type isl_schedule_node_band_member_get_isolate_ast_loop_type( |
1743 | __isl_keep isl_schedule_node *node, int pos) |
1744 | { |
1745 | if (!node) |
1746 | return isl_ast_loop_error; |
1747 | |
1748 | return isl_schedule_tree_band_member_get_isolate_ast_loop_type( |
1749 | tree: node->tree, pos); |
1750 | } |
1751 | |
1752 | /* Set the loop AST generation type for the band member of band node "node" |
1753 | * at position "pos" for the isolated part to "type". |
1754 | */ |
1755 | __isl_give isl_schedule_node * |
1756 | isl_schedule_node_band_member_set_isolate_ast_loop_type( |
1757 | __isl_take isl_schedule_node *node, int pos, |
1758 | enum isl_ast_loop_type type) |
1759 | { |
1760 | isl_schedule_tree *tree; |
1761 | |
1762 | if (!node) |
1763 | return NULL; |
1764 | |
1765 | tree = isl_schedule_tree_copy(tree: node->tree); |
1766 | tree = isl_schedule_tree_band_member_set_isolate_ast_loop_type(tree, |
1767 | pos, type); |
1768 | return isl_schedule_node_graft_tree(pos: node, tree); |
1769 | } |
1770 | |
1771 | /* Return the AST build options associated to band node "node". |
1772 | */ |
1773 | __isl_give isl_union_set *isl_schedule_node_band_get_ast_build_options( |
1774 | __isl_keep isl_schedule_node *node) |
1775 | { |
1776 | if (!node) |
1777 | return NULL; |
1778 | |
1779 | return isl_schedule_tree_band_get_ast_build_options(tree: node->tree); |
1780 | } |
1781 | |
1782 | /* Replace the AST build options associated to band node "node" by "options". |
1783 | */ |
1784 | __isl_give isl_schedule_node *isl_schedule_node_band_set_ast_build_options( |
1785 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *options) |
1786 | { |
1787 | isl_schedule_tree *tree; |
1788 | |
1789 | if (!node || !options) |
1790 | goto error; |
1791 | |
1792 | tree = isl_schedule_tree_copy(tree: node->tree); |
1793 | tree = isl_schedule_tree_band_set_ast_build_options(tree, options); |
1794 | return isl_schedule_node_graft_tree(pos: node, tree); |
1795 | error: |
1796 | isl_schedule_node_free(node); |
1797 | isl_union_set_free(uset: options); |
1798 | return NULL; |
1799 | } |
1800 | |
1801 | /* Return the "isolate" option associated to band node "node". |
1802 | */ |
1803 | __isl_give isl_set *isl_schedule_node_band_get_ast_isolate_option( |
1804 | __isl_keep isl_schedule_node *node) |
1805 | { |
1806 | isl_size depth; |
1807 | |
1808 | depth = isl_schedule_node_get_schedule_depth(node); |
1809 | if (depth < 0) |
1810 | return NULL; |
1811 | |
1812 | return isl_schedule_tree_band_get_ast_isolate_option(tree: node->tree, depth); |
1813 | } |
1814 | |
/* Make sure that the spaces of "node" and "mv" are the same.
1816 | * Return -1 on error, reporting the error to the user. |
1817 | */ |
1818 | static int check_space_multi_val(__isl_keep isl_schedule_node *node, |
1819 | __isl_keep isl_multi_val *mv) |
1820 | { |
1821 | isl_space *node_space, *mv_space; |
1822 | int equal; |
1823 | |
1824 | node_space = isl_schedule_node_band_get_space(node); |
1825 | mv_space = isl_multi_val_get_space(multi: mv); |
1826 | equal = isl_space_tuple_is_equal(space1: node_space, type1: isl_dim_set, |
1827 | space2: mv_space, type2: isl_dim_set); |
1828 | isl_space_free(space: mv_space); |
1829 | isl_space_free(space: node_space); |
1830 | if (equal < 0) |
1831 | return -1; |
1832 | if (!equal) |
1833 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1834 | "spaces don't match" , return -1); |
1835 | |
1836 | return 0; |
1837 | } |
1838 | |
1839 | /* Multiply the partial schedule of the band node "node" |
1840 | * with the factors in "mv". |
1841 | */ |
1842 | __isl_give isl_schedule_node *isl_schedule_node_band_scale( |
1843 | __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv) |
1844 | { |
1845 | isl_schedule_tree *tree; |
1846 | int anchored; |
1847 | |
1848 | if (!node || !mv) |
1849 | goto error; |
1850 | if (check_space_multi_val(node, mv) < 0) |
1851 | goto error; |
1852 | anchored = isl_schedule_node_is_subtree_anchored(node); |
1853 | if (anchored < 0) |
1854 | goto error; |
1855 | if (anchored) |
1856 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1857 | "cannot scale band node with anchored subtree" , |
1858 | goto error); |
1859 | |
1860 | tree = isl_schedule_node_get_tree(node); |
1861 | tree = isl_schedule_tree_band_scale(tree, mv); |
1862 | return isl_schedule_node_graft_tree(pos: node, tree); |
1863 | error: |
1864 | isl_multi_val_free(multi: mv); |
1865 | isl_schedule_node_free(node); |
1866 | return NULL; |
1867 | } |
1868 | |
1869 | /* Divide the partial schedule of the band node "node" |
1870 | * by the factors in "mv". |
1871 | */ |
1872 | __isl_give isl_schedule_node *isl_schedule_node_band_scale_down( |
1873 | __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv) |
1874 | { |
1875 | isl_schedule_tree *tree; |
1876 | int anchored; |
1877 | |
1878 | if (!node || !mv) |
1879 | goto error; |
1880 | if (check_space_multi_val(node, mv) < 0) |
1881 | goto error; |
1882 | anchored = isl_schedule_node_is_subtree_anchored(node); |
1883 | if (anchored < 0) |
1884 | goto error; |
1885 | if (anchored) |
1886 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1887 | "cannot scale down band node with anchored subtree" , |
1888 | goto error); |
1889 | |
1890 | tree = isl_schedule_node_get_tree(node); |
1891 | tree = isl_schedule_tree_band_scale_down(tree, mv); |
1892 | return isl_schedule_node_graft_tree(pos: node, tree); |
1893 | error: |
1894 | isl_multi_val_free(multi: mv); |
1895 | isl_schedule_node_free(node); |
1896 | return NULL; |
1897 | } |
1898 | |
1899 | /* Reduce the partial schedule of the band node "node" |
1900 | * modulo the factors in "mv". |
1901 | */ |
1902 | __isl_give isl_schedule_node *isl_schedule_node_band_mod( |
1903 | __isl_take isl_schedule_node *node, __isl_take isl_multi_val *mv) |
1904 | { |
1905 | isl_schedule_tree *tree; |
1906 | isl_bool anchored; |
1907 | |
1908 | if (!node || !mv) |
1909 | goto error; |
1910 | if (check_space_multi_val(node, mv) < 0) |
1911 | goto error; |
1912 | anchored = isl_schedule_node_is_subtree_anchored(node); |
1913 | if (anchored < 0) |
1914 | goto error; |
1915 | if (anchored) |
1916 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1917 | "cannot perform mod on band node with anchored subtree" , |
1918 | goto error); |
1919 | |
1920 | tree = isl_schedule_node_get_tree(node); |
1921 | tree = isl_schedule_tree_band_mod(tree, mv); |
1922 | return isl_schedule_node_graft_tree(pos: node, tree); |
1923 | error: |
1924 | isl_multi_val_free(multi: mv); |
1925 | isl_schedule_node_free(node); |
1926 | return NULL; |
1927 | } |
1928 | |
/* Make sure that the spaces of "node" and "mupa" are the same.
1930 | * Return isl_stat_error on error, reporting the error to the user. |
1931 | */ |
1932 | static isl_stat check_space_multi_union_pw_aff( |
1933 | __isl_keep isl_schedule_node *node, |
1934 | __isl_keep isl_multi_union_pw_aff *mupa) |
1935 | { |
1936 | isl_space *node_space, *mupa_space; |
1937 | isl_bool equal; |
1938 | |
1939 | node_space = isl_schedule_node_band_get_space(node); |
1940 | mupa_space = isl_multi_union_pw_aff_get_space(multi: mupa); |
1941 | equal = isl_space_tuple_is_equal(space1: node_space, type1: isl_dim_set, |
1942 | space2: mupa_space, type2: isl_dim_set); |
1943 | isl_space_free(space: mupa_space); |
1944 | isl_space_free(space: node_space); |
1945 | if (equal < 0) |
1946 | return isl_stat_error; |
1947 | if (!equal) |
1948 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1949 | "spaces don't match" , return isl_stat_error); |
1950 | |
1951 | return isl_stat_ok; |
1952 | } |
1953 | |
1954 | /* Shift the partial schedule of the band node "node" by "shift". |
1955 | */ |
1956 | __isl_give isl_schedule_node *isl_schedule_node_band_shift( |
1957 | __isl_take isl_schedule_node *node, |
1958 | __isl_take isl_multi_union_pw_aff *shift) |
1959 | { |
1960 | isl_schedule_tree *tree; |
1961 | int anchored; |
1962 | |
1963 | if (!node || !shift) |
1964 | goto error; |
1965 | if (check_space_multi_union_pw_aff(node, mupa: shift) < 0) |
1966 | goto error; |
1967 | anchored = isl_schedule_node_is_subtree_anchored(node); |
1968 | if (anchored < 0) |
1969 | goto error; |
1970 | if (anchored) |
1971 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
1972 | "cannot shift band node with anchored subtree" , |
1973 | goto error); |
1974 | |
1975 | tree = isl_schedule_node_get_tree(node); |
1976 | tree = isl_schedule_tree_band_shift(tree, shift); |
1977 | return isl_schedule_node_graft_tree(pos: node, tree); |
1978 | error: |
1979 | isl_multi_union_pw_aff_free(multi: shift); |
1980 | isl_schedule_node_free(node); |
1981 | return NULL; |
1982 | } |
1983 | |
1984 | /* Tile "node" with tile sizes "sizes". |
1985 | * |
1986 | * The current node is replaced by two nested nodes corresponding |
1987 | * to the tile dimensions and the point dimensions. |
1988 | * |
1989 | * Return a pointer to the outer (tile) node. |
1990 | * |
1991 | * If any of the descendants of "node" depend on the set of outer band nodes, |
1992 | * then we refuse to tile the node. |
1993 | * |
1994 | * If the scale tile loops option is set, then the tile loops |
1995 | * are scaled by the tile sizes. If the shift point loops option is set, |
1996 | * then the point loops are shifted to start at zero. |
1997 | * In particular, these options affect the tile and point loop schedules |
1998 | * as follows |
1999 | * |
2000 | * scale shift original tile point |
2001 | * |
2002 | * 0 0 i floor(i/s) i |
2003 | * 1 0 i s * floor(i/s) i |
2004 | * 0 1 i floor(i/s) i - s * floor(i/s) |
2005 | * 1 1 i s * floor(i/s) i - s * floor(i/s) |
2006 | */ |
2007 | __isl_give isl_schedule_node *isl_schedule_node_band_tile( |
2008 | __isl_take isl_schedule_node *node, __isl_take isl_multi_val *sizes) |
2009 | { |
2010 | isl_schedule_tree *tree; |
2011 | int anchored; |
2012 | |
2013 | if (!node || !sizes) |
2014 | goto error; |
2015 | anchored = isl_schedule_node_is_subtree_anchored(node); |
2016 | if (anchored < 0) |
2017 | goto error; |
2018 | if (anchored) |
2019 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2020 | "cannot tile band node with anchored subtree" , |
2021 | goto error); |
2022 | |
2023 | if (check_space_multi_val(node, mv: sizes) < 0) |
2024 | goto error; |
2025 | |
2026 | tree = isl_schedule_node_get_tree(node); |
2027 | tree = isl_schedule_tree_band_tile(tree, sizes); |
2028 | return isl_schedule_node_graft_tree(pos: node, tree); |
2029 | error: |
2030 | isl_multi_val_free(multi: sizes); |
2031 | isl_schedule_node_free(node); |
2032 | return NULL; |
2033 | } |
2034 | |
2035 | /* Move the band node "node" down to all the leaves in the subtree |
2036 | * rooted at "node". |
2037 | * Return a pointer to the node in the resulting tree that is in the same |
2038 | * position as the node pointed to by "node" in the original tree. |
2039 | * |
2040 | * If the node only has a leaf child, then nothing needs to be done. |
2041 | * Otherwise, the child of the node is removed and the result is |
2042 | * appended to all the leaves in the subtree rooted at the original child. |
2043 | * Since the node is moved to the leaves, it needs to be expanded |
2044 | * according to the expansion, if any, defined by that subtree. |
2045 | * In the end, the original node is replaced by the result of |
2046 | * attaching copies of the expanded node to the leaves. |
2047 | * |
2048 | * If any of the nodes in the subtree rooted at "node" depend on |
2049 | * the set of outer band nodes then we refuse to sink the band node. |
2050 | */ |
2051 | __isl_give isl_schedule_node *isl_schedule_node_band_sink( |
2052 | __isl_take isl_schedule_node *node) |
2053 | { |
2054 | enum isl_schedule_node_type type; |
2055 | isl_schedule_tree *tree, *child; |
2056 | isl_union_pw_multi_aff *contraction; |
2057 | isl_bool anchored; |
2058 | isl_size n; |
2059 | |
2060 | if (!node) |
2061 | return NULL; |
2062 | |
2063 | type = isl_schedule_node_get_type(node); |
2064 | if (type != isl_schedule_node_band) |
2065 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2066 | "not a band node" , return isl_schedule_node_free(node)); |
2067 | anchored = isl_schedule_node_is_subtree_anchored(node); |
2068 | if (anchored < 0) |
2069 | return isl_schedule_node_free(node); |
2070 | if (anchored) |
2071 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2072 | "cannot sink band node in anchored subtree" , |
2073 | return isl_schedule_node_free(node)); |
2074 | n = isl_schedule_tree_n_children(tree: node->tree); |
2075 | if (n < 0) |
2076 | return isl_schedule_node_free(node); |
2077 | if (n == 0) |
2078 | return node; |
2079 | |
2080 | contraction = isl_schedule_node_get_subtree_contraction(node); |
2081 | |
2082 | tree = isl_schedule_node_get_tree(node); |
2083 | child = isl_schedule_tree_get_child(tree, pos: 0); |
2084 | tree = isl_schedule_tree_reset_children(tree); |
2085 | tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, upma: contraction); |
2086 | tree = isl_schedule_tree_append_to_leaves(tree1: child, tree2: tree); |
2087 | |
2088 | return isl_schedule_node_graft_tree(pos: node, tree); |
2089 | } |
2090 | |
2091 | /* Split "node" into two nested band nodes, one with the first "pos" |
2092 | * dimensions and one with the remaining dimensions. |
2093 | * The schedules of the two band nodes live in anonymous spaces. |
2094 | * The loop AST generation type options and the isolate option |
2095 | * are split over the two band nodes. |
2096 | */ |
2097 | __isl_give isl_schedule_node *isl_schedule_node_band_split( |
2098 | __isl_take isl_schedule_node *node, int pos) |
2099 | { |
2100 | isl_size depth; |
2101 | isl_schedule_tree *tree; |
2102 | |
2103 | depth = isl_schedule_node_get_schedule_depth(node); |
2104 | if (depth < 0) |
2105 | return isl_schedule_node_free(node); |
2106 | tree = isl_schedule_node_get_tree(node); |
2107 | tree = isl_schedule_tree_band_split(tree, pos, depth); |
2108 | return isl_schedule_node_graft_tree(pos: node, tree); |
2109 | } |
2110 | |
2111 | /* Return the context of the context node "node". |
2112 | */ |
2113 | __isl_give isl_set *isl_schedule_node_context_get_context( |
2114 | __isl_keep isl_schedule_node *node) |
2115 | { |
2116 | if (!node) |
2117 | return NULL; |
2118 | |
2119 | return isl_schedule_tree_context_get_context(tree: node->tree); |
2120 | } |
2121 | |
2122 | /* Return the domain of the domain node "node". |
2123 | */ |
2124 | __isl_give isl_union_set *isl_schedule_node_domain_get_domain( |
2125 | __isl_keep isl_schedule_node *node) |
2126 | { |
2127 | if (!node) |
2128 | return NULL; |
2129 | |
2130 | return isl_schedule_tree_domain_get_domain(tree: node->tree); |
2131 | } |
2132 | |
2133 | /* Return the expansion map of expansion node "node". |
2134 | */ |
2135 | __isl_give isl_union_map *isl_schedule_node_expansion_get_expansion( |
2136 | __isl_keep isl_schedule_node *node) |
2137 | { |
2138 | if (!node) |
2139 | return NULL; |
2140 | |
2141 | return isl_schedule_tree_expansion_get_expansion(tree: node->tree); |
2142 | } |
2143 | |
2144 | /* Return the contraction of expansion node "node". |
2145 | */ |
2146 | __isl_give isl_union_pw_multi_aff *isl_schedule_node_expansion_get_contraction( |
2147 | __isl_keep isl_schedule_node *node) |
2148 | { |
2149 | if (!node) |
2150 | return NULL; |
2151 | |
2152 | return isl_schedule_tree_expansion_get_contraction(tree: node->tree); |
2153 | } |
2154 | |
2155 | /* Replace the contraction and the expansion of the expansion node "node" |
2156 | * by "contraction" and "expansion". |
2157 | */ |
2158 | __isl_give isl_schedule_node * |
2159 | isl_schedule_node_expansion_set_contraction_and_expansion( |
2160 | __isl_take isl_schedule_node *node, |
2161 | __isl_take isl_union_pw_multi_aff *contraction, |
2162 | __isl_take isl_union_map *expansion) |
2163 | { |
2164 | isl_schedule_tree *tree; |
2165 | |
2166 | if (!node || !contraction || !expansion) |
2167 | goto error; |
2168 | |
2169 | tree = isl_schedule_tree_copy(tree: node->tree); |
2170 | tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree, |
2171 | contraction, expansion); |
2172 | return isl_schedule_node_graft_tree(pos: node, tree); |
2173 | error: |
2174 | isl_schedule_node_free(node); |
2175 | isl_union_pw_multi_aff_free(upma: contraction); |
2176 | isl_union_map_free(umap: expansion); |
2177 | return NULL; |
2178 | } |
2179 | |
2180 | /* Return the extension of the extension node "node". |
2181 | */ |
2182 | __isl_give isl_union_map *isl_schedule_node_extension_get_extension( |
2183 | __isl_keep isl_schedule_node *node) |
2184 | { |
2185 | if (!node) |
2186 | return NULL; |
2187 | |
2188 | return isl_schedule_tree_extension_get_extension(tree: node->tree); |
2189 | } |
2190 | |
2191 | /* Replace the extension of extension node "node" by "extension". |
2192 | */ |
2193 | __isl_give isl_schedule_node *isl_schedule_node_extension_set_extension( |
2194 | __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension) |
2195 | { |
2196 | isl_schedule_tree *tree; |
2197 | |
2198 | if (!node || !extension) |
2199 | goto error; |
2200 | |
2201 | tree = isl_schedule_tree_copy(tree: node->tree); |
2202 | tree = isl_schedule_tree_extension_set_extension(tree, extension); |
2203 | return isl_schedule_node_graft_tree(pos: node, tree); |
2204 | error: |
2205 | isl_schedule_node_free(node); |
2206 | isl_union_map_free(umap: extension); |
2207 | return NULL; |
2208 | } |
2209 | |
2210 | /* Return the filter of the filter node "node". |
2211 | */ |
2212 | __isl_give isl_union_set *isl_schedule_node_filter_get_filter( |
2213 | __isl_keep isl_schedule_node *node) |
2214 | { |
2215 | if (!node) |
2216 | return NULL; |
2217 | |
2218 | return isl_schedule_tree_filter_get_filter(tree: node->tree); |
2219 | } |
2220 | |
2221 | /* Replace the filter of filter node "node" by "filter". |
2222 | */ |
2223 | __isl_give isl_schedule_node *isl_schedule_node_filter_set_filter( |
2224 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter) |
2225 | { |
2226 | isl_schedule_tree *tree; |
2227 | |
2228 | if (!node || !filter) |
2229 | goto error; |
2230 | |
2231 | tree = isl_schedule_tree_copy(tree: node->tree); |
2232 | tree = isl_schedule_tree_filter_set_filter(tree, filter); |
2233 | return isl_schedule_node_graft_tree(pos: node, tree); |
2234 | error: |
2235 | isl_schedule_node_free(node); |
2236 | isl_union_set_free(uset: filter); |
2237 | return NULL; |
2238 | } |
2239 | |
2240 | /* Intersect the filter of filter node "node" with "filter". |
2241 | * |
2242 | * If the filter of the node is already a subset of "filter", |
2243 | * then leave the node unchanged. |
2244 | */ |
2245 | __isl_give isl_schedule_node *isl_schedule_node_filter_intersect_filter( |
2246 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter) |
2247 | { |
2248 | isl_union_set *node_filter = NULL; |
2249 | isl_bool subset; |
2250 | |
2251 | if (!node || !filter) |
2252 | goto error; |
2253 | |
2254 | node_filter = isl_schedule_node_filter_get_filter(node); |
2255 | subset = isl_union_set_is_subset(uset1: node_filter, uset2: filter); |
2256 | if (subset < 0) |
2257 | goto error; |
2258 | if (subset) { |
2259 | isl_union_set_free(uset: node_filter); |
2260 | isl_union_set_free(uset: filter); |
2261 | return node; |
2262 | } |
2263 | node_filter = isl_union_set_intersect(uset1: node_filter, uset2: filter); |
2264 | node = isl_schedule_node_filter_set_filter(node, filter: node_filter); |
2265 | return node; |
2266 | error: |
2267 | isl_schedule_node_free(node); |
2268 | isl_union_set_free(uset: node_filter); |
2269 | isl_union_set_free(uset: filter); |
2270 | return NULL; |
2271 | } |
2272 | |
2273 | /* Return the guard of the guard node "node". |
2274 | */ |
2275 | __isl_give isl_set *isl_schedule_node_guard_get_guard( |
2276 | __isl_keep isl_schedule_node *node) |
2277 | { |
2278 | if (!node) |
2279 | return NULL; |
2280 | |
2281 | return isl_schedule_tree_guard_get_guard(tree: node->tree); |
2282 | } |
2283 | |
2284 | /* Return the mark identifier of the mark node "node". |
2285 | */ |
2286 | __isl_give isl_id *isl_schedule_node_mark_get_id( |
2287 | __isl_keep isl_schedule_node *node) |
2288 | { |
2289 | if (!node) |
2290 | return NULL; |
2291 | |
2292 | return isl_schedule_tree_mark_get_id(tree: node->tree); |
2293 | } |
2294 | |
2295 | /* Check that "node" is a sequence node. |
2296 | */ |
2297 | static isl_stat check_is_sequence(__isl_keep isl_schedule_node *node) |
2298 | { |
2299 | if (!node) |
2300 | return isl_stat_error; |
2301 | |
2302 | if (isl_schedule_node_get_type(node) != isl_schedule_node_sequence) |
2303 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2304 | "not a sequence node" , return isl_stat_error); |
2305 | |
2306 | return isl_stat_ok; |
2307 | } |
2308 | |
2309 | /* Replace the child at position "pos" of the sequence node "node" |
 * by the children of the sequence root node of "tree".
2311 | */ |
2312 | __isl_give isl_schedule_node *isl_schedule_node_sequence_splice( |
2313 | __isl_take isl_schedule_node *node, int pos, |
2314 | __isl_take isl_schedule_tree *tree) |
2315 | { |
2316 | isl_schedule_tree *node_tree; |
2317 | |
2318 | if (check_is_sequence(node) < 0 || !tree) |
2319 | goto error; |
2320 | if (isl_schedule_tree_get_type(tree) != isl_schedule_node_sequence) |
2321 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2322 | "not a sequence node" , goto error); |
2323 | node_tree = isl_schedule_node_get_tree(node); |
2324 | node_tree = isl_schedule_tree_sequence_splice(tree: node_tree, pos, child: tree); |
2325 | node = isl_schedule_node_graft_tree(pos: node, tree: node_tree); |
2326 | |
2327 | return node; |
2328 | error: |
2329 | isl_schedule_node_free(node); |
2330 | isl_schedule_tree_free(tree); |
2331 | return NULL; |
2332 | } |
2333 | |
2334 | /* Given a sequence node "node", with a child at position "pos" that |
2335 | * is also a sequence node, attach the children of that node directly |
2336 | * as children of "node" at that position, replacing the original child. |
2337 | * |
2338 | * The filters of these children are intersected with the filter |
2339 | * of the child at position "pos". |
2340 | */ |
2341 | __isl_give isl_schedule_node *isl_schedule_node_sequence_splice_child( |
2342 | __isl_take isl_schedule_node *node, int pos) |
2343 | { |
2344 | int i; |
2345 | isl_size n; |
2346 | isl_union_set *filter; |
2347 | isl_schedule_node *child; |
2348 | isl_schedule_tree *tree; |
2349 | |
2350 | if (check_is_sequence(node) < 0) |
2351 | return isl_schedule_node_free(node); |
2352 | node = isl_schedule_node_grandchild(node, pos1: pos, pos2: 0); |
2353 | if (check_is_sequence(node) < 0) |
2354 | return isl_schedule_node_free(node); |
2355 | n = isl_schedule_node_n_children(node); |
2356 | if (n < 0) |
2357 | return isl_schedule_node_free(node); |
2358 | child = isl_schedule_node_copy(node); |
2359 | node = isl_schedule_node_parent(node); |
2360 | filter = isl_schedule_node_filter_get_filter(node); |
2361 | for (i = 0; i < n; ++i) { |
2362 | child = isl_schedule_node_child(node: child, pos: i); |
2363 | child = isl_schedule_node_filter_intersect_filter(node: child, |
2364 | filter: isl_union_set_copy(uset: filter)); |
2365 | child = isl_schedule_node_parent(node: child); |
2366 | } |
2367 | isl_union_set_free(uset: filter); |
2368 | tree = isl_schedule_node_get_tree(node: child); |
2369 | isl_schedule_node_free(node: child); |
2370 | node = isl_schedule_node_parent(node); |
2371 | node = isl_schedule_node_sequence_splice(node, pos, tree); |
2372 | |
2373 | return node; |
2374 | } |
2375 | |
2376 | /* Given a sequence node "node", for each child that is also |
2377 | * (the parent of) a sequence node, attach the children of that node directly |
2378 | * as children of "node" at the position of the child, |
2379 | * replacing this original child. |
2380 | * |
2381 | * Since splicing in a child may change the positions of later children, |
2382 | * iterate through the children from last to first. |
2383 | */ |
2384 | __isl_give isl_schedule_node *isl_schedule_node_sequence_splice_children( |
2385 | __isl_take isl_schedule_node *node) |
2386 | { |
2387 | int i; |
2388 | isl_size n; |
2389 | |
2390 | if (check_is_sequence(node) < 0) |
2391 | return isl_schedule_node_free(node); |
2392 | n = isl_schedule_node_n_children(node); |
2393 | if (n < 0) |
2394 | return isl_schedule_node_free(node); |
2395 | |
2396 | for (i = n - 1; i >= 0; --i) { |
2397 | enum isl_schedule_node_type type; |
2398 | int is_seq; |
2399 | |
2400 | node = isl_schedule_node_grandchild(node, pos1: i, pos2: 0); |
2401 | type = isl_schedule_node_get_type(node); |
2402 | if (type < 0) |
2403 | return isl_schedule_node_free(node); |
2404 | is_seq = type == isl_schedule_node_sequence; |
2405 | node = isl_schedule_node_grandparent(node); |
2406 | |
2407 | if (!is_seq) |
2408 | continue; |
2409 | |
2410 | node = isl_schedule_node_sequence_splice_child(node, pos: i); |
2411 | } |
2412 | |
2413 | return node; |
2414 | } |
2415 | |
2416 | /* Update the ancestors of "node" to point to the tree that "node" |
2417 | * now points to. |
2418 | * That is, replace the child in the original parent that corresponds |
2419 | * to the current tree position by node->tree and continue updating |
2420 | * the ancestors in the same way until the root is reached. |
2421 | * |
2422 | * If "fn" is not NULL, then it is called on each ancestor as we move up |
2423 | * the tree so that it can modify the ancestor before it is added |
2424 | * to the list of ancestors of the modified node. |
2425 | * The additional "pos" argument records the position |
2426 | * of the "tree" argument in the original schedule tree. |
2427 | * |
2428 | * If "node" originally points to a leaf of the schedule tree, then make sure |
2429 | * that in the end it points to a leaf in the updated schedule tree. |
2430 | */ |
2431 | static __isl_give isl_schedule_node *update_ancestors( |
2432 | __isl_take isl_schedule_node *node, |
2433 | __isl_give isl_schedule_tree *(*fn)(__isl_take isl_schedule_tree *tree, |
2434 | __isl_keep isl_schedule_node *pos, void *user), void *user) |
2435 | { |
2436 | int i; |
2437 | isl_size n; |
2438 | int is_leaf; |
2439 | isl_schedule_tree *tree; |
2440 | isl_schedule_node *pos = NULL; |
2441 | |
2442 | if (fn) |
2443 | pos = isl_schedule_node_copy(node); |
2444 | |
2445 | node = isl_schedule_node_cow(node); |
2446 | if (!node) |
2447 | return isl_schedule_node_free(node: pos); |
2448 | |
2449 | n = isl_schedule_tree_list_n_schedule_tree(list: node->ancestors); |
2450 | if (n < 0) |
2451 | return isl_schedule_node_free(node: pos); |
2452 | tree = isl_schedule_tree_copy(tree: node->tree); |
2453 | |
2454 | for (i = n - 1; i >= 0; --i) { |
2455 | isl_schedule_tree *parent; |
2456 | |
2457 | parent = isl_schedule_tree_list_get_schedule_tree( |
2458 | list: node->ancestors, index: i); |
2459 | parent = isl_schedule_tree_replace_child(tree: parent, |
2460 | pos: node->child_pos[i], new_child: tree); |
2461 | if (fn) { |
2462 | pos = isl_schedule_node_parent(node: pos); |
2463 | parent = fn(parent, pos, user); |
2464 | } |
2465 | node->ancestors = isl_schedule_tree_list_set_schedule_tree( |
2466 | list: node->ancestors, index: i, el: isl_schedule_tree_copy(tree: parent)); |
2467 | |
2468 | tree = parent; |
2469 | } |
2470 | |
2471 | if (fn) |
2472 | isl_schedule_node_free(node: pos); |
2473 | |
2474 | is_leaf = isl_schedule_tree_is_leaf(tree: node->tree); |
2475 | node->schedule = isl_schedule_set_root(schedule: node->schedule, tree); |
2476 | if (is_leaf) { |
2477 | isl_schedule_tree_free(tree: node->tree); |
2478 | node->tree = isl_schedule_node_get_leaf(node); |
2479 | } |
2480 | |
2481 | if (!node->schedule || !node->ancestors) |
2482 | return isl_schedule_node_free(node); |
2483 | |
2484 | return node; |
2485 | } |
2486 | |
2487 | /* Replace the subtree that "pos" points to by "tree", updating |
2488 | * the ancestors to maintain a consistent state. |
2489 | */ |
2490 | __isl_give isl_schedule_node *isl_schedule_node_graft_tree( |
2491 | __isl_take isl_schedule_node *pos, __isl_take isl_schedule_tree *tree) |
2492 | { |
2493 | if (!tree || !pos) |
2494 | goto error; |
2495 | if (pos->tree == tree) { |
2496 | isl_schedule_tree_free(tree); |
2497 | return pos; |
2498 | } |
2499 | |
2500 | pos = isl_schedule_node_cow(node: pos); |
2501 | if (!pos) |
2502 | goto error; |
2503 | |
2504 | isl_schedule_tree_free(tree: pos->tree); |
2505 | pos->tree = tree; |
2506 | |
2507 | return update_ancestors(node: pos, NULL, NULL); |
2508 | error: |
2509 | isl_schedule_node_free(node: pos); |
2510 | isl_schedule_tree_free(tree); |
2511 | return NULL; |
2512 | } |
2513 | |
2514 | /* Make sure we can insert a node between "node" and its parent. |
2515 | * Return -1 on error, reporting the reason why we cannot insert a node. |
2516 | */ |
2517 | static int check_insert(__isl_keep isl_schedule_node *node) |
2518 | { |
2519 | int has_parent; |
2520 | enum isl_schedule_node_type type; |
2521 | |
2522 | has_parent = isl_schedule_node_has_parent(node); |
2523 | if (has_parent < 0) |
2524 | return -1; |
2525 | if (!has_parent) |
2526 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2527 | "cannot insert node outside of root" , return -1); |
2528 | |
2529 | type = isl_schedule_node_get_parent_type(node); |
2530 | if (type == isl_schedule_node_error) |
2531 | return -1; |
2532 | if (type == isl_schedule_node_set || type == isl_schedule_node_sequence) |
2533 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2534 | "cannot insert node between set or sequence node " |
2535 | "and its filter children" , return -1); |
2536 | |
2537 | return 0; |
2538 | } |
2539 | |
2540 | /* Insert a band node with partial schedule "mupa" between "node" and |
2541 | * its parent. |
2542 | * Return a pointer to the new band node. |
2543 | * |
2544 | * If any of the nodes in the subtree rooted at "node" depend on |
2545 | * the set of outer band nodes then we refuse to insert the band node. |
2546 | */ |
2547 | __isl_give isl_schedule_node *isl_schedule_node_insert_partial_schedule( |
2548 | __isl_take isl_schedule_node *node, |
2549 | __isl_take isl_multi_union_pw_aff *mupa) |
2550 | { |
2551 | int anchored; |
2552 | isl_schedule_band *band; |
2553 | isl_schedule_tree *tree; |
2554 | |
2555 | if (check_insert(node) < 0) |
2556 | node = isl_schedule_node_free(node); |
2557 | anchored = isl_schedule_node_is_subtree_anchored(node); |
2558 | if (anchored < 0) |
2559 | goto error; |
2560 | if (anchored) |
2561 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2562 | "cannot insert band node in anchored subtree" , |
2563 | goto error); |
2564 | |
2565 | tree = isl_schedule_node_get_tree(node); |
2566 | band = isl_schedule_band_from_multi_union_pw_aff(mupa); |
2567 | tree = isl_schedule_tree_insert_band(tree, band); |
2568 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2569 | |
2570 | return node; |
2571 | error: |
2572 | isl_schedule_node_free(node); |
2573 | isl_multi_union_pw_aff_free(multi: mupa); |
2574 | return NULL; |
2575 | } |
2576 | |
2577 | /* Insert a context node with context "context" between "node" and its parent. |
2578 | * Return a pointer to the new context node. |
2579 | */ |
2580 | __isl_give isl_schedule_node *isl_schedule_node_insert_context( |
2581 | __isl_take isl_schedule_node *node, __isl_take isl_set *context) |
2582 | { |
2583 | isl_schedule_tree *tree; |
2584 | |
2585 | if (check_insert(node) < 0) |
2586 | node = isl_schedule_node_free(node); |
2587 | |
2588 | tree = isl_schedule_node_get_tree(node); |
2589 | tree = isl_schedule_tree_insert_context(tree, context); |
2590 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2591 | |
2592 | return node; |
2593 | } |
2594 | |
2595 | /* Insert an expansion node with the given "contraction" and "expansion" |
2596 | * between "node" and its parent. |
2597 | * Return a pointer to the new expansion node. |
2598 | * |
2599 | * Typically the domain and range spaces of the expansion are different. |
2600 | * This means that only one of them can refer to the current domain space |
2601 | * in a consistent tree. It is up to the caller to ensure that the tree |
2602 | * returns to a consistent state. |
2603 | */ |
2604 | __isl_give isl_schedule_node *isl_schedule_node_insert_expansion( |
2605 | __isl_take isl_schedule_node *node, |
2606 | __isl_take isl_union_pw_multi_aff *contraction, |
2607 | __isl_take isl_union_map *expansion) |
2608 | { |
2609 | isl_schedule_tree *tree; |
2610 | |
2611 | if (check_insert(node) < 0) |
2612 | node = isl_schedule_node_free(node); |
2613 | |
2614 | tree = isl_schedule_node_get_tree(node); |
2615 | tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion); |
2616 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2617 | |
2618 | return node; |
2619 | } |
2620 | |
2621 | /* Insert an extension node with extension "extension" between "node" and |
2622 | * its parent. |
2623 | * Return a pointer to the new extension node. |
2624 | */ |
2625 | __isl_give isl_schedule_node *isl_schedule_node_insert_extension( |
2626 | __isl_take isl_schedule_node *node, |
2627 | __isl_take isl_union_map *extension) |
2628 | { |
2629 | isl_schedule_tree *tree; |
2630 | |
2631 | tree = isl_schedule_node_get_tree(node); |
2632 | tree = isl_schedule_tree_insert_extension(tree, extension); |
2633 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2634 | |
2635 | return node; |
2636 | } |
2637 | |
2638 | /* Insert a filter node with filter "filter" between "node" and its parent. |
2639 | * Return a pointer to the new filter node. |
2640 | */ |
2641 | __isl_give isl_schedule_node *isl_schedule_node_insert_filter( |
2642 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *filter) |
2643 | { |
2644 | isl_schedule_tree *tree; |
2645 | |
2646 | if (check_insert(node) < 0) |
2647 | node = isl_schedule_node_free(node); |
2648 | |
2649 | tree = isl_schedule_node_get_tree(node); |
2650 | tree = isl_schedule_tree_insert_filter(tree, filter); |
2651 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2652 | |
2653 | return node; |
2654 | } |
2655 | |
2656 | /* Insert a guard node with guard "guard" between "node" and its parent. |
2657 | * Return a pointer to the new guard node. |
2658 | */ |
2659 | __isl_give isl_schedule_node *isl_schedule_node_insert_guard( |
2660 | __isl_take isl_schedule_node *node, __isl_take isl_set *guard) |
2661 | { |
2662 | isl_schedule_tree *tree; |
2663 | |
2664 | if (check_insert(node) < 0) |
2665 | node = isl_schedule_node_free(node); |
2666 | |
2667 | tree = isl_schedule_node_get_tree(node); |
2668 | tree = isl_schedule_tree_insert_guard(tree, guard); |
2669 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2670 | |
2671 | return node; |
2672 | } |
2673 | |
2674 | /* Insert a mark node with mark identifier "mark" between "node" and |
2675 | * its parent. |
2676 | * Return a pointer to the new mark node. |
2677 | */ |
2678 | __isl_give isl_schedule_node *isl_schedule_node_insert_mark( |
2679 | __isl_take isl_schedule_node *node, __isl_take isl_id *mark) |
2680 | { |
2681 | isl_schedule_tree *tree; |
2682 | |
2683 | if (check_insert(node) < 0) |
2684 | node = isl_schedule_node_free(node); |
2685 | |
2686 | tree = isl_schedule_node_get_tree(node); |
2687 | tree = isl_schedule_tree_insert_mark(tree, mark); |
2688 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2689 | |
2690 | return node; |
2691 | } |
2692 | |
2693 | /* Attach the current subtree of "node" to a sequence of filter tree nodes |
2694 | * with filters described by "filters", attach this sequence |
2695 | * of filter tree nodes as children to a new tree of type "type" and |
2696 | * replace the original subtree of "node" by this new tree. |
2697 | * Each copy of the original subtree is simplified with respect |
2698 | * to the corresponding filter. |
2699 | */ |
2700 | static __isl_give isl_schedule_node *isl_schedule_node_insert_children( |
2701 | __isl_take isl_schedule_node *node, |
2702 | enum isl_schedule_node_type type, |
2703 | __isl_take isl_union_set_list *filters) |
2704 | { |
2705 | int i; |
2706 | isl_size n; |
2707 | isl_ctx *ctx; |
2708 | isl_schedule_tree *tree; |
2709 | isl_schedule_tree_list *list; |
2710 | |
2711 | if (check_insert(node) < 0) |
2712 | node = isl_schedule_node_free(node); |
2713 | |
2714 | n = isl_union_set_list_n_union_set(list: filters); |
2715 | if (!node || n < 0) |
2716 | goto error; |
2717 | |
2718 | ctx = isl_schedule_node_get_ctx(node); |
2719 | list = isl_schedule_tree_list_alloc(ctx, n); |
2720 | for (i = 0; i < n; ++i) { |
2721 | isl_schedule_node *node_i; |
2722 | isl_schedule_tree *tree; |
2723 | isl_union_set *filter; |
2724 | |
2725 | filter = isl_union_set_list_get_union_set(list: filters, index: i); |
2726 | node_i = isl_schedule_node_copy(node); |
2727 | node_i = isl_schedule_node_gist(node: node_i, |
2728 | context: isl_union_set_copy(uset: filter)); |
2729 | tree = isl_schedule_node_get_tree(node: node_i); |
2730 | isl_schedule_node_free(node: node_i); |
2731 | tree = isl_schedule_tree_insert_filter(tree, filter); |
2732 | list = isl_schedule_tree_list_add(list, el: tree); |
2733 | } |
2734 | tree = isl_schedule_tree_from_children(type, list); |
2735 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2736 | |
2737 | isl_union_set_list_free(list: filters); |
2738 | return node; |
2739 | error: |
2740 | isl_union_set_list_free(list: filters); |
2741 | isl_schedule_node_free(node); |
2742 | return NULL; |
2743 | } |
2744 | |
2745 | /* Insert a sequence node with child filters "filters" between "node" and |
2746 | * its parent. That is, the tree that "node" points to is attached |
2747 | * to each of the child nodes of the filter nodes. |
2748 | * Return a pointer to the new sequence node. |
2749 | */ |
2750 | __isl_give isl_schedule_node *isl_schedule_node_insert_sequence( |
2751 | __isl_take isl_schedule_node *node, |
2752 | __isl_take isl_union_set_list *filters) |
2753 | { |
2754 | return isl_schedule_node_insert_children(node, |
2755 | type: isl_schedule_node_sequence, filters); |
2756 | } |
2757 | |
2758 | /* Insert a set node with child filters "filters" between "node" and |
2759 | * its parent. That is, the tree that "node" points to is attached |
2760 | * to each of the child nodes of the filter nodes. |
2761 | * Return a pointer to the new set node. |
2762 | */ |
2763 | __isl_give isl_schedule_node *isl_schedule_node_insert_set( |
2764 | __isl_take isl_schedule_node *node, |
2765 | __isl_take isl_union_set_list *filters) |
2766 | { |
2767 | return isl_schedule_node_insert_children(node, |
2768 | type: isl_schedule_node_set, filters); |
2769 | } |
2770 | |
2771 | /* Remove "node" from its schedule tree and return a pointer |
2772 | * to the leaf at the same position in the updated schedule tree. |
2773 | * |
2774 | * It is not allowed to remove the root of a schedule tree or |
2775 | * a child of a set or sequence node. |
2776 | */ |
2777 | __isl_give isl_schedule_node *isl_schedule_node_cut( |
2778 | __isl_take isl_schedule_node *node) |
2779 | { |
2780 | isl_schedule_tree *leaf; |
2781 | enum isl_schedule_node_type parent_type; |
2782 | |
2783 | if (!node) |
2784 | return NULL; |
2785 | if (!isl_schedule_node_has_parent(node)) |
2786 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2787 | "cannot cut root" , return isl_schedule_node_free(node)); |
2788 | |
2789 | parent_type = isl_schedule_node_get_parent_type(node); |
2790 | if (parent_type == isl_schedule_node_set || |
2791 | parent_type == isl_schedule_node_sequence) |
2792 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2793 | "cannot cut child of set or sequence" , |
2794 | return isl_schedule_node_free(node)); |
2795 | |
2796 | leaf = isl_schedule_node_get_leaf(node); |
2797 | return isl_schedule_node_graft_tree(pos: node, tree: leaf); |
2798 | } |
2799 | |
2800 | /* Remove a single node from the schedule tree, attaching the child |
2801 | * of "node" directly to its parent. |
 * Return a pointer to this former child or to the leaf at the position
2803 | * of the original node if there was no child. |
2804 | * It is not allowed to remove the root of a schedule tree, |
2805 | * a set or sequence node, a child of a set or sequence node or |
2806 | * a band node with an anchored subtree. |
2807 | */ |
2808 | __isl_give isl_schedule_node *isl_schedule_node_delete( |
2809 | __isl_take isl_schedule_node *node) |
2810 | { |
2811 | isl_size n, depth; |
2812 | isl_schedule_tree *tree; |
2813 | enum isl_schedule_node_type type; |
2814 | |
2815 | depth = isl_schedule_node_get_tree_depth(node); |
2816 | n = isl_schedule_node_n_children(node); |
2817 | if (depth < 0 || n < 0) |
2818 | return isl_schedule_node_free(node); |
2819 | |
2820 | if (depth == 0) |
2821 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2822 | "cannot delete root node" , |
2823 | return isl_schedule_node_free(node)); |
2824 | if (n != 1) |
2825 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2826 | "can only delete node with a single child" , |
2827 | return isl_schedule_node_free(node)); |
2828 | type = isl_schedule_node_get_parent_type(node); |
2829 | if (type == isl_schedule_node_sequence || type == isl_schedule_node_set) |
2830 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
2831 | "cannot delete child of set or sequence" , |
2832 | return isl_schedule_node_free(node)); |
2833 | if (isl_schedule_node_get_type(node) == isl_schedule_node_band) { |
2834 | int anchored; |
2835 | |
2836 | anchored = isl_schedule_node_is_subtree_anchored(node); |
2837 | if (anchored < 0) |
2838 | return isl_schedule_node_free(node); |
2839 | if (anchored) |
2840 | isl_die(isl_schedule_node_get_ctx(node), |
2841 | isl_error_invalid, |
2842 | "cannot delete band node with anchored subtree" , |
2843 | return isl_schedule_node_free(node)); |
2844 | } |
2845 | |
2846 | tree = isl_schedule_node_get_tree(node); |
2847 | if (!tree || isl_schedule_tree_has_children(tree)) { |
2848 | tree = isl_schedule_tree_child(tree, pos: 0); |
2849 | } else { |
2850 | isl_schedule_tree_free(tree); |
2851 | tree = isl_schedule_node_get_leaf(node); |
2852 | } |
2853 | node = isl_schedule_node_graft_tree(pos: node, tree); |
2854 | |
2855 | return node; |
2856 | } |
2857 | |
2858 | /* Internal data structure for the group_ancestor callback. |
2859 | * |
2860 | * If "finished" is set, then we no longer need to modify |
2861 | * any further ancestors. |
2862 | * |
2863 | * "contraction" and "expansion" represent the expansion |
2864 | * that reflects the grouping. |
2865 | * |
2866 | * "domain" contains the domain elements that reach the position |
2867 | * where the grouping is performed. That is, it is the range |
2868 | * of the resulting expansion. |
2869 | * "domain_universe" is the universe of "domain". |
2870 | * "group" is the set of group elements, i.e., the domain |
2871 | * of the resulting expansion. |
2872 | * "group_universe" is the universe of "group". |
2873 | * |
2874 | * "sched" is the schedule for the group elements, in pratice |
2875 | * an identity mapping on "group_universe". |
2876 | * "dim" is the dimension of "sched". |
2877 | */ |
2878 | struct isl_schedule_group_data { |
2879 | int finished; |
2880 | |
2881 | isl_union_map *expansion; |
2882 | isl_union_pw_multi_aff *contraction; |
2883 | |
2884 | isl_union_set *domain; |
2885 | isl_union_set *domain_universe; |
2886 | isl_union_set *group; |
2887 | isl_union_set *group_universe; |
2888 | |
2889 | int dim; |
2890 | isl_multi_aff *sched; |
2891 | }; |
2892 | |
/* Is "domain" covered by data->domain within data->domain_universe?
2894 | */ |
2895 | static isl_bool locally_covered_by_domain(__isl_keep isl_union_set *domain, |
2896 | struct isl_schedule_group_data *data) |
2897 | { |
2898 | isl_bool is_subset; |
2899 | isl_union_set *test; |
2900 | |
2901 | test = isl_union_set_copy(uset: domain); |
2902 | test = isl_union_set_intersect(uset1: test, |
2903 | uset2: isl_union_set_copy(uset: data->domain_universe)); |
2904 | is_subset = isl_union_set_is_subset(uset1: test, uset2: data->domain); |
2905 | isl_union_set_free(uset: test); |
2906 | |
2907 | return is_subset; |
2908 | } |
2909 | |
2910 | /* Update the band tree root "tree" to refer to the group instances |
2911 | * in data->group rather than the original domain elements in data->domain. |
2912 | * "pos" is the position in the original schedule tree where the modified |
2913 | * "tree" will be attached. |
2914 | * |
2915 | * Add the part of the identity schedule on the group instances data->sched |
2916 | * that corresponds to this band node to the band schedule. |
2917 | * If the domain elements that reach the node and that are part |
2918 | * of data->domain_universe are all elements of data->domain (and therefore |
2919 | * replaced by the group instances) then this data->domain_universe |
2920 | * is removed from the domain of the band schedule. |
2921 | */ |
2922 | static __isl_give isl_schedule_tree *group_band( |
2923 | __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos, |
2924 | struct isl_schedule_group_data *data) |
2925 | { |
2926 | isl_union_set *domain; |
2927 | isl_multi_aff *ma; |
2928 | isl_multi_union_pw_aff *mupa, *partial; |
2929 | isl_bool is_covered; |
2930 | isl_size depth, n; |
2931 | isl_bool has_id; |
2932 | |
2933 | domain = isl_schedule_node_get_domain(node: pos); |
2934 | is_covered = locally_covered_by_domain(domain, data); |
2935 | if (is_covered >= 0 && is_covered) { |
2936 | domain = isl_union_set_universe(uset: domain); |
2937 | domain = isl_union_set_subtract(uset1: domain, |
2938 | uset2: isl_union_set_copy(uset: data->domain_universe)); |
2939 | tree = isl_schedule_tree_band_intersect_domain(tree, domain); |
2940 | } else |
2941 | isl_union_set_free(uset: domain); |
2942 | if (is_covered < 0) |
2943 | return isl_schedule_tree_free(tree); |
2944 | depth = isl_schedule_node_get_schedule_depth(node: pos); |
2945 | n = isl_schedule_tree_band_n_member(tree); |
2946 | if (depth < 0 || n < 0) |
2947 | return isl_schedule_tree_free(tree); |
2948 | ma = isl_multi_aff_copy(multi: data->sched); |
2949 | ma = isl_multi_aff_drop_dims(multi: ma, type: isl_dim_out, first: 0, n: depth); |
2950 | ma = isl_multi_aff_drop_dims(multi: ma, type: isl_dim_out, first: n, n: data->dim - depth - n); |
2951 | mupa = isl_multi_union_pw_aff_from_multi_aff(ma); |
2952 | partial = isl_schedule_tree_band_get_partial_schedule(tree); |
2953 | has_id = isl_multi_union_pw_aff_has_tuple_id(multi: partial, type: isl_dim_set); |
2954 | if (has_id < 0) { |
2955 | partial = isl_multi_union_pw_aff_free(multi: partial); |
2956 | } else if (has_id) { |
2957 | isl_id *id; |
2958 | id = isl_multi_union_pw_aff_get_tuple_id(multi: partial, type: isl_dim_set); |
2959 | mupa = isl_multi_union_pw_aff_set_tuple_id(multi: mupa, |
2960 | type: isl_dim_set, id); |
2961 | } |
2962 | partial = isl_multi_union_pw_aff_union_add(mupa1: partial, mupa2: mupa); |
2963 | tree = isl_schedule_tree_band_set_partial_schedule(tree, schedule: partial); |
2964 | |
2965 | return tree; |
2966 | } |
2967 | |
2968 | /* Drop the parameters in "uset" that are not also in "space". |
2969 | * "n" is the number of parameters in "space". |
2970 | */ |
static __isl_give isl_union_set *union_set_drop_extra_params(
2972 | __isl_take isl_union_set *uset, __isl_keep isl_space *space, int n) |
2973 | { |
2974 | isl_size n2; |
2975 | |
2976 | uset = isl_union_set_align_params(uset, model: isl_space_copy(space)); |
2977 | n2 = isl_union_set_dim(uset, type: isl_dim_param); |
2978 | if (n2 < 0) |
2979 | return isl_union_set_free(uset); |
2980 | uset = isl_union_set_project_out(uset, type: isl_dim_param, first: n, n: n2 - n); |
2981 | |
2982 | return uset; |
2983 | } |
2984 | |
2985 | /* Update the context tree root "tree" to refer to the group instances |
2986 | * in data->group rather than the original domain elements in data->domain. |
2987 | * "pos" is the position in the original schedule tree where the modified |
2988 | * "tree" will be attached. |
2989 | * |
2990 | * We do not actually need to update "tree" since a context node only |
2991 | * refers to the schedule space. However, we may need to update "data" |
2992 | * to not refer to any parameters introduced by the context node. |
2993 | */ |
2994 | static __isl_give isl_schedule_tree *group_context( |
2995 | __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos, |
2996 | struct isl_schedule_group_data *data) |
2997 | { |
2998 | isl_space *space; |
2999 | isl_union_set *domain; |
3000 | isl_size n1, n2; |
3001 | isl_bool involves; |
3002 | isl_size depth; |
3003 | |
3004 | depth = isl_schedule_node_get_tree_depth(node: pos); |
3005 | if (depth < 0) |
3006 | return isl_schedule_tree_free(tree); |
3007 | if (depth == 1) |
3008 | return tree; |
3009 | |
3010 | domain = isl_schedule_node_get_universe_domain(node: pos); |
3011 | space = isl_union_set_get_space(uset: domain); |
3012 | isl_union_set_free(uset: domain); |
3013 | |
3014 | n1 = isl_space_dim(space, type: isl_dim_param); |
3015 | data->expansion = isl_union_map_align_params(umap: data->expansion, model: space); |
3016 | n2 = isl_union_map_dim(umap: data->expansion, type: isl_dim_param); |
3017 | |
3018 | if (n1 < 0 || n2 < 0) |
3019 | return isl_schedule_tree_free(tree); |
3020 | if (n1 == n2) |
3021 | return tree; |
3022 | |
3023 | involves = isl_union_map_involves_dims(umap: data->expansion, |
3024 | type: isl_dim_param, first: n1, n: n2 - n1); |
3025 | if (involves < 0) |
3026 | return isl_schedule_tree_free(tree); |
3027 | if (involves) |
3028 | isl_die(isl_schedule_node_get_ctx(pos), isl_error_invalid, |
3029 | "grouping can only refer to global parameters", |
3030 | return isl_schedule_tree_free(tree)); |
3031 | |
3032 | data->expansion = isl_union_map_project_out(umap: data->expansion, |
3033 | type: isl_dim_param, first: n1, n: n2 - n1); |
3034 | space = isl_union_map_get_space(umap: data->expansion); |
3035 | |
3036 | data->contraction = isl_union_pw_multi_aff_align_params( |
3037 | upma: data->contraction, model: isl_space_copy(space)); |
3038 | n2 = isl_union_pw_multi_aff_dim(upma: data->contraction, type: isl_dim_param); |
3039 | if (n2 < 0) |
3040 | data->contraction = |
3041 | isl_union_pw_multi_aff_free(upma: data->contraction); |
3042 | data->contraction = isl_union_pw_multi_aff_drop_dims(upma: data->contraction, |
3043 | type: isl_dim_param, first: n1, n: n2 - n1); |
3044 | |
3045 | data->domain = union_set_drop_extra_params(uset: data->domain, space, n: n1); |
3046 | data->domain_universe = |
3047 | union_set_drop_extra_params(uset: data->domain_universe, space, n: n1); |
3048 | data->group = union_set_drop_extra_params(uset: data->group, space, n: n1); |
3049 | data->group_universe = |
3050 | union_set_drop_extra_params(uset: data->group_universe, space, n: n1); |
3051 | |
3052 | data->sched = isl_multi_aff_align_params(multi: data->sched, |
3053 | model: isl_space_copy(space)); |
3054 | n2 = isl_multi_aff_dim(multi: data->sched, type: isl_dim_param); |
3055 | if (n2 < 0) |
3056 | data->sched = isl_multi_aff_free(multi: data->sched); |
3057 | data->sched = isl_multi_aff_drop_dims(multi: data->sched, |
3058 | type: isl_dim_param, first: n1, n: n2 - n1); |
3059 | |
3060 | isl_space_free(space); |
3061 | |
3062 | return tree; |
3063 | } |
3064 | |
3065 | /* Update the domain tree root "tree" to refer to the group instances |
3066 | * in data->group rather than the original domain elements in data->domain. |
3067 | * "pos" is the position in the original schedule tree where the modified |
3068 | * "tree" will be attached. |
3069 | * |
3070 | * We first double-check that all grouped domain elements are actually |
3071 | * part of the root domain and then replace those elements by the group |
3072 | * instances. |
3073 | */ |
3074 | static __isl_give isl_schedule_tree *group_domain( |
3075 | __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos, |
3076 | struct isl_schedule_group_data *data) |
3077 | { |
3078 | isl_union_set *domain; |
3079 | isl_bool is_subset; |
3080 | |
3081 | domain = isl_schedule_tree_domain_get_domain(tree); |
3082 | is_subset = isl_union_set_is_subset(uset1: data->domain, uset2: domain); |
3083 | isl_union_set_free(uset: domain); |
3084 | if (is_subset < 0) |
3085 | return isl_schedule_tree_free(tree); |
3086 | if (!is_subset) |
3087 | isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal, |
3088 | "grouped domain should be part of outer domain", |
3089 | return isl_schedule_tree_free(tree)); |
3090 | domain = isl_schedule_tree_domain_get_domain(tree); |
3091 | domain = isl_union_set_subtract(uset1: domain, |
3092 | uset2: isl_union_set_copy(uset: data->domain)); |
3093 | domain = isl_union_set_union(uset1: domain, uset2: isl_union_set_copy(uset: data->group)); |
3094 | tree = isl_schedule_tree_domain_set_domain(tree, domain); |
3095 | |
3096 | return tree; |
3097 | } |
3098 | |
3099 | /* Update the expansion tree root "tree" to refer to the group instances |
3100 | * in data->group rather than the original domain elements in data->domain. |
3101 | * "pos" is the position in the original schedule tree where the modified |
3102 | * "tree" will be attached. |
3103 | * |
3104 | * Let G_1 -> D_1 be the expansion of "tree" and G_2 -> D_2 the newly |
3105 | * introduced expansion in a descendant of "tree". |
3106 | * We first double-check that D_2 is a subset of D_1. |
3107 | * Then we remove D_2 from the range of G_1 -> D_1 and add the mapping |
3108 | * G_1 -> D_1 . D_2 -> G_2. |
3109 | * Similarly, we restrict the domain of the contraction to the universe |
3110 | * of the range of the updated expansion and add G_2 -> D_2 . D_1 -> G_1, |
3111 | * attempting to remove the domain constraints of this additional part. |
3112 | */ |
3113 | static __isl_give isl_schedule_tree *group_expansion( |
3114 | __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos, |
3115 | struct isl_schedule_group_data *data) |
3116 | { |
3117 | isl_union_set *domain; |
3118 | isl_union_map *expansion, *umap; |
3119 | isl_union_pw_multi_aff *contraction, *upma; |
3120 | int is_subset; |
3121 | |
3122 | expansion = isl_schedule_tree_expansion_get_expansion(tree); |
3123 | domain = isl_union_map_range(umap: expansion); |
3124 | is_subset = isl_union_set_is_subset(uset1: data->domain, uset2: domain); |
3125 | isl_union_set_free(uset: domain); |
3126 | if (is_subset < 0) |
3127 | return isl_schedule_tree_free(tree); |
3128 | if (!is_subset) |
3129 | isl_die(isl_schedule_tree_get_ctx(tree), isl_error_internal, |
3130 | "grouped domain should be part " |
3131 | "of outer expansion domain", |
3132 | return isl_schedule_tree_free(tree)); |
3133 | expansion = isl_schedule_tree_expansion_get_expansion(tree); |
3134 | umap = isl_union_map_from_union_pw_multi_aff( |
3135 | upma: isl_union_pw_multi_aff_copy(upma: data->contraction)); |
3136 | umap = isl_union_map_apply_range(umap1: expansion, umap2: umap); |
3137 | expansion = isl_schedule_tree_expansion_get_expansion(tree); |
3138 | expansion = isl_union_map_subtract_range(umap: expansion, |
3139 | dom: isl_union_set_copy(uset: data->domain)); |
3140 | expansion = isl_union_map_union(umap1: expansion, umap2: umap); |
3141 | umap = isl_union_map_universe(umap: isl_union_map_copy(umap: expansion)); |
3142 | domain = isl_union_map_range(umap); |
3143 | contraction = isl_schedule_tree_expansion_get_contraction(tree); |
3144 | umap = isl_union_map_from_union_pw_multi_aff(upma: contraction); |
3145 | umap = isl_union_map_apply_range(umap1: isl_union_map_copy(umap: data->expansion), |
3146 | umap2: umap); |
3147 | upma = isl_union_pw_multi_aff_from_union_map(umap); |
3148 | contraction = isl_schedule_tree_expansion_get_contraction(tree); |
3149 | contraction = isl_union_pw_multi_aff_intersect_domain(upma: contraction, |
3150 | uset: domain); |
3151 | domain = isl_union_pw_multi_aff_domain( |
3152 | upma: isl_union_pw_multi_aff_copy(upma)); |
3153 | upma = isl_union_pw_multi_aff_gist(upma, context: domain); |
3154 | contraction = isl_union_pw_multi_aff_union_add(upma1: contraction, upma2: upma); |
3155 | tree = isl_schedule_tree_expansion_set_contraction_and_expansion(tree, |
3156 | contraction, expansion); |
3157 | |
3158 | return tree; |
3159 | } |
3160 | |
3161 | /* Update the tree root "tree" to refer to the group instances |
3162 | * in data->group rather than the original domain elements in data->domain. |
3163 | * "pos" is the position in the original schedule tree where the modified |
3164 | * "tree" will be attached. |
3165 | * |
3166 | * If we have come across a domain or expansion node before (data->finished |
3167 | * is set), then we no longer need to perform any modifications. |
3168 | * |
3169 | * If "tree" is a filter, then we add data->group_universe to the filter. |
3170 | * We also remove data->domain_universe from the filter if all the domain |
3171 | * elements in this universe that reach the filter node are part of |
3172 | * the elements that are being grouped by data->expansion. |
3173 | * If "tree" is a band, domain or expansion, then it is handled |
3174 | * in a separate function. |
3175 | */ |
3176 | static __isl_give isl_schedule_tree *group_ancestor( |
3177 | __isl_take isl_schedule_tree *tree, __isl_keep isl_schedule_node *pos, |
3178 | void *user) |
3179 | { |
3180 | struct isl_schedule_group_data *data = user; |
3181 | isl_union_set *domain; |
3182 | isl_bool is_covered; |
3183 | |
3184 | if (!tree || !pos) |
3185 | return isl_schedule_tree_free(tree); |
3186 | |
3187 | if (data->finished) |
3188 | return tree; |
3189 | |
3190 | switch (isl_schedule_tree_get_type(tree)) { |
3191 | case isl_schedule_node_error: |
3192 | return isl_schedule_tree_free(tree); |
3193 | case isl_schedule_node_extension: |
3194 | isl_die(isl_schedule_tree_get_ctx(tree), isl_error_unsupported, |
3195 | "grouping not allowed in extended tree", |
3196 | return isl_schedule_tree_free(tree)); |
3197 | case isl_schedule_node_band: |
3198 | tree = group_band(tree, pos, data); |
3199 | break; |
3200 | case isl_schedule_node_context: |
3201 | tree = group_context(tree, pos, data); |
3202 | break; |
3203 | case isl_schedule_node_domain: |
3204 | tree = group_domain(tree, pos, data); |
3205 | data->finished = 1; |
3206 | break; |
3207 | case isl_schedule_node_filter: |
3208 | domain = isl_schedule_node_get_domain(node: pos); |
3209 | is_covered = locally_covered_by_domain(domain, data); |
3210 | isl_union_set_free(uset: domain); |
3211 | if (is_covered < 0) |
3212 | return isl_schedule_tree_free(tree); |
3213 | domain = isl_schedule_tree_filter_get_filter(tree); |
3214 | if (is_covered) |
3215 | domain = isl_union_set_subtract(uset1: domain, |
3216 | uset2: isl_union_set_copy(uset: data->domain_universe)); |
3217 | domain = isl_union_set_union(uset1: domain, |
3218 | uset2: isl_union_set_copy(uset: data->group_universe)); |
3219 | tree = isl_schedule_tree_filter_set_filter(tree, filter: domain); |
3220 | break; |
3221 | case isl_schedule_node_expansion: |
3222 | tree = group_expansion(tree, pos, data); |
3223 | data->finished = 1; |
3224 | break; |
3225 | case isl_schedule_node_leaf: |
3226 | case isl_schedule_node_guard: |
3227 | case isl_schedule_node_mark: |
3228 | case isl_schedule_node_sequence: |
3229 | case isl_schedule_node_set: |
3230 | break; |
3231 | } |
3232 | |
3233 | return tree; |
3234 | } |
3235 | |
3236 | /* Group the domain elements that reach "node" into instances |
3237 | * of a single statement with identifier "group_id". |
3238 | * In particular, group the domain elements according to their |
3239 | * prefix schedule. |
3240 | * |
3241 | * That is, introduce an expansion node with as contraction |
3242 | * the prefix schedule (with the target space replaced by "group_id") |
3243 | * and as expansion the inverse of this contraction (with its range |
3244 | * intersected with the domain elements that reach "node"). |
3245 | * The outer nodes are then modified to refer to the group instances |
3246 | * instead of the original domain elements. |
3247 | * |
3248 | * No instance of "group_id" is allowed to reach "node" prior |
3249 | * to the grouping. |
3250 | * No ancestor of "node" is allowed to be an extension node. |
3251 | * |
3252 | * Return a pointer to the original node in the tree, i.e., the child |
3253 | * of the newly introduced expansion node. |
3254 | */ |
3255 | __isl_give isl_schedule_node *isl_schedule_node_group( |
3256 | __isl_take isl_schedule_node *node, __isl_take isl_id *group_id) |
3257 | { |
3258 | struct isl_schedule_group_data data = { 0 }; |
3259 | isl_space *space; |
3260 | isl_union_set *domain; |
3261 | isl_union_pw_multi_aff *contraction; |
3262 | isl_union_map *expansion; |
3263 | isl_bool disjoint; |
3264 | isl_size depth; |
3265 | |
3266 | depth = isl_schedule_node_get_schedule_depth(node); |
3267 | if (depth < 0 || !group_id) |
3268 | goto error; |
3269 | if (check_insert(node) < 0) |
3270 | goto error; |
3271 | |
3272 | domain = isl_schedule_node_get_domain(node); |
3273 | data.domain = isl_union_set_copy(uset: domain); |
3274 | data.domain_universe = isl_union_set_copy(uset: domain); |
3275 | data.domain_universe = isl_union_set_universe(uset: data.domain_universe); |
3276 | |
3277 | data.dim = depth; |
3278 | if (data.dim == 0) { |
3279 | isl_ctx *ctx; |
3280 | isl_set *set; |
3281 | isl_union_set *group; |
3282 | isl_union_map *univ; |
3283 | |
3284 | ctx = isl_schedule_node_get_ctx(node); |
3285 | space = isl_space_set_alloc(ctx, nparam: 0, dim: 0); |
3286 | space = isl_space_set_tuple_id(space, type: isl_dim_set, id: group_id); |
3287 | set = isl_set_universe(space: isl_space_copy(space)); |
3288 | group = isl_union_set_from_set(set); |
3289 | expansion = isl_union_map_from_domain_and_range(domain, range: group); |
3290 | univ = isl_union_map_universe(umap: isl_union_map_copy(umap: expansion)); |
3291 | contraction = isl_union_pw_multi_aff_from_union_map(umap: univ); |
3292 | expansion = isl_union_map_reverse(umap: expansion); |
3293 | } else { |
3294 | isl_multi_union_pw_aff *prefix; |
3295 | isl_union_set *univ; |
3296 | |
3297 | prefix = |
3298 | isl_schedule_node_get_prefix_schedule_multi_union_pw_aff(node); |
3299 | prefix = isl_multi_union_pw_aff_set_tuple_id(multi: prefix, |
3300 | type: isl_dim_set, id: group_id); |
3301 | space = isl_multi_union_pw_aff_get_space(multi: prefix); |
3302 | contraction = isl_union_pw_multi_aff_from_multi_union_pw_aff( |
3303 | mupa: prefix); |
3304 | univ = isl_union_set_universe(uset: isl_union_set_copy(uset: domain)); |
3305 | contraction = |
3306 | isl_union_pw_multi_aff_intersect_domain(upma: contraction, uset: univ); |
3307 | expansion = isl_union_map_from_union_pw_multi_aff( |
3308 | upma: isl_union_pw_multi_aff_copy(upma: contraction)); |
3309 | expansion = isl_union_map_reverse(umap: expansion); |
3310 | expansion = isl_union_map_intersect_range(umap: expansion, uset: domain); |
3311 | } |
3312 | space = isl_space_map_from_set(space); |
3313 | data.sched = isl_multi_aff_identity(space); |
3314 | data.group = isl_union_map_domain(umap: isl_union_map_copy(umap: expansion)); |
3315 | data.group = isl_union_set_coalesce(uset: data.group); |
3316 | data.group_universe = isl_union_set_copy(uset: data.group); |
3317 | data.group_universe = isl_union_set_universe(uset: data.group_universe); |
3318 | data.expansion = isl_union_map_copy(umap: expansion); |
3319 | data.contraction = isl_union_pw_multi_aff_copy(upma: contraction); |
3320 | node = isl_schedule_node_insert_expansion(node, contraction, expansion); |
3321 | |
3322 | disjoint = isl_union_set_is_disjoint(uset1: data.domain_universe, |
3323 | uset2: data.group_universe); |
3324 | |
3325 | node = update_ancestors(node, fn: &group_ancestor, user: &data); |
3326 | |
3327 | isl_union_set_free(uset: data.domain); |
3328 | isl_union_set_free(uset: data.domain_universe); |
3329 | isl_union_set_free(uset: data.group); |
3330 | isl_union_set_free(uset: data.group_universe); |
3331 | isl_multi_aff_free(multi: data.sched); |
3332 | isl_union_map_free(umap: data.expansion); |
3333 | isl_union_pw_multi_aff_free(upma: data.contraction); |
3334 | |
3335 | node = isl_schedule_node_child(node, pos: 0); |
3336 | |
3337 | if (!node || disjoint < 0) |
3338 | return isl_schedule_node_free(node); |
3339 | if (!disjoint) |
3340 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
3341 | "group instances already reach node", |
3342 | return isl_schedule_node_free(node)); |
3343 | |
3344 | return node; |
3345 | error: |
3346 | isl_schedule_node_free(node); |
3347 | isl_id_free(id: group_id); |
3348 | return NULL; |
3349 | } |
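
/* Editorial example (not part of isl; disabled): a typical call of
 * isl_schedule_node_group.  The statement name "group" is an arbitrary
 * assumption.  On success, the returned node points to the child of the
 * newly introduced expansion node, i.e., to the original position.
 */
#if 0
static __isl_give isl_schedule_node *group_here(
	__isl_take isl_schedule_node *node)
{
	isl_id *id;

	id = isl_id_alloc(isl_schedule_node_get_ctx(node), "group", NULL);
	return isl_schedule_node_group(node, id);
}
#endif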
3350 | |
3351 | /* Compute the gist of the given band node with respect to "context". |
3352 | */ |
3353 | __isl_give isl_schedule_node *isl_schedule_node_band_gist( |
3354 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *context) |
3355 | { |
3356 | isl_schedule_tree *tree; |
3357 | |
3358 | tree = isl_schedule_node_get_tree(node); |
3359 | tree = isl_schedule_tree_band_gist(tree, context); |
3360 | return isl_schedule_node_graft_tree(pos: node, tree); |
3361 | } |
3362 | |
3363 | /* Internal data structure for isl_schedule_node_gist. |
3364 | * "n_expansion" is the number of outer expansion nodes |
3365 | * with respect to the current position |
3366 | * "filters" contains an element for each outer filter, expansion or |
3367 | * extension node with respect to the current position, each representing |
3368 | * the intersection of the previous element and the filter on the filter node |
3369 | * or the expansion/extension of the previous element. |
3370 | * The first element in the list is the original context passed to isl_schedule_node_gist. |
3371 | */ |
3372 | struct isl_node_gist_data { |
3373 | int n_expansion; |
3374 | isl_union_set_list *filters; |
3375 | }; |
3376 | |
3377 | /* Enter the expansion node "node" during an isl_schedule_node_gist traversal. |
3378 | * |
3379 | * In particular, add an extra element to data->filters containing |
3380 | * the expansion of the previous element and replace the expansion |
3381 | * and contraction on "node" by the gist with respect to these filters. |
3382 | * Also keep track of the fact that we have entered another expansion. |
3383 | */ |
3384 | static __isl_give isl_schedule_node *gist_enter_expansion( |
3385 | __isl_take isl_schedule_node *node, struct isl_node_gist_data *data) |
3386 | { |
3387 | isl_size n; |
3388 | isl_union_set *inner; |
3389 | isl_union_map *expansion; |
3390 | isl_union_pw_multi_aff *contraction; |
3391 | |
3392 | data->n_expansion++; |
3393 | |
3394 | n = isl_union_set_list_n_union_set(list: data->filters); |
3395 | if (n < 0) |
3396 | return isl_schedule_node_free(node); |
3397 | inner = isl_union_set_list_get_union_set(list: data->filters, index: n - 1); |
3398 | expansion = isl_schedule_node_expansion_get_expansion(node); |
3399 | inner = isl_union_set_apply(uset: inner, umap: expansion); |
3400 | |
3401 | contraction = isl_schedule_node_expansion_get_contraction(node); |
3402 | contraction = isl_union_pw_multi_aff_gist(upma: contraction, |
3403 | context: isl_union_set_copy(uset: inner)); |
3404 | |
3405 | data->filters = isl_union_set_list_add(list: data->filters, el: inner); |
3406 | |
3407 | inner = isl_union_set_list_get_union_set(list: data->filters, index: n - 1); |
3408 | expansion = isl_schedule_node_expansion_get_expansion(node); |
3409 | expansion = isl_union_map_gist_domain(umap: expansion, uset: inner); |
3410 | node = isl_schedule_node_expansion_set_contraction_and_expansion(node, |
3411 | contraction, expansion); |
3412 | |
3413 | return node; |
3414 | } |
3415 | |
3416 | /* Leave the expansion node "node" during an isl_schedule_node_gist traversal. |
3417 | * |
3418 | * In particular, remove the element in data->filters that was added by |
3419 | * gist_enter_expansion and decrement the number of outer expansions. |
3420 | * |
3421 | * The expansion has already been simplified in gist_enter_expansion. |
3422 | * If this simplification results in an identity expansion, then |
3423 | * it is removed here. |
3424 | */ |
3425 | static __isl_give isl_schedule_node *gist_leave_expansion( |
3426 | __isl_take isl_schedule_node *node, struct isl_node_gist_data *data) |
3427 | { |
3428 | isl_size n; |
3429 | isl_bool identity; |
3430 | isl_union_map *expansion; |
3431 | |
3432 | expansion = isl_schedule_node_expansion_get_expansion(node); |
3433 | identity = isl_union_map_is_identity(umap: expansion); |
3434 | isl_union_map_free(umap: expansion); |
3435 | |
3436 | if (identity < 0) |
3437 | node = isl_schedule_node_free(node); |
3438 | else if (identity) |
3439 | node = isl_schedule_node_delete(node); |
3440 | |
3441 | n = isl_union_set_list_n_union_set(list: data->filters); |
3442 | if (n < 0) |
3443 | return isl_schedule_node_free(node); |
3444 | data->filters = isl_union_set_list_drop(list: data->filters, first: n - 1, n: 1); |
3445 | |
3446 | data->n_expansion--; |
3447 | |
3448 | return node; |
3449 | } |
3450 | |
3451 | /* Enter the extension node "node" during an isl_schedule_node_gist traversal. |
3452 | * |
3453 | * In particular, add an extra element to data->filters containing |
3454 | * the union of the previous element with the additional domain elements |
3455 | * introduced by the extension. |
3456 | */ |
3457 | static __isl_give isl_schedule_node *gist_enter_extension( |
3458 | __isl_take isl_schedule_node *node, struct isl_node_gist_data *data) |
3459 | { |
3460 | isl_size n; |
3461 | isl_union_set *inner, *extra; |
3462 | isl_union_map *extension; |
3463 | |
3464 | n = isl_union_set_list_n_union_set(list: data->filters); |
3465 | if (n < 0) |
3466 | return isl_schedule_node_free(node); |
3467 | inner = isl_union_set_list_get_union_set(list: data->filters, index: n - 1); |
3468 | extension = isl_schedule_node_extension_get_extension(node); |
3469 | extra = isl_union_map_range(umap: extension); |
3470 | inner = isl_union_set_union(uset1: inner, uset2: extra); |
3471 | |
3472 | data->filters = isl_union_set_list_add(list: data->filters, el: inner); |
3473 | |
3474 | return node; |
3475 | } |
3476 | |
3477 | /* Can we finish gisting at this node? |
3478 | * That is, is the filter on the current filter node a subset of |
3479 | * the original context passed to isl_schedule_node_gist? |
3480 | * If we have gone through any expansions, then we cannot perform |
3481 | * this test since the current domain elements are incomparable |
3482 | * to the domain elements in the original context. |
3483 | */ |
3484 | static isl_bool gist_done(__isl_keep isl_schedule_node *node, |
3485 | struct isl_node_gist_data *data) |
3486 | { |
3487 | isl_union_set *filter, *outer; |
3488 | isl_bool subset; |
3489 | |
3490 | if (data->n_expansion != 0) |
3491 | return isl_bool_false; |
3492 | |
3493 | filter = isl_schedule_node_filter_get_filter(node); |
3494 | outer = isl_union_set_list_get_union_set(list: data->filters, index: 0); |
3495 | subset = isl_union_set_is_subset(uset1: filter, uset2: outer); |
3496 | isl_union_set_free(uset: outer); |
3497 | isl_union_set_free(uset: filter); |
3498 | |
3499 | return subset; |
3500 | } |
3501 | |
3502 | /* Callback for "traverse" to enter a node and to move |
3503 | * to the deepest initial subtree that should be traversed |
3504 | * by isl_schedule_node_gist. |
3505 | * |
3506 | * The "filters" list is extended by one element each time |
3507 | * we come across a filter node by the result of intersecting |
3508 | * the last element in the list with the filter on the filter node. |
3509 | * |
3510 | * If the filter on the current filter node is a subset of |
3511 | * the original context passed to isl_schedule_node_gist, |
3512 | * then there is no need to go into its subtree since it cannot |
3513 | * be further simplified by the context. The "filters" list is |
3514 | * still extended for consistency, but the actual value of the |
3515 | * added element is immaterial since it will not be used. |
3516 | * |
3517 | * Otherwise, the filter on the current filter node is replaced by |
3518 | * the gist of the original filter with respect to the intersection |
3519 | * of the original context with the intermediate filters. |
3520 | * |
3521 | * If the new element in the "filters" list is empty, then no elements |
3522 | * can reach the descendants of the current filter node. The subtree |
3523 | * underneath the filter node is therefore removed. |
3524 | * |
3525 | * Each expansion node we come across is handled by |
3526 | * gist_enter_expansion. |
3527 | * |
3528 | * Each extension node we come across is handled by |
3529 | * gist_enter_extension. |
3530 | */ |
3531 | static __isl_give isl_schedule_node *gist_enter( |
3532 | __isl_take isl_schedule_node *node, void *user) |
3533 | { |
3534 | struct isl_node_gist_data *data = user; |
3535 | |
3536 | do { |
3537 | isl_union_set *filter, *inner; |
3538 | isl_bool done, empty; |
3539 | isl_size n; |
3540 | |
3541 | switch (isl_schedule_node_get_type(node)) { |
3542 | case isl_schedule_node_error: |
3543 | return isl_schedule_node_free(node); |
3544 | case isl_schedule_node_expansion: |
3545 | node = gist_enter_expansion(node, data); |
3546 | continue; |
3547 | case isl_schedule_node_extension: |
3548 | node = gist_enter_extension(node, data); |
3549 | continue; |
3550 | case isl_schedule_node_band: |
3551 | case isl_schedule_node_context: |
3552 | case isl_schedule_node_domain: |
3553 | case isl_schedule_node_guard: |
3554 | case isl_schedule_node_leaf: |
3555 | case isl_schedule_node_mark: |
3556 | case isl_schedule_node_sequence: |
3557 | case isl_schedule_node_set: |
3558 | continue; |
3559 | case isl_schedule_node_filter: |
3560 | break; |
3561 | } |
3562 | done = gist_done(node, data); |
3563 | filter = isl_schedule_node_filter_get_filter(node); |
3564 | n = isl_union_set_list_n_union_set(list: data->filters); |
3565 | if (n < 0 || done < 0 || done) { |
3566 | data->filters = isl_union_set_list_add(list: data->filters, |
3567 | el: filter); |
3568 | if (n < 0 || done < 0) |
3569 | return isl_schedule_node_free(node); |
3570 | return node; |
3571 | } |
3572 | inner = isl_union_set_list_get_union_set(list: data->filters, index: n - 1); |
3573 | filter = isl_union_set_gist(uset: filter, context: isl_union_set_copy(uset: inner)); |
3574 | node = isl_schedule_node_filter_set_filter(node, |
3575 | filter: isl_union_set_copy(uset: filter)); |
3576 | filter = isl_union_set_intersect(uset1: filter, uset2: inner); |
3577 | empty = isl_union_set_is_empty(uset: filter); |
3578 | data->filters = isl_union_set_list_add(list: data->filters, el: filter); |
3579 | if (empty < 0) |
3580 | return isl_schedule_node_free(node); |
3581 | if (!empty) |
3582 | continue; |
3583 | node = isl_schedule_node_child(node, pos: 0); |
3584 | node = isl_schedule_node_cut(node); |
3585 | node = isl_schedule_node_parent(node); |
3586 | return node; |
3587 | } while (isl_schedule_node_has_children(node) && |
3588 | (node = isl_schedule_node_first_child(node)) != NULL); |
3589 | |
3590 | return node; |
3591 | } |
3592 | |
3593 | /* Callback for "traverse" to leave a node for isl_schedule_node_gist. |
3594 | * |
3595 | * In particular, if the current node is a filter node, then we remove |
3596 | * the element on the "filters" list that was added when we entered |
3597 | * the node. There is no need to compute any gist here, since we |
3598 | * already did that when we entered the node. |
3599 | * |
3600 | * Expansion nodes are handled by gist_leave_expansion. |
3601 | * |
3602 | * If the current node is an extension, then remove the element |
3603 | * in data->filters that was added by gist_enter_extension. |
3604 | * |
3605 | * If the current node is a band node, then we compute the gist of |
3606 | * the band node with respect to the intersection of the original context |
3607 | * and the intermediate filters. |
3608 | * |
3609 | * If the current node is a sequence or set node, then some of |
3610 | * the filter children may have become empty and so they are removed. |
3611 | * If only one child is left, then the set or sequence node along with |
3612 | * the single remaining child filter is removed. The filter can be |
3613 | * removed because the filters on a sequence or set node are supposed |
3614 | * to partition the incoming domain instances. |
3615 | * In principle, it should then be impossible for there to be zero |
3616 | * remaining children, but should this happen, we replace the entire |
3617 | * subtree with an empty filter. |
3618 | */ |
3619 | static __isl_give isl_schedule_node *gist_leave( |
3620 | __isl_take isl_schedule_node *node, void *user) |
3621 | { |
3622 | struct isl_node_gist_data *data = user; |
3623 | isl_schedule_tree *tree; |
3624 | int i; |
3625 | isl_size n; |
3626 | isl_union_set *filter; |
3627 | |
3628 | switch (isl_schedule_node_get_type(node)) { |
3629 | case isl_schedule_node_error: |
3630 | return isl_schedule_node_free(node); |
3631 | case isl_schedule_node_expansion: |
3632 | node = gist_leave_expansion(node, data); |
3633 | break; |
3634 | case isl_schedule_node_extension: |
3635 | case isl_schedule_node_filter: |
3636 | n = isl_union_set_list_n_union_set(list: data->filters); |
3637 | if (n < 0) |
3638 | return isl_schedule_node_free(node); |
3639 | data->filters = isl_union_set_list_drop(list: data->filters, |
3640 | first: n - 1, n: 1); |
3641 | break; |
3642 | case isl_schedule_node_band: |
3643 | n = isl_union_set_list_n_union_set(list: data->filters); |
3644 | if (n < 0) |
3645 | return isl_schedule_node_free(node); |
3646 | filter = isl_union_set_list_get_union_set(list: data->filters, index: n - 1); |
3647 | node = isl_schedule_node_band_gist(node, context: filter); |
3648 | break; |
3649 | case isl_schedule_node_set: |
3650 | case isl_schedule_node_sequence: |
3651 | tree = isl_schedule_node_get_tree(node); |
3652 | n = isl_schedule_tree_n_children(tree); |
3653 | if (n < 0) |
3654 | tree = isl_schedule_tree_free(tree); |
3655 | for (i = n - 1; i >= 0; --i) { |
3656 | isl_schedule_tree *child; |
3657 | isl_union_set *filter; |
3658 | isl_bool empty; |
3659 | |
3660 | child = isl_schedule_tree_get_child(tree, pos: i); |
3661 | filter = isl_schedule_tree_filter_get_filter(tree: child); |
3662 | empty = isl_union_set_is_empty(uset: filter); |
3663 | isl_union_set_free(uset: filter); |
3664 | isl_schedule_tree_free(tree: child); |
3665 | if (empty < 0) |
3666 | tree = isl_schedule_tree_free(tree); |
3667 | else if (empty) |
3668 | tree = isl_schedule_tree_drop_child(tree, pos: i); |
3669 | } |
3670 | n = isl_schedule_tree_n_children(tree); |
3671 | if (n < 0) |
3672 | tree = isl_schedule_tree_free(tree); |
3673 | node = isl_schedule_node_graft_tree(pos: node, tree); |
3674 | if (n == 1) { |
3675 | node = isl_schedule_node_delete(node); |
3676 | node = isl_schedule_node_delete(node); |
3677 | } else if (n == 0) { |
3678 | isl_space *space; |
3679 | |
3680 | filter = |
3681 | isl_union_set_list_get_union_set(list: data->filters, index: 0); |
3682 | space = isl_union_set_get_space(uset: filter); |
3683 | isl_union_set_free(uset: filter); |
3684 | filter = isl_union_set_empty(space); |
3685 | node = isl_schedule_node_cut(node); |
3686 | node = isl_schedule_node_insert_filter(node, filter); |
3687 | } |
3688 | break; |
3689 | case isl_schedule_node_context: |
3690 | case isl_schedule_node_domain: |
3691 | case isl_schedule_node_guard: |
3692 | case isl_schedule_node_leaf: |
3693 | case isl_schedule_node_mark: |
3694 | break; |
3695 | } |
3696 | |
3697 | return node; |
3698 | } |
3699 | |
3700 | /* Compute the gist of the subtree at "node" with respect to |
3701 | * the reaching domain elements in "context". |
3702 | * In particular, compute the gist of all band and filter nodes |
3703 | * in the subtree with respect to "context". Children of set or sequence |
3704 | * nodes that end up with an empty filter are removed completely. |
3705 | * |
3706 | * We keep track of the intersection of "context" with all outer filters |
3707 | * of the current node within the subtree in the final element of "filters". |
3708 | * Initially, this list contains the single element "context" and it is |
3709 | * extended or shortened each time we enter or leave a filter node. |
3710 | */ |
3711 | __isl_give isl_schedule_node *isl_schedule_node_gist( |
3712 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *context) |
3713 | { |
3714 | struct isl_node_gist_data data; |
3715 | |
3716 | data.n_expansion = 0; |
3717 | data.filters = isl_union_set_list_from_union_set(el: context); |
3718 | node = traverse(node, enter: &gist_enter, leave: &gist_leave, user: &data); |
3719 | isl_union_set_list_free(list: data.filters); |
3720 | return node; |
3721 | } |
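
/* Editorial example (not part of isl; disabled): simplifying the subtree
 * at "node" with respect to a context read from a string.  The statement
 * and parameter names below are assumptions for illustration only.
 */
#if 0
static __isl_give isl_schedule_node *gist_wrt_context(
	__isl_take isl_schedule_node *node)
{
	isl_union_set *context;

	context = isl_union_set_read_from_str(
		isl_schedule_node_get_ctx(node),
		"[n] -> { S[i] : 0 <= i < n }");
	return isl_schedule_node_gist(node, context);
}
#endif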
3722 | |
3723 | /* Intersect the domain of domain node "node" with "domain". |
3724 | * |
3725 | * If the domain of "node" is already a subset of "domain", |
3726 | * then nothing needs to be changed. |
3727 | * |
3728 | * Otherwise, we replace the domain of the domain node by the intersection |
3729 | * and simplify the subtree rooted at "node" with respect to this intersection. |
3730 | */ |
3731 | __isl_give isl_schedule_node *isl_schedule_node_domain_intersect_domain( |
3732 | __isl_take isl_schedule_node *node, __isl_take isl_union_set *domain) |
3733 | { |
3734 | isl_schedule_tree *tree; |
3735 | isl_union_set *uset; |
3736 | int is_subset; |
3737 | |
3738 | if (!node || !domain) |
3739 | goto error; |
3740 | |
3741 | uset = isl_schedule_tree_domain_get_domain(tree: node->tree); |
3742 | is_subset = isl_union_set_is_subset(uset1: uset, uset2: domain); |
3743 | isl_union_set_free(uset); |
3744 | if (is_subset < 0) |
3745 | goto error; |
3746 | if (is_subset) { |
3747 | isl_union_set_free(uset: domain); |
3748 | return node; |
3749 | } |
3750 | |
3751 | tree = isl_schedule_tree_copy(tree: node->tree); |
3752 | uset = isl_schedule_tree_domain_get_domain(tree); |
3753 | uset = isl_union_set_intersect(uset1: uset, uset2: domain); |
3754 | tree = isl_schedule_tree_domain_set_domain(tree, |
3755 | domain: isl_union_set_copy(uset)); |
3756 | node = isl_schedule_node_graft_tree(pos: node, tree); |
3757 | |
3758 | node = isl_schedule_node_child(node, pos: 0); |
3759 | node = isl_schedule_node_gist(node, context: uset); |
3760 | node = isl_schedule_node_parent(node); |
3761 | |
3762 | return node; |
3763 | error: |
3764 | isl_schedule_node_free(node); |
3765 | isl_union_set_free(uset: domain); |
3766 | return NULL; |
3767 | } |
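
/* Editorial example (not part of isl; disabled): restricting the domain
 * node at the root of a schedule tree to a subset of its instances and
 * letting the subtree be simplified with respect to that subset.
 * The node is assumed to be a domain node; the set description is an
 * illustrative assumption.
 */
#if 0
static __isl_give isl_schedule_node *restrict_domain_node(
	__isl_take isl_schedule_node *domain_node)
{
	isl_union_set *subset;

	subset = isl_union_set_read_from_str(
		isl_schedule_node_get_ctx(domain_node),
		"[n] -> { S[i] : 0 <= i and 2 * i < n }");
	return isl_schedule_node_domain_intersect_domain(domain_node, subset);
}
#endif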
3768 | |
3769 | /* Replace the domain of domain node "node" with the gist |
3770 | * of the original domain with respect to the parameter domain "context". |
3771 | */ |
3772 | __isl_give isl_schedule_node *isl_schedule_node_domain_gist_params( |
3773 | __isl_take isl_schedule_node *node, __isl_take isl_set *context) |
3774 | { |
3775 | isl_union_set *domain; |
3776 | isl_schedule_tree *tree; |
3777 | |
3778 | if (!node || !context) |
3779 | goto error; |
3780 | |
3781 | tree = isl_schedule_tree_copy(tree: node->tree); |
3782 | domain = isl_schedule_tree_domain_get_domain(tree: node->tree); |
3783 | domain = isl_union_set_gist_params(uset: domain, set: context); |
3784 | tree = isl_schedule_tree_domain_set_domain(tree, domain); |
3785 | node = isl_schedule_node_graft_tree(pos: node, tree); |
3786 | |
3787 | return node; |
3788 | error: |
3789 | isl_schedule_node_free(node); |
3790 | isl_set_free(set: context); |
3791 | return NULL; |
3792 | } |
3793 | |
3794 | /* Internal data structure for isl_schedule_node_get_subtree_expansion. |
3795 | * "expansions" contains a list of accumulated expansions |
3796 | * for each outer expansion, set or sequence node. The first element |
3797 | * in the list is an identity mapping on the reaching domain elements. |
3798 | * "res" collects the results. |
3799 | */ |
3800 | struct isl_subtree_expansion_data { |
3801 | isl_union_map_list *expansions; |
3802 | isl_union_map *res; |
3803 | }; |
3804 | |
3805 | /* Callback for "traverse" to enter a node and to move |
3806 | * to the deepest initial subtree that should be traversed |
3807 | * by isl_schedule_node_get_subtree_expansion. |
3808 | * |
3809 | * Whenever we come across an expansion node, the last element |
3810 | * of data->expansions is combined with the expansion |
3811 | * on the expansion node. |
3812 | * |
3813 | * Whenever we come across a filter node that is the child |
3814 | * of a set or sequence node, data->expansions is extended |
3815 | * with a new element that restricts the previous element |
3816 | * to the elements selected by the filter. |
3817 | * The previous element can then be reused while backtracking. |
3818 | */ |
3819 | static __isl_give isl_schedule_node *subtree_expansion_enter( |
3820 | __isl_take isl_schedule_node *node, void *user) |
3821 | { |
3822 | struct isl_subtree_expansion_data *data = user; |
3823 | |
3824 | do { |
3825 | enum isl_schedule_node_type type; |
3826 | isl_union_set *filter; |
3827 | isl_union_map *inner, *expansion; |
3828 | isl_size n; |
3829 | |
3830 | switch (isl_schedule_node_get_type(node)) { |
3831 | case isl_schedule_node_error: |
3832 | return isl_schedule_node_free(node); |
3833 | case isl_schedule_node_filter: |
3834 | type = isl_schedule_node_get_parent_type(node); |
3835 | if (type != isl_schedule_node_set && |
3836 | type != isl_schedule_node_sequence) |
3837 | break; |
3838 | filter = isl_schedule_node_filter_get_filter(node); |
3839 | n = isl_union_map_list_n_union_map(list: data->expansions); |
3840 | if (n < 0) |
3841 | data->expansions = |
3842 | isl_union_map_list_free(list: data->expansions); |
3843 | inner = |
3844 | isl_union_map_list_get_union_map(list: data->expansions, |
3845 | index: n - 1); |
3846 | inner = isl_union_map_intersect_range(umap: inner, uset: filter); |
3847 | data->expansions = |
3848 | isl_union_map_list_add(list: data->expansions, el: inner); |
3849 | break; |
3850 | case isl_schedule_node_expansion: |
3851 | n = isl_union_map_list_n_union_map(list: data->expansions); |
3852 | if (n < 0) |
3853 | data->expansions = |
3854 | isl_union_map_list_free(list: data->expansions); |
3855 | expansion = |
3856 | isl_schedule_node_expansion_get_expansion(node); |
3857 | inner = |
3858 | isl_union_map_list_get_union_map(list: data->expansions, |
3859 | index: n - 1); |
3860 | inner = isl_union_map_apply_range(umap1: inner, umap2: expansion); |
3861 | data->expansions = |
3862 | isl_union_map_list_set_union_map(list: data->expansions, |
3863 | index: n - 1, el: inner); |
3864 | break; |
3865 | case isl_schedule_node_band: |
3866 | case isl_schedule_node_context: |
3867 | case isl_schedule_node_domain: |
3868 | case isl_schedule_node_extension: |
3869 | case isl_schedule_node_guard: |
3870 | case isl_schedule_node_leaf: |
3871 | case isl_schedule_node_mark: |
3872 | case isl_schedule_node_sequence: |
3873 | case isl_schedule_node_set: |
3874 | break; |
3875 | } |
3876 | } while (isl_schedule_node_has_children(node) && |
3877 | (node = isl_schedule_node_first_child(node)) != NULL); |
3878 | |
3879 | return node; |
3880 | } |
3881 | |
3882 | /* Callback for "traverse" to leave a node for |
3883 | * isl_schedule_node_get_subtree_expansion. |
3884 | * |
3885 | * If we come across a filter node that is the child |
3886 | * of a set or sequence node, then we remove the element |
3887 | * of data->expansions that was added in subtree_expansion_enter. |
3888 | * |
3889 | * If we reach a leaf node, then the accumulated expansion is |
3890 | * added to data->res. |
3891 | */ |
3892 | static __isl_give isl_schedule_node *subtree_expansion_leave( |
3893 | __isl_take isl_schedule_node *node, void *user) |
3894 | { |
3895 | struct isl_subtree_expansion_data *data = user; |
3896 | isl_size n; |
3897 | isl_union_map *inner; |
3898 | enum isl_schedule_node_type type; |
3899 | |
3900 | switch (isl_schedule_node_get_type(node)) { |
3901 | case isl_schedule_node_error: |
3902 | return isl_schedule_node_free(node); |
3903 | case isl_schedule_node_filter: |
3904 | type = isl_schedule_node_get_parent_type(node); |
3905 | if (type != isl_schedule_node_set && |
3906 | type != isl_schedule_node_sequence) |
3907 | break; |
3908 | n = isl_union_map_list_n_union_map(list: data->expansions); |
3909 | if (n < 0) |
3910 | data->expansions = |
3911 | isl_union_map_list_free(list: data->expansions); |
3912 | data->expansions = isl_union_map_list_drop(list: data->expansions, |
3913 | first: n - 1, n: 1); |
3914 | break; |
3915 | case isl_schedule_node_leaf: |
3916 | n = isl_union_map_list_n_union_map(list: data->expansions); |
3917 | if (n < 0) |
3918 | data->expansions = |
3919 | isl_union_map_list_free(list: data->expansions); |
3920 | inner = isl_union_map_list_get_union_map(list: data->expansions, |
3921 | index: n - 1); |
3922 | data->res = isl_union_map_union(umap1: data->res, umap2: inner); |
3923 | break; |
3924 | case isl_schedule_node_band: |
3925 | case isl_schedule_node_context: |
3926 | case isl_schedule_node_domain: |
3927 | case isl_schedule_node_expansion: |
3928 | case isl_schedule_node_extension: |
3929 | case isl_schedule_node_guard: |
3930 | case isl_schedule_node_mark: |
3931 | case isl_schedule_node_sequence: |
3932 | case isl_schedule_node_set: |
3933 | break; |
3934 | } |
3935 | |
3936 | return node; |
3937 | } |
3938 | |
3939 | /* Return a mapping from the domain elements that reach "node" |
3940 | * to the corresponding domain elements in the leaves of the subtree |
3941 | * rooted at "node" obtained by composing the intermediate expansions. |
3942 | * |
3943 | * We start out with an identity mapping between the domain elements |
3944 | * that reach "node" and compose it with all the expansions |
3945 | * on a path from "node" to a leaf while traversing the subtree. |
3946 | * Within the children of a sequence or set node, the |
3947 | * accumulated expansion is restricted to the elements selected |
3948 | * by the filter child. |
3949 | */ |
3950 | __isl_give isl_union_map *isl_schedule_node_get_subtree_expansion( |
3951 | __isl_keep isl_schedule_node *node) |
3952 | { |
3953 | struct isl_subtree_expansion_data data; |
3954 | isl_space *space; |
3955 | isl_union_set *domain; |
3956 | isl_union_map *expansion; |
3957 | |
3958 | if (!node) |
3959 | return NULL; |
3960 | |
3961 | domain = isl_schedule_node_get_universe_domain(node); |
3962 | space = isl_union_set_get_space(uset: domain); |
3963 | expansion = isl_union_set_identity(uset: domain); |
3964 | data.res = isl_union_map_empty(space); |
3965 | data.expansions = isl_union_map_list_from_union_map(el: expansion); |
3966 | |
3967 | node = isl_schedule_node_copy(node); |
3968 | node = traverse(node, enter: &subtree_expansion_enter, |
3969 | leave: &subtree_expansion_leave, user: &data); |
3970 | if (!node) |
3971 | data.res = isl_union_map_free(umap: data.res); |
3972 | isl_schedule_node_free(node); |
3973 | |
3974 | isl_union_map_list_free(list: data.expansions); |
3975 | |
3976 | return data.res; |
3977 | } |
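
/* Editorial example (not part of isl; disabled): printing the mapping
 * from the domain elements that reach "node" to the corresponding
 * leaf-level elements below it.
 */
#if 0
static void dump_subtree_expansion(__isl_keep isl_schedule_node *node)
{
	isl_union_map *expansion;

	expansion = isl_schedule_node_get_subtree_expansion(node);
	isl_union_map_dump(expansion);
	isl_union_map_free(expansion);
}
#endif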
3978 | |
3979 | /* Internal data structure for isl_schedule_node_get_subtree_contraction. |
3980 | * "contractions" contains a list of accumulated contractions |
3981 | * for each outer expansion, set or sequence node. The first element |
3982 | * in the list is an identity mapping on the reaching domain elements. |
3983 | * "res" collects the results. |
3984 | */ |
3985 | struct isl_subtree_contraction_data { |
3986 | isl_union_pw_multi_aff_list *contractions; |
3987 | isl_union_pw_multi_aff *res; |
3988 | }; |
3989 | |
3990 | /* Callback for "traverse" to enter a node and to move |
3991 | * to the deepest initial subtree that should be traversed |
3992 | * by isl_schedule_node_get_subtree_contraction. |
3993 | * |
3994 | * Whenever we come across an expansion node, the last element |
3995 | * of data->contractions is combined with the contraction |
3996 | * on the expansion node. |
3997 | * |
3998 | * Whenever we come across a filter node that is the child |
3999 | * of a set or sequence node, data->contractions is extended |
4000 | * with a new element that restricts the previous element |
4001 | * to the elements selected by the filter. |
4002 | * The previous element can then be reused while backtracking. |
4003 | */ |
4004 | static __isl_give isl_schedule_node *subtree_contraction_enter( |
4005 | __isl_take isl_schedule_node *node, void *user) |
4006 | { |
4007 | struct isl_subtree_contraction_data *data = user; |
4008 | |
4009 | do { |
4010 | enum isl_schedule_node_type type; |
4011 | isl_union_set *filter; |
4012 | isl_union_pw_multi_aff *inner, *contraction; |
4013 | isl_size n; |
4014 | |
4015 | switch (isl_schedule_node_get_type(node)) { |
4016 | case isl_schedule_node_error: |
4017 | return isl_schedule_node_free(node); |
4018 | case isl_schedule_node_filter: |
4019 | type = isl_schedule_node_get_parent_type(node); |
4020 | if (type != isl_schedule_node_set && |
4021 | type != isl_schedule_node_sequence) |
4022 | break; |
4023 | filter = isl_schedule_node_filter_get_filter(node); |
4024 | n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff( |
4025 | list: data->contractions); |
4026 | if (n < 0) |
4027 | data->contractions = |
4028 | isl_union_pw_multi_aff_list_free( |
4029 | list: data->contractions); |
4030 | inner = |
4031 | isl_union_pw_multi_aff_list_get_union_pw_multi_aff( |
4032 | list: data->contractions, index: n - 1); |
4033 | inner = isl_union_pw_multi_aff_intersect_domain(upma: inner, |
4034 | uset: filter); |
4035 | data->contractions = |
4036 | isl_union_pw_multi_aff_list_add(list: data->contractions, |
4037 | el: inner); |
4038 | break; |
4039 | case isl_schedule_node_expansion: |
4040 | n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff( |
4041 | list: data->contractions); |
4042 | if (n < 0) |
4043 | data->contractions = |
4044 | isl_union_pw_multi_aff_list_free( |
4045 | list: data->contractions); |
4046 | contraction = |
4047 | isl_schedule_node_expansion_get_contraction(node); |
4048 | inner = |
4049 | isl_union_pw_multi_aff_list_get_union_pw_multi_aff( |
4050 | list: data->contractions, index: n - 1); |
4051 | inner = |
4052 | isl_union_pw_multi_aff_pullback_union_pw_multi_aff( |
4053 | upma1: inner, upma2: contraction); |
4054 | data->contractions = |
4055 | isl_union_pw_multi_aff_list_set_union_pw_multi_aff( |
4056 | list: data->contractions, index: n - 1, el: inner); |
4057 | break; |
4058 | case isl_schedule_node_band: |
4059 | case isl_schedule_node_context: |
4060 | case isl_schedule_node_domain: |
4061 | case isl_schedule_node_extension: |
4062 | case isl_schedule_node_guard: |
4063 | case isl_schedule_node_leaf: |
4064 | case isl_schedule_node_mark: |
4065 | case isl_schedule_node_sequence: |
4066 | case isl_schedule_node_set: |
4067 | break; |
4068 | } |
4069 | } while (isl_schedule_node_has_children(node) && |
4070 | (node = isl_schedule_node_first_child(node)) != NULL); |
4071 | |
4072 | return node; |
4073 | } |
4074 | |
4075 | /* Callback for "traverse" to leave a node for |
4076 | * isl_schedule_node_get_subtree_contraction. |
4077 | * |
4078 | * If we come across a filter node that is the child |
4079 | * of a set or sequence node, then we remove the element |
4080 | * of data->contractions that was added in subtree_contraction_enter. |
4081 | * |
4082 | * If we reach a leaf node, then the accumulated contraction is |
4083 | * added to data->res. |
4084 | */ |
4085 | static __isl_give isl_schedule_node *subtree_contraction_leave( |
4086 | __isl_take isl_schedule_node *node, void *user) |
4087 | { |
4088 | struct isl_subtree_contraction_data *data = user; |
4089 | isl_size n; |
4090 | isl_union_pw_multi_aff *inner; |
4091 | enum isl_schedule_node_type type; |
4092 | |
4093 | switch (isl_schedule_node_get_type(node)) { |
4094 | case isl_schedule_node_error: |
4095 | return isl_schedule_node_free(node); |
4096 | case isl_schedule_node_filter: |
4097 | type = isl_schedule_node_get_parent_type(node); |
4098 | if (type != isl_schedule_node_set && |
4099 | type != isl_schedule_node_sequence) |
4100 | break; |
4101 | n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff( |
4102 | list: data->contractions); |
4103 | if (n < 0) |
4104 | data->contractions = isl_union_pw_multi_aff_list_free( |
4105 | list: data->contractions); |
4106 | data->contractions = |
4107 | isl_union_pw_multi_aff_list_drop(list: data->contractions, |
4108 | first: n - 1, n: 1); |
4109 | break; |
4110 | case isl_schedule_node_leaf: |
4111 | n = isl_union_pw_multi_aff_list_n_union_pw_multi_aff( |
4112 | list: data->contractions); |
4113 | if (n < 0) |
4114 | data->contractions = isl_union_pw_multi_aff_list_free( |
4115 | list: data->contractions); |
4116 | inner = isl_union_pw_multi_aff_list_get_union_pw_multi_aff( |
4117 | list: data->contractions, index: n - 1); |
4118 | data->res = isl_union_pw_multi_aff_union_add(upma1: data->res, upma2: inner); |
4119 | break; |
4120 | case isl_schedule_node_band: |
4121 | case isl_schedule_node_context: |
4122 | case isl_schedule_node_domain: |
4123 | case isl_schedule_node_expansion: |
4124 | case isl_schedule_node_extension: |
4125 | case isl_schedule_node_guard: |
4126 | case isl_schedule_node_mark: |
4127 | case isl_schedule_node_sequence: |
4128 | case isl_schedule_node_set: |
4129 | break; |
4130 | } |
4131 | |
4132 | return node; |
4133 | } |
4134 | |
4135 | /* Return a mapping from the domain elements in the leaves of the subtree |
4136 | * rooted at "node" to the corresponding domain elements that reach "node" |
4137 | * obtained by composing the intermediate contractions. |
4138 | * |
4139 | * We start out with an identity mapping between the domain elements |
4140 | * that reach "node" and compose it with all the contractions |
4141 | * on a path from "node" to a leaf while traversing the subtree. |
4142 | * Within the children of a sequence or set node, the |
4143 | * accumulated contraction is restricted to the elements selected |
4144 | * by the filter child. |
4145 | */ |
4146 | __isl_give isl_union_pw_multi_aff *isl_schedule_node_get_subtree_contraction( |
4147 | __isl_keep isl_schedule_node *node) |
4148 | { |
4149 | struct isl_subtree_contraction_data data; |
4150 | isl_space *space; |
4151 | isl_union_set *domain; |
4152 | isl_union_pw_multi_aff *contraction; |
4153 | |
4154 | if (!node) |
4155 | return NULL; |
4156 | |
4157 | domain = isl_schedule_node_get_universe_domain(node); |
4158 | space = isl_union_set_get_space(uset: domain); |
4159 | contraction = isl_union_set_identity_union_pw_multi_aff(uset: domain); |
4160 | data.res = isl_union_pw_multi_aff_empty(space); |
4161 | data.contractions = |
4162 | isl_union_pw_multi_aff_list_from_union_pw_multi_aff(el: contraction); |
4163 | |
4164 | node = isl_schedule_node_copy(node); |
4165 | node = traverse(node, enter: &subtree_contraction_enter, |
4166 | leave: &subtree_contraction_leave, user: &data); |
4167 | if (!node) |
4168 | data.res = isl_union_pw_multi_aff_free(upma: data.res); |
4169 | isl_schedule_node_free(node); |
4170 | |
4171 | isl_union_pw_multi_aff_list_free(list: data.contractions); |
4172 | |
4173 | return data.res; |
4174 | } |
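
/* Editorial example (not part of isl; disabled): composing the subtree
 * expansion with the subtree contraction.  Since the contraction maps
 * leaf-level elements back to the elements that reach "node", the
 * composition relates each reaching element to itself, possibly
 * restricted by intermediate filters.
 */
#if 0
static __isl_give isl_union_map *expand_then_contract(
	__isl_keep isl_schedule_node *node)
{
	isl_union_map *expansion, *contraction;

	expansion = isl_schedule_node_get_subtree_expansion(node);
	contraction = isl_union_map_from_union_pw_multi_aff(
		isl_schedule_node_get_subtree_contraction(node));
	return isl_union_map_apply_range(expansion, contraction);
}
#endif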
4175 | |
4176 | /* Do the nearest "n" ancestors of "node" have the types given in "types" |
4177 | * (starting at the parent of "node")? |
4178 | */ |
4179 | static isl_bool has_ancestors(__isl_keep isl_schedule_node *node, |
4180 | int n, enum isl_schedule_node_type *types) |
4181 | { |
4182 | int i; |
4183 | isl_size n_ancestor; |
4184 | |
4185 | if (!node) |
4186 | return isl_bool_error; |
4187 | |
4188 | n_ancestor = isl_schedule_tree_list_n_schedule_tree(list: node->ancestors); |
4189 | if (n_ancestor < 0) |
4190 | return isl_bool_error; |
4191 | if (n_ancestor < n) |
4192 | return isl_bool_false; |
4193 | |
4194 | for (i = 0; i < n; ++i) { |
4195 | isl_schedule_tree *tree; |
4196 | int correct_type; |
4197 | |
4198 | tree = isl_schedule_tree_list_get_schedule_tree(list: node->ancestors, |
4199 | index: n_ancestor - 1 - i); |
4200 | if (!tree) |
4201 | return isl_bool_error; |
4202 | correct_type = isl_schedule_tree_get_type(tree) == types[i]; |
4203 | isl_schedule_tree_free(tree); |
4204 | if (!correct_type) |
4205 | return isl_bool_false; |
4206 | } |
4207 | |
4208 | return isl_bool_true; |
4209 | } |
4210 | |
4211 | /* Given a node "node" that appears in an extension (i.e., it is the child |
4212 | * of a filter in a sequence inside an extension node), are the spaces |
4213 | * of the extension specified by "extension" disjoint from those |
4214 | * of both the original extension and the domain elements that reach |
4215 | * that original extension? |
4216 | */ |
4217 | static isl_bool is_disjoint_extension(__isl_keep isl_schedule_node *node, |
4218 | __isl_keep isl_union_map *extension) |
4219 | { |
4220 | isl_union_map *old; |
4221 | isl_union_set *domain; |
4222 | isl_bool empty; |
4223 | |
4224 | node = isl_schedule_node_copy(node); |
4225 | node = isl_schedule_node_ancestor(node, generation: 3); |
4226 | old = isl_schedule_node_extension_get_extension(node); |
4227 | domain = isl_schedule_node_get_universe_domain(node); |
4228 | isl_schedule_node_free(node); |
4229 | old = isl_union_map_universe(umap: old); |
4230 | domain = isl_union_set_union(uset1: domain, uset2: isl_union_map_range(umap: old)); |
4231 | extension = isl_union_map_copy(umap: extension); |
4232 | extension = isl_union_map_intersect_range(umap: extension, uset: domain); |
4233 | empty = isl_union_map_is_empty(umap: extension); |
4234 | isl_union_map_free(umap: extension); |
4235 | |
4236 | return empty; |
4237 | } |
4238 | |
4239 | /* Given a node "node" that is governed by an extension node, extend |
4240 | * that extension node with "extension". |
4241 | * |
4242 | * In particular, "node" is the child of a filter in a sequence that |
4243 | * is in turn a child of an extension node. Extend that extension node |
4244 | * with "extension". |
4245 | * |
4246 | * Return a pointer to the parent of the original node (i.e., a filter). |
4247 | */ |
4248 | static __isl_give isl_schedule_node *extend_extension( |
4249 | __isl_take isl_schedule_node *node, __isl_take isl_union_map *extension) |
4250 | { |
4251 | isl_size pos; |
4252 | isl_bool disjoint; |
4253 | isl_union_map *node_extension; |
4254 | |
4255 | node = isl_schedule_node_parent(node); |
4256 | pos = isl_schedule_node_get_child_position(node); |
4257 | if (pos < 0) |
4258 | node = isl_schedule_node_free(node); |
4259 | node = isl_schedule_node_grandparent(node); |
4260 | node_extension = isl_schedule_node_extension_get_extension(node); |
4261 | disjoint = isl_union_map_is_disjoint(umap1: extension, umap2: node_extension); |
4262 | extension = isl_union_map_union(umap1: extension, umap2: node_extension); |
4263 | node = isl_schedule_node_extension_set_extension(node, extension); |
4264 | node = isl_schedule_node_grandchild(node, pos1: 0, pos2: pos); |
4265 | |
4266 | if (disjoint < 0) |
4267 | return isl_schedule_node_free(node); |
4268 | if (!node) |
4269 | return NULL; |
4270 | if (!disjoint) |
4271 | isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid, |
4272 | "extension domain should be disjoint from earlier " |
4273 | "extensions", return isl_schedule_node_free(node)); |
4274 | |
4275 | return node; |
4276 | } |
4277 | |
4278 | /* Return the universe of "uset" if this universe is disjoint from "ref". |
4279 | * Otherwise, return "uset". |
4280 | * |
4281 | * Also check if "uset" itself is disjoint from "ref", reporting |
4282 | * an error if it is not. |
4283 | */ |
4284 | static __isl_give isl_union_set *replace_by_universe_if_disjoint( |
4285 | __isl_take isl_union_set *uset, __isl_keep isl_union_set *ref) |
4286 | { |
4287 | int disjoint; |
4288 | isl_union_set *universe; |
4289 | |
4290 | disjoint = isl_union_set_is_disjoint(uset1: uset, uset2: ref); |
4291 | if (disjoint < 0) |
4292 | return isl_union_set_free(uset); |
4293 | if (!disjoint) |
4294 | isl_die(isl_union_set_get_ctx(uset), isl_error_invalid, |
4295 | "extension domain should be disjoint from " |
4296 | "current domain", return isl_union_set_free(uset)); |
4297 | |
4298 | universe = isl_union_set_universe(uset: isl_union_set_copy(uset)); |
4299 | disjoint = isl_union_set_is_disjoint(uset1: universe, uset2: ref); |
4300 | if (disjoint >= 0 && disjoint) { |
4301 | isl_union_set_free(uset); |
4302 | return universe; |
4303 | } |
4304 | isl_union_set_free(uset: universe); |
4305 | |
4306 | if (disjoint < 0) |
4307 | return isl_union_set_free(uset); |
4308 | return uset; |
4309 | } |

/* Insert an extension node on top of "node" with extension "extension".
 * In addition, insert a filter that separates node from the extension
 * between the extension node and "node".
 * Return a pointer to the inserted filter node.
 *
 * If "node" already appears in an extension (i.e., if it is the child
 * of a filter in a sequence inside an extension node), then extend that
 * extension with "extension" instead.
 * In this case, a pointer to the original filter node is returned.
 * Note that if some of the elements in the new extension live in the
 * same space as those of the original extension or the domain elements
 * reaching the original extension, then we insert a new extension anyway.
 * If we were to extend the existing extension in that case, we would
 * have to adjust the filters in the sequence child of the extension
 * to ensure that the elements in the new extension are filtered out.
 */
static __isl_give isl_schedule_node *insert_extension(
	__isl_take isl_schedule_node *node, __isl_take isl_union_map *extension)
{
	enum isl_schedule_node_type ancestors[] =
		{ isl_schedule_node_filter, isl_schedule_node_sequence,
		  isl_schedule_node_extension };
	isl_union_set *domain;
	isl_union_set *filter;
	isl_bool in_ext;

	in_ext = has_ancestors(node, 3, ancestors);
	if (in_ext < 0)
		goto error;
	if (in_ext) {
		isl_bool disjoint;

		disjoint = is_disjoint_extension(node, extension);
		if (disjoint < 0)
			goto error;
		if (disjoint)
			return extend_extension(node, extension);
	}

	filter = isl_schedule_node_get_domain(node);
	domain = isl_union_map_range(isl_union_map_copy(extension));
	filter = replace_by_universe_if_disjoint(filter, domain);
	isl_union_set_free(domain);

	node = isl_schedule_node_insert_filter(node, filter);
	node = isl_schedule_node_insert_extension(node, extension);
	node = isl_schedule_node_child(node, 0);
	return node;
error:
	isl_schedule_node_free(node);
	isl_union_map_free(extension);
	return NULL;
}

/* Replace the subtree that "node" points to by "tree" (which has
 * a sequence root with two children), except if the parent of "node"
 * is a sequence as well, in which case "tree" is spliced at the position
 * of "node" in its parent.
 * Return a pointer to the child of the "tree_pos" (filter) child of "tree"
 * in the updated schedule tree.
 */
static __isl_give isl_schedule_node *graft_or_splice(
	__isl_take isl_schedule_node *node, __isl_take isl_schedule_tree *tree,
	int tree_pos)
{
	isl_size pos;

	if (isl_schedule_node_get_parent_type(node) ==
	    isl_schedule_node_sequence) {
		pos = isl_schedule_node_get_child_position(node);
		if (pos < 0)
			node = isl_schedule_node_free(node);
		node = isl_schedule_node_parent(node);
		node = isl_schedule_node_sequence_splice(node, pos, tree);
	} else {
		pos = 0;
		node = isl_schedule_node_graft_tree(node, tree);
	}
	node = isl_schedule_node_grandchild(node, pos + tree_pos, 0);

	return node;
}

/* Insert a node "graft" into the schedule tree of "node" such that it
 * is executed before (if "before" is set) or after (if "before" is not set)
 * the node that "node" points to.
 * The root of "graft" is an extension node.
 * Return a pointer to the node that "node" pointed to.
 *
 * We first insert an extension node on top of "node" (or extend
 * the extension node if there already is one), with a filter on "node"
 * separating it from the extension.
 * We then insert a filter in the graft to separate it from the original
 * domain elements and combine the original and new tree in a sequence.
 * If we have extended an extension node, then the children of this
 * sequence are spliced in the sequence of the extended extension
 * at the position where "node" appears in the original extension.
 * Otherwise, the sequence pair is attached to the new extension node.
 */
static __isl_give isl_schedule_node *graft_extension(
	__isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
	int before)
{
	isl_union_map *extension;
	isl_union_set *graft_domain;
	isl_union_set *node_domain;
	isl_schedule_tree *tree, *tree_graft;

	extension = isl_schedule_node_extension_get_extension(graft);
	graft_domain = isl_union_map_range(isl_union_map_copy(extension));
	node_domain = isl_schedule_node_get_universe_domain(node);
	node = insert_extension(node, extension);

	graft_domain = replace_by_universe_if_disjoint(graft_domain,
							node_domain);
	isl_union_set_free(node_domain);

	tree = isl_schedule_node_get_tree(node);
	if (!isl_schedule_node_has_children(graft)) {
		tree_graft = isl_schedule_tree_from_filter(graft_domain);
	} else {
		graft = isl_schedule_node_child(graft, 0);
		tree_graft = isl_schedule_node_get_tree(graft);
		tree_graft = isl_schedule_tree_insert_filter(tree_graft,
								graft_domain);
	}
	if (before)
		tree = isl_schedule_tree_sequence_pair(tree_graft, tree);
	else
		tree = isl_schedule_tree_sequence_pair(tree, tree_graft);
	node = graft_or_splice(node, tree, before);

	isl_schedule_node_free(graft);

	return node;
}
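
/* Schematically (an editorial sketch, not generated by isl itself), when a
 * fresh extension node is inserted by graft_extension and "before" is set,
 * the tree around the original position of "node" ends up as follows,
 * with the two filters separating the grafted domain elements from the
 * original ones (either filter may be simplified to its universe when
 * the spaces involved are disjoint):
 *
 *	extension (extension of "graft")
 *	    |
 *	sequence
 *	    |-----------------------------|
 *	filter ("graft" domain)       filter (original domain)
 *	    |                             |
 *	child of "graft"              original subtree at "node"
 *
 * When "before" is not set, the order of the two filters is reversed.
 */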

/* Replace the root domain node of "node" by an extension node suitable
 * for insertion at "pos".
 * That is, create an extension node that maps the outer band nodes
 * at "pos" to the domain of the root node of "node" and attach
 * the child of this root node to the extension node.
 */
static __isl_give isl_schedule_node *extension_from_domain(
	__isl_take isl_schedule_node *node, __isl_keep isl_schedule_node *pos)
{
	isl_union_set *universe;
	isl_union_set *domain;
	isl_union_map *ext;
	isl_size depth;
	isl_bool anchored;
	isl_space *space;
	isl_schedule_node *res;
	isl_schedule_tree *tree;

	depth = isl_schedule_node_get_schedule_depth(pos);
	anchored = isl_schedule_node_is_subtree_anchored(node);
	if (depth < 0 || anchored < 0)
		return isl_schedule_node_free(node);
	if (anchored)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_unsupported,
			"cannot graft anchored tree with domain root",
			return isl_schedule_node_free(node));

	domain = isl_schedule_node_domain_get_domain(node);
	space = isl_union_set_get_space(domain);
	space = isl_space_set_from_params(space);
	space = isl_space_add_dims(space, isl_dim_set, depth);
	universe = isl_union_set_from_set(isl_set_universe(space));
	ext = isl_union_map_from_domain_and_range(universe, domain);
	res = isl_schedule_node_from_extension(ext);
	node = isl_schedule_node_child(node, 0);
	if (!node)
		return isl_schedule_node_free(res);
	if (!isl_schedule_tree_is_leaf(node->tree)) {
		tree = isl_schedule_node_get_tree(node);
		res = isl_schedule_node_child(res, 0);
		res = isl_schedule_node_graft_tree(res, tree);
		res = isl_schedule_node_parent(res);
	}
	isl_schedule_node_free(node);

	return res;
}

/* Insert a node "graft" into the schedule tree of "node" such that it
 * is executed before (if "before" is set) or after (if "before" is not set)
 * the node that "node" points to.
 * The root of "graft" may be either a domain or an extension node.
 * In the latter case, the domain of the extension needs to correspond
 * to the outer band nodes of "node".
 * The elements of the domain or the range of the extension may not
 * intersect with the domain elements that reach "node".
 * The schedule tree of "graft" may not be anchored.
 *
 * The schedule tree of "node" is modified to include an extension node
 * corresponding to the root node of "graft" as a child of the original
 * parent of "node". The original node that "node" points to and the
 * child of the root node of "graft" are attached to this extension node
 * through a sequence, with appropriate filters and with the child
 * of "graft" appearing before or after the original "node".
 *
 * If "node" already appears inside a sequence that is the child of
 * an extension node and if the spaces of the new domain elements
 * do not overlap with those of the original domain elements,
 * then that extension node is extended with the new extension
 * rather than introducing a new segment of extension and sequence nodes.
 *
 * Return a pointer to the same node in the modified tree that
 * "node" pointed to in the original tree.
 */
static __isl_give isl_schedule_node *isl_schedule_node_graft_before_or_after(
	__isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft,
	int before)
{
	if (!node || !graft)
		goto error;
	if (check_insert(node) < 0)
		goto error;

	if (isl_schedule_node_get_type(graft) == isl_schedule_node_domain)
		graft = extension_from_domain(graft, node);

	if (!graft)
		goto error;
	if (isl_schedule_node_get_type(graft) != isl_schedule_node_extension)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"expecting domain or extension as root of graft",
			goto error);

	return graft_extension(node, graft, before);
error:
	isl_schedule_node_free(node);
	isl_schedule_node_free(graft);
	return NULL;
}

/* Insert a node "graft" into the schedule tree of "node" such that it
 * is executed before the node that "node" points to.
 * The root of "graft" may be either a domain or an extension node.
 * In the latter case, the domain of the extension needs to correspond
 * to the outer band nodes of "node".
 * The elements of the domain or the range of the extension may not
 * intersect with the domain elements that reach "node".
 * The schedule tree of "graft" may not be anchored.
 *
 * Return a pointer to the same node in the modified tree that
 * "node" pointed to in the original tree.
 */
__isl_give isl_schedule_node *isl_schedule_node_graft_before(
	__isl_take isl_schedule_node *node, __isl_take isl_schedule_node *graft)
{
	return isl_schedule_node_graft_before_or_after(node, graft, 1);
}
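
/* Example (editorial usage sketch, not part of isl itself): graft the
 * statement instance set { B[] } before the instances of { A[i] } in a
 * freshly constructed schedule. The graft is rooted at a domain node,
 * so it is converted to an extension internally.
 *
 *	#include <stdio.h>
 *	#include <stdlib.h>
 *	#include <isl/ctx.h>
 *	#include <isl/union_set.h>
 *	#include <isl/schedule_node.h>
 *
 *	int main(void)
 *	{
 *		isl_ctx *ctx = isl_ctx_alloc();
 *		isl_union_set *domain, *extra;
 *		isl_schedule_node *node, *graft;
 *		char *str;
 *
 *		domain = isl_union_set_read_from_str(ctx,
 *			"{ A[i] : 0 <= i < 10 }");
 *		node = isl_schedule_node_from_domain(domain);
 *		node = isl_schedule_node_child(node, 0);
 *
 *		extra = isl_union_set_read_from_str(ctx, "{ B[] }");
 *		graft = isl_schedule_node_from_domain(extra);
 *
 *		node = isl_schedule_node_graft_before(node, graft);
 *
 *		str = isl_schedule_node_to_str(node);
 *		printf("%s\n", str);
 *		free(str);
 *		isl_schedule_node_free(node);
 *		isl_ctx_free(ctx);
 *		return 0;
 *	}
 */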

/* Insert a node "graft" into the schedule tree of "node" such that it
 * is executed after the node that "node" points to.
 * The root of "graft" may be either a domain or an extension node.
 * In the latter case, the domain of the extension needs to correspond
 * to the outer band nodes of "node".
 * The elements of the domain or the range of the extension may not
 * intersect with the domain elements that reach "node".
 * The schedule tree of "graft" may not be anchored.
 *
 * Return a pointer to the same node in the modified tree that
 * "node" pointed to in the original tree.
 */
__isl_give isl_schedule_node *isl_schedule_node_graft_after(
	__isl_take isl_schedule_node *node,
	__isl_take isl_schedule_node *graft)
{
	return isl_schedule_node_graft_before_or_after(node, graft, 0);
}
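
/* Example (editorial usage sketch, not part of isl itself): graft an
 * extension-rooted node after a position below a band node. The domain
 * of the extension refers to the single outer band dimension, so each
 * B[i] instance is executed after the corresponding band iteration.
 * The function name "add_epilogue" is only illustrative.
 *
 *	#include <isl/ctx.h>
 *	#include <isl/aff.h>
 *	#include <isl/union_set.h>
 *	#include <isl/union_map.h>
 *	#include <isl/schedule_node.h>
 *
 *	static isl_schedule_node *add_epilogue(isl_ctx *ctx)
 *	{
 *		isl_union_set *domain;
 *		isl_multi_union_pw_aff *mupa;
 *		isl_union_map *ext;
 *		isl_schedule_node *node, *graft;
 *
 *		domain = isl_union_set_read_from_str(ctx,
 *			"{ A[i] : 0 <= i < 10 }");
 *		node = isl_schedule_node_from_domain(domain);
 *		node = isl_schedule_node_child(node, 0);
 *		mupa = isl_multi_union_pw_aff_read_from_str(ctx,
 *			"[{ A[i] -> [i] }]");
 *		node = isl_schedule_node_insert_partial_schedule(node, mupa);
 *		node = isl_schedule_node_child(node, 0);
 *
 *		ext = isl_union_map_read_from_str(ctx,
 *			"{ [i] -> B[i] : 0 <= i < 10 }");
 *		graft = isl_schedule_node_from_extension(ext);
 *		return isl_schedule_node_graft_after(node, graft);
 *	}
 */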

/* Split the domain elements that reach "node" into those that satisfy
 * "filter" and those that do not. Arrange for the first subset to be
 * executed before or after the second subset, depending on the value
 * of "before".
 * Return a pointer to the tree corresponding to the second subset,
 * except when this subset is empty in which case the original pointer
 * is returned.
 * If both subsets are non-empty, then a sequence node is introduced
 * to impose the order. If the grandparent of the original node was
 * itself a sequence, then the original child is replaced by two children
 * in this sequence instead.
 * The children in the sequence are copies of the original subtree,
 * simplified with respect to their filters.
 */
static __isl_give isl_schedule_node *isl_schedule_node_order_before_or_after(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *filter,
	int before)
{
	enum isl_schedule_node_type ancestors[] =
		{ isl_schedule_node_filter, isl_schedule_node_sequence };
	isl_union_set *node_domain, *node_filter = NULL, *parent_filter;
	isl_schedule_node *node2;
	isl_schedule_tree *tree1, *tree2;
	isl_bool empty1, empty2;
	isl_bool in_seq;

	if (!node || !filter)
		goto error;
	if (check_insert(node) < 0)
		goto error;

	in_seq = has_ancestors(node, 2, ancestors);
	if (in_seq < 0)
		goto error;
	node_domain = isl_schedule_node_get_domain(node);
	filter = isl_union_set_gist(filter, isl_union_set_copy(node_domain));
	node_filter = isl_union_set_copy(node_domain);
	node_filter = isl_union_set_subtract(node_filter,
						isl_union_set_copy(filter));
	node_filter = isl_union_set_gist(node_filter, node_domain);
	empty1 = isl_union_set_is_empty(filter);
	empty2 = isl_union_set_is_empty(node_filter);
	if (empty1 < 0 || empty2 < 0)
		goto error;
	if (empty1 || empty2) {
		isl_union_set_free(filter);
		isl_union_set_free(node_filter);
		return node;
	}

	if (in_seq) {
		node = isl_schedule_node_parent(node);
		parent_filter = isl_schedule_node_filter_get_filter(node);
		node_filter = isl_union_set_intersect(node_filter,
					isl_union_set_copy(parent_filter));
		filter = isl_union_set_intersect(filter, parent_filter);
	}

	node2 = isl_schedule_node_copy(node);
	node = isl_schedule_node_gist(node, isl_union_set_copy(node_filter));
	node2 = isl_schedule_node_gist(node2, isl_union_set_copy(filter));
	tree1 = isl_schedule_node_get_tree(node);
	tree2 = isl_schedule_node_get_tree(node2);
	tree1 = isl_schedule_tree_insert_filter(tree1, node_filter);
	tree2 = isl_schedule_tree_insert_filter(tree2, filter);
	isl_schedule_node_free(node2);

	if (before) {
		tree1 = isl_schedule_tree_sequence_pair(tree2, tree1);
		node = graft_or_splice(node, tree1, 1);
	} else {
		tree1 = isl_schedule_tree_sequence_pair(tree1, tree2);
		node = graft_or_splice(node, tree1, 0);
	}

	return node;
error:
	isl_schedule_node_free(node);
	isl_union_set_free(filter);
	isl_union_set_free(node_filter);
	return NULL;
}

/* Split the domain elements that reach "node" into those that satisfy
 * "filter" and those that do not. Arrange for the first subset to be
 * executed before the second subset.
 * Return a pointer to the tree corresponding to the second subset,
 * except when this subset is empty in which case the original pointer
 * is returned.
 */
__isl_give isl_schedule_node *isl_schedule_node_order_before(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
{
	return isl_schedule_node_order_before_or_after(node, filter, 1);
}

/* Split the domain elements that reach "node" into those that satisfy
 * "filter" and those that do not. Arrange for the first subset to be
 * executed after the second subset.
 * Return a pointer to the tree corresponding to the second subset,
 * except when this subset is empty in which case the original pointer
 * is returned.
 */
__isl_give isl_schedule_node *isl_schedule_node_order_after(
	__isl_take isl_schedule_node *node, __isl_take isl_union_set *filter)
{
	return isl_schedule_node_order_before_or_after(node, filter, 0);
}
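
/* Example (editorial usage sketch, not part of isl itself): split the
 * instances of A into two groups and execute the first half before the
 * rest. The returned node points to the subtree of the second subset.
 * Here "ctx" is assumed to be an isl_ctx obtained from isl_ctx_alloc().
 *
 *	isl_union_set *domain, *filter;
 *	isl_schedule_node *node;
 *
 *	domain = isl_union_set_read_from_str(ctx, "{ A[i] : 0 <= i < 10 }");
 *	node = isl_schedule_node_from_domain(domain);
 *	node = isl_schedule_node_child(node, 0);
 *	filter = isl_union_set_read_from_str(ctx, "{ A[i] : i < 5 }");
 *	node = isl_schedule_node_order_before(node, filter);
 */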

/* Reset the user pointer on all identifiers of parameters and tuples
 * in the schedule node "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_reset_user(
	__isl_take isl_schedule_node *node)
{
	isl_schedule_tree *tree;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_reset_user(tree);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}

/* Align the parameters of the schedule node "node" to those of "space".
 */
__isl_give isl_schedule_node *isl_schedule_node_align_params(
	__isl_take isl_schedule_node *node, __isl_take isl_space *space)
{
	isl_schedule_tree *tree;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_align_params(tree, space);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}
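
/* Example (editorial usage sketch, not part of isl itself): make sure a
 * parameter called "N" appears in the parameter space of a schedule node,
 * e.g., before combining it with other objects involving that parameter.
 * Here "ctx" and "node" are assumed to be an isl_ctx and an
 * isl_schedule_node that are already available.
 *
 *	isl_space *space;
 *
 *	space = isl_space_params_alloc(ctx, 1);
 *	space = isl_space_set_dim_name(space, isl_dim_param, 0, "N");
 *	node = isl_schedule_node_align_params(node, space);
 */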

/* Compute the pullback of schedule node "node"
 * by the function represented by "upma".
 * In other words, plug in "upma" in the iteration domains
 * of schedule node "node".
 * We currently do not handle expansion nodes.
 *
 * Note that this is only a helper function for
 * isl_schedule_pullback_union_pw_multi_aff. In order to maintain consistency,
 * this function should not be called on a single node without also
 * calling it on all the other nodes.
 */
__isl_give isl_schedule_node *isl_schedule_node_pullback_union_pw_multi_aff(
	__isl_take isl_schedule_node *node,
	__isl_take isl_union_pw_multi_aff *upma)
{
	isl_schedule_tree *tree;

	tree = isl_schedule_node_get_tree(node);
	tree = isl_schedule_tree_pullback_union_pw_multi_aff(tree, upma);
	node = isl_schedule_node_graft_tree(node, tree);

	return node;
}

/* Internal data structure for isl_schedule_node_expand.
 * "tree" is the tree that needs to be plugged into all the leaves.
 * "domain" is the set of domain elements in the original leaves
 * to which the tree applies.
 */
struct isl_schedule_expand_data {
	isl_schedule_tree *tree;
	isl_union_set *domain;
};

/* If "node" is a leaf, then plug in data->tree, simplifying it
 * within its new context.
 *
 * If there are any domain elements at the leaf where the tree
 * should not be plugged in (i.e., there are elements not in data->domain),
 * then first extend the tree to only apply to the elements in data->domain
 * by constructing a set node that selects data->tree for elements
 * in data->domain and a leaf for the other elements.
 */
static __isl_give isl_schedule_node *expand(__isl_take isl_schedule_node *node,
	void *user)
{
	struct isl_schedule_expand_data *data = user;
	isl_schedule_tree *tree, *leaf;
	isl_union_set *domain, *left;
	isl_bool empty;

	if (isl_schedule_node_get_type(node) != isl_schedule_node_leaf)
		return node;

	domain = isl_schedule_node_get_domain(node);
	tree = isl_schedule_tree_copy(data->tree);

	left = isl_union_set_copy(domain);
	left = isl_union_set_subtract(left, isl_union_set_copy(data->domain));
	empty = isl_union_set_is_empty(left);
	if (empty >= 0 && !empty) {
		leaf = isl_schedule_node_get_leaf(node);
		leaf = isl_schedule_tree_insert_filter(leaf, left);
		left = isl_union_set_copy(data->domain);
		tree = isl_schedule_tree_insert_filter(tree, left);
		tree = isl_schedule_tree_set_pair(tree, leaf);
	} else {
		if (empty < 0)
			node = isl_schedule_node_free(node);
		isl_union_set_free(left);
	}

	node = isl_schedule_node_graft_tree(node, tree);
	node = isl_schedule_node_gist(node, domain);

	return node;
}

/* Expand the tree rooted at "node" by extending all leaves
 * with an expansion node that has "tree" as its child.
 * The expansion is determined by "contraction" and "domain".
 * That is, the elements of "domain" are contracted according
 * to "contraction". The expansion relation is then the inverse
 * of "contraction" with its range intersected with "domain".
 *
 * Insert the appropriate expansion node on top of "tree" and
 * then plug in the result in all leaves of "node".
 */
__isl_give isl_schedule_node *isl_schedule_node_expand(
	__isl_take isl_schedule_node *node,
	__isl_take isl_union_pw_multi_aff *contraction,
	__isl_take isl_union_set *domain,
	__isl_take isl_schedule_tree *tree)
{
	struct isl_schedule_expand_data data;
	isl_union_map *expansion;
	isl_union_pw_multi_aff *copy;

	if (!node || !contraction || !tree)
		node = isl_schedule_node_free(node);

	copy = isl_union_pw_multi_aff_copy(contraction);
	expansion = isl_union_map_from_union_pw_multi_aff(copy);
	expansion = isl_union_map_reverse(expansion);
	expansion = isl_union_map_intersect_range(expansion, domain);
	data.domain = isl_union_map_domain(isl_union_map_copy(expansion));

	tree = isl_schedule_tree_insert_expansion(tree, contraction, expansion);
	data.tree = tree;

	node = isl_schedule_node_map_descendant_bottom_up(node, &expand, &data);
	isl_union_set_free(data.domain);
	isl_schedule_tree_free(data.tree);
	return node;
}

/* Return the position of the subtree containing "node" among the children
 * of "ancestor". "node" is assumed to be a descendant of "ancestor".
 * In particular, both nodes should point to the same schedule tree.
 *
 * Return isl_size_error on error.
 */
isl_size isl_schedule_node_get_ancestor_child_position(
	__isl_keep isl_schedule_node *node,
	__isl_keep isl_schedule_node *ancestor)
{
	isl_size n1, n2;
	isl_schedule_tree *tree;

	n1 = isl_schedule_node_get_tree_depth(ancestor);
	n2 = isl_schedule_node_get_tree_depth(node);
	if (n1 < 0 || n2 < 0)
		return isl_size_error;

	if (node->schedule != ancestor->schedule)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"not a descendant", return isl_size_error);

	if (n1 >= n2)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"not a descendant", return isl_size_error);
	tree = isl_schedule_tree_list_get_schedule_tree(node->ancestors, n1);
	isl_schedule_tree_free(tree);
	if (tree != ancestor->tree)
		isl_die(isl_schedule_node_get_ctx(node), isl_error_invalid,
			"not a descendant", return isl_size_error);

	return node->child_pos[n1];
}

/* Given two nodes that point to the same schedule tree, return their
 * closest shared ancestor.
 *
 * Since the two nodes point to the same schedule, they share at least
 * one ancestor, the root of the schedule. We move down from the root
 * to the first ancestor where the respective children have a different
 * child position. This is the requested ancestor.
 * If there is no ancestor where the children have a different position,
 * then one node is an ancestor of the other, and that ancestor is
 * the requested node.
 */
__isl_give isl_schedule_node *isl_schedule_node_get_shared_ancestor(
	__isl_keep isl_schedule_node *node1,
	__isl_keep isl_schedule_node *node2)
{
	int i;
	isl_size n1, n2;

	n1 = isl_schedule_node_get_tree_depth(node1);
	n2 = isl_schedule_node_get_tree_depth(node2);
	if (n1 < 0 || n2 < 0)
		return NULL;
	if (node1->schedule != node2->schedule)
		isl_die(isl_schedule_node_get_ctx(node1), isl_error_invalid,
			"not part of same schedule", return NULL);
	if (n2 < n1)
		return isl_schedule_node_get_shared_ancestor(node2, node1);
	if (n1 == 0)
		return isl_schedule_node_copy(node1);
	if (isl_schedule_node_is_equal(node1, node2))
		return isl_schedule_node_copy(node1);

	for (i = 0; i < n1; ++i)
		if (node1->child_pos[i] != node2->child_pos[i])
			break;

	node1 = isl_schedule_node_copy(node1);
	return isl_schedule_node_ancestor(node1, n1 - i);
}
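
/* Example (editorial usage sketch, not part of isl itself): assuming "node"
 * was obtained as in the isl_schedule_node_order_before sketch above,
 * the following locates the sequence node that was introduced and the
 * position of the second branch underneath it ("pos" evaluates to 1).
 *
 *	isl_schedule_node *first, *shared;
 *	isl_size pos;
 *
 *	first = isl_schedule_node_copy(node);
 *	first = isl_schedule_node_parent(first);
 *	first = isl_schedule_node_parent(first);
 *	first = isl_schedule_node_child(first, 0);
 *	shared = isl_schedule_node_get_shared_ancestor(node, first);
 *	pos = isl_schedule_node_get_ancestor_child_position(node, shared);
 *	isl_schedule_node_free(shared);
 *	isl_schedule_node_free(first);
 */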

/* Print "node" to "p".
 */
__isl_give isl_printer *isl_printer_print_schedule_node(
	__isl_take isl_printer *p, __isl_keep isl_schedule_node *node)
{
	isl_size n;

	if (!node)
		return isl_printer_free(p);
	n = isl_schedule_tree_list_n_schedule_tree(node->ancestors);
	if (n < 0)
		return isl_printer_free(p);
	return isl_printer_print_schedule_tree_mark(p, node->schedule->root, n,
						node->child_pos);
}

void isl_schedule_node_dump(__isl_keep isl_schedule_node *node)
{
	isl_ctx *ctx;
	isl_printer *printer;

	if (!node)
		return;

	ctx = isl_schedule_node_get_ctx(node);
	printer = isl_printer_to_file(ctx, stderr);
	printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
	printer = isl_printer_print_schedule_node(printer, node);

	isl_printer_free(printer);
}

/* Return a string representation of "node".
 * Print the schedule node in block format since it would otherwise
 * look identical to the entire schedule.
 */
__isl_give char *isl_schedule_node_to_str(__isl_keep isl_schedule_node *node)
{
	isl_printer *printer;
	char *s;

	if (!node)
		return NULL;

	printer = isl_printer_to_str(isl_schedule_node_get_ctx(node));
	printer = isl_printer_set_yaml_style(printer, ISL_YAML_STYLE_BLOCK);
	printer = isl_printer_print_schedule_node(printer, node);
	s = isl_printer_get_str(printer);
	isl_printer_free(printer);

	return s;
}
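
/* Example (editorial usage sketch, not part of isl itself): the returned
 * string is owned by the caller and should be released with free(),
 * assuming <stdio.h> and <stdlib.h> have been included.
 *
 *	char *str;
 *
 *	str = isl_schedule_node_to_str(node);
 *	if (str)
 *		fprintf(stderr, "%s\n", str);
 *	free(str);
 */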