1 | /* Instruction scheduling pass. |
2 | Copyright (C) 1992-2023 Free Software Foundation, Inc. |
   Contributed by Michael Tiemann (tiemann@cygnus.com).  Enhanced by,
4 | and currently maintained by, Jim Wilson (wilson@cygnus.com) |
5 | |
6 | This file is part of GCC. |
7 | |
8 | GCC is free software; you can redistribute it and/or modify it under |
9 | the terms of the GNU General Public License as published by the Free |
10 | Software Foundation; either version 3, or (at your option) any later |
11 | version. |
12 | |
13 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
14 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
15 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
16 | for more details. |
17 | |
18 | You should have received a copy of the GNU General Public License |
19 | along with GCC; see the file COPYING3. If not see |
20 | <http://www.gnu.org/licenses/>. */ |
21 | |
22 | /* This pass implements list scheduling within basic blocks. It is |
23 | run twice: (1) after flow analysis, but before register allocation, |
24 | and (2) after register allocation. |
25 | |
26 | The first run performs interblock scheduling, moving insns between |
27 | different blocks in the same "region", and the second runs only |
28 | basic block scheduling. |
29 | |
30 | Interblock motions performed are useful motions and speculative |
31 | motions, including speculative loads. Motions requiring code |
32 | duplication are not supported. The identification of motion type |
33 | and the check for validity of speculative motions requires |
34 | construction and analysis of the function's control flow graph. |
35 | |
36 | The main entry point for this pass is schedule_insns(), called for |
37 | each function. The work of the scheduler is organized in three |
38 | levels: (1) function level: insns are subject to splitting, |
39 | control-flow-graph is constructed, regions are computed (after |
40 | reload, each region is of one block), (2) region level: control |
41 | flow graph attributes required for interblock scheduling are |
42 | computed (dominators, reachability, etc.), data dependences and |
43 | priorities are computed, and (3) block level: insns in the block |
44 | are actually scheduled. */ |
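/* As a hedged illustration (not taken from any particular target),
   consider a function whose CFG is the diamond

	bb0 -> bb1 -> bb3
	bb0 -> bb2 -> bb3

   Before reload the four blocks can form a single region, so an insn
   from bb1 may be moved up into bb0 speculatively (it is only useful
   when the bb0->bb1 edge is taken); after reload each block is a
   region of its own and only intra-block list scheduling is done.  */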
45 | |
46 | #include "config.h" |
47 | #include "system.h" |
48 | #include "coretypes.h" |
49 | #include "backend.h" |
50 | #include "target.h" |
51 | #include "rtl.h" |
52 | #include "df.h" |
53 | #include "memmodel.h" |
54 | #include "tm_p.h" |
55 | #include "insn-config.h" |
56 | #include "emit-rtl.h" |
57 | #include "recog.h" |
58 | #include "profile.h" |
59 | #include "insn-attr.h" |
60 | #include "except.h" |
61 | #include "cfganal.h" |
62 | #include "sched-int.h" |
63 | #include "sel-sched.h" |
64 | #include "tree-pass.h" |
65 | #include "dbgcnt.h" |
66 | #include "pretty-print.h" |
67 | #include "print-rtl.h" |
68 | |
69 | /* Disable warnings about quoting issues in the pp_xxx calls below |
70 | that (intentionally) don't follow GCC diagnostic conventions. */ |
71 | #if __GNUC__ >= 10 |
72 | # pragma GCC diagnostic push |
73 | # pragma GCC diagnostic ignored "-Wformat-diag" |
74 | #endif |
75 | |
76 | #ifdef INSN_SCHEDULING |
77 | |
78 | /* Some accessor macros for h_i_d members only used within this file. */ |
79 | #define FED_BY_SPEC_LOAD(INSN) (HID (INSN)->fed_by_spec_load) |
#define IS_LOAD_INSN(INSN) (HID (INSN)->is_load_insn)
81 | |
82 | /* nr_inter/spec counts interblock/speculative motion for the function. */ |
83 | static int nr_inter, nr_spec; |
84 | |
85 | static bool is_cfg_nonregular (void); |
86 | |
87 | /* Number of regions in the procedure. */ |
88 | int nr_regions = 0; |
89 | |
90 | /* Same as above before adding any new regions. */ |
91 | static int nr_regions_initial = 0; |
92 | |
93 | /* Table of region descriptions. */ |
94 | region *rgn_table = NULL; |
95 | |
96 | /* Array of lists of regions' blocks. */ |
97 | int *rgn_bb_table = NULL; |
98 | |
99 | /* Topological order of blocks in the region (if b2 is reachable from |
100 | b1, block_to_bb[b2] > block_to_bb[b1]). Note: A basic block is |
101 | always referred to by either block or b, while its topological |
102 | order name (in the region) is referred to by bb. */ |
103 | int *block_to_bb = NULL; |
104 | |
105 | /* The number of the region containing a block. */ |
106 | int *containing_rgn = NULL; |
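
/* For example (values are illustrative only): if region 2 consists of
   basic blocks 7, 4 and 9 in that topological order, then
   block_to_bb[7] == 0, block_to_bb[4] == 1, block_to_bb[9] == 2, and
   containing_rgn[7] == containing_rgn[4] == containing_rgn[9] == 2.  */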
107 | |
/* ebb_head [i] - is index in rgn_bb_table of the head basic block of i'th ebb.
   Currently we can get an ebb only through splitting the block currently
   being scheduled, therefore we don't need an ebb_head array for every
   region; it is sufficient to hold it for the current region only.  */
112 | int *ebb_head = NULL; |
113 | |
114 | /* The minimum probability of reaching a source block so that it will be |
115 | considered for speculative scheduling. */ |
116 | static int min_spec_prob; |
117 | |
118 | static void find_single_block_region (bool); |
119 | static void find_rgns (void); |
120 | static bool too_large (int, int *, int *); |
121 | |
122 | /* Blocks of the current region being scheduled. */ |
123 | int current_nr_blocks; |
124 | int current_blocks; |
125 | |
126 | /* A speculative motion requires checking live information on the path |
127 | from 'source' to 'target'. The split blocks are those to be checked. |
128 | After a speculative motion, live information should be modified in |
129 | the 'update' blocks. |
130 | |
131 | Lists of split and update blocks for each candidate of the current |
132 | target are in array bblst_table. */ |
133 | static basic_block *bblst_table; |
134 | static int bblst_size, bblst_last; |
135 | |
136 | /* Arrays that hold the DFA state at the end of a basic block, to re-use |
137 | as the initial state at the start of successor blocks. The BB_STATE |
138 | array holds the actual DFA state, and BB_STATE_ARRAY[I] is a pointer |
139 | into BB_STATE for basic block I. FIXME: This should be a vec. */ |
140 | static char *bb_state_array = NULL; |
141 | static state_t *bb_state = NULL; |
142 | |
143 | /* Target info declarations. |
144 | |
145 | The block currently being scheduled is referred to as the "target" block, |
146 | while other blocks in the region from which insns can be moved to the |
147 | target are called "source" blocks. The candidate structure holds info |
148 | about such sources: are they valid? Speculative? Etc. */ |
149 | struct bblst |
150 | { |
151 | basic_block *first_member; |
152 | int nr_members; |
153 | }; |
154 | |
155 | struct candidate |
156 | { |
157 | char is_valid; |
158 | char is_speculative; |
159 | int src_prob; |
160 | bblst split_bbs; |
161 | bblst update_bbs; |
162 | }; |
163 | |
164 | static candidate *candidate_table; |
165 | #define IS_VALID(src) (candidate_table[src].is_valid) |
166 | #define IS_SPECULATIVE(src) (candidate_table[src].is_speculative) |
167 | #define IS_SPECULATIVE_INSN(INSN) \ |
168 | (IS_SPECULATIVE (BLOCK_TO_BB (BLOCK_NUM (INSN)))) |
169 | #define SRC_PROB(src) ( candidate_table[src].src_prob ) |
170 | |
171 | /* The bb being currently scheduled. */ |
172 | int target_bb; |
173 | |
174 | /* List of edges. */ |
175 | struct edgelst |
176 | { |
177 | edge *first_member; |
178 | int nr_members; |
179 | }; |
180 | |
181 | static edge *edgelst_table; |
182 | static int edgelst_last; |
183 | |
184 | static void extract_edgelst (sbitmap, edgelst *); |
185 | |
186 | /* Target info functions. */ |
187 | static void split_edges (int, int, edgelst *); |
188 | static void compute_trg_info (int); |
189 | void debug_candidate (int); |
190 | void debug_candidates (int); |
191 | |
192 | /* Dominators array: dom[i] contains the sbitmap of dominators of |
193 | bb i in the region. */ |
194 | static sbitmap *dom; |
195 | |
196 | /* bb 0 is the only region entry. */ |
197 | #define IS_RGN_ENTRY(bb) (!bb) |
198 | |
/* Is bb_src dominated by bb_trg?  */
200 | #define IS_DOMINATED(bb_src, bb_trg) \ |
201 | ( bitmap_bit_p (dom[bb_src], bb_trg) ) |
202 | |
203 | /* Probability: Prob[i] is an int in [0, REG_BR_PROB_BASE] which is |
204 | the probability of bb i relative to the region entry. */ |
205 | static int *prob; |
206 | |
207 | /* Bit-set of edges, where bit i stands for edge i. */ |
208 | typedef sbitmap edgeset; |
209 | |
210 | /* Number of edges in the region. */ |
211 | static int rgn_nr_edges; |
212 | |
213 | /* Array of size rgn_nr_edges. */ |
214 | static edge *rgn_edges; |
215 | |
216 | /* Mapping from each edge in the graph to its number in the rgn. */ |
217 | #define EDGE_TO_BIT(edge) ((int)(size_t)(edge)->aux) |
218 | #define SET_EDGE_TO_BIT(edge,nr) ((edge)->aux = (void *)(size_t)(nr)) |
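
/* A minimal usage sketch (the loop below is illustrative, not a quote
   of the code later in this file): once the region's edges have been
   collected into rgn_edges, they are numbered with

	for (i = 0; i < rgn_nr_edges; i++)
	  SET_EDGE_TO_BIT (rgn_edges[i], i);

   after which EDGE_TO_BIT (rgn_edges[i]) == i, so bit i of an edgeset
   sbitmap corresponds to rgn_edges[i].  */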
219 | |
/* The split edges of a source bb are different for each target
   bb.  In order to compute this efficiently, the 'potential-split edges'
   are computed for each bb prior to scheduling a region.  These are actually
   the split edges of each bb relative to the region entry.
224 | |
225 | pot_split[bb] is the set of potential split edges of bb. */ |
226 | static edgeset *pot_split; |
227 | |
228 | /* For every bb, a set of its ancestor edges. */ |
229 | static edgeset *ancestor_edges; |
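
/* For instance (illustrative): if bb is reached from the region entry
   through a conditional jump in bb', the edge leaving bb' that does not
   lead towards bb is a potential split edge of bb; moving an insn from
   bb up across bb' makes that edge a split edge whose liveness must be
   checked.  */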
230 | |
231 | #define INSN_PROBABILITY(INSN) (SRC_PROB (BLOCK_TO_BB (BLOCK_NUM (INSN)))) |
232 | |
233 | /* Speculative scheduling functions. */ |
234 | static bool check_live_1 (int, rtx); |
235 | static void update_live_1 (int, rtx); |
236 | static bool is_pfree (rtx, int, int); |
237 | static bool find_conditional_protection (rtx_insn *, int); |
238 | static bool is_conditionally_protected (rtx, int, int); |
239 | static bool is_prisky (rtx, int, int); |
240 | static bool is_exception_free (rtx_insn *, int, int); |
241 | |
242 | static bool sets_likely_spilled (rtx); |
243 | static void sets_likely_spilled_1 (rtx, const_rtx, void *); |
244 | static void add_branch_dependences (rtx_insn *, rtx_insn *); |
245 | static void compute_block_dependences (int); |
246 | |
247 | static void schedule_region (int); |
248 | static void concat_insn_mem_list (rtx_insn_list *, rtx_expr_list *, |
249 | rtx_insn_list **, rtx_expr_list **); |
250 | static void propagate_deps (int, class deps_desc *); |
251 | static void free_pending_lists (void); |
252 | |
253 | /* Functions for construction of the control flow graph. */ |
254 | |
255 | /* Return true if control flow graph should not be constructed, |
256 | false otherwise. |
257 | |
258 | We decide not to build the control flow graph if there is possibly more |
259 | than one entry to the function, if computed branches exist, if we |
260 | have nonlocal gotos, or if we have an unreachable loop. */ |
261 | |
262 | static bool |
263 | is_cfg_nonregular (void) |
264 | { |
265 | basic_block b; |
266 | rtx_insn *insn; |
267 | |
268 | /* If we have a label that could be the target of a nonlocal goto, then |
269 | the cfg is not well structured. */ |
270 | if (nonlocal_goto_handler_labels) |
271 | return true; |
272 | |
273 | /* If we have any forced labels, then the cfg is not well structured. */ |
274 | if (forced_labels) |
275 | return true; |
276 | |
277 | /* If we have exception handlers, then we consider the cfg not well |
278 | structured. ?!? We should be able to handle this now that we |
279 | compute an accurate cfg for EH. */ |
280 | if (current_function_has_exception_handlers ()) |
281 | return true; |
282 | |
283 | /* If we have insns which refer to labels as non-jumped-to operands, |
284 | then we consider the cfg not well structured. */ |
285 | FOR_EACH_BB_FN (b, cfun) |
286 | FOR_BB_INSNS (b, insn) |
287 | { |
288 | rtx note, set, dest; |
289 | rtx_insn *next; |
290 | |
291 | /* If this function has a computed jump, then we consider the cfg |
292 | not well structured. */ |
293 | if (JUMP_P (insn) && computed_jump_p (insn)) |
294 | return true; |
295 | |
296 | if (!INSN_P (insn)) |
297 | continue; |
298 | |
299 | note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX); |
300 | if (note == NULL_RTX) |
301 | continue; |
302 | |
303 | /* For that label not to be seen as a referred-to label, this |
304 | must be a single-set which is feeding a jump *only*. This |
305 | could be a conditional jump with the label split off for |
306 | machine-specific reasons or a casesi/tablejump. */ |
307 | next = next_nonnote_insn (insn); |
308 | if (next == NULL_RTX |
309 | || !JUMP_P (next) |
310 | || (JUMP_LABEL (next) != XEXP (note, 0) |
311 | && find_reg_note (next, REG_LABEL_TARGET, |
312 | XEXP (note, 0)) == NULL_RTX) |
	  || BLOCK_FOR_INSN (insn) != BLOCK_FOR_INSN (next))
314 | return true; |
315 | |
316 | set = single_set (insn); |
317 | if (set == NULL_RTX) |
318 | return true; |
319 | |
320 | dest = SET_DEST (set); |
321 | if (!REG_P (dest) || !dead_or_set_p (next, dest)) |
322 | return true; |
323 | } |
324 | |
325 | /* Unreachable loops with more than one basic block are detected |
326 | during the DFS traversal in find_rgns. |
327 | |
328 | Unreachable loops with a single block are detected here. This |
329 | test is redundant with the one in find_rgns, but it's much |
330 | cheaper to go ahead and catch the trivial case here. */ |
331 | FOR_EACH_BB_FN (b, cfun) |
332 | { |
333 | if (EDGE_COUNT (b->preds) == 0 |
	  || (single_pred_p (b)
	      && single_pred (b) == b))
336 | return true; |
337 | } |
338 | |
339 | /* All the tests passed. Consider the cfg well structured. */ |
340 | return false; |
341 | } |
342 | |
343 | /* Extract list of edges from a bitmap containing EDGE_TO_BIT bits. */ |
344 | |
345 | static void |
extract_edgelst (sbitmap set, edgelst *el)
347 | { |
348 | unsigned int i = 0; |
349 | sbitmap_iterator sbi; |
350 | |
351 | /* edgelst table space is reused in each call to extract_edgelst. */ |
352 | edgelst_last = 0; |
353 | |
354 | el->first_member = &edgelst_table[edgelst_last]; |
355 | el->nr_members = 0; |
356 | |
  /* Iterate over each set bit in the bitset.  */
358 | EXECUTE_IF_SET_IN_BITMAP (set, 0, i, sbi) |
359 | { |
360 | edgelst_table[edgelst_last++] = rgn_edges[i]; |
361 | el->nr_members++; |
362 | } |
363 | } |
364 | |
365 | /* Functions for the construction of regions. */ |
366 | |
367 | /* Print the regions, for debugging purposes. Callable from debugger. */ |
368 | |
369 | DEBUG_FUNCTION void |
370 | debug_regions (void) |
371 | { |
372 | int rgn, bb; |
373 | |
  fprintf (sched_dump, "\n;; ------------ REGIONS ----------\n\n");
  for (rgn = 0; rgn < nr_regions; rgn++)
    {
      fprintf (sched_dump, ";;\trgn %d nr_blocks %d:\n", rgn,
	       rgn_table[rgn].rgn_nr_blocks);
      fprintf (sched_dump, ";;\tbb/block: ");

      /* We don't have ebb_head initialized yet, so we can't use
	 BB_TO_BLOCK ().  */
      current_blocks = RGN_BLOCKS (rgn);

      for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
	fprintf (sched_dump, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

      fprintf (sched_dump, "\n\n");
389 | } |
390 | } |
391 | |
392 | /* Print the region's basic blocks. */ |
393 | |
394 | DEBUG_FUNCTION void |
395 | debug_region (int rgn) |
396 | { |
397 | int bb; |
398 | |
  fprintf (stderr, "\n;; ------------ REGION %d ----------\n\n", rgn);
  fprintf (stderr, ";;\trgn %d nr_blocks %d:\n", rgn,
	   rgn_table[rgn].rgn_nr_blocks);
  fprintf (stderr, ";;\tbb/block: ");

  /* We don't have ebb_head initialized yet, so we can't use
     BB_TO_BLOCK ().  */
  current_blocks = RGN_BLOCKS (rgn);

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    fprintf (stderr, " %d/%d ", bb, rgn_bb_table[current_blocks + bb]);

  fprintf (stderr, "\n\n");

  for (bb = 0; bb < rgn_table[rgn].rgn_nr_blocks; bb++)
    {
      dump_bb (stderr,
	       BASIC_BLOCK_FOR_FN (cfun, rgn_bb_table[current_blocks + bb]),
	       0, TDF_SLIM | TDF_BLOCKS);
      fprintf (stderr, "\n");
    }

  fprintf (stderr, "\n");
422 | |
423 | } |
424 | |
/* True when the bb with index BB_INDEX is contained in region RGN.  */
426 | static bool |
427 | bb_in_region_p (int bb_index, int rgn) |
428 | { |
429 | int i; |
430 | |
431 | for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) |
432 | if (rgn_bb_table[current_blocks + i] == bb_index) |
433 | return true; |
434 | |
435 | return false; |
436 | } |
437 | |
438 | /* Dump region RGN to file F using dot syntax. */ |
439 | void |
440 | dump_region_dot (FILE *f, int rgn) |
441 | { |
442 | int i; |
443 | |
  fprintf (f, "digraph Region_%d {\n", rgn);
445 | |
446 | /* We don't have ebb_head initialized yet, so we can't use |
447 | BB_TO_BLOCK (). */ |
448 | current_blocks = RGN_BLOCKS (rgn); |
449 | |
450 | for (i = 0; i < rgn_table[rgn].rgn_nr_blocks; i++) |
451 | { |
452 | edge e; |
453 | edge_iterator ei; |
454 | int src_bb_num = rgn_bb_table[current_blocks + i]; |
455 | basic_block bb = BASIC_BLOCK_FOR_FN (cfun, src_bb_num); |
456 | |
457 | FOR_EACH_EDGE (e, ei, bb->succs) |
	if (bb_in_region_p (e->dest->index, rgn))
	  fprintf (f, "\t%d -> %d\n", src_bb_num, e->dest->index);
460 | } |
  fprintf (f, "}\n");
462 | } |
463 | |
464 | /* The same, but first open a file specified by FNAME. */ |
465 | void |
466 | dump_region_dot_file (const char *fname, int rgn) |
467 | { |
  FILE *f = fopen (fname, "wt");
  dump_region_dot (f, rgn);
  fclose (f);
471 | } |
472 | |
473 | /* Build a single block region for each basic block in the function. |
474 | This allows for using the same code for interblock and basic block |
475 | scheduling. */ |
476 | |
477 | static void |
478 | find_single_block_region (bool ebbs_p) |
479 | { |
480 | basic_block bb, ebb_start; |
481 | int i = 0; |
482 | |
483 | nr_regions = 0; |
484 | |
485 | if (ebbs_p) { |
486 | int probability_cutoff; |
487 | if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ) |
488 | probability_cutoff = param_tracer_min_branch_probability_feedback; |
489 | else |
490 | probability_cutoff = param_tracer_min_branch_probability; |
491 | probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff; |
492 | |
493 | FOR_EACH_BB_FN (ebb_start, cfun) |
494 | { |
495 | RGN_NR_BLOCKS (nr_regions) = 0; |
496 | RGN_BLOCKS (nr_regions) = i; |
497 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
498 | RGN_HAS_REAL_EBB (nr_regions) = 0; |
499 | |
500 | for (bb = ebb_start; ; bb = bb->next_bb) |
501 | { |
502 | edge e; |
503 | |
504 | rgn_bb_table[i] = bb->index; |
505 | RGN_NR_BLOCKS (nr_regions)++; |
506 | CONTAINING_RGN (bb->index) = nr_regions; |
507 | BLOCK_TO_BB (bb->index) = i - RGN_BLOCKS (nr_regions); |
508 | i++; |
509 | |
510 | if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun) |
511 | || LABEL_P (BB_HEAD (bb->next_bb))) |
512 | break; |
513 | |
	  e = find_fallthru_edge (bb->succs);
515 | if (! e) |
516 | break; |
517 | if (e->probability.initialized_p () |
518 | && e->probability.to_reg_br_prob_base () <= probability_cutoff) |
519 | break; |
520 | } |
521 | |
522 | ebb_start = bb; |
523 | nr_regions++; |
524 | } |
525 | } |
526 | else |
527 | FOR_EACH_BB_FN (bb, cfun) |
528 | { |
529 | rgn_bb_table[nr_regions] = bb->index; |
530 | RGN_NR_BLOCKS (nr_regions) = 1; |
531 | RGN_BLOCKS (nr_regions) = nr_regions; |
532 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
533 | RGN_HAS_REAL_EBB (nr_regions) = 0; |
534 | |
535 | CONTAINING_RGN (bb->index) = nr_regions; |
536 | BLOCK_TO_BB (bb->index) = 0; |
537 | nr_regions++; |
538 | } |
539 | } |
540 | |
/* Estimate the number of insns in BB.  */
542 | static int |
543 | rgn_estimate_number_of_insns (basic_block bb) |
544 | { |
545 | int count; |
546 | |
547 | count = INSN_LUID (BB_END (bb)) - INSN_LUID (BB_HEAD (bb)); |
548 | |
549 | if (MAY_HAVE_DEBUG_INSNS) |
550 | { |
551 | rtx_insn *insn; |
552 | |
553 | FOR_BB_INSNS (bb, insn) |
554 | if (DEBUG_INSN_P (insn)) |
555 | count--; |
556 | } |
557 | |
558 | return count; |
559 | } |
560 | |
561 | /* Update number of blocks and the estimate for number of insns |
562 | in the region. Return true if the region is "too large" for interblock |
563 | scheduling (compile time considerations). */ |
564 | |
565 | static bool |
566 | too_large (int block, int *num_bbs, int *num_insns) |
567 | { |
568 | (*num_bbs)++; |
569 | (*num_insns) += (common_sched_info->estimate_number_of_insns |
570 | (BASIC_BLOCK_FOR_FN (cfun, block))); |
571 | |
572 | return ((*num_bbs > param_max_sched_region_blocks) |
573 | || (*num_insns > param_max_sched_region_insns)); |
574 | } |
575 | |
/* Update_loop_relations(blk, hdr): Check if the loop headed by max_hdr[blk]
   is still an inner loop.  Put in max_hdr[blk] the header of the innermost
   loop containing blk.  */
579 | #define UPDATE_LOOP_RELATIONS(blk, hdr) \ |
580 | { \ |
581 | if (max_hdr[blk] == -1) \ |
582 | max_hdr[blk] = hdr; \ |
583 | else if (dfs_nr[max_hdr[blk]] > dfs_nr[hdr]) \ |
584 | bitmap_clear_bit (inner, hdr); \ |
585 | else if (dfs_nr[max_hdr[blk]] < dfs_nr[hdr]) \ |
586 | { \ |
587 | bitmap_clear_bit (inner,max_hdr[blk]); \ |
588 | max_hdr[blk] = hdr; \ |
589 | } \ |
590 | } |
591 | |
592 | /* Find regions for interblock scheduling. |
593 | |
594 | A region for scheduling can be: |
595 | |
596 | * A loop-free procedure, or |
597 | |
598 | * A reducible inner loop, or |
599 | |
600 | * A basic block not contained in any other region. |
601 | |
602 | ?!? In theory we could build other regions based on extended basic |
603 | blocks or reverse extended basic blocks. Is it worth the trouble? |
604 | |
605 | Loop blocks that form a region are put into the region's block list |
606 | in topological order. |
607 | |
608 | This procedure stores its results into the following global (ick) variables |
609 | |
     * nr_regions
     * rgn_table
     * rgn_bb_table
     * block_to_bb
     * containing_rgn
615 | |
616 | We use dominator relationships to avoid making regions out of non-reducible |
617 | loops. |
618 | |
619 | This procedure needs to be converted to work on pred/succ lists instead |
620 | of edge tables. That would simplify it somewhat. */ |
621 | |
622 | static void |
623 | haifa_find_rgns (void) |
624 | { |
625 | int *max_hdr, *dfs_nr, *degree; |
626 | char no_loops = 1; |
627 | int node, child, loop_head, i, head, tail; |
628 | int count = 0, sp, idx = 0; |
629 | edge_iterator current_edge; |
630 | edge_iterator *stack; |
631 | int num_bbs, num_insns; |
632 | bool unreachable; |
633 | bool too_large_failure; |
634 | basic_block bb; |
635 | |
636 | /* Perform a DFS traversal of the cfg. Identify loop headers, inner loops |
637 | and a mapping from block to its loop header (if the block is contained |
638 | in a loop, else -1). |
639 | |
640 | Store results in HEADER, INNER, and MAX_HDR respectively, these will |
641 | be used as inputs to the second traversal. |
642 | |
643 | STACK, SP and DFS_NR are only used during the first traversal. */ |
644 | |
645 | /* Allocate and initialize variables for the first traversal. */ |
646 | max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
647 | dfs_nr = XCNEWVEC (int, last_basic_block_for_fn (cfun)); |
648 | stack = XNEWVEC (edge_iterator, n_edges_for_fn (cfun)); |
649 | |
650 | /* Note if a block is a natural inner loop header. */ |
651 | auto_sbitmap inner (last_basic_block_for_fn (cfun)); |
652 | bitmap_ones (inner); |
653 | |
654 | /* Note if a block is a natural loop header. */ |
  auto_sbitmap header (last_basic_block_for_fn (cfun));
  bitmap_clear (header);
657 | |
658 | /* Note if a block is in the block queue. */ |
659 | auto_sbitmap in_queue (last_basic_block_for_fn (cfun)); |
660 | bitmap_clear (in_queue); |
661 | |
  /* Note if a block is on the DFS stack.  */
663 | auto_sbitmap in_stack (last_basic_block_for_fn (cfun)); |
664 | bitmap_clear (in_stack); |
665 | |
666 | for (i = 0; i < last_basic_block_for_fn (cfun); i++) |
667 | max_hdr[i] = -1; |
668 | |
669 | #define EDGE_PASSED(E) (ei_end_p ((E)) || ei_edge ((E))->aux) |
670 | #define SET_EDGE_PASSED(E) (ei_edge ((E))->aux = ei_edge ((E))) |
671 | |
672 | /* DFS traversal to find inner loops in the cfg. */ |
673 | |
674 | current_edge = ei_start (single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->succs); |
675 | sp = -1; |
676 | |
677 | while (1) |
678 | { |
679 | if (EDGE_PASSED (current_edge)) |
680 | { |
681 | /* We have reached a leaf node or a node that was already |
682 | processed. Pop edges off the stack until we find |
683 | an edge that has not yet been processed. */ |
684 | while (sp >= 0 && EDGE_PASSED (current_edge)) |
685 | { |
686 | /* Pop entry off the stack. */ |
687 | current_edge = stack[sp--]; |
	      node = ei_edge (current_edge)->src->index;
	      gcc_assert (node != ENTRY_BLOCK);
	      child = ei_edge (current_edge)->dest->index;
	      gcc_assert (child != EXIT_BLOCK);
	      bitmap_clear_bit (in_stack, child);
	      if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
		UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	      ei_next (&current_edge);
696 | } |
697 | |
	  /* See if we have finished the DFS tree traversal.  */
699 | if (sp < 0 && EDGE_PASSED (current_edge)) |
700 | break; |
701 | |
702 | /* Nope, continue the traversal with the popped node. */ |
703 | continue; |
704 | } |
705 | |
706 | /* Process a node. */ |
      node = ei_edge (current_edge)->src->index;
      gcc_assert (node != ENTRY_BLOCK);
      bitmap_set_bit (in_stack, node);
710 | dfs_nr[node] = ++count; |
711 | |
712 | /* We don't traverse to the exit block. */ |
      child = ei_edge (current_edge)->dest->index;
714 | if (child == EXIT_BLOCK) |
715 | { |
716 | SET_EDGE_PASSED (current_edge); |
	  ei_next (&current_edge);
718 | continue; |
719 | } |
720 | |
721 | /* If the successor is in the stack, then we've found a loop. |
722 | Mark the loop, if it is not a natural loop, then it will |
723 | be rejected during the second traversal. */ |
      if (bitmap_bit_p (in_stack, child))
	{
	  no_loops = 0;
	  bitmap_set_bit (header, child);
	  UPDATE_LOOP_RELATIONS (node, child);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
731 | continue; |
732 | } |
733 | |
734 | /* If the child was already visited, then there is no need to visit |
735 | it again. Just update the loop relationships and restart |
736 | with a new edge. */ |
737 | if (dfs_nr[child]) |
738 | { |
	  if (max_hdr[child] >= 0 && bitmap_bit_p (in_stack, max_hdr[child]))
	    UPDATE_LOOP_RELATIONS (node, max_hdr[child]);
	  SET_EDGE_PASSED (current_edge);
	  ei_next (&current_edge);
743 | continue; |
744 | } |
745 | |
746 | /* Push an entry on the stack and continue DFS traversal. */ |
747 | stack[++sp] = current_edge; |
748 | SET_EDGE_PASSED (current_edge); |
749 | current_edge = ei_start (ei_edge (current_edge)->dest->succs); |
750 | } |
751 | |
752 | /* Reset ->aux field used by EDGE_PASSED. */ |
753 | FOR_ALL_BB_FN (bb, cfun) |
754 | { |
755 | edge_iterator ei; |
756 | edge e; |
757 | FOR_EACH_EDGE (e, ei, bb->succs) |
758 | e->aux = NULL; |
759 | } |
760 | |
761 | |
762 | /* Another check for unreachable blocks. The earlier test in |
763 | is_cfg_nonregular only finds unreachable blocks that do not |
764 | form a loop. |
765 | |
766 | The DFS traversal will mark every block that is reachable from |
767 | the entry node by placing a nonzero value in dfs_nr. Thus if |
768 | dfs_nr is zero for any block, then it must be unreachable. */ |
769 | unreachable = false; |
770 | FOR_EACH_BB_FN (bb, cfun) |
771 | if (dfs_nr[bb->index] == 0) |
772 | { |
773 | unreachable = true; |
774 | break; |
775 | } |
776 | |
777 | /* Gross. To avoid wasting memory, the second pass uses the dfs_nr array |
778 | to hold degree counts. */ |
779 | degree = dfs_nr; |
780 | |
781 | FOR_EACH_BB_FN (bb, cfun) |
782 | degree[bb->index] = EDGE_COUNT (bb->preds); |
783 | |
784 | /* Do not perform region scheduling if there are any unreachable |
785 | blocks. */ |
786 | if (!unreachable) |
787 | { |
788 | int *queue, *degree1 = NULL; |
789 | /* We use EXTENDED_RGN_HEADER as an addition to HEADER and put |
790 | there basic blocks, which are forced to be region heads. |
	 This is done to try to assemble a few smaller regions
	 from a too_large region.  */
      sbitmap extended_rgn_header = NULL;
794 | bool extend_regions_p; |
795 | |
796 | if (no_loops) |
	bitmap_set_bit (header, 0);
798 | |
      /* Second traversal: find reducible inner loops and topologically sort
	 the blocks of each region.  */
801 | |
802 | queue = XNEWVEC (int, n_basic_blocks_for_fn (cfun)); |
803 | |
804 | extend_regions_p = param_max_sched_extend_regions_iters > 0; |
805 | if (extend_regions_p) |
806 | { |
807 | degree1 = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
808 | extended_rgn_header = |
809 | sbitmap_alloc (last_basic_block_for_fn (cfun)); |
810 | bitmap_clear (extended_rgn_header); |
811 | } |
812 | |
813 | /* Find blocks which are inner loop headers. We still have non-reducible |
814 | loops to consider at this point. */ |
815 | FOR_EACH_BB_FN (bb, cfun) |
816 | { |
	  if (bitmap_bit_p (header, bb->index) && bitmap_bit_p (inner, bb->index))
818 | { |
819 | edge e; |
820 | edge_iterator ei; |
821 | basic_block jbb; |
822 | |
823 | /* Now check that the loop is reducible. We do this separate |
824 | from finding inner loops so that we do not find a reducible |
825 | loop which contains an inner non-reducible loop. |
826 | |
827 | A simple way to find reducible/natural loops is to verify |
828 | that each block in the loop is dominated by the loop |
829 | header. |
830 | |
831 | If there exists a block that is not dominated by the loop |
832 | header, then the block is reachable from outside the loop |
833 | and thus the loop is not a natural loop. */ |
834 | FOR_EACH_BB_FN (jbb, cfun) |
835 | { |
836 | /* First identify blocks in the loop, except for the loop |
837 | entry block. */ |
838 | if (bb->index == max_hdr[jbb->index] && bb != jbb) |
839 | { |
840 | /* Now verify that the block is dominated by the loop |
841 | header. */ |
842 | if (!dominated_by_p (CDI_DOMINATORS, jbb, bb)) |
843 | break; |
844 | } |
845 | } |
846 | |
	      /* If we exited the loop early, then BB is the header of
		 a non-reducible loop and we should quit processing it
		 now.  */
850 | if (jbb != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
851 | continue; |
852 | |
	      /* BB is a header of an inner loop, or block 0 in a subroutine
		 with no loops at all.  */
855 | head = tail = -1; |
856 | too_large_failure = false; |
857 | loop_head = max_hdr[bb->index]; |
858 | |
859 | if (extend_regions_p) |
860 | /* We save degree in case when we meet a too_large region |
861 | and cancel it. We need a correct degree later when |
862 | calling extend_rgns. */ |
		memcpy (degree1, degree,
			last_basic_block_for_fn (cfun) * sizeof (int));
865 | |
	      /* Decrease degree of all BB's successors for topological
		 ordering.  */
868 | FOR_EACH_EDGE (e, ei, bb->succs) |
869 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
870 | --degree[e->dest->index]; |
871 | |
872 | /* Estimate # insns, and count # blocks in the region. */ |
873 | num_bbs = 1; |
874 | num_insns = common_sched_info->estimate_number_of_insns (bb); |
875 | |
	      /* Find all loop latches (blocks with back edges to the loop
		 header), or all the leaf blocks if the cfg has no loops.
878 | |
879 | Place those blocks into the queue. */ |
880 | if (no_loops) |
881 | { |
882 | FOR_EACH_BB_FN (jbb, cfun) |
883 | /* Leaf nodes have only a single successor which must |
884 | be EXIT_BLOCK. */ |
		    if (single_succ_p (jbb)
			&& single_succ (jbb) == EXIT_BLOCK_PTR_FOR_FN (cfun))
		      {
			queue[++tail] = jbb->index;
			bitmap_set_bit (in_queue, jbb->index);

			if (too_large (jbb->index, &num_bbs, &num_insns))
892 | { |
893 | too_large_failure = true; |
894 | break; |
895 | } |
896 | } |
897 | } |
898 | else |
899 | { |
900 | edge e; |
901 | |
902 | FOR_EACH_EDGE (e, ei, bb->preds) |
903 | { |
904 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
905 | continue; |
906 | |
907 | node = e->src->index; |
908 | |
909 | if (max_hdr[node] == loop_head && node != bb->index) |
910 | { |
911 | /* This is a loop latch. */ |
912 | queue[++tail] = node; |
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
916 | { |
917 | too_large_failure = true; |
918 | break; |
919 | } |
920 | } |
921 | } |
922 | } |
923 | |
924 | /* Now add all the blocks in the loop to the queue. |
925 | |
926 | We know the loop is a natural loop; however the algorithm |
927 | above will not always mark certain blocks as being in the |
928 | loop. Consider: |
929 | node children |
930 | a b,c |
931 | b c |
932 | c a,d |
933 | d b |
934 | |
935 | The algorithm in the DFS traversal may not mark B & D as part |
936 | of the loop (i.e. they will not have max_hdr set to A). |
937 | |
938 | We know they cannot be loop latches (else they would have |
939 | had max_hdr set since they'd have a backedge to a dominator |
940 | block). So we don't need them on the initial queue. |
941 | |
942 | We know they are part of the loop because they are dominated |
943 | by the loop header and can be reached by a backwards walk of |
944 | the edges starting with nodes on the initial queue. |
945 | |
946 | It is safe and desirable to include those nodes in the |
947 | loop/scheduling region. To do so we would need to decrease |
948 | the degree of a node if it is the target of a backedge |
949 | within the loop itself as the node is placed in the queue. |
950 | |
951 | We do not do this because I'm not sure that the actual |
952 | scheduling code will properly handle this case. ?!? */ |
953 | |
954 | while (head < tail && !too_large_failure) |
955 | { |
956 | edge e; |
957 | child = queue[++head]; |
958 | |
959 | FOR_EACH_EDGE (e, ei, |
960 | BASIC_BLOCK_FOR_FN (cfun, child)->preds) |
961 | { |
962 | node = e->src->index; |
963 | |
964 | /* See discussion above about nodes not marked as in |
965 | this loop during the initial DFS traversal. */ |
966 | if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) |
967 | || max_hdr[node] != loop_head) |
968 | { |
969 | tail = -1; |
970 | break; |
971 | } |
		      else if (!bitmap_bit_p (in_queue, node) && node != bb->index)
			{
			  queue[++tail] = node;
			  bitmap_set_bit (in_queue, node);

			  if (too_large (node, &num_bbs, &num_insns))
978 | { |
979 | too_large_failure = true; |
980 | break; |
981 | } |
982 | } |
983 | } |
984 | } |
985 | |
986 | if (tail >= 0 && !too_large_failure) |
987 | { |
988 | /* Place the loop header into list of region blocks. */ |
989 | degree[bb->index] = -1; |
990 | rgn_bb_table[idx] = bb->index; |
991 | RGN_NR_BLOCKS (nr_regions) = num_bbs; |
992 | RGN_BLOCKS (nr_regions) = idx++; |
993 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
994 | RGN_HAS_REAL_EBB (nr_regions) = 0; |
995 | CONTAINING_RGN (bb->index) = nr_regions; |
996 | BLOCK_TO_BB (bb->index) = count = 0; |
997 | |
998 | /* Remove blocks from queue[] when their in degree |
999 | becomes zero. Repeat until no blocks are left on the |
1000 | list. This produces a topological list of blocks in |
1001 | the region. */ |
1002 | while (tail >= 0) |
1003 | { |
1004 | if (head < 0) |
1005 | head = tail; |
1006 | child = queue[head]; |
1007 | if (degree[child] == 0) |
1008 | { |
1009 | edge e; |
1010 | |
1011 | degree[child] = -1; |
1012 | rgn_bb_table[idx++] = child; |
1013 | BLOCK_TO_BB (child) = ++count; |
1014 | CONTAINING_RGN (child) = nr_regions; |
1015 | queue[head] = queue[tail--]; |
1016 | |
1017 | FOR_EACH_EDGE (e, ei, |
1018 | BASIC_BLOCK_FOR_FN (cfun, |
1019 | child)->succs) |
1020 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1021 | --degree[e->dest->index]; |
1022 | } |
1023 | else |
1024 | --head; |
1025 | } |
1026 | ++nr_regions; |
1027 | } |
1028 | else if (extend_regions_p) |
1029 | { |
1030 | /* Restore DEGREE. */ |
1031 | int *t = degree; |
1032 | |
1033 | degree = degree1; |
1034 | degree1 = t; |
1035 | |
1036 | /* And force successors of BB to be region heads. |
1037 | This may provide several smaller regions instead |
1038 | of one too_large region. */ |
1039 | FOR_EACH_EDGE (e, ei, bb->succs) |
1040 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
		    bitmap_set_bit (extended_rgn_header, e->dest->index);
1042 | } |
1043 | } |
1044 | } |
      free (queue);
1046 | |
1047 | if (extend_regions_p) |
1048 | { |
	  free (degree1);

	  bitmap_ior (header, header, extended_rgn_header);
	  sbitmap_free (extended_rgn_header);
1053 | |
1054 | extend_rgns (degree, &idx, header, max_hdr); |
1055 | } |
1056 | } |
1057 | |
1058 | /* Any block that did not end up in a region is placed into a region |
1059 | by itself. */ |
1060 | FOR_EACH_BB_FN (bb, cfun) |
1061 | if (degree[bb->index] >= 0) |
1062 | { |
1063 | rgn_bb_table[idx] = bb->index; |
1064 | RGN_NR_BLOCKS (nr_regions) = 1; |
1065 | RGN_BLOCKS (nr_regions) = idx++; |
1066 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
1067 | RGN_HAS_REAL_EBB (nr_regions) = 0; |
1068 | CONTAINING_RGN (bb->index) = nr_regions++; |
1069 | BLOCK_TO_BB (bb->index) = 0; |
1070 | } |
1071 | |
  free (max_hdr);
  free (degree);
  free (stack);
1075 | } |
1076 | |
1077 | |
1078 | /* Wrapper function. |
1079 | If FLAG_SEL_SCHED_PIPELINING is set, then use custom function to form |
   regions.  Otherwise just call haifa_find_rgns.  */
1081 | static void |
1082 | find_rgns (void) |
1083 | { |
1084 | if (sel_sched_p () && flag_sel_sched_pipelining) |
1085 | sel_find_rgns (); |
1086 | else |
1087 | haifa_find_rgns (); |
1088 | } |
1089 | |
1090 | static int gather_region_statistics (int **); |
1091 | static void print_region_statistics (int *, int, int *, int); |
1092 | |
1093 | /* Calculate the histogram that shows the number of regions having the |
1094 | given number of basic blocks, and store it in the RSP array. Return |
1095 | the size of this array. */ |
1096 | static int |
1097 | gather_region_statistics (int **rsp) |
1098 | { |
1099 | int i, *a = 0, a_sz = 0; |
1100 | |
1101 | /* a[i] is the number of regions that have (i + 1) basic blocks. */ |
1102 | for (i = 0; i < nr_regions; i++) |
1103 | { |
1104 | int nr_blocks = RGN_NR_BLOCKS (i); |
1105 | |
1106 | gcc_assert (nr_blocks >= 1); |
1107 | |
1108 | if (nr_blocks > a_sz) |
1109 | { |
1110 | a = XRESIZEVEC (int, a, nr_blocks); |
1111 | do |
1112 | a[a_sz++] = 0; |
1113 | while (a_sz != nr_blocks); |
1114 | } |
1115 | |
1116 | a[nr_blocks - 1]++; |
1117 | } |
1118 | |
1119 | *rsp = a; |
1120 | return a_sz; |
1121 | } |
1122 | |
1123 | /* Print regions statistics. S1 and S2 denote the data before and after |
1124 | calling extend_rgns, respectively. */ |
1125 | static void |
1126 | print_region_statistics (int *s1, int s1_sz, int *s2, int s2_sz) |
1127 | { |
1128 | int i; |
1129 | |
1130 | /* We iterate until s2_sz because extend_rgns does not decrease |
1131 | the maximal region size. */ |
1132 | for (i = 1; i < s2_sz; i++) |
1133 | { |
1134 | int n1, n2; |
1135 | |
1136 | n2 = s2[i]; |
1137 | |
1138 | if (n2 == 0) |
1139 | continue; |
1140 | |
1141 | if (i >= s1_sz) |
1142 | n1 = 0; |
1143 | else |
1144 | n1 = s1[i]; |
1145 | |
      fprintf (sched_dump, ";; Region extension statistics: size %d: " \
	       "was %d + %d more\n", i + 1, n1, n2 - n1);
1148 | } |
1149 | } |
1150 | |
1151 | /* Extend regions. |
1152 | DEGREE - Array of incoming edge count, considering only |
1153 | the edges, that don't have their sources in formed regions yet. |
1154 | IDXP - pointer to the next available index in rgn_bb_table. |
1155 | HEADER - set of all region heads. |
1156 | LOOP_HDR - mapping from block to the containing loop |
1157 | (two blocks can reside within one region if they have |
1158 | the same loop header). */ |
1159 | void |
extend_rgns (int *degree, int *idxp, sbitmap header, int *loop_hdr)
1161 | { |
1162 | int *order, i, idx = *idxp, iter = 0, max_iter, *max_hdr; |
1163 | int nblocks = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS; |
1164 | bool rescan = false; |
1165 | |
1166 | max_iter = param_max_sched_extend_regions_iters; |
1167 | |
1168 | max_hdr = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
1169 | |
1170 | order = XNEWVEC (int, last_basic_block_for_fn (cfun)); |
1171 | post_order_compute (order, false, false); |
1172 | |
1173 | for (i = nblocks - 1; i >= 0; i--) |
1174 | { |
1175 | int bbn = order[i]; |
1176 | if (degree[bbn] >= 0) |
1177 | { |
1178 | max_hdr[bbn] = bbn; |
1179 | rescan = true; |
1180 | } |
1181 | else |
1182 | /* This block already was processed in find_rgns. */ |
1183 | max_hdr[bbn] = -1; |
1184 | } |
1185 | |
1186 | /* The idea is to topologically walk through CFG in top-down order. |
1187 | During the traversal, if all the predecessors of a node are |
1188 | marked to be in the same region (they all have the same max_hdr), |
   then the current node is also marked to be a part of that region.
1190 | Otherwise the node starts its own region. |
1191 | CFG should be traversed until no further changes are made. On each |
1192 | iteration the set of the region heads is extended (the set of those |
1193 | blocks that have max_hdr[bbi] == bbi). This set is upper bounded by the |
1194 | set of all basic blocks, thus the algorithm is guaranteed to |
1195 | terminate. */ |
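
  /* A small illustration (hypothetical CFG): let C have exactly two
     predecessors A and B, all in the same loop.  If max_hdr[A] and
     max_hdr[B] are both A, then C joins A's region (max_hdr[C] = A).
     If instead max_hdr[A] == A but max_hdr[B] == B, the predecessors
     disagree, so C becomes a region head itself (max_hdr[C] = C) and
     another iteration is requested.  */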
1196 | |
1197 | while (rescan && iter < max_iter) |
1198 | { |
1199 | rescan = false; |
1200 | |
1201 | for (i = nblocks - 1; i >= 0; i--) |
1202 | { |
1203 | edge e; |
1204 | edge_iterator ei; |
1205 | int bbn = order[i]; |
1206 | |
	  if (max_hdr[bbn] != -1 && !bitmap_bit_p (header, bbn))
1208 | { |
1209 | int hdr = -1; |
1210 | |
1211 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->preds) |
1212 | { |
1213 | int predn = e->src->index; |
1214 | |
1215 | if (predn != ENTRY_BLOCK |
1216 | /* If pred wasn't processed in find_rgns. */ |
1217 | && max_hdr[predn] != -1 |
1218 | /* And pred and bb reside in the same loop. |
1219 | (Or out of any loop). */ |
1220 | && loop_hdr[bbn] == loop_hdr[predn]) |
1221 | { |
1222 | if (hdr == -1) |
1223 | /* Then bb extends the containing region of pred. */ |
1224 | hdr = max_hdr[predn]; |
1225 | else if (hdr != max_hdr[predn]) |
1226 | /* Too bad, there are at least two predecessors |
1227 | that reside in different regions. Thus, BB should |
1228 | begin its own region. */ |
1229 | { |
1230 | hdr = bbn; |
1231 | break; |
1232 | } |
1233 | } |
1234 | else |
1235 | /* BB starts its own region. */ |
1236 | { |
1237 | hdr = bbn; |
1238 | break; |
1239 | } |
1240 | } |
1241 | |
1242 | if (hdr == bbn) |
1243 | { |
	      /* If BB starts its own region,
		 update the set of headers with BB.  */
	      bitmap_set_bit (header, bbn);
1247 | rescan = true; |
1248 | } |
1249 | else |
1250 | gcc_assert (hdr != -1); |
1251 | |
1252 | max_hdr[bbn] = hdr; |
1253 | } |
1254 | } |
1255 | |
1256 | iter++; |
1257 | } |
1258 | |
1259 | /* Statistics were gathered on the SPEC2000 package of tests with |
1260 | mainline weekly snapshot gcc-4.1-20051015 on ia64. |
1261 | |
1262 | Statistics for SPECint: |
1263 | 1 iteration : 1751 cases (38.7%) |
1264 | 2 iterations: 2770 cases (61.3%) |
1265 | Blocks wrapped in regions by find_rgns without extension: 18295 blocks |
1266 | Blocks wrapped in regions by 2 iterations in extend_rgns: 23821 blocks |
1267 | (We don't count single block regions here). |
1268 | |
1269 | Statistics for SPECfp: |
1270 | 1 iteration : 621 cases (35.9%) |
1271 | 2 iterations: 1110 cases (64.1%) |
1272 | Blocks wrapped in regions by find_rgns without extension: 6476 blocks |
1273 | Blocks wrapped in regions by 2 iterations in extend_rgns: 11155 blocks |
1274 | (We don't count single block regions here). |
1275 | |
1276 | By default we do at most 2 iterations. |
1277 | This can be overridden with max-sched-extend-regions-iters parameter: |
1278 | 0 - disable region extension, |
1279 | N > 0 - do at most N iterations. */ |
1280 | |
1281 | if (sched_verbose && iter != 0) |
    fprintf (sched_dump, ";; Region extension iterations: %d%s\n", iter,
	     rescan ? "... failed" : "");
1284 | |
1285 | if (!rescan && iter != 0) |
1286 | { |
1287 | int *s1 = NULL, s1_sz = 0; |
1288 | |
1289 | /* Save the old statistics for later printout. */ |
1290 | if (sched_verbose >= 6) |
	s1_sz = gather_region_statistics (&s1);
1292 | |
1293 | /* We have succeeded. Now assemble the regions. */ |
1294 | for (i = nblocks - 1; i >= 0; i--) |
1295 | { |
1296 | int bbn = order[i]; |
1297 | |
1298 | if (max_hdr[bbn] == bbn) |
1299 | /* BBN is a region head. */ |
1300 | { |
1301 | edge e; |
1302 | edge_iterator ei; |
1303 | int num_bbs = 0, j, num_insns = 0, large; |
1304 | |
	      large = too_large (bbn, &num_bbs, &num_insns);
1306 | |
1307 | degree[bbn] = -1; |
1308 | rgn_bb_table[idx] = bbn; |
1309 | RGN_BLOCKS (nr_regions) = idx++; |
1310 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
1311 | RGN_HAS_REAL_EBB (nr_regions) = 0; |
1312 | CONTAINING_RGN (bbn) = nr_regions; |
1313 | BLOCK_TO_BB (bbn) = 0; |
1314 | |
1315 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, bbn)->succs) |
1316 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1317 | degree[e->dest->index]--; |
1318 | |
1319 | if (!large) |
1320 | /* Here we check whether the region is too_large. */ |
1321 | for (j = i - 1; j >= 0; j--) |
1322 | { |
1323 | int succn = order[j]; |
1324 | if (max_hdr[succn] == bbn) |
1325 | { |
		      if ((large = too_large (succn, &num_bbs, &num_insns)))
1327 | break; |
1328 | } |
1329 | } |
1330 | |
1331 | if (large) |
1332 | /* If the region is too_large, then wrap every block of |
1333 | the region into single block region. |
1334 | Here we wrap region head only. Other blocks are |
1335 | processed in the below cycle. */ |
1336 | { |
1337 | RGN_NR_BLOCKS (nr_regions) = 1; |
1338 | nr_regions++; |
1339 | } |
1340 | |
1341 | num_bbs = 1; |
1342 | |
1343 | for (j = i - 1; j >= 0; j--) |
1344 | { |
1345 | int succn = order[j]; |
1346 | |
1347 | if (max_hdr[succn] == bbn) |
		  /* This loop iterates over all basic blocks that
		     are supposed to be in the region with head BBN,
		     and wraps them into that region (or into a
		     single-block region).  */
1352 | { |
1353 | gcc_assert (degree[succn] == 0); |
1354 | |
1355 | degree[succn] = -1; |
1356 | rgn_bb_table[idx] = succn; |
1357 | BLOCK_TO_BB (succn) = large ? 0 : num_bbs++; |
1358 | CONTAINING_RGN (succn) = nr_regions; |
1359 | |
1360 | if (large) |
1361 | /* Wrap SUCCN into single block region. */ |
1362 | { |
1363 | RGN_BLOCKS (nr_regions) = idx; |
1364 | RGN_NR_BLOCKS (nr_regions) = 1; |
1365 | RGN_DONT_CALC_DEPS (nr_regions) = 0; |
1366 | RGN_HAS_REAL_EBB (nr_regions) = 0; |
1367 | nr_regions++; |
1368 | } |
1369 | |
1370 | idx++; |
1371 | |
1372 | FOR_EACH_EDGE (e, ei, |
1373 | BASIC_BLOCK_FOR_FN (cfun, succn)->succs) |
1374 | if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)) |
1375 | degree[e->dest->index]--; |
1376 | } |
1377 | } |
1378 | |
1379 | if (!large) |
1380 | { |
1381 | RGN_NR_BLOCKS (nr_regions) = num_bbs; |
1382 | nr_regions++; |
1383 | } |
1384 | } |
1385 | } |
1386 | |
1387 | if (sched_verbose >= 6) |
1388 | { |
1389 | int *s2, s2_sz; |
1390 | |
1391 | /* Get the new statistics and print the comparison with the |
1392 | one before calling this function. */ |
	  s2_sz = gather_region_statistics (&s2);
	  print_region_statistics (s1, s1_sz, s2, s2_sz);
	  free (s1);
	  free (s2);
1397 | } |
1398 | } |
1399 | |
  free (order);
  free (max_hdr);
1402 | |
1403 | *idxp = idx; |
1404 | } |
1405 | |
1406 | /* Functions for regions scheduling information. */ |
1407 | |
1408 | /* Compute dominators, probability, and potential-split-edges of bb. |
1409 | Assume that these values were already computed for bb's predecessors. */ |
1410 | |
1411 | static void |
1412 | compute_dom_prob_ps (int bb) |
1413 | { |
1414 | edge_iterator in_ei; |
1415 | edge in_edge; |
1416 | |
1417 | /* We shouldn't have any real ebbs yet. */ |
1418 | gcc_assert (ebb_head [bb] == bb + current_blocks); |
1419 | |
1420 | if (IS_RGN_ENTRY (bb)) |
1421 | { |
      bitmap_set_bit (dom[bb], 0);
1423 | prob[bb] = REG_BR_PROB_BASE; |
1424 | return; |
1425 | } |
1426 | |
1427 | prob[bb] = 0; |
1428 | |
1429 | /* Initialize dom[bb] to '111..1'. */ |
1430 | bitmap_ones (dom[bb]); |
1431 | |
1432 | FOR_EACH_EDGE (in_edge, in_ei, |
1433 | BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb))->preds) |
1434 | { |
1435 | int pred_bb; |
1436 | edge out_edge; |
1437 | edge_iterator out_ei; |
1438 | |
1439 | if (in_edge->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)) |
1440 | continue; |
1441 | |
1442 | pred_bb = BLOCK_TO_BB (in_edge->src->index); |
1443 | bitmap_and (dom[bb], dom[bb], dom[pred_bb]); |
1444 | bitmap_ior (ancestor_edges[bb], |
1445 | ancestor_edges[bb], ancestor_edges[pred_bb]); |
1446 | |
      bitmap_set_bit (ancestor_edges[bb], EDGE_TO_BIT (in_edge));
1448 | |
1449 | bitmap_ior (pot_split[bb], pot_split[bb], pot_split[pred_bb]); |
1450 | |
1451 | FOR_EACH_EDGE (out_edge, out_ei, in_edge->src->succs) |
	bitmap_set_bit (pot_split[bb], EDGE_TO_BIT (out_edge));
1453 | |
      prob[bb] += combine_probabilities
	(prob[pred_bb],
	 in_edge->probability.initialized_p ()
	 ? in_edge->probability.to_reg_br_prob_base ()
	 : 0);
1459 | // The rounding divide in combine_probabilities can result in an extra |
1460 | // probability increment propagating along 50-50 edges. Eventually when |
1461 | // the edges re-merge, the accumulated probability can go slightly above |
1462 | // REG_BR_PROB_BASE. |
1463 | if (prob[bb] > REG_BR_PROB_BASE) |
1464 | prob[bb] = REG_BR_PROB_BASE; |
1465 | } |
1466 | |
  bitmap_set_bit (dom[bb], bb);
1468 | bitmap_and_compl (pot_split[bb], pot_split[bb], ancestor_edges[bb]); |
1469 | |
1470 | if (sched_verbose >= 2) |
    fprintf (sched_dump, ";; bb_prob(%d, %d) = %3d\n", bb, BB_TO_BLOCK (bb),
1472 | (100 * prob[bb]) / REG_BR_PROB_BASE); |
1473 | } |
1474 | |
1475 | /* Functions for target info. */ |
1476 | |
1477 | /* Compute in BL the list of split-edges of bb_src relatively to bb_trg. |
1478 | Note that bb_trg dominates bb_src. */ |
1479 | |
1480 | static void |
1481 | split_edges (int bb_src, int bb_trg, edgelst *bl) |
1482 | { |
1483 | auto_sbitmap src (SBITMAP_SIZE (pot_split[bb_src])); |
1484 | bitmap_copy (src, pot_split[bb_src]); |
1485 | |
1486 | bitmap_and_compl (src, src, pot_split[bb_trg]); |
  extract_edgelst (src, bl);
1488 | } |
1489 | |
1490 | /* Find the valid candidate-source-blocks for the target block TRG, compute |
1491 | their probability, and check if they are speculative or not. |
1492 | For speculative sources, compute their update-blocks and split-blocks. */ |
1493 | |
1494 | static void |
1495 | compute_trg_info (int trg) |
1496 | { |
1497 | candidate *sp; |
  edgelst el = { NULL, 0 };
1499 | int i, j, k, update_idx; |
1500 | basic_block block; |
1501 | edge_iterator ei; |
1502 | edge e; |
1503 | |
1504 | candidate_table = XNEWVEC (candidate, current_nr_blocks); |
1505 | |
1506 | bblst_last = 0; |
1507 | /* bblst_table holds split blocks and update blocks for each block after |
     the current one in the region.  Split blocks and update blocks are
1509 | the TO blocks of region edges, so there can be at most rgn_nr_edges |
1510 | of them. */ |
1511 | bblst_size = (current_nr_blocks - target_bb) * rgn_nr_edges; |
1512 | bblst_table = XNEWVEC (basic_block, bblst_size); |
1513 | |
1514 | edgelst_last = 0; |
1515 | edgelst_table = XNEWVEC (edge, rgn_nr_edges); |
1516 | |
1517 | /* Define some of the fields for the target bb as well. */ |
1518 | sp = candidate_table + trg; |
1519 | sp->is_valid = 1; |
1520 | sp->is_speculative = 0; |
1521 | sp->src_prob = REG_BR_PROB_BASE; |
1522 | |
1523 | auto_sbitmap visited (last_basic_block_for_fn (cfun)); |
1524 | |
1525 | for (i = trg + 1; i < current_nr_blocks; i++) |
1526 | { |
1527 | sp = candidate_table + i; |
1528 | |
1529 | sp->is_valid = IS_DOMINATED (i, trg); |
1530 | if (sp->is_valid) |
1531 | { |
1532 | int tf = prob[trg], cf = prob[i]; |
1533 | |
1534 | /* In CFGs with low probability edges TF can possibly be zero. */ |
1535 | sp->src_prob = (tf ? GCOV_COMPUTE_SCALE (cf, tf) : 0); |
1536 | sp->is_valid = (sp->src_prob >= min_spec_prob); |
1537 | } |
1538 | |
1539 | if (sp->is_valid) |
1540 | { |
	  split_edges (i, trg, &el);
1542 | sp->is_speculative = (el.nr_members) ? 1 : 0; |
1543 | if (sp->is_speculative && !flag_schedule_speculative) |
1544 | sp->is_valid = 0; |
1545 | } |
1546 | |
1547 | if (sp->is_valid) |
1548 | { |
1549 | /* Compute split blocks and store them in bblst_table. |
1550 | The TO block of every split edge is a split block. */ |
1551 | sp->split_bbs.first_member = &bblst_table[bblst_last]; |
1552 | sp->split_bbs.nr_members = el.nr_members; |
1553 | for (j = 0; j < el.nr_members; bblst_last++, j++) |
1554 | bblst_table[bblst_last] = el.first_member[j]->dest; |
1555 | sp->update_bbs.first_member = &bblst_table[bblst_last]; |
1556 | |
1557 | /* Compute update blocks and store them in bblst_table. |
1558 | For every split edge, look at the FROM block, and check |
1559 | all out edges. For each out edge that is not a split edge, |
1560 | add the TO block to the update block list. This list can end |
1561 | up with a lot of duplicates. We need to weed them out to avoid |
1562 | overrunning the end of the bblst_table. */ |
1563 | |
1564 | update_idx = 0; |
1565 | bitmap_clear (visited); |
1566 | for (j = 0; j < el.nr_members; j++) |
1567 | { |
1568 | block = el.first_member[j]->src; |
1569 | FOR_EACH_EDGE (e, ei, block->succs) |
1570 | { |
		  if (!bitmap_bit_p (visited, e->dest->index))
1572 | { |
1573 | for (k = 0; k < el.nr_members; k++) |
1574 | if (e == el.first_member[k]) |
1575 | break; |
1576 | |
1577 | if (k >= el.nr_members) |
1578 | { |
1579 | bblst_table[bblst_last++] = e->dest; |
			  bitmap_set_bit (visited, e->dest->index);
1581 | update_idx++; |
1582 | } |
1583 | } |
1584 | } |
1585 | } |
1586 | sp->update_bbs.nr_members = update_idx; |
1587 | |
1588 | /* Make sure we didn't overrun the end of bblst_table. */ |
1589 | gcc_assert (bblst_last <= bblst_size); |
1590 | } |
1591 | else |
1592 | { |
1593 | sp->split_bbs.nr_members = sp->update_bbs.nr_members = 0; |
1594 | |
1595 | sp->is_speculative = 0; |
1596 | sp->src_prob = 0; |
1597 | } |
1598 | } |
1599 | } |
1600 | |
1601 | /* Free the computed target info. */ |
1602 | static void |
1603 | free_trg_info (void) |
1604 | { |
  free (candidate_table);
  free (bblst_table);
  free (edgelst_table);
1608 | } |
1609 | |
1610 | /* Print candidates info, for debugging purposes. Callable from debugger. */ |
1611 | |
1612 | DEBUG_FUNCTION void |
1613 | debug_candidate (int i) |
1614 | { |
1615 | if (!candidate_table[i].is_valid) |
1616 | return; |
1617 | |
1618 | if (candidate_table[i].is_speculative) |
1619 | { |
1620 | int j; |
      fprintf (sched_dump, "src b %d bb %d speculative \n", BB_TO_BLOCK (i), i);

      fprintf (sched_dump, "split path: ");
      for (j = 0; j < candidate_table[i].split_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].split_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");

      fprintf (sched_dump, "update path: ");
      for (j = 0; j < candidate_table[i].update_bbs.nr_members; j++)
	{
	  int b = candidate_table[i].update_bbs.first_member[j]->index;

	  fprintf (sched_dump, " %d ", b);
	}
      fprintf (sched_dump, "\n");
    }
  else
    {
      fprintf (sched_dump, " src %d equivalent\n", BB_TO_BLOCK (i));
1644 | } |
1645 | } |
1646 | |
1647 | /* Print candidates info, for debugging purposes. Callable from debugger. */ |
1648 | |
1649 | DEBUG_FUNCTION void |
1650 | debug_candidates (int trg) |
1651 | { |
1652 | int i; |
1653 | |
  fprintf (sched_dump, "----------- candidate table: target: b=%d bb=%d ---\n",
1655 | BB_TO_BLOCK (trg), trg); |
1656 | for (i = trg + 1; i < current_nr_blocks; i++) |
1657 | debug_candidate (i); |
1658 | } |
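
/* For example (an illustrative debugging session, not a prescribed
   workflow), the candidate table for the current target block can be
   inspected with

     (gdb) call debug_candidates (target_bb)

   which writes its report to the scheduler dump file.  */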
1659 | |
1660 | /* Functions for speculative scheduling. */ |
1661 | |
1662 | static bitmap_head not_in_df; |
1663 | |
/* Return false if X is a set of a register alive at the beginning of one
   of the split-blocks of SRC, otherwise return true.  */
1666 | |
1667 | static bool |
1668 | check_live_1 (int src, rtx x) |
1669 | { |
1670 | int i; |
1671 | int regno; |
1672 | rtx reg = SET_DEST (x); |
1673 | |
1674 | if (reg == 0) |
1675 | return true; |
1676 | |
1677 | while (GET_CODE (reg) == SUBREG |
1678 | || GET_CODE (reg) == ZERO_EXTRACT |
1679 | || GET_CODE (reg) == STRICT_LOW_PART) |
1680 | reg = XEXP (reg, 0); |
1681 | |
1682 | if (GET_CODE (reg) == PARALLEL) |
1683 | { |
1684 | int i; |
1685 | |
1686 | for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) |
1687 | if (XEXP (XVECEXP (reg, 0, i), 0) != 0) |
1688 | if (check_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0))) |
1689 | return true; |
1690 | |
1691 | return false; |
1692 | } |
1693 | |
1694 | if (!REG_P (reg)) |
1695 | return true; |
1696 | |
1697 | regno = REGNO (reg); |
1698 | |
1699 | if (regno < FIRST_PSEUDO_REGISTER && global_regs[regno]) |
1700 | { |
1701 | /* Global registers are assumed live. */ |
1702 | return false; |
1703 | } |
1704 | else |
1705 | { |
1706 | if (regno < FIRST_PSEUDO_REGISTER) |
1707 | { |
1708 | /* Check for hard registers. */ |
1709 | int j = REG_NREGS (reg); |
1710 | while (--j >= 0) |
1711 | { |
1712 | for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) |
1713 | { |
1714 | basic_block b = candidate_table[src].split_bbs.first_member[i]; |
1715 | int t = bitmap_bit_p (¬_in_df, b->index); |
1716 | |
		  /* We can have split blocks that were recently generated.
		     Such blocks are always outside the current region.  */
1719 | gcc_assert (!t || (CONTAINING_RGN (b->index) |
1720 | != CONTAINING_RGN (BB_TO_BLOCK (src)))); |
1721 | |
1722 | if (t || REGNO_REG_SET_P (df_get_live_in (b), regno + j)) |
1723 | return false; |
1724 | } |
1725 | } |
1726 | } |
1727 | else |
1728 | { |
1729 | /* Check for pseudo registers. */ |
1730 | for (i = 0; i < candidate_table[src].split_bbs.nr_members; i++) |
1731 | { |
1732 | basic_block b = candidate_table[src].split_bbs.first_member[i]; |
1733 | int t = bitmap_bit_p (¬_in_df, b->index); |
1734 | |
1735 | gcc_assert (!t || (CONTAINING_RGN (b->index) |
1736 | != CONTAINING_RGN (BB_TO_BLOCK (src)))); |
1737 | |
1738 | if (t || REGNO_REG_SET_P (df_get_live_in (b), regno)) |
1739 | return false; |
1740 | } |
1741 | } |
1742 | } |
1743 | |
1744 | return true; |
1745 | } |
1746 | |
/* If X is a set of a register R, mark that R is alive at the beginning
   of every update-block of SRC.  */
1749 | |
1750 | static void |
1751 | update_live_1 (int src, rtx x) |
1752 | { |
1753 | int i; |
1754 | int regno; |
1755 | rtx reg = SET_DEST (x); |
1756 | |
1757 | if (reg == 0) |
1758 | return; |
1759 | |
1760 | while (GET_CODE (reg) == SUBREG |
1761 | || GET_CODE (reg) == ZERO_EXTRACT |
1762 | || GET_CODE (reg) == STRICT_LOW_PART) |
1763 | reg = XEXP (reg, 0); |
1764 | |
1765 | if (GET_CODE (reg) == PARALLEL) |
1766 | { |
1767 | int i; |
1768 | |
1769 | for (i = XVECLEN (reg, 0) - 1; i >= 0; i--) |
1770 | if (XEXP (XVECEXP (reg, 0, i), 0) != 0) |
1771 | update_live_1 (src, XEXP (XVECEXP (reg, 0, i), 0)); |
1772 | |
1773 | return; |
1774 | } |
1775 | |
1776 | if (!REG_P (reg)) |
1777 | return; |
1778 | |
1779 | /* Global registers are always live, so the code below does not apply |
1780 | to them. */ |
1781 | |
1782 | regno = REGNO (reg); |
1783 | |
1784 | if (! HARD_REGISTER_NUM_P (regno) |
1785 | || !global_regs[regno]) |
1786 | { |
1787 | for (i = 0; i < candidate_table[src].update_bbs.nr_members; i++) |
1788 | { |
1789 | basic_block b = candidate_table[src].update_bbs.first_member[i]; |
	  bitmap_set_range (df_get_live_in (b), regno, REG_NREGS (reg));
1791 | } |
1792 | } |
1793 | } |
1794 | |
/* Return true if INSN can be speculatively moved from block SRC to TRG,
   otherwise return false.  Called before the first insertion of INSN
   into the ready list or before scheduling.  */
1798 | |
1799 | static bool |
1800 | check_live (rtx_insn *insn, int src) |
1801 | { |
1802 | /* Find the registers set by instruction. */ |
1803 | if (GET_CODE (PATTERN (insn)) == SET |
1804 | || GET_CODE (PATTERN (insn)) == CLOBBER) |
    return check_live_1 (src, PATTERN (insn));
1806 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) |
1807 | { |
1808 | int j; |
1809 | for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) |
1810 | if ((GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET |
1811 | || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) |
1812 | && !check_live_1 (src, XVECEXP (PATTERN (insn), 0, j))) |
1813 | return false; |
1814 | |
1815 | return true; |
1816 | } |
1817 | |
1818 | return true; |
1819 | } |
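
/* As an illustration of the PARALLEL case above, a hypothetical pattern

     (parallel [(set (reg:SI 100) (plus:SI (reg:SI 101) (reg:SI 102)))
                (clobber (reg:CC 17))])

   is checked element by element: check_live_1 is applied to the SET and
   to the CLOBBER, and the motion is rejected if either destination is
   live at the start of a split block.  The register numbers here are
   made up for the example.  */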
1820 | |
1821 | /* Update the live registers info after insn was moved speculatively from |
1822 | block src to trg. */ |
1823 | |
1824 | static void |
1825 | update_live (rtx_insn *insn, int src) |
1826 | { |
1827 | /* Find the registers set by instruction. */ |
1828 | if (GET_CODE (PATTERN (insn)) == SET |
1829 | || GET_CODE (PATTERN (insn)) == CLOBBER) |
    update_live_1 (src, PATTERN (insn));
1831 | else if (GET_CODE (PATTERN (insn)) == PARALLEL) |
1832 | { |
1833 | int j; |
1834 | for (j = XVECLEN (PATTERN (insn), 0) - 1; j >= 0; j--) |
1835 | if (GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == SET |
1836 | || GET_CODE (XVECEXP (PATTERN (insn), 0, j)) == CLOBBER) |
1837 | update_live_1 (src, XVECEXP (PATTERN (insn), 0, j)); |
1838 | } |
1839 | } |
1840 | |
1841 | /* True if block bb_to is equal to, or reachable from block bb_from. */ |
1842 | #define IS_REACHABLE(bb_from, bb_to) \ |
1843 | (bb_from == bb_to \ |
1844 | || IS_RGN_ENTRY (bb_from) \ |
1845 | || (bitmap_bit_p \ |
1846 | (ancestor_edges[bb_to], \ |
1847 | EDGE_TO_BIT (single_pred_edge \ |
1848 | (BASIC_BLOCK_FOR_FN (cfun, \ |
1849 | BB_TO_BLOCK (bb_from))))))) |
1850 | |
1851 | /* Turns on the fed_by_spec_load flag for insns fed by load_insn. */ |
1852 | |
1853 | static void |
1854 | set_spec_fed (rtx load_insn) |
1855 | { |
1856 | sd_iterator_def sd_it; |
1857 | dep_t dep; |
1858 | |
1859 | FOR_EACH_DEP (load_insn, SD_LIST_FORW, sd_it, dep) |
1860 | if (DEP_TYPE (dep) == REG_DEP_TRUE) |
1861 | FED_BY_SPEC_LOAD (DEP_CON (dep)) = 1; |
1862 | } |
1863 | |
1864 | /* On the path from the insn to load_insn_bb, find a conditional |
1865 | branch depending on insn, that guards the speculative load. */ |
1866 | |
1867 | static bool |
1868 | find_conditional_protection (rtx_insn *insn, int load_insn_bb) |
1869 | { |
1870 | sd_iterator_def sd_it; |
1871 | dep_t dep; |
1872 | |
1873 | /* Iterate through DEF-USE forward dependences. */ |
1874 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) |
1875 | { |
1876 | rtx_insn *next = DEP_CON (dep); |
1877 | |
1878 | if ((CONTAINING_RGN (BLOCK_NUM (next)) == |
1879 | CONTAINING_RGN (BB_TO_BLOCK (load_insn_bb))) |
1880 | && IS_REACHABLE (INSN_BB (next), load_insn_bb) |
1881 | && load_insn_bb != INSN_BB (next) |
1882 | && DEP_TYPE (dep) == REG_DEP_TRUE |
1883 | && (JUMP_P (next) |
	      || find_conditional_protection (next, load_insn_bb)))
1885 | return true; |
1886 | } |
1887 | return false; |
1888 | } /* find_conditional_protection */ |
1889 | |
1890 | /* Returns true if the same insn1 that participates in the computation |
1891 | of load_insn's address is feeding a conditional branch that is |
1892 | guarding on load_insn. This is true if we find two DEF-USE |
1893 | chains: |
1894 | insn1 -> ... -> conditional-branch |
1895 | insn1 -> ... -> load_insn, |
1896 | and if a flow path exists: |
1897 | insn1 -> ... -> conditional-branch -> ... -> load_insn, |
1898 | and if insn1 is on the path |
1899 | region-entry -> ... -> bb_trg -> ... load_insn. |
1900 | |
1901 | Locate insn1 by climbing on INSN_BACK_DEPS from load_insn. |
1902 | Locate the branch by following INSN_FORW_DEPS from insn1. */ |
1903 | |
1904 | static bool |
1905 | is_conditionally_protected (rtx load_insn, int bb_src, int bb_trg) |
1906 | { |
1907 | sd_iterator_def sd_it; |
1908 | dep_t dep; |
1909 | |
1910 | FOR_EACH_DEP (load_insn, SD_LIST_BACK, sd_it, dep) |
1911 | { |
1912 | rtx_insn *insn1 = DEP_PRO (dep); |
1913 | |
1914 | /* Must be a DEF-USE dependence upon non-branch. */ |
1915 | if (DEP_TYPE (dep) != REG_DEP_TRUE |
1916 | || JUMP_P (insn1)) |
1917 | continue; |
1918 | |
1919 | /* Must exist a path: region-entry -> ... -> bb_trg -> ... load_insn. */ |
1920 | if (INSN_BB (insn1) == bb_src |
1921 | || (CONTAINING_RGN (BLOCK_NUM (insn1)) |
1922 | != CONTAINING_RGN (BB_TO_BLOCK (bb_src))) |
1923 | || (!IS_REACHABLE (bb_trg, INSN_BB (insn1)) |
1924 | && !IS_REACHABLE (INSN_BB (insn1), bb_trg))) |
1925 | continue; |
1926 | |
1927 | /* Now search for the conditional-branch. */ |
      if (find_conditional_protection (insn1, bb_src))
1929 | return true; |
1930 | |
1931 | /* Recursive step: search another insn1, "above" current insn1. */ |
      return is_conditionally_protected (insn1, bb_src, bb_trg);
1933 | } |
1934 | |
1935 | /* The chain does not exist. */ |
1936 | return false; |
1937 | } /* is_conditionally_protected */ |
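
/* To illustrate the chains described above, a conditionally protected
   load might look like this (schematic, not real RTL):

     insn1:   r1 = r2 + r3        ; computes the load address
     branch:  if (r1 != 0) ...    ; depends on insn1, guards the load
     load:    r4 = [r1]           ; load_insn, also depends on insn1

   Both DEF-USE chains from insn1 exist, and the branch sits on the flow
   path from insn1 to the load, so the load is considered conditionally
   protected.  */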
1938 | |
/* Returns true if a clue for "similar load" 'insn2' is found, and hence
   load_insn can move speculatively from bb_src to bb_trg.  All the
   following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and insn2 have a def-use dependence upon
       the same insn 'insn1'.
   (3) either insn2 is in bb_trg, or:
       - there's only one split-block, and
       - insn2 is on the escape path.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   insn2 anyhow.  */
1954 | |
1955 | static bool |
1956 | is_pfree (rtx load_insn, int bb_src, int bb_trg) |
1957 | { |
1958 | sd_iterator_def back_sd_it; |
1959 | dep_t back_dep; |
1960 | candidate *candp = candidate_table + bb_src; |
1961 | |
1962 | if (candp->split_bbs.nr_members != 1) |
1963 | /* Must have exactly one escape block. */ |
1964 | return false; |
1965 | |
1966 | FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep) |
1967 | { |
1968 | rtx_insn *insn1 = DEP_PRO (back_dep); |
1969 | |
1970 | if (DEP_TYPE (back_dep) == REG_DEP_TRUE) |
1971 | /* Found a DEF-USE dependence (insn1, load_insn). */ |
1972 | { |
1973 | sd_iterator_def fore_sd_it; |
1974 | dep_t fore_dep; |
1975 | |
1976 | FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep) |
1977 | { |
1978 | rtx_insn *insn2 = DEP_CON (fore_dep); |
1979 | |
1980 | if (DEP_TYPE (fore_dep) == REG_DEP_TRUE) |
1981 | { |
1982 | /* Found a DEF-USE dependence (insn1, insn2). */ |
1983 | if (haifa_classify_insn (insn2) != PFREE_CANDIDATE) |
1984 | /* insn2 not guaranteed to be a 1 base reg load. */ |
1985 | continue; |
1986 | |
1987 | if (INSN_BB (insn2) == bb_trg) |
1988 | /* insn2 is the similar load, in the target block. */ |
1989 | return true; |
1990 | |
	      if (*(candp->split_bbs.first_member) == BLOCK_FOR_INSN (insn2))
1992 | /* insn2 is a similar load, in a split-block. */ |
1993 | return true; |
1994 | } |
1995 | } |
1996 | } |
1997 | } |
1998 | |
1999 | /* Couldn't find a similar load. */ |
2000 | return false; |
2001 | } /* is_pfree */ |
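
/* For instance (schematic), if insn1 sets r1 and both loads use it as
   their single base register:

     insn2:      r2 = [r1 + 4]    ; already in bb_trg or the split block
     load_insn:  r3 = [r1 + 8]    ; the candidate motion

   the two addresses differ only by a constant, so if load_insn could
   trap, insn2 would already have trapped.  */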
2002 | |
2003 | /* Return true if load_insn is prisky (i.e. if load_insn is fed by |
2004 | a load moved speculatively, or if load_insn is protected by |
2005 | a compare on load_insn's address). */ |
2006 | |
2007 | static bool |
2008 | is_prisky (rtx load_insn, int bb_src, int bb_trg) |
2009 | { |
2010 | if (FED_BY_SPEC_LOAD (load_insn)) |
2011 | return true; |
2012 | |
2013 | if (sd_lists_empty_p (load_insn, SD_LIST_BACK)) |
2014 | /* Dependence may 'hide' out of the region. */ |
2015 | return true; |
2016 | |
2017 | if (is_conditionally_protected (load_insn, bb_src, bb_trg)) |
2018 | return true; |
2019 | |
2020 | return false; |
2021 | } |
2022 | |
2023 | /* Insn is a candidate to be moved speculatively from bb_src to bb_trg. |
2024 | Return true if insn is exception-free (and the motion is valid) |
2025 | and false otherwise. */ |
2026 | |
2027 | static bool |
2028 | is_exception_free (rtx_insn *insn, int bb_src, int bb_trg) |
2029 | { |
2030 | int insn_class = haifa_classify_insn (insn); |
2031 | |
2032 | /* Handle non-load insns. */ |
2033 | switch (insn_class) |
2034 | { |
2035 | case TRAP_FREE: |
2036 | return true; |
2037 | case TRAP_RISKY: |
2038 | return false; |
2039 | default:; |
2040 | } |
2041 | |
2042 | /* Handle loads. */ |
2043 | if (!flag_schedule_speculative_load) |
2044 | return false; |
2045 | IS_LOAD_INSN (insn) = 1; |
2046 | switch (insn_class) |
2047 | { |
2048 | case IFREE: |
2049 | return true; |
2050 | case IRISKY: |
2051 | return false; |
2052 | case PFREE_CANDIDATE: |
      if (is_pfree (insn, bb_src, bb_trg))
2054 | return true; |
2055 | /* Don't 'break' here: PFREE-candidate is also PRISKY-candidate. */ |
2056 | /* FALLTHRU */ |
2057 | case PRISKY_CANDIDATE: |
2058 | if (!flag_schedule_speculative_load_dangerous |
	  || is_prisky (insn, bb_src, bb_trg))
2060 | return false; |
2061 | break; |
2062 | default:; |
2063 | } |
2064 | |
2065 | return flag_schedule_speculative_load_dangerous; |
2066 | } |
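
/* In terms of command-line flags, this means roughly: -fsched-spec-load
   lets IFREE loads and proven-pfree PFREE_CANDIDATEs move, while
   -fsched-spec-load-dangerous additionally allows candidates for which
   no proof exists, as long as they are not classified as prisky.  (A
   summary of the switch above, not an independent specification.)  */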
2067 | |
2068 | /* The number of insns from the current block scheduled so far. */ |
2069 | static int sched_target_n_insns; |
2070 | /* The number of insns from the current block to be scheduled in total. */ |
2071 | static int target_n_insns; |
2072 | /* The number of insns from the entire region scheduled so far. */ |
2073 | static int sched_n_insns; |
2074 | |
2075 | /* Implementations of the sched_info functions for region scheduling. */ |
2076 | static void init_ready_list (void); |
2077 | static bool can_schedule_ready_p (rtx_insn *); |
2078 | static void begin_schedule_ready (rtx_insn *); |
2079 | static ds_t new_ready (rtx_insn *, ds_t); |
2080 | static bool schedule_more_p (void); |
2081 | static const char *rgn_print_insn (const rtx_insn *, int); |
2082 | static int rgn_rank (rtx_insn *, rtx_insn *); |
2083 | static void compute_jump_reg_dependencies (rtx, regset); |
2084 | |
2085 | /* Functions for speculative scheduling. */ |
2086 | static void rgn_add_remove_insn (rtx_insn *, int); |
2087 | static void rgn_add_block (basic_block, basic_block); |
2088 | static void rgn_fix_recovery_cfg (int, int, int); |
2089 | static basic_block advance_target_bb (basic_block, rtx_insn *); |
2090 | |
2091 | /* Return true if there are more insns that should be scheduled. */ |
2092 | |
2093 | static bool |
2094 | schedule_more_p (void) |
2095 | { |
2096 | return sched_target_n_insns < target_n_insns; |
2097 | } |
2098 | |
2099 | /* Add all insns that are initially ready to the ready list READY. Called |
2100 | once before scheduling a set of insns. */ |
2101 | |
2102 | static void |
2103 | init_ready_list (void) |
2104 | { |
2105 | rtx_insn *prev_head = current_sched_info->prev_head; |
2106 | rtx_insn *next_tail = current_sched_info->next_tail; |
2107 | int bb_src; |
2108 | rtx_insn *insn; |
2109 | |
2110 | target_n_insns = 0; |
2111 | sched_target_n_insns = 0; |
2112 | sched_n_insns = 0; |
2113 | |
2114 | /* Print debugging information. */ |
2115 | if (sched_verbose >= 5) |
2116 | debug_rgn_dependencies (target_bb); |
2117 | |
2118 | /* Prepare current target block info. */ |
2119 | if (current_nr_blocks > 1) |
    compute_trg_info (target_bb);
2121 | |
2122 | /* Initialize ready list with all 'ready' insns in target block. |
2123 | Count number of insns in the target block being scheduled. */ |
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
2125 | { |
2126 | gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED); |
2127 | TODO_SPEC (insn) = HARD_DEP; |
2128 | try_ready (insn); |
2129 | target_n_insns++; |
2130 | |
2131 | gcc_assert (!(TODO_SPEC (insn) & BEGIN_CONTROL)); |
2132 | } |
2133 | |
2134 | /* Add to ready list all 'ready' insns in valid source blocks. |
2135 | For speculative insns, check-live, exception-free, and |
2136 | issue-delay. */ |
2137 | for (bb_src = target_bb + 1; bb_src < current_nr_blocks; bb_src++) |
2138 | if (IS_VALID (bb_src)) |
2139 | { |
2140 | rtx_insn *src_head; |
2141 | rtx_insn *src_next_tail; |
2142 | rtx_insn *tail, *head; |
2143 | |
2144 | get_ebb_head_tail (EBB_FIRST_BB (bb_src), EBB_LAST_BB (bb_src), |
2145 | &head, &tail); |
	src_next_tail = NEXT_INSN (tail);
2147 | src_head = head; |
2148 | |
2149 | for (insn = src_head; insn != src_next_tail; insn = NEXT_INSN (insn)) |
2150 | if (INSN_P (insn)) |
2151 | { |
2152 | gcc_assert (TODO_SPEC (insn) == HARD_DEP || TODO_SPEC (insn) == DEP_POSTPONED); |
2153 | TODO_SPEC (insn) = HARD_DEP; |
2154 | try_ready (insn); |
2155 | } |
2156 | } |
2157 | } |
2158 | |
/* Called after taking INSN from the ready list.  Returns true if this
   insn can be scheduled, false if we should silently discard it.  */
2161 | |
2162 | static bool |
2163 | can_schedule_ready_p (rtx_insn *insn) |
2164 | { |
2165 | /* An interblock motion? */ |
2166 | if (INSN_BB (insn) != target_bb && IS_SPECULATIVE_INSN (insn)) |
2167 | { |
2168 | /* Cannot schedule this insn unless all operands are live. */ |
2169 | if (!check_live (insn, INSN_BB (insn))) |
2170 | return false; |
2171 | |
2172 | /* Should not move expensive instructions speculatively. */ |
2173 | if (GET_CODE (PATTERN (insn)) != CLOBBER |
2174 | && !targetm.sched.can_speculate_insn (insn)) |
2175 | return false; |
2176 | } |
2177 | |
2178 | return true; |
2179 | } |
2180 | |
/* Updates counters and other information.  Split from
   can_schedule_ready_p () because when we schedule an insn speculatively,
   the insn passed to can_schedule_ready_p () differs from the one passed
   to begin_schedule_ready ().  */
2185 | static void |
2186 | begin_schedule_ready (rtx_insn *insn) |
2187 | { |
2188 | /* An interblock motion? */ |
2189 | if (INSN_BB (insn) != target_bb) |
2190 | { |
2191 | if (IS_SPECULATIVE_INSN (insn)) |
2192 | { |
2193 | gcc_assert (check_live (insn, INSN_BB (insn))); |
2194 | |
2195 | update_live (insn, INSN_BB (insn)); |
2196 | |
2197 | /* For speculative load, mark insns fed by it. */ |
2198 | if (IS_LOAD_INSN (insn) || FED_BY_SPEC_LOAD (insn)) |
2199 | set_spec_fed (insn); |
2200 | |
2201 | nr_spec++; |
2202 | } |
2203 | nr_inter++; |
2204 | } |
2205 | else |
2206 | { |
2207 | /* In block motion. */ |
2208 | sched_target_n_insns++; |
2209 | } |
2210 | sched_n_insns++; |
2211 | } |
2212 | |
/* Called after INSN has all its hard dependencies resolved and the speculation
   of type TS is enough to overcome them all.
   Return the (possibly updated) speculation status if INSN should be moved
   to the ready list or the queue, or DEP_POSTPONED if we should silently
   discard it.  */
2217 | static ds_t |
2218 | new_ready (rtx_insn *next, ds_t ts) |
2219 | { |
2220 | if (INSN_BB (next) != target_bb) |
2221 | { |
2222 | int not_ex_free = 0; |
2223 | |
2224 | /* For speculative insns, before inserting to ready/queue, |
2225 | check live, exception-free, and issue-delay. */ |
2226 | if (!IS_VALID (INSN_BB (next)) |
2227 | || CANT_MOVE (next) |
2228 | || (IS_SPECULATIVE_INSN (next) |
	      && ((recog_memoized (next) >= 0
		   && min_insn_conflict_delay (curr_state, next, next)
		      > param_max_sched_insn_conflict_delay)
		  || IS_SPECULATION_CHECK_P (next)
		  || !check_live (next, INSN_BB (next))
		  || (not_ex_free = !is_exception_free (next, INSN_BB (next),
							target_bb)))))
2236 | { |
2237 | if (not_ex_free |
2238 | /* We are here because is_exception_free () == false. |
		 But we can possibly handle that with control speculation.  */
2240 | && sched_deps_info->generate_spec_deps |
2241 | && spec_info->mask & BEGIN_CONTROL) |
2242 | { |
2243 | ds_t new_ds; |
2244 | |
2245 | /* Add control speculation to NEXT's dependency type. */ |
2246 | new_ds = set_dep_weak (ts, BEGIN_CONTROL, MAX_DEP_WEAK); |
2247 | |
2248 | /* Check if NEXT can be speculated with new dependency type. */ |
2249 | if (sched_insn_is_legitimate_for_speculation_p (next, new_ds)) |
2250 | /* Here we got new control-speculative instruction. */ |
2251 | ts = new_ds; |
2252 | else |
2253 | /* NEXT isn't ready yet. */ |
2254 | ts = DEP_POSTPONED; |
2255 | } |
2256 | else |
2257 | /* NEXT isn't ready yet. */ |
2258 | ts = DEP_POSTPONED; |
2259 | } |
2260 | } |
2261 | |
2262 | return ts; |
2263 | } |
2264 | |
2265 | /* Return a string that contains the insn uid and optionally anything else |
2266 | necessary to identify this insn in an output. It's valid to use a |
2267 | static buffer for this. The ALIGNED parameter should cause the string |
2268 | to be formatted so that multiple output lines will line up nicely. */ |
2269 | |
2270 | static const char * |
2271 | rgn_print_insn (const rtx_insn *insn, int aligned) |
2272 | { |
2273 | static char tmp[80]; |
2274 | |
2275 | if (aligned) |
    sprintf (tmp, "b%3d: i%4d", INSN_BB (insn), INSN_UID (insn));
  else
    {
      if (current_nr_blocks > 1 && INSN_BB (insn) != target_bb)
	sprintf (tmp, "%d/b%d", INSN_UID (insn), INSN_BB (insn));
      else
	sprintf (tmp, "%d", INSN_UID (insn));
2283 | } |
2284 | return tmp; |
2285 | } |
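
/* For example, with a made-up uid 40 in block 2, the aligned form yields
   "b  2: i  40", while the unaligned form yields "40/b2" for an insn
   outside the target block of a multi-block region and plain "40"
   otherwise.  */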
2286 | |
2287 | /* Compare priority of two insns. Return a positive number if the second |
2288 | insn is to be preferred for scheduling, and a negative one if the first |
2289 | is to be preferred. Zero if they are equally good. */ |
2290 | |
2291 | static int |
2292 | rgn_rank (rtx_insn *insn1, rtx_insn *insn2) |
2293 | { |
  /* Some comparisons make sense in interblock scheduling only.  */
  if (INSN_BB (insn1) != INSN_BB (insn2))
    {
      int spec_val, prob_val;

      /* Prefer an inblock motion over an interblock motion.  */
      if ((INSN_BB (insn2) == target_bb) && (INSN_BB (insn1) != target_bb))
	return 1;
      if ((INSN_BB (insn1) == target_bb) && (INSN_BB (insn2) != target_bb))
	return -1;

      /* Prefer a useful motion over a speculative one.  */
2306 | spec_val = IS_SPECULATIVE_INSN (insn1) - IS_SPECULATIVE_INSN (insn2); |
2307 | if (spec_val) |
2308 | return spec_val; |
2309 | |
2310 | /* Prefer a more probable (speculative) insn. */ |
2311 | prob_val = INSN_PROBABILITY (insn2) - INSN_PROBABILITY (insn1); |
2312 | if (prob_val) |
2313 | return prob_val; |
2314 | } |
2315 | return 0; |
2316 | } |
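
/* For example (hypothetical insns): if insn1 is in the target block and
   insn2 would be an interblock motion, rgn_rank returns -1 and insn1 is
   preferred; if both are speculative interblock motions, the one whose
   source block is reached with higher probability wins.  */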
2317 | |
2318 | /* NEXT is an instruction that depends on INSN (a backward dependence); |
2319 | return true if we should include this dependence in priority |
2320 | calculations. */ |
2321 | |
2322 | bool |
2323 | contributes_to_priority (rtx_insn *next, rtx_insn *insn) |
2324 | { |
2325 | /* NEXT and INSN reside in one ebb. */ |
2326 | return BLOCK_TO_BB (BLOCK_NUM (next)) == BLOCK_TO_BB (BLOCK_NUM (insn)); |
2327 | } |
2328 | |
2329 | /* INSN is a JUMP_INSN. Store the set of registers that must be |
2330 | considered as used by this jump in USED. */ |
2331 | |
2332 | static void |
2333 | compute_jump_reg_dependencies (rtx insn ATTRIBUTE_UNUSED, |
2334 | regset used ATTRIBUTE_UNUSED) |
2335 | { |
2336 | /* Nothing to do here, since we postprocess jumps in |
2337 | add_branch_dependences. */ |
2338 | } |
2339 | |
2340 | /* This variable holds common_sched_info hooks and data relevant to |
2341 | the interblock scheduler. */ |
2342 | static struct common_sched_info_def rgn_common_sched_info; |
2343 | |
2344 | |
2345 | /* This holds data for the dependence analysis relevant to |
2346 | the interblock scheduler. */ |
2347 | static struct sched_deps_info_def rgn_sched_deps_info; |
2348 | |
2349 | /* This holds constant data used for initializing the above structure |
2350 | for the Haifa scheduler. */ |
2351 | static const struct sched_deps_info_def rgn_const_sched_deps_info = |
2352 | { |
  compute_jump_reg_dependencies,
  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  0, 0, 0
2356 | }; |
2357 | |
2358 | /* Same as above, but for the selective scheduler. */ |
2359 | static const struct sched_deps_info_def rgn_const_sel_sched_deps_info = |
2360 | { |
  compute_jump_reg_dependencies,
  NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
  0, 0, 0
2364 | }; |
2365 | |
2366 | /* Return true if scheduling INSN will trigger finish of scheduling |
2367 | current block. */ |
2368 | static bool |
2369 | rgn_insn_finishes_block_p (rtx_insn *insn) |
2370 | { |
2371 | if (INSN_BB (insn) == target_bb |
2372 | && sched_target_n_insns + 1 == target_n_insns) |
2373 | /* INSN is the last not-scheduled instruction in the current block. */ |
2374 | return true; |
2375 | |
2376 | return false; |
2377 | } |
2378 | |
2379 | /* Used in schedule_insns to initialize current_sched_info for scheduling |
2380 | regions (or single basic blocks). */ |
2381 | |
2382 | static const struct haifa_sched_info rgn_const_sched_info = |
2383 | { |
  init_ready_list,
  can_schedule_ready_p,
  schedule_more_p,
  new_ready,
  rgn_rank,
  rgn_print_insn,
  contributes_to_priority,
  rgn_insn_finishes_block_p,

  NULL, NULL,
  NULL, NULL,
  0, 0,

  rgn_add_remove_insn,
  begin_schedule_ready,
  NULL,
  advance_target_bb,
  NULL, NULL,
  SCHED_RGN
2403 | }; |
2404 | |
/* This variable holds the data and hooks needed by the Haifa scheduler
   backend for the interblock scheduler frontend.  */
2407 | static struct haifa_sched_info rgn_sched_info; |
2408 | |
/* Return the maximum priority that any insn was assigned.  */
2410 | |
2411 | int |
2412 | get_rgn_sched_max_insns_priority (void) |
2413 | { |
2414 | return rgn_sched_info.sched_max_insns_priority; |
2415 | } |
2416 | |
/* Determine if PAT sets a TARGET_CLASS_LIKELY_SPILLED_P register.  */

static void sets_likely_spilled_1 (rtx, const_rtx, void *);

static bool
2420 | sets_likely_spilled (rtx pat) |
2421 | { |
2422 | bool ret = false; |
2423 | note_pattern_stores (pat, sets_likely_spilled_1, &ret); |
2424 | return ret; |
2425 | } |
2426 | |
2427 | static void |
2428 | sets_likely_spilled_1 (rtx x, const_rtx pat, void *data) |
2429 | { |
2430 | bool *ret = (bool *) data; |
2431 | |
2432 | if (GET_CODE (pat) == SET |
2433 | && REG_P (x) |
2434 | && HARD_REGISTER_P (x) |
2435 | && targetm.class_likely_spilled_p (REGNO_REG_CLASS (REGNO (x)))) |
2436 | *ret = true; |
2437 | } |
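
/* A sketch of how this pair behaves (target-dependent, so the outcome
   here is illustrative): on a target where the class of hard register 0
   is likely spilled, a pattern like

     (set (reg:SI 0) (reg:SI 100))

   makes sets_likely_spilled return true, which keeps such insns anchored
   at the end of their block before reload by add_branch_dependences
   below.  */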
2438 | |
2439 | /* A bitmap to note insns that participate in any dependency. Used in |
2440 | add_branch_dependences. */ |
2441 | static sbitmap insn_referenced; |
2442 | |
2443 | /* Add dependences so that branches are scheduled to run last in their |
2444 | block. */ |
2445 | static void |
2446 | add_branch_dependences (rtx_insn *head, rtx_insn *tail) |
2447 | { |
2448 | rtx_insn *insn, *last; |
2449 | |
2450 | /* For all branches, calls, uses, clobbers, and instructions |
2451 | that can throw exceptions, force them to remain in order at the end of |
2452 | the block by adding dependencies and giving the last a high priority. |
2453 | There may be notes present, and prev_head may also be a note. |
2454 | |
2455 | Branches must obviously remain at the end. Calls should remain at the |
2456 | end since moving them results in worse register allocation. Uses remain |
2457 | at the end to ensure proper register allocation. |
2458 | |
2459 | Predecessors of SCHED_GROUP_P instructions at the end remain at the end. |
2460 | |
2461 | COND_EXEC insns cannot be moved past a branch (see e.g. PR17808). |
2462 | |
2463 | Insns setting TARGET_CLASS_LIKELY_SPILLED_P registers (usually return |
2464 | values) are not moved before reload because we can wind up with register |
2465 | allocation failures. */ |
2466 | |
2467 | while (tail != head && DEBUG_INSN_P (tail)) |
    tail = PREV_INSN (tail);
2469 | |
2470 | insn = tail; |
2471 | last = 0; |
2472 | while (CALL_P (insn) |
2473 | || JUMP_P (insn) || JUMP_TABLE_DATA_P (insn) |
2474 | || (NONJUMP_INSN_P (insn) |
2475 | && (GET_CODE (PATTERN (insn)) == USE |
2476 | || GET_CODE (PATTERN (insn)) == CLOBBER |
2477 | || can_throw_internal (insn) |
2478 | || (!reload_completed |
2479 | && sets_likely_spilled (PATTERN (insn))))) |
2480 | || NOTE_P (insn) |
2481 | || (last != 0 && SCHED_GROUP_P (last))) |
2482 | { |
2483 | if (!NOTE_P (insn)) |
2484 | { |
2485 | if (last != 0 |
2486 | && sd_find_dep_between (insn, last, false) == NULL) |
2487 | { |
2488 | if (! sched_insns_conditions_mutex_p (last, insn)) |
2489 | add_dependence (last, insn, REG_DEP_ANTI); |
	      bitmap_set_bit (insn_referenced, INSN_LUID (insn));
2491 | } |
2492 | |
2493 | CANT_MOVE (insn) = 1; |
2494 | |
2495 | last = insn; |
2496 | } |
2497 | |
2498 | /* Don't overrun the bounds of the basic block. */ |
2499 | if (insn == head) |
2500 | break; |
2501 | |
2502 | do |
2503 | insn = PREV_INSN (insn); |
2504 | while (insn != head && DEBUG_INSN_P (insn)); |
2505 | } |
2506 | |
2507 | /* Selective scheduling handles control dependencies by itself, and |
2508 | CANT_MOVE flags ensure that other insns will be kept in place. */ |
2509 | if (sel_sched_p ()) |
2510 | return; |
2511 | |
2512 | /* Make sure these insns are scheduled last in their block. */ |
2513 | insn = last; |
2514 | if (insn != 0) |
2515 | while (insn != head) |
2516 | { |
2517 | insn = prev_nonnote_insn (insn); |
2518 | |
	if (bitmap_bit_p (insn_referenced, INSN_LUID (insn))
2520 | || DEBUG_INSN_P (insn)) |
2521 | continue; |
2522 | |
2523 | if (! sched_insns_conditions_mutex_p (last, insn)) |
2524 | add_dependence (last, insn, REG_DEP_ANTI); |
2525 | } |
2526 | |
2527 | if (!targetm.have_conditional_execution ()) |
2528 | return; |
2529 | |
2530 | /* Finally, if the block ends in a jump, and we are doing intra-block |
2531 | scheduling, make sure that the branch depends on any COND_EXEC insns |
2532 | inside the block to avoid moving the COND_EXECs past the branch insn. |
2533 | |
2534 | We only have to do this after reload, because (1) before reload there |
2535 | are no COND_EXEC insns, and (2) the region scheduler is an intra-block |
2536 | scheduler after reload. |
2537 | |
2538 | FIXME: We could in some cases move COND_EXEC insns past the branch if |
2539 | this scheduler would be a little smarter. Consider this code: |
2540 | |
2541 | T = [addr] |
2542 | C ? addr += 4 |
2543 | !C ? X += 12 |
2544 | C ? T += 1 |
2545 | C ? jump foo |
2546 | |
2547 | On a target with a one cycle stall on a memory access the optimal |
2548 | sequence would be: |
2549 | |
2550 | T = [addr] |
2551 | C ? addr += 4 |
2552 | C ? T += 1 |
2553 | C ? jump foo |
2554 | !C ? X += 12 |
2555 | |
2556 | We don't want to put the 'X += 12' before the branch because it just |
2557 | wastes a cycle of execution time when the branch is taken. |
2558 | |
2559 | Note that in the example "!C" will always be true. That is another |
2560 | possible improvement for handling COND_EXECs in this scheduler: it |
2561 | could remove always-true predicates. */ |
2562 | |
2563 | if (!reload_completed || ! (JUMP_P (tail) || JUMP_TABLE_DATA_P (tail))) |
2564 | return; |
2565 | |
2566 | insn = tail; |
2567 | while (insn != head) |
2568 | { |
2569 | insn = PREV_INSN (insn); |
2570 | |
2571 | /* Note that we want to add this dependency even when |
2572 | sched_insns_conditions_mutex_p returns true. The whole point |
2573 | is that we _want_ this dependency, even if these insns really |
2574 | are independent. */ |
2575 | if (INSN_P (insn) && GET_CODE (PATTERN (insn)) == COND_EXEC) |
2576 | add_dependence (tail, insn, REG_DEP_ANTI); |
2577 | } |
2578 | } |
2579 | |
/* Data structures for the computation of data dependences in a region.  We
2581 | keep one `deps' structure for every basic block. Before analyzing the |
2582 | data dependences for a bb, its variables are initialized as a function of |
2583 | the variables of its predecessors. When the analysis for a bb completes, |
2584 | we save the contents to the corresponding bb_deps[bb] variable. */ |
2585 | |
2586 | static class deps_desc *bb_deps; |
2587 | |
2588 | static void |
2589 | concat_insn_mem_list (rtx_insn_list *copy_insns, |
2590 | rtx_expr_list *copy_mems, |
2591 | rtx_insn_list **old_insns_p, |
2592 | rtx_expr_list **old_mems_p) |
2593 | { |
2594 | rtx_insn_list *new_insns = *old_insns_p; |
2595 | rtx_expr_list *new_mems = *old_mems_p; |
2596 | |
2597 | while (copy_insns) |
2598 | { |
2599 | new_insns = alloc_INSN_LIST (copy_insns->insn (), new_insns); |
2600 | new_mems = alloc_EXPR_LIST (VOIDmode, copy_mems->element (), new_mems); |
2601 | copy_insns = copy_insns->next (); |
2602 | copy_mems = copy_mems->next (); |
2603 | } |
2604 | |
2605 | *old_insns_p = new_insns; |
2606 | *old_mems_p = new_mems; |
2607 | } |
2608 | |
2609 | /* Join PRED_DEPS to the SUCC_DEPS. */ |
2610 | void |
2611 | deps_join (class deps_desc *succ_deps, class deps_desc *pred_deps) |
2612 | { |
2613 | unsigned reg; |
2614 | reg_set_iterator rsi; |
2615 | |
2616 | /* The reg_last lists are inherited by successor. */ |
2617 | EXECUTE_IF_SET_IN_REG_SET (&pred_deps->reg_last_in_use, 0, reg, rsi) |
2618 | { |
2619 | struct deps_reg *pred_rl = &pred_deps->reg_last[reg]; |
2620 | struct deps_reg *succ_rl = &succ_deps->reg_last[reg]; |
2621 | |
2622 | succ_rl->uses = concat_INSN_LIST (pred_rl->uses, succ_rl->uses); |
2623 | succ_rl->sets = concat_INSN_LIST (pred_rl->sets, succ_rl->sets); |
2624 | succ_rl->implicit_sets |
2625 | = concat_INSN_LIST (pred_rl->implicit_sets, succ_rl->implicit_sets); |
2626 | succ_rl->clobbers = concat_INSN_LIST (pred_rl->clobbers, |
2627 | succ_rl->clobbers); |
2628 | succ_rl->uses_length += pred_rl->uses_length; |
2629 | succ_rl->clobbers_length += pred_rl->clobbers_length; |
2630 | } |
2631 | IOR_REG_SET (&succ_deps->reg_last_in_use, &pred_deps->reg_last_in_use); |
2632 | |
2633 | /* Mem read/write lists are inherited by successor. */ |
  concat_insn_mem_list (pred_deps->pending_read_insns,
			pred_deps->pending_read_mems,
			&succ_deps->pending_read_insns,
			&succ_deps->pending_read_mems);
  concat_insn_mem_list (pred_deps->pending_write_insns,
			pred_deps->pending_write_mems,
			&succ_deps->pending_write_insns,
			&succ_deps->pending_write_mems);
2642 | |
2643 | succ_deps->pending_jump_insns |
2644 | = concat_INSN_LIST (pred_deps->pending_jump_insns, |
2645 | succ_deps->pending_jump_insns); |
2646 | succ_deps->last_pending_memory_flush |
2647 | = concat_INSN_LIST (pred_deps->last_pending_memory_flush, |
2648 | succ_deps->last_pending_memory_flush); |
2649 | |
2650 | succ_deps->pending_read_list_length += pred_deps->pending_read_list_length; |
2651 | succ_deps->pending_write_list_length += pred_deps->pending_write_list_length; |
2652 | succ_deps->pending_flush_length += pred_deps->pending_flush_length; |
2653 | |
2654 | /* last_function_call is inherited by successor. */ |
2655 | succ_deps->last_function_call |
2656 | = concat_INSN_LIST (pred_deps->last_function_call, |
2657 | succ_deps->last_function_call); |
2658 | |
2659 | /* last_function_call_may_noreturn is inherited by successor. */ |
2660 | succ_deps->last_function_call_may_noreturn |
2661 | = concat_INSN_LIST (pred_deps->last_function_call_may_noreturn, |
2662 | succ_deps->last_function_call_may_noreturn); |
2663 | |
2664 | /* sched_before_next_call is inherited by successor. */ |
2665 | succ_deps->sched_before_next_call |
2666 | = concat_INSN_LIST (pred_deps->sched_before_next_call, |
2667 | succ_deps->sched_before_next_call); |
2668 | } |
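
/* For example (schematic): if a predecessor block ends with insn A
   setting pseudo 100, then after deps_join the successor's
   reg_last[100].sets also lists A, and a later use of pseudo 100 in the
   successor will get a true dependence on A when it is analyzed.  */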
2669 | |
2670 | /* After computing the dependencies for block BB, propagate the dependencies |
2671 | found in TMP_DEPS to the successors of the block. */ |
2672 | static void |
2673 | propagate_deps (int bb, class deps_desc *pred_deps) |
2674 | { |
2675 | basic_block block = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (bb)); |
2676 | edge_iterator ei; |
2677 | edge e; |
2678 | |
2679 | /* bb's structures are inherited by its successors. */ |
2680 | FOR_EACH_EDGE (e, ei, block->succs) |
2681 | { |
2682 | /* Only bbs "below" bb, in the same region, are interesting. */ |
2683 | if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun) |
2684 | || CONTAINING_RGN (block->index) != CONTAINING_RGN (e->dest->index) |
2685 | || BLOCK_TO_BB (e->dest->index) <= bb) |
2686 | continue; |
2687 | |
      deps_join (bb_deps + BLOCK_TO_BB (e->dest->index), pred_deps);
2689 | } |
2690 | |
2691 | /* These lists should point to the right place, for correct |
2692 | freeing later. */ |
2693 | bb_deps[bb].pending_read_insns = pred_deps->pending_read_insns; |
2694 | bb_deps[bb].pending_read_mems = pred_deps->pending_read_mems; |
2695 | bb_deps[bb].pending_write_insns = pred_deps->pending_write_insns; |
2696 | bb_deps[bb].pending_write_mems = pred_deps->pending_write_mems; |
2697 | bb_deps[bb].pending_jump_insns = pred_deps->pending_jump_insns; |
2698 | |
2699 | /* Can't allow these to be freed twice. */ |
2700 | pred_deps->pending_read_insns = 0; |
2701 | pred_deps->pending_read_mems = 0; |
2702 | pred_deps->pending_write_insns = 0; |
2703 | pred_deps->pending_write_mems = 0; |
2704 | pred_deps->pending_jump_insns = 0; |
2705 | } |
2706 | |
/* Compute dependences inside bb.  In a multi-block region:
2708 | (1) a bb is analyzed after its predecessors, and (2) the lists in |
2709 | effect at the end of bb (after analyzing for bb) are inherited by |
2710 | bb's successors. |
2711 | |
2712 | Specifically for reg-reg data dependences, the block insns are |
2713 | scanned by sched_analyze () top-to-bottom. Three lists are |
2714 | maintained by sched_analyze (): reg_last[].sets for register DEFs, |
2715 | reg_last[].implicit_sets for implicit hard register DEFs, and |
2716 | reg_last[].uses for register USEs. |
2717 | |
2718 | When analysis is completed for bb, we update for its successors: |
2719 | ; - DEFS[succ] = Union (DEFS [succ], DEFS [bb]) |
2720 | ; - IMPLICIT_DEFS[succ] = Union (IMPLICIT_DEFS [succ], IMPLICIT_DEFS [bb]) |
2721 | ; - USES[succ] = Union (USES [succ], DEFS [bb]) |
2722 | |
2723 | The mechanism for computing mem-mem data dependence is very |
2724 | similar, and the result is interblock dependences in the region. */ |
2725 | |
2726 | static void |
2727 | compute_block_dependences (int bb) |
2728 | { |
2729 | rtx_insn *head, *tail; |
2730 | class deps_desc tmp_deps; |
2731 | |
2732 | tmp_deps = bb_deps[bb]; |
2733 | |
2734 | /* Do the analysis for this block. */ |
2735 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2736 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
2737 | |
2738 | sched_analyze (&tmp_deps, head, tail); |
2739 | |
2740 | add_branch_dependences (head, tail); |
2741 | |
2742 | if (current_nr_blocks > 1) |
    propagate_deps (bb, &tmp_deps);
2744 | |
2745 | /* Free up the INSN_LISTs. */ |
2746 | free_deps (&tmp_deps); |
2747 | |
2748 | if (targetm.sched.dependencies_evaluation_hook) |
2749 | targetm.sched.dependencies_evaluation_hook (head, tail); |
2750 | } |
2751 | |
2752 | /* Free dependencies of instructions inside BB. */ |
2753 | static void |
2754 | free_block_dependencies (int bb) |
2755 | { |
2756 | rtx_insn *head; |
2757 | rtx_insn *tail; |
2758 | |
2759 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
2760 | |
2761 | if (no_real_insns_p (head, tail)) |
2762 | return; |
2763 | |
2764 | sched_free_deps (head, tail, true); |
2765 | } |
2766 | |
2767 | /* Remove all INSN_LISTs and EXPR_LISTs from the pending lists and add |
2768 | them to the unused_*_list variables, so that they can be reused. */ |
2769 | |
2770 | static void |
2771 | free_pending_lists (void) |
2772 | { |
2773 | int bb; |
2774 | |
2775 | for (bb = 0; bb < current_nr_blocks; bb++) |
2776 | { |
2777 | free_INSN_LIST_list (&bb_deps[bb].pending_read_insns); |
2778 | free_INSN_LIST_list (&bb_deps[bb].pending_write_insns); |
2779 | free_EXPR_LIST_list (&bb_deps[bb].pending_read_mems); |
2780 | free_EXPR_LIST_list (&bb_deps[bb].pending_write_mems); |
2781 | free_INSN_LIST_list (&bb_deps[bb].pending_jump_insns); |
2782 | } |
2783 | } |
2784 | |
/* Print dependences for debugging starting from FROM_BB.
   Callable from debugger.  */
2789 | DEBUG_FUNCTION void |
2790 | debug_rgn_dependencies (int from_bb) |
2791 | { |
2792 | int bb; |
2793 | |
  fprintf (sched_dump,
	   ";; --------------- forward dependences: ------------ \n");
2796 | |
2797 | for (bb = from_bb; bb < current_nr_blocks; bb++) |
2798 | { |
2799 | rtx_insn *head, *tail; |
2800 | |
2801 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
      fprintf (sched_dump, "\n;; --- Region Dependences --- b %d bb %d \n",
2803 | BB_TO_BLOCK (bb), bb); |
2804 | |
2805 | debug_dependencies (head, tail); |
2806 | } |
2807 | } |
2808 | |
2809 | /* Print dependencies information for instructions between HEAD and TAIL. |
2810 | ??? This function would probably fit best in haifa-sched.cc. */ |
void
debug_dependencies (rtx_insn *head, rtx_insn *tail)
2812 | { |
2813 | rtx_insn *insn; |
  rtx_insn *next_tail = NEXT_INSN (tail);

  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
	   "insn", "code", "bb", "dep", "prio", "cost",
	   "reservation");
  fprintf (sched_dump, ";; %7s%6s%6s%6s%6s%6s%14s\n",
	   "----", "----", "--", "---", "----", "----",
	   "-----------");
2822 | |
2823 | for (insn = head; insn != next_tail; insn = NEXT_INSN (insn)) |
2824 | { |
2825 | if (! INSN_P (insn)) |
2826 | { |
2827 | int n; |
	  fprintf (sched_dump, ";; %6d ", INSN_UID (insn));
	  if (NOTE_P (insn))
	    {
	      n = NOTE_KIND (insn);
	      fprintf (sched_dump, "%s\n", GET_NOTE_INSN_NAME (n));
	    }
	  else
	    fprintf (sched_dump, " {%s}\n", GET_RTX_NAME (GET_CODE (insn)));
2836 | continue; |
2837 | } |
2838 | |
      fprintf (sched_dump,
	       ";; %s%5d%6d%6d%6d%6d%6d ",
2841 | (SCHED_GROUP_P (insn) ? "+" : " " ), |
2842 | INSN_UID (insn), |
2843 | INSN_CODE (insn), |
2844 | BLOCK_NUM (insn), |
2845 | sched_emulate_haifa_p ? -1 : sd_lists_size (insn, SD_LIST_BACK), |
2846 | (sel_sched_p () ? (sched_emulate_haifa_p ? -1 |
2847 | : INSN_PRIORITY (insn)) |
2848 | : INSN_PRIORITY (insn)), |
2849 | (sel_sched_p () ? (sched_emulate_haifa_p ? -1 |
2850 | : insn_sched_cost (insn)) |
2851 | : insn_sched_cost (insn))); |
2852 | |
      if (recog_memoized (insn) < 0)
	fprintf (sched_dump, "nothing");
      else
	print_reservation (sched_dump, insn);

      fprintf (sched_dump, "\t: ");
2859 | { |
2860 | sd_iterator_def sd_it; |
2861 | dep_t dep; |
2862 | |
2863 | FOR_EACH_DEP (insn, SD_LIST_FORW, sd_it, dep) |
	  fprintf (sched_dump, "%d%s%s ", INSN_UID (DEP_CON (dep)),
		   DEP_NONREG (dep) ? "n" : "",
		   DEP_MULTIPLE (dep) ? "m" : "");
2867 | } |
      fprintf (sched_dump, "\n");
    }

  fprintf (sched_dump, "\n");
2872 | } |
2873 | |
2874 | /* Dump dependency graph for the current region to a file using dot syntax. */ |
2875 | |
2876 | void |
2877 | dump_rgn_dependencies_dot (FILE *file) |
2878 | { |
2879 | rtx_insn *head, *tail, *con, *pro; |
2880 | sd_iterator_def sd_it; |
2881 | dep_t dep; |
2882 | int bb; |
2883 | pretty_printer pp; |
2884 | |
2885 | pp.buffer->stream = file; |
  pp_printf (&pp, "digraph SchedDG {\n");

  for (bb = 0; bb < current_nr_blocks; ++bb)
    {
      /* Begin subgraph (basic block).  */
      pp_printf (&pp, "subgraph cluster_block_%d {\n", bb);
      pp_printf (&pp, "\t" "color=blue;" "\n");
      pp_printf (&pp, "\t" "style=bold;" "\n");
      pp_printf (&pp, "\t" "label=\"BB #%d\";\n", BB_TO_BLOCK (bb));
2895 | |
2896 | /* Setup head and tail (no support for EBBs). */ |
2897 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
2898 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
      tail = NEXT_INSN (tail);
2900 | |
2901 | /* Dump all insns. */ |
      for (con = head; con != tail; con = NEXT_INSN (con))
2903 | { |
2904 | if (!INSN_P (con)) |
2905 | continue; |
2906 | |
2907 | /* Pretty print the insn. */ |
	  pp_printf (&pp, "\t%d [label=\"{", INSN_UID (con));
	  pp_write_text_to_stream (&pp);
	  print_insn (&pp, con, /*verbose=*/false);
	  pp_write_text_as_dot_label_to_stream (&pp, /*for_record=*/true);
	  pp_write_text_to_stream (&pp);

	  /* Dump instruction attributes.  */
	  pp_printf (&pp, "|{ uid:%d | luid:%d | prio:%d }}\",shape=record]\n",
		     INSN_UID (con), INSN_LUID (con), INSN_PRIORITY (con));
2917 | |
2918 | /* Dump all deps. */ |
2919 | FOR_EACH_DEP (con, SD_LIST_BACK, sd_it, dep) |
2920 | { |
2921 | int weight = 0; |
2922 | const char *color; |
2923 | pro = DEP_PRO (dep); |
2924 | |
2925 | switch (DEP_TYPE (dep)) |
2926 | { |
		case REG_DEP_TRUE:
		  color = "black";
		  weight = 1;
		  break;
		case REG_DEP_OUTPUT:
		case REG_DEP_ANTI:
		  color = "orange";
		  break;
		case REG_DEP_CONTROL:
		  color = "blue";
		  break;
2938 | default: |
2939 | gcc_unreachable (); |
2940 | } |
2941 | |
	      pp_printf (&pp, "\t%d -> %d [color=%s",
			 INSN_UID (pro), INSN_UID (con), color);
	      if (int cost = dep_cost (dep))
		pp_printf (&pp, ",label=%d", cost);
	      pp_printf (&pp, ",weight=%d", weight);
	      pp_printf (&pp, "];\n");
2948 | } |
2949 | } |
      pp_printf (&pp, "}\n");
2951 | } |
2952 | |
  pp_printf (&pp, "}\n");
  pp_flush (&pp);
2955 | } |
2956 | |
/* Same as above, but first open a file specified by FNAME.  */
2958 | |
2959 | DEBUG_FUNCTION void |
2960 | dump_rgn_dependencies_dot (const char *fname) |
2961 | { |
2962 | FILE *fp; |
2963 | |
  fp = fopen (fname, "w");
  if (!fp)
    {
      perror ("fopen");
      return;
    }

  dump_rgn_dependencies_dot (fp);
  fclose (fp);
2973 | } |
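
/* An illustrative way to use this from a debugging session:

     (gdb) call dump_rgn_dependencies_dot ("/tmp/rgn.dot")

   and then render the graph with Graphviz, e.g.

     $ dot -Tpdf /tmp/rgn.dot -o rgn.pdf  */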
2974 | |
2975 | |
2976 | /* Returns true if all the basic blocks of the current region have |
2977 | NOTE_DISABLE_SCHED_OF_BLOCK which means not to schedule that region. */ |
2978 | bool |
2979 | sched_is_disabled_for_current_region_p (void) |
2980 | { |
2981 | int bb; |
2982 | |
2983 | for (bb = 0; bb < current_nr_blocks; bb++) |
2984 | if (!(BASIC_BLOCK_FOR_FN (cfun, |
2985 | BB_TO_BLOCK (bb))->flags & BB_DISABLE_SCHEDULE)) |
2986 | return false; |
2987 | |
2988 | return true; |
2989 | } |
2990 | |
2991 | /* Free all region dependencies saved in INSN_BACK_DEPS and |
2992 | INSN_RESOLVED_BACK_DEPS. The Haifa scheduler does this on the fly |
2993 | when scheduling, so this function is supposed to be called from |
2994 | the selective scheduling only. */ |
2995 | void |
2996 | free_rgn_deps (void) |
2997 | { |
2998 | int bb; |
2999 | |
3000 | for (bb = 0; bb < current_nr_blocks; bb++) |
3001 | { |
3002 | rtx_insn *head, *tail; |
3003 | |
3004 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
3005 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
3006 | |
3007 | sched_free_deps (head, tail, false); |
3008 | } |
3009 | } |
3010 | |
3011 | static int rgn_n_insns; |
3012 | |
/* Compute insn priorities for the current region.  */
3014 | void |
3015 | compute_priorities (void) |
3016 | { |
3017 | int bb; |
3018 | |
3019 | current_sched_info->sched_max_insns_priority = 0; |
3020 | for (bb = 0; bb < current_nr_blocks; bb++) |
3021 | { |
3022 | rtx_insn *head, *tail; |
3023 | |
3024 | gcc_assert (EBB_FIRST_BB (bb) == EBB_LAST_BB (bb)); |
3025 | get_ebb_head_tail (EBB_FIRST_BB (bb), EBB_LAST_BB (bb), &head, &tail); |
3026 | |
3027 | if (no_real_insns_p (head, tail)) |
3028 | continue; |
3029 | |
3030 | rgn_n_insns += set_priorities (head, tail); |
3031 | } |
3032 | current_sched_info->sched_max_insns_priority++; |
3033 | } |
3034 | |
3035 | /* (Re-)initialize the arrays of DFA states at the end of each basic block. |
3036 | |
3037 | SAVED_LAST_BASIC_BLOCK is the previous length of the arrays. It must be |
3038 | zero for the first call to this function, to allocate the arrays for the |
3039 | first time. |
3040 | |
3041 | This function is called once during initialization of the scheduler, and |
3042 | called again to resize the arrays if new basic blocks have been created, |
3043 | for example for speculation recovery code. */ |
3044 | |
3045 | static void |
3046 | realloc_bb_state_array (int saved_last_basic_block) |
3047 | { |
3048 | char *old_bb_state_array = bb_state_array; |
3049 | size_t lbb = (size_t) last_basic_block_for_fn (cfun); |
3050 | size_t slbb = (size_t) saved_last_basic_block; |
3051 | |
3052 | /* Nothing to do if nothing changed since the last time this was called. */ |
3053 | if (saved_last_basic_block == last_basic_block_for_fn (cfun)) |
3054 | return; |
3055 | |
3056 | /* The selective scheduler doesn't use the state arrays. */ |
3057 | if (sel_sched_p ()) |
3058 | { |
3059 | gcc_assert (bb_state_array == NULL && bb_state == NULL); |
3060 | return; |
3061 | } |
3062 | |
3063 | gcc_checking_assert (saved_last_basic_block == 0 |
3064 | || (bb_state_array != NULL && bb_state != NULL)); |
3065 | |
3066 | bb_state_array = XRESIZEVEC (char, bb_state_array, lbb * dfa_state_size); |
3067 | bb_state = XRESIZEVEC (state_t, bb_state, lbb); |
3068 | |
  /* If BB_STATE_ARRAY has moved, fix up all the state pointers.
     Otherwise only fix up the newly allocated ones.  For the state
     array itself, only initialize the new entries.  */
3072 | bool bb_state_array_moved = (bb_state_array != old_bb_state_array); |
3073 | for (size_t i = bb_state_array_moved ? 0 : slbb; i < lbb; i++) |
3074 | bb_state[i] = (state_t) (bb_state_array + i * dfa_state_size); |
3075 | for (size_t i = slbb; i < lbb; i++) |
3076 | state_reset (bb_state[i]); |
3077 | } |
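
/* For instance (made-up sizes): growing from 10 to 12 blocks with the
   array kept in place only re-derives bb_state[10] and bb_state[11];
   if XRESIZEVEC moved the array, all twelve pointers are recomputed,
   but still only the two new states are reset.  */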
3078 | |
3079 | /* Free the arrays of DFA states at the end of each basic block. */ |
3080 | |
3081 | static void |
3082 | free_bb_state_array (void) |
3083 | { |
  free (bb_state_array);
  free (bb_state);
3086 | bb_state_array = NULL; |
3087 | bb_state = NULL; |
3088 | } |
3089 | |
/* If LAST_BB falls through to another block B, record that B should
   start with DFA state STATE.  */
3092 | |
3093 | static void |
3094 | save_state_for_fallthru_edge (basic_block last_bb, state_t state) |
3095 | { |
  edge f = find_fallthru_edge (last_bb->succs);
3097 | if (f |
3098 | && (!f->probability.initialized_p () |
3099 | || (f->probability.to_reg_br_prob_base () * 100 |
3100 | / REG_BR_PROB_BASE |
3101 | >= param_sched_state_edge_prob_cutoff))) |
3102 | { |
      memcpy (bb_state[f->dest->index], state,
	      dfa_state_size);
      if (sched_verbose >= 5)
	fprintf (sched_dump, "saving state for edge %d->%d\n",
		 f->src->index, f->dest->index);
3108 | } |
3109 | } |
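
/* A worked example with made-up numbers: with REG_BR_PROB_BASE == 10000,
   a fall-through edge of probability 0.6 gives 6000 * 100 / 10000 = 60,
   so the state is saved whenever the cutoff parameter
   (param_sched_state_edge_prob_cutoff) is at most 60.  */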
3110 | |
3111 | /* Schedule a region. A region is either an inner loop, a loop-free |
3112 | subroutine, or a single basic block. Each bb in the region is |
3113 | scheduled after its flow predecessors. */ |
3114 | |
3115 | static void |
3116 | schedule_region (int rgn) |
3117 | { |
3118 | int bb; |
3119 | int sched_rgn_n_insns = 0; |
3120 | |
3121 | rgn_n_insns = 0; |
3122 | |
3123 | /* Do not support register pressure sensitive scheduling for the new regions |
3124 | as we don't update the liveness info for them. */ |
3125 | if (sched_pressure != SCHED_PRESSURE_NONE |
3126 | && rgn >= nr_regions_initial) |
3127 | { |
3128 | free_global_sched_pressure_data (); |
3129 | sched_pressure = SCHED_PRESSURE_NONE; |
3130 | } |
3131 | |
3132 | rgn_setup_region (rgn); |
3133 | |
3134 | /* Don't schedule region that is marked by |
3135 | NOTE_DISABLE_SCHED_OF_BLOCK. */ |
3136 | if (sched_is_disabled_for_current_region_p ()) |
3137 | return; |
3138 | |
3139 | sched_rgn_compute_dependencies (rgn); |
3140 | |
3141 | sched_rgn_local_init (rgn); |
3142 | |
3143 | /* Set priorities. */ |
3144 | compute_priorities (); |
3145 | |
3146 | sched_extend_ready_list (rgn_n_insns); |
3147 | |
3148 | if (sched_pressure == SCHED_PRESSURE_WEIGHTED) |
3149 | { |
3150 | sched_init_region_reg_pressure_info (); |
3151 | for (bb = 0; bb < current_nr_blocks; bb++) |
3152 | { |
3153 | basic_block first_bb, last_bb; |
3154 | rtx_insn *head, *tail; |
3155 | |
3156 | first_bb = EBB_FIRST_BB (bb); |
3157 | last_bb = EBB_LAST_BB (bb); |
3158 | |
3159 | get_ebb_head_tail (first_bb, last_bb, &head, &tail); |
3160 | |
3161 | if (no_real_insns_p (head, tail)) |
3162 | { |
3163 | gcc_assert (first_bb == last_bb); |
3164 | continue; |
3165 | } |
	  sched_setup_bb_reg_pressure_info (first_bb, PREV_INSN (head));
3167 | } |
3168 | } |
3169 | |
3170 | /* Now we can schedule all blocks. */ |
3171 | for (bb = 0; bb < current_nr_blocks; bb++) |
3172 | { |
3173 | basic_block first_bb, last_bb, curr_bb; |
3174 | rtx_insn *head, *tail; |
3175 | |
3176 | first_bb = EBB_FIRST_BB (bb); |
3177 | last_bb = EBB_LAST_BB (bb); |
3178 | |
3179 | get_ebb_head_tail (first_bb, last_bb, &head, &tail); |
3180 | |
3181 | if (no_real_insns_p (head, tail)) |
3182 | { |
3183 | gcc_assert (first_bb == last_bb); |
	  save_state_for_fallthru_edge (last_bb, bb_state[first_bb->index]);
3185 | continue; |
3186 | } |
3187 | |
      current_sched_info->prev_head = PREV_INSN (head);
      current_sched_info->next_tail = NEXT_INSN (tail);
3190 | |
3191 | remove_notes (head, tail); |
3192 | |
3193 | unlink_bb_notes (first_bb, last_bb); |
3194 | |
3195 | target_bb = bb; |
3196 | |
3197 | gcc_assert (flag_schedule_interblock || current_nr_blocks == 1); |
3198 | current_sched_info->queue_must_finish_empty = current_nr_blocks == 1; |
3199 | |
3200 | curr_bb = first_bb; |
      if (dbg_cnt (sched_block))
3202 | { |
3203 | int saved_last_basic_block = last_basic_block_for_fn (cfun); |
3204 | |
3205 | schedule_block (&curr_bb, bb_state[first_bb->index]); |
3206 | gcc_assert (EBB_FIRST_BB (bb) == first_bb); |
3207 | sched_rgn_n_insns += sched_n_insns; |
3208 | realloc_bb_state_array (saved_last_basic_block); |
	  save_state_for_fallthru_edge (last_bb, curr_state);
3210 | } |
3211 | else |
3212 | { |
3213 | sched_rgn_n_insns += rgn_n_insns; |
3214 | } |
3215 | |
3216 | /* Clean up. */ |
3217 | if (current_nr_blocks > 1) |
3218 | free_trg_info (); |
3219 | } |
3220 | |
3221 | /* Sanity check: verify that all region insns were scheduled. */ |
3222 | gcc_assert (sched_rgn_n_insns == rgn_n_insns); |
3223 | |
3224 | sched_finish_ready_list (); |
3225 | |
3226 | /* Done with this region. */ |
3227 | sched_rgn_local_finish (); |
3228 | |
3229 | /* Free dependencies. */ |
3230 | for (bb = 0; bb < current_nr_blocks; ++bb) |
3231 | free_block_dependencies (bb); |
3232 | |
3233 | gcc_assert (haifa_recovery_bb_ever_added_p |
3234 | || deps_pools_are_empty_p ()); |
3235 | } |

/* Initialize data structures for region scheduling.  */

void
sched_rgn_init (bool single_blocks_p)
{
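  /* Note: param_min_spec_prob is a percentage; scale it into
     REG_BR_PROB_BASE units so it can be compared directly against
     edge probabilities.  */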
  min_spec_prob = ((param_min_spec_prob * REG_BR_PROB_BASE)
                   / 100);

  nr_inter = 0;
  nr_spec = 0;

  extend_regions ();

  CONTAINING_RGN (ENTRY_BLOCK) = -1;
  CONTAINING_RGN (EXIT_BLOCK) = -1;

  realloc_bb_state_array (0);

  /* Compute regions for scheduling.  */
  if (single_blocks_p
      || n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS + 1
      || !flag_schedule_interblock
      || is_cfg_nonregular ())
    {
      find_single_block_region (sel_sched_p ());
    }
  else
    {
      /* Compute the dominators and post dominators.  */
      if (!sel_sched_p ())
        calculate_dominance_info (CDI_DOMINATORS);

      /* Find regions.  */
      find_rgns ();

      if (sched_verbose >= 3)
        debug_regions ();

      /* For now.  This will move as more and more of haifa is converted
         to using the cfg code.  */
      if (!sel_sched_p ())
        free_dominance_info (CDI_DOMINATORS);
    }

  gcc_assert (nr_regions > 0 && nr_regions <= n_basic_blocks_for_fn (cfun));

  RGN_BLOCKS (nr_regions) = (RGN_BLOCKS (nr_regions - 1)
                             + RGN_NR_BLOCKS (nr_regions - 1));
  nr_regions_initial = nr_regions;
}

/* Free data structures for region scheduling.  */
void
sched_rgn_finish (void)
{
  free_bb_state_array ();

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  if (sched_verbose)
    {
      if (reload_completed == 0
          && flag_schedule_interblock)
        {
          fprintf (sched_dump,
                   "\n;; Procedure interblock/speculative motions == %d/%d \n",
                   nr_inter, nr_spec);
        }
      else
        gcc_assert (nr_inter <= 0);
      fprintf (sched_dump, "\n\n");
    }

  nr_regions = 0;

  free (rgn_table);
  rgn_table = NULL;

  free (rgn_bb_table);
  rgn_bb_table = NULL;

  free (block_to_bb);
  block_to_bb = NULL;

  free (containing_rgn);
  containing_rgn = NULL;

  free (ebb_head);
  ebb_head = NULL;
}

/* Setup global variables like CURRENT_BLOCKS and CURRENT_NR_BLOCKS to
   point to the region RGN.  */
void
rgn_setup_region (int rgn)
{
  int bb;

  /* Set variables for the current region.  */
  current_nr_blocks = RGN_NR_BLOCKS (rgn);
  current_blocks = RGN_BLOCKS (rgn);

  /* EBB_HEAD is a region-scope structure.  But we realloc it for
     each region so that its size always matches the current region.
     See comments in rgn_add_block for why we allocate +1 element.  */
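  /* For illustration: if this region occupies positions 5..7 of
     rgn_bb_table, then current_blocks == 5 and the loop below yields
     ebb_head[0..3] == {5, 6, 7, 8}, so ebb_head[bb + 1] - ebb_head[bb]
     is the number of blocks in ebb BB (initially 1 everywhere; see
     rgn_add_block for how an ebb grows).  */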
  ebb_head = XRESIZEVEC (int, ebb_head, current_nr_blocks + 1);
  for (bb = 0; bb <= current_nr_blocks; bb++)
    ebb_head[bb] = current_blocks + bb;
}

/* Compute instruction dependencies in region RGN.  */
void
sched_rgn_compute_dependencies (int rgn)
{
  if (!RGN_DONT_CALC_DEPS (rgn))
    {
      int bb;

      if (sel_sched_p ())
        sched_emulate_haifa_p = 1;

      init_deps_global ();

      /* Initializations for region data dependence analysis.  */
      bb_deps = XNEWVEC (class deps_desc, current_nr_blocks);
      for (bb = 0; bb < current_nr_blocks; bb++)
        init_deps (bb_deps + bb, false);

      /* Initialize bitmap used in add_branch_dependences.  */
      insn_referenced = sbitmap_alloc (sched_max_luid);
      bitmap_clear (insn_referenced);

      /* Compute backward dependencies.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
        compute_block_dependences (bb);

      sbitmap_free (insn_referenced);
      free_pending_lists ();
      finish_deps_global ();
      free (bb_deps);

      /* We don't want to recalculate this twice.  */
      RGN_DONT_CALC_DEPS (rgn) = 1;

      if (sel_sched_p ())
        sched_emulate_haifa_p = 0;
    }
  else
    /* Either this is a recovery block (which is always a single-block
       region), or we are using selective scheduling.  */
    gcc_assert (current_nr_blocks == 1 || sel_sched_p ());
}

/* Init region data structures for region RGN.  */
void
sched_rgn_local_init (int rgn)
{
  int bb;

  /* Compute interblock info: probabilities, split-edges, dominators, etc.  */
  if (current_nr_blocks > 1)
    {
      basic_block block;
      edge e;
      edge_iterator ei;

      prob = XNEWVEC (int, current_nr_blocks);

      dom = sbitmap_vector_alloc (current_nr_blocks, current_nr_blocks);
      bitmap_vector_clear (dom, current_nr_blocks);

      /* Use ->aux to implement EDGE_TO_BIT mapping.  */
      rgn_nr_edges = 0;
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            SET_EDGE_TO_BIT (e, rgn_nr_edges++);
        }

      rgn_edges = XNEWVEC (edge, rgn_nr_edges);
      rgn_nr_edges = 0;
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            rgn_edges[rgn_nr_edges++] = e;
        }

      /* Split edges.  */
      pot_split = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      bitmap_vector_clear (pot_split, current_nr_blocks);
      ancestor_edges = sbitmap_vector_alloc (current_nr_blocks, rgn_nr_edges);
      bitmap_vector_clear (ancestor_edges, current_nr_blocks);

      /* Compute probabilities, dominators, split_edges.  */
      for (bb = 0; bb < current_nr_blocks; bb++)
        compute_dom_prob_ps (bb);

      /* Cleanup ->aux used for EDGE_TO_BIT mapping.  */
      /* We don't need them anymore.  But we want to avoid duplication of
         aux fields in the newly created edges.  */
      FOR_EACH_BB_FN (block, cfun)
        {
          if (CONTAINING_RGN (block->index) != rgn)
            continue;
          FOR_EACH_EDGE (e, ei, block->succs)
            e->aux = NULL;
        }
    }
}

/* Free data computed for the finished region.  */
void
sched_rgn_local_free (void)
{
  free (prob);
  sbitmap_vector_free (dom);
  sbitmap_vector_free (pot_split);
  sbitmap_vector_free (ancestor_edges);
  free (rgn_edges);
}

/* Free data computed for the finished region, if any was actually
   allocated for it.  */
void
sched_rgn_local_finish (void)
{
  if (current_nr_blocks > 1 && !sel_sched_p ())
    {
      sched_rgn_local_free ();
    }
}

/* Setup scheduler infos.  */
void
rgn_setup_common_sched_info (void)
{
  memcpy (&rgn_common_sched_info, &haifa_common_sched_info,
          sizeof (rgn_common_sched_info));

  rgn_common_sched_info.fix_recovery_cfg = rgn_fix_recovery_cfg;
  rgn_common_sched_info.add_block = rgn_add_block;
  rgn_common_sched_info.estimate_number_of_insns
    = rgn_estimate_number_of_insns;
  rgn_common_sched_info.sched_pass_id = SCHED_RGN_PASS;

  common_sched_info = &rgn_common_sched_info;
}

/* Setup all *_sched_info structures (for the Haifa frontend
   and for the dependence analysis) in the interblock scheduler.  */
void
rgn_setup_sched_infos (void)
{
  if (!sel_sched_p ())
    memcpy (&rgn_sched_deps_info, &rgn_const_sched_deps_info,
            sizeof (rgn_sched_deps_info));
  else
    memcpy (&rgn_sched_deps_info, &rgn_const_sel_sched_deps_info,
            sizeof (rgn_sched_deps_info));

  sched_deps_info = &rgn_sched_deps_info;

  memcpy (&rgn_sched_info, &rgn_const_sched_info, sizeof (rgn_sched_info));
  current_sched_info = &rgn_sched_info;
}

/* The one entry point in this file.  */
void
schedule_insns (void)
{
  int rgn;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  rgn_setup_common_sched_info ();
  rgn_setup_sched_infos ();

  haifa_sched_init ();
  sched_rgn_init (reload_completed);

  bitmap_initialize (&not_in_df, &bitmap_default_obstack);

  /* Schedule every region in the subroutine.  */
  for (rgn = 0; rgn < nr_regions; rgn++)
    if (dbg_cnt (sched_region))
      schedule_region (rgn);

  /* Clean up.  */
  sched_rgn_finish ();
  bitmap_release (&not_in_df);

  haifa_sched_finish ();
}

/* INSN has been added to/removed from current region.  */
static void
rgn_add_remove_insn (rtx_insn *insn, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;

  if (INSN_BB (insn) == target_bb)
    {
      if (!remove_p)
        target_n_insns++;
      else
        target_n_insns--;
    }
}

/* Extend internal data structures.  */
void
extend_regions (void)
{
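  /* Note the two different sizes below: rgn_table and rgn_bb_table hold
     one entry per existing block and so are sized by the block count,
     while block_to_bb and containing_rgn are indexed by bb->index and
     so must cover the highest block index in use.  */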
  rgn_table = XRESIZEVEC (region, rgn_table, n_basic_blocks_for_fn (cfun));
  rgn_bb_table = XRESIZEVEC (int, rgn_bb_table,
                             n_basic_blocks_for_fn (cfun));
  block_to_bb = XRESIZEVEC (int, block_to_bb,
                            last_basic_block_for_fn (cfun));
  containing_rgn = XRESIZEVEC (int, containing_rgn,
                               last_basic_block_for_fn (cfun));
}

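/* Make a new single-block region out of basic block BB.  */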
void
rgn_make_new_region_out_of_new_block (basic_block bb)
{
  int i;

  i = RGN_BLOCKS (nr_regions);
  /* I - first free position in rgn_bb_table.  */

  rgn_bb_table[i] = bb->index;
  RGN_NR_BLOCKS (nr_regions) = 1;
  RGN_HAS_REAL_EBB (nr_regions) = 0;
  RGN_DONT_CALC_DEPS (nr_regions) = 0;
  CONTAINING_RGN (bb->index) = nr_regions;
  BLOCK_TO_BB (bb->index) = 0;

  nr_regions++;

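  /* Keep the sentinel entry current: RGN_BLOCKS (nr_regions) always
     names the first free position in rgn_bb_table, which is also
     where the next region will start.  */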
  RGN_BLOCKS (nr_regions) = i + 1;
}

/* BB was added to ebb after AFTER.  */
static void
rgn_add_block (basic_block bb, basic_block after)
{
  extend_regions ();
  bitmap_set_bit (&not_in_df, bb->index);

  if (after == 0 || after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      rgn_make_new_region_out_of_new_block (bb);
      RGN_DONT_CALC_DEPS (nr_regions - 1) = (after
                                             == EXIT_BLOCK_PTR_FOR_FN (cfun));
    }
  else
    {
      int i, pos;

      /* We need to fix rgn_table, block_to_bb, containing_rgn
         and ebb_head.  */

      BLOCK_TO_BB (bb->index) = BLOCK_TO_BB (after->index);

      /* We extend ebb_head to one more position to
         easily find the last position of the last ebb in
         the current region.  Thus, ebb_head[BLOCK_TO_BB (after) + 1]
         is _always_ valid for access.  */

      i = BLOCK_TO_BB (after->index) + 1;
      pos = ebb_head[i] - 1;
      /* Now POS is the index of the last block in the region.  */

      /* Find index of basic block AFTER.  */
      for (; rgn_bb_table[pos] != after->index; pos--)
        ;

      pos++;
      gcc_assert (pos > ebb_head[i - 1]);

      /* Now I is the index of the ebb right after AFTER, and
         ebb_head[i] is valid to access.  */

      /* Source position: ebb_head[i]
         Destination position: ebb_head[i] + 1
         Last position:
           RGN_BLOCKS (nr_regions) - 1
         Number of elements to copy: (last_position) - (source_position) + 1.  */
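      /* For example (hypothetical indices): if this region's blocks in
         rgn_bb_table are {3, 7, 9} and AFTER is block 7, then POS now
         points at the slot holding 9; after the memmove and the store
         below, the table reads {3, 7, BB, 9}.  */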

      memmove (rgn_bb_table + pos + 1,
               rgn_bb_table + pos,
               ((RGN_BLOCKS (nr_regions) - 1) - (pos) + 1)
               * sizeof (*rgn_bb_table));

      rgn_bb_table[pos] = bb->index;

      for (; i <= current_nr_blocks; i++)
        ebb_head[i]++;

      i = CONTAINING_RGN (after->index);
      CONTAINING_RGN (bb->index) = i;

      RGN_HAS_REAL_EBB (i) = 1;

      for (++i; i <= nr_regions; i++)
        RGN_BLOCKS (i)++;
    }
}

/* Fix internal data after interblock movement of jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
rgn_fix_recovery_cfg (int bbi, int check_bbi, int check_bb_nexti)
{
  int old_pos, new_pos, i;

  BLOCK_TO_BB (check_bb_nexti) = BLOCK_TO_BB (bbi);

  for (old_pos = ebb_head[BLOCK_TO_BB (check_bbi) + 1] - 1;
       rgn_bb_table[old_pos] != check_bb_nexti;
       old_pos--)
    ;
  gcc_assert (old_pos > ebb_head[BLOCK_TO_BB (check_bbi)]);

  for (new_pos = ebb_head[BLOCK_TO_BB (bbi) + 1] - 1;
       rgn_bb_table[new_pos] != bbi;
       new_pos--)
    ;
  new_pos++;
  gcc_assert (new_pos > ebb_head[BLOCK_TO_BB (bbi)]);

  gcc_assert (new_pos < old_pos);

  memmove (rgn_bb_table + new_pos + 1,
           rgn_bb_table + new_pos,
           (old_pos - new_pos) * sizeof (*rgn_bb_table));

  rgn_bb_table[new_pos] = check_bb_nexti;

  for (i = BLOCK_TO_BB (bbi) + 1; i <= BLOCK_TO_BB (check_bbi); i++)
    ebb_head[i]++;
}

/* Return next block in ebb chain.  For parameter meaning please refer to
   sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    return NULL;

  gcc_assert (BLOCK_TO_BB (bb->index) == target_bb
              && BLOCK_TO_BB (bb->next_bb->index) == target_bb);
  return bb->next_bb;
}

#endif

/* Run instruction scheduler.  */
static unsigned int
rest_of_handle_live_range_shrinkage (void)
{
#ifdef INSN_SCHEDULING
  int saved;

  initialize_live_range_shrinkage ();
  saved = flag_schedule_interblock;
  flag_schedule_interblock = false;
  schedule_insns ();
  flag_schedule_interblock = saved;
  finish_live_range_shrinkage ();
#endif
  return 0;
}

/* Run instruction scheduler.  */
static unsigned int
rest_of_handle_sched (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    schedule_insns ();
#endif
  return 0;
}

/* Run second scheduling pass after reload.  */
static unsigned int
rest_of_handle_sched2 (void)
{
#ifdef INSN_SCHEDULING
  if (flag_selective_scheduling2
      && ! maybe_skip_selective_scheduling ())
    run_selective_scheduling ();
  else
    {
      /* Do control and data sched analysis again,
         and write some more of the results to dump file.  */
      if (flag_sched2_use_superblocks)
        schedule_ebbs ();
      else
        schedule_insns ();
    }
#endif
  return 0;
}

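/* Run instruction scheduler in fusion mode.  */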
static unsigned int
rest_of_handle_sched_fusion (void)
{
#ifdef INSN_SCHEDULING
  sched_fusion = true;
  schedule_insns ();
  sched_fusion = false;
#endif
  return 0;
}

namespace {

const pass_data pass_data_live_range_shrinkage =
{
  RTL_PASS, /* type */
  "lr_shrinkage", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_LIVE_RANGE_SHRINKAGE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_live_range_shrinkage : public rtl_opt_pass
{
public:
  pass_live_range_shrinkage (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_live_range_shrinkage, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
  {
#ifdef INSN_SCHEDULING
    return flag_live_range_shrinkage;
#else
    return false;
#endif
  }

  unsigned int execute (function *) final override
  {
    return rest_of_handle_live_range_shrinkage ();
  }

}; // class pass_live_range_shrinkage

} // anon namespace

rtl_opt_pass *
make_pass_live_range_shrinkage (gcc::context *ctxt)
{
  return new pass_live_range_shrinkage (ctxt);
}

namespace {

const pass_data pass_data_sched =
{
  RTL_PASS, /* type */
  "sched1", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SCHED, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_sched : public rtl_opt_pass
{
public:
  pass_sched (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override;
  unsigned int execute (function *) final override
  {
    return rest_of_handle_sched ();
  }

}; // class pass_sched

bool
pass_sched::gate (function *)
{
#ifdef INSN_SCHEDULING
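  /* The sched_func debug counter makes it possible to disable this
     pass for selected functions via -fdbg-cnt, which helps when
     bisecting scheduler problems.  */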
  return optimize > 0 && flag_schedule_insns && dbg_cnt (sched_func);
#else
  return false;
#endif
}

} // anon namespace

rtl_opt_pass *
make_pass_sched (gcc::context *ctxt)
{
  return new pass_sched (ctxt);
}

namespace {

const pass_data pass_data_sched2 =
{
  RTL_PASS, /* type */
  "sched2", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SCHED2, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_sched2 : public rtl_opt_pass
{
public:
  pass_sched2 (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched2, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override;
  unsigned int execute (function *) final override
  {
    return rest_of_handle_sched2 ();
  }

}; // class pass_sched2

bool
pass_sched2::gate (function *)
{
#ifdef INSN_SCHEDULING
  return optimize > 0 && flag_schedule_insns_after_reload
    && !targetm.delay_sched2 && dbg_cnt (sched2_func);
#else
  return false;
#endif
}

} // anon namespace

rtl_opt_pass *
make_pass_sched2 (gcc::context *ctxt)
{
  return new pass_sched2 (ctxt);
}

namespace {

const pass_data pass_data_sched_fusion =
{
  RTL_PASS, /* type */
  "sched_fusion", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_SCHED_FUSION, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_df_finish, /* todo_flags_finish */
};

class pass_sched_fusion : public rtl_opt_pass
{
public:
  pass_sched_fusion (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_sched_fusion, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override;
  unsigned int execute (function *) final override
  {
    return rest_of_handle_sched_fusion ();
  }

}; // class pass_sched_fusion

bool
pass_sched_fusion::gate (function *)
{
#ifdef INSN_SCHEDULING
  /* Scheduling fusion relies on peephole2 to do real fusion work,
     so only enable it if peephole2 is in effect.  */
  return (optimize > 0 && flag_peephole2
          && flag_schedule_fusion && targetm.sched.fusion_priority != NULL);
#else
  return false;
#endif
}

} // anon namespace

rtl_opt_pass *
make_pass_sched_fusion (gcc::context *ctxt)
{
  return new pass_sched_fusion (ctxt);
}

#if __GNUC__ >= 10
# pragma GCC diagnostic pop
#endif