/* Instruction scheduling pass.
   Copyright (C) 1992-2023 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "cfghooks.h"
#include "df.h"
#include "profile.h"
#include "insn-attr.h"
#include "cfgrtl.h"
#include "cfgbuild.h"
#include "sched-int.h"


#ifdef INSN_SCHEDULING

/* The number of insns to be scheduled in total.  */
static int rgn_n_insns;

/* The number of insns scheduled so far.  */
static int sched_rgn_n_insns;

/* Set of blocks that already have their dependencies calculated.  */
static bitmap_head dont_calc_deps;

/* Last basic block in current ebb.  */
static basic_block last_bb;

/* Implementations of the sched_info functions for region scheduling.  */
static void init_ready_list (void);
static void begin_schedule_ready (rtx_insn *);
static bool schedule_more_p (void);
static const char *ebb_print_insn (const rtx_insn *, int);
static int rank (rtx_insn *, rtx_insn *);
static bool ebb_contributes_to_priority (rtx_insn *, rtx_insn *);
static basic_block earliest_block_with_similiar_load (basic_block, rtx);
static void add_deps_for_risky_insns (rtx_insn *, rtx_insn *);
static void debug_ebb_dependencies (rtx_insn *, rtx_insn *);

static void ebb_add_remove_insn (rtx_insn *, int);
static void ebb_add_block (basic_block, basic_block);
static basic_block advance_target_bb (basic_block, rtx_insn *);
static void ebb_fix_recovery_cfg (int, int, int);

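/* haifa-sched.cc uses the save_state / restore_state hooks below to
   snapshot and roll back this file's scheduling progress, e.g. when a
   modulo schedule must be abandoned.  */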
/* Allocate memory and store the state of the frontend.  Return the allocated
   memory.  */
static void *
save_ebb_state (void)
{
  int *p = XNEW (int);
  *p = sched_rgn_n_insns;
  return p;
}

/* Restore the state of the frontend from P_, then free it.  */
static void
restore_ebb_state (void *p_)
{
  int *p = (int *)p_;
  sched_rgn_n_insns = *p;
  free (p_);
}

/* Return true if there are more insns that should be scheduled.  */

static bool
schedule_more_p (void)
{
  return sched_rgn_n_insns < rgn_n_insns;
}

/* Print dependency information about ebb between HEAD and TAIL.  */
static void
debug_ebb_dependencies (rtx_insn *head, rtx_insn *tail)
{
  fprintf (sched_dump,
           ";; --------------- forward dependences: ------------ \n");

  fprintf (sched_dump, "\n;; --- EBB Dependences --- from bb%d to bb%d \n",
           BLOCK_NUM (head), BLOCK_NUM (tail));

  debug_dependencies (head, tail);
}

/* Add all insns that are initially ready to the ready list READY.  Called
   once before scheduling a set of insns.  */

static void
init_ready_list (void)
{
  int n = 0;
  rtx_insn *prev_head = current_sched_info->prev_head;
  rtx_insn *next_tail = current_sched_info->next_tail;
  rtx_insn *insn;

  sched_rgn_n_insns = 0;

  /* Print debugging information.  */
  if (sched_verbose >= 5)
    debug_ebb_dependencies (NEXT_INSN (prev_head), PREV_INSN (next_tail));

  /* Initialize ready list with all 'ready' insns in target block.
     Count number of insns in the target block being scheduled.  */
  for (insn = NEXT_INSN (prev_head); insn != next_tail; insn = NEXT_INSN (insn))
    {
      try_ready (insn);
      n++;
    }

  gcc_assert (n == rgn_n_insns);
}

/* INSN is being scheduled after LAST.  Update counters.  */
static void
begin_schedule_ready (rtx_insn *insn ATTRIBUTE_UNUSED)
{
  sched_rgn_n_insns++;
}

/* INSN is being moved to its place in the schedule, after LAST.  */
static void
begin_move_insn (rtx_insn *insn, rtx_insn *last)
{
  if (BLOCK_FOR_INSN (insn) == last_bb
      /* INSN is a jump in the last block, ...  */
      && control_flow_insn_p (insn)
      /* that is going to be moved over some instructions.  */
      && last != PREV_INSN (insn))
    {
      edge e;
      basic_block bb;

      /* An obscure special case: a partially dead instruction has been
         scheduled after the last control flow instruction.  In this case
         we can create a new basic block.  It is always exactly one basic
         block, the last one in the sequence.  */

      e = find_fallthru_edge (last_bb->succs);

      gcc_checking_assert (!e || !(e->flags & EDGE_COMPLEX));

      gcc_checking_assert (BLOCK_FOR_INSN (insn) == last_bb
                           && !IS_SPECULATION_CHECK_P (insn)
                           && BB_HEAD (last_bb) != insn
                           && BB_END (last_bb) == insn);

      {
        rtx_insn *x = NEXT_INSN (insn);
        if (e)
          gcc_checking_assert (NOTE_P (x) || LABEL_P (x));
        else
          gcc_checking_assert (BARRIER_P (x));
      }

      if (e)
        {
          bb = split_edge (e);
          gcc_assert (NOTE_INSN_BASIC_BLOCK_P (BB_END (bb)));
        }
      else
        {
          /* Create an empty unreachable block after the INSN.  */
          rtx_insn *next = NEXT_INSN (insn);
          if (next && BARRIER_P (next))
            next = NEXT_INSN (next);
          bb = create_basic_block (next, NULL_RTX, last_bb);
        }

      /* split_edge () creates BB before E->DEST.  Keep in mind that this
         operation extends the scheduling region to the end of BB.  Hence,
         we need to shift NEXT_TAIL, so haifa-sched.cc won't go out of the
         scheduling region.  */
      current_sched_info->next_tail = NEXT_INSN (BB_END (bb));
      gcc_assert (current_sched_info->next_tail);

      /* Append new basic block to the end of the ebb.  */
      sched_init_only_bb (bb, last_bb);
      gcc_assert (last_bb == bb);
    }
}

/* Return a string that contains the insn uid and optionally anything else
   necessary to identify this insn in an output.  It's valid to use a
   static buffer for this.  The ALIGNED parameter should cause the string
   to be formatted so that multiple output lines will line up nicely.  */

static const char *
ebb_print_insn (const rtx_insn *insn, int aligned ATTRIBUTE_UNUSED)
{
  static char tmp[80];

  /* '+' before insn means it is a new cycle start.  */
  if (GET_MODE (insn) == TImode)
    sprintf (tmp, "+ %4d", INSN_UID (insn));
  else
    sprintf (tmp, "  %4d", INSN_UID (insn));

  return tmp;
}

/* Compare priority of two insns.  Return a positive number if the second
   insn is to be preferred for scheduling, and a negative one if the first
   is to be preferred.  Zero if they are equally good.  */

static int
rank (rtx_insn *insn1, rtx_insn *insn2)
{
  basic_block bb1 = BLOCK_FOR_INSN (insn1);
  basic_block bb2 = BLOCK_FOR_INSN (insn2);

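  /* Prefer the insn from the more frequently executed block: a higher
     profile count for BB1 ranks INSN1 first.  */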
  if (bb1->count > bb2->count)
    return -1;
  if (bb1->count < bb2->count)
    return 1;
  return 0;
}

/* NEXT is an instruction that depends on INSN (a backward dependence);
   return true if we should include this dependence in priority
   calculations.  */

static bool
ebb_contributes_to_priority (rtx_insn *next ATTRIBUTE_UNUSED,
                             rtx_insn *insn ATTRIBUTE_UNUSED)
{
  return true;
}

/* INSN is a JUMP_INSN.  Store the set of registers that
   must be considered as used by this jump in USED.  */

void
ebb_compute_jump_reg_dependencies (rtx insn, regset used)
{
  basic_block b = BLOCK_FOR_INSN (insn);
  edge e;
  edge_iterator ei;

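  /* Any register that is live on entry to a non-fallthrough successor
     must be treated as used by the jump itself.  */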
  FOR_EACH_EDGE (e, ei, b->succs)
    if ((e->flags & EDGE_FALLTHRU) == 0)
      bitmap_ior_into (used, df_get_live_in (e->dest));
}

/* Used in schedule_insns to initialize current_sched_info for scheduling
   regions (or single basic blocks).  */

static struct common_sched_info_def ebb_common_sched_info;

static struct sched_deps_info_def ebb_sched_deps_info =
  {
    ebb_compute_jump_reg_dependencies,
    NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL, NULL,
    NULL,
    1 /* use_cselib */, 0 /* use_deps_list */, 0 /* generate_spec_deps */
  };

static struct haifa_sched_info ebb_sched_info =
{
  init_ready_list,
  NULL,
  schedule_more_p,
  NULL,
  rank,
  ebb_print_insn,
  ebb_contributes_to_priority,
  NULL, /* insn_finishes_block_p */

  NULL, NULL,
  NULL, NULL,
  1 /* queue_must_finish_empty */, 0 /* sched_max_insns_priority */,

  ebb_add_remove_insn,
  begin_schedule_ready,
  begin_move_insn,
  advance_target_bb,

  save_ebb_state,
  restore_ebb_state,

  SCHED_EBB
  /* We can create new blocks in begin_schedule_ready ().  */
  | NEW_BBS
};

/* Returns the earliest block in EBB currently being processed where a
   "similar load" 'insn2' is found, and hence LOAD_INSN can move
   speculatively into the found block.  All the following must hold:

   (1) both loads have 1 base register (PFREE_CANDIDATEs).
   (2) load_insn and insn2 have a def-use dependence upon
       the same insn 'insn1'.

   From all these we can conclude that the two loads access memory
   addresses that differ at most by a constant, and hence if moving
   load_insn would cause an exception, it would have been caused by
   insn2 anyhow.

   The function uses the list (given by LAST_BLOCK) of already processed
   blocks in EBB.  The list is formed in `add_deps_for_risky_insns'.  */

static basic_block
earliest_block_with_similiar_load (basic_block last_block, rtx load_insn)
{
  sd_iterator_def back_sd_it;
  dep_t back_dep;
  basic_block bb, earliest_block = NULL;

  FOR_EACH_DEP (load_insn, SD_LIST_BACK, back_sd_it, back_dep)
    {
      rtx_insn *insn1 = DEP_PRO (back_dep);

      if (DEP_TYPE (back_dep) == REG_DEP_TRUE)
        /* Found a DEF-USE dependence (insn1, load_insn).  */
        {
          sd_iterator_def fore_sd_it;
          dep_t fore_dep;

          FOR_EACH_DEP (insn1, SD_LIST_FORW, fore_sd_it, fore_dep)
            {
              rtx_insn *insn2 = DEP_CON (fore_dep);
              basic_block insn2_block = BLOCK_FOR_INSN (insn2);

              if (DEP_TYPE (fore_dep) == REG_DEP_TRUE)
                {
                  if (earliest_block != NULL
                      && earliest_block->index < insn2_block->index)
                    continue;

                  /* Found a DEF-USE dependence (insn1, insn2).  */
                  if (haifa_classify_insn (insn2) != PFREE_CANDIDATE)
                    /* insn2 not guaranteed to be a 1 base reg load.  */
                    continue;

                  for (bb = last_block; bb; bb = (basic_block) bb->aux)
                    if (insn2_block == bb)
                      break;

                  if (!bb)
                    /* insn2 is the similar load.  */
                    earliest_block = insn2_block;
                }
            }
        }
    }

  return earliest_block;
}

/* The following function adds dependencies between jumps and risky
   insns in the given ebb.  */

static void
add_deps_for_risky_insns (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *prev;
  int classification;
  rtx_insn *last_jump = NULL;
  rtx_insn *next_tail = NEXT_INSN (tail);
  basic_block last_block = NULL, bb;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      add_delay_dependencies (insn);
      if (control_flow_insn_p (insn))
        {
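          /* Thread the blocks that end in control flow insns onto a list
             through bb->aux; earliest_block_with_similiar_load walks it.  */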
          bb = BLOCK_FOR_INSN (insn);
          bb->aux = last_block;
          last_block = bb;
          /* Ensure blocks stay in the same order.  */
          if (last_jump)
            add_dependence (insn, last_jump, REG_DEP_ANTI);
          last_jump = insn;
        }
      else if (INSN_P (insn) && last_jump != NULL_RTX)
        {
          classification = haifa_classify_insn (insn);
          prev = last_jump;

          switch (classification)
            {
            case PFREE_CANDIDATE:
              if (flag_schedule_speculative_load)
                {
                  bb = earliest_block_with_similiar_load (last_block, insn);
                  if (bb)
                    {
                      bb = (basic_block) bb->aux;
                      if (!bb)
                        break;
                      prev = BB_END (bb);
                    }
                }
              /* Fall through.  */
            case TRAP_RISKY:
            case IRISKY:
            case PRISKY_CANDIDATE:
              /* ??? We could implement better checking of PRISKY_CANDIDATEs,
                 analogous to sched-rgn.cc.  */
              /* We cannot change the mode of the backward
                 dependency because REG_DEP_ANTI has the lowest
                 rank.  */
              if (! sched_insns_conditions_mutex_p (insn, prev))
                {
                  if ((current_sched_info->flags & DO_SPECULATION)
                      && (spec_info->mask & BEGIN_CONTROL))
                    {
                      dep_def _dep, *dep = &_dep;

                      init_dep (dep, prev, insn, REG_DEP_ANTI);

                      if (current_sched_info->flags & USE_DEPS_LIST)
                        DEP_STATUS (dep) = set_dep_weak (DEP_ANTI,
                                                         BEGIN_CONTROL,
                                                         MAX_DEP_WEAK);

                      sd_add_or_update_dep (dep, false);
                    }
                  else
                    add_dependence (insn, prev, REG_DEP_CONTROL);
                }

              break;

            default:
              break;
            }
        }
    }
  /* Maintain the invariant that bb->aux is clear after use.  */
  while (last_block)
    {
      bb = (basic_block) last_block->aux;
      last_block->aux = NULL;
      last_block = bb;
    }
}

/* Schedule a single extended basic block, defined by the boundaries
   HEAD and TAIL.

   We change our expectations about scheduler behavior depending on
   whether MODULO_SCHEDULING is true.  If it is, we expect that the
   caller has already called set_modulo_params and created delay pairs
   as appropriate.  If the modulo schedule failed, we return
   NULL.  */

basic_block
schedule_ebb (rtx_insn *head, rtx_insn *tail, bool modulo_scheduling)
{
  basic_block first_bb, target_bb;
  class deps_desc tmp_deps;
  bool success;

  /* Blah.  We should fix the rest of the code not to get confused by
     a note or two.  */
  while (head != tail)
    {
      if (NOTE_P (head) || DEBUG_INSN_P (head))
        head = NEXT_INSN (head);
      else if (NOTE_P (tail) || DEBUG_INSN_P (tail))
        tail = PREV_INSN (tail);
      else if (LABEL_P (head))
        head = NEXT_INSN (head);
      else
        break;
    }

  first_bb = BLOCK_FOR_INSN (head);
  last_bb = BLOCK_FOR_INSN (tail);

  if (no_real_insns_p (head, tail))
    return BLOCK_FOR_INSN (tail);

  gcc_assert (INSN_P (head) && INSN_P (tail));

  if (!bitmap_bit_p (&dont_calc_deps, first_bb->index))
    {
      init_deps_global ();

      /* Compute dependencies.  */
      init_deps (&tmp_deps, false);
      sched_analyze (&tmp_deps, head, tail);
      free_deps (&tmp_deps);

      add_deps_for_risky_insns (head, tail);

      if (targetm.sched.dependencies_evaluation_hook)
        targetm.sched.dependencies_evaluation_hook (head, tail);

      finish_deps_global ();
    }
  else
    /* Only recovery blocks can have their dependencies already calculated,
       and they are always single-block ebbs.  */
    gcc_assert (first_bb == last_bb);

  /* Set priorities.  */
  current_sched_info->sched_max_insns_priority = 0;
  rgn_n_insns = set_priorities (head, tail);
  current_sched_info->sched_max_insns_priority++;

  current_sched_info->prev_head = PREV_INSN (head);
  current_sched_info->next_tail = NEXT_INSN (tail);

  remove_notes (head, tail);

  unlink_bb_notes (first_bb, last_bb);

  target_bb = first_bb;

  /* Make ready list big enough to hold all the instructions from the ebb.  */
  sched_extend_ready_list (rgn_n_insns);
  success = schedule_block (&target_bb, NULL);
  gcc_assert (success || modulo_scheduling);

  /* Free ready list.  */
  sched_finish_ready_list ();

  /* We might pack all instructions into fewer blocks,
     so we may have made some of them empty.  Can't assert (b == last_bb).  */

  /* Sanity check: verify that all region insns were scheduled.  */
  gcc_assert (modulo_scheduling || sched_rgn_n_insns == rgn_n_insns);

  /* Free dependencies.  */
  sched_free_deps (current_sched_info->head, current_sched_info->tail, true);

  gcc_assert (haifa_recovery_bb_ever_added_p
              || deps_pools_are_empty_p ());

  if (EDGE_COUNT (last_bb->preds) == 0)
    /* LAST_BB is unreachable.  */
    {
      gcc_assert (first_bb != last_bb
                  && EDGE_COUNT (last_bb->succs) == 0);
      last_bb = last_bb->prev_bb;
      delete_basic_block (last_bb->next_bb);
    }

  return success ? last_bb : NULL;
}

/* Perform initializations before running schedule_ebbs or a single
   schedule_ebb.  */
void
schedule_ebbs_init (void)
{
  /* Setup infos.  */
  {
    memcpy (&ebb_common_sched_info, &haifa_common_sched_info,
            sizeof (ebb_common_sched_info));

    ebb_common_sched_info.fix_recovery_cfg = ebb_fix_recovery_cfg;
    ebb_common_sched_info.add_block = ebb_add_block;
    ebb_common_sched_info.sched_pass_id = SCHED_EBB_PASS;

    common_sched_info = &ebb_common_sched_info;
    sched_deps_info = &ebb_sched_deps_info;
    current_sched_info = &ebb_sched_info;
  }

  haifa_sched_init ();

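  /* Record each insn's containing basic block so BLOCK_FOR_INSN is
     valid during scheduling.  */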
  compute_bb_for_insn ();

  /* Initialize DONT_CALC_DEPS and ebb-{start, end} markers.  */
  bitmap_initialize (&dont_calc_deps, &bitmap_default_obstack);
}

/* Perform cleanups after scheduling using schedule_ebbs or schedule_ebb.  */
void
schedule_ebbs_finish (void)
{
  bitmap_release (&dont_calc_deps);

  /* Reposition the prologue and epilogue notes in case we moved the
     prologue/epilogue insns.  */
  if (reload_completed)
    reposition_prologue_and_epilogue_notes ();

  haifa_sched_finish ();
}

/* The main entry point in this file.  */

void
schedule_ebbs (void)
{
  basic_block bb;
  int probability_cutoff;
  rtx_insn *tail;

  /* Taking care of this degenerate case makes the rest of
     this code simpler.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return;

  if (profile_info && profile_status_for_fn (cfun) == PROFILE_READ)
    probability_cutoff = param_tracer_min_branch_probability_feedback;
  else
    probability_cutoff = param_tracer_min_branch_probability;
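  /* The parameter is a percentage; rescale it to REG_BR_PROB_BASE units
     so it can be compared against edge probabilities below.  */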
  probability_cutoff = REG_BR_PROB_BASE / 100 * probability_cutoff;

  schedule_ebbs_init ();

  /* Schedule every region in the subroutine.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *head = BB_HEAD (bb);

      if (bb->flags & BB_DISABLE_SCHEDULE)
        continue;

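      /* Grow the ebb forward along fallthru edges, stopping at a labeled
         block (which may have other predecessors), at an edge colder than
         the cutoff, or at a block where scheduling is disabled.  */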
      for (;;)
        {
          edge e;
          tail = BB_END (bb);
          if (bb->next_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
              || LABEL_P (BB_HEAD (bb->next_bb)))
            break;
          e = find_fallthru_edge (bb->succs);
          if (! e)
            break;
          if (e->probability.initialized_p ()
              && e->probability.to_reg_br_prob_base () <= probability_cutoff)
            break;
          if (e->dest->flags & BB_DISABLE_SCHEDULE)
            break;
          bb = bb->next_bb;
        }

      bb = schedule_ebb (head, tail, false);
    }
  schedule_ebbs_finish ();
}

/* INSN has been added to/removed from current ebb.  */
static void
ebb_add_remove_insn (rtx_insn *insn ATTRIBUTE_UNUSED, int remove_p)
{
  if (!remove_p)
    rgn_n_insns++;
  else
    rgn_n_insns--;
}

/* BB was added to ebb after AFTER.  */
static void
ebb_add_block (basic_block bb, basic_block after)
{
  /* Recovery blocks are always bounded by BARRIERs; therefore they
     always form a single-block EBB, and we can use the block's index
     to identify such EBBs.  */
  if (after == EXIT_BLOCK_PTR_FOR_FN (cfun))
    bitmap_set_bit (&dont_calc_deps, bb->index);
  else if (after == last_bb)
    last_bb = bb;
}

/* Return the next block in the ebb chain.  For parameter meaning please
   refer to sched-int.h: struct sched_info: advance_target_bb.  */
static basic_block
advance_target_bb (basic_block bb, rtx_insn *insn)
{
  if (insn)
    {
      if (BLOCK_FOR_INSN (insn) != bb
          && control_flow_insn_p (insn)
          /* We handle interblock movement of the speculation check
             or over a speculation check in
             haifa-sched.cc: move_block_after_check ().  */
          && !IS_SPECULATION_BRANCHY_CHECK_P (insn)
          && !IS_SPECULATION_BRANCHY_CHECK_P (BB_END (bb)))
        {
          /* Assert that we don't move jumps across blocks.  */
          gcc_assert (!control_flow_insn_p (BB_END (bb))
                      && NOTE_INSN_BASIC_BLOCK_P (BB_HEAD (bb->next_bb)));
          return bb;
        }
      else
        return 0;
    }
  else
    /* Return the next non-empty block.  */
    {
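      /* A block is empty when its basic block note is also its last
         insn, i.e. it contains no real insns.  */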
      do
        {
          gcc_assert (bb != last_bb);

          bb = bb->next_bb;
        }
      while (bb_note (bb) == BB_END (bb));

      return bb;
    }
}

/* Fix internal data after interblock movement of a jump instruction.
   For parameter meaning please refer to
   sched-int.h: struct sched_info: fix_recovery_cfg.  */
static void
ebb_fix_recovery_cfg (int bbi ATTRIBUTE_UNUSED, int jump_bbi,
                      int jump_bb_nexti)
{
  gcc_assert (last_bb->index != bbi);

  if (jump_bb_nexti == last_bb->index)
    last_bb = BASIC_BLOCK_FOR_FN (cfun, jump_bbi);
}

#endif /* INSN_SCHEDULING */