/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and analyze it
   that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
     - CFG-aware instruction chain manipulation
	 delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
	 insert_insn_on_edge, commit_edge_insertions
     - CFG updating after insn simplification
	 purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
	 fixup_abnormal_edges

   Functions not intended for generic use:
     - Infrastructure to quickly determine the basic block for an insn
	 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn
     - Edge redirection with updating and optimizing of the insn chain
	 block_label, tidy_fallthru_edge, force_nonfallthru  */

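/* Illustrative sketch (not a normative example): a pass that deletes a
   dead jump and lets the CFG catch up might do, roughly:

     if (JUMP_P (insn))
       delete_insn_and_edges (insn);
     ...
     purge_all_dead_edges ();

   delete_insn_and_edges also purges dead edges when INSN ended its block;
   the exact sequence depends on the pass.  See the individual function
   comments below.  */
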
#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "insn-config.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "bb-reorder.h"
#include "rtl-error.h"
#include "insn-attr.h"
#include "dojump.h"
#include "expr.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "print-rtl.h"
#include "rtl-iter.h"
#include "gimplify.h"
#include "profile.h"
#include "sreal.h"

/* Disable warnings about missing quoting in GCC diagnostics.  */
#if __GNUC__ >= 10
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wformat-diag"
#endif

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx_insn *cfg_layout_function_footer;
static GTY(()) rtx_insn *cfg_layout_function_header;

static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static bool can_delete_note_p (const rtx_note *);
static bool can_delete_label_p (const rtx_code_label *);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static bool rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
static bool rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
static bool rtl_bb_info_initialized_p (basic_block bb);

/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static bool
can_delete_note_p (const rtx_note *note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static bool
can_delete_label_p (const rtx_code_label *label)
{
  return (!LABEL_PRESERVE_P (label)
	  /* User declared labels must be preserved.  */
	  && LABEL_NAME (label) == 0
	  && !vec_safe_contains<rtx_insn *> (forced_labels,
					     const_cast<rtx_code_label *> (label)));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx_insn *insn)
{
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as they
	 might be referenced via variables, the constant pool etc.
	 Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
	{
	  const char *name = LABEL_NAME (insn);
	  basic_block bb = BLOCK_FOR_INSN (insn);
	  rtx_insn *bb_note = NEXT_INSN (insn);

	  really_delete = false;
	  PUT_CODE (insn, NOTE);
	  NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (insn) = name;

	  /* If the note following the label starts a basic block, and the
	     label is a member of the same basic block, interchange the two.  */
	  if (bb_note != NULL_RTX
	      && NOTE_INSN_BASIC_BLOCK_P (bb_note)
	      && bb != NULL
	      && bb == BLOCK_FOR_INSN (bb_note))
	    {
	      reorder_insns_nobb (insn, insn, bb_note);
	      BB_HEAD (bb) = bb_note;
	      if (BB_END (bb) == bb_note)
		BB_END (bb) = insn;
	    }
	}

      remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!insn->deleted ());
      if (INSN_P (insn))
	df_insn_delete (insn);
      remove_insn (insn);
      insn->set_deleted ();
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
	  && LABEL_P (JUMP_LABEL (insn)))
	LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
	      = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
	     && LABEL_P (XEXP (note, 0)))
	{
	  LABEL_NUSES (XEXP (note, 0))--;
	  remove_note (insn, note);
	}
    }

  /* Also if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
	 && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      int i;

      for (i = 0; i < len; i++)
	{
	  rtx label = XEXP (RTVEC_ELT (vec, i), 0);

	  /* When deleting code in bulk (e.g. removing many unreachable
	     blocks) we can delete a label that's a target of the vector
	     before deleting the vector itself.  */
	  if (!NOTE_P (label))
	    LABEL_NUSES (label)--;
	}
    }
}

/* Like delete_insn but also purge dead edges from BB.
   Return true if any edges are eliminated.  */

bool
delete_insn_and_edges (rtx_insn *insn)
{
  bool purge = false;

  if (NONDEBUG_INSN_P (insn) && BLOCK_FOR_INSN (insn))
    {
      basic_block bb = BLOCK_FOR_INSN (insn);
      if (BB_END (bb) == insn)
	purge = true;
      else if (DEBUG_INSN_P (BB_END (bb)))
	for (rtx_insn *dinsn = NEXT_INSN (insn);
	     DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn))
	  if (BB_END (bb) == dinsn)
	    {
	      purge = true;
	      break;
	    }
    }
  delete_insn (insn);
  if (purge)
    return purge_dead_edges (BLOCK_FOR_INSN (insn));
  return false;
}

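/* Usage sketch (hypothetical caller): when removing the final jump of a
   block, prefer delete_insn_and_edges over plain delete_insn, so that any
   now-dead outgoing edges are purged at the same time:

     rtx_insn *jump = BB_END (bb);
     if (JUMP_P (jump) && delete_insn_and_edges (jump))
       df_set_bb_dirty (bb);

   Whether further cleanup is needed afterwards depends on the caller.  */
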
/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, we set the bb field to
   NULL for insns that cannot be removed.  */

void
delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
{
  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  rtx_insn *current = finish;
  while (1)
    {
      rtx_insn *prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
	;
      else
	delete_insn (current);

      if (clear_bb && !current->deleted ())
	set_block_for_insn (current, NULL);

      if (current == start)
	break;
      current = prev;
    }
}

/* Create a new basic block consisting of the instructions between HEAD and END
   inclusive.  This function is designed to allow fast BB construction - it
   reuses the note and basic block struct in BB_NOTE, if any, does not grow
   the BASIC_BLOCK chain, and should be used directly only by CFG construction
   code.  END can be NULL to create a new empty basic block before HEAD.
   Both END and HEAD can be NULL to create a basic block at the end of the
   INSN chain.  AFTER is the basic block we should be put after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
			      basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx_insn *after;

      if (LABEL_P (head))
	after = head;
      else
	{
	  after = PREV_INSN (head);
	  head = bb_note;
	}

      if (after != bb_note && NEXT_INSN (after) != bb_note)
	reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
	head = end = bb_note
	  = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
	{
	  bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
	  if (head == end)
	    end = bb_note;
	}
      else
	{
	  bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
	  head = bb_note;
	  if (!end)
	    end = head;
	}

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block_for_fn (cfun)++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD and
   END and place it into the BB chain after block AFTER.  END can be NULL to
   create a new empty basic block before HEAD.  Both END and HEAD can be
   NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx_insn *head = (rtx_insn *) headp;
  rtx_insn *end = (rtx_insn *) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      >= basic_block_info_for_fn (cfun)->length ())
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
			   last_basic_block_for_fn (cfun) + 1);

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}

static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}

/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx_insn *insn, *end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);

  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}

/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      rtx_insn *insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
	{
	  BLOCK_FOR_INSN (insn) = bb;
	  if (insn == end)
	    break;
	}
    }
}

/* Release the basic_block_for_insn array.  */

void
free_bb_for_insn (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
}

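/* Sketch of the expected pairing (assumed typical structure for code
   running outside the normal pass pipeline):

     compute_bb_for_insn ();
     ... code relying on BLOCK_FOR_INSN (insn) ...
     free_bb_for_insn ();

   Within the pipeline, pass_free_cfg below takes care of the release.  */
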
namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute (function *) final override;

}; // class pass_free_cfg

unsigned int
pass_free_cfg::execute (function *)
{
  /* The resource.cc machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the RTX after which to emit code at the entry of the function.  */
rtx_insn *
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
	  BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */
void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}

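/* The general pattern behind emit_insn_at_entry, usable on any edge
   (illustrative sketch; the emitted insns are caller-provided):

     start_sequence ();
     ... emit the desired insns ...
     rtx_insn *seq = get_insns ();
     end_sequence ();
     insert_insn_on_edge (seq, e);
     commit_edge_insertions ();

   commit_edge_insertions splits edges as needed to place the code.  */
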
/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (or BARRIER if found) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
{
  rtx_insn *insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

/* Like active_insn_p, except keep the return value use or clobber around
   even after reload.  */

static bool
flow_active_insn_p (const rtx_insn *insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.
     Similarly, keep a USE of the function return value, otherwise
     the USE is dropped and we could fail to thread a jump if the USE
     appears on some paths and not on others, see PR90257.  */
  if ((GET_CODE (PATTERN (insn)) == CLOBBER
       || GET_CODE (PATTERN (insn)) == USE)
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx_insn *insn;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || !single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
	  || (JUMP_P (insn) && simplejump_p (insn))
	  || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
	return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
	return false;
    }

  return true;
}

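/* Typical use (hypothetical cleanup loop): a forwarder block can be
   bypassed by redirecting its incoming edges straight to its single
   destination:

     if (forwarder_block_p (bb))
       {
	 basic_block dest = single_succ (bb);
	 ... redirect each edge in bb->preds to DEST ...
       }

   cfgcleanup.cc contains the real versions of such transformations.  */
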
/* Return true if we can reach target from src by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx_insn *insn = BB_END (src);
  rtx_insn *insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}

/* Return true if we could reach target from src by falling through,
   if the target were made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}

/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx_note *
bb_note (basic_block bb)
{
  rtx_insn *note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return as_a <rtx_note *> (note);
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
{
  rtx_insn *insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Create a new basic block just after basic block BB by splitting
   everything after the specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx_insn *insn = (rtx_insn *) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
	{
	  rtx_insn *next = insn;

	  insn = PREV_INSN (insn);

	  /* If the block contains only debug insns, insn would have
	     been NULL in a non-debug compilation, and then we'd end
	     up emitting a DELETED note.  For -fcompare-debug
	     stability, emit the note too.  */
	  if (insn != BB_END (bb)
	      && DEBUG_INSN_P (next)
	      && DEBUG_INSN_P (BB_END (bb)))
	    {
	      while (next != BB_END (bb) && DEBUG_INSN_P (next))
		next = NEXT_INSN (next);

	      if (next == BB_END (bb))
		emit_note_after (NOTE_INSN_DELETED, next);
	    }
	}
      else
	insn = get_last_insn ();
    }

  /* We probably should check the type of the insn so that we do not create
     an inconsistent cfg.  It is checked in verify_flow_info anyway, so do not
     bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}

/* Return true if LOC1 and LOC2 are equivalent for
   unique_locus_on_edge_between_p purposes.  */

static bool
loc_equal (location_t loc1, location_t loc2)
{
  if (loc1 == loc2)
    return true;

  expanded_location loce1 = expand_location (loc1);
  expanded_location loce2 = expand_location (loc2);

  if (loce1.line != loce2.line
      || loce1.column != loce2.column
      || loce1.data != loce2.data)
    return false;
  if (loce1.file == loce2.file)
    return true;
  return (loce1.file != NULL
	  && loce2.file != NULL
	  && filename_cmp (loce1.file, loce2.file) == 0);
}

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx_insn *insn, *end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && loc_equal (INSN_LOCATION (insn), goto_locus))
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
	insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
	  && loc_equal (INSN_LOCATION (insn), goto_locus))
	return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  /* If B is a forwarder block whose outgoing edge has no location, we'll
     propagate the locus of the edge between A and B onto it.  */
  const bool forward_edge_locus
    = (b->flags & BB_FORWARDER_BLOCK) != 0
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
  rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
  rtx_insn *del_first = NULL, *del_last = NULL;
  rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
  bool b_empty = false;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
	     a->index);

  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
	 in particular at the end of a function.  */
      if (b_head == b_end)
	b_empty = true;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
	b_empty = true;
      if (! del_last)
	del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx_insn *prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
	if (!NOTE_P (prev)
	    || NOTE_INSN_BASIC_BLOCK_P (prev)
	    || prev == BB_HEAD (a))
	  break;

      del_first = a_end;

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* If not optimizing, preserve the locus of the single edge between
     blocks A and B if necessary by emitting a nop.  */
  if (!optimize
      && !forward_edge_locus
      && !DECL_IGNORED_P (current_function_decl))
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
	 and the debug insns that make up B after the debug insns,
	 bringing the debug insns into A while keeping the notes after
	 the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
	reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
			    b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  if (forward_edge_locus)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
	  && single_succ (a) == b
	  && single_pred_p (b)
	  && a != b
	  /* Must be simple edge.  */
	  && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
	  && a->next_bb == b
	  && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  /* If the jump insn has side effects,
	     we can't kill the edge.  */
	  && (!JUMP_P (BB_END (a))
	      || (reload_completed
		  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}

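/* Callers normally go through the cfghooks wrappers rather than calling
   the functions above directly (illustrative sketch):

     if (can_merge_blocks_p (a, b))
       merge_blocks (a, b);

   In cfgrtl mode these dispatch to rtl_can_merge_blocks and
   rtl_merge_blocks via the cfg hook tables for this IR.  */
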
/* Return the label in the head of basic block BLOCK.  Create one if it doesn't
   exist.  */

rtx_code_label *
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  if (!LABEL_P (BB_HEAD (block)))
    {
      BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));
    }

  return as_a <rtx_code_label *> (BB_HEAD (block));
}

/* Remove all barriers from BB_FOOTER of a BB.  */

static void
remove_barriers_from_footer (basic_block bb)
{
  rtx_insn *insn = BB_FOOTER (bb);

  /* Remove barriers but keep jumptables.  */
  while (insn)
    {
      if (BARRIER_P (insn))
	{
	  if (PREV_INSN (insn))
	    SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	  else
	    BB_FOOTER (bb) = NEXT_INSN (insn);
	  if (NEXT_INSN (insn))
	    SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	}
      if (LABEL_P (insn))
	return;
      insn = NEXT_INSN (insn);
    }
}

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump completely.
   This can apply only if all edges now point to the same block.  The
   parameters and return values are equivalent to redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);
  rtx set;
  bool fallthru = false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
	 edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
	  && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
	fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = true;

      /* Selectively unlink whole insn chain.  */
      if (in_cfglayout)
	{
	  delete_insn_chain (insn, BB_END (src), false);
	  remove_barriers_from_footer (src);
	}
      else
	delete_insn_chain (insn, PREV_INSN (BB_HEAD (target)), false);
    }

  /* If this already is simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
	return NULL;
      if (dump_file)
	fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
		 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			  block_label (target), 0))
	{
	  gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
	  return NULL;
	}
    }

  /* Cannot do anything for target exit block.  */
  else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Or replace possibly complicated jump insn by simple jump insn.  */
  else
    {
      rtx_code_label *target_label = block_label (target);
      rtx_insn *barrier;
      rtx_insn *label;
      rtx_jump_table_data *table;

      emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
	fprintf (dump_file, "Replacing insn %i by jump %i\n",
		 INSN_UID (insn), INSN_UID (BB_END (src)));

      delete_insn_chain (insn, insn, false);

      /* Recognize a tablejump that we are converting to a
	 simple jump and remove its associated CODE_LABEL
	 and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
	delete_insn_chain (label, table, false);

      barrier = next_nonnote_nondebug_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
	emit_barrier_after (BB_END (src));
      else
	{
	  if (barrier != NEXT_INSN (BB_END (src)))
	    {
	      /* Move the jump before barrier so that the notes
		 which originally were or were created before jump table are
		 inside the basic block.  */
	      rtx_insn *new_insn = BB_END (src);

	      update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
					PREV_INSN (barrier), src);

	      SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
	      SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

	      SET_NEXT_INSN (new_insn) = barrier;
	      SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;

	      SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
	      SET_PREV_INSN (barrier) = new_insn;
	    }
	}
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = profile_probability::always ();

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

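/* Illustrative caller (hypothetical): redirect E to TARGET, trying the
   cheap jump replacement first and falling back to patching the jump:

     edge ret = try_redirect_by_replacing_jump (e, target, false);
     if (!ret)
       ret = redirect_branch_edge (e, target);

   rtl_redirect_edge_and_branch below implements this pattern for real,
   with the required df and partition bookkeeping.  */
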
/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this
   only when it originally reached the label OLD_LABEL.  Return true if this
   worked or the original target wasn't OLD_LABEL; return false if
   redirection doesn't work.  */

static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
{
  rtx_jump_table_data *table;
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec;
      int j;
      rtx_code_label *new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      vec = table->get_labels ();

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
	if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
	  {
	    RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
	    --LABEL_NUSES (old_label);
	    ++LABEL_NUSES (new_label);
	  }

      /* Handle casesi dispatch insns.  */
      if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX
	  && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
	{
	  XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
						       new_label);
	  --LABEL_NUSES (old_label);
	  ++LABEL_NUSES (new_label);
	}
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx note;

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      rtx_code_label *new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
	{
	  rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
	  gcc_assert (GET_CODE (old_ref) == LABEL_REF);
	  if (XEXP (old_ref, 0) == old_label)
	    {
	      ASM_OPERANDS_LABEL (tmp, i)
		= gen_rtx_LABEL_REF (Pmode, new_label);
	      --LABEL_NUSES (old_label);
	      ++LABEL_NUSES (new_label);
	    }
	}

      if (JUMP_LABEL (insn) == old_label)
	{
	  JUMP_LABEL (insn) = new_label;
	  note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	  if (note)
	    remove_note (insn, note);
	}
      else
	{
	  note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	  if (note)
	    remove_note (insn, note);
	  if (JUMP_LABEL (insn) != new_label
	      && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
	    add_reg_note (insn, REG_LABEL_TARGET, new_label);
	}
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
	     != NULL_RTX)
	XEXP (note, 0) = new_label;
    }
  else
    {
      /* ?? We may play the games with moving the named labels from
	 one basic block to the other in case only one computed_jump is
	 available.  */
      if (computed_jump_p (insn)
	  /* A return instruction can't be redirected.  */
	  || returnjump_p (insn))
	return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
	{
	  /* If the insn doesn't go where we think, we're confused.  */
	  gcc_assert (JUMP_LABEL (insn) == old_label);

	  /* If the substitution doesn't succeed, die.  This can happen
	     if the back end emitted unrecognizable instructions or if
	     target is exit block on some arches.  Or for crossing
	     jumps.  */
	  if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			      block_label (new_bb), 0))
	    {
	      gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			  || CROSSING_JUMP_P (insn));
	      return false;
	    }
	}
    }
  return true;
}

/* Redirect the edge representing the branch of an (un)conditional jump or
   tablejump.  Return NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx_insn *old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);

  /* We can only redirect non-fallthru edges of a jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
	return NULL;
    }
  else
    /* When expanding this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
					     old_label, target))
	return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
	     e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun) || e->dest
      == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src)))
	CROSSING_JUMP_P (BB_END (e->src)) = 1;
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from jump at end of
	 src if it exists, and if no other successors are
	 still crossing.  */
      if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
	{
	  bool has_crossing_succ = false;
	  edge e2;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e2, ei, e->src->succs)
	    {
	      has_crossing_succ |= (e2->flags & EDGE_CROSSING);
	      if (has_crossing_succ)
		break;
	    }
	  if (!has_crossing_succ)
	    CROSSING_JUMP_P (BB_END (e->src)) = 0;
	}
    }
}

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
	 Note that force_nonfallthru will do any necessary partition
	 boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
	  && BB_PARTITION (bb) != BB_PARTITION (e->dest)
	  && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	force_nonfallthru (e);
      else
	fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do so at the
   expense of adding new instructions or reordering basic blocks.

   This function can also be called with the edge destination already equal
   to TARGET.  It should then try the simplifications and do nothing if none
   is possible.

   Return the edge representing the branch if the transformation succeeded.
   Return NULL on failure.
   We still return NULL if E already pointed to TARGET and we did not
   manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
	      || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      rtx_insn *insn = unlink_insn_chain (barrier, barrier);

      if (BB_FOOTER (bb))
	{
	  rtx_insn *footer_tail = BB_FOOTER (bb);

	  while (NEXT_INSN (footer_tail))
	    footer_tail = NEXT_INSN (footer_tail);
	  if (!BARRIER_P (footer_tail))
	    {
	      SET_NEXT_INSN (footer_tail) = insn;
	      SET_PREV_INSN (insn) = footer_tail;
	    }
	}
      else
	BB_FOOTER (bb) = insn;
    }
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* In the case that the last instruction is a conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect the fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
				  block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
	{
	  int prob = XINT (note, 0);

	  b->probability = profile_probability::from_reg_br_prob_note (prob);
	  e->probability -= e->probability;
	}
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - fallthru edge to the same block as abnormal
	 edge.
	 We can't redirect abnormal edge, but we still can split the fallthru
	 one and create separate abnormal edge to original destination.
	 This allows bb-reorder to make such edge non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  /* We can't redirect the entry block.  Create an empty block
	     at the start of the function which we use to add the new
	     jump.  */
	  edge tmp;
	  edge_iterator ei;
	  bool found = false;

	  basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
					       ENTRY_BLOCK_PTR_FOR_FN (cfun));
	  bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;

	  /* Make sure new block ends up in correct hot/cold section.  */
	  BB_COPY_PARTITION (bb, e->dest);

	  /* Change the existing edge's source to be the new block, and add
	     a new edge from the entry block to the new block.  */
	  e->src = bb;
	  for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
	       (tmp = ei_safe_edge (ei)); )
	    {
	      if (tmp == e)
		{
		  ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
		  found = true;
		  break;
		}
	      else
		ei_next (&ei);
	    }

	  gcc_assert (found);

	  vec_safe_push (bb->succs, e);
	  make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
				 EDGE_FALLTHRU);
	}
    }

  /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
	{
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
	    {
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
	      XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
	      adjust_jump_target = true;
	    }
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
	    asm_goto_edge = true;
	}
      if (adjust_jump_target)
	{
	  rtx_insn *insn = BB_END (e->src);
	  rtx note;
	  rtx_insn *old_label = BB_HEAD (e->dest);
	  rtx_insn *new_label = BB_HEAD (target);

	  if (JUMP_LABEL (insn) == old_label)
	    {
	      JUMP_LABEL (insn) = new_label;
	      note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	      if (note)
		remove_note (insn, note);
	    }
	  else
	    {
	      note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	      if (note)
		remove_note (insn, note);
	      if (JUMP_LABEL (insn) != new_label
		  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
		add_reg_note (insn, REG_LABEL_TARGET, new_label);
	    }
	  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
		 != NULL_RTX)
	    XEXP (note, 0) = new_label;
	}
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      rtx_insn *new_head;
      profile_count count = e->count ();
      profile_probability probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
	 by searching forward from there.  Otherwise start searching
	 forward from the last instruction of the old block.  */
      rtx_jump_table_data *table;
      if (tablejump_p (BB_END (e->src), NULL, &table))
	new_head = table;
      else
	new_head = BB_END (e->src);
      new_head = NEXT_INSN (new_head);

      jump_block = create_basic_block (new_head, NULL, e->src);
      jump_block->count = count;

      /* Make sure new block ends up in correct hot/cold section.  */
      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;

      /* Redirect old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = profile_probability::always ();

      /* If e->src was previously region crossing, it no longer is
	 and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If asm goto has any label refs to target's label,
	 add also edge from asm goto bb to target.  */
      if (asm_goto_edge)
	{
	  new_edge->probability /= 2;
	  jump_block->count /= 2;
	  edge new_edge2 = make_edge (new_edge->src, target,
				      e->flags & ~EDGE_FALLTHRU);
	  new_edge2->probability = probability - new_edge->probability;
	}

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (jump_label == ret_rtx)
	emit_jump_insn_after_setloc (targetm.gen_return (),
				     BB_END (jump_block), loc);
      else
	{
	  gcc_assert (jump_label == simple_return_rtx);
	  emit_jump_insn_after_setloc (targetm.gen_simple_return (),
				       BB_END (jump_block), loc);
	}
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx_code_label *label = block_label (target);
      emit_jump_insn_after_setloc (targetm.gen_jump (label),
				   BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be fallthru edge.  Emit needed jump instruction
   (and possibly create new basic block) to make edge non-fallthru.
   Return newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

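/* The public entry point force_nonfallthru (a cfghooks wrapper) resolves
   to the function above in cfgrtl mode.  A sketch of a common use, as in
   fixup_new_cold_bb above:

     if ((e->flags & EDGE_FALLTHRU)
	 && BB_PARTITION (e->src) != BB_PARTITION (e->dest))
       force_nonfallthru (e);

   which may create a new jump block on the edge.  */
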
/* Redirect edge even at the expense of creating new jump insn or
   basic block.  Return new basic block if created, NULL otherwise.
   Conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx_insn *q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (NONDEBUG_INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
	  || single_succ_p (b)))
    {
      rtx_insn *label;
      rtx_jump_table_data *table;

      if (tablejump_p (q, &label, &table))
	{
	  /* The label is likely mentioned in some instruction before
	     the tablejump and might not be DCEd, so turn it into
	     a note instead and move before the tablejump that is going to
	     be deleted.  */
	  const char *name = LABEL_NAME (label);
	  PUT_CODE (label, NOTE);
	  NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (label) = name;
	  reorder_insns (label, label, PREV_INSN (q));
	  delete_insn (table);
	}

      q = PREV_INSN (q);
    }
  /* Unconditional jumps with side-effects (i.e. which we can't just delete
     together with the barrier) should never have a fallthru edge.  */
  else if (JUMP_P (q) && any_uncondjump_p (q))
    return;

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}

/* Should move basic block BB after basic block AFTER.  NIY.  */

static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
		      basic_block after ATTRIBUTE_UNUSED)
{
  return false;
}

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
	return bb;
    }
  /* Return bb before the exit block.  */
  return bb->prev_bb;
}

1851/* Split a (typically critical) edge. Return the new block.
1852 The edge must not be abnormal.
1853
1854 ??? The code generally expects to be called on critical edges.
1855 The case of a block ending in an unconditional jump to a
1856 block with multiple predecessors is not handled optimally. */
1857
1858static basic_block
1859rtl_split_edge (edge edge_in)
1860{
1861 basic_block bb, new_bb;
1862 rtx_insn *before;
1863
1864 /* Abnormal edges cannot be split. */
1865 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1866
1867 /* We are going to place the new block in front of edge destination.
1868 Avoid existence of fallthru predecessors. */
1869 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1870 {
1871 edge e = find_fallthru_edge (edge_in->dest->preds);
1872
1873 if (e)
1874 force_nonfallthru (e);
1875 }
1876
1877 /* Create the basic block note. */
1878 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1879 before = BB_HEAD (edge_in->dest);
1880 else
1881 before = NULL;
1882
1883 /* If this is a fall through edge to the exit block, the blocks might
1884 not be adjacent, and the right place is after the source. */
1885 if ((edge_in->flags & EDGE_FALLTHRU)
1886 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1887 {
1888 before = NEXT_INSN (BB_END (edge_in->src));
1889 bb = create_basic_block (before, NULL, edge_in->src);
1890 BB_COPY_PARTITION (bb, edge_in->src);
1891 }
1892 else
1893 {
1894 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1895 {
1896 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1897 BB_COPY_PARTITION (bb, edge_in->dest);
1898 }
1899 else
1900 {
1901 basic_block after = edge_in->dest->prev_bb;
1902 /* If this is post-bb reordering, and the edge crosses a partition
1903 boundary, the new block needs to be inserted in the bb chain
1904 at the end of the src partition (since we put the new bb into
1905 that partition, see below). Otherwise we may end up creating
1906 an extra partition crossing in the chain, which is illegal.
1907 It can't go after the src, because src may have a fall-through
1908 to a different block. */
1909 if (crtl->bb_reorder_complete
1910 && (edge_in->flags & EDGE_CROSSING))
1911 {
1912 after = last_bb_in_partition (edge_in->src);
1913 before = get_last_bb_insn (after);
1914 /* The instruction following the last bb in partition should
1915 be a barrier, since it cannot end in a fall-through. */
1916 gcc_checking_assert (BARRIER_P (before));
1917 before = NEXT_INSN (before);
1918 }
1919 bb = create_basic_block (before, NULL, after);
1920 /* Put the split bb into the src partition, to avoid creating
1921 a situation where a cold bb dominates a hot bb, in the case
1922 where src is cold and dest is hot. The src will dominate
1923 the new bb (whereas it might not have dominated dest). */
1924 BB_COPY_PARTITION (bb, edge_in->src);
1925 }
1926 }
1927
1928 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1929
1930 /* Can't allow a region crossing edge to be fallthrough. */
1931 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1932 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1933 {
1934 new_bb = force_nonfallthru (single_succ_edge (bb));
1935 gcc_assert (!new_bb);
1936 }
1937
1938 /* For non-fallthru edges, we must adjust the predecessor's
1939 jump instruction to target our new block. */
1940 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1941 {
1942 edge redirected = redirect_edge_and_branch (edge_in, bb);
1943 gcc_assert (redirected);
1944 }
1945 else
1946 {
1947 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1948 {
1949 /* For asm goto even splitting of fallthru edge might
1950 need insn patching, as other labels might point to the
1951 old label. */
1952 rtx_insn *last = BB_END (edge_in->src);
1953 if (last
1954 && JUMP_P (last)
1955 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1956 && (extract_asm_operands (PATTERN (last))
1957 || JUMP_LABEL (last) == before)
1958 && patch_jump_insn (last, before, bb))
1959 df_set_bb_dirty (edge_in->src);
1960 }
1961 redirect_edge_succ (edge_in, bb);
1962 }
1963
1964 return bb;
1965}
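
/* Illustrative sketch (not code from this file): passes do not call
 rtl_split_edge directly, they reach it through the split_edge hook
 documented in cfghooks.h, e.g. to give a critical non-abnormal edge
 its own block before inserting code on it:

 if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
 {
 basic_block new_bb = split_edge (e);
 gcc_assert (single_pred_p (new_bb));
 }

 The new block normally falls through to the old destination of E
 (see above for the partition-crossing exception). */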
1966
1967/* Queue instructions for insertion on an edge between two basic blocks.
1968 The new instructions and basic blocks (if any) will not appear in the
1969 CFG until commit_edge_insertions is called. */
1970
1971void
1972insert_insn_on_edge (rtx pattern, edge e)
1973{
1974 /* We cannot insert instructions on an abnormal critical edge.
1975 It will be easier to find the culprit if we die now. */
1976 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1977
1978 if (e->insns.r == NULL_RTX)
1979 start_sequence ();
1980 else
1981 push_to_sequence (e->insns.r);
1982
1983 emit_insn (pattern);
1984
1985 e->insns.r = get_insns ();
1986 end_sequence ();
1987}
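
/* Usage sketch, with DEST and SRC standing for registers some hypothetical
 pass owns: queue a copy on edge E, then materialize everything once all
 edges have been processed:

 insert_insn_on_edge (gen_move_insn (dest, src), e);
 ...
 commit_edge_insertions ();

 Repeated calls on the same edge append to the pending sequence; nothing
 appears in the insn stream until commit_edge_insertions runs, which may
 split E and create new basic blocks. */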
1988
1989/* Update the CFG for the instructions queued on edge E. */
1990
1991void
1992commit_one_edge_insertion (edge e)
1993{
1994 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
1995 basic_block bb;
1996
1997 /* Pull the insns off the edge now since the edge might go away. */
1998 insns = e->insns.r;
1999 e->insns.r = NULL;
2000
2001 /* Figure out where to put these insns. If the destination has
2002 one predecessor, insert there. Except for the exit block. */
2003 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2004 {
2005 bb = e->dest;
2006
2007 /* Get the location correct wrt a code label, and "nice" wrt
2008 a basic block note, and before everything else. */
2009 tmp = BB_HEAD (bb);
2010 if (LABEL_P (tmp))
2011 tmp = NEXT_INSN (tmp);
2012 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2013 tmp = NEXT_INSN (tmp);
2014 if (tmp == BB_HEAD (bb))
2015 before = tmp;
2016 else if (tmp)
2017 after = PREV_INSN (tmp);
2018 else
2019 after = get_last_insn ();
2020 }
2021
2022 /* If the source has one successor and the edge is not abnormal,
2023 insert there. Except for the entry block.
2024 Don't do this if the predecessor ends in a jump other than
2025 unconditional simple jump. E.g. for asm goto that points all
2026 its labels at the fallthru basic block, we can't insert instructions
2027 before the asm goto, as the asm goto can have various side effects,
2028 and can't emit instructions after the asm goto, as it must end
2029 the basic block. */
2030 else if ((e->flags & EDGE_ABNORMAL) == 0
2031 && single_succ_p (e->src)
2032 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2033 && (!JUMP_P (BB_END (e->src))
2034 || simplejump_p (BB_END (e->src))))
2035 {
2036 bb = e->src;
2037
2038 /* It is possible to have a non-simple jump here. Consider a target
2039 where some forms of unconditional jumps clobber a register. This
2040 happens on the fr30 for example.
2041
2042 We know this block has a single successor, so we can just emit
2043 the queued insns before the jump. */
2044 if (JUMP_P (BB_END (bb)))
2045 before = BB_END (bb);
2046 else
2047 {
2048 /* We'd better be fallthru, or we've lost track of what's what. */
2049 gcc_assert (e->flags & EDGE_FALLTHRU);
2050
2051 after = BB_END (bb);
2052 }
2053 }
2054
2055 /* Otherwise we must split the edge. */
2056 else
2057 {
2058 bb = split_edge (e);
2059
2060 /* If E crossed a partition boundary, we needed to make bb end in
2061 a region-crossing jump, even though it was originally fallthru. */
2062 if (JUMP_P (BB_END (bb)))
2063 before = BB_END (bb);
2064 else
2065 after = BB_END (bb);
2066 }
2067
2068 /* Now that we've found the spot, do the insertion. */
2069 if (before)
2070 {
2071 emit_insn_before_noloc (insns, before, bb);
2072 last = prev_nonnote_insn (before);
2073 }
2074 else
2075 last = emit_insn_after_noloc (insns, after, bb);
2076
2077 if (returnjump_p (last))
2078 {
2079 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2080 This is not currently a problem because this only happens
2081 for the (single) epilogue, which already has a fallthru edge
2082 to EXIT. */
2083
2084 e = single_succ_edge (bb);
2085 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2086 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2087
2088 e->flags &= ~EDGE_FALLTHRU;
2089 emit_barrier_after (last);
2090
2091 if (before)
2092 delete_insn (before);
2093 }
2094 else
2095 gcc_assert (!JUMP_P (last));
2096}
2097
2098/* Update the CFG for all queued instructions. */
2099
2100void
2101commit_edge_insertions (void)
2102{
2103 basic_block bb;
2104
2105 /* Optimization passes that invoke this routine can cause hot blocks
2106 previously reached by both hot and cold blocks to become dominated only
2107 by cold blocks. This will cause the verification below to fail,
2108 and leave now-cold code in the hot section. In some cases this
2109 may only be visible after newly unreachable blocks are deleted,
2110 which will be done by fixup_partitions. */
2111 fixup_partitions ();
2112
2113 if (!currently_expanding_to_rtl)
2114 checking_verify_flow_info ();
2115
2116 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2117 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2118 {
2119 edge e;
2120 edge_iterator ei;
2121
2122 FOR_EACH_EDGE (e, ei, bb->succs)
2123 if (e->insns.r)
2124 {
2125 if (currently_expanding_to_rtl)
2126 rebuild_jump_labels_chain (e->insns.r);
2127 commit_one_edge_insertion (e);
2128 }
2129 }
2130}
2131
2132
2133/* Print out RTL-specific basic block information (live information
2134 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2135 documented in dumpfile.h. */
2136
2137static void
2138rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2139{
2140 char *s_indent;
2141
2142 s_indent = (char *) alloca ((size_t) indent + 1);
2143 memset (s_indent, ' ', (size_t) indent);
2144 s_indent[indent] = '\0';
2145
2146 if (df && (flags & TDF_DETAILS))
2147 {
2148 df_dump_top (bb, outf);
2149 putc ('\n', outf);
2150 }
2151
2152 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK
2153 && rtl_bb_info_initialized_p (bb))
2154 {
2155 rtx_insn *last = BB_END (bb);
2156 if (last)
2157 last = NEXT_INSN (last);
2158 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2159 {
2160 if (flags & TDF_DETAILS)
2161 df_dump_insn_top (insn, outf);
2162 if (! (flags & TDF_SLIM))
2163 print_rtl_single (outf, insn);
2164 else
2165 dump_insn_slim (outf, insn);
2166 if (flags & TDF_DETAILS)
2167 df_dump_insn_bottom (insn, outf);
2168 }
2169 }
2170
2171 if (df && (flags & TDF_DETAILS))
2172 {
2173 df_dump_bottom (bb, outf);
2174 putc ('\n', outf);
2175 }
2176
2177}
2178
2179/* Like dump_function_to_file, but for RTL. Print out dataflow information
2180 for the start of each basic block. FLAGS are the TDF_* masks documented
2181 in dumpfile.h. */
2182
2183void
2184print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2185{
2186 const rtx_insn *tmp_rtx;
2187 if (rtx_first == 0)
2188 fprintf (outf, "(nil)\n");
2189 else
2190 {
2191 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2192 int max_uid = get_max_uid ();
2193 basic_block *start = XCNEWVEC (basic_block, max_uid);
2194 basic_block *end = XCNEWVEC (basic_block, max_uid);
2195 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2196 basic_block bb;
2197
2198 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2199 insns, but the CFG is not maintained so the basic block info
2200 is not reliable. Therefore it's omitted from the dumps. */
2201 if (! (cfun->curr_properties & PROP_cfg))
2202 flags &= ~TDF_BLOCKS;
2203
2204 if (df)
2205 df_dump_start (outf);
2206
2207 if (cfun->curr_properties & PROP_cfg)
2208 {
2209 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2210 {
2211 rtx_insn *x;
2212
2213 start[INSN_UID (BB_HEAD (bb))] = bb;
2214 end[INSN_UID (BB_END (bb))] = bb;
2215 if (flags & TDF_BLOCKS)
2216 {
2217 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2218 {
2219 enum bb_state state = IN_MULTIPLE_BB;
2220
2221 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2222 state = IN_ONE_BB;
2223 in_bb_p[INSN_UID (x)] = state;
2224
2225 if (x == BB_END (bb))
2226 break;
2227 }
2228 }
2229 }
2230 }
2231
2232 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2233 {
2234 if (flags & TDF_BLOCKS)
2235 {
2236 bb = start[INSN_UID (tmp_rtx)];
2237 if (bb != NULL)
2238 {
2239 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2240 if (df && (flags & TDF_DETAILS))
2241 df_dump_top (bb, outf);
2242 }
2243
2244 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2245 && !NOTE_P (tmp_rtx)
2246 && !BARRIER_P (tmp_rtx))
2247 fprintf (outf, ";; Insn is not within a basic block\n");
2248 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2249 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2250 }
2251
2252 if (flags & TDF_DETAILS)
2253 df_dump_insn_top (tmp_rtx, outf);
2254 if (! (flags & TDF_SLIM))
2255 print_rtl_single (outf, tmp_rtx);
2256 else
2257 dump_insn_slim (outf, tmp_rtx);
2258 if (flags & TDF_DETAILS)
2259 df_dump_insn_bottom (tmp_rtx, outf);
2260
2261 bb = end[INSN_UID (tmp_rtx)];
2262 if (bb != NULL)
2263 {
2264 if (flags & TDF_BLOCKS)
2265 {
2266 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2267 if (df && (flags & TDF_DETAILS))
2268 df_dump_bottom (bb, outf);
2269 putc ('\n', outf);
2270 }
2271 /* Emit a hint if the fallthrough target of the current basic block
2272 isn't the block placed right after it. */
2273 else if (EDGE_COUNT (bb->succs) > 0)
2274 {
2275 gcc_assert (BB_END (bb) == tmp_rtx);
2276 const rtx_insn *ninsn = NEXT_INSN (tmp_rtx);
2277 /* Bypass intervening deleted-insn notes and debug insns. */
2278 while (ninsn
2279 && !NONDEBUG_INSN_P (ninsn)
2280 && !start[INSN_UID (ninsn)])
2281 ninsn = NEXT_INSN (ninsn);
2282 edge e = find_fallthru_edge (bb->succs);
2283 if (e && ninsn)
2284 {
2285 basic_block dest = e->dest;
2286 if (start[INSN_UID (ninsn)] != dest)
2287 fprintf (outf, "%s ; pc falls through to BB %d\n",
2288 print_rtx_head, dest->index);
2289 }
2290 }
2291 }
2292 }
2293
2294 free (start);
2295 free (end);
2296 free (in_bb_p);
2297 }
2298}
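
/* For example, the edge verifier below dumps the whole function with
 block boundaries and dataflow details using:

 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS); */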
2299
2300/* Update the branch probability of BB if a REG_BR_PROB is present. */
2301
2302void
2303update_br_prob_note (basic_block bb)
2304{
2305 rtx note;
2306 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2307 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2308 {
2309 if (note)
2310 {
2311 rtx *note_link, this_rtx;
2312
2313 note_link = &REG_NOTES (BB_END (bb));
2314 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2315 if (this_rtx == note)
2316 {
2317 *note_link = XEXP (this_rtx, 1);
2318 break;
2319 }
2320 }
2321 return;
2322 }
2323 if (!note
2324 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2325 return;
2326 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2327}
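
/* Sketch of the expected calling pattern (illustrative): a pass that
 recomputes the out-edge probabilities of a conditional jump keeps the
 note in sync afterwards, with PROB a profile_probability it derived:

 BRANCH_EDGE (bb)->probability = prob;
 FALLTHRU_EDGE (bb)->probability = prob.invert ();
 update_br_prob_note (bb); */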
2328
2329/* Get the last insn associated with block BB (that includes barriers and
2330 tablejumps after BB). */
2331rtx_insn *
2332get_last_bb_insn (basic_block bb)
2333{
2334 rtx_jump_table_data *table;
2335 rtx_insn *tmp;
2336 rtx_insn *end = BB_END (bb);
2337
2338 /* Include any jump table following the basic block. */
2339 if (tablejump_p (end, NULL, &table))
2340 end = table;
2341
2342 /* Include any barriers that may follow the basic block. */
2343 tmp = next_nonnote_nondebug_insn_bb (end);
2344 while (tmp && BARRIER_P (tmp))
2345 {
2346 end = tmp;
2347 tmp = next_nonnote_nondebug_insn_bb (end);
2348 }
2349
2350 return end;
2351}
2352
2353/* Add all BBs reachable from entry via hot paths into the SET. */
2354
2355void
2356find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2357{
2358 auto_vec<basic_block, 64> worklist;
2359
2360 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2361 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2362
2363 while (worklist.length () > 0)
2364 {
2365 basic_block bb = worklist.pop ();
2366 edge_iterator ei;
2367 edge e;
2368
2369 FOR_EACH_EDGE (e, ei, bb->succs)
2370 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2371 && !set->add (e->dest))
2372 worklist.safe_push (e->dest);
2373 }
2374}
2375
2376/* Sanity check partition hotness to ensure that basic blocks in
2377 the cold partition don't dominate basic blocks in the hot partition.
2378 If FLAG_ONLY is true, report violations as errors. Otherwise
2379 re-mark the dominated blocks as cold, since this is run after
2380 cfg optimizations that may make hot blocks previously reached
2381 by both hot and cold blocks now only reachable along cold paths. */
2382
2383static auto_vec<basic_block>
2384find_partition_fixes (bool flag_only)
2385{
2386 basic_block bb;
2387 auto_vec<basic_block> bbs_to_fix;
2388 hash_set<basic_block> set;
2389
2390 /* Callers check this. */
2391 gcc_checking_assert (crtl->has_bb_partition);
2392
2393 find_bbs_reachable_by_hot_paths (&set);
2394
2395 FOR_EACH_BB_FN (bb, cfun)
2396 if (!set.contains (bb)
2397 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2398 {
2399 if (flag_only)
2400 error ("non-cold basic block %d reachable only "
2401 "by paths crossing the cold partition", bb->index);
2402 else
2403 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2404 bbs_to_fix.safe_push (bb);
2405 }
2406
2407 return bbs_to_fix;
2408}
2409
2410/* Perform cleanup on the hot/cold bb partitioning after optimization
2411 passes that modify the cfg. */
2412
2413void
2414fixup_partitions (void)
2415{
2416 if (!crtl->has_bb_partition)
2417 return;
2418
2419 /* Delete any blocks that became unreachable and weren't
2420 already cleaned up, for example during edge forwarding
2421 and convert_jumps_to_returns. This will expose more
2422 opportunities for fixing the partition boundaries here.
2423 Also, the calculation of the dominance graph during verification
2424 will assert if there are unreachable nodes. */
2425 delete_unreachable_blocks ();
2426
2427 /* If there are partitions, do a sanity check on them: A basic block in
2428 a cold partition cannot dominate a basic block in a hot partition.
2429 Fixup any that now violate this requirement, as a result of edge
2430 forwarding and unreachable block deletion. */
2431 auto_vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2432
2433 /* Do the partition fixup after all necessary blocks have been converted to
2434 cold, so that we only update the region crossings in the minimum number of
2435 places, which can require forcing edges to be non fallthru. */
2436 if (! bbs_to_fix.is_empty ())
2437 {
2438 do
2439 {
2440 basic_block bb = bbs_to_fix.pop ();
2441 fixup_new_cold_bb (bb);
2442 }
2443 while (! bbs_to_fix.is_empty ());
2444
2445 /* Fix up hot cold block grouping if needed. */
2446 if (crtl->bb_reorder_complete && current_ir_type () == IR_RTL_CFGRTL)
2447 {
2448 basic_block bb, first = NULL, second = NULL;
2449 int current_partition = BB_UNPARTITIONED;
2450
2451 FOR_EACH_BB_FN (bb, cfun)
2452 {
2453 if (current_partition != BB_UNPARTITIONED
2454 && BB_PARTITION (bb) != current_partition)
2455 {
2456 if (first == NULL)
2457 first = bb;
2458 else if (second == NULL)
2459 second = bb;
2460 else
2461 {
2462 /* If we switch partitions for the 3rd, 5th etc. time,
2463 move bbs first (inclusive) .. second (exclusive) right
2464 before bb. */
2465 basic_block prev_first = first->prev_bb;
2466 basic_block prev_second = second->prev_bb;
2467 basic_block prev_bb = bb->prev_bb;
2468 prev_first->next_bb = second;
2469 second->prev_bb = prev_first;
2470 prev_second->next_bb = bb;
2471 bb->prev_bb = prev_second;
2472 prev_bb->next_bb = first;
2473 first->prev_bb = prev_bb;
2474 rtx_insn *prev_first_insn = PREV_INSN (BB_HEAD (first));
2475 rtx_insn *prev_second_insn
2476 = PREV_INSN (BB_HEAD (second));
2477 rtx_insn *prev_bb_insn = PREV_INSN (BB_HEAD (bb));
2478 SET_NEXT_INSN (prev_first_insn) = BB_HEAD (second);
2479 SET_PREV_INSN (BB_HEAD (second)) = prev_first_insn;
2480 SET_NEXT_INSN (prev_second_insn) = BB_HEAD (bb);
2481 SET_PREV_INSN (BB_HEAD (bb)) = prev_second_insn;
2482 SET_NEXT_INSN (prev_bb_insn) = BB_HEAD (first);
2483 SET_PREV_INSN (BB_HEAD (first)) = prev_bb_insn;
2484 second = NULL;
2485 }
2486 }
2487 current_partition = BB_PARTITION (bb);
2488 }
2489 gcc_assert (!second);
2490 }
2491 }
2492}
2493
2494/* Verify, in the basic block chain, that there is at most one switch
2495 between hot/cold partitions. This condition will not be true until
2496 after reorder_basic_blocks is called. */
2497
2498static bool
2499verify_hot_cold_block_grouping (void)
2500{
2501 basic_block bb;
2502 bool err = false;
2503 bool switched_sections = false;
2504 int current_partition = BB_UNPARTITIONED;
2505
2506 /* Even after bb reordering is complete, we go into cfglayout mode
2507 again (in compgoto). Ensure we don't call this before going back
2508 into linearized RTL when any layout fixes would have been committed. */
2509 if (!crtl->bb_reorder_complete
2510 || current_ir_type () != IR_RTL_CFGRTL)
2511 return err;
2512
2513 FOR_EACH_BB_FN (bb, cfun)
2514 {
2515 if (current_partition != BB_UNPARTITIONED
2516 && BB_PARTITION (bb) != current_partition)
2517 {
2518 if (switched_sections)
2519 {
2520 error ("multiple hot/cold transitions found (bb %i)",
2521 bb->index);
2522 err = true;
2523 }
2524 else
2525 switched_sections = true;
2526
2527 if (!crtl->has_bb_partition)
2528 error ("partition found but function partition flag not set");
2529 }
2530 current_partition = BB_PARTITION (bb);
2531 }
2532
2533 return err;
2534}
2535
2536
2537/* Perform several checks on the edges out of each block, such as
2538 the consistency of the branch probabilities, the correctness
2539 of hot/cold partition crossing edges, and the number of expected
2540 successor edges. Also verify that the dominance relationship
2541 between hot/cold blocks is sane. */
2542
2543static bool
2544rtl_verify_edges (void)
2545{
2546 bool err = false;
2547 basic_block bb;
2548
2549 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2550 {
2551 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2552 int n_eh = 0, n_abnormal = 0;
2553 edge e, fallthru = NULL;
2554 edge_iterator ei;
2555 rtx note;
2556 bool has_crossing_edge = false;
2557
2558 if (JUMP_P (BB_END (bb))
2559 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2560 && EDGE_COUNT (bb->succs) >= 2
2561 && any_condjump_p (BB_END (bb)))
2562 {
2563 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2564 {
2565 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2566 {
2567 error ("verify_flow_info: "
2568 "REG_BR_PROB is set but cfg probability is not");
2569 err = true;
2570 }
2571 }
2572 else if (XINT (note, 0)
2573 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2574 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2575 {
2576 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2577 XINT (note, 0),
2578 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2579 err = true;
2580 }
2581 }
2582
2583 FOR_EACH_EDGE (e, ei, bb->succs)
2584 {
2585 bool is_crossing;
2586
2587 if (e->flags & EDGE_FALLTHRU)
2588 n_fallthru++, fallthru = e;
2589
2590 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2591 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2592 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2593 has_crossing_edge |= is_crossing;
2594 if (e->flags & EDGE_CROSSING)
2595 {
2596 if (!is_crossing)
2597 {
2598 error ("EDGE_CROSSING incorrectly set across same section");
2599 err = true;
2600 }
2601 if (e->flags & EDGE_FALLTHRU)
2602 {
2603 error ("fallthru edge crosses section boundary in bb %i",
2604 e->src->index);
2605 err = true;
2606 }
2607 if (e->flags & EDGE_EH)
2608 {
2609 error ("EH edge crosses section boundary in bb %i",
2610 e->src->index);
2611 err = true;
2612 }
2613 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2614 {
2615 error ("No region crossing jump at section boundary in bb %i",
2616 bb->index);
2617 err = true;
2618 }
2619 }
2620 else if (is_crossing)
2621 {
2622 error ("EDGE_CROSSING missing across section boundary");
2623 err = true;
2624 }
2625
2626 if ((e->flags & ~(EDGE_DFS_BACK
2627 | EDGE_CAN_FALLTHRU
2628 | EDGE_IRREDUCIBLE_LOOP
2629 | EDGE_LOOP_EXIT
2630 | EDGE_CROSSING
2631 | EDGE_PRESERVE)) == 0)
2632 n_branch++;
2633
2634 if (e->flags & EDGE_ABNORMAL_CALL)
2635 n_abnormal_call++;
2636
2637 if (e->flags & EDGE_SIBCALL)
2638 n_sibcall++;
2639
2640 if (e->flags & EDGE_EH)
2641 n_eh++;
2642
2643 if (e->flags & EDGE_ABNORMAL)
2644 n_abnormal++;
2645 }
2646
2647 if (!has_crossing_edge
2648 && JUMP_P (BB_END (bb))
2649 && CROSSING_JUMP_P (BB_END (bb)))
2650 {
2651 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2652 error ("Region crossing jump across same section in bb %i",
2653 bb->index);
2654 err = true;
2655 }
2656
2657 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2658 {
2659 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2660 err = true;
2661 }
2662 if (n_eh > 1)
2663 {
2664 error ("too many exception handling edges in bb %i", bb->index);
2665 err = true;
2666 }
2667 if (n_branch
2668 && (!JUMP_P (BB_END (bb))
2669 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2670 || any_condjump_p (BB_END (bb))))))
2671 {
2672 error ("too many outgoing branch edges from bb %i", bb->index);
2673 err = true;
2674 }
2675 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2676 {
2677 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2678 err = true;
2679 }
2680 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2681 {
2682 error ("wrong number of branch edges after unconditional jump"
2683 " in bb %i", bb->index);
2684 err = true;
2685 }
2686 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2687 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2688 {
2689 error ("wrong amount of branch edges after conditional jump"
2690 " in bb %i", bb->index);
2691 err = true;
2692 }
2693 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2694 {
2695 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2696 err = true;
2697 }
2698 if (n_sibcall && !CALL_P (BB_END (bb)))
2699 {
2700 error ("sibcall edges for non-call insn in bb %i", bb->index);
2701 err = true;
2702 }
2703 if (n_abnormal > n_eh
2704 && !(CALL_P (BB_END (bb))
2705 && n_abnormal == n_abnormal_call + n_sibcall)
2706 && (!JUMP_P (BB_END (bb))
2707 || any_condjump_p (BB_END (bb))
2708 || any_uncondjump_p (BB_END (bb))))
2709 {
2710 error ("abnormal edges for no purpose in bb %i", bb->index);
2711 err = true;
2712 }
2713
2714 int has_eh = -1;
2715 FOR_EACH_EDGE (e, ei, bb->preds)
2716 {
2717 if (has_eh == -1)
2718 has_eh = (e->flags & EDGE_EH);
2719 if ((e->flags & EDGE_EH) == has_eh)
2720 continue;
2721 error ("EH incoming edge mixed with non-EH incoming edges "
2722 "in bb %i", bb->index);
2723 err = true;
2724 break;
2725 }
2726 }
2727
2728 /* If there are partitions, do a sanity check on them: A basic block in
2729 a cold partition cannot dominate a basic block in a hot partition. */
2730 if (crtl->has_bb_partition && !err
2731 && current_ir_type () == IR_RTL_CFGLAYOUT)
2732 {
2733 auto_vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2734 err = !bbs_to_fix.is_empty ();
2735 }
2736
2737 /* Clean up. */
2738 return err;
2739}
2740
2741/* Checks on the instructions within blocks. Currently checks that each
2742 block starts with a basic block note, and that basic block notes and
2743 control flow jumps are not found in the middle of the block. */
2744
2745static bool
2746rtl_verify_bb_insns (void)
2747{
2748 rtx_insn *x;
2749 bool err = false;
2750 basic_block bb;
2751
2752 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2753 {
2754 /* Now check the header of the basic
2755 block. It ought to contain an optional CODE_LABEL followed
2756 by NOTE_BASIC_BLOCK. */
2757 x = BB_HEAD (bb);
2758 if (LABEL_P (x))
2759 {
2760 if (BB_END (bb) == x)
2761 {
2762 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2763 bb->index);
2764 err = true;
2765 }
2766
2767 x = NEXT_INSN (x);
2768 }
2769
2770 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2771 {
2772 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2773 bb->index);
2774 err = true;
2775 }
2776
2777 if (BB_END (bb) == x)
2778 /* Do checks for empty blocks here. */
2779 ;
2780 else
2781 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2782 {
2783 if (NOTE_INSN_BASIC_BLOCK_P (x))
2784 {
2785 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2786 INSN_UID (x), bb->index);
2787 err = true;
2788 }
2789
2790 if (x == BB_END (bb))
2791 break;
2792
2793 if (control_flow_insn_p (x))
2794 {
2795 error ("in basic block %d:", bb->index);
2796 fatal_insn ("flow control insn inside a basic block", x);
2797 }
2798 }
2799 }
2800
2801 /* Clean up. */
2802 return err;
2803}
2804
2805/* Verify that block pointers for instructions in basic blocks, headers and
2806 footers are set appropriately. */
2807
2808static bool
2809rtl_verify_bb_pointers (void)
2810{
2811 bool err = false;
2812 basic_block bb;
2813
2814 /* Check the general integrity of the basic blocks. */
2815 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2816 {
2817 rtx_insn *insn;
2818
2819 if (!(bb->flags & BB_RTL))
2820 {
2821 error ("BB_RTL flag not set for block %d", bb->index);
2822 err = true;
2823 }
2824
2825 FOR_BB_INSNS (bb, insn)
2826 if (BLOCK_FOR_INSN (insn) != bb)
2827 {
2828 error ("insn %d basic block pointer is %d, should be %d",
2829 INSN_UID (insn),
2830 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2831 bb->index);
2832 err = true;
2833 }
2834
2835 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2836 if (!BARRIER_P (insn)
2837 && BLOCK_FOR_INSN (insn) != NULL)
2838 {
2839 error ("insn %d in header of bb %d has non-NULL basic block",
2840 INSN_UID (insn), bb->index);
2841 err = true;
2842 }
2843 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2844 if (!BARRIER_P (insn)
2845 && BLOCK_FOR_INSN (insn) != NULL)
2846 {
2847 error ("insn %d in footer of bb %d has non-NULL basic block",
2848 INSN_UID (insn), bb->index);
2849 err = true;
2850 }
2851 }
2852
2853 /* Clean up. */
2854 return err;
2855}
2856
2857/* Verify the CFG and RTL consistency common for both underlying RTL and
2858 cfglayout RTL.
2859
2860 Currently it does the following checks:
2861
2862 - overlapping of basic blocks
2863 - insns with wrong BLOCK_FOR_INSN pointers
2864 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2865 - tails of basic blocks (ensure that boundary is necessary)
2866 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2867 and NOTE_INSN_BASIC_BLOCK
2868 - verify that no fall_thru edge crosses hot/cold partition boundaries
2869 - verify that there are no pending RTL branch predictions
2870 - verify that hot blocks are not dominated by cold blocks
2871
2872 In the future it can be extended to check a lot of other stuff as well
2873 (reachability of basic blocks, liveness information, etc.). */
2874
2875static bool
2876rtl_verify_flow_info_1 (void)
2877{
2878 bool err = false;
2879
2880 if (rtl_verify_bb_pointers ())
2881 err = true;
2882
2883 if (rtl_verify_bb_insns ())
2884 err = true;
2885
2886 if (rtl_verify_edges ())
2887 err = true;
2888
2889 return err;
2890}
2891
2892/* Walk the instruction chain and verify that bb head/end pointers
2893 are correct, and that instructions are in exactly one bb and have
2894 correct block pointers. */
2895
2896static bool
2897rtl_verify_bb_insn_chain (void)
2898{
2899 basic_block bb;
2900 bool err = false;
2901 rtx_insn *x;
2902 rtx_insn *last_head = get_last_insn ();
2903 basic_block *bb_info;
2904 const int max_uid = get_max_uid ();
2905
2906 bb_info = XCNEWVEC (basic_block, max_uid);
2907
2908 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2909 {
2910 rtx_insn *head = BB_HEAD (bb);
2911 rtx_insn *end = BB_END (bb);
2912
2913 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2914 {
2915 /* Verify the end of the basic block is in the INSN chain. */
2916 if (x == end)
2917 break;
2918
2919 /* And that the code outside of basic blocks has NULL bb field. */
2920 if (!BARRIER_P (x)
2921 && BLOCK_FOR_INSN (x) != NULL)
2922 {
2923 error ("insn %d outside of basic blocks has non-NULL bb field",
2924 INSN_UID (x));
2925 err = true;
2926 }
2927 }
2928
2929 if (!x)
2930 {
2931 error ("end insn %d for block %d not found in the insn stream",
2932 INSN_UID (end), bb->index);
2933 err = true;
2934 }
2935
2936 /* Work backwards from the end to the head of the basic block
2937 to verify the head is in the RTL chain. */
2938 for (; x != NULL_RTX; x = PREV_INSN (x))
2939 {
2940 /* While walking over the insn chain, verify insns appear
2941 in only one basic block. */
2942 if (bb_info[INSN_UID (x)] != NULL)
2943 {
2944 error ("insn %d is in multiple basic blocks (%d and %d)",
2945 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2946 err = true;
2947 }
2948
2949 bb_info[INSN_UID (x)] = bb;
2950
2951 if (x == head)
2952 break;
2953 }
2954 if (!x)
2955 {
2956 error ("head insn %d for block %d not found in the insn stream",
2957 INSN_UID (head), bb->index);
2958 err = true;
2959 }
2960
2961 last_head = PREV_INSN (x);
2962 }
2963
2964 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2965 {
2966 /* Check that the code before the first basic block has NULL
2967 bb field. */
2968 if (!BARRIER_P (x)
2969 && BLOCK_FOR_INSN (x) != NULL)
2970 {
2971 error ("insn %d outside of basic blocks has non-NULL bb field",
2972 INSN_UID (x));
2973 err = true;
2974 }
2975 }
2976 free (bb_info);
2977
2978 return err;
2979}
2980
2981/* Verify that fallthru edges point to adjacent blocks in layout order and
2982 that barriers exist after non-fallthru blocks. */
2983
2984static bool
2985rtl_verify_fallthru (void)
2986{
2987 basic_block bb;
2988 bool err = false;
2989
2990 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2991 {
2992 edge e;
2993
2994 e = find_fallthru_edge (bb->succs);
2995 if (!e)
2996 {
2997 rtx_insn *insn;
2998
2999 /* Ensure existence of barrier in BB with no fallthru edges. */
3000 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
3001 {
3002 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
3003 {
3004 error ("missing barrier after block %i", bb->index);
3005 err = true;
3006 break;
3007 }
3008 if (BARRIER_P (insn))
3009 break;
3010 }
3011 }
3012 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
3013 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3014 {
3015 rtx_insn *insn;
3016
3017 if (e->src->next_bb != e->dest)
3018 {
3019 error
3020 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
3021 e->src->index, e->dest->index);
3022 err = true;
3023 }
3024 else
3025 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
3026 insn = NEXT_INSN (insn))
3027 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
3028 {
3029 error ("verify_flow_info: Incorrect fallthru %i->%i",
3030 e->src->index, e->dest->index);
3031 error ("wrong insn in the fallthru edge");
3032 debug_rtx (insn);
3033 err = true;
3034 }
3035 }
3036 }
3037
3038 return err;
3039}
3040
3041/* Verify that blocks are laid out in consecutive order. While walking the
3042 instructions, verify that all expected instructions are inside the basic
3043 blocks, and that all returns are followed by barriers. */
3044
3045static bool
3046rtl_verify_bb_layout (void)
3047{
3048 basic_block bb;
3049 bool err = false;
3050 rtx_insn *x, *y;
3051 int num_bb_notes;
3052 rtx_insn * const rtx_first = get_insns ();
3053 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
3054
3055 num_bb_notes = 0;
3056
3057 for (x = rtx_first; x; x = NEXT_INSN (x))
3058 {
3059 if (NOTE_INSN_BASIC_BLOCK_P (x))
3060 {
3061 bb = NOTE_BASIC_BLOCK (x);
3062
3063 num_bb_notes++;
3064 if (bb != last_bb_seen->next_bb)
3065 internal_error ("basic blocks not laid down consecutively");
3066
3067 curr_bb = last_bb_seen = bb;
3068 }
3069
3070 if (!curr_bb)
3071 {
3072 switch (GET_CODE (x))
3073 {
3074 case BARRIER:
3075 case NOTE:
3076 break;
3077
3078 case CODE_LABEL:
3079 /* An ADDR_VEC is placed outside any basic block. */
3080 if (NEXT_INSN (x)
3081 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
3082 x = NEXT_INSN (x);
3083
3084 /* But in any case, non-deletable labels can appear anywhere. */
3085 break;
3086
3087 default:
3088 fatal_insn ("insn outside basic block", x);
3089 }
3090 }
3091
3092 if (JUMP_P (x)
3093 && returnjump_p (x) && ! condjump_p (x)
3094 && ! ((y = next_nonnote_nondebug_insn (x))
3095 && BARRIER_P (y)))
3096 fatal_insn ("return not followed by barrier", x);
3097
3098 if (curr_bb && x == BB_END (curr_bb))
3099 curr_bb = NULL;
3100 }
3101
3102 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3103 internal_error
3104 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3105 num_bb_notes, n_basic_blocks_for_fn (cfun));
3106
3107 return err;
3108}
3109
3110/* Verify the CFG and RTL consistency common for both underlying RTL and
3111 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
3112
3113 Currently it does the following checks:
3114 - all checks of rtl_verify_flow_info_1
3115 - test head/end pointers
3116 - check that blocks are laid out in consecutive order
3117 - check that all insns are in the basic blocks
3118 (except the switch handling code, barriers and notes)
3119 - check that all returns are followed by barriers
3120 - check that all fallthru edges point to the adjacent blocks
3121 - verify that there is a single hot/cold partition boundary after bbro */
3122
3123static bool
3124rtl_verify_flow_info (void)
3125{
3126 bool err = false;
3127
3128 if (rtl_verify_flow_info_1 ())
3129 err = true;
3130
3131 if (rtl_verify_bb_insn_chain ())
3132 err = true;
3133
3134 if (rtl_verify_fallthru ())
3135 err = true;
3136
3137 if (rtl_verify_bb_layout ())
3138 err = true;
3139
3140 if (verify_hot_cold_block_grouping ())
3141 err = true;
3142
3143 return err;
3144}
3145
3146/* Assume that the preceding pass has possibly eliminated jump instructions
3147 or converted the unconditional jumps. Eliminate the corresponding edges
3148 from the CFG. Return true if any edges are eliminated. */
3149
3150bool
3151purge_dead_edges (basic_block bb)
3152{
3153 edge e;
3154 rtx_insn *insn = BB_END (bb);
3155 rtx note;
3156 bool purged = false;
3157 bool found;
3158 edge_iterator ei;
3159
3160 if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
3161 do
3162 insn = PREV_INSN (insn);
3163 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3164
3165 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3166 if (NONJUMP_INSN_P (insn)
3167 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3168 {
3169 rtx eqnote;
3170
3171 if (! may_trap_p (PATTERN (insn))
3172 || ((eqnote = find_reg_equal_equiv_note (insn))
3173 && ! may_trap_p (XEXP (eqnote, 0))))
3174 remove_note (insn, note);
3175 }
3176
3177 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3178 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3179 {
3180 bool remove = false;
3181
3182 /* There are three types of edges we need to handle correctly here: EH
3183 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3184 latter can appear when nonlocal gotos are used. */
3185 if (e->flags & EDGE_ABNORMAL_CALL)
3186 {
3187 if (!CALL_P (insn))
3188 remove = true;
3189 else if (can_nonlocal_goto (insn))
3190 ;
3191 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3192 ;
3193 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3194 ;
3195 else
3196 remove = true;
3197 }
3198 else if (e->flags & EDGE_EH)
3199 remove = !can_throw_internal (insn);
3200
3201 if (remove)
3202 {
3203 remove_edge (e);
3204 df_set_bb_dirty (bb);
3205 purged = true;
3206 }
3207 else
3208 ei_next (&ei);
3209 }
3210
3211 if (JUMP_P (insn))
3212 {
3213 rtx note;
3214 edge b,f;
3215 edge_iterator ei;
3216
3217 /* We care only about conditional jumps and simplejumps. */
3218 if (!any_condjump_p (insn)
3219 && !returnjump_p (insn)
3220 && !simplejump_p (insn))
3221 return purged;
3222
3223 /* Branch probability/prediction notes are defined only for
3224 condjumps. We've possibly turned a condjump into a simplejump. */
3225 if (simplejump_p (insn))
3226 {
3227 note = find_reg_note (insn, REG_BR_PROB, NULL);
3228 if (note)
3229 remove_note (insn, note);
3230 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3231 remove_note (insn, note);
3232 }
3233
3234 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3235 {
3236 /* Avoid letting abnormal flags leak from computed jumps turned
3237 into simplejumps. */
3238
3239 e->flags &= ~EDGE_ABNORMAL;
3240
3241 /* See if this edge is one we should keep. */
3242 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3243 /* A conditional jump can fall through into the next
3244 block, so we should keep the edge. */
3245 {
3246 ei_next (&ei);
3247 continue;
3248 }
3249 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3250 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3251 /* If the destination block is the target of the jump,
3252 keep the edge. */
3253 {
3254 ei_next (&ei);
3255 continue;
3256 }
3257 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3258 && returnjump_p (insn))
3259 /* If the destination block is the exit block, and this
3260 instruction is a return, then keep the edge. */
3261 {
3262 ei_next (&ei);
3263 continue;
3264 }
3265 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3266 /* Keep the edges that correspond to exceptions thrown by
3267 this instruction and rematerialize the EDGE_ABNORMAL
3268 flag we just cleared above. */
3269 {
3270 e->flags |= EDGE_ABNORMAL;
3271 ei_next (&ei);
3272 continue;
3273 }
3274
3275 /* We do not need this edge. */
3276 df_set_bb_dirty (bb);
3277 purged = true;
3278 remove_edge (e);
3279 }
3280
3281 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3282 return purged;
3283
3284 if (dump_file)
3285 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3286
3287 if (!optimize)
3288 return purged;
3289
3290 /* Redistribute probabilities. */
3291 if (single_succ_p (bb))
3292 {
3293 single_succ_edge (bb)->probability = profile_probability::always ();
3294 }
3295 else
3296 {
3297 note = find_reg_note (insn, REG_BR_PROB, NULL);
3298 if (!note)
3299 return purged;
3300
3301 b = BRANCH_EDGE (bb);
3302 f = FALLTHRU_EDGE (bb);
3303 b->probability = profile_probability::from_reg_br_prob_note
3304 (XINT (note, 0));
3305 f->probability = b->probability.invert ();
3306 }
3307
3308 return purged;
3309 }
3310 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3311 {
3312 /* First, there should not be any EH or ABCALL edges resulting
3313 from non-local gotos and the like. If there were, we shouldn't
3314 have created the sibcall in the first place. Second, there
3315 should of course never have been a fallthru edge. */
3316 gcc_assert (single_succ_p (bb));
3317 gcc_assert (single_succ_edge (bb)->flags
3318 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3319
3320 return false;
3321 }
3322
3323 /* If we don't see a jump insn, we don't know exactly why the block would
3324 have been broken at this point. Look for a simple, non-fallthru edge,
3325 as these are only created by conditional branches. If we find such an
3326 edge we know that there used to be a jump here and can then safely
3327 remove all non-fallthru edges. */
3328 found = false;
3329 FOR_EACH_EDGE (e, ei, bb->succs)
3330 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3331 {
3332 found = true;
3333 break;
3334 }
3335
3336 if (!found)
3337 return purged;
3338
3339 /* Remove all but the fake and fallthru edges. The fake edge may be
3340 the only successor for this block in the case of noreturn
3341 calls. */
3342 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3343 {
3344 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3345 {
3346 df_set_bb_dirty (bb);
3347 remove_edge (e);
3348 purged = true;
3349 }
3350 else
3351 ei_next (&ei);
3352 }
3353
3354 gcc_assert (single_succ_p (bb));
3355
3356 single_succ_edge (bb)->probability = profile_probability::always ();
3357
3358 if (dump_file)
3359 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3360 bb->index);
3361 return purged;
3362}
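
/* Typical use (illustrative): after a pass turns the conditional jump
 ending BB into an unconditional one, the branch or fallthru edge that
 no longer corresponds to the insn must go away before the CFG is
 trusted again:

 if (purge_dead_edges (bb))
 ... some successor edges of BB were removed ... */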
3363
3364/* Search all basic blocks for potentially dead edges and purge them. Return
3365 true if some edge has been eliminated. */
3366
3367bool
3368purge_all_dead_edges (void)
3369{
3370 bool purged = false;
3371 basic_block bb;
3372
3373 FOR_EACH_BB_FN (bb, cfun)
3374 if (purge_dead_edges (bb))
3375 purged = true;
3376
3377 return purged;
3378}
3379
3380/* This is used by a few passes that emit some instructions after abnormal
3381 calls, moving the basic block's end, while they in fact do want to emit
3382 them on the fallthru edge. Look for abnormal call edges, search backward
3383 for the call in the block, and insert the instructions on the edge instead.
3384
3385 Similarly, handle instructions throwing exceptions internally.
3386
3387 Return true when instructions have been found and inserted on edges. */
3388
3389bool
3390fixup_abnormal_edges (void)
3391{
3392 bool inserted = false;
3393 basic_block bb;
3394
3395 FOR_EACH_BB_FN (bb, cfun)
3396 {
3397 edge e;
3398 edge_iterator ei;
3399
3400 /* Look for cases we are interested in - calls or instructions causing
3401 exceptions. */
3402 FOR_EACH_EDGE (e, ei, bb->succs)
3403 if ((e->flags & EDGE_ABNORMAL_CALL)
3404 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3405 == (EDGE_ABNORMAL | EDGE_EH)))
3406 break;
3407
3408 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3409 {
3410 rtx_insn *insn;
3411
3412 /* Get past the new insns generated. Allow notes, as the insns
3413 may be already deleted. */
3414 insn = BB_END (bb);
3415 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3416 && !can_throw_internal (insn)
3417 && insn != BB_HEAD (bb))
3418 insn = PREV_INSN (insn);
3419
3420 if (CALL_P (insn) || can_throw_internal (insn))
3421 {
3422 rtx_insn *stop, *next;
3423
3424 e = find_fallthru_edge (bb->succs);
3425
3426 stop = NEXT_INSN (BB_END (bb));
3427 BB_END (bb) = insn;
3428
3429 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3430 {
3431 next = NEXT_INSN (insn);
3432 if (INSN_P (insn))
3433 {
3434 delete_insn (insn);
3435
3436 /* Sometimes there's still the return value USE.
3437 If it's placed after a trapping call (i.e. that
3438 call is the last insn anyway), we have no fallthru
3439 edge. Simply delete this use and don't try to insert
3440 on the non-existent edge.
3441 Similarly, sometimes a call that can throw is
3442 followed in the source with __builtin_unreachable (),
3443 meaning that there is UB if the call returns rather
3444 than throws. If there weren't any instructions
3445 following such calls before, supposedly even the ones
3446 we've deleted aren't significant and can be
3447 removed. */
3448 if (e)
3449 {
3450 /* We're not deleting it, we're moving it. */
3451 insn->set_undeleted ();
3452 SET_PREV_INSN (insn) = NULL_RTX;
3453 SET_NEXT_INSN (insn) = NULL_RTX;
3454
3455 insert_insn_on_edge (insn, e);
3456 inserted = true;
3457 }
3458 }
3459 else if (!BARRIER_P (insn))
3460 set_block_for_insn (insn, NULL);
3461 }
3462 }
3463
3464 /* It may be that we don't find any trapping insn. In this
3465 case we discovered quite late that the insn that had been
3466 marked as can_throw_internal in fact couldn't trap at all.
3467 So we should in fact delete the EH edges out of the block. */
3468 else
3469 purge_dead_edges (bb);
3470 }
3471 }
3472
3473 return inserted;
3474}
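
/* Sketch of the caller protocol (illustrative): because the insns found
 are re-queued on the fallthru edge rather than moved directly, a caller
 such as the register allocator finishes with:

 if (fixup_abnormal_edges ())
 commit_edge_insertions (); */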
3475
3476/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
3477 Note that the INSN should be deleted *after* removing dead edges, so
3478 that the kept edge is the fallthrough edge for a (set (pc) (pc))
3479 but not for a (set (pc) (label_ref FOO)). */
3480
3481void
3482update_cfg_for_uncondjump (rtx_insn *insn)
3483{
3484 basic_block bb = BLOCK_FOR_INSN (insn);
3485 gcc_assert (BB_END (bb) == insn);
3486
3487 purge_dead_edges (bb);
3488
3489 if (current_ir_type () != IR_RTL_CFGLAYOUT)
3490 {
3491 if (!find_fallthru_edge (bb->succs))
3492 {
3493 auto barrier = next_nonnote_nondebug_insn (insn);
3494 if (!barrier || !BARRIER_P (barrier))
3495 emit_barrier_after (insn);
3496 }
3497 return;
3498 }
3499
3500 delete_insn (insn);
3501 if (EDGE_COUNT (bb->succs) == 1)
3502 {
3503 rtx_insn *insn;
3504
3505 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
3506
3507 /* Remove barriers from the footer if there are any. */
3508 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
3509 if (BARRIER_P (insn))
3510 {
3511 if (PREV_INSN (insn))
3512 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
3513 else
3514 BB_FOOTER (bb) = NEXT_INSN (insn);
3515 if (NEXT_INSN (insn))
3516 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
3517 }
3518 else if (LABEL_P (insn))
3519 break;
3520 }
3521}
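
/* Illustrative caller, assuming a combine-like pass has just validated
 replacing the PATTERN of JUMP with a simple (set (pc) ...) form, where
 NEW_PAT is a hypothetical pattern it built:

 if (validate_change (jump, &PATTERN (jump), new_pat, false))
 update_cfg_for_uncondjump (jump); */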
3522
3523/* Cut the insns from FIRST to LAST out of the insns stream. */
3524
3525rtx_insn *
3526unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3527{
3528 rtx_insn *prevfirst = PREV_INSN (first);
3529 rtx_insn *nextlast = NEXT_INSN (last);
3530
3531 SET_PREV_INSN (first) = NULL;
3532 SET_NEXT_INSN (last) = NULL;
3533 if (prevfirst)
3534 SET_NEXT_INSN (prevfirst) = nextlast;
3535 if (nextlast)
3536 SET_PREV_INSN (nextlast) = prevfirst;
3537 else
3538 set_last_insn (prevfirst);
3539 if (!prevfirst)
3540 set_first_insn (nextlast);
3541 return first;
3542}
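
/* A minimal sketch: detaching a block's insns this way is how the
 cfglayout code below shuffles headers and footers around:

 rtx_insn *chain = unlink_insn_chain (BB_HEAD (bb), BB_END (bb));

 CHAIN is then a self-contained doubly-linked list whose end pointers
 are NULL, ready to be re-spliced elsewhere. */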
3543
3544/* Skip over inter-block insns occurring after BB which are typically
3545 associated with BB (e.g., barriers). If there are any such insns,
3546 we return the last one. Otherwise, we return the end of BB. */
3547
3548static rtx_insn *
3549skip_insns_after_block (basic_block bb)
3550{
3551 rtx_insn *insn, *last_insn, *next_head, *prev;
3552
3553 next_head = NULL;
3554 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3555 next_head = BB_HEAD (bb->next_bb);
3556
3557 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3558 {
3559 if (insn == next_head)
3560 break;
3561
3562 switch (GET_CODE (insn))
3563 {
3564 case BARRIER:
3565 last_insn = insn;
3566 continue;
3567
3568 case NOTE:
3569 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_BLOCK_END);
3570 continue;
3571
3572 case CODE_LABEL:
3573 if (NEXT_INSN (insn)
3574 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3575 {
3576 insn = NEXT_INSN (insn);
3577 last_insn = insn;
3578 continue;
3579 }
3580 break;
3581
3582 default:
3583 break;
3584 }
3585
3586 break;
3587 }
3588
3589 /* It is possible to hit a contradictory sequence. For instance:
3590
3591 jump_insn
3592 NOTE_INSN_BLOCK_BEG
3593 barrier
3594
3595 Where the barrier belongs to the jump_insn, but the note does not. This
3596 can be created by removing the basic block originally following
3597 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3598
3599 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3600 {
3601 prev = PREV_INSN (insn);
3602 if (NOTE_P (insn))
3603 switch (NOTE_KIND (insn))
3604 {
3605 case NOTE_INSN_BLOCK_END:
3606 gcc_unreachable ();
3607 break;
3608 case NOTE_INSN_DELETED:
3609 case NOTE_INSN_DELETED_LABEL:
3610 case NOTE_INSN_DELETED_DEBUG_LABEL:
3611 continue;
3612 default:
3613 reorder_insns (insn, insn, last_insn);
3614 }
3615 }
3616
3617 return last_insn;
3618}
3619
3620/* Locate or create a label for a given basic block. */
3621
3622static rtx_insn *
3623label_for_bb (basic_block bb)
3624{
3625 rtx_insn *label = BB_HEAD (bb);
3626
3627 if (!LABEL_P (label))
3628 {
3629 if (dump_file)
3630 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3631
3632 label = block_label (bb);
3633 }
3634
3635 return label;
3636}
3637
3638/* Locate the effective beginning and end of the insn chain for each
3639 block, as defined by skip_insns_after_block above. */
3640
3641static void
3642record_effective_endpoints (void)
3643{
3644 rtx_insn *next_insn;
3645 basic_block bb;
3646 rtx_insn *insn;
3647
3648 for (insn = get_insns ();
3649 insn
3650 && NOTE_P (insn)
3651 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3652 insn = NEXT_INSN (insn))
3653 continue;
3654 /* No basic blocks at all? */
3655 gcc_assert (insn);
3656
3657 if (PREV_INSN (insn))
3658 cfg_layout_function_header =
3659 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3660 else
3661 cfg_layout_function_header = NULL;
3662
3663 next_insn = get_insns ();
3664 FOR_EACH_BB_FN (bb, cfun)
3665 {
3666 rtx_insn *end;
3667
3668 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3669 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3670 PREV_INSN (BB_HEAD (bb)));
3671 end = skip_insns_after_block (bb);
3672 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3673 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3674 next_insn = NEXT_INSN (BB_END (bb));
3675 }
3676
3677 cfg_layout_function_footer = next_insn;
3678 if (cfg_layout_function_footer)
3679 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3680}
3681
3682namespace {
3683
3684const pass_data pass_data_into_cfg_layout_mode =
3685{
3686 RTL_PASS, /* type */
3687 "into_cfglayout", /* name */
3688 OPTGROUP_NONE, /* optinfo_flags */
3689 TV_CFG, /* tv_id */
3690 0, /* properties_required */
3691 PROP_cfglayout, /* properties_provided */
3692 0, /* properties_destroyed */
3693 0, /* todo_flags_start */
3694 0, /* todo_flags_finish */
3695};
3696
3697class pass_into_cfg_layout_mode : public rtl_opt_pass
3698{
3699public:
3700 pass_into_cfg_layout_mode (gcc::context *ctxt)
3701 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3702 {}
3703
3704 /* opt_pass methods: */
3705 unsigned int execute (function *) final override
3706 {
3707 cfg_layout_initialize (0);
3708 return 0;
3709 }
3710
3711}; // class pass_into_cfg_layout_mode
3712
3713} // anon namespace
3714
3715rtl_opt_pass *
3716make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3717{
3718 return new pass_into_cfg_layout_mode (ctxt);
3719}
3720
3721namespace {
3722
3723const pass_data pass_data_outof_cfg_layout_mode =
3724{
3725 RTL_PASS, /* type */
3726 "outof_cfglayout", /* name */
3727 OPTGROUP_NONE, /* optinfo_flags */
3728 TV_CFG, /* tv_id */
3729 0, /* properties_required */
3730 0, /* properties_provided */
3731 PROP_cfglayout, /* properties_destroyed */
3732 0, /* todo_flags_start */
3733 0, /* todo_flags_finish */
3734};
3735
3736class pass_outof_cfg_layout_mode : public rtl_opt_pass
3737{
3738public:
3739 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3740 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3741 {}
3742
3743 /* opt_pass methods: */
3744 unsigned int execute (function *) final override;
3745
3746}; // class pass_outof_cfg_layout_mode
3747
3748unsigned int
3749pass_outof_cfg_layout_mode::execute (function *fun)
3750{
3751 basic_block bb;
3752
3753 FOR_EACH_BB_FN (bb, fun)
3754 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3755 bb->aux = bb->next_bb;
3756
3757 cfg_layout_finalize ();
3758
3759 return 0;
3760}
3761
3762} // anon namespace
3763
3764rtl_opt_pass *
3765make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3766{
3767 return new pass_outof_cfg_layout_mode (ctxt);
3768}
3769
3770
3771/* Link the basic blocks in the correct order, compacting the basic
3772 block queue while at it. If STAY_IN_CFGLAYOUT_MODE is false, this
3773 function also clears the basic block header and footer fields.
3774
3775 This function is usually called after a pass (e.g. tracer) finishes
3776 some transformations while in cfglayout mode. The required sequence
3777 of the basic blocks is in a linked list along the bb->aux field.
3778 This function re-links the basic block prev_bb and next_bb pointers
3779 accordingly, and it compacts and renumbers the blocks.
3780
3781 FIXME: This currently works only for RTL, but the only RTL-specific
3782 bits are the STAY_IN_CFGLAYOUT_MODE bits. The tracer pass was moved
3783 to GIMPLE a long time ago, but it doesn't relink the basic block
3784 chain. It could do that (to give better initial RTL) if this function
3785 is made IR-agnostic (and moved to cfganal.cc or cfg.cc while at it). */
3786
3787void
3788relink_block_chain (bool stay_in_cfglayout_mode)
3789{
3790 basic_block bb, prev_bb;
3791 int index;
3792
3793 /* Maybe dump the re-ordered sequence. */
3794 if (dump_file)
3795 {
3796 fprintf (stream: dump_file, format: "Reordered sequence:\n");
3797 for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, index =
3798 NUM_FIXED_BLOCKS;
3799 bb;
3800 bb = (basic_block) bb->aux, index++)
3801 {
3802 fprintf (stream: dump_file, format: " %i ", index);
3803 if (get_bb_original (bb))
3804 fprintf (stream: dump_file, format: "duplicate of %i\n",
3805 get_bb_original (bb)->index);
3806 else if (forwarder_block_p (bb)
3807 && !LABEL_P (BB_HEAD (bb)))
3808 fprintf (stream: dump_file, format: "compensation\n");
3809 else
3810 fprintf (stream: dump_file, format: "bb %i\n", bb->index);
3811 }
3812 }
3813
3814 /* Now reorder the blocks. */
3815 prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
3816 bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
3817 for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
3818 {
3819 bb->prev_bb = prev_bb;
3820 prev_bb->next_bb = bb;
3821 }
3822 prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
3823 EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;
3824
3825 /* Then, clean up the aux fields. */
3826 FOR_ALL_BB_FN (bb, cfun)
3827 {
3828 bb->aux = NULL;
3829 if (!stay_in_cfglayout_mode)
3830 BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
3831 }
3832
3833 /* Maybe reset the original copy tables, they are not valid anymore
3834 when we renumber the basic blocks in compact_blocks. If we are
3835 are going out of cfglayout mode, don't re-allocate the tables. */
3836 if (original_copy_tables_initialized_p ())
3837 free_original_copy_tables ();
3838 if (stay_in_cfglayout_mode)
3839 initialize_original_copy_tables ();
3840
3841 /* Finally, put basic_block_info in the new order. */
3842 compact_blocks ();
3843}
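
/* Illustrative example: a caller that wants to keep the current order sets
   bb->aux = bb->next_bb for every block but the last, exactly as
   pass_outof_cfg_layout_mode::execute does above; a reordering pass instead
   threads the aux pointers in the order it wants, starting from
   ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, before calling this function
   (usually indirectly, via fixup_reorder_chain below).  */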

/* Given a reorder chain, rearrange the code to match.  */

static void
fixup_reorder_chain (void)
{
  basic_block bb;
  rtx_insn *insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
        insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb;
       bb = (basic_block) bb->aux)
    {
      if (BB_HEADER (bb))
        {
          if (insn)
            SET_NEXT_INSN (insn) = BB_HEADER (bb);
          else
            set_first_insn (BB_HEADER (bb));
          SET_PREV_INSN (BB_HEADER (bb)) = insn;
          insn = BB_HEADER (bb);
          while (NEXT_INSN (insn))
            insn = NEXT_INSN (insn);
        }
      if (insn)
        SET_NEXT_INSN (insn) = BB_HEAD (bb);
      else
        set_first_insn (BB_HEAD (bb));
      SET_PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (BB_FOOTER (bb))
        {
          SET_NEXT_INSN (insn) = BB_FOOTER (bb);
          SET_PREV_INSN (BB_FOOTER (bb)) = insn;
          while (NEXT_INSN (insn))
            insn = NEXT_INSN (insn);
        }
    }

  SET_NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    SET_PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
  if (flag_checking)
    verify_insn_chain ();

  /* Now add jumps and labels as needed to match the blocks' new
     outgoing edges.  */

  bool remove_unreachable_blocks = false;
  for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb;
       bb = (basic_block) bb->aux)
    {
      edge e_fall, e_taken, e;
      rtx_insn *bb_end_insn;
      rtx ret_label = NULL_RTX;
      basic_block nb;
      edge_iterator ei;
      bool asm_goto = false;

      if (EDGE_COUNT (bb->succs) == 0)
        continue;

      /* Find the old fallthru edge, and another non-EH edge for
         a taken jump.  */
      e_taken = e_fall = NULL;

      FOR_EACH_EDGE (e, ei, bb->succs)
        if (e->flags & EDGE_FALLTHRU)
          e_fall = e;
        else if (! (e->flags & EDGE_EH))
          e_taken = e;

      bb_end_insn = BB_END (bb);
      if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
        {
          ret_label = JUMP_LABEL (bb_end_jump);
          if (any_condjump_p (bb_end_jump))
            {
              /* This might happen if the conditional jump has side
                 effects and could therefore not be optimized away.
                 Make the basic block end with a barrier in order
                 to prevent rtl_verify_flow_info from complaining.  */
              if (!e_fall)
                {
                  gcc_assert (!onlyjump_p (bb_end_jump)
                              || returnjump_p (bb_end_jump)
                              || (e_taken->flags & EDGE_CROSSING));
                  emit_barrier_after (bb_end_jump);
                  continue;
                }

              /* If the old fallthru is still next, nothing to do.  */
              if (bb->aux == e_fall->dest
                  || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
                continue;

              /* The degenerate case of a conditional jump jumping to the
                 next instruction can happen for jumps with side effects.
                 We need to construct a forwarder block, and this will be
                 done just fine by force_nonfallthru below.  */
              if (!e_taken)
                ;

              /* There is another special case: if *neither* block is next,
                 such as happens at the very end of a function, then we'll
                 need to add a new unconditional jump.  Choose the taken
                 edge based on known or assumed probability.  */
              else if (bb->aux != e_taken->dest)
                {
                  rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);

                  if (note
                      && profile_probability::from_reg_br_prob_note
                           (XINT (note, 0)) < profile_probability::even ()
                      && invert_jump (bb_end_jump,
                                      (e_fall->dest
                                       == EXIT_BLOCK_PTR_FOR_FN (cfun)
                                       ? NULL_RTX
                                       : label_for_bb (e_fall->dest)), 0))
                    {
                      e_fall->flags &= ~EDGE_FALLTHRU;
                      gcc_checking_assert (could_fall_through
                                           (e_taken->src, e_taken->dest));
                      e_taken->flags |= EDGE_FALLTHRU;
                      update_br_prob_note (bb);
                      e = e_fall, e_fall = e_taken, e_taken = e;
                    }
                }

              /* If the "jumping" edge is a crossing edge, and the fall
                 through edge is non-crossing, leave things as they are.  */
              else if ((e_taken->flags & EDGE_CROSSING)
                       && !(e_fall->flags & EDGE_CROSSING))
                continue;

              /* Otherwise we can try to invert the jump.  This will
                 basically never fail, however, keep up the pretense.  */
              else if (invert_jump (bb_end_jump,
                                    (e_fall->dest
                                     == EXIT_BLOCK_PTR_FOR_FN (cfun)
                                     ? NULL_RTX
                                     : label_for_bb (e_fall->dest)), 0))
                {
                  e_fall->flags &= ~EDGE_FALLTHRU;
                  gcc_checking_assert (could_fall_through
                                       (e_taken->src, e_taken->dest));
                  e_taken->flags |= EDGE_FALLTHRU;
                  update_br_prob_note (bb);
                  if (LABEL_NUSES (ret_label) == 0
                      && single_pred_p (e_taken->dest))
                    delete_insn (as_a<rtx_insn *> (ret_label));
                  continue;
                }
            }
          else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
            {
              /* If the old fallthru is still next, or if the
                 asm goto doesn't have a fallthru (e.g. when followed by
                 __builtin_unreachable ()), there is nothing to do.  */
              if (! e_fall
                  || bb->aux == e_fall->dest
                  || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
                continue;

              /* Otherwise we'll have to use the fallthru fixup below.
                 But avoid redirecting asm goto to EXIT.  */
              asm_goto = true;
            }
          else
            {
              /* Otherwise we have some return, switch or computed
                 jump.  In the 99% case, there should not have been a
                 fallthru edge.  */
              gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
              continue;
            }
        }
      else
        {
          /* No fallthru implies a noreturn function with EH edges, or
             something similarly bizarre.  In any case, we don't need to
             do anything.  */
          if (! e_fall)
            continue;

          /* If the fallthru block is still next, nothing to do.  */
          if (bb->aux == e_fall->dest)
            continue;

          /* A fallthru to the exit block.  */
          if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
            continue;
        }

      /* If E_FALL->dest is just a return block, then we can emit a
         return rather than a jump to the return block.  */
      rtx_insn *ret, *use;
      basic_block dest;
      if (!asm_goto
          && bb_is_just_return (e_fall->dest, &ret, &use)
          && ((PATTERN (ret) == simple_return_rtx
               && targetm.have_simple_return ())
              || (PATTERN (ret) == ret_rtx && targetm.have_return ())))
        {
          ret_label = PATTERN (ret);
          dest = EXIT_BLOCK_PTR_FOR_FN (cfun);

          e_fall->flags &= ~EDGE_CROSSING;
          /* E_FALL->dest might become unreachable as a result of
             replacing the jump with a return.  So arrange to remove
             unreachable blocks.  */
          remove_unreachable_blocks = true;
        }
      else
        {
          dest = e_fall->dest;
        }

      /* We got here if we need to add a new jump insn.
         Note force_nonfallthru can delete E_FALL and thus we have to
         save E_FALL->src prior to the call to force_nonfallthru.  */
      nb = force_nonfallthru_and_redirect (e_fall, dest, ret_label);
      if (nb)
        {
          nb->aux = bb->aux;
          bb->aux = nb;
          /* Don't process this new block.  */
          bb = nb;
        }
    }

  relink_block_chain (/*stay_in_cfglayout_mode=*/false);

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e = find_fallthru_edge (bb->succs);

      if (e && !can_fallthru (e->src, e->dest))
        force_nonfallthru (e);
    }

  /* Ensure goto_locus from edges has some instructions with that locus in RTL
     when not optimizing.  */
  if (!optimize && !DECL_IGNORED_P (current_function_decl))
    FOR_EACH_BB_FN (bb, cfun)
      {
        edge e;
        edge_iterator ei;

        FOR_EACH_EDGE (e, ei, bb->succs)
          if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
              && !(e->flags & EDGE_ABNORMAL))
            {
              edge e2;
              edge_iterator ei2;
              basic_block dest, nb;
              rtx_insn *end;

              insn = BB_END (e->src);
              end = PREV_INSN (BB_HEAD (e->src));
              while (insn != end
                     && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
                insn = PREV_INSN (insn);
              if (insn != end
                  && loc_equal (INSN_LOCATION (insn), e->goto_locus))
                continue;
              if (simplejump_p (BB_END (e->src))
                  && !INSN_HAS_LOCATION (BB_END (e->src)))
                {
                  INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
                  continue;
                }
              dest = e->dest;
              if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
                {
                  /* Non-fallthru edges to the exit block cannot be split.  */
                  if (!(e->flags & EDGE_FALLTHRU))
                    continue;
                }
              else
                {
                  insn = BB_HEAD (dest);
                  end = NEXT_INSN (BB_END (dest));
                  while (insn != end && !NONDEBUG_INSN_P (insn))
                    insn = NEXT_INSN (insn);
                  if (insn != end && INSN_HAS_LOCATION (insn)
                      && loc_equal (INSN_LOCATION (insn), e->goto_locus))
                    continue;
                }
              nb = split_edge (e);
              if (!INSN_P (BB_END (nb)))
                BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
                                                     nb);
              INSN_LOCATION (BB_END (nb)) = e->goto_locus;

              /* If there are other incoming edges to the destination block
                 with the same goto locus, redirect them to the new block as
                 well; this can prevent other such blocks from being created
                 in subsequent iterations of the loop.  */
              for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
                if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
                    && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
                    && e->goto_locus == e2->goto_locus)
                  redirect_edge_and_branch (e2, nb);
                else
                  ei_next (&ei2);
            }
      }

  /* Replacing a jump with a return may have exposed unreachable
     blocks.  Remove them if such transformations were made.  */
  if (remove_unreachable_blocks)
    delete_unreachable_blocks ();
}

/* Perform sanity checks on the insn chain.
   1. Check that next/prev pointers are consistent in both the forward and
      reverse directions.
   2. Count insns in chain, going both directions, and check if equal.
   3. Check that get_last_insn () returns the actual end of chain.  */

DEBUG_FUNCTION void
verify_insn_chain (void)
{
  rtx_insn *x, *prevx, *nextx;
  int insn_cnt1, insn_cnt2;

  for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
       x != 0;
       prevx = x, insn_cnt1++, x = NEXT_INSN (x))
    gcc_assert (PREV_INSN (x) == prevx);

  gcc_assert (prevx == get_last_insn ());

  for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
       x != 0;
       nextx = x, insn_cnt2++, x = PREV_INSN (x))
    gcc_assert (NEXT_INSN (x) == nextx);

  gcc_assert (insn_cnt1 == insn_cnt2);
}

/* If we have assembler epilogues, the block falling through to exit must
   be the last one in the reordered chain when we reach final.  Ensure
   that this condition is met.  */

static void
fixup_fallthru_exit_predecessor (void)
{
  edge e;
  basic_block bb = NULL;

  /* This transformation is not valid before reload, because we might
     separate a call from the instruction that copies the return
     value.  */
  gcc_assert (reload_completed);

  e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
  if (e)
    bb = e->src;

  if (bb && bb->aux)
    {
      basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;

      /* If the very first block is the one with the fall-through exit
         edge, we have to split that block.  */
      if (c == bb)
        {
          bb = split_block_after_labels (bb)->dest;
          bb->aux = c->aux;
          c->aux = bb;
          BB_FOOTER (bb) = BB_FOOTER (c);
          BB_FOOTER (c) = NULL;
        }

      while (c->aux != bb)
        c = (basic_block) c->aux;

      c->aux = bb->aux;
      while (c->aux)
        c = (basic_block) c->aux;

      c->aux = bb;
      bb->aux = NULL;
    }
}
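
/* In aux-chain terms, the transformation above takes the chain
   c1 -> ... -> bb -> ... -> cn, unlinks BB, and re-appends it so that
   the chain becomes c1 -> ... -> cn -> bb: the fallthru predecessor of
   EXIT ends up as the last block emitted.  */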

/* If the exit block has more than one fallthru predecessor, force
   there to be only one.  */

static void
force_one_exit_fallthru (void)
{
  edge e, predecessor = NULL;
  bool more = false;
  edge_iterator ei;
  basic_block forwarder, bb;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (e->flags & EDGE_FALLTHRU)
      {
        if (predecessor == NULL)
          predecessor = e;
        else
          {
            more = true;
            break;
          }
      }

  if (!more)
    return;

  /* Exit has several fallthru predecessors.  Create a forwarder block for
     them.  */
  forwarder = split_edge (predecessor);
  for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
       (e = ei_safe_edge (ei)); )
    {
      if (e->src == forwarder
          || !(e->flags & EDGE_FALLTHRU))
        ei_next (&ei);
      else
        redirect_edge_and_branch_force (e, forwarder);
    }

  /* Fix up the chain of blocks -- make FORWARDER immediately precede the
     exit block.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      if (bb->aux == NULL && bb != forwarder)
        {
          bb->aux = forwarder;
          break;
        }
    }
}

/* Return true in case it is possible to duplicate the basic block BB.  */

static bool
cfg_layout_can_duplicate_bb_p (const_basic_block bb)
{
  /* Do not attempt to duplicate tablejumps, as we need to unshare
     the dispatch table.  This is difficult to do, as the instructions
     computing the jump destination may be hoisted outside the basic
     block.  */
  if (tablejump_p (BB_END (bb), NULL, NULL))
    return false;

  /* Do not duplicate blocks containing insns that can't be copied.  */
  if (targetm.cannot_copy_insn_p)
    {
      rtx_insn *insn = BB_HEAD (bb);
      while (1)
        {
          if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
            return false;
          if (insn == BB_END (bb))
            break;
          insn = NEXT_INSN (insn);
        }
    }

  return true;
}

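/* Duplicate the insn chain FROM ... TO, inclusive, emitting the copies at
   the end of the current insn stream.  LOOP, if non-NULL, is the loop the
   original block belongs to, and ID, if non-NULL, requests remapping of
   MEM dependence cliques brought in via inlining (see below).  Return the
   first copied insn, or NULL if nothing was copied.  */
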
rtx_insn *
duplicate_insn_chain (rtx_insn *from, rtx_insn *to,
                      class loop *loop, copy_bb_data *id)
{
  rtx_insn *insn, *next, *copy;
  rtx_note *last;

  /* Avoid updating of boundaries of the previous basic block.  The
     note will get removed from the insn stream in fixup.  */
  last = emit_note (NOTE_INSN_DELETED);

  /* Create the copy at the end of the insn chain.  The chain will
     be reordered later.  */
  for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
    {
      switch (GET_CODE (insn))
        {
        case DEBUG_INSN:
          /* Don't duplicate label debug insns.  */
          if (DEBUG_BIND_INSN_P (insn)
              && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
            break;
          /* FALLTHRU */
        case INSN:
        case CALL_INSN:
        case JUMP_INSN:
          copy = emit_copy_of_insn_after (insn, get_last_insn ());
          if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
              && ANY_RETURN_P (JUMP_LABEL (insn)))
            JUMP_LABEL (copy) = JUMP_LABEL (insn);
          maybe_copy_prologue_epilogue_insn (insn, copy);
          /* If requested, remap dependence info of cliques brought in
             via inlining.  */
          if (id)
            {
              subrtx_iterator::array_type array;
              FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
                if (MEM_P (*iter) && MEM_EXPR (*iter))
                  {
                    tree op = MEM_EXPR (*iter);
                    if (TREE_CODE (op) == WITH_SIZE_EXPR)
                      op = TREE_OPERAND (op, 0);
                    while (handled_component_p (op))
                      op = TREE_OPERAND (op, 0);
                    if ((TREE_CODE (op) == MEM_REF
                         || TREE_CODE (op) == TARGET_MEM_REF)
                        && MR_DEPENDENCE_CLIQUE (op) > 1
                        && (!loop
                            || (MR_DEPENDENCE_CLIQUE (op)
                                != loop->owned_clique)))
                      {
                        if (!id->dependence_map)
                          id->dependence_map = new hash_map<dependence_hash,
                                                            unsigned short>;
                        bool existed;
                        unsigned short &newc
                          = id->dependence_map->get_or_insert
                              (MR_DEPENDENCE_CLIQUE (op), &existed);
                        if (!existed)
                          {
                            gcc_assert
                              (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
                            newc = ++cfun->last_clique;
                          }
                        /* We cannot adjust MR_DEPENDENCE_CLIQUE in-place
                           since MEM_EXPR is shared, so make a copy and
                           walk to the subtree again.  */
                        tree new_expr = unshare_expr (MEM_EXPR (*iter));
                        if (TREE_CODE (new_expr) == WITH_SIZE_EXPR)
                          new_expr = TREE_OPERAND (new_expr, 0);
                        while (handled_component_p (new_expr))
                          new_expr = TREE_OPERAND (new_expr, 0);
                        MR_DEPENDENCE_CLIQUE (new_expr) = newc;
                        set_mem_expr (const_cast <rtx> (*iter), new_expr);
                      }
                  }
            }
          break;

        case JUMP_TABLE_DATA:
          /* Avoid copying dispatch tables.  We never duplicate
             tablejumps, so this can hit only in case the table got
             moved far from the original jump.
             Avoid copying the following barrier as well, if any
             (and debug insns in between).  */
          for (next = NEXT_INSN (insn);
               next != NEXT_INSN (to);
               next = NEXT_INSN (next))
            if (!DEBUG_INSN_P (next))
              break;
          if (next != NEXT_INSN (to) && BARRIER_P (next))
            insn = next;
          break;

        case CODE_LABEL:
          break;

        case BARRIER:
          emit_barrier ();
          break;

        case NOTE:
          switch (NOTE_KIND (insn))
            {
            /* In case the prologue is empty and the function contains a
               label in its first BB, we may want to copy the block.  */
            case NOTE_INSN_PROLOGUE_END:

            case NOTE_INSN_DELETED:
            case NOTE_INSN_DELETED_LABEL:
            case NOTE_INSN_DELETED_DEBUG_LABEL:
              /* No problem to strip these.  */
            case NOTE_INSN_FUNCTION_BEG:
              /* There is always just a single entry to the function.  */
            case NOTE_INSN_BASIC_BLOCK:
              /* We should only switch text sections once.  */
            case NOTE_INSN_SWITCH_TEXT_SECTIONS:
              break;

            case NOTE_INSN_EPILOGUE_BEG:
            case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
              emit_note_copy (as_a <rtx_note *> (insn));
              break;

            default:
              /* All other notes should have already been eliminated.  */
              gcc_unreachable ();
            }
          break;
        default:
          gcc_unreachable ();
        }
    }
  insn = NEXT_INSN (last);
  delete_insn (last);
  return insn;
}

/* Create a duplicate of the basic block BB.  */

static basic_block
cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *id)
{
  rtx_insn *insn;
  basic_block new_bb;

  class loop *loop = (id && current_loops) ? bb->loop_father : NULL;

  insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb), loop, id);
  new_bb = create_basic_block (insn,
                               insn ? get_last_insn () : NULL,
                               EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);

  BB_COPY_PARTITION (new_bb, bb);
  if (BB_HEADER (bb))
    {
      insn = BB_HEADER (bb);
      while (NEXT_INSN (insn))
        insn = NEXT_INSN (insn);
      insn = duplicate_insn_chain (BB_HEADER (bb), insn, loop, id);
      if (insn)
        BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
    }

  if (BB_FOOTER (bb))
    {
      insn = BB_FOOTER (bb);
      while (NEXT_INSN (insn))
        insn = NEXT_INSN (insn);
      insn = duplicate_insn_chain (BB_FOOTER (bb), insn, loop, id);
      if (insn)
        BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
    }

  return new_bb;
}

/* Main entry point to this module - initialize the datastructures for
   CFG layout changes.

   FLAGS is a set of additional flags to pass to cleanup_cfg ().  */

void
cfg_layout_initialize (int flags)
{
  rtx_insn_list *x;
  basic_block bb;

  /* Once bb partitioning is complete, cfg layout mode should not be
     re-entered.  Entering cfg layout mode may require fixups: as an
     example, if edge forwarding performed when optimizing the cfg
     layout required moving a block from the hot to the cold section,
     this would create an illegal partitioning unless some manual
     fixup was performed.  */
  gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);

  initialize_original_copy_tables ();

  cfg_layout_rtl_register_cfg_hooks ();

  record_effective_endpoints ();

  /* Make sure that the targets of non-local gotos are marked.  */
  for (x = nonlocal_goto_handler_labels; x; x = x->next ())
    {
      bb = BLOCK_FOR_INSN (x->insn ());
      bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
    }

  cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
}
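
/* Typical usage (sketch, mirroring pass_into_cfg_layout_mode and
   pass_outof_cfg_layout_mode above):

     cfg_layout_initialize (0);
     ... transform the CFG; record the desired block order in bb->aux ...
     cfg_layout_finalize ();  */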

/* Splits superblocks.  */
void
break_superblocks (void)
{
  bool need = false;
  basic_block bb;

  auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
  bitmap_clear (superblocks);

  FOR_EACH_BB_FN (bb, cfun)
    if (bb->flags & BB_SUPERBLOCK)
      {
        bb->flags &= ~BB_SUPERBLOCK;
        bitmap_set_bit (superblocks, bb->index);
        need = true;
      }

  if (need)
    {
      rebuild_jump_labels (get_insns ());
      find_many_sub_basic_blocks (superblocks);
    }
}

/* Finalize the changes: reorder the insn list according to the sequence
   specified by the aux pointers, insert compensation code, and rebuild
   the scope forest.  */

void
cfg_layout_finalize (void)
{
  free_dominance_info (CDI_DOMINATORS);
  force_one_exit_fallthru ();
  rtl_register_cfg_hooks ();
  if (reload_completed && !targetm.have_epilogue ())
    fixup_fallthru_exit_predecessor ();
  fixup_reorder_chain ();

  rebuild_jump_labels (get_insns ());
  delete_dead_jumptables ();

  if (flag_checking)
    verify_insn_chain ();
  checking_verify_flow_info ();
}

/* Same as split_block but update cfg_layout structures.  */

static basic_block
cfg_layout_split_block (basic_block bb, void *insnp)
{
  rtx insn = (rtx) insnp;
  basic_block new_bb = rtl_split_block (bb, insn);

  BB_FOOTER (new_bb) = BB_FOOTER (bb);
  BB_FOOTER (bb) = NULL;

  return new_bb;
}

/* Redirect edge E to DEST.  */

static edge
cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
{
  basic_block src = e->src;
  edge ret;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (e->dest == dest)
    return e;

  if (e->flags & EDGE_CROSSING
      && BB_PARTITION (e->src) == BB_PARTITION (dest)
      && simplejump_p (BB_END (src)))
    {
      if (dump_file)
        fprintf (dump_file,
                 "Removing crossing jump while redirecting edge from %i to %i\n",
                 e->src->index, dest->index);
      delete_insn (BB_END (src));
      remove_barriers_from_footer (src);
      e->flags |= EDGE_FALLTHRU;
    }

  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && (ret = try_redirect_by_replacing_jump (e, dest, true)))
    {
      df_set_bb_dirty (src);
      return ret;
    }

  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
    {
      if (dump_file)
        fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
                 e->src->index, dest->index);

      df_set_bb_dirty (e->src);
      redirect_edge_succ (e, dest);
      return e;
    }

  /* Redirect_edge_and_branch may decide to turn the branch into a fallthru
     edge in the case the basic blocks appear to be in sequence.  Avoid this
     transformation.  */

  if (e->flags & EDGE_FALLTHRU)
    {
      /* Redirect any branch edges unified with the fallthru one.  */
      if (JUMP_P (BB_END (src))
          && label_is_jump_target_p (BB_HEAD (e->dest),
                                     BB_END (src)))
        {
          edge redirected;

          if (dump_file)
            fprintf (dump_file, "Fallthru edge unified with branch "
                     "%i->%i redirected to %i\n",
                     e->src->index, e->dest->index, dest->index);
          e->flags &= ~EDGE_FALLTHRU;
          redirected = redirect_branch_edge (e, dest);
          gcc_assert (redirected);
          redirected->flags |= EDGE_FALLTHRU;
          df_set_bb_dirty (redirected->src);
          return redirected;
        }
      /* In case we are redirecting the fallthru edge to the branch edge
         of a conditional jump, remove it.  */
      if (EDGE_COUNT (src->succs) == 2)
        {
          /* Find the edge that is different from E.  */
          edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);

          if (s->dest == dest
              && any_condjump_p (BB_END (src))
              && onlyjump_p (BB_END (src)))
            delete_insn (BB_END (src));
        }
      if (dump_file)
        fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
                 e->src->index, e->dest->index, dest->index);
      ret = redirect_edge_succ_nodup (e, dest);
    }
  else
    ret = redirect_branch_edge (e, dest);

  if (!ret)
    return NULL;

  fixup_partition_crossing (ret);
  /* We don't want simplejumps in the insn stream during cfglayout.  */
  gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));

  df_set_bb_dirty (src);
  return ret;
}

/* Simple wrapper, as we can always redirect fallthru edges.  */

static basic_block
cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
{
  edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);

  gcc_assert (redirected);
  return NULL;
}

/* Same as delete_basic_block but update cfg_layout structures.  */

static void
cfg_layout_delete_block (basic_block bb)
{
  rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaining;
  rtx_insn **to;

  if (BB_HEADER (bb))
    {
      next = BB_HEAD (bb);
      if (prev)
        SET_NEXT_INSN (prev) = BB_HEADER (bb);
      else
        set_first_insn (BB_HEADER (bb));
      SET_PREV_INSN (BB_HEADER (bb)) = prev;
      insn = BB_HEADER (bb);
      while (NEXT_INSN (insn))
        insn = NEXT_INSN (insn);
      SET_NEXT_INSN (insn) = next;
      SET_PREV_INSN (next) = insn;
    }
  next = NEXT_INSN (BB_END (bb));
  if (BB_FOOTER (bb))
    {
      insn = BB_FOOTER (bb);
      while (insn)
        {
          if (BARRIER_P (insn))
            {
              if (PREV_INSN (insn))
                SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
              else
                BB_FOOTER (bb) = NEXT_INSN (insn);
              if (NEXT_INSN (insn))
                SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
            }
          if (LABEL_P (insn))
            break;
          insn = NEXT_INSN (insn);
        }
      if (BB_FOOTER (bb))
        {
          insn = BB_END (bb);
          SET_NEXT_INSN (insn) = BB_FOOTER (bb);
          SET_PREV_INSN (BB_FOOTER (bb)) = insn;
          while (NEXT_INSN (insn))
            insn = NEXT_INSN (insn);
          SET_NEXT_INSN (insn) = next;
          if (next)
            SET_PREV_INSN (next) = insn;
          else
            set_last_insn (insn);
        }
    }
  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    to = &BB_HEADER (bb->next_bb);
  else
    to = &cfg_layout_function_footer;

  rtl_delete_block (bb);

  if (prev)
    prev = NEXT_INSN (prev);
  else
    prev = get_insns ();
  if (next)
    next = PREV_INSN (next);
  else
    next = get_last_insn ();

  if (next && NEXT_INSN (next) != prev)
    {
      remaining = unlink_insn_chain (prev, next);
      insn = remaining;
      while (NEXT_INSN (insn))
        insn = NEXT_INSN (insn);
      SET_NEXT_INSN (insn) = *to;
      if (*to)
        SET_PREV_INSN (*to) = insn;
      *to = remaining;
    }
}

/* Return true when blocks A and B can be safely merged.  */

static bool
cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* If we would end up moving B's instructions, make sure it doesn't fall
     through into the exit block, since we cannot recover from a fallthrough
     edge into the exit block occurring in the middle of a function.  */
  if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
    {
      edge e = find_fallthru_edge (b->succs);
      if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
        return false;
    }

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
          && single_succ (a) == b
          && single_pred_p (b)
          && a != b
          /* Must be a simple edge.  */
          && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
          && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
          && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
          /* If the jump insn has side effects, we can't kill the edge.
             When not optimizing, try_redirect_by_replacing_jump will
             not allow us to redirect an edge by replacing a table jump.  */
          && (!JUMP_P (BB_END (a))
              || ((!optimize || reload_completed)
                  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}
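
/* For example, a block ending in a conditional jump has two successor
   edges and thus never satisfies single_succ_p above, so it cannot be
   merged with either destination; only a block with a single, simple
   outgoing edge can be merged into its successor.  */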

/* Merge blocks A and B.  The blocks must be mergeable.  */

static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
  /* If B is a forwarder block whose outgoing edge has no location, we'll
     propagate the locus of the edge between A and B onto it.  */
  const bool forward_edge_locus
    = (b->flags & BB_FORWARDER_BLOCK) != 0
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
  rtx_insn *insn;

  gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
             a->index);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (BB_HEAD (b)))
    delete_insn (BB_HEAD (b));

  /* We should have a fallthru edge in A, or we can do dummy redirection
     to get it cleaned up.  */
  if (JUMP_P (BB_END (a)))
    try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
  gcc_assert (!JUMP_P (BB_END (a)));

  /* If not optimizing, preserve the locus of the single edge between
     blocks A and B if necessary by emitting a nop.  */
  if (!optimize
      && !forward_edge_locus
      && !DECL_IGNORED_P (current_function_decl))
    emit_nop_for_unique_locus_between (a, b);

  /* Move things from b->footer after a->footer.  */
  if (BB_FOOTER (b))
    {
      if (!BB_FOOTER (a))
        BB_FOOTER (a) = BB_FOOTER (b);
      else
        {
          rtx_insn *last = BB_FOOTER (a);

          while (NEXT_INSN (last))
            last = NEXT_INSN (last);
          SET_NEXT_INSN (last) = BB_FOOTER (b);
          SET_PREV_INSN (BB_FOOTER (b)) = last;
        }
      BB_FOOTER (b) = NULL;
    }

  /* Move things from b->header before a->footer.
     Note that this may include dead tablejump data, but we don't clean
     those up until we go out of cfglayout mode.  */
  if (BB_HEADER (b))
    {
      if (! BB_FOOTER (a))
        BB_FOOTER (a) = BB_HEADER (b);
      else
        {
          rtx_insn *last = BB_HEADER (b);

          while (NEXT_INSN (last))
            last = NEXT_INSN (last);
          SET_NEXT_INSN (last) = BB_FOOTER (a);
          SET_PREV_INSN (BB_FOOTER (a)) = last;
          BB_FOOTER (a) = BB_HEADER (b);
        }
      BB_HEADER (b) = NULL;
    }

  /* If the basic blocks are not adjacent, move them around.  */
  if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
    {
      insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));

      emit_insn_after_noloc (insn, BB_END (a), a);
    }
  /* Otherwise just re-associate the instructions.  */
  else
    {
      insn = BB_HEAD (b);
      BB_END (a) = BB_END (b);
    }

  /* emit_insn_after_noloc doesn't call df_insn_change_bb.
     We need to call it explicitly.  */
  update_bb_for_insn_chain (insn, BB_END (b), a);

  /* Skip a possible DELETED_LABEL insn.  */
  if (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  BB_HEAD (b) = BB_END (b) = NULL;
  delete_insn (insn);

  df_bb_delete (b->index);

  if (forward_edge_locus)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Split edge E.  */

static basic_block
cfg_layout_split_edge (edge e)
{
  basic_block new_bb =
    create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
                        ? NEXT_INSN (BB_END (e->src)) : get_insns (),
                        NULL_RTX, e->src);

  if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    BB_COPY_PARTITION (new_bb, e->src);
  else
    BB_COPY_PARTITION (new_bb, e->dest);
  make_edge (new_bb, e->dest, EDGE_FALLTHRU);
  redirect_edge_and_branch_force (e, new_bb);

  return new_bb;
}
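
/* Note the new block copies the partition of E->dest (or of E->src when
   the destination is the exit block), so splitting an edge does not by
   itself introduce a new hot/cold section crossing on the new fallthru
   edge; any crossing stays on the redirected edge E.  */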

/* Do postprocessing after making a forwarder block joined by edge
   FALLTHRU.  */

static void
rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
{
}

/* Return true if BB contains only labels or non-executable
   instructions.  */

static bool
rtl_block_empty_p (basic_block bb)
{
  rtx_insn *insn;

  if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;

  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn)
        && (!any_uncondjump_p (insn) || !onlyjump_p (insn)))
      return false;

  return true;
}

/* Split a basic block if it ends with a conditional branch and if
   the other part of the block is not empty.  */

static basic_block
rtl_split_block_before_cond_jump (basic_block bb)
{
  rtx_insn *insn;
  rtx_insn *split_point = NULL;
  rtx_insn *last = NULL;
  bool found_code = false;

  FOR_BB_INSNS (bb, insn)
    {
      if (any_condjump_p (insn))
        split_point = last;
      else if (NONDEBUG_INSN_P (insn))
        found_code = true;
      last = insn;
    }

  /* Only split if we found both a conditional jump and some other
     real code.  */
  if (found_code && split_point)
    return split_block (bb, split_point)->dest;
  else
    return NULL;
}

/* Return true if BB ends with a call, possibly followed by some
   instructions that must stay with the call, false otherwise.  */

static bool
rtl_block_ends_with_call_p (basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (!CALL_P (insn)
         && insn != BB_HEAD (bb)
         && (keep_with_call_p (insn)
             || NOTE_P (insn)
             || DEBUG_INSN_P (insn)))
    insn = PREV_INSN (insn);
  return (CALL_P (insn));
}

/* Return true if BB ends with a conditional branch, false otherwise.  */

static bool
rtl_block_ends_with_condjump_p (const_basic_block bb)
{
  return any_condjump_p (BB_END (bb));
}

/* Return true if we need to add a fake edge to the exit block for INSN.
   Helper function for rtl_flow_call_edges_add.  */

static bool
need_fake_edge_p (const rtx_insn *insn)
{
  if (!INSN_P (insn))
    return false;

  if ((CALL_P (insn)
       && !SIBLING_CALL_P (insn)
       && !find_reg_note (insn, REG_NORETURN, NULL)
       && !(RTL_CONST_OR_PURE_CALL_P (insn))))
    return true;

  return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
           && MEM_VOLATILE_P (PATTERN (insn)))
          || (GET_CODE (PATTERN (insn)) == PARALLEL
              && asm_noperands (insn) != -1
              && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
          || GET_CODE (PATTERN (insn)) == ASM_INPUT);
}
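
/* For instance, a call might never return (e.g. because the callee calls
   exit), in which case the insns after it in the block are not executed;
   the block profiler must therefore see a fake outgoing edge to EXIT
   after such a call.  Volatile asms are conservatively treated the same
   way.  */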

/* Add fake edges to the function exit for any non-constant and non-noreturn
   calls (or volatile inline assembly) in the bitmap of blocks specified by
   BLOCKS, or in the whole CFG if BLOCKS is zero.  Return the number of
   blocks that were split.

   The goal is to expose cases in which entering a basic block does not imply
   that all subsequent instructions must be executed.  */

static int
rtl_flow_call_edges_add (sbitmap blocks)
{
  int i;
  int blocks_split = 0;
  int last_bb = last_basic_block_for_fn (cfun);
  bool check_last_block = false;

  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return 0;

  if (! blocks)
    check_last_block = true;
  else
    check_last_block
      = bitmap_bit_p (blocks, EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);

  /* In the last basic block, before epilogue generation, there will be
     a fallthru edge to EXIT.  Special care is required if the last insn
     of the last basic block is a call because make_edge folds duplicate
     edges, which would result in the fallthru edge also being marked
     fake, which would result in the fallthru edge being removed by
     remove_fake_edges, which would result in an invalid CFG.

     Moreover, we can't elide the outgoing fake edge, since the block
     profiler needs to take this into account in order to solve the minimal
     spanning tree in the case that the call doesn't return.

     Handle this by adding a dummy instruction in a new last basic block.  */
  if (check_last_block)
    {
      basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
      rtx_insn *insn = BB_END (bb);

      /* Back up past insns that must be kept in the same block as a call.  */
      while (insn != BB_HEAD (bb)
             && keep_with_call_p (insn))
        insn = PREV_INSN (insn);

      if (need_fake_edge_p (insn))
        {
          edge e;

          e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
          if (e)
            {
              insert_insn_on_edge (gen_use (const0_rtx), e);
              commit_edge_insertions ();
            }
        }
    }

  /* Now add fake edges to the function exit for any non-constant
     calls since there is no way that we can determine if they will
     return or not...  */

  for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      rtx_insn *insn;
      rtx_insn *prev_insn;

      if (!bb)
        continue;

      if (blocks && !bitmap_bit_p (blocks, i))
        continue;

      for (insn = BB_END (bb); ; insn = prev_insn)
        {
          prev_insn = PREV_INSN (insn);
          if (need_fake_edge_p (insn))
            {
              edge e;
              rtx_insn *split_at_insn = insn;

              /* Don't split the block between a call and an insn that should
                 remain in the same block as the call.  */
              if (CALL_P (insn))
                while (split_at_insn != BB_END (bb)
                       && keep_with_call_p (NEXT_INSN (split_at_insn)))
                  split_at_insn = NEXT_INSN (split_at_insn);

              /* The handling above of the final block before the epilogue
                 should be enough to verify that there is no edge to the exit
                 block in the CFG already.  Calling make_edge in such a case
                 would cause us to mark that edge as fake and remove it
                 later.  */
              if (flag_checking && split_at_insn == BB_END (bb))
                {
                  e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
                  gcc_assert (e == NULL);
                }

              /* Note that the following may create a new basic block
                 and renumber the existing basic blocks.  */
              if (split_at_insn != BB_END (bb))
                {
                  e = split_block (bb, split_at_insn);
                  if (e)
                    blocks_split++;
                }

              edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
              ne->probability = profile_probability::guessed_never ();
            }

          if (insn == BB_HEAD (bb))
            break;
        }
    }

  if (blocks_split)
    verify_flow_info ();

  return blocks_split;
}

/* Add COMP_RTX as a condition at the end of COND_BB.  FIRST_HEAD is
   the conditional branch target; SECOND_HEAD is the fall-thru block,
   which need not be handled here because the loop versioning code
   takes care of it.  SECOND_HEAD is a parameter at all only because
   the corresponding GIMPLE hook needs it, and the two hooks must have
   the same type.  */

static void
rtl_lv_add_condition_to_bb (basic_block first_head,
                            basic_block second_head ATTRIBUTE_UNUSED,
                            basic_block cond_bb, void *comp_rtx)
{
  rtx_code_label *label;
  rtx_insn *seq, *jump;
  rtx op0 = XEXP ((rtx) comp_rtx, 0);
  rtx op1 = XEXP ((rtx) comp_rtx, 1);
  enum rtx_code comp = GET_CODE ((rtx) comp_rtx);
  machine_mode mode;

  label = block_label (first_head);
  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
                           profile_probability::uninitialized ());
  jump = get_last_insn ();
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;
  seq = get_insns ();
  end_sequence ();

  /* Add the new condition at the end of COND_BB.  */
  emit_insn_after (seq, BB_END (cond_bb));
}

/* Given a block B ending with a conditional branch, store the branch
   edge and the fall-thru edge in *BRANCH_EDGE and *FALLTHRU_EDGE
   respectively.  */

static void
rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
                           edge *fallthru_edge)
{
  edge e = EDGE_SUCC (b, 0);

  if (e->flags & EDGE_FALLTHRU)
    {
      *fallthru_edge = e;
      *branch_edge = EDGE_SUCC (b, 1);
    }
  else
    {
      *branch_edge = e;
      *fallthru_edge = EDGE_SUCC (b, 1);
    }
}

void
init_rtl_bb_info (basic_block bb)
{
  gcc_assert (!bb->il.x.rtl);
  bb->il.x.head_ = NULL;
  bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
}

static bool
rtl_bb_info_initialized_p (basic_block bb)
{
  return bb->il.x.rtl;
}

/* Returns true if it is possible to remove edge E by redirecting
   it to the destination of the other edge from E->src.  */

static bool
rtl_can_remove_branch_p (const_edge e)
{
  const_basic_block src = e->src;
  const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
  const rtx_insn *insn = BB_END (src);
  rtx set;

  /* The conditions are taken from try_redirect_by_replacing_jump.  */
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return false;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return false;

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return false;

  if (!onlyjump_p (insn)
      || tablejump_p (insn, NULL, NULL))
    return false;

  set = single_set (insn);
  if (!set || side_effects_p (set))
    return false;

  return true;
}

static basic_block
rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
{
  bb = cfg_layout_duplicate_bb (bb, id);
  bb->aux = NULL;
  return bb;
}

/* Do book-keeping of basic block BB for the profile consistency checker.
   Store the counts in RECORD.  */

static void
rtl_account_profile_record (basic_block bb, struct profile_record *record)
{
  rtx_insn *insn;
  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
        record->size += insn_cost (insn, false);
        if (profile_info)
          {
            if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().initialized_p ()
                && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().nonzero_p ()
                && bb->count.ipa ().initialized_p ())
              record->time
                += insn_cost (insn, true) * bb->count.ipa ().to_gcov_type ();
          }
        else if (bb->count.initialized_p ()
                 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.initialized_p ())
          record->time
            += insn_cost (insn, true)
               * bb->count.to_sreal_scale
                   (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count).to_double ();
        else
          record->time += insn_cost (insn, true);
      }
}

/* Implementation of CFG manipulation for linearized RTL.  */
struct cfg_hooks rtl_cfg_hooks = {
  "rtl",
  rtl_verify_flow_info,
  rtl_dump_bb,
  rtl_dump_bb_for_graph,
  rtl_create_basic_block,
  rtl_redirect_edge_and_branch,
  rtl_redirect_edge_and_branch_force,
  rtl_can_remove_branch_p,
  rtl_delete_block,
  rtl_split_block,
  rtl_move_block_after,
  rtl_can_merge_blocks, /* can_merge_blocks_p */
  rtl_merge_blocks,
  rtl_predict_edge,
  rtl_predicted_by_p,
  cfg_layout_can_duplicate_bb_p,
  rtl_duplicate_bb,
  rtl_split_edge,
  rtl_make_forwarder_block,
  rtl_tidy_fallthru_edge,
  rtl_force_nonfallthru,
  rtl_block_ends_with_call_p,
  rtl_block_ends_with_condjump_p,
  rtl_flow_call_edges_add,
  NULL, /* execute_on_growing_pred */
  NULL, /* execute_on_shrinking_pred */
  NULL, /* duplicate loop for trees */
  NULL, /* lv_add_condition_to_bb */
  NULL, /* lv_adjust_loop_header_phi */
  NULL, /* extract_cond_bb_edges */
  NULL, /* flush_pending_stmts */
  rtl_block_empty_p, /* block_empty_p */
  rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
  rtl_account_profile_record,
};

/* Implementation of CFG manipulation for cfglayout RTL, where
   basic blocks connected via fallthru edges do not have to be adjacent.
   This representation will hopefully become the default one in a future
   version of the compiler.  */

struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
  "cfglayout mode",
  rtl_verify_flow_info_1,
  rtl_dump_bb,
  rtl_dump_bb_for_graph,
  cfg_layout_create_basic_block,
  cfg_layout_redirect_edge_and_branch,
  cfg_layout_redirect_edge_and_branch_force,
  rtl_can_remove_branch_p,
  cfg_layout_delete_block,
  cfg_layout_split_block,
  rtl_move_block_after,
  cfg_layout_can_merge_blocks_p,
  cfg_layout_merge_blocks,
  rtl_predict_edge,
  rtl_predicted_by_p,
  cfg_layout_can_duplicate_bb_p,
  cfg_layout_duplicate_bb,
  cfg_layout_split_edge,
  rtl_make_forwarder_block,
  NULL, /* tidy_fallthru_edge */
  rtl_force_nonfallthru,
  rtl_block_ends_with_call_p,
  rtl_block_ends_with_condjump_p,
  rtl_flow_call_edges_add,
  NULL, /* execute_on_growing_pred */
  NULL, /* execute_on_shrinking_pred */
  duplicate_loop_body_to_header_edge, /* duplicate loop for rtl */
  rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
  NULL, /* lv_adjust_loop_header_phi */
  rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
  NULL, /* flush_pending_stmts */
  rtl_block_empty_p, /* block_empty_p */
  rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
  rtl_account_profile_record,
};

#include "gt-cfgrtl.h"

#if __GNUC__ >= 10
# pragma GCC diagnostic pop
#endif
