/* Control flow functions for trees.
   Copyright (C) 2001-2023 Free Software Foundation, Inc.
   Contributed by Diego Novillo <dnovillo@redhat.com>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "gimple-pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "trans-mem.h"
#include "stor-layout.h"
#include "print-tree.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "gimple-fold.h"
#include "tree-eh.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-ssa-loop-manip.h"
#include "tree-ssa-loop-niter.h"
#include "tree-into-ssa.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "except.h"
#include "cfgloop.h"
#include "tree-ssa-propagate.h"
#include "value-prof.h"
#include "tree-inline.h"
#include "tree-ssa-live.h"
#include "tree-ssa-dce.h"
#include "omp-general.h"
#include "omp-expand.h"
#include "tree-cfgcleanup.h"
#include "gimplify.h"
#include "attribs.h"
#include "selftest.h"
#include "opts.h"
#include "asan.h"
#include "profile.h"
#include "sreal.h"

/* This file contains functions for building the Control Flow Graph (CFG)
   for a function tree.  */

/* Local declarations.  */

/* Initial capacity for the basic block array.  */
static const int initial_cfg_capacity = 20;

/* This hash table allows us to efficiently look up all CASE_LABEL_EXPRs
   which use a particular edge.  The CASE_LABEL_EXPRs are chained together
   via their CASE_CHAIN field, which we clear after we're done with the
   hash table to prevent problems with duplication of GIMPLE_SWITCHes.

   Access to this list of CASE_LABEL_EXPRs allows us to efficiently
   update the case vector in response to edge redirections.

   Right now this table is set up and torn down at key points in the
   compilation process.  It would be nice if we could make the table
   more persistent.  The key is getting notification of changes to
   the CFG (particularly edge removal, creation and redirection).  */

static hash_map<edge, tree> *edge_to_cases;

/* If we record edge_to_cases, this bitmap will hold indexes
   of basic blocks that end in a GIMPLE_SWITCH which we touched
   due to edge manipulations.  */

static bitmap touched_switch_bbs;

/* OpenMP region indexes for blocks during the CFG pass.  */
static vec<int> bb_to_omp_idx;

/* CFG statistics.  */
struct cfg_stats_d
{
  long num_merged_labels;
};

static struct cfg_stats_d cfg_stats;

/* Data to pass to replace_block_vars_by_duplicates_1.  */
struct replace_decls_d
{
  hash_map<tree, tree> *vars_map;
  tree to_context;
};

/* Hash table to store the last discriminator assigned for each locus.  */
struct locus_discrim_map
{
  int location_line;
  int discriminator;
};

/* Hashtable helpers.  */

struct locus_discrim_hasher : free_ptr_hash <locus_discrim_map>
{
  static inline hashval_t hash (const locus_discrim_map *);
  static inline bool equal (const locus_discrim_map *,
                            const locus_discrim_map *);
};

/* Trivial hash function for a location_t.  ITEM is a pointer to
   a hash table entry that maps a location_t to a discriminator.  */

inline hashval_t
locus_discrim_hasher::hash (const locus_discrim_map *item)
{
  return item->location_line;
}

/* Equality function for the locus-to-discriminator map.  A and B
   point to the two hash table entries to compare.  */

inline bool
locus_discrim_hasher::equal (const locus_discrim_map *a,
                             const locus_discrim_map *b)
{
  return a->location_line == b->location_line;
}

static hash_table<locus_discrim_hasher> *discriminator_per_locus;
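
/* Illustrative sketch (an editorial example, not from the original
   sources): discriminators let sample-based profilers tell apart
   distinct basic blocks that share one source line.  For a line such as

       if (x) foo (); else bar ();

   the condition, the foo () call and the bar () call each land in their
   own basic block, all with the same line number; discriminators such as
   0, 1 and 2 distinguish them.  The exact values depend on block layout,
   so treat the numbers as an assumption.  */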

/* Basic blocks and flowgraphs.  */
static void make_blocks (gimple_seq);

/* Edges.  */
static void make_edges (void);
static void assign_discriminators (void);
static void make_cond_expr_edges (basic_block);
static void make_gimple_switch_edges (gswitch *, basic_block);
static bool make_goto_expr_edges (basic_block);
static void make_gimple_asm_edges (basic_block);
static edge gimple_redirect_edge_and_branch (edge, basic_block);
static edge gimple_try_redirect_by_replacing_jump (edge, basic_block);

/* Various helpers.  */
static inline bool stmt_starts_bb_p (gimple *, gimple *);
static bool gimple_verify_flow_info (void);
static void gimple_make_forwarder_block (edge);
static gimple *first_non_label_stmt (basic_block);
static bool verify_gimple_transaction (gtransaction *);
static bool call_can_make_abnormal_goto (gimple *);

/* Flowgraph optimization and cleanup.  */
static void gimple_merge_blocks (basic_block, basic_block);
static bool gimple_can_merge_blocks_p (basic_block, basic_block);
static void remove_bb (basic_block);
static edge find_taken_edge_computed_goto (basic_block, tree);
static edge find_taken_edge_cond_expr (const gcond *, tree);

void
init_empty_tree_cfg_for_function (struct function *fn)
{
  /* Initialize the basic block array.  */
  init_flow (fn);
  profile_status_for_fn (fn) = PROFILE_ABSENT;
  n_basic_blocks_for_fn (fn) = NUM_FIXED_BLOCKS;
  last_basic_block_for_fn (fn) = NUM_FIXED_BLOCKS;
  vec_safe_grow_cleared (basic_block_info_for_fn (fn),
                         initial_cfg_capacity, true);

  /* Build a mapping of labels to their associated blocks.  */
  vec_safe_grow_cleared (label_to_block_map_for_fn (fn),
                         initial_cfg_capacity, true);

  SET_BASIC_BLOCK_FOR_FN (fn, ENTRY_BLOCK, ENTRY_BLOCK_PTR_FOR_FN (fn));
  SET_BASIC_BLOCK_FOR_FN (fn, EXIT_BLOCK, EXIT_BLOCK_PTR_FOR_FN (fn));

  ENTRY_BLOCK_PTR_FOR_FN (fn)->next_bb
    = EXIT_BLOCK_PTR_FOR_FN (fn);
  EXIT_BLOCK_PTR_FOR_FN (fn)->prev_bb
    = ENTRY_BLOCK_PTR_FOR_FN (fn);
}

void
init_empty_tree_cfg (void)
{
  init_empty_tree_cfg_for_function (cfun);
}

/*---------------------------------------------------------------------------
                              Create basic blocks
---------------------------------------------------------------------------*/

/* Entry point to the CFG builder for trees.  SEQ is the sequence of
   statements to be added to the flowgraph.  */

static void
build_gimple_cfg (gimple_seq seq)
{
  /* Register specific gimple functions.  */
  gimple_register_cfg_hooks ();

  memset ((void *) &cfg_stats, 0, sizeof (cfg_stats));

  init_empty_tree_cfg ();

  make_blocks (seq);

  /* Make sure there is always at least one block, even if it's empty.  */
  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (cfun));

  /* Adjust the size of the array.  */
  if (basic_block_info_for_fn (cfun)->length ()
      < (size_t) n_basic_blocks_for_fn (cfun))
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
                           n_basic_blocks_for_fn (cfun));

  /* To speed up statement iterator walks, we first purge dead labels.  */
  cleanup_dead_labels ();

  /* Group case nodes to reduce the number of edges.
     We do this after cleaning up dead labels because otherwise we miss
     a lot of obvious case merging opportunities.  */
  group_case_labels ();

  /* Create the edges of the flowgraph.  */
  discriminator_per_locus = new hash_table<locus_discrim_hasher> (13);
  make_edges ();
  assign_discriminators ();
  cleanup_dead_labels ();
  delete discriminator_per_locus;
  discriminator_per_locus = NULL;
}

/* Look for ANNOTATE calls with loop annotation kind in BB; if found, remove
   them and propagate the information to LOOP.  We assume that the
   annotations come immediately before the condition in BB, if any.  */

static void
replace_loop_annotate_in_block (basic_block bb, class loop *loop)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gimple *stmt = gsi_stmt (gsi);

  if (!(stmt && gimple_code (stmt) == GIMPLE_COND))
    return;

  for (gsi_prev_nondebug (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) != GIMPLE_CALL)
        break;
      if (!gimple_call_internal_p (stmt)
          || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
        break;

      switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
        {
        case annot_expr_ivdep_kind:
          loop->safelen = INT_MAX;
          break;
        case annot_expr_unroll_kind:
          loop->unroll
            = (unsigned short) tree_to_shwi (gimple_call_arg (stmt, 2));
          cfun->has_unroll = true;
          break;
        case annot_expr_no_vector_kind:
          loop->dont_vectorize = true;
          break;
        case annot_expr_vector_kind:
          loop->force_vectorize = true;
          cfun->has_force_vectorize_loops = true;
          break;
        case annot_expr_parallel_kind:
          loop->can_be_parallel = true;
          loop->safelen = INT_MAX;
          break;
        default:
          gcc_unreachable ();
        }

      stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                  gimple_call_arg (stmt, 0));
      gsi_replace (&gsi, stmt, true);
    }
}
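
/* A hedged example of the input this function consumes: for a loop
   preceded by "#pragma GCC ivdep", the gimplifier emits an internal call
   just before the loop's exit test, roughly

       _2 = .ANNOTATE (_1, annot_expr_ivdep_kind, 0);
       if (_2 != 0) goto <body>; else goto <exit>;

   The code above records the annotation on the loop (here by setting
   loop->safelen = INT_MAX) and then replaces the call with the plain
   assignment _2 = _1.  The operand layout shown is an illustration,
   not a specification of the dump format.  */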

/* Look for ANNOTATE calls with loop annotation kind; if found, remove
   them and propagate the information to the loop.  We assume that the
   annotations come immediately before the condition of the loop.  */

static void
replace_loop_annotate (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  gimple *stmt;

  for (auto loop : loops_list (cfun, 0))
    {
      /* First look into the header.  */
      replace_loop_annotate_in_block (loop->header, loop);

      /* Then look into the latch, if any.  */
      if (loop->latch)
        replace_loop_annotate_in_block (loop->latch, loop);

      /* Push the global flag_finite_loops state down to individual loops.  */
      loop->finite_p = flag_finite_loops;
    }

  /* Remove IFN_ANNOTATE.  Safeguard for the case loop->latch == NULL.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi); gsi_prev (&gsi))
        {
          stmt = gsi_stmt (gsi);
          if (gimple_code (stmt) != GIMPLE_CALL)
            continue;
          if (!gimple_call_internal_p (stmt)
              || gimple_call_internal_fn (stmt) != IFN_ANNOTATE)
            continue;

          switch ((annot_expr_kind) tree_to_shwi (gimple_call_arg (stmt, 1)))
            {
            case annot_expr_ivdep_kind:
            case annot_expr_unroll_kind:
            case annot_expr_no_vector_kind:
            case annot_expr_vector_kind:
            case annot_expr_parallel_kind:
              break;
            default:
              gcc_unreachable ();
            }

          warning_at (gimple_location (stmt), 0, "ignoring loop annotation");
          stmt = gimple_build_assign (gimple_call_lhs (stmt),
                                      gimple_call_arg (stmt, 0));
          gsi_replace (&gsi, stmt, true);
        }
    }
}

static unsigned int
execute_build_cfg (void)
{
  gimple_seq body = gimple_body (current_function_decl);

  build_gimple_cfg (body);
  gimple_set_body (current_function_decl, NULL);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Scope blocks:\n");
      dump_scope_blocks (dump_file, dump_flags);
    }
  cleanup_tree_cfg ();

  bb_to_omp_idx.release ();

  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);
  replace_loop_annotate ();
  return 0;
}

namespace {

const pass_data pass_data_build_cfg =
{
  GIMPLE_PASS, /* type */
  "cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_CFG, /* tv_id */
  PROP_gimple_leh, /* properties_required */
  ( PROP_cfg | PROP_loops ), /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_build_cfg : public gimple_opt_pass
{
public:
  pass_build_cfg (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_build_cfg, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute (function *) final override
  {
    return execute_build_cfg ();
  }

}; // class pass_build_cfg

} // anon namespace

gimple_opt_pass *
make_pass_build_cfg (gcc::context *ctxt)
{
  return new pass_build_cfg (ctxt);
}


/* Return true if T is a computed goto.  */

bool
computed_goto_p (gimple *t)
{
  return (gimple_code (t) == GIMPLE_GOTO
          && TREE_CODE (gimple_goto_dest (t)) != LABEL_DECL);
}
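
/* For example (a sketch using the GNU C labels-as-values extension):

       void *p = &&lab;
       goto *p;      // computed goto: destination is a pointer value
     lab:
       ;

   After gimplification the GIMPLE_GOTO's destination is the variable or
   SSA name holding the address, not a LABEL_DECL, so computed_goto_p
   returns true for it.  */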

/* Returns true if the sequence of statements STMTS only contains
   a call to __builtin_unreachable ().  */

bool
gimple_seq_unreachable_p (gimple_seq stmts)
{
  if (stmts == NULL
      /* Return false if -fsanitize=unreachable, we don't want to
         optimize away those calls, but rather turn them into
         __ubsan_handle_builtin_unreachable () or __builtin_trap ()
         later.  */
      || sanitize_flags_p (SANITIZE_UNREACHABLE))
    return false;

  gimple_stmt_iterator gsi = gsi_last (stmts);

  if (!gimple_call_builtin_p (gsi_stmt (gsi), BUILT_IN_UNREACHABLE))
    return false;

  for (gsi_prev (&gsi); !gsi_end_p (gsi); gsi_prev (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) != GIMPLE_LABEL
          && !is_gimple_debug (stmt)
          && !gimple_clobber_p (stmt))
        return false;
    }
  return true;
}
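
/* E.g. a sequence consisting only of

     lab:
       x = {CLOBBER};
       __builtin_unreachable ();

   counts as unreachable: labels, debug statements and clobbers before
   the final __builtin_unreachable () call are ignored.  (Illustrative;
   with -fsanitize=unreachable the function deliberately answers
   false.)  */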

/* Returns true for edge E where e->src ends with a GIMPLE_COND and
   the other edge points to a bb with just __builtin_unreachable ().
   I.e. return true for C->M edge in:
   <bb C>:
   ...
   if (something)
     goto <bb N>;
   else
     goto <bb M>;
   <bb N>:
   __builtin_unreachable ();
   <bb M>:  */

bool
assert_unreachable_fallthru_edge_p (edge e)
{
  basic_block pred_bb = e->src;
  if (safe_is_a <gcond *> (*gsi_last_bb (pred_bb)))
    {
      basic_block other_bb = EDGE_SUCC (pred_bb, 0)->dest;
      if (other_bb == e->dest)
        other_bb = EDGE_SUCC (pred_bb, 1)->dest;
      if (EDGE_COUNT (other_bb->succs) == 0)
        return gimple_seq_unreachable_p (bb_seq (other_bb));
    }
  return false;
}


/* Initialize GF_CALL_CTRL_ALTERING flag, which indicates the call
   could alter control flow except via eh.  We initialize the flag at
   CFG build time and only ever clear it later.  */

static void
gimple_call_initialize_ctrl_altering (gimple *stmt)
{
  int flags = gimple_call_flags (stmt);

  /* A call alters control flow if it can make an abnormal goto.  */
  if (call_can_make_abnormal_goto (stmt)
      /* A call also alters control flow if it does not return.  */
      || flags & ECF_NORETURN
      /* TM ending statements have backedges out of the transaction.
         Return true so we split the basic block containing them.
         Note that the TM_BUILTIN test is merely an optimization.  */
      || ((flags & ECF_TM_BUILTIN)
          && is_tm_ending_fndecl (gimple_call_fndecl (stmt)))
      /* BUILT_IN_RETURN call is same as return statement.  */
      || gimple_call_builtin_p (stmt, BUILT_IN_RETURN)
      /* IFN_UNIQUE should be the last insn, to make checking for it
         as cheap as possible.  */
      || (gimple_call_internal_p (stmt)
          && gimple_call_internal_unique_p (stmt)))
    gimple_call_set_ctrl_altering (stmt, true);
  else
    gimple_call_set_ctrl_altering (stmt, false);
}
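
/* Hedged examples of calls that get GF_CALL_CTRL_ALTERING set here:

       exit (1);                 // ECF_NORETURN
       setjmp (buf);             // may be re-entered via an abnormal goto
       __builtin_return (rv);    // acts like a return statement

   Marking them control-altering makes make_blocks_1 end the basic block
   right after the call.  The specific calls are illustrations only; the
   authoritative tests are the ones in the condition above.  */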


/* Insert SEQ after BB and build a flowgraph.  */

static basic_block
make_blocks_1 (gimple_seq seq, basic_block bb)
{
  gimple_stmt_iterator i = gsi_start (seq);
  gimple *stmt = NULL;
  gimple *prev_stmt = NULL;
  bool start_new_block = true;
  bool first_stmt_of_seq = true;

  while (!gsi_end_p (i))
    {
      /* PREV_STMT should only be set to a debug stmt if the debug
         stmt is before nondebug stmts.  Once stmt reaches a nondebug
         nonlabel, prev_stmt will be set to it, so that
         stmt_starts_bb_p will know to start a new block if a label is
         found.  However, if stmt was a label after debug stmts only,
         keep the label in prev_stmt even if we find further debug
         stmts, for there may be other labels after them, and they
         should land in the same block.  */
      if (!prev_stmt || !stmt || !is_gimple_debug (stmt))
        prev_stmt = stmt;
      stmt = gsi_stmt (i);

      if (stmt && is_gimple_call (stmt))
        gimple_call_initialize_ctrl_altering (stmt);

      /* If the statement starts a new basic block or if we have determined
         in a previous pass that we need to create a new block for STMT, do
         so now.  */
      if (start_new_block || stmt_starts_bb_p (stmt, prev_stmt))
        {
          if (!first_stmt_of_seq)
            gsi_split_seq_before (&i, &seq);
          bb = create_basic_block (seq, bb);
          start_new_block = false;
          prev_stmt = NULL;
        }

      /* Now add STMT to BB and create the subgraphs for special statement
         codes.  */
      gimple_set_bb (stmt, bb);

      /* If STMT is a basic block terminator, set START_NEW_BLOCK for the
         next iteration.  */
      if (stmt_ends_bb_p (stmt))
        {
          /* If the stmt can make abnormal goto use a new temporary
             for the assignment to the LHS.  This makes sure the old value
             of the LHS is available on the abnormal edge.  Otherwise
             we will end up with overlapping life-ranges for abnormal
             SSA names.  */
          if (gimple_has_lhs (stmt)
              && stmt_can_make_abnormal_goto (stmt)
              && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt))))
            {
              tree lhs = gimple_get_lhs (stmt);
              tree tmp = create_tmp_var (TREE_TYPE (lhs));
              gimple *s = gimple_build_assign (lhs, tmp);
              gimple_set_location (s, gimple_location (stmt));
              gimple_set_block (s, gimple_block (stmt));
              gimple_set_lhs (stmt, tmp);
              gsi_insert_after (&i, s, GSI_SAME_STMT);
            }
          start_new_block = true;
        }

      gsi_next (&i);
      first_stmt_of_seq = false;
    }
  return bb;
}
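
/* A sketch of the LHS-temporary rewrite performed above, assuming a
   returns-twice call such as setjmp:

       x = setjmp (buf);

   becomes, roughly,

       tmp = setjmp (buf);
       x = tmp;

   so the previous value of x is still available on the abnormal edge
   out of the call, and abnormal SSA names do not get overlapping live
   ranges.  */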

/* Build a flowgraph for the sequence of stmts SEQ.  */

static void
make_blocks (gimple_seq seq)
{
  /* Look for debug markers right before labels, and move the debug
     stmts after the labels.  Accepting labels among debug markers
     adds no value, just complexity; if we wanted to annotate labels
     with view numbers (so sequencing among markers would matter) or
     somesuch, we're probably better off still moving the labels, but
     adding other debug annotations in their original positions or
     emitting nonbind or bind markers associated with the labels in
     the original position of the labels.

     Moving labels would probably be simpler, but we can't do that:
     moving labels assigns label ids to them, and doing so because of
     debug markers makes for -fcompare-debug and possibly even codegen
     differences.  So, we have to move the debug stmts instead.  To
     that end, we scan SEQ backwards, marking the position of the
     latest (earliest we find) label, and moving debug stmts that are
     not separated from it by nondebug nonlabel stmts after the
     label.  */
  if (MAY_HAVE_DEBUG_MARKER_STMTS)
    {
      gimple_stmt_iterator label = gsi_none ();

      for (gimple_stmt_iterator i = gsi_last (seq); !gsi_end_p (i); gsi_prev (&i))
        {
          gimple *stmt = gsi_stmt (i);

          /* If this is the first label we encounter (latest in SEQ)
             before nondebug stmts, record its position.  */
          if (is_a <glabel *> (stmt))
            {
              if (gsi_end_p (label))
                label = i;
              continue;
            }

          /* Without a recorded label position to move debug stmts to,
             there's nothing to do.  */
          if (gsi_end_p (label))
            continue;

          /* Move the debug stmt at I after LABEL.  */
          if (is_gimple_debug (stmt))
            {
              gcc_assert (gimple_debug_nonbind_marker_p (stmt));
              /* As STMT is removed, I advances to the stmt after
                 STMT, so the gsi_prev in the for "increment"
                 expression gets us to the stmt we're to visit after
                 STMT.  LABEL, however, would advance to the moved
                 stmt if we passed it to gsi_move_after, so pass it a
                 copy instead, so as to keep LABEL pointing to the
                 LABEL.  */
              gimple_stmt_iterator copy = label;
              gsi_move_after (&i, &copy);
              continue;
            }

          /* There aren't any (more?) debug stmts before label, so
             there isn't anything else to move after it.  */
          label = gsi_none ();
        }
    }

  make_blocks_1 (seq, ENTRY_BLOCK_PTR_FOR_FN (cfun));
}
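
/* Example of the debug-marker shuffling above: a marker emitted right
   before a label, as in

       # DEBUG BEGIN_STMT
     lab:
       x = 1;

   is moved after the label,

     lab:
       # DEBUG BEGIN_STMT
       x = 1;

   so the label can start its basic block without perturbing label ids.
   (A sketch of the effect, not a dump reproduced verbatim.)  */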

/* Create and return a new empty basic block after bb AFTER.  */

static basic_block
create_bb (void *h, void *e, basic_block after)
{
  basic_block bb;

  gcc_assert (!e);

  /* Create and initialize a new basic block.  Since alloc_block uses
     GC allocation that clears memory to allocate a basic block, we do
     not have to clear the newly allocated basic block here.  */
  bb = alloc_block ();

  bb->index = last_basic_block_for_fn (cfun);
  bb->flags = BB_NEW;
  set_bb_seq (bb, h ? (gimple_seq) h : NULL);

  /* Add the new block to the linked list of blocks.  */
  link_block (bb, after);

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      == basic_block_info_for_fn (cfun)->length ())
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
                           last_basic_block_for_fn (cfun) + 1);

  /* Add the newly created block to the array.  */
  SET_BASIC_BLOCK_FOR_FN (cfun, last_basic_block_for_fn (cfun), bb);

  n_basic_blocks_for_fn (cfun)++;
  last_basic_block_for_fn (cfun)++;

  return bb;
}


/*---------------------------------------------------------------------------
                                 Edge creation
---------------------------------------------------------------------------*/

/* If basic block BB has an abnormal edge to a basic block
   containing IFN_ABNORMAL_DISPATCHER internal call, return
   the dispatcher's basic block, otherwise return NULL.  */

basic_block
get_abnormal_succ_dispatcher (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    if ((e->flags & (EDGE_ABNORMAL | EDGE_EH)) == EDGE_ABNORMAL)
      {
        gimple_stmt_iterator gsi
          = gsi_start_nondebug_after_labels_bb (e->dest);
        gimple *g = gsi_stmt (gsi);
        if (g && gimple_call_internal_p (g, IFN_ABNORMAL_DISPATCHER))
          return e->dest;
      }
  return NULL;
}

/* Helper function for make_edges.  Create a basic block with
   an ABNORMAL_DISPATCHER internal call in it if needed, and
   create abnormal edges from BBS to it and from it to FOR_BB
   if COMPUTED_GOTO is false, otherwise factor the computed gotos.  */

static void
handle_abnormal_edges (basic_block *dispatcher_bbs, basic_block for_bb,
                       auto_vec<basic_block> *bbs, bool computed_goto)
{
  basic_block *dispatcher = dispatcher_bbs + (computed_goto ? 1 : 0);
  unsigned int idx = 0;
  basic_block bb;
  bool inner = false;

  if (!bb_to_omp_idx.is_empty ())
    {
      dispatcher = dispatcher_bbs + 2 * bb_to_omp_idx[for_bb->index];
      if (bb_to_omp_idx[for_bb->index] != 0)
        inner = true;
    }

  /* If the dispatcher has been created already, then there are basic
     blocks with abnormal edges to it, so just make a new edge to
     for_bb.  */
  if (*dispatcher == NULL)
    {
      /* Check if there are any basic blocks that need to have
         abnormal edges to this dispatcher.  If there are none, return
         early.  */
      if (bb_to_omp_idx.is_empty ())
        {
          if (bbs->is_empty ())
            return;
        }
      else
        {
          FOR_EACH_VEC_ELT (*bbs, idx, bb)
            if (bb_to_omp_idx[bb->index] == bb_to_omp_idx[for_bb->index])
              break;
          if (bb == NULL)
            return;
        }

      /* Create the dispatcher bb.  */
      *dispatcher = create_basic_block (NULL, for_bb);
      if (computed_goto)
        {
          /* Factor computed gotos into a common computed goto site.  Also
             record the location of that site so that we can un-factor the
             gotos after we have converted back to normal form.  */
          gimple_stmt_iterator gsi = gsi_start_bb (*dispatcher);

          /* Create the destination of the factored goto.  Each original
             computed goto will put its desired destination into this
             variable and jump to the label we create immediately below.  */
          tree var = create_tmp_var (ptr_type_node, "gotovar");

          /* Build a label for the new block which will contain the
             factored computed goto.  */
          tree factored_label_decl
            = create_artificial_label (UNKNOWN_LOCATION);
          gimple *factored_computed_goto_label
            = gimple_build_label (factored_label_decl);
          gsi_insert_after (&gsi, factored_computed_goto_label, GSI_NEW_STMT);

          /* Build our new computed goto.  */
          gimple *factored_computed_goto = gimple_build_goto (var);
          gsi_insert_after (&gsi, factored_computed_goto, GSI_NEW_STMT);

          FOR_EACH_VEC_ELT (*bbs, idx, bb)
            {
              if (!bb_to_omp_idx.is_empty ()
                  && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
                continue;

              gsi = gsi_last_bb (bb);
              gimple *last = gsi_stmt (gsi);

              gcc_assert (computed_goto_p (last));

              /* Copy the original computed goto's destination into VAR.  */
              gimple *assignment
                = gimple_build_assign (var, gimple_goto_dest (last));
              gsi_insert_before (&gsi, assignment, GSI_SAME_STMT);

              edge e = make_edge (bb, *dispatcher, EDGE_FALLTHRU);
              e->goto_locus = gimple_location (last);
              gsi_remove (&gsi, true);
            }
        }
      else
        {
          tree arg = inner ? boolean_true_node : boolean_false_node;
          gcall *g = gimple_build_call_internal (IFN_ABNORMAL_DISPATCHER,
                                                 1, arg);
          gimple_call_set_ctrl_altering (g, true);
          gimple_stmt_iterator gsi = gsi_after_labels (*dispatcher);
          gsi_insert_after (&gsi, g, GSI_NEW_STMT);

          /* Create predecessor edges of the dispatcher.  */
          FOR_EACH_VEC_ELT (*bbs, idx, bb)
            {
              if (!bb_to_omp_idx.is_empty ()
                  && bb_to_omp_idx[bb->index] != bb_to_omp_idx[for_bb->index])
                continue;
              make_edge (bb, *dispatcher, EDGE_ABNORMAL);
            }
        }
    }

  make_edge (*dispatcher, for_bb, EDGE_ABNORMAL);
}
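
/* Sketch of the computed-goto factoring done above.  Computed gotos
   scattered over the function, e.g.

       goto *p_1;    // in bb A
       goto *q_2;    // in bb B

   are rewritten to store their destination and fall through to one
   shared site:

       gotovar = p_1;    // in bb A, fallthru edge to <factored>
       gotovar = q_2;    // in bb B, fallthru edge to <factored>
     <factored>:
       goto *gotovar;

   Only the factored block then needs abnormal edges to every
   FORCED_LABEL, keeping the edge count linear rather than quadratic.
   (Illustrative pseudo-GIMPLE.)  */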

/* Creates outgoing edges for BB.  Returns 1 when it ends with a
   computed goto, returns 2 when it ends with a statement that
   might return to this function via a nonlocal goto, otherwise
   returns 0.  Updates *PCUR_REGION with the OMP region this BB is in.  */

static int
make_edges_bb (basic_block bb, struct omp_region **pcur_region, int *pomp_index)
{
  gimple *last = *gsi_last_bb (bb);
  bool fallthru = false;
  int ret = 0;

  if (!last)
    return ret;

  switch (gimple_code (last))
    {
    case GIMPLE_GOTO:
      if (make_goto_expr_edges (bb))
        ret = 1;
      fallthru = false;
      break;
    case GIMPLE_RETURN:
      {
        edge e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
        e->goto_locus = gimple_location (last);
        fallthru = false;
      }
      break;
    case GIMPLE_COND:
      make_cond_expr_edges (bb);
      fallthru = false;
      break;
    case GIMPLE_SWITCH:
      make_gimple_switch_edges (as_a <gswitch *> (last), bb);
      fallthru = false;
      break;
    case GIMPLE_RESX:
      make_eh_edge (last);
      fallthru = false;
      break;
    case GIMPLE_EH_DISPATCH:
      fallthru = make_eh_dispatch_edges (as_a <geh_dispatch *> (last));
      break;

    case GIMPLE_CALL:
      /* If this function receives a nonlocal goto, then we need to
         make edges from this call site to all the nonlocal goto
         handlers.  */
      if (stmt_can_make_abnormal_goto (last))
        ret = 2;

      /* If this statement has reachable exception handlers, then
         create abnormal edges to them.  */
      make_eh_edge (last);

      /* BUILTIN_RETURN is really a return statement.  */
      if (gimple_call_builtin_p (last, BUILT_IN_RETURN))
        {
          make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
          fallthru = false;
        }
      /* Some calls are known not to return.  */
      else
        fallthru = !gimple_call_noreturn_p (last);
      break;

    case GIMPLE_ASSIGN:
      /* A GIMPLE_ASSIGN may throw internally and thus be considered
         control-altering.  */
      if (is_ctrl_altering_stmt (last))
        make_eh_edge (last);
      fallthru = true;
      break;

    case GIMPLE_ASM:
      make_gimple_asm_edges (bb);
      fallthru = true;
      break;

    CASE_GIMPLE_OMP:
      fallthru = omp_make_gimple_edges (bb, pcur_region, pomp_index);
      break;

    case GIMPLE_TRANSACTION:
      {
        gtransaction *txn = as_a <gtransaction *> (last);
        tree label1 = gimple_transaction_label_norm (txn);
        tree label2 = gimple_transaction_label_uninst (txn);

        if (label1)
          make_edge (bb, label_to_block (cfun, label1), EDGE_FALLTHRU);
        if (label2)
          make_edge (bb, label_to_block (cfun, label2),
                     EDGE_TM_UNINSTRUMENTED | (label1 ? 0 : EDGE_FALLTHRU));

        tree label3 = gimple_transaction_label_over (txn);
        if (gimple_transaction_subcode (txn)
            & (GTMA_HAVE_ABORT | GTMA_IS_OUTER))
          make_edge (bb, label_to_block (cfun, label3), EDGE_TM_ABORT);

        fallthru = false;
      }
      break;

    default:
      gcc_assert (!stmt_ends_bb_p (last));
      fallthru = true;
      break;
    }

  if (fallthru)
    make_edge (bb, bb->next_bb, EDGE_FALLTHRU);

  return ret;
}

/* Join all the blocks in the flowgraph.  */

static void
make_edges (void)
{
  basic_block bb;
  struct omp_region *cur_region = NULL;
  auto_vec<basic_block> ab_edge_goto;
  auto_vec<basic_block> ab_edge_call;
  int cur_omp_region_idx = 0;

  /* Create an edge from entry to the first block with executable
     statements in it.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun),
             BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS),
             EDGE_FALLTHRU);

  /* Traverse the basic block array placing edges.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      int mer;

      if (!bb_to_omp_idx.is_empty ())
        bb_to_omp_idx[bb->index] = cur_omp_region_idx;

      mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
      if (mer == 1)
        ab_edge_goto.safe_push (bb);
      else if (mer == 2)
        ab_edge_call.safe_push (bb);

      if (cur_region && bb_to_omp_idx.is_empty ())
        bb_to_omp_idx.safe_grow_cleared (n_basic_blocks_for_fn (cfun), true);
    }

  /* Computed gotos are hell to deal with, especially if there are
     lots of them with a large number of destinations.  So we factor
     them to a common computed goto location before we build the
     edge list.  After we convert back to normal form, we will un-factor
     the computed gotos since factoring introduces an unwanted jump.
     For non-local gotos and abnormal edges from calls to calls that return
     twice or forced labels, factor the abnormal edges too, by having all
     abnormal edges from the calls go to a common artificial basic block
     with ABNORMAL_DISPATCHER internal call and abnormal edges from that
     basic block to all forced labels and calls returning twice.
     We do this per-OpenMP structured block, because those regions
     are guaranteed to be single entry single exit by the standard,
     so it is not allowed to enter or exit such regions abnormally this way,
     thus all computed gotos, non-local gotos and setjmp/longjmp calls
     must not transfer control across SESE region boundaries.  */
  if (!ab_edge_goto.is_empty () || !ab_edge_call.is_empty ())
    {
      gimple_stmt_iterator gsi;
      basic_block dispatcher_bb_array[2] = { NULL, NULL };
      basic_block *dispatcher_bbs = dispatcher_bb_array;
      int count = n_basic_blocks_for_fn (cfun);

      if (!bb_to_omp_idx.is_empty ())
        dispatcher_bbs = XCNEWVEC (basic_block, 2 * count);

      FOR_EACH_BB_FN (bb, cfun)
        {
          for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
            {
              glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (gsi));
              tree target;

              if (!label_stmt)
                break;

              target = gimple_label_label (label_stmt);

              /* Make an edge to every label block that has been marked as a
                 potential target for a computed goto or a non-local goto.  */
              if (FORCED_LABEL (target))
                handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_goto,
                                       true);
              if (DECL_NONLOCAL (target))
                {
                  handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_call,
                                         false);
                  break;
                }
            }

          if (!gsi_end_p (gsi) && is_gimple_debug (gsi_stmt (gsi)))
            gsi_next_nondebug (&gsi);
          if (!gsi_end_p (gsi))
            {
              /* Make an edge to every setjmp-like call.  */
              gimple *call_stmt = gsi_stmt (gsi);
              if (is_gimple_call (call_stmt)
                  && ((gimple_call_flags (call_stmt) & ECF_RETURNS_TWICE)
                      || gimple_call_builtin_p (call_stmt,
                                                BUILT_IN_SETJMP_RECEIVER)))
                handle_abnormal_edges (dispatcher_bbs, bb, &ab_edge_call,
                                       false);
            }
        }

      if (!bb_to_omp_idx.is_empty ())
        XDELETE (dispatcher_bbs);
    }

  omp_free_regions ();
}

/* Add SEQ after GSI.  Start new bb after GSI, and create further bbs as
   needed.  Returns true if new bbs were created.
   Note: This is transitional code, and should not be used for new code.  We
   should be able to get rid of this by rewriting all target va-arg
   gimplification hooks to use an interface gimple_build_cond_value as described
   in https://gcc.gnu.org/ml/gcc-patches/2015-02/msg01194.html.  */

bool
gimple_find_sub_bbs (gimple_seq seq, gimple_stmt_iterator *gsi)
{
  gimple *stmt = gsi_stmt (*gsi);
  basic_block bb = gimple_bb (stmt);
  basic_block lastbb, afterbb;
  int old_num_bbs = n_basic_blocks_for_fn (cfun);
  edge e;
  lastbb = make_blocks_1 (seq, bb);
  if (old_num_bbs == n_basic_blocks_for_fn (cfun))
    return false;
  e = split_block (bb, stmt);
  /* Move e->dest to come after the new basic blocks.  */
  afterbb = e->dest;
  unlink_block (afterbb);
  link_block (afterbb, lastbb);
  redirect_edge_succ (e, bb->next_bb);
  bb = bb->next_bb;
  while (bb != afterbb)
    {
      struct omp_region *cur_region = NULL;
      profile_count cnt = profile_count::zero ();
      bool all = true;

      int cur_omp_region_idx = 0;
      int mer = make_edges_bb (bb, &cur_region, &cur_omp_region_idx);
      gcc_assert (!mer && !cur_region);
      add_bb_to_loop (bb, afterbb->loop_father);

      edge e;
      edge_iterator ei;
      FOR_EACH_EDGE (e, ei, bb->preds)
        {
          if (e->count ().initialized_p ())
            cnt += e->count ();
          else
            all = false;
        }
      tree_guess_outgoing_edge_probabilities (bb);
      if (all || profile_status_for_fn (cfun) == PROFILE_READ)
        bb->count = cnt;

      bb = bb->next_bb;
    }
  return true;
}

/* Find the next available discriminator value for LINE.  The
   discriminator distinguishes among several basic blocks that
   share a common locus, allowing for more accurate sample-based
   profiling.  */

static int
next_discriminator_for_locus (int line)
{
  struct locus_discrim_map item;
  struct locus_discrim_map **slot;

  item.location_line = line;
  item.discriminator = 0;
  slot = discriminator_per_locus->find_slot_with_hash (&item, line, INSERT);
  gcc_assert (slot);
  if (*slot == HTAB_EMPTY_ENTRY)
    {
      *slot = XNEW (struct locus_discrim_map);
      gcc_assert (*slot);
      (*slot)->location_line = line;
      (*slot)->discriminator = 0;
    }
  (*slot)->discriminator++;
  return (*slot)->discriminator;
}

/* Return TRUE if LOCUS1 and LOCUS2 refer to the same source line.  */

static bool
same_line_p (location_t locus1, expanded_location *from, location_t locus2)
{
  expanded_location to;

  if (locus1 == locus2)
    return true;

  to = expand_location (locus2);

  if (from->line != to.line)
    return false;
  if (from->file == to.file)
    return true;
  return (from->file != NULL
          && to.file != NULL
          && filename_cmp (from->file, to.file) == 0);
}

/* Assign a unique discriminator value to all statements in block BB that
   have the same line number as LOCUS.  */

static void
assign_discriminator (location_t locus, basic_block bb)
{
  gimple_stmt_iterator gsi;
  int discriminator;

  if (locus == UNKNOWN_LOCATION)
    return;

  expanded_location locus_e = expand_location (locus);

  discriminator = next_discriminator_for_locus (locus_e.line);

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      location_t stmt_locus = gimple_location (stmt);
      if (same_line_p (locus, &locus_e, stmt_locus))
        gimple_set_location (stmt,
                             location_with_discriminator (stmt_locus,
                                                          discriminator));
    }
}

/* Assign discriminators to statement locations.  */

static void
assign_discriminators (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e;
      edge_iterator ei;
      gimple_stmt_iterator gsi;
      location_t curr_locus = UNKNOWN_LOCATION;
      expanded_location curr_locus_e = {};
      int curr_discr = 0;

      /* Traverse the basic block.  If two function calls within the block
         are mapped to the same line, assign a new discriminator, because
         a call stmt could be a split point of a basic block.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
        {
          gimple *stmt = gsi_stmt (gsi);

          /* Don't allow debug stmts to affect discriminators, but
             allow them to take discriminators when they're on the
             same line as the preceding nondebug stmt.  */
          if (is_gimple_debug (stmt))
            {
              if (curr_locus != UNKNOWN_LOCATION
                  && same_line_p (curr_locus, &curr_locus_e,
                                  gimple_location (stmt)))
                {
                  location_t loc = gimple_location (stmt);
                  location_t dloc = location_with_discriminator (loc,
                                                                 curr_discr);
                  gimple_set_location (stmt, dloc);
                }
              continue;
            }
          if (curr_locus == UNKNOWN_LOCATION)
            {
              curr_locus = gimple_location (stmt);
              curr_locus_e = expand_location (curr_locus);
            }
          else if (!same_line_p (curr_locus, &curr_locus_e,
                                 gimple_location (stmt)))
            {
              curr_locus = gimple_location (stmt);
              curr_locus_e = expand_location (curr_locus);
              curr_discr = 0;
            }
          else if (curr_discr != 0)
            {
              location_t loc = gimple_location (stmt);
              location_t dloc = location_with_discriminator (loc, curr_discr);
              gimple_set_location (stmt, dloc);
            }
          /* Allocate a new discriminator for CALL stmt.  */
          if (gimple_code (stmt) == GIMPLE_CALL)
            curr_discr = next_discriminator_for_locus (curr_locus);
        }

      gimple *last = last_nondebug_stmt (bb);
      location_t locus = last ? gimple_location (last) : UNKNOWN_LOCATION;
      if (locus == UNKNOWN_LOCATION)
        continue;

      expanded_location locus_e = expand_location (locus);

      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          gimple *first = first_non_label_stmt (e->dest);
          gimple *last = last_nondebug_stmt (e->dest);

          gimple *stmt_on_same_line = NULL;
          if (first && same_line_p (locus, &locus_e,
                                    gimple_location (first)))
            stmt_on_same_line = first;
          else if (last && same_line_p (locus, &locus_e,
                                        gimple_location (last)))
            stmt_on_same_line = last;

          if (stmt_on_same_line)
            {
              if (has_discriminator (gimple_location (stmt_on_same_line))
                  && !has_discriminator (locus))
                assign_discriminator (locus, bb);
              else
                assign_discriminator (locus, e->dest);
            }
        }
    }
}

/* Create the edges for a GIMPLE_COND starting at block BB.  */

static void
make_cond_expr_edges (basic_block bb)
{
  gcond *entry = as_a <gcond *> (*gsi_last_bb (bb));
  gimple *then_stmt, *else_stmt;
  basic_block then_bb, else_bb;
  tree then_label, else_label;
  edge e;

  gcc_assert (entry);

  /* Entry basic blocks for each component.  */
  then_label = gimple_cond_true_label (entry);
  else_label = gimple_cond_false_label (entry);
  then_bb = label_to_block (cfun, then_label);
  else_bb = label_to_block (cfun, else_label);
  then_stmt = first_stmt (then_bb);
  else_stmt = first_stmt (else_bb);

  e = make_edge (bb, then_bb, EDGE_TRUE_VALUE);
  e->goto_locus = gimple_location (then_stmt);
  e = make_edge (bb, else_bb, EDGE_FALSE_VALUE);
  if (e)
    e->goto_locus = gimple_location (else_stmt);

  /* We do not need the labels anymore.  */
  gimple_cond_set_true_label (entry, NULL_TREE);
  gimple_cond_set_false_label (entry, NULL_TREE);
}
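
/* For instance, a block ending in

       if (a_1 > 0) goto <then_label>; else goto <else_label>;

   gets an EDGE_TRUE_VALUE edge to the block holding <then_label> and an
   EDGE_FALSE_VALUE edge to the block holding <else_label>; once the
   edges carry that information, both labels are cleared from the
   GIMPLE_COND.  (An illustration of the function above.)  */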


/* Called for each element in the hash table (P) as we delete the
   edge to cases hash table.

   Clear all the CASE_CHAINs to prevent problems with copying of
   SWITCH_EXPRs and structure sharing rules, then free the hash table
   element.  */

bool
edge_to_cases_cleanup (edge const &, tree const &value, void *)
{
  tree t, next;

  for (t = value; t; t = next)
    {
      next = CASE_CHAIN (t);
      CASE_CHAIN (t) = NULL;
    }

  return true;
}

/* Start recording information mapping edges to case labels.  */

void
start_recording_case_labels (void)
{
  gcc_assert (edge_to_cases == NULL);
  edge_to_cases = new hash_map<edge, tree>;
  touched_switch_bbs = BITMAP_ALLOC (NULL);
}

/* Return nonzero if we are recording information for case labels.  */

static bool
recording_case_labels_p (void)
{
  return (edge_to_cases != NULL);
}

/* Stop recording information mapping edges to case labels and
   remove any information we have recorded.  */
void
end_recording_case_labels (void)
{
  bitmap_iterator bi;
  unsigned i;
  edge_to_cases->traverse<void *, edge_to_cases_cleanup> (NULL);
  delete edge_to_cases;
  edge_to_cases = NULL;
  EXECUTE_IF_SET_IN_BITMAP (touched_switch_bbs, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      if (bb)
        {
          if (gswitch *stmt = safe_dyn_cast <gswitch *> (*gsi_last_bb (bb)))
            group_case_labels_stmt (stmt);
        }
    }
  BITMAP_FREE (touched_switch_bbs);
}

/* If we are inside a {start,end}_recording_cases block, then return
   a chain of CASE_LABEL_EXPRs from T which reference E.

   Otherwise return NULL.  */

tree
get_cases_for_edge (edge e, gswitch *t)
{
  tree *slot;
  size_t i, n;

  /* If we are not recording cases, then we do not have CASE_LABEL_EXPR
     chains available.  Return NULL so the caller can detect this case.  */
  if (!recording_case_labels_p ())
    return NULL;

  slot = edge_to_cases->get (e);
  if (slot)
    return *slot;

  /* If we did not find E in the hash table, then this must be the first
     time we have been queried for information about E & T.  Add all the
     elements from T to the hash table then perform the query again.  */

  n = gimple_switch_num_labels (t);
  for (i = 0; i < n; i++)
    {
      tree elt = gimple_switch_label (t, i);
      tree lab = CASE_LABEL (elt);
      basic_block label_bb = label_to_block (cfun, lab);
      edge this_edge = find_edge (e->src, label_bb);

      /* Add it to the chain of CASE_LABEL_EXPRs referencing E, or create
         a new chain.  */
      tree &s = edge_to_cases->get_or_insert (this_edge);
      CASE_CHAIN (elt) = s;
      s = elt;
    }

  return *edge_to_cases->get (e);
}

/* Create the edges for a GIMPLE_SWITCH starting at block BB.  */

static void
make_gimple_switch_edges (gswitch *entry, basic_block bb)
{
  size_t i, n;

  n = gimple_switch_num_labels (entry);

  for (i = 0; i < n; ++i)
    {
      basic_block label_bb = gimple_switch_label_bb (cfun, entry, i);
      make_edge (bb, label_bb, 0);
    }
}


/* Return the basic block holding label DEST.  */

basic_block
label_to_block (struct function *ifun, tree dest)
{
  int uid = LABEL_DECL_UID (dest);

  /* We would die hard when faced with an undefined label.  Emit a label to
     the very first basic block.  This will hopefully make even the dataflow
     and undefined variable warnings quite right.  */
  if (seen_error () && uid < 0)
    {
      gimple_stmt_iterator gsi =
        gsi_start_bb (BASIC_BLOCK_FOR_FN (cfun, NUM_FIXED_BLOCKS));
      gimple *stmt;

      stmt = gimple_build_label (dest);
      gsi_insert_before (&gsi, stmt, GSI_NEW_STMT);
      uid = LABEL_DECL_UID (dest);
    }
  if (vec_safe_length (ifun->cfg->x_label_to_block_map) <= (unsigned int) uid)
    return NULL;
  return (*ifun->cfg->x_label_to_block_map)[uid];
}

/* Create edges for a goto statement at block BB.  Returns true
   if abnormal edges should be created.  */

static bool
make_goto_expr_edges (basic_block bb)
{
  gimple_stmt_iterator last = gsi_last_bb (bb);
  gimple *goto_t = gsi_stmt (last);

  /* A simple GOTO creates normal edges.  */
  if (simple_goto_p (goto_t))
    {
      tree dest = gimple_goto_dest (goto_t);
      basic_block label_bb = label_to_block (cfun, dest);
      edge e = make_edge (bb, label_bb, EDGE_FALLTHRU);
      e->goto_locus = gimple_location (goto_t);
      gsi_remove (&last, true);
      return false;
    }

  /* A computed GOTO creates abnormal edges.  */
  return true;
}

/* Create edges for an asm statement with labels at block BB.  */

static void
make_gimple_asm_edges (basic_block bb)
{
  gasm *stmt = as_a <gasm *> (*gsi_last_bb (bb));
  int i, n = gimple_asm_nlabels (stmt);

  for (i = 0; i < n; ++i)
    {
      tree label = TREE_VALUE (gimple_asm_label_op (stmt, i));
      basic_block label_bb = label_to_block (cfun, label);
      make_edge (bb, label_bb, 0);
    }
}

/*---------------------------------------------------------------------------
                               Flowgraph analysis
---------------------------------------------------------------------------*/

/* Clean up useless labels in basic blocks.  This is something we wish
   to do early because it allows us to group case labels before creating
   the edges for the CFG, and it speeds up block statement iterators in
   all passes later on.
   We rerun this pass after the CFG is created, to get rid of the labels
   that are no longer referenced.  After that we do not run it any more,
   since (almost) no new labels should be created.  */

/* A map from basic block index to the leading label of that block.  */
struct label_record
{
  /* The label.  */
  tree label;

  /* True if the label is referenced from somewhere.  */
  bool used;
};

/* Given LABEL return the first label in the same basic block.  */

static tree
main_block_label (tree label, label_record *label_for_bb)
{
  basic_block bb = label_to_block (cfun, label);
  tree main_label = label_for_bb[bb->index].label;

  /* label_to_block may have inserted an undefined label into the chain.  */
  if (!main_label)
    {
      label_for_bb[bb->index].label = label;
      main_label = label;
    }

  label_for_bb[bb->index].used = true;
  return main_label;
}

/* Clean up redundant labels within the exception tree.  */

static void
cleanup_dead_labels_eh (label_record *label_for_bb)
{
  eh_landing_pad lp;
  eh_region r;
  tree lab;
  int i;

  if (cfun->eh == NULL)
    return;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
        lab = main_block_label (lp->post_landing_pad, label_for_bb);
        if (lab != lp->post_landing_pad)
          {
            EH_LANDING_PAD_NR (lp->post_landing_pad) = 0;
            lp->post_landing_pad = lab;
            EH_LANDING_PAD_NR (lab) = lp->index;
          }
      }

  FOR_ALL_EH_REGION (r)
    switch (r->type)
      {
      case ERT_CLEANUP:
      case ERT_MUST_NOT_THROW:
        break;

      case ERT_TRY:
        {
          eh_catch c;
          for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
            {
              lab = c->label;
              if (lab)
                c->label = main_block_label (lab, label_for_bb);
            }
        }
        break;

      case ERT_ALLOWED_EXCEPTIONS:
        lab = r->u.allowed.label;
        if (lab)
          r->u.allowed.label = main_block_label (lab, label_for_bb);
        break;
      }
}


/* Clean up redundant labels.  This is a three-step process:
     1) Find the leading label for each block.
     2) Redirect all references to labels to the leading labels.
     3) Clean up all useless labels.  */

void
cleanup_dead_labels (void)
{
  basic_block bb;
  label_record *label_for_bb = XCNEWVEC (struct label_record,
                                         last_basic_block_for_fn (cfun));

  /* Find a suitable label for each block.  We use the first user-defined
     label if there is one, or otherwise just the first label we see.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); gsi_next (&i))
        {
          tree label;
          glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));

          if (!label_stmt)
            break;

          label = gimple_label_label (label_stmt);

          /* If we have not yet seen a label for the current block,
             remember this one and see if there are more labels.  */
          if (!label_for_bb[bb->index].label)
            {
              label_for_bb[bb->index].label = label;
              continue;
            }

          /* If we did see a label for the current block already, but it
             is an artificially created label, replace it if the current
             label is a user-defined label.  */
          if (!DECL_ARTIFICIAL (label)
              && DECL_ARTIFICIAL (label_for_bb[bb->index].label))
            {
              label_for_bb[bb->index].label = label;
              break;
            }
        }
    }

  /* Now redirect all jumps/branches to the selected label.
     First do so for each block ending in a control statement.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple *stmt = *gsi_last_bb (bb);
      tree label, new_label;

      if (!stmt)
        continue;

      switch (gimple_code (stmt))
        {
        case GIMPLE_COND:
          {
            gcond *cond_stmt = as_a <gcond *> (stmt);
            label = gimple_cond_true_label (cond_stmt);
            if (label)
              {
                new_label = main_block_label (label, label_for_bb);
                if (new_label != label)
                  gimple_cond_set_true_label (cond_stmt, new_label);
              }

            label = gimple_cond_false_label (cond_stmt);
            if (label)
              {
                new_label = main_block_label (label, label_for_bb);
                if (new_label != label)
                  gimple_cond_set_false_label (cond_stmt, new_label);
              }
          }
          break;

        case GIMPLE_SWITCH:
          {
            gswitch *switch_stmt = as_a <gswitch *> (stmt);
            size_t i, n = gimple_switch_num_labels (switch_stmt);

            /* Replace all destination labels.  */
            for (i = 0; i < n; ++i)
              {
                tree case_label = gimple_switch_label (switch_stmt, i);
                label = CASE_LABEL (case_label);
                new_label = main_block_label (label, label_for_bb);
                if (new_label != label)
                  CASE_LABEL (case_label) = new_label;
              }
            break;
          }

        case GIMPLE_ASM:
          {
            gasm *asm_stmt = as_a <gasm *> (stmt);
            int i, n = gimple_asm_nlabels (asm_stmt);

            for (i = 0; i < n; ++i)
              {
                tree cons = gimple_asm_label_op (asm_stmt, i);
                tree label = main_block_label (TREE_VALUE (cons),
                                               label_for_bb);
                TREE_VALUE (cons) = label;
              }
            break;
          }

        /* We have to handle gotos until they're removed, and we don't
           remove them until after we've created the CFG edges.  */
        case GIMPLE_GOTO:
          if (!computed_goto_p (stmt))
            {
              ggoto *goto_stmt = as_a <ggoto *> (stmt);
              label = gimple_goto_dest (goto_stmt);
              new_label = main_block_label (label, label_for_bb);
              if (new_label != label)
                gimple_goto_set_dest (goto_stmt, new_label);
            }
          break;

        case GIMPLE_TRANSACTION:
          {
            gtransaction *txn = as_a <gtransaction *> (stmt);

            label = gimple_transaction_label_norm (txn);
            if (label)
              {
                new_label = main_block_label (label, label_for_bb);
                if (new_label != label)
                  gimple_transaction_set_label_norm (txn, new_label);
              }

            label = gimple_transaction_label_uninst (txn);
            if (label)
              {
                new_label = main_block_label (label, label_for_bb);
                if (new_label != label)
                  gimple_transaction_set_label_uninst (txn, new_label);
              }

            label = gimple_transaction_label_over (txn);
            if (label)
              {
                new_label = main_block_label (label, label_for_bb);
                if (new_label != label)
                  gimple_transaction_set_label_over (txn, new_label);
              }
          }
          break;

        default:
          break;
        }
    }

  /* Do the same for the exception region tree labels.  */
  cleanup_dead_labels_eh (label_for_bb);

  /* Finally, purge dead labels.  All user-defined labels and labels that
     can be the target of non-local gotos and labels which have their
     address taken are preserved.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator i;
      tree label_for_this_bb = label_for_bb[bb->index].label;

      if (!label_for_this_bb)
        continue;

      /* If the main label of the block is unused, we may still remove it.  */
      if (!label_for_bb[bb->index].used)
        label_for_this_bb = NULL;

      for (i = gsi_start_bb (bb); !gsi_end_p (i); )
        {
          tree label;
          glabel *label_stmt = dyn_cast <glabel *> (gsi_stmt (i));

          if (!label_stmt)
            break;

          label = gimple_label_label (label_stmt);

          if (label == label_for_this_bb
              || !DECL_ARTIFICIAL (label)
              || DECL_NONLOCAL (label)
              || FORCED_LABEL (label))
            gsi_next (&i);
          else
            {
              gcc_checking_assert (EH_LANDING_PAD_NR (label) == 0);
              gsi_remove (&i, true);
            }
        }
    }

  free (label_for_bb);
}
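
/* A hedged sketch of the overall effect: given a block that starts with

     L1:
     L2:
       x_1 = 1;

   where L2 is a compiler-generated (DECL_ARTIFICIAL) label, references
   to L2 are redirected to the block's main label L1 and the now-unused
   artificial L2 is deleted.  User-defined labels, non-local labels and
   labels whose address is taken survive the cleanup.  */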
1802
1803/* Scan the sorted vector of cases in STMT (a GIMPLE_SWITCH) and combine
1804 the ones jumping to the same label.
1805 Eg. three separate entries 1: 2: 3: become one entry 1..3: */
1806
1807bool
1808group_case_labels_stmt (gswitch *stmt)
1809{
1810 int old_size = gimple_switch_num_labels (gs: stmt);
1811 int i, next_index, new_size;
1812 basic_block default_bb = NULL;
1813 hash_set<tree> *removed_labels = NULL;
1814
1815 default_bb = gimple_switch_default_bb (cfun, stmt);
1816
1817 /* Look for possible opportunities to merge cases. */
1818 new_size = i = 1;
1819 while (i < old_size)
1820 {
1821 tree base_case, base_high;
1822 basic_block base_bb;
1823
1824 base_case = gimple_switch_label (gs: stmt, index: i);
1825
1826 gcc_assert (base_case);
1827 base_bb = label_to_block (cfun, CASE_LABEL (base_case));
1828
1829 /* Discard cases that have the same destination as the default case or
1830 whose destination blocks have already been removed as unreachable. */
1831 if (base_bb == NULL
1832 || base_bb == default_bb
1833 || (removed_labels
1834 && removed_labels->contains (CASE_LABEL (base_case))))
1835 {
1836 i++;
1837 continue;
1838 }
1839
1840 base_high = CASE_HIGH (base_case)
1841 ? CASE_HIGH (base_case)
1842 : CASE_LOW (base_case);
1843 next_index = i + 1;
1844
1845 /* Try to merge case labels. Break out when we reach the end
1846 of the label vector or when we cannot merge the next case
1847 label with the current one. */
1848 while (next_index < old_size)
1849 {
1850 tree merge_case = gimple_switch_label (gs: stmt, index: next_index);
1851 basic_block merge_bb = label_to_block (cfun, CASE_LABEL (merge_case));
1852 wide_int bhp1 = wi::to_wide (t: base_high) + 1;
1853
1854 /* Merge the cases if they jump to the same place,
1855 and their ranges are consecutive. */
1856 if (merge_bb == base_bb
1857 && (removed_labels == NULL
1858 || !removed_labels->contains (CASE_LABEL (merge_case)))
1859 && wi::to_wide (CASE_LOW (merge_case)) == bhp1)
1860 {
1861 base_high
1862 = (CASE_HIGH (merge_case)
1863 ? CASE_HIGH (merge_case) : CASE_LOW (merge_case));
1864 CASE_HIGH (base_case) = base_high;
1865 next_index++;
1866 }
1867 else
1868 break;
1869 }
1870
1871 /* Discard cases that have an unreachable destination block. */
1872 if (EDGE_COUNT (base_bb->succs) == 0
1873 && gimple_seq_unreachable_p (stmts: bb_seq (bb: base_bb))
1874 /* Don't optimize this if __builtin_unreachable () is the
1875 one implicitly added by the C++ FE too early, before
1876 -Wreturn-type can be diagnosed. We'll optimize it later
1877 during switchconv pass or any other cfg cleanup. */
1878 && (gimple_in_ssa_p (cfun)
1879 || (LOCATION_LOCUS (gimple_location (last_nondebug_stmt (base_bb)))
1880 != BUILTINS_LOCATION)))
1881 {
1882 edge base_edge = find_edge (gimple_bb (g: stmt), base_bb);
1883 if (base_edge != NULL)
1884 {
1885 for (gimple_stmt_iterator gsi = gsi_start_bb (bb: base_bb);
1886 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
1887 if (glabel *stmt = dyn_cast <glabel *> (p: gsi_stmt (i: gsi)))
1888 {
1889 if (FORCED_LABEL (gimple_label_label (stmt))
1890 || DECL_NONLOCAL (gimple_label_label (stmt)))
1891 {
1892 /* Forced/non-local labels aren't going to be removed,
1893 but they will be moved to some neighbouring basic
1894 block. If some later case label refers to one of
1895 those labels, we should throw that case away rather
1896 than keeping it around and referring to some random
1897 other basic block without an edge to it. */
1898 if (removed_labels == NULL)
1899 removed_labels = new hash_set<tree>;
1900 removed_labels->add (k: gimple_label_label (gs: stmt));
1901 }
1902 }
1903 else
1904 break;
1905 remove_edge_and_dominated_blocks (base_edge);
1906 }
1907 i = next_index;
1908 continue;
1909 }
1910
1911 if (new_size < i)
1912 gimple_switch_set_label (gs: stmt, index: new_size,
1913 label: gimple_switch_label (gs: stmt, index: i));
1914 i = next_index;
1915 new_size++;
1916 }
1917
1918 gcc_assert (new_size <= old_size);
1919
1920 if (new_size < old_size)
1921 gimple_switch_set_num_labels (g: stmt, nlabels: new_size);
1922
1923 delete removed_labels;
1924 return new_size < old_size;
1925}
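
/* An illustrative example (editor's sketch, not from the sources):
   given a switch whose sorted cases

     case 1: goto <bb 4>;  case 2: goto <bb 4>;  case 3: goto <bb 4>;

   all branch to the same destination, the routine above rewrites the
   case vector to the single range

     case 1 ... 3: goto <bb 4>;

   and shrinks the label vector accordingly.  */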
1926
1927/* Look for blocks ending in a multiway branch (a GIMPLE_SWITCH),
1928 and scan the sorted vector of cases. Combine the ones jumping to the
1929 same label. */
1930
1931bool
1932group_case_labels (void)
1933{
1934 basic_block bb;
1935 bool changed = false;
1936
1937 FOR_EACH_BB_FN (bb, cfun)
1938 {
1939 if (gswitch *stmt = safe_dyn_cast <gswitch *> (p: *gsi_last_bb (bb)))
1940 changed |= group_case_labels_stmt (stmt);
1941 }
1942
1943 return changed;
1944}
1945
1946/* Checks whether we can merge block B into block A. */
1947
1948static bool
1949gimple_can_merge_blocks_p (basic_block a, basic_block b)
1950{
1951 gimple *stmt;
1952
1953 if (!single_succ_p (bb: a))
1954 return false;
1955
1956 if (single_succ_edge (bb: a)->flags & EDGE_COMPLEX)
1957 return false;
1958
1959 if (single_succ (bb: a) != b)
1960 return false;
1961
1962 if (!single_pred_p (bb: b))
1963 return false;
1964
1965 if (a == ENTRY_BLOCK_PTR_FOR_FN (cfun)
1966 || b == EXIT_BLOCK_PTR_FOR_FN (cfun))
1967 return false;
1968
1969 /* If A ends by a statement causing exceptions or something similar, we
1970 cannot merge the blocks. */
1971 stmt = *gsi_last_bb (bb: a);
1972 if (stmt && stmt_ends_bb_p (stmt))
1973 return false;
1974
1975 /* Examine the labels at the beginning of B. */
1976 for (gimple_stmt_iterator gsi = gsi_start_bb (bb: b); !gsi_end_p (i: gsi);
1977 gsi_next (i: &gsi))
1978 {
1979 tree lab;
1980 glabel *label_stmt = dyn_cast <glabel *> (p: gsi_stmt (i: gsi));
1981 if (!label_stmt)
1982 break;
1983 lab = gimple_label_label (gs: label_stmt);
1984
1985 /* Do not remove user forced labels or for -O0 any user labels. */
1986 if (!DECL_ARTIFICIAL (lab) && (!optimize || FORCED_LABEL (lab)))
1987 return false;
1988 }
1989
1990 /* Protect simple loop latches. We only want to avoid merging
1991 the latch with the loop header or with a block in another
1992 loop in this case. */
1993 if (current_loops
1994 && b->loop_father->latch == b
1995 && loops_state_satisfies_p (flags: LOOPS_HAVE_SIMPLE_LATCHES)
1996 && (b->loop_father->header == a
1997 || b->loop_father != a->loop_father))
1998 return false;
1999
2000 /* It must be possible to eliminate all phi nodes in B. If ssa form
2001 is not up-to-date and a name-mapping is registered, we cannot eliminate
2002 any phis. Symbols marked for renaming are never a problem though. */
2003 for (gphi_iterator gsi = gsi_start_phis (b); !gsi_end_p (i: gsi);
2004 gsi_next (i: &gsi))
2005 {
2006 gphi *phi = gsi.phi ();
2007 /* Technically only new names matter. */
2008 if (name_registered_for_update_p (PHI_RESULT (phi)))
2009 return false;
2010 }
2011
2012 /* When not optimizing, don't merge if we'd lose goto_locus. */
2013 if (!optimize
2014 && single_succ_edge (bb: a)->goto_locus != UNKNOWN_LOCATION)
2015 {
2016 location_t goto_locus = single_succ_edge (bb: a)->goto_locus;
2017 gimple_stmt_iterator prev, next;
2018 prev = gsi_last_nondebug_bb (bb: a);
2019 next = gsi_after_labels (bb: b);
2020 if (!gsi_end_p (i: next) && is_gimple_debug (gs: gsi_stmt (i: next)))
2021 gsi_next_nondebug (i: &next);
2022 if ((gsi_end_p (i: prev)
2023 || gimple_location (g: gsi_stmt (i: prev)) != goto_locus)
2024 && (gsi_end_p (i: next)
2025 || gimple_location (g: gsi_stmt (i: next)) != goto_locus))
2026 return false;
2027 }
2028
2029 return true;
2030}
2031
2032/* Replaces all uses of NAME by VAL. */
2033
2034void
2035replace_uses_by (tree name, tree val)
2036{
2037 imm_use_iterator imm_iter;
2038 use_operand_p use;
2039 gimple *stmt;
2040 edge e;
2041
2042 FOR_EACH_IMM_USE_STMT (stmt, imm_iter, name)
2043 {
2044 /* Mark the block if we change the last stmt in it. */
2045 if (cfgcleanup_altered_bbs
2046 && stmt_ends_bb_p (stmt))
2047 bitmap_set_bit (cfgcleanup_altered_bbs, gimple_bb (g: stmt)->index);
2048
2049 FOR_EACH_IMM_USE_ON_STMT (use, imm_iter)
2050 {
2051 replace_exp (use, val);
2052
2053 if (gimple_code (g: stmt) == GIMPLE_PHI)
2054 {
2055 e = gimple_phi_arg_edge (phi: as_a <gphi *> (p: stmt),
2056 PHI_ARG_INDEX_FROM_USE (use));
2057 if (e->flags & EDGE_ABNORMAL
2058 && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val))
2059 {
2060 /* This can only occur for virtual operands, since
2061 for the real ones SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)
2062 would prevent replacement. */
2063 gcc_checking_assert (virtual_operand_p (name));
2064 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (val) = 1;
2065 }
2066 }
2067 }
2068
2069 if (gimple_code (g: stmt) != GIMPLE_PHI)
2070 {
2071 gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
2072 gimple *orig_stmt = stmt;
2073 size_t i;
2074
2075 /* FIXME. It shouldn't be required to keep TREE_CONSTANT
2076 on ADDR_EXPRs up-to-date on GIMPLE. Propagation will
2077 only change something from non-invariant to invariant, and only
2078 when propagating constants. */
2079 if (is_gimple_min_invariant (val))
2080 for (i = 0; i < gimple_num_ops (gs: stmt); i++)
2081 {
2082 tree op = gimple_op (gs: stmt, i);
2083 /* Operands may be empty here. For example, the labels
2084 of a GIMPLE_COND are nulled out following the creation
2085 of the corresponding CFG edges. */
2086 if (op && TREE_CODE (op) == ADDR_EXPR)
2087 recompute_tree_invariant_for_addr_expr (op);
2088 }
2089
2090 if (fold_stmt (&gsi))
2091 stmt = gsi_stmt (i: gsi);
2092
2093 if (maybe_clean_or_replace_eh_stmt (orig_stmt, stmt))
2094 gimple_purge_dead_eh_edges (gimple_bb (g: stmt));
2095
2096 update_stmt (s: stmt);
2097 }
2098 }
2099
2100 gcc_checking_assert (has_zero_uses (name));
2101
2102 /* Also update the trees stored in loop structures. */
2103 if (current_loops)
2104 {
2105 for (auto loop : loops_list (cfun, 0))
2106 substitute_in_loop_info (loop, name, val);
2107 }
2108}
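
/* A minimal usage sketch for replace_uses_by; the caller below is
   hypothetical (editor's illustration) and kept inside #if 0 on
   purpose.  */
#if 0
static void
example_propagate_copy (gassign *copy_stmt)
{
  /* Assume COPY_STMT is a copy "lhs_1 = rhs_2" that has been proven
     redundant; funnelling the replacement through replace_uses_by
     keeps PHI arguments, abnormal-edge flags, EH edges and loop info
     consistent.  */
  tree lhs = gimple_assign_lhs (copy_stmt);
  tree rhs = gimple_assign_rhs1 (copy_stmt);
  if (TREE_CODE (lhs) == SSA_NAME && may_propagate_copy (lhs, rhs))
    replace_uses_by (lhs, rhs);
}
#endif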
2109
2110/* Merge block B into block A. */
2111
2112static void
2113gimple_merge_blocks (basic_block a, basic_block b)
2114{
2115 gimple_stmt_iterator last, gsi;
2116 gphi_iterator psi;
2117
2118 if (dump_file)
2119 fprintf (stream: dump_file, format: "Merging blocks %d and %d\n", a->index, b->index);
2120
2121 /* Remove all single-valued PHI nodes from block B of the form
2122 V_i = PHI <V_j> by propagating V_j to all the uses of V_i. */
2123 gsi = gsi_last_bb (bb: a);
2124 for (psi = gsi_start_phis (b); !gsi_end_p (i: psi); )
2125 {
2126 gimple *phi = gsi_stmt (i: psi);
2127 tree def = gimple_phi_result (gs: phi), use = gimple_phi_arg_def (gs: phi, index: 0);
2128 gimple *copy;
2129 bool may_replace_uses = (virtual_operand_p (op: def)
2130 || may_propagate_copy (def, use));
2131
2132 /* In case we maintain loop closed ssa form, do not propagate arguments
2133 of loop exit phi nodes. */
2134 if (current_loops
2135 && loops_state_satisfies_p (flags: LOOP_CLOSED_SSA)
2136 && !virtual_operand_p (op: def)
2137 && TREE_CODE (use) == SSA_NAME
2138 && a->loop_father != b->loop_father)
2139 may_replace_uses = false;
2140
2141 if (!may_replace_uses)
2142 {
2143 gcc_assert (!virtual_operand_p (def));
2144
2145 /* Note that just emitting the copies is fine -- there is no problem
2146 with ordering of phi nodes. This is because A is the single
2147 predecessor of B, therefore results of the phi nodes cannot
2148 appear as arguments of the phi nodes. */
2149 copy = gimple_build_assign (def, use);
2150 gsi_insert_after (&gsi, copy, GSI_NEW_STMT);
2151 remove_phi_node (&psi, false);
2152 }
2153 else
2154 {
2155 /* If we deal with a PHI for virtual operands, we can simply
2156 propagate these without fussing with folding or updating
2157 the stmt. */
2158 if (virtual_operand_p (op: def))
2159 {
2160 imm_use_iterator iter;
2161 use_operand_p use_p;
2162 gimple *stmt;
2163
2164 FOR_EACH_IMM_USE_STMT (stmt, iter, def)
2165 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
2166 SET_USE (use_p, use);
2167
2168 if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def))
2169 SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use) = 1;
2170 }
2171 else
2172 replace_uses_by (name: def, val: use);
2173
2174 remove_phi_node (&psi, true);
2175 }
2176 }
2177
2178 /* Ensure that B follows A. */
2179 move_block_after (b, a);
2180
2181 gcc_assert (single_succ_edge (a)->flags & EDGE_FALLTHRU);
2182 gcc_assert (!*gsi_last_bb (a)
2183 || !stmt_ends_bb_p (*gsi_last_bb (a)));
2184
2185 /* Remove labels from B and set gimple_bb to A for other statements. */
2186 for (gsi = gsi_start_bb (bb: b); !gsi_end_p (i: gsi);)
2187 {
2188 gimple *stmt = gsi_stmt (i: gsi);
2189 if (glabel *label_stmt = dyn_cast <glabel *> (p: stmt))
2190 {
2191 tree label = gimple_label_label (gs: label_stmt);
2192 int lp_nr;
2193
2194 gsi_remove (&gsi, false);
2195
2196 /* Now that we can thread computed gotos, we might have
2197 a situation where we have a forced label in block B.
2198 However, the label at the start of block B might still be
2199 used in other ways (think about the runtime checking for
2200 Fortran assigned gotos). So we cannot just delete the
2201 label. Instead we move the label to the start of block A. */
2202 if (FORCED_LABEL (label))
2203 {
2204 gimple_stmt_iterator dest_gsi = gsi_start_bb (bb: a);
2205 tree first_label = NULL_TREE;
2206 if (!gsi_end_p (i: dest_gsi))
2207 if (glabel *first_label_stmt
2208 = dyn_cast <glabel *> (p: gsi_stmt (i: dest_gsi)))
2209 first_label = gimple_label_label (gs: first_label_stmt);
2210 if (first_label
2211 && (DECL_NONLOCAL (first_label)
2212 || EH_LANDING_PAD_NR (first_label) != 0))
2213 gsi_insert_after (&dest_gsi, stmt, GSI_NEW_STMT);
2214 else
2215 gsi_insert_before (&dest_gsi, stmt, GSI_NEW_STMT);
2216 }
2217 /* Other user labels are kept around in the form of a debug stmt. */
2218 else if (!DECL_ARTIFICIAL (label) && MAY_HAVE_DEBUG_BIND_STMTS)
2219 {
2220 gimple *dbg = gimple_build_debug_bind (label,
2221 integer_zero_node,
2222 stmt);
2223 gimple_debug_bind_reset_value (dbg);
2224 gsi_insert_before (&gsi, dbg, GSI_SAME_STMT);
2225 }
2226
2227 lp_nr = EH_LANDING_PAD_NR (label);
2228 if (lp_nr)
2229 {
2230 eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr);
2231 lp->post_landing_pad = NULL;
2232 }
2233 }
2234 else
2235 {
2236 gimple_set_bb (stmt, a);
2237 gsi_next (i: &gsi);
2238 }
2239 }
2240
2241 /* When merging two BBs, if their counts are different, the larger count
2242 is selected as the new bb count. This is to handle inconsistent
2243 profiles. */
2244 if (a->loop_father == b->loop_father)
2245 {
2246 a->count = a->count.merge (other: b->count);
2247 }
2248
2249 /* Merge the sequences. */
2250 last = gsi_last_bb (bb: a);
2251 gsi_insert_seq_after (&last, bb_seq (bb: b), GSI_NEW_STMT);
2252 set_bb_seq (bb: b, NULL);
2253
2254 if (cfgcleanup_altered_bbs)
2255 bitmap_set_bit (cfgcleanup_altered_bbs, a->index);
2256}
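
/* For instance (editor's illustration), when merging B into A above,
   a degenerate PHI at the start of B such as

     v_3 = PHI <v_1(A)>

   is eliminated either by propagating v_1 into all uses of v_3 or,
   when propagation is not allowed, by emitting the copy v_3 = v_1 at
   the end of A; A being B's single predecessor makes the ordering of
   the emitted copies irrelevant.  */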
2257
2258
2259/* Return the one of two successors of BB that is not reachable by a
2260 complex edge, if there is one. Else, return BB. We use
2261 this in optimizations that use post-dominators for their heuristics,
2262 to catch the cases in C++ where function calls are involved. */
2263
2264basic_block
2265single_noncomplex_succ (basic_block bb)
2266{
2267 edge e0, e1;
2268 if (EDGE_COUNT (bb->succs) != 2)
2269 return bb;
2270
2271 e0 = EDGE_SUCC (bb, 0);
2272 e1 = EDGE_SUCC (bb, 1);
2273 if (e0->flags & EDGE_COMPLEX)
2274 return e1->dest;
2275 if (e1->flags & EDGE_COMPLEX)
2276 return e0->dest;
2277
2278 return bb;
2279}
2280
2281/* T is CALL_EXPR. Set current_function_calls_* flags. */
2282
2283void
2284notice_special_calls (gcall *call)
2285{
2286 int flags = gimple_call_flags (call);
2287
2288 if (flags & ECF_MAY_BE_ALLOCA)
2289 cfun->calls_alloca = true;
2290 if (flags & ECF_RETURNS_TWICE)
2291 cfun->calls_setjmp = true;
2292}
2293
2294
2295/* Clear flags set by notice_special_calls. Used by dead code removal
2296 to update the flags. */
2297
2298void
2299clear_special_calls (void)
2300{
2301 cfun->calls_alloca = false;
2302 cfun->calls_setjmp = false;
2303}
2304
2305/* Remove PHI nodes associated with basic block BB and all edges out of BB. */
2306
2307static void
2308remove_phi_nodes_and_edges_for_unreachable_block (basic_block bb)
2309{
2310 /* Since this block is no longer reachable, we can just delete all
2311 of its PHI nodes. */
2312 remove_phi_nodes (bb);
2313
2314 /* Remove edges to BB's successors. */
2315 while (EDGE_COUNT (bb->succs) > 0)
2316 remove_edge (EDGE_SUCC (bb, 0));
2317}
2318
2319
2320/* Remove statements of basic block BB. */
2321
2322static void
2323remove_bb (basic_block bb)
2324{
2325 gimple_stmt_iterator i;
2326
2327 if (dump_file)
2328 {
2329 fprintf (stream: dump_file, format: "Removing basic block %d\n", bb->index);
2330 if (dump_flags & TDF_DETAILS)
2331 {
2332 dump_bb (dump_file, bb, 0, TDF_BLOCKS);
2333 fprintf (stream: dump_file, format: "\n");
2334 }
2335 }
2336
2337 if (current_loops)
2338 {
2339 class loop *loop = bb->loop_father;
2340
2341 /* If a loop gets removed, clean up the information associated
2342 with it. */
2343 if (loop->latch == bb
2344 || loop->header == bb)
2345 free_numbers_of_iterations_estimates (loop);
2346 }
2347
2348 /* Remove all the instructions in the block. */
2349 if (bb_seq (bb) != NULL)
2350 {
2351 /* Walk backwards so as to get a chance to substitute all
2352 released DEFs into debug stmts. See
2353 eliminate_unnecessary_stmts() in tree-ssa-dce.cc for more
2354 details. */
2355 for (i = gsi_last_bb (bb); !gsi_end_p (i);)
2356 {
2357 gimple *stmt = gsi_stmt (i);
2358 glabel *label_stmt = dyn_cast <glabel *> (p: stmt);
2359 if (label_stmt
2360 && (FORCED_LABEL (gimple_label_label (label_stmt))
2361 || DECL_NONLOCAL (gimple_label_label (label_stmt))))
2362 {
2363 basic_block new_bb;
2364 gimple_stmt_iterator new_gsi;
2365
2366 /* A non-reachable non-local label may still be referenced.
2367 But it no longer needs to carry the extra semantics of
2368 non-locality. */
2369 if (DECL_NONLOCAL (gimple_label_label (label_stmt)))
2370 {
2371 DECL_NONLOCAL (gimple_label_label (label_stmt)) = 0;
2372 FORCED_LABEL (gimple_label_label (label_stmt)) = 1;
2373 }
2374
2375 new_bb = bb->prev_bb;
2376 /* Don't move any labels into ENTRY block. */
2377 if (new_bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
2378 {
2379 new_bb = single_succ (bb: new_bb);
2380 gcc_assert (new_bb != bb);
2381 }
2382 if ((unsigned) bb->index < bb_to_omp_idx.length ()
2383 && ((unsigned) new_bb->index >= bb_to_omp_idx.length ()
2384 || (bb_to_omp_idx[bb->index]
2385 != bb_to_omp_idx[new_bb->index])))
2386 {
2387 /* During cfg pass make sure to put orphaned labels
2388 into the right OMP region. */
2389 unsigned int i;
2390 int idx;
2391 new_bb = NULL;
2392 FOR_EACH_VEC_ELT (bb_to_omp_idx, i, idx)
2393 if (i >= NUM_FIXED_BLOCKS
2394 && idx == bb_to_omp_idx[bb->index]
2395 && i != (unsigned) bb->index)
2396 {
2397 new_bb = BASIC_BLOCK_FOR_FN (cfun, i);
2398 break;
2399 }
2400 if (new_bb == NULL)
2401 {
2402 new_bb = single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2403 gcc_assert (new_bb != bb);
2404 }
2405 }
2406 new_gsi = gsi_after_labels (bb: new_bb);
2407 gsi_remove (&i, false);
2408 gsi_insert_before (&new_gsi, stmt, GSI_NEW_STMT);
2409 }
2410 else
2411 {
2412 /* Release SSA definitions. */
2413 release_defs (stmt);
2414 gsi_remove (&i, true);
2415 }
2416
2417 if (gsi_end_p (i))
2418 i = gsi_last_bb (bb);
2419 else
2420 gsi_prev (i: &i);
2421 }
2422 }
2423
2424 if ((unsigned) bb->index < bb_to_omp_idx.length ())
2425 bb_to_omp_idx[bb->index] = -1;
2426 remove_phi_nodes_and_edges_for_unreachable_block (bb);
2427 bb->il.gimple.seq = NULL;
2428 bb->il.gimple.phi_nodes = NULL;
2429}
2430
2431
2432/* Given a basic block BB and a value VAL for use in the final statement
2433 of the block (if a GIMPLE_COND, GIMPLE_SWITCH, or computed goto), return
2434 the edge that will be taken out of the block.
2435 If VAL is NULL_TREE, then the current value of the final statement's
2436 predicate or index is used.
2437 If the value does not match a unique edge, NULL is returned. */
2438
2439edge
2440find_taken_edge (basic_block bb, tree val)
2441{
2442 gimple *stmt;
2443
2444 stmt = *gsi_last_bb (bb);
2445
2446 /* Handle ENTRY and EXIT. */
2447 if (!stmt)
2448 ;
2449
2450 else if (gimple_code (g: stmt) == GIMPLE_COND)
2451 return find_taken_edge_cond_expr (as_a <gcond *> (p: stmt), val);
2452
2453 else if (gimple_code (g: stmt) == GIMPLE_SWITCH)
2454 return find_taken_edge_switch_expr (switch_stmt: as_a <gswitch *> (p: stmt), val);
2455
2456 else if (computed_goto_p (t: stmt))
2457 {
2458 /* Only optimize if the argument is a label; if the argument is
2459 not a label then we cannot construct a proper CFG.
2460
2461 It may be the case that we only need to allow the LABEL_REF to
2462 appear inside an ADDR_EXPR, but we also allow the LABEL_REF to
2463 appear inside a LABEL_EXPR just to be safe. */
2464 if (val
2465 && (TREE_CODE (val) == ADDR_EXPR || TREE_CODE (val) == LABEL_EXPR)
2466 && TREE_CODE (TREE_OPERAND (val, 0)) == LABEL_DECL)
2467 return find_taken_edge_computed_goto (bb, TREE_OPERAND (val, 0));
2468 }
2469
2470 /* Otherwise we only know the taken successor edge if it's unique. */
2471 return single_succ_p (bb) ? single_succ_edge (bb) : NULL;
2472}
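
/* A typical use of find_taken_edge (editor's sketch of a hypothetical
   caller, kept inside #if 0 on purpose):  */
#if 0
static void
example_fold_cond_bb (basic_block bb)
{
  /* If BB ends in "if (x_1 != 0)" and x_1 is known to be zero, only
     the FALSE successor remains reachable; find_taken_edge maps the
     constant to that edge.  */
  edge live_e = find_taken_edge (bb, integer_zero_node);
  if (live_e)
    {
      /* ... e.g. remove the other outgoing edges and let CFG cleanup
	 delete the dead region ...  */
    }
}
#endif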
2473
2474/* Given a constant value VAL and the entry block BB to a GOTO_EXPR
2475 statement, determine which of the outgoing edges will be taken out of the
2476 block. Return NULL if either edge may be taken. */
2477
2478static edge
2479find_taken_edge_computed_goto (basic_block bb, tree val)
2480{
2481 basic_block dest;
2482 edge e = NULL;
2483
2484 dest = label_to_block (cfun, dest: val);
2485 if (dest)
2486 e = find_edge (bb, dest);
2487
2488 /* It's possible for find_edge to return NULL here on invalid code
2489 that abuses the labels-as-values extension (e.g. code that attempts to
2490 jump *between* functions via stored labels-as-values; PR 84136).
2491 If so, then we simply return that NULL for the edge.
2492 We don't currently have a way of detecting such invalid code, so we
2493 can't assert that it was the case when a NULL edge occurs here. */
2494
2495 return e;
2496}
2497
2498/* Given COND_STMT and a constant value VAL for use as the predicate,
2499 determine which of the two edges will be taken out of
2500 the statement's block. Return NULL if either edge may be taken.
2501 If VAL is NULL_TREE, then the current value of COND_STMT's predicate
2502 is used. */
2503
2504static edge
2505find_taken_edge_cond_expr (const gcond *cond_stmt, tree val)
2506{
2507 edge true_edge, false_edge;
2508
2509 if (val == NULL_TREE)
2510 {
2511 /* Use the current value of the predicate. */
2512 if (gimple_cond_true_p (gs: cond_stmt))
2513 val = integer_one_node;
2514 else if (gimple_cond_false_p (gs: cond_stmt))
2515 val = integer_zero_node;
2516 else
2517 return NULL;
2518 }
2519 else if (TREE_CODE (val) != INTEGER_CST)
2520 return NULL;
2521
2522 extract_true_false_edges_from_block (gimple_bb (g: cond_stmt),
2523 &true_edge, &false_edge);
2524
2525 return (integer_zerop (val) ? false_edge : true_edge);
2526}
2527
2528/* Given SWITCH_STMT and an INTEGER_CST VAL for use as the index, determine
2529 which edge will be taken out of the statement's block. Return NULL if any
2530 edge may be taken.
2531 If VAL is NULL_TREE, then the current value of SWITCH_STMT's index
2532 is used. */
2533
2534edge
2535find_taken_edge_switch_expr (const gswitch *switch_stmt, tree val)
2536{
2537 basic_block dest_bb;
2538 edge e;
2539 tree taken_case;
2540
2541 if (gimple_switch_num_labels (gs: switch_stmt) == 1)
2542 taken_case = gimple_switch_default_label (gs: switch_stmt);
2543 else
2544 {
2545 if (val == NULL_TREE)
2546 val = gimple_switch_index (gs: switch_stmt);
2547 if (TREE_CODE (val) != INTEGER_CST)
2548 return NULL;
2549 else
2550 taken_case = find_case_label_for_value (switch_stmt, val);
2551 }
2552 dest_bb = label_to_block (cfun, CASE_LABEL (taken_case));
2553
2554 e = find_edge (gimple_bb (g: switch_stmt), dest_bb);
2555 gcc_assert (e);
2556 return e;
2557}
2558
2559
2560/* Return the CASE_LABEL_EXPR that SWITCH_STMT will take for VAL.
2561 We can make optimal use here of the fact that the case labels are
2562 sorted: We can do a binary search for a case matching VAL. */
2563
2564tree
2565find_case_label_for_value (const gswitch *switch_stmt, tree val)
2566{
2567 size_t low, high, n = gimple_switch_num_labels (gs: switch_stmt);
2568 tree default_case = gimple_switch_default_label (gs: switch_stmt);
2569
2570 for (low = 0, high = n; high - low > 1; )
2571 {
2572 size_t i = (high + low) / 2;
2573 tree t = gimple_switch_label (gs: switch_stmt, index: i);
2574 int cmp;
2575
2576 /* Cache the result of comparing CASE_LOW and val. */
2577 cmp = tree_int_cst_compare (CASE_LOW (t), t2: val);
2578
2579 if (cmp > 0)
2580 high = i;
2581 else
2582 low = i;
2583
2584 if (CASE_HIGH (t) == NULL)
2585 {
2586 /* A single-valued case label. */
2587 if (cmp == 0)
2588 return t;
2589 }
2590 else
2591 {
2592 /* A case range. We can only handle integer ranges. */
2593 if (cmp <= 0 && tree_int_cst_compare (CASE_HIGH (t), t2: val) >= 0)
2594 return t;
2595 }
2596 }
2597
2598 return default_case;
2599}
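
/* Worked example (editor's note): with the sorted label vector
   { default, 1...3, 7, 10...12 } and VAL == 11, the loop halves
   [low, high) until it reaches the 10...12 entry, which is returned
   because CASE_LOW <= VAL and CASE_HIGH >= VAL; VAL == 5 matches no
   entry, so the default label is returned instead.  */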
2600
2601
2602/* Dump a basic block on stderr. */
2603
2604void
2605gimple_debug_bb (basic_block bb)
2606{
2607 dump_bb (stderr, bb, 0, TDF_VOPS|TDF_MEMSYMS|TDF_BLOCKS);
2608}
2609
2610
2611/* Dump basic block with index N on stderr. */
2612
2613basic_block
2614gimple_debug_bb_n (int n)
2615{
2616 gimple_debug_bb (BASIC_BLOCK_FOR_FN (cfun, n));
2617 return BASIC_BLOCK_FOR_FN (cfun, n);
2618}
2619
2620
2621/* Dump the CFG on stderr.
2622
2623 FLAGS are the same used by the tree dumping functions
2624 (see TDF_* in dumpfile.h). */
2625
2626void
2627gimple_debug_cfg (dump_flags_t flags)
2628{
2629 gimple_dump_cfg (stderr, flags);
2630}
2631
2632
2633/* Dump the program showing basic block boundaries on the given FILE.
2634
2635 FLAGS are the same used by the tree dumping functions (see TDF_* in
2636 tree.h). */
2637
2638void
2639gimple_dump_cfg (FILE *file, dump_flags_t flags)
2640{
2641 if (flags & TDF_DETAILS)
2642 {
2643 dump_function_header (file, current_function_decl, flags);
2644 fprintf (stream: file, format: ";; \n%d basic blocks, %d edges, last basic block %d.\n\n",
2645 n_basic_blocks_for_fn (cfun), n_edges_for_fn (cfun),
2646 last_basic_block_for_fn (cfun));
2647
2648 brief_dump_cfg (file, flags);
2649 fprintf (stream: file, format: "\n");
2650 }
2651
2652 if (flags & TDF_STATS)
2653 dump_cfg_stats (file);
2654
2655 dump_function_to_file (current_function_decl, file, flags | TDF_BLOCKS);
2656}
2657
2658
2659/* Dump CFG statistics on FILE. */
2660
2661void
2662dump_cfg_stats (FILE *file)
2663{
2664 static long max_num_merged_labels = 0;
2665 unsigned long size, total = 0;
2666 long num_edges;
2667 basic_block bb;
2668 const char * const fmt_str = "%-30s%-13s%12s\n";
2669 const char * const fmt_str_1 = "%-30s%13d" PRsa (11) "\n";
2670 const char * const fmt_str_2 = "%-30s%13ld" PRsa (11) "\n";
2671 const char * const fmt_str_3 = "%-43s" PRsa (11) "\n";
2672 const char *funcname = current_function_name ();
2673
2674 fprintf (stream: file, format: "\nCFG Statistics for %s\n\n", funcname);
2675
2676 fprintf (stream: file, format: "---------------------------------------------------------\n");
2677 fprintf (stream: file, format: fmt_str, "", " Number of ", "Memory");
2678 fprintf (stream: file, format: fmt_str, "", " instances ", "used ");
2679 fprintf (stream: file, format: "---------------------------------------------------------\n");
2680
2681 size = n_basic_blocks_for_fn (cfun) * sizeof (struct basic_block_def);
2682 total += size;
2683 fprintf (stream: file, format: fmt_str_1, "Basic blocks", n_basic_blocks_for_fn (cfun),
2684 SIZE_AMOUNT (size));
2685
2686 num_edges = 0;
2687 FOR_EACH_BB_FN (bb, cfun)
2688 num_edges += EDGE_COUNT (bb->succs);
2689 size = num_edges * sizeof (class edge_def);
2690 total += size;
2691 fprintf (stream: file, format: fmt_str_2, "Edges", num_edges, SIZE_AMOUNT (size));
2692
2693 fprintf (stream: file, format: "---------------------------------------------------------\n");
2694 fprintf (stream: file, format: fmt_str_3, "Total memory used by CFG data",
2695 SIZE_AMOUNT (total));
2696 fprintf (stream: file, format: "---------------------------------------------------------\n");
2697 fprintf (stream: file, format: "\n");
2698
2699 if (cfg_stats.num_merged_labels > max_num_merged_labels)
2700 max_num_merged_labels = cfg_stats.num_merged_labels;
2701
2702 fprintf (stream: file, format: "Coalesced label blocks: %ld (Max so far: %ld)\n",
2703 cfg_stats.num_merged_labels, max_num_merged_labels);
2704
2705 fprintf (stream: file, format: "\n");
2706}
2707
2708
2709/* Dump CFG statistics on stderr. Keep extern so that it's always
2710 linked in the final executable. */
2711
2712DEBUG_FUNCTION void
2713debug_cfg_stats (void)
2714{
2715 dump_cfg_stats (stderr);
2716}
2717
2718/*---------------------------------------------------------------------------
2719 Miscellaneous helpers
2720---------------------------------------------------------------------------*/
2721
2722/* Return true if T, a GIMPLE_CALL, can make an abnormal transfer of control
2723 flow. Transfers of control flow associated with EH are excluded. */
2724
2725static bool
2726call_can_make_abnormal_goto (gimple *t)
2727{
2728 /* If the function has no non-local labels, then a call cannot make an
2729 abnormal transfer of control. */
2730 if (!cfun->has_nonlocal_label
2731 && !cfun->calls_setjmp)
2732 return false;
2733
2734 /* Likewise if the call has no side effects. */
2735 if (!gimple_has_side_effects (t))
2736 return false;
2737
2738 /* Likewise if the called function is leaf. */
2739 if (gimple_call_flags (t) & ECF_LEAF)
2740 return false;
2741
2742 return true;
2743}
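
/* Illustrative GNU C (editor's example, not compiled here): in

     void f (void)
     {
       void g (void) { goto lab; }   // nested function, nonlocal goto
       h (g);                        // may transfer control back to lab
       lab:;
     }

   the call to h can make an abnormal transfer of control to LAB, so
   it satisfies the predicate above; so can any call in a function
   that calls setjmp.  */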
2744
2745
2746/* Return true if T can make an abnormal transfer of control flow.
2747 Transfers of control flow associated with EH are excluded. */
2748
2749bool
2750stmt_can_make_abnormal_goto (gimple *t)
2751{
2752 if (computed_goto_p (t))
2753 return true;
2754 if (is_gimple_call (gs: t))
2755 return call_can_make_abnormal_goto (t);
2756 return false;
2757}
2758
2759
2760/* Return true if T represents a stmt that always transfers control. */
2761
2762bool
2763is_ctrl_stmt (gimple *t)
2764{
2765 switch (gimple_code (g: t))
2766 {
2767 case GIMPLE_COND:
2768 case GIMPLE_SWITCH:
2769 case GIMPLE_GOTO:
2770 case GIMPLE_RETURN:
2771 case GIMPLE_RESX:
2772 return true;
2773 default:
2774 return false;
2775 }
2776}
2777
2778
2779/* Return true if T is a statement that may alter the flow of control
2780 (e.g., a call to a non-returning function). */
2781
2782bool
2783is_ctrl_altering_stmt (gimple *t)
2784{
2785 gcc_assert (t);
2786
2787 switch (gimple_code (g: t))
2788 {
2789 case GIMPLE_CALL:
2790 /* Per stmt call flag indicates whether the call could alter
2791 control flow. */
2792 if (gimple_call_ctrl_altering_p (gs: t))
2793 return true;
2794 break;
2795
2796 case GIMPLE_EH_DISPATCH:
2797 /* EH_DISPATCH branches to the individual catch handlers at
2798 this level of a try or allowed-exceptions region. It can
2799 fallthru to the next statement as well. */
2800 return true;
2801
2802 case GIMPLE_ASM:
2803 if (gimple_asm_nlabels (asm_stmt: as_a <gasm *> (p: t)) > 0)
2804 return true;
2805 break;
2806
2807 CASE_GIMPLE_OMP:
2808 /* OpenMP directives alter control flow. */
2809 return true;
2810
2811 case GIMPLE_TRANSACTION:
2812 /* A transaction start alters control flow. */
2813 return true;
2814
2815 default:
2816 break;
2817 }
2818
2819 /* If a statement can throw, it alters control flow. */
2820 return stmt_can_throw_internal (cfun, t);
2821}
2822
2823
2824/* Return true if T is a simple local goto. */
2825
2826bool
2827simple_goto_p (gimple *t)
2828{
2829 return (gimple_code (g: t) == GIMPLE_GOTO
2830 && TREE_CODE (gimple_goto_dest (t)) == LABEL_DECL);
2831}
2832
2833
2834/* Return true if STMT should start a new basic block. PREV_STMT is
2835 the statement preceding STMT. It is used when STMT is a label or a
2836 case label. Labels should only start a new basic block if their
2837 previous statement wasn't a label. Otherwise, a sequence of labels
2838 would generate unnecessary basic blocks that only contain a single
2839 label. */
2840
2841static inline bool
2842stmt_starts_bb_p (gimple *stmt, gimple *prev_stmt)
2843{
2844 if (stmt == NULL)
2845 return false;
2846
2847 /* PREV_STMT is only set to a debug stmt if the debug stmt is before
2848 any nondebug stmts in the block. We don't want to start another
2849 block in this case: the debug stmt will already have started the
2850 one STMT would start if we weren't outputting debug stmts. */
2851 if (prev_stmt && is_gimple_debug (gs: prev_stmt))
2852 return false;
2853
2854 /* Labels start a new basic block only if the preceding statement
2855 wasn't a label of the same type. This prevents the creation of
2856 consecutive blocks that have nothing but a single label. */
2857 if (glabel *label_stmt = dyn_cast <glabel *> (p: stmt))
2858 {
2859 /* Nonlocal and computed GOTO targets always start a new block. */
2860 if (DECL_NONLOCAL (gimple_label_label (label_stmt))
2861 || FORCED_LABEL (gimple_label_label (label_stmt)))
2862 return true;
2863
2864 if (glabel *plabel = safe_dyn_cast <glabel *> (p: prev_stmt))
2865 {
2866 if (DECL_NONLOCAL (gimple_label_label (plabel))
2867 || !DECL_ARTIFICIAL (gimple_label_label (plabel)))
2868 return true;
2869
2870 cfg_stats.num_merged_labels++;
2871 return false;
2872 }
2873 else
2874 return true;
2875 }
2876 else if (gimple_code (g: stmt) == GIMPLE_CALL)
2877 {
2878 if (gimple_call_flags (stmt) & ECF_RETURNS_TWICE)
2879 /* setjmp acts similar to a nonlocal GOTO target and thus should
2880 start a new block. */
2881 return true;
2882 if (gimple_call_internal_p (gs: stmt, fn: IFN_PHI)
2883 && prev_stmt
2884 && gimple_code (g: prev_stmt) != GIMPLE_LABEL
2885 && (gimple_code (g: prev_stmt) != GIMPLE_CALL
2886 || ! gimple_call_internal_p (gs: prev_stmt, fn: IFN_PHI)))
2887 /* PHI nodes start a new block unless preceded by a label
2888 or another PHI. */
2889 return true;
2890 }
2891
2892 return false;
2893}
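
/* Example (editor's note): if gimplification produces

     <D.1953>:          // artificial, local label
     user_lab:
       x = 1;

   then "user_lab" does not start a new basic block; it is merged into
   the block begun by <D.1953> and counted in
   cfg_stats.num_merged_labels. A nonlocal or forced label in its
   place would start a block of its own.  */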
2894
2895
2896/* Return true if T should end a basic block. */
2897
2898bool
2899stmt_ends_bb_p (gimple *t)
2900{
2901 return is_ctrl_stmt (t) || is_ctrl_altering_stmt (t);
2902}
2903
2904/* Remove block annotations and other data structures. */
2905
2906void
2907delete_tree_cfg_annotations (struct function *fn)
2908{
2909 vec_free (label_to_block_map_for_fn (fn));
2910}
2911
2912/* Return the virtual phi in BB. */
2913
2914gphi *
2915get_virtual_phi (basic_block bb)
2916{
2917 for (gphi_iterator gsi = gsi_start_phis (bb);
2918 !gsi_end_p (i: gsi);
2919 gsi_next (i: &gsi))
2920 {
2921 gphi *phi = gsi.phi ();
2922
2923 if (virtual_operand_p (PHI_RESULT (phi)))
2924 return phi;
2925 }
2926
2927 return NULL;
2928}
2929
2930/* Return the first statement in basic block BB. */
2931
2932gimple *
2933first_stmt (basic_block bb)
2934{
2935 gimple_stmt_iterator i = gsi_start_bb (bb);
2936 gimple *stmt = NULL;
2937
2938 while (!gsi_end_p (i) && is_gimple_debug (gs: (stmt = gsi_stmt (i))))
2939 {
2940 gsi_next (i: &i);
2941 stmt = NULL;
2942 }
2943 return stmt;
2944}
2945
2946/* Return the first non-label statement in basic block BB. */
2947
2948static gimple *
2949first_non_label_stmt (basic_block bb)
2950{
2951 gimple_stmt_iterator i = gsi_start_bb (bb);
2952 while (!gsi_end_p (i) && gimple_code (g: gsi_stmt (i)) == GIMPLE_LABEL)
2953 gsi_next (i: &i);
2954 return !gsi_end_p (i) ? gsi_stmt (i) : NULL;
2955}
2956
2957/* Return the last statement in basic block BB. */
2958
2959gimple *
2960last_nondebug_stmt (basic_block bb)
2961{
2962 gimple_stmt_iterator i = gsi_last_bb (bb);
2963 gimple *stmt = NULL;
2964
2965 while (!gsi_end_p (i) && is_gimple_debug (gs: (stmt = gsi_stmt (i))))
2966 {
2967 gsi_prev (i: &i);
2968 stmt = NULL;
2969 }
2970 return stmt;
2971}
2972
2973/* Return the last statement of an otherwise empty block. Return NULL
2974 if the block is totally empty, or if it contains more than one
2975 statement. */
2976
2977gimple *
2978last_and_only_stmt (basic_block bb)
2979{
2980 gimple_stmt_iterator i = gsi_last_nondebug_bb (bb);
2981 gimple *last, *prev;
2982
2983 if (gsi_end_p (i))
2984 return NULL;
2985
2986 last = gsi_stmt (i);
2987 gsi_prev_nondebug (i: &i);
2988 if (gsi_end_p (i))
2989 return last;
2990
2991 /* Empty statements should no longer appear in the instruction stream.
2992 Everything that might have appeared before should be deleted by
2993 remove_useless_stmts, and the optimizers should just gsi_remove
2994 instead of smashing with build_empty_stmt.
2995
2996 Thus the only thing that should appear here in a block containing
2997 one executable statement is a label. */
2998 prev = gsi_stmt (i);
2999 if (gimple_code (g: prev) == GIMPLE_LABEL)
3000 return last;
3001 else
3002 return NULL;
3003}
3004
3005/* Returns the basic block after which the new basic block created
3006 by splitting edge EDGE_IN should be placed. Tries to keep the new block
3007 near its "logical" location. This is of most help to humans looking
3008 at debugging dumps. */
3009
3010basic_block
3011split_edge_bb_loc (edge edge_in)
3012{
3013 basic_block dest = edge_in->dest;
3014 basic_block dest_prev = dest->prev_bb;
3015
3016 if (dest_prev)
3017 {
3018 edge e = find_edge (dest_prev, dest);
3019 if (e && !(e->flags & EDGE_COMPLEX))
3020 return edge_in->src;
3021 }
3022 return dest_prev;
3023}
3024
3025/* Split a (typically critical) edge EDGE_IN. Return the new block.
3026 Abort on abnormal edges. */
3027
3028static basic_block
3029gimple_split_edge (edge edge_in)
3030{
3031 basic_block new_bb, after_bb, dest;
3032 edge new_edge, e;
3033
3034 /* Abnormal edges cannot be split. */
3035 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
3036
3037 dest = edge_in->dest;
3038
3039 after_bb = split_edge_bb_loc (edge_in);
3040
3041 new_bb = create_empty_bb (after_bb);
3042 new_bb->count = edge_in->count ();
3043
3044 /* We want to avoid re-allocating PHIs when we first
3045 add the fallthru edge from new_bb to dest but we also
3046 want to avoid changing PHI argument order when
3047 first redirecting edge_in away from dest. The former
3048 avoids changing PHI argument order by adding them
3049 last and then the redirection swapping it back into
3050 place by means of unordered remove.
3051 So hack around things by temporarily removing all PHIs
3052 from the destination during the edge redirection and then
3053 making sure the edges stay in order. */
3054 gimple_seq saved_phis = phi_nodes (bb: dest);
3055 unsigned old_dest_idx = edge_in->dest_idx;
3056 set_phi_nodes (dest, NULL);
3057 new_edge = make_single_succ_edge (new_bb, dest, EDGE_FALLTHRU);
3058 e = redirect_edge_and_branch (edge_in, new_bb);
3059 gcc_assert (e == edge_in && new_edge->dest_idx == old_dest_idx);
3060 /* set_phi_nodes sets the BB of the PHI nodes, so do it manually here. */
3061 dest->il.gimple.phi_nodes = saved_phis;
3062
3063 return new_bb;
3064}
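
/* Schematically (editor's note): splitting the critical edge A -> B,
   where A has another successor and B another predecessor, inserts a
   fresh empty block N so the path becomes A -> N -> B; A's branch is
   redirected to N and N falls through to B. The temporary removal of
   DEST's PHIs above keeps the PHI argument order stable across the
   redirection.  */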
3065
3066
3067/* Verify properties of the address expression T whose base should be
3068 TREE_ADDRESSABLE if VERIFY_ADDRESSABLE is true. */
3069
3070static bool
3071verify_address (tree t, bool verify_addressable)
3072{
3073 bool old_constant;
3074 bool old_side_effects;
3075 bool new_constant;
3076 bool new_side_effects;
3077
3078 old_constant = TREE_CONSTANT (t);
3079 old_side_effects = TREE_SIDE_EFFECTS (t);
3080
3081 recompute_tree_invariant_for_addr_expr (t);
3082 new_side_effects = TREE_SIDE_EFFECTS (t);
3083 new_constant = TREE_CONSTANT (t);
3084
3085 if (old_constant != new_constant)
3086 {
3087 error ("constant not recomputed when %<ADDR_EXPR%> changed");
3088 return true;
3089 }
3090 if (old_side_effects != new_side_effects)
3091 {
3092 error ("side effects not recomputed when %<ADDR_EXPR%> changed");
3093 return true;
3094 }
3095
3096 tree base = TREE_OPERAND (t, 0);
3097 while (handled_component_p (t: base))
3098 base = TREE_OPERAND (base, 0);
3099
3100 if (!(VAR_P (base)
3101 || TREE_CODE (base) == PARM_DECL
3102 || TREE_CODE (base) == RESULT_DECL))
3103 return false;
3104
3105 if (verify_addressable && !TREE_ADDRESSABLE (base))
3106 {
3107 error ("address taken but %<TREE_ADDRESSABLE%> bit not set");
3108 return true;
3109 }
3110
3111 return false;
3112}
3113
3114
3115/* Verify if EXPR is a valid GIMPLE reference expression. If
3116 REQUIRE_LVALUE is true verifies it is an lvalue. Returns true
3117 if there is an error, otherwise false. */
3118
3119static bool
3120verify_types_in_gimple_reference (tree expr, bool require_lvalue)
3121{
3122 const char *code_name = get_tree_code_name (TREE_CODE (expr));
3123
3124 if (TREE_CODE (expr) == REALPART_EXPR
3125 || TREE_CODE (expr) == IMAGPART_EXPR
3126 || TREE_CODE (expr) == BIT_FIELD_REF
3127 || TREE_CODE (expr) == VIEW_CONVERT_EXPR)
3128 {
3129 tree op = TREE_OPERAND (expr, 0);
3130 if (TREE_CODE (expr) != VIEW_CONVERT_EXPR
3131 && !is_gimple_reg_type (TREE_TYPE (expr)))
3132 {
3133 error ("non-scalar %qs", code_name);
3134 return true;
3135 }
3136
3137 if (TREE_CODE (expr) == BIT_FIELD_REF)
3138 {
3139 tree t1 = TREE_OPERAND (expr, 1);
3140 tree t2 = TREE_OPERAND (expr, 2);
3141 poly_uint64 size, bitpos;
3142 if (!poly_int_tree_p (t: t1, value: &size)
3143 || !poly_int_tree_p (t: t2, value: &bitpos)
3144 || !types_compatible_p (bitsizetype, TREE_TYPE (t1))
3145 || !types_compatible_p (bitsizetype, TREE_TYPE (t2)))
3146 {
3147 error ("invalid position or size operand to %qs", code_name);
3148 return true;
3149 }
3150 if (INTEGRAL_TYPE_P (TREE_TYPE (expr))
3151 && maybe_ne (TYPE_PRECISION (TREE_TYPE (expr)), b: size))
3152 {
3153 error ("integral result type precision does not match "
3154 "field size of %qs", code_name);
3155 return true;
3156 }
3157 else if (!INTEGRAL_TYPE_P (TREE_TYPE (expr))
3158 && TYPE_MODE (TREE_TYPE (expr)) != BLKmode
3159 && maybe_ne (a: GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (expr))),
3160 b: size))
3161 {
3162 error ("mode size of non-integral result does not "
3163 "match field size of %qs",
3164 code_name);
3165 return true;
3166 }
3167 if (INTEGRAL_TYPE_P (TREE_TYPE (op))
3168 && !type_has_mode_precision_p (TREE_TYPE (op)))
3169 {
3170 error ("%qs of non-mode-precision operand", code_name);
3171 return true;
3172 }
3173 if (!AGGREGATE_TYPE_P (TREE_TYPE (op))
3174 && maybe_gt (size + bitpos,
3175 tree_to_poly_uint64 (TYPE_SIZE (TREE_TYPE (op)))))
3176 {
3177 error ("position plus size exceeds size of referenced object in "
3178 "%qs", code_name);
3179 return true;
3180 }
3181 }
3182
3183 if ((TREE_CODE (expr) == REALPART_EXPR
3184 || TREE_CODE (expr) == IMAGPART_EXPR)
3185 && !useless_type_conversion_p (TREE_TYPE (expr),
3186 TREE_TYPE (TREE_TYPE (op))))
3187 {
3188 error ("type mismatch in %qs reference", code_name);
3189 debug_generic_stmt (TREE_TYPE (expr));
3190 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3191 return true;
3192 }
3193
3194 if (TREE_CODE (expr) == VIEW_CONVERT_EXPR)
3195 {
3196 /* For VIEW_CONVERT_EXPRs which are allowed here too, we only check
3197 that their operand is not a register or an invariant when
3198 requiring an lvalue (this usually means there is a SRA or IPA-SRA
3199 bug). Otherwise there is nothing to verify, gross mismatches at
3200 most invoke undefined behavior. */
3201 if (require_lvalue
3202 && (is_gimple_reg (op) || is_gimple_min_invariant (op)))
3203 {
3204 error ("conversion of %qs on the left hand side of %qs",
3205 get_tree_code_name (TREE_CODE (op)), code_name);
3206 debug_generic_stmt (expr);
3207 return true;
3208 }
3209 else if (is_gimple_reg (op)
3210 && TYPE_SIZE (TREE_TYPE (expr)) != TYPE_SIZE (TREE_TYPE (op)))
3211 {
3212 error ("conversion of register to a different size in %qs",
3213 code_name);
3214 debug_generic_stmt (expr);
3215 return true;
3216 }
3217 }
3218
3219 expr = op;
3220 }
3221
3222 bool require_non_reg = false;
3223 while (handled_component_p (t: expr))
3224 {
3225 require_non_reg = true;
3226 code_name = get_tree_code_name (TREE_CODE (expr));
3227
3228 if (TREE_CODE (expr) == REALPART_EXPR
3229 || TREE_CODE (expr) == IMAGPART_EXPR
3230 || TREE_CODE (expr) == BIT_FIELD_REF)
3231 {
3232 error ("non-top-level %qs", code_name);
3233 return true;
3234 }
3235
3236 tree op = TREE_OPERAND (expr, 0);
3237
3238 if (TREE_CODE (expr) == ARRAY_REF
3239 || TREE_CODE (expr) == ARRAY_RANGE_REF)
3240 {
3241 if (!is_gimple_val (TREE_OPERAND (expr, 1))
3242 || (TREE_OPERAND (expr, 2)
3243 && !is_gimple_val (TREE_OPERAND (expr, 2)))
3244 || (TREE_OPERAND (expr, 3)
3245 && !is_gimple_val (TREE_OPERAND (expr, 3))))
3246 {
3247 error ("invalid operands to %qs", code_name);
3248 debug_generic_stmt (expr);
3249 return true;
3250 }
3251 }
3252
3253 /* Verify if the reference array element types are compatible. */
3254 if (TREE_CODE (expr) == ARRAY_REF
3255 && !useless_type_conversion_p (TREE_TYPE (expr),
3256 TREE_TYPE (TREE_TYPE (op))))
3257 {
3258 error ("type mismatch in %qs", code_name);
3259 debug_generic_stmt (TREE_TYPE (expr));
3260 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3261 return true;
3262 }
3263 if (TREE_CODE (expr) == ARRAY_RANGE_REF
3264 && !useless_type_conversion_p (TREE_TYPE (TREE_TYPE (expr)),
3265 TREE_TYPE (TREE_TYPE (op))))
3266 {
3267 error ("type mismatch in %qs", code_name);
3268 debug_generic_stmt (TREE_TYPE (TREE_TYPE (expr)));
3269 debug_generic_stmt (TREE_TYPE (TREE_TYPE (op)));
3270 return true;
3271 }
3272
3273 if (TREE_CODE (expr) == COMPONENT_REF)
3274 {
3275 if (TREE_OPERAND (expr, 2)
3276 && !is_gimple_val (TREE_OPERAND (expr, 2)))
3277 {
3278 error ("invalid %qs offset operator", code_name);
3279 return true;
3280 }
3281 if (!useless_type_conversion_p (TREE_TYPE (expr),
3282 TREE_TYPE (TREE_OPERAND (expr, 1))))
3283 {
3284 error ("type mismatch in %qs", code_name);
3285 debug_generic_stmt (TREE_TYPE (expr));
3286 debug_generic_stmt (TREE_TYPE (TREE_OPERAND (expr, 1)));
3287 return true;
3288 }
3289 }
3290
3291 expr = op;
3292 }
3293
3294 code_name = get_tree_code_name (TREE_CODE (expr));
3295
3296 if (TREE_CODE (expr) == MEM_REF)
3297 {
3298 if (!is_gimple_mem_ref_addr (TREE_OPERAND (expr, 0))
3299 || (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR
3300 && verify_address (TREE_OPERAND (expr, 0), verify_addressable: false)))
3301 {
3302 error ("invalid address operand in %qs", code_name);
3303 debug_generic_stmt (expr);
3304 return true;
3305 }
3306 if (!poly_int_tree_p (TREE_OPERAND (expr, 1))
3307 || !POINTER_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 1))))
3308 {
3309 error ("invalid offset operand in %qs", code_name);
3310 debug_generic_stmt (expr);
3311 return true;
3312 }
3313 if (MR_DEPENDENCE_CLIQUE (expr) != 0
3314 && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique)
3315 {
3316 error ("invalid clique in %qs", code_name);
3317 debug_generic_stmt (expr);
3318 return true;
3319 }
3320 }
3321 else if (TREE_CODE (expr) == TARGET_MEM_REF)
3322 {
3323 if (!TMR_BASE (expr)
3324 || !is_gimple_mem_ref_addr (TMR_BASE (expr))
3325 || (TREE_CODE (TMR_BASE (expr)) == ADDR_EXPR
3326 && verify_address (TMR_BASE (expr), verify_addressable: false)))
3327 {
3328 error ("invalid address operand in %qs", code_name);
3329 return true;
3330 }
3331 if (!TMR_OFFSET (expr)
3332 || !poly_int_tree_p (TMR_OFFSET (expr))
3333 || !POINTER_TYPE_P (TREE_TYPE (TMR_OFFSET (expr))))
3334 {
3335 error ("invalid offset operand in %qs", code_name);
3336 debug_generic_stmt (expr);
3337 return true;
3338 }
3339 if (MR_DEPENDENCE_CLIQUE (expr) != 0
3340 && MR_DEPENDENCE_CLIQUE (expr) > cfun->last_clique)
3341 {
3342 error ("invalid clique in %qs", code_name);
3343 debug_generic_stmt (expr);
3344 return true;
3345 }
3346 }
3347 else if (INDIRECT_REF_P (expr))
3348 {
3349 error ("%qs in gimple IL", code_name);
3350 debug_generic_stmt (expr);
3351 return true;
3352 }
3353 else if (require_non_reg
3354 && (is_gimple_reg (expr)
3355 || (is_gimple_min_invariant (expr)
3356 /* STRING_CSTs are representatives of the string table
3357 entry which lives in memory. */
3358 && TREE_CODE (expr) != STRING_CST)))
3359 {
3360 error ("%qs as base where non-register is required", code_name);
3361 debug_generic_stmt (expr);
3362 return true;
3363 }
3364
3365 if (!require_lvalue
3366 && (is_gimple_reg (expr) || is_gimple_min_invariant (expr)))
3367 return false;
3368
3369 if (TREE_CODE (expr) != SSA_NAME && is_gimple_id (t: expr))
3370 return false;
3371
3372 if (TREE_CODE (expr) != TARGET_MEM_REF
3373 && TREE_CODE (expr) != MEM_REF)
3374 {
3375 error ("invalid expression for min lvalue");
3376 return true;
3377 }
3378
3379 return false;
3380}
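
/* Example (editor's note): for the reference a.b[i_1].c the loop above
   peels COMPONENT_REF (.c), then ARRAY_REF ([i_1]), then COMPONENT_REF
   (.b), checking at each step that the operands are gimple values and
   that the types agree, until only the base "a" remains, which must
   then be a declaration, a MEM_REF or a TARGET_MEM_REF.  */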
3381
3382/* Returns true if there is one pointer type in the TYPE_POINTER_TO (SRC_OBJ)
3383 list of pointer-to types that is trivially convertible to DEST. */
3384
3385static bool
3386one_pointer_to_useless_type_conversion_p (tree dest, tree src_obj)
3387{
3388 tree src;
3389
3390 if (!TYPE_POINTER_TO (src_obj))
3391 return true;
3392
3393 for (src = TYPE_POINTER_TO (src_obj); src; src = TYPE_NEXT_PTR_TO (src))
3394 if (useless_type_conversion_p (dest, src))
3395 return true;
3396
3397 return false;
3398}
3399
3400/* Return true if TYPE1 is a fixed-point type and if conversions to and
3401 from TYPE2 can be handled by FIXED_CONVERT_EXPR. */
3402
3403static bool
3404valid_fixed_convert_types_p (tree type1, tree type2)
3405{
3406 return (FIXED_POINT_TYPE_P (type1)
3407 && (INTEGRAL_TYPE_P (type2)
3408 || SCALAR_FLOAT_TYPE_P (type2)
3409 || FIXED_POINT_TYPE_P (type2)));
3410}
3411
3412/* Verify the contents of a GIMPLE_CALL STMT. Returns true when there
3413 is a problem, otherwise false. */
3414
3415static bool
3416verify_gimple_call (gcall *stmt)
3417{
3418 tree fn = gimple_call_fn (gs: stmt);
3419 tree fntype, fndecl;
3420 unsigned i;
3421
3422 if (gimple_call_internal_p (gs: stmt))
3423 {
3424 if (fn)
3425 {
3426 error ("gimple call has two targets");
3427 debug_generic_stmt (fn);
3428 return true;
3429 }
3430 }
3431 else
3432 {
3433 if (!fn)
3434 {
3435 error ("gimple call has no target");
3436 return true;
3437 }
3438 }
3439
3440 if (fn && !is_gimple_call_addr (fn))
3441 {
3442 error ("invalid function in gimple call");
3443 debug_generic_stmt (fn);
3444 return true;
3445 }
3446
3447 if (fn
3448 && (!POINTER_TYPE_P (TREE_TYPE (fn))
3449 || (TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != FUNCTION_TYPE
3450 && TREE_CODE (TREE_TYPE (TREE_TYPE (fn))) != METHOD_TYPE)))
3451 {
3452 error ("non-function in gimple call");
3453 return true;
3454 }
3455
3456 fndecl = gimple_call_fndecl (gs: stmt);
3457 if (fndecl
3458 && TREE_CODE (fndecl) == FUNCTION_DECL
3459 && DECL_LOOPING_CONST_OR_PURE_P (fndecl)
3460 && !DECL_PURE_P (fndecl)
3461 && !TREE_READONLY (fndecl))
3462 {
3463 error ("invalid pure const state for function");
3464 return true;
3465 }
3466
3467 tree lhs = gimple_call_lhs (gs: stmt);
3468 if (lhs
3469 && (!is_gimple_reg (lhs)
3470 && (!is_gimple_lvalue (lhs)
3471 || verify_types_in_gimple_reference
3472 (TREE_CODE (lhs) == WITH_SIZE_EXPR
3473 ? TREE_OPERAND (lhs, 0) : lhs, require_lvalue: true))))
3474 {
3475 error ("invalid LHS in gimple call");
3476 return true;
3477 }
3478
3479 if (gimple_call_ctrl_altering_p (gs: stmt)
3480 && gimple_call_noreturn_p (s: stmt)
3481 && should_remove_lhs_p (lhs))
3482 {
3483 error ("LHS in %<noreturn%> call");
3484 return true;
3485 }
3486
3487 fntype = gimple_call_fntype (gs: stmt);
3488 if (fntype
3489 && lhs
3490 && !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (fntype))
3491 /* ??? At least C++ misses conversions at assignments from
3492 void * call results.
3493 For now simply allow arbitrary pointer type conversions. */
3494 && !(POINTER_TYPE_P (TREE_TYPE (lhs))
3495 && POINTER_TYPE_P (TREE_TYPE (fntype))))
3496 {
3497 error ("invalid conversion in gimple call");
3498 debug_generic_stmt (TREE_TYPE (lhs));
3499 debug_generic_stmt (TREE_TYPE (fntype));
3500 return true;
3501 }
3502
3503 if (gimple_call_chain (gs: stmt)
3504 && !is_gimple_val (gimple_call_chain (gs: stmt)))
3505 {
3506 error ("invalid static chain in gimple call");
3507 debug_generic_stmt (gimple_call_chain (gs: stmt));
3508 return true;
3509 }
3510
3511 /* If there is a static chain argument, the call should either be
3512 indirect, or the decl should have DECL_STATIC_CHAIN set. */
3513 if (gimple_call_chain (gs: stmt)
3514 && fndecl
3515 && !DECL_STATIC_CHAIN (fndecl))
3516 {
3517 error ("static chain with function that doesn%'t use one");
3518 return true;
3519 }
3520
3521 if (fndecl && fndecl_built_in_p (node: fndecl, klass: BUILT_IN_NORMAL))
3522 {
3523 switch (DECL_FUNCTION_CODE (decl: fndecl))
3524 {
3525 case BUILT_IN_UNREACHABLE:
3526 case BUILT_IN_UNREACHABLE_TRAP:
3527 case BUILT_IN_TRAP:
3528 if (gimple_call_num_args (gs: stmt) > 0)
3529 {
3530 /* Built-in unreachable with parameters might not be caught by
3531 undefined behavior sanitizer. Front-ends do check that users do not
3532 call them that way, but we also produce calls to
3533 __builtin_unreachable internally, for example when IPA figures
3534 out a call cannot happen in a legal program. In such cases,
3535 we must make sure arguments are stripped off. */
3536 error ("%<__builtin_unreachable%> or %<__builtin_trap%> call "
3537 "with arguments");
3538 return true;
3539 }
3540 break;
3541 default:
3542 break;
3543 }
3544 }
3545
3546 /* For a call to .DEFERRED_INIT,
3547 LHS = DEFERRED_INIT (SIZE of the DECL, INIT_TYPE, NAME of the DECL)
3548 we should guarantee that when the 1st argument is a constant, it
3549 is the same as the size of the LHS. */
3550
3551 if (gimple_call_internal_p (gs: stmt, fn: IFN_DEFERRED_INIT))
3552 {
3553 tree size_of_arg0 = gimple_call_arg (gs: stmt, index: 0);
3554 tree size_of_lhs = TYPE_SIZE_UNIT (TREE_TYPE (lhs));
3555
3556 if (TREE_CODE (lhs) == SSA_NAME)
3557 lhs = SSA_NAME_VAR (lhs);
3558
3559 poly_uint64 size_from_arg0, size_from_lhs;
3560 bool is_constant_size_arg0 = poly_int_tree_p (t: size_of_arg0,
3561 value: &size_from_arg0);
3562 bool is_constant_size_lhs = poly_int_tree_p (t: size_of_lhs,
3563 value: &size_from_lhs);
3564 if (is_constant_size_arg0 && is_constant_size_lhs)
3565 if (maybe_ne (a: size_from_arg0, b: size_from_lhs))
3566 {
3567 error ("%<DEFERRED_INIT%> calls should have same "
3568 "constant size for the first argument and LHS");
3569 return true;
3570 }
3571 }
3572
3573 /* ??? The C frontend passes unpromoted arguments in case it
3574 didn't see a function declaration before the call. So for now
3575 leave the call arguments mostly unverified. Once we gimplify
3576 unit-at-a-time we have a chance to fix this. */
3577 for (i = 0; i < gimple_call_num_args (gs: stmt); ++i)
3578 {
3579 tree arg = gimple_call_arg (gs: stmt, index: i);
3580 if ((is_gimple_reg_type (TREE_TYPE (arg))
3581 && !is_gimple_val (arg))
3582 || (!is_gimple_reg_type (TREE_TYPE (arg))
3583 && !is_gimple_lvalue (arg)))
3584 {
3585 error ("invalid argument to gimple call");
3586 debug_generic_expr (arg);
3587 return true;
3588 }
3589 if (!is_gimple_reg (arg))
3590 {
3591 if (TREE_CODE (arg) == WITH_SIZE_EXPR)
3592 arg = TREE_OPERAND (arg, 0);
3593 if (verify_types_in_gimple_reference (expr: arg, require_lvalue: false))
3594 return true;
3595 }
3596 }
3597
3598 return false;
3599}
3600
3601/* Verifies the gimple comparison with the result type TYPE and
3602 the operands OP0 and OP1, comparison code is CODE. */
3603
3604static bool
3605verify_gimple_comparison (tree type, tree op0, tree op1, enum tree_code code)
3606{
3607 tree op0_type = TREE_TYPE (op0);
3608 tree op1_type = TREE_TYPE (op1);
3609
3610 if (!is_gimple_val (op0) || !is_gimple_val (op1))
3611 {
3612 error ("invalid operands in gimple comparison");
3613 return true;
3614 }
3615
3616 /* For comparisons we do not have the operation's type as the
3617 effective type the comparison is carried out in. Instead
3618 we require that either the first operand is trivially
3619 convertible into the second, or the other way around. */
3620 if (!useless_type_conversion_p (op0_type, op1_type)
3621 && !useless_type_conversion_p (op1_type, op0_type))
3622 {
3623 error ("mismatching comparison operand types");
3624 debug_generic_expr (op0_type);
3625 debug_generic_expr (op1_type);
3626 return true;
3627 }
3628
3629 /* The resulting type of a comparison may be an effective boolean type. */
3630 if (INTEGRAL_TYPE_P (type)
3631 && (TREE_CODE (type) == BOOLEAN_TYPE
3632 || TYPE_PRECISION (type) == 1))
3633 {
3634 if ((VECTOR_TYPE_P (op0_type)
3635 || VECTOR_TYPE_P (op1_type))
3636 && code != EQ_EXPR && code != NE_EXPR
3637 && !VECTOR_BOOLEAN_TYPE_P (op0_type)
3638 && !VECTOR_INTEGER_TYPE_P (op0_type))
3639 {
3640 error ("unsupported operation or type for vector comparison"
3641 " returning a boolean");
3642 debug_generic_expr (op0_type);
3643 debug_generic_expr (op1_type);
3644 return true;
3645 }
3646 }
3647 /* Or a boolean vector type with the same element count
3648 as the comparison operand types. */
3649 else if (VECTOR_TYPE_P (type)
3650 && TREE_CODE (TREE_TYPE (type)) == BOOLEAN_TYPE)
3651 {
3652 if (TREE_CODE (op0_type) != VECTOR_TYPE
3653 || TREE_CODE (op1_type) != VECTOR_TYPE)
3654 {
3655 error ("non-vector operands in vector comparison");
3656 debug_generic_expr (op0_type);
3657 debug_generic_expr (op1_type);
3658 return true;
3659 }
3660
3661 if (maybe_ne (a: TYPE_VECTOR_SUBPARTS (node: type),
3662 b: TYPE_VECTOR_SUBPARTS (node: op0_type)))
3663 {
3664 error ("invalid vector comparison resulting type");
3665 debug_generic_expr (type);
3666 return true;
3667 }
3668 }
3669 else
3670 {
3671 error ("bogus comparison result type");
3672 debug_generic_expr (type);
3673 return true;
3674 }
3675
3676 return false;
3677}
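
/* Two shapes the checks above accept (editor's illustration):

     _1 = x_2 < y_3;        // _1 of BOOLEAN_TYPE, scalar operands
     v_1 = u_2 == w_3;      // v_1 a boolean vector with the same
                            // number of elements as u_2 and w_3

   whereas an ordered comparison of general (e.g. floating-point)
   vectors producing a scalar boolean is rejected.  */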
3678
3679/* Verify a gimple assignment statement STMT with an unary rhs.
3680 Returns true if anything is wrong. */
3681
3682static bool
3683verify_gimple_assign_unary (gassign *stmt)
3684{
3685  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3686  tree lhs = gimple_assign_lhs (stmt);
3687  tree lhs_type = TREE_TYPE (lhs);
3688  tree rhs1 = gimple_assign_rhs1 (stmt);
3689 tree rhs1_type = TREE_TYPE (rhs1);
3690
3691 if (!is_gimple_reg (lhs))
3692 {
3693 error ("non-register as LHS of unary operation");
3694 return true;
3695 }
3696
3697 if (!is_gimple_val (rhs1))
3698 {
3699 error ("invalid operand in unary operation");
3700 return true;
3701 }
3702
3703 const char* const code_name = get_tree_code_name (rhs_code);
3704
3705 /* First handle conversions. */
3706 switch (rhs_code)
3707 {
3708 CASE_CONVERT:
3709 {
3710 /* Allow conversions between vectors with the same number of elements,
3711 provided that the conversion is OK for the element types too. */
3712 if (VECTOR_TYPE_P (lhs_type)
3713 && VECTOR_TYPE_P (rhs1_type)
3714 && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
3715 TYPE_VECTOR_SUBPARTS (rhs1_type)))
3716 {
3717 lhs_type = TREE_TYPE (lhs_type);
3718 rhs1_type = TREE_TYPE (rhs1_type);
3719 }
3720 else if (VECTOR_TYPE_P (lhs_type) || VECTOR_TYPE_P (rhs1_type))
3721 {
3722 error ("invalid vector types in nop conversion");
3723 debug_generic_expr (lhs_type);
3724 debug_generic_expr (rhs1_type);
3725 return true;
3726 }
3727
3728 /* Allow conversions from pointer type to integral type only if
3729 there is no sign or zero extension involved.
3730	 For targets where the precision of ptrofftype doesn't match that
3731 of pointers we allow conversions to types where
3732 POINTERS_EXTEND_UNSIGNED specifies how that works. */
3733 if ((POINTER_TYPE_P (lhs_type)
3734 && INTEGRAL_TYPE_P (rhs1_type))
3735 || (POINTER_TYPE_P (rhs1_type)
3736 && INTEGRAL_TYPE_P (lhs_type)
3737 && (TYPE_PRECISION (rhs1_type) >= TYPE_PRECISION (lhs_type)
3738#if defined(POINTERS_EXTEND_UNSIGNED)
3739 || (TYPE_MODE (rhs1_type) == ptr_mode
3740 && (TYPE_PRECISION (lhs_type)
3741 == BITS_PER_WORD /* word_mode */
3742 || (TYPE_PRECISION (lhs_type)
3743 == GET_MODE_PRECISION (Pmode))))
3744#endif
3745 )))
3746 return false;
3747
3748 /* Allow conversion from integral to offset type and vice versa. */
3749 if ((TREE_CODE (lhs_type) == OFFSET_TYPE
3750 && INTEGRAL_TYPE_P (rhs1_type))
3751 || (INTEGRAL_TYPE_P (lhs_type)
3752 && TREE_CODE (rhs1_type) == OFFSET_TYPE))
3753 return false;
3754
3755 /* Otherwise assert we are converting between types of the
3756 same kind. */
3757 if (INTEGRAL_TYPE_P (lhs_type) != INTEGRAL_TYPE_P (rhs1_type))
3758 {
3759 error ("invalid types in nop conversion");
3760 debug_generic_expr (lhs_type);
3761 debug_generic_expr (rhs1_type);
3762 return true;
3763 }
3764
3765 return false;
3766 }
3767
3768 case ADDR_SPACE_CONVERT_EXPR:
3769 {
3770 if (!POINTER_TYPE_P (rhs1_type) || !POINTER_TYPE_P (lhs_type)
3771 || (TYPE_ADDR_SPACE (TREE_TYPE (rhs1_type))
3772 == TYPE_ADDR_SPACE (TREE_TYPE (lhs_type))))
3773 {
3774 error ("invalid types in address space conversion");
3775 debug_generic_expr (lhs_type);
3776 debug_generic_expr (rhs1_type);
3777 return true;
3778 }
3779
3780 return false;
3781 }
3782
3783 case FIXED_CONVERT_EXPR:
3784 {
3785	if (!valid_fixed_convert_types_p (lhs_type, rhs1_type)
3786	    && !valid_fixed_convert_types_p (rhs1_type, lhs_type))
3787 {
3788 error ("invalid types in fixed-point conversion");
3789 debug_generic_expr (lhs_type);
3790 debug_generic_expr (rhs1_type);
3791 return true;
3792 }
3793
3794 return false;
3795 }
3796
3797 case FLOAT_EXPR:
3798 {
3799 if ((!INTEGRAL_TYPE_P (rhs1_type) || !SCALAR_FLOAT_TYPE_P (lhs_type))
3800 && (!VECTOR_INTEGER_TYPE_P (rhs1_type)
3801 || !VECTOR_FLOAT_TYPE_P (lhs_type)))
3802 {
3803 error ("invalid types in conversion to floating-point");
3804 debug_generic_expr (lhs_type);
3805 debug_generic_expr (rhs1_type);
3806 return true;
3807 }
3808
3809 return false;
3810 }
3811
3812 case FIX_TRUNC_EXPR:
3813 {
3814 if ((!INTEGRAL_TYPE_P (lhs_type) || !SCALAR_FLOAT_TYPE_P (rhs1_type))
3815 && (!VECTOR_INTEGER_TYPE_P (lhs_type)
3816 || !VECTOR_FLOAT_TYPE_P (rhs1_type)))
3817 {
3818 error ("invalid types in conversion to integer");
3819 debug_generic_expr (lhs_type);
3820 debug_generic_expr (rhs1_type);
3821 return true;
3822 }
3823
3824 return false;
3825 }
3826
3827 case VEC_UNPACK_HI_EXPR:
3828 case VEC_UNPACK_LO_EXPR:
3829 case VEC_UNPACK_FLOAT_HI_EXPR:
3830 case VEC_UNPACK_FLOAT_LO_EXPR:
3831 case VEC_UNPACK_FIX_TRUNC_HI_EXPR:
3832 case VEC_UNPACK_FIX_TRUNC_LO_EXPR:
3833 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
3834 || TREE_CODE (lhs_type) != VECTOR_TYPE
3835 || (!INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3836 && !SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type)))
3837 || (!INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3838 && !SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type)))
3839 || ((rhs_code == VEC_UNPACK_HI_EXPR
3840 || rhs_code == VEC_UNPACK_LO_EXPR)
3841 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3842 != INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
3843 || ((rhs_code == VEC_UNPACK_FLOAT_HI_EXPR
3844 || rhs_code == VEC_UNPACK_FLOAT_LO_EXPR)
3845 && (INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
3846 || SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))))
3847 || ((rhs_code == VEC_UNPACK_FIX_TRUNC_HI_EXPR
3848 || rhs_code == VEC_UNPACK_FIX_TRUNC_LO_EXPR)
3849 && (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3850 || SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type))))
3851	|| (maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
3852		      2 * GET_MODE_SIZE (element_mode (rhs1_type)))
3853 && (!VECTOR_BOOLEAN_TYPE_P (lhs_type)
3854 || !VECTOR_BOOLEAN_TYPE_P (rhs1_type)))
3855	|| maybe_ne (2 * TYPE_VECTOR_SUBPARTS (lhs_type),
3856		     TYPE_VECTOR_SUBPARTS (rhs1_type)))
3857 {
3858 error ("type mismatch in %qs expression", code_name);
3859 debug_generic_expr (lhs_type);
3860 debug_generic_expr (rhs1_type);
3861 return true;
3862 }
3863
3864 return false;
3865
3866 case NEGATE_EXPR:
3867 case ABS_EXPR:
3868 case BIT_NOT_EXPR:
3869 case PAREN_EXPR:
3870 case CONJ_EXPR:
3871      /* Disallow pointer and offset types for these unary gimple
	 operations.  */
3872 if (POINTER_TYPE_P (lhs_type)
3873 || TREE_CODE (lhs_type) == OFFSET_TYPE)
3874 {
3875 error ("invalid types for %qs", code_name);
3876 debug_generic_expr (lhs_type);
3877 debug_generic_expr (rhs1_type);
3878 return true;
3879 }
3880 break;
3881
3882 case ABSU_EXPR:
3883 if (!ANY_INTEGRAL_TYPE_P (lhs_type)
3884 || !TYPE_UNSIGNED (lhs_type)
3885 || !ANY_INTEGRAL_TYPE_P (rhs1_type)
3886 || TYPE_UNSIGNED (rhs1_type)
3887 || element_precision (lhs_type) != element_precision (rhs1_type))
3888 {
3889 error ("invalid types for %qs", code_name);
3890 debug_generic_expr (lhs_type);
3891 debug_generic_expr (rhs1_type);
3892 return true;
3893 }
3894 return false;
3895
3896 case VEC_DUPLICATE_EXPR:
3897 if (TREE_CODE (lhs_type) != VECTOR_TYPE
3898 || !useless_type_conversion_p (TREE_TYPE (lhs_type), rhs1_type))
3899 {
3900 error ("%qs should be from a scalar to a like vector", code_name);
3901 debug_generic_expr (lhs_type);
3902 debug_generic_expr (rhs1_type);
3903 return true;
3904 }
3905 return false;
3906
3907 default:
3908 gcc_unreachable ();
3909 }
3910
3911 /* For the remaining codes assert there is no conversion involved. */
3912 if (!useless_type_conversion_p (lhs_type, rhs1_type))
3913 {
3914 error ("non-trivial conversion in unary operation");
3915 debug_generic_expr (lhs_type);
3916 debug_generic_expr (rhs1_type);
3917 return true;
3918 }
3919
3920 return false;
3921}
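
/* Illustrative cases for the unary checks above (example statements
   only; p_2 is assumed to be a pointer):

     l_1 = (long) i_2;	// CASE_CONVERT, both integral: OK
     l_1 = (long) p_2;	// pointer -> integer: OK only without
			// sign/zero extension (or under the
			// POINTERS_EXTEND_UNSIGNED rule)
     q_1 = -p_2;	// NEGATE_EXPR with pointer LHS: rejected  */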
3922
3923/* Verify a gimple assignment statement STMT with a binary rhs.
3924 Returns true if anything is wrong. */
3925
3926static bool
3927verify_gimple_assign_binary (gassign *stmt)
3928{
3929  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
3930  tree lhs = gimple_assign_lhs (stmt);
3931  tree lhs_type = TREE_TYPE (lhs);
3932  tree rhs1 = gimple_assign_rhs1 (stmt);
3933  tree rhs1_type = TREE_TYPE (rhs1);
3934  tree rhs2 = gimple_assign_rhs2 (stmt);
3935  tree rhs2_type = TREE_TYPE (rhs2);
3936
3937 if (!is_gimple_reg (lhs))
3938 {
3939 error ("non-register as LHS of binary operation");
3940 return true;
3941 }
3942
3943 if (!is_gimple_val (rhs1)
3944 || !is_gimple_val (rhs2))
3945 {
3946 error ("invalid operands in binary operation");
3947 return true;
3948 }
3949
3950 const char* const code_name = get_tree_code_name (rhs_code);
3951
3952 /* First handle operations that involve different types. */
3953 switch (rhs_code)
3954 {
3955 case COMPLEX_EXPR:
3956 {
3957 if (TREE_CODE (lhs_type) != COMPLEX_TYPE
3958 || !(INTEGRAL_TYPE_P (rhs1_type)
3959 || SCALAR_FLOAT_TYPE_P (rhs1_type))
3960 || !(INTEGRAL_TYPE_P (rhs2_type)
3961 || SCALAR_FLOAT_TYPE_P (rhs2_type)))
3962 {
3963 error ("type mismatch in %qs", code_name);
3964 debug_generic_expr (lhs_type);
3965 debug_generic_expr (rhs1_type);
3966 debug_generic_expr (rhs2_type);
3967 return true;
3968 }
3969
3970 return false;
3971 }
3972
3973 case LSHIFT_EXPR:
3974 case RSHIFT_EXPR:
3975 case LROTATE_EXPR:
3976 case RROTATE_EXPR:
3977 {
3978 /* Shifts and rotates are ok on integral types, fixed point
3979 types and integer vector types. */
3980 if ((!INTEGRAL_TYPE_P (rhs1_type)
3981 && !FIXED_POINT_TYPE_P (rhs1_type)
3982 && ! (VECTOR_TYPE_P (rhs1_type)
3983 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))))
3984 || (!INTEGRAL_TYPE_P (rhs2_type)
3985 /* Vector shifts of vectors are also ok. */
3986 && ! (VECTOR_TYPE_P (rhs1_type)
3987 && INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
3988 && VECTOR_TYPE_P (rhs2_type)
3989 && INTEGRAL_TYPE_P (TREE_TYPE (rhs2_type))))
3990 || !useless_type_conversion_p (lhs_type, rhs1_type))
3991 {
3992 error ("type mismatch in %qs", code_name);
3993 debug_generic_expr (lhs_type);
3994 debug_generic_expr (rhs1_type);
3995 debug_generic_expr (rhs2_type);
3996 return true;
3997 }
3998
3999 return false;
4000 }
4001
4002 case WIDEN_LSHIFT_EXPR:
4003 {
4004 if (!INTEGRAL_TYPE_P (lhs_type)
4005 || !INTEGRAL_TYPE_P (rhs1_type)
4006 || TREE_CODE (rhs2) != INTEGER_CST
4007 || (2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)))
4008 {
4009 error ("type mismatch in %qs", code_name);
4010 debug_generic_expr (lhs_type);
4011 debug_generic_expr (rhs1_type);
4012 debug_generic_expr (rhs2_type);
4013 return true;
4014 }
4015
4016 return false;
4017 }
4018
4019 case VEC_WIDEN_LSHIFT_HI_EXPR:
4020 case VEC_WIDEN_LSHIFT_LO_EXPR:
4021 {
4022 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4023 || TREE_CODE (lhs_type) != VECTOR_TYPE
4024 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4025 || !INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))
4026 || TREE_CODE (rhs2) != INTEGER_CST
4027 || (2 * TYPE_PRECISION (TREE_TYPE (rhs1_type))
4028 > TYPE_PRECISION (TREE_TYPE (lhs_type))))
4029 {
4030 error ("type mismatch in %qs", code_name);
4031 debug_generic_expr (lhs_type);
4032 debug_generic_expr (rhs1_type);
4033 debug_generic_expr (rhs2_type);
4034 return true;
4035 }
4036
4037 return false;
4038 }
4039
4040 case PLUS_EXPR:
4041 case MINUS_EXPR:
4042 {
4043 tree lhs_etype = lhs_type;
4044 tree rhs1_etype = rhs1_type;
4045 tree rhs2_etype = rhs2_type;
4046 if (VECTOR_TYPE_P (lhs_type))
4047 {
4048 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4049 || TREE_CODE (rhs2_type) != VECTOR_TYPE)
4050 {
4051 error ("invalid non-vector operands to %qs", code_name);
4052 return true;
4053 }
4054 lhs_etype = TREE_TYPE (lhs_type);
4055 rhs1_etype = TREE_TYPE (rhs1_type);
4056 rhs2_etype = TREE_TYPE (rhs2_type);
4057 }
4058 if (POINTER_TYPE_P (lhs_etype)
4059 || POINTER_TYPE_P (rhs1_etype)
4060 || POINTER_TYPE_P (rhs2_etype))
4061 {
4062 error ("invalid (pointer) operands %qs", code_name);
4063 return true;
4064 }
4065
4066 /* Continue with generic binary expression handling. */
4067 break;
4068 }
4069
4070 case POINTER_PLUS_EXPR:
4071 {
4072 if (!POINTER_TYPE_P (rhs1_type)
4073 || !useless_type_conversion_p (lhs_type, rhs1_type)
4074	    || !ptrofftype_p (rhs2_type))
4075 {
4076 error ("type mismatch in %qs", code_name);
4077 debug_generic_stmt (lhs_type);
4078 debug_generic_stmt (rhs1_type);
4079 debug_generic_stmt (rhs2_type);
4080 return true;
4081 }
4082
4083 return false;
4084 }
4085
4086 case POINTER_DIFF_EXPR:
4087 {
4088 if (!POINTER_TYPE_P (rhs1_type)
4089 || !POINTER_TYPE_P (rhs2_type)
4090 /* Because we special-case pointers to void we allow difference
4091 of arbitrary pointers with the same mode. */
4092 || TYPE_MODE (rhs1_type) != TYPE_MODE (rhs2_type)
4093 || !INTEGRAL_TYPE_P (lhs_type)
4094 || TYPE_UNSIGNED (lhs_type)
4095 || TYPE_PRECISION (lhs_type) != TYPE_PRECISION (rhs1_type))
4096 {
4097 error ("type mismatch in %qs", code_name);
4098 debug_generic_stmt (lhs_type);
4099 debug_generic_stmt (rhs1_type);
4100 debug_generic_stmt (rhs2_type);
4101 return true;
4102 }
4103
4104 return false;
4105 }
4106
4107 case TRUTH_ANDIF_EXPR:
4108 case TRUTH_ORIF_EXPR:
4109 case TRUTH_AND_EXPR:
4110 case TRUTH_OR_EXPR:
4111 case TRUTH_XOR_EXPR:
4112
4113 gcc_unreachable ();
4114
4115 case LT_EXPR:
4116 case LE_EXPR:
4117 case GT_EXPR:
4118 case GE_EXPR:
4119 case EQ_EXPR:
4120 case NE_EXPR:
4121 case UNORDERED_EXPR:
4122 case ORDERED_EXPR:
4123 case UNLT_EXPR:
4124 case UNLE_EXPR:
4125 case UNGT_EXPR:
4126 case UNGE_EXPR:
4127 case UNEQ_EXPR:
4128 case LTGT_EXPR:
4129 /* Comparisons are also binary, but the result type is not
4130 connected to the operand types. */
4131    return verify_gimple_comparison (lhs_type, rhs1, rhs2, rhs_code);
4132
4133 case WIDEN_MULT_EXPR:
4134 if (TREE_CODE (lhs_type) != INTEGER_TYPE)
4135 return true;
4136 return ((2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type))
4137 || (TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type)));
4138
4139 case WIDEN_SUM_EXPR:
4140 {
4141 if (((TREE_CODE (rhs1_type) != VECTOR_TYPE
4142 || TREE_CODE (lhs_type) != VECTOR_TYPE)
4143 && ((!INTEGRAL_TYPE_P (rhs1_type)
4144 && !SCALAR_FLOAT_TYPE_P (rhs1_type))
4145 || (!INTEGRAL_TYPE_P (lhs_type)
4146 && !SCALAR_FLOAT_TYPE_P (lhs_type))))
4147 || !useless_type_conversion_p (lhs_type, rhs2_type)
4148	  || maybe_lt (GET_MODE_SIZE (element_mode (rhs2_type)),
4149		       2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4150 {
4151 error ("type mismatch in %qs", code_name);
4152 debug_generic_expr (lhs_type);
4153 debug_generic_expr (rhs1_type);
4154 debug_generic_expr (rhs2_type);
4155 return true;
4156 }
4157 return false;
4158 }
4159
4160 case VEC_WIDEN_MULT_HI_EXPR:
4161 case VEC_WIDEN_MULT_LO_EXPR:
4162 case VEC_WIDEN_MULT_EVEN_EXPR:
4163 case VEC_WIDEN_MULT_ODD_EXPR:
4164 {
4165 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4166 || TREE_CODE (lhs_type) != VECTOR_TYPE
4167	  || !types_compatible_p (rhs1_type, rhs2_type)
4168	  || maybe_ne (GET_MODE_SIZE (element_mode (lhs_type)),
4169		       2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4170 {
4171 error ("type mismatch in %qs", code_name);
4172 debug_generic_expr (lhs_type);
4173 debug_generic_expr (rhs1_type);
4174 debug_generic_expr (rhs2_type);
4175 return true;
4176 }
4177 return false;
4178 }
4179
4180 case VEC_PACK_TRUNC_EXPR:
4181 /* ??? We currently use VEC_PACK_TRUNC_EXPR to simply concat
4182 vector boolean types. */
4183 if (VECTOR_BOOLEAN_TYPE_P (lhs_type)
4184 && VECTOR_BOOLEAN_TYPE_P (rhs1_type)
4185	&& types_compatible_p (rhs1_type, rhs2_type)
4186 && known_eq (TYPE_VECTOR_SUBPARTS (lhs_type),
4187 2 * TYPE_VECTOR_SUBPARTS (rhs1_type)))
4188 return false;
4189
4190 /* Fallthru. */
4191 case VEC_PACK_SAT_EXPR:
4192 case VEC_PACK_FIX_TRUNC_EXPR:
4193 {
4194 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4195 || TREE_CODE (lhs_type) != VECTOR_TYPE
4196 || !((rhs_code == VEC_PACK_FIX_TRUNC_EXPR
4197 && SCALAR_FLOAT_TYPE_P (TREE_TYPE (rhs1_type))
4198 && INTEGRAL_TYPE_P (TREE_TYPE (lhs_type)))
4199 || (INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4200 == INTEGRAL_TYPE_P (TREE_TYPE (lhs_type))))
4201	  || !types_compatible_p (rhs1_type, rhs2_type)
4202	  || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
4203		       2 * GET_MODE_SIZE (element_mode (lhs_type)))
4204	  || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (rhs1_type),
4205		       TYPE_VECTOR_SUBPARTS (lhs_type)))
4206 {
4207 error ("type mismatch in %qs", code_name);
4208 debug_generic_expr (lhs_type);
4209 debug_generic_expr (rhs1_type);
4210 debug_generic_expr (rhs2_type);
4211 return true;
4212 }
4213
4214 return false;
4215 }
4216
4217 case VEC_PACK_FLOAT_EXPR:
4218 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4219 || TREE_CODE (lhs_type) != VECTOR_TYPE
4220 || !INTEGRAL_TYPE_P (TREE_TYPE (rhs1_type))
4221 || !SCALAR_FLOAT_TYPE_P (TREE_TYPE (lhs_type))
4222	  || !types_compatible_p (rhs1_type, rhs2_type)
4223	  || maybe_ne (GET_MODE_SIZE (element_mode (rhs1_type)),
4224		       2 * GET_MODE_SIZE (element_mode (lhs_type)))
4225	  || maybe_ne (2 * TYPE_VECTOR_SUBPARTS (rhs1_type),
4226		       TYPE_VECTOR_SUBPARTS (lhs_type)))
4227 {
4228 error ("type mismatch in %qs", code_name);
4229 debug_generic_expr (lhs_type);
4230 debug_generic_expr (rhs1_type);
4231 debug_generic_expr (rhs2_type);
4232 return true;
4233 }
4234
4235 return false;
4236
4237 case MULT_EXPR:
4238 case MULT_HIGHPART_EXPR:
4239 case TRUNC_DIV_EXPR:
4240 case CEIL_DIV_EXPR:
4241 case FLOOR_DIV_EXPR:
4242 case ROUND_DIV_EXPR:
4243 case TRUNC_MOD_EXPR:
4244 case CEIL_MOD_EXPR:
4245 case FLOOR_MOD_EXPR:
4246 case ROUND_MOD_EXPR:
4247 case RDIV_EXPR:
4248 case EXACT_DIV_EXPR:
4249 case BIT_IOR_EXPR:
4250 case BIT_XOR_EXPR:
4251      /* Disallow pointer and offset types for these binary gimple
	 operations.  */
4252 if (POINTER_TYPE_P (lhs_type)
4253 || TREE_CODE (lhs_type) == OFFSET_TYPE)
4254 {
4255 error ("invalid types for %qs", code_name);
4256 debug_generic_expr (lhs_type);
4257 debug_generic_expr (rhs1_type);
4258 debug_generic_expr (rhs2_type);
4259 return true;
4260 }
4261 /* Continue with generic binary expression handling. */
4262 break;
4263
4264 case MIN_EXPR:
4265 case MAX_EXPR:
4266 /* Continue with generic binary expression handling. */
4267 break;
4268
4269 case BIT_AND_EXPR:
4270 if (POINTER_TYPE_P (lhs_type)
4271 && TREE_CODE (rhs2) == INTEGER_CST)
4272 break;
4273      /* Disallow pointer and offset types for these binary gimple
	 operations.  */
4274 if (POINTER_TYPE_P (lhs_type)
4275 || TREE_CODE (lhs_type) == OFFSET_TYPE)
4276 {
4277 error ("invalid types for %qs", code_name);
4278 debug_generic_expr (lhs_type);
4279 debug_generic_expr (rhs1_type);
4280 debug_generic_expr (rhs2_type);
4281 return true;
4282 }
4283 /* Continue with generic binary expression handling. */
4284 break;
4285
4286 case VEC_SERIES_EXPR:
4287 if (!useless_type_conversion_p (rhs1_type, rhs2_type))
4288 {
4289 error ("type mismatch in %qs", code_name);
4290 debug_generic_expr (rhs1_type);
4291 debug_generic_expr (rhs2_type);
4292 return true;
4293 }
4294 if (TREE_CODE (lhs_type) != VECTOR_TYPE
4295 || !useless_type_conversion_p (TREE_TYPE (lhs_type), rhs1_type))
4296 {
4297 error ("vector type expected in %qs", code_name);
4298 debug_generic_expr (lhs_type);
4299 return true;
4300 }
4301 return false;
4302
4303 default:
4304 gcc_unreachable ();
4305 }
4306
4307 if (!useless_type_conversion_p (lhs_type, rhs1_type)
4308 || !useless_type_conversion_p (lhs_type, rhs2_type))
4309 {
4310 error ("type mismatch in binary expression");
4311 debug_generic_stmt (lhs_type);
4312 debug_generic_stmt (rhs1_type);
4313 debug_generic_stmt (rhs2_type);
4314 return true;
4315 }
4316
4317 return false;
4318}
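
/* Example of the pointer arithmetic shapes checked above: for
   "int *p; long n;" the gimplifier emits

     _1 = (sizetype) n_2;
     q_3 = p_4 + _1;		// POINTER_PLUS_EXPR

   so rhs2 has ptrofftype, while a pointer subtraction is represented
   as a POINTER_DIFF_EXPR whose result is a signed integer with the
   precision of the pointers (illustration only).  */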
4319
4320/* Verify a gimple assignment statement STMT with a ternary rhs.
4321 Returns true if anything is wrong. */
4322
4323static bool
4324verify_gimple_assign_ternary (gassign *stmt)
4325{
4326  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
4327  tree lhs = gimple_assign_lhs (stmt);
4328  tree lhs_type = TREE_TYPE (lhs);
4329  tree rhs1 = gimple_assign_rhs1 (stmt);
4330  tree rhs1_type = TREE_TYPE (rhs1);
4331  tree rhs2 = gimple_assign_rhs2 (stmt);
4332  tree rhs2_type = TREE_TYPE (rhs2);
4333  tree rhs3 = gimple_assign_rhs3 (stmt);
4334 tree rhs3_type = TREE_TYPE (rhs3);
4335
4336 if (!is_gimple_reg (lhs))
4337 {
4338 error ("non-register as LHS of ternary operation");
4339 return true;
4340 }
4341
4342 if (!is_gimple_val (rhs1)
4343 || !is_gimple_val (rhs2)
4344 || !is_gimple_val (rhs3))
4345 {
4346 error ("invalid operands in ternary operation");
4347 return true;
4348 }
4349
4350 const char* const code_name = get_tree_code_name (rhs_code);
4351
4352 /* First handle operations that involve different types. */
4353 switch (rhs_code)
4354 {
4355 case WIDEN_MULT_PLUS_EXPR:
4356 case WIDEN_MULT_MINUS_EXPR:
4357 if ((!INTEGRAL_TYPE_P (rhs1_type)
4358 && !FIXED_POINT_TYPE_P (rhs1_type))
4359 || !useless_type_conversion_p (rhs1_type, rhs2_type)
4360 || !useless_type_conversion_p (lhs_type, rhs3_type)
4361 || 2 * TYPE_PRECISION (rhs1_type) > TYPE_PRECISION (lhs_type)
4362 || TYPE_PRECISION (rhs1_type) != TYPE_PRECISION (rhs2_type))
4363 {
4364 error ("type mismatch in %qs", code_name);
4365 debug_generic_expr (lhs_type);
4366 debug_generic_expr (rhs1_type);
4367 debug_generic_expr (rhs2_type);
4368 debug_generic_expr (rhs3_type);
4369 return true;
4370 }
4371 break;
4372
4373 case VEC_COND_EXPR:
4374 if (!VECTOR_BOOLEAN_TYPE_P (rhs1_type)
4375	  || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
4376		       TYPE_VECTOR_SUBPARTS (lhs_type)))
4377 {
4378 error ("the first argument of a %qs must be of a "
4379 "boolean vector type of the same number of elements "
4380 "as the result", code_name);
4381 debug_generic_expr (lhs_type);
4382 debug_generic_expr (rhs1_type);
4383 return true;
4384 }
4385 /* Fallthrough. */
4386 case COND_EXPR:
4387 if (!useless_type_conversion_p (lhs_type, rhs2_type)
4388 || !useless_type_conversion_p (lhs_type, rhs3_type))
4389 {
4390 error ("type mismatch in %qs", code_name);
4391 debug_generic_expr (lhs_type);
4392 debug_generic_expr (rhs2_type);
4393 debug_generic_expr (rhs3_type);
4394 return true;
4395 }
4396 break;
4397
4398 case VEC_PERM_EXPR:
4399      /* If the permute vector rhs3 is constant, we allow lhs and the
4400	 rhs operands to have different vector types, provided:
4401	 (1) lhs, rhs1 and rhs2 have the same element type;
4402	 (2) the rhs3 vector is constant and has an integer element type;
4403	 (3) len(lhs) == len(rhs3) && len(rhs1) == len(rhs2).  */
4404
4405 if (TREE_CODE (lhs_type) != VECTOR_TYPE
4406 || TREE_CODE (rhs1_type) != VECTOR_TYPE
4407 || TREE_CODE (rhs2_type) != VECTOR_TYPE
4408 || TREE_CODE (rhs3_type) != VECTOR_TYPE)
4409 {
4410 error ("vector types expected in %qs", code_name);
4411 debug_generic_expr (lhs_type);
4412 debug_generic_expr (rhs1_type);
4413 debug_generic_expr (rhs2_type);
4414 debug_generic_expr (rhs3_type);
4415 return true;
4416 }
4417
4418      /* If rhs3 is constant, we allow lhs, rhs1 and rhs2 to have
4419	 different vector types, as long as they share the same element type.  */
4420 if (TREE_CONSTANT (rhs3)
4421 ? (!useless_type_conversion_p (TREE_TYPE (lhs_type), TREE_TYPE (rhs1_type))
4422 || !useless_type_conversion_p (TREE_TYPE (lhs_type), TREE_TYPE (rhs2_type)))
4423 : (!useless_type_conversion_p (lhs_type, rhs1_type)
4424 || !useless_type_conversion_p (lhs_type, rhs2_type)))
4425 {
4426 error ("type mismatch in %qs", code_name);
4427 debug_generic_expr (lhs_type);
4428 debug_generic_expr (rhs1_type);
4429 debug_generic_expr (rhs2_type);
4430 debug_generic_expr (rhs3_type);
4431 return true;
4432 }
4433
4434 /* If rhs3 is constant, relax the check len(rhs2) == len(rhs3). */
4435      if (maybe_ne (TYPE_VECTOR_SUBPARTS (rhs1_type),
4436		    TYPE_VECTOR_SUBPARTS (rhs2_type))
4437	  || (!TREE_CONSTANT (rhs3)
4438	      && maybe_ne (TYPE_VECTOR_SUBPARTS (rhs2_type),
4439			   TYPE_VECTOR_SUBPARTS (rhs3_type)))
4440	  || maybe_ne (TYPE_VECTOR_SUBPARTS (rhs3_type),
4441		       TYPE_VECTOR_SUBPARTS (lhs_type)))
4442 {
4443 error ("vectors with different element number found in %qs",
4444 code_name);
4445 debug_generic_expr (lhs_type);
4446 debug_generic_expr (rhs1_type);
4447 debug_generic_expr (rhs2_type);
4448 debug_generic_expr (rhs3_type);
4449 return true;
4450 }
4451
4452 if (TREE_CODE (TREE_TYPE (rhs3_type)) != INTEGER_TYPE
4453 || (TREE_CODE (rhs3) != VECTOR_CST
4454 && (GET_MODE_BITSIZE (SCALAR_INT_TYPE_MODE
4455 (TREE_TYPE (rhs3_type)))
4456 != GET_MODE_BITSIZE (SCALAR_TYPE_MODE
4457 (TREE_TYPE (rhs1_type))))))
4458 {
4459 error ("invalid mask type in %qs", code_name);
4460 debug_generic_expr (lhs_type);
4461 debug_generic_expr (rhs1_type);
4462 debug_generic_expr (rhs2_type);
4463 debug_generic_expr (rhs3_type);
4464 return true;
4465 }
4466
4467 return false;
4468
4469 case SAD_EXPR:
4470 if (!useless_type_conversion_p (rhs1_type, rhs2_type)
4471 || !useless_type_conversion_p (lhs_type, rhs3_type)
4472 || 2 * GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (rhs1_type)))
4473 > GET_MODE_UNIT_BITSIZE (TYPE_MODE (TREE_TYPE (lhs_type))))
4474 {
4475 error ("type mismatch in %qs", code_name);
4476 debug_generic_expr (lhs_type);
4477 debug_generic_expr (rhs1_type);
4478 debug_generic_expr (rhs2_type);
4479 debug_generic_expr (rhs3_type);
4480 return true;
4481 }
4482
4483 if (TREE_CODE (rhs1_type) != VECTOR_TYPE
4484 || TREE_CODE (rhs2_type) != VECTOR_TYPE
4485 || TREE_CODE (rhs3_type) != VECTOR_TYPE)
4486 {
4487 error ("vector types expected in %qs", code_name);
4488 debug_generic_expr (lhs_type);
4489 debug_generic_expr (rhs1_type);
4490 debug_generic_expr (rhs2_type);
4491 debug_generic_expr (rhs3_type);
4492 return true;
4493 }
4494
4495 return false;
4496
4497 case BIT_INSERT_EXPR:
4498 if (! useless_type_conversion_p (lhs_type, rhs1_type))
4499 {
4500 error ("type mismatch in %qs", code_name);
4501 debug_generic_expr (lhs_type);
4502 debug_generic_expr (rhs1_type);
4503 return true;
4504 }
4505 if (! ((INTEGRAL_TYPE_P (rhs1_type)
4506 && INTEGRAL_TYPE_P (rhs2_type))
4507 /* Vector element insert. */
4508 || (VECTOR_TYPE_P (rhs1_type)
4509	     && types_compatible_p (TREE_TYPE (rhs1_type), rhs2_type))
4510 /* Aligned sub-vector insert. */
4511 || (VECTOR_TYPE_P (rhs1_type)
4512 && VECTOR_TYPE_P (rhs2_type)
4513 && types_compatible_p (TREE_TYPE (rhs1_type),
4514 TREE_TYPE (rhs2_type))
4515	     && multiple_p (TYPE_VECTOR_SUBPARTS (rhs1_type),
4516			    TYPE_VECTOR_SUBPARTS (rhs2_type))
4517	     && multiple_p (wi::to_poly_offset (rhs3),
4518			    wi::to_poly_offset (TYPE_SIZE (rhs2_type))))))
4519 {
4520 error ("not allowed type combination in %qs", code_name);
4521 debug_generic_expr (rhs1_type);
4522 debug_generic_expr (rhs2_type);
4523 return true;
4524 }
4525 if (! tree_fits_uhwi_p (rhs3)
4526 || ! types_compatible_p (bitsizetype, TREE_TYPE (rhs3))
4527 || ! tree_fits_uhwi_p (TYPE_SIZE (rhs2_type)))
4528 {
4529 error ("invalid position or size in %qs", code_name);
4530 return true;
4531 }
4532 if (INTEGRAL_TYPE_P (rhs1_type)
4533      && !type_has_mode_precision_p (rhs1_type))
4534 {
4535 error ("%qs into non-mode-precision operand", code_name);
4536 return true;
4537 }
4538 if (INTEGRAL_TYPE_P (rhs1_type))
4539 {
4540 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
4541 if (bitpos >= TYPE_PRECISION (rhs1_type)
4542 || (bitpos + TYPE_PRECISION (rhs2_type)
4543 > TYPE_PRECISION (rhs1_type)))
4544 {
4545 error ("insertion out of range in %qs", code_name);
4546 return true;
4547 }
4548 }
4549 else if (VECTOR_TYPE_P (rhs1_type))
4550 {
4551 unsigned HOST_WIDE_INT bitpos = tree_to_uhwi (rhs3);
4552 unsigned HOST_WIDE_INT bitsize = tree_to_uhwi (TYPE_SIZE (rhs2_type));
4553 if (bitpos % bitsize != 0)
4554 {
4555 error ("%qs not at element boundary", code_name);
4556 return true;
4557 }
4558 }
4559 return false;
4560
4561 case DOT_PROD_EXPR:
4562 {
4563 if (((TREE_CODE (rhs1_type) != VECTOR_TYPE
4564 || TREE_CODE (lhs_type) != VECTOR_TYPE)
4565 && ((!INTEGRAL_TYPE_P (rhs1_type)
4566 && !SCALAR_FLOAT_TYPE_P (rhs1_type))
4567 || (!INTEGRAL_TYPE_P (lhs_type)
4568 && !SCALAR_FLOAT_TYPE_P (lhs_type))))
4569 /* rhs1_type and rhs2_type may differ in sign. */
4570 || !tree_nop_conversion_p (rhs1_type, rhs2_type)
4571 || !useless_type_conversion_p (lhs_type, rhs3_type)
4572	  || maybe_lt (GET_MODE_SIZE (element_mode (rhs3_type)),
4573		       2 * GET_MODE_SIZE (element_mode (rhs1_type))))
4574 {
4575 error ("type mismatch in %qs", code_name);
4576 debug_generic_expr (lhs_type);
4577 debug_generic_expr (rhs1_type);
4578 debug_generic_expr (rhs2_type);
4579 return true;
4580 }
4581 return false;
4582 }
4583
4584 case REALIGN_LOAD_EXPR:
4585 /* FIXME. */
4586 return false;
4587
4588 default:
4589 gcc_unreachable ();
4590 }
4591 return false;
4592}
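
/* Example for the VEC_COND_EXPR checks above, with four-element
   vectors (illustration only):

     mask_1 = a_2 < b_3;			// vector of 4 booleans
     res_4 = VEC_COND_EXPR <mask_1, x_5, y_6>;

   rhs1 must be a boolean vector with as many elements as the result,
   and x_5/y_6 must have the result's type.  */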
4593
4594/* Verify a gimple assignment statement STMT with a single rhs.
4595 Returns true if anything is wrong. */
4596
4597static bool
4598verify_gimple_assign_single (gassign *stmt)
4599{
4600  enum tree_code rhs_code = gimple_assign_rhs_code (stmt);
4601  tree lhs = gimple_assign_lhs (stmt);
4602  tree lhs_type = TREE_TYPE (lhs);
4603  tree rhs1 = gimple_assign_rhs1 (stmt);
4604 tree rhs1_type = TREE_TYPE (rhs1);
4605 bool res = false;
4606
4607 const char* const code_name = get_tree_code_name (rhs_code);
4608
4609 if (!useless_type_conversion_p (lhs_type, rhs1_type))
4610 {
4611 error ("non-trivial conversion in %qs", code_name);
4612 debug_generic_expr (lhs_type);
4613 debug_generic_expr (rhs1_type);
4614 return true;
4615 }
4616
4617  if (gimple_clobber_p (stmt)
4618 && !(DECL_P (lhs) || TREE_CODE (lhs) == MEM_REF))
4619 {
4620 error ("%qs LHS in clobber statement",
4621 get_tree_code_name (TREE_CODE (lhs)));
4622 debug_generic_expr (lhs);
4623 return true;
4624 }
4625
4626 if (TREE_CODE (lhs) == WITH_SIZE_EXPR)
4627 {
4628 error ("%qs LHS in assignment statement",
4629 get_tree_code_name (TREE_CODE (lhs)));
4630 debug_generic_expr (lhs);
4631 return true;
4632 }
4633
4634  if (handled_component_p (lhs)
4635 || TREE_CODE (lhs) == MEM_REF
4636 || TREE_CODE (lhs) == TARGET_MEM_REF)
4637    res |= verify_types_in_gimple_reference (lhs, true);
4638
4639 /* Special codes we cannot handle via their class. */
4640 switch (rhs_code)
4641 {
4642 case ADDR_EXPR:
4643 {
4644 tree op = TREE_OPERAND (rhs1, 0);
4645	if (!is_gimple_addressable (op))
4646 {
4647 error ("invalid operand in %qs", code_name);
4648 return true;
4649 }
4650
4651 /* Technically there is no longer a need for matching types, but
4652 gimple hygiene asks for this check. In LTO we can end up
4653 combining incompatible units and thus end up with addresses
4654 of globals that change their type to a common one. */
4655 if (!in_lto_p
4656 && !types_compatible_p (TREE_TYPE (op),
4657 TREE_TYPE (TREE_TYPE (rhs1)))
4658 && !one_pointer_to_useless_type_conversion_p (TREE_TYPE (rhs1),
4659 TREE_TYPE (op)))
4660 {
4661 error ("type mismatch in %qs", code_name);
4662 debug_generic_stmt (TREE_TYPE (rhs1));
4663 debug_generic_stmt (TREE_TYPE (op));
4664 return true;
4665 }
4666
4667	return (verify_address (rhs1, true)
4668		|| verify_types_in_gimple_reference (op, true));
4669 }
4670
4671 /* tcc_reference */
4672 case INDIRECT_REF:
4673 error ("%qs in gimple IL", code_name);
4674 return true;
4675
4676 case COMPONENT_REF:
4677 case BIT_FIELD_REF:
4678 case ARRAY_REF:
4679 case ARRAY_RANGE_REF:
4680 case VIEW_CONVERT_EXPR:
4681 case REALPART_EXPR:
4682 case IMAGPART_EXPR:
4683 case TARGET_MEM_REF:
4684 case MEM_REF:
4685 if (!is_gimple_reg (lhs)
4686 && is_gimple_reg_type (TREE_TYPE (lhs)))
4687 {
4688 error ("invalid RHS for gimple memory store: %qs", code_name);
4689 debug_generic_stmt (lhs);
4690 debug_generic_stmt (rhs1);
4691 return true;
4692 }
4693      return res || verify_types_in_gimple_reference (rhs1, false);
4694
4695 /* tcc_constant */
4696 case SSA_NAME:
4697 case INTEGER_CST:
4698 case REAL_CST:
4699 case FIXED_CST:
4700 case COMPLEX_CST:
4701 case VECTOR_CST:
4702 case STRING_CST:
4703 return res;
4704
4705 /* tcc_declaration */
4706 case CONST_DECL:
4707 return res;
4708 case VAR_DECL:
4709 case PARM_DECL:
4710 if (!is_gimple_reg (lhs)
4711 && !is_gimple_reg (rhs1)
4712 && is_gimple_reg_type (TREE_TYPE (lhs)))
4713 {
4714 error ("invalid RHS for gimple memory store: %qs", code_name);
4715 debug_generic_stmt (lhs);
4716 debug_generic_stmt (rhs1);
4717 return true;
4718 }
4719 return res;
4720
4721 case CONSTRUCTOR:
4722 if (VECTOR_TYPE_P (rhs1_type))
4723 {
4724 unsigned int i;
4725 tree elt_i, elt_v, elt_t = NULL_TREE;
4726
4727 if (CONSTRUCTOR_NELTS (rhs1) == 0)
4728 return res;
4729	  /* For vector CONSTRUCTORs we require that either it is an empty
4730	     CONSTRUCTOR, or it is a CONSTRUCTOR of smaller vector elements
4731	     (then the element count must be correct to cover the whole
4732	     outer vector and the index must be NULL on all elements), or it
4733	     is a CONSTRUCTOR of scalar elements, where we as an exception
4734	     allow a smaller number of elements (assuming zero filling) and
4735	     consecutive indexes as compared to NULL indexes (such
4736	     CONSTRUCTORs can appear in the IL from FEs).  */
4737 FOR_EACH_CONSTRUCTOR_ELT (CONSTRUCTOR_ELTS (rhs1), i, elt_i, elt_v)
4738 {
4739 if (elt_t == NULL_TREE)
4740 {
4741 elt_t = TREE_TYPE (elt_v);
4742 if (VECTOR_TYPE_P (elt_t))
4743 {
4744 tree elt_t = TREE_TYPE (elt_v);
4745 if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
4746 TREE_TYPE (elt_t)))
4747 {
4748 error ("incorrect type of vector %qs elements",
4749 code_name);
4750 debug_generic_stmt (rhs1);
4751 return true;
4752 }
4753		  else if (maybe_ne (CONSTRUCTOR_NELTS (rhs1)
4754				     * TYPE_VECTOR_SUBPARTS (elt_t),
4755				     TYPE_VECTOR_SUBPARTS (rhs1_type)))
4756 {
4757 error ("incorrect number of vector %qs elements",
4758 code_name);
4759 debug_generic_stmt (rhs1);
4760 return true;
4761 }
4762 }
4763 else if (!useless_type_conversion_p (TREE_TYPE (rhs1_type),
4764 elt_t))
4765 {
4766 error ("incorrect type of vector %qs elements",
4767 code_name);
4768 debug_generic_stmt (rhs1);
4769 return true;
4770 }
4771 else if (maybe_gt (CONSTRUCTOR_NELTS (rhs1),
4772 TYPE_VECTOR_SUBPARTS (rhs1_type)))
4773 {
4774 error ("incorrect number of vector %qs elements",
4775 code_name);
4776 debug_generic_stmt (rhs1);
4777 return true;
4778 }
4779 }
4780 else if (!useless_type_conversion_p (elt_t, TREE_TYPE (elt_v)))
4781 {
4782 error ("incorrect type of vector CONSTRUCTOR elements");
4783 debug_generic_stmt (rhs1);
4784 return true;
4785 }
4786 if (elt_i != NULL_TREE
4787 && (VECTOR_TYPE_P (elt_t)
4788 || TREE_CODE (elt_i) != INTEGER_CST
4789 || compare_tree_int (elt_i, i) != 0))
4790 {
4791 error ("vector %qs with non-NULL element index",
4792 code_name);
4793 debug_generic_stmt (rhs1);
4794 return true;
4795 }
4796 if (!is_gimple_val (elt_v))
4797 {
4798 error ("vector %qs element is not a GIMPLE value",
4799 code_name);
4800 debug_generic_stmt (rhs1);
4801 return true;
4802 }
4803 }
4804 }
4805 else if (CONSTRUCTOR_NELTS (rhs1) != 0)
4806 {
4807 error ("non-vector %qs with elements", code_name);
4808 debug_generic_stmt (rhs1);
4809 return true;
4810 }
4811 return res;
4812
4813 case WITH_SIZE_EXPR:
4814 error ("%qs RHS in assignment statement",
4815 get_tree_code_name (rhs_code));
4816 debug_generic_expr (rhs1);
4817 return true;
4818
4819 case OBJ_TYPE_REF:
4820 /* FIXME. */
4821 return res;
4822
4823 default:;
4824 }
4825
4826 return res;
4827}
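
/* For vector CONSTRUCTORs the rules above allow, e.g. for a v4si LHS
   (illustration only):

     v_1 = {};			// empty: implicit zero vector
     v_2 = {a_3, b_4};		// scalar elements, zero-filled tail

   and for a v8si LHS a pair of v4si elements exactly covering it, but
   reject explicit non-consecutive indexes and vector-typed elements
   that do not cover the whole LHS vector.  */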
4828
4829/* Verify the contents of a GIMPLE_ASSIGN STMT. Returns true when there
4830 is a problem, otherwise false. */
4831
4832static bool
4833verify_gimple_assign (gassign *stmt)
4834{
4835  switch (gimple_assign_rhs_class (stmt))
4836 {
4837 case GIMPLE_SINGLE_RHS:
4838 return verify_gimple_assign_single (stmt);
4839
4840 case GIMPLE_UNARY_RHS:
4841 return verify_gimple_assign_unary (stmt);
4842
4843 case GIMPLE_BINARY_RHS:
4844 return verify_gimple_assign_binary (stmt);
4845
4846 case GIMPLE_TERNARY_RHS:
4847 return verify_gimple_assign_ternary (stmt);
4848
4849 default:
4850 gcc_unreachable ();
4851 }
4852}
4853
4854/* Verify the contents of a GIMPLE_RETURN STMT. Returns true when there
4855 is a problem, otherwise false. */
4856
4857static bool
4858verify_gimple_return (greturn *stmt)
4859{
4860  tree op = gimple_return_retval (stmt);
4861 tree restype = TREE_TYPE (TREE_TYPE (cfun->decl));
4862
4863 /* We cannot test for present return values as we do not fix up missing
4864 return values from the original source. */
4865 if (op == NULL)
4866 return false;
4867
4868 if (!is_gimple_val (op)
4869 && TREE_CODE (op) != RESULT_DECL)
4870 {
4871 error ("invalid operand in return statement");
4872 debug_generic_stmt (op);
4873 return true;
4874 }
4875
4876 if ((TREE_CODE (op) == RESULT_DECL
4877 && DECL_BY_REFERENCE (op))
4878 || (TREE_CODE (op) == SSA_NAME
4879 && SSA_NAME_VAR (op)
4880 && TREE_CODE (SSA_NAME_VAR (op)) == RESULT_DECL
4881 && DECL_BY_REFERENCE (SSA_NAME_VAR (op))))
4882 op = TREE_TYPE (op);
4883
4884 if (!useless_type_conversion_p (restype, TREE_TYPE (op)))
4885 {
4886 error ("invalid conversion in return statement");
4887 debug_generic_stmt (restype);
4888 debug_generic_stmt (TREE_TYPE (op));
4889 return true;
4890 }
4891
4892 return false;
4893}
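
/* E.g. for "long f (void) { int i; ... return i; }" the widening
   conversion must happen before the return (illustration only):

     _1 = (long) i_2;
     return _1;

   since the operand type has to be trivially convertible to the
   function's result type.  */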
4894
4895
4896/* Verify the contents of a GIMPLE_GOTO STMT. Returns true when there
4897 is a problem, otherwise false. */
4898
4899static bool
4900verify_gimple_goto (ggoto *stmt)
4901{
4902  tree dest = gimple_goto_dest (stmt);
4903
4904 /* ??? We have two canonical forms of direct goto destinations, a
4905 bare LABEL_DECL and an ADDR_EXPR of a LABEL_DECL. */
4906 if (TREE_CODE (dest) != LABEL_DECL
4907 && (!is_gimple_val (dest)
4908 || !POINTER_TYPE_P (TREE_TYPE (dest))))
4909 {
4910 error ("goto destination is neither a label nor a pointer");
4911 return true;
4912 }
4913
4914 return false;
4915}
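
/* Both canonical destination forms pass this check: a direct
   "goto lab" carrying the bare LABEL_DECL, and a computed goto
   (illustration only):

     addr_1 = &&lab;		// GNU address-of-label
     goto *addr_1;		// destination is a pointer value  */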
4916
4917/* Verify the contents of a GIMPLE_SWITCH STMT. Returns true when there
4918 is a problem, otherwise false. */
4919
4920static bool
4921verify_gimple_switch (gswitch *stmt)
4922{
4923 unsigned int i, n;
4924 tree elt, prev_upper_bound = NULL_TREE;
4925 tree index_type, elt_type = NULL_TREE;
4926
4927  if (!is_gimple_val (gimple_switch_index (stmt)))
4928 {
4929 error ("invalid operand to switch statement");
4930      debug_generic_stmt (gimple_switch_index (stmt));
4931 return true;
4932 }
4933
4934 index_type = TREE_TYPE (gimple_switch_index (stmt));
4935 if (! INTEGRAL_TYPE_P (index_type))
4936 {
4937 error ("non-integral type switch statement");
4938 debug_generic_expr (index_type);
4939 return true;
4940 }
4941
4942  elt = gimple_switch_label (stmt, 0);
4943 if (CASE_LOW (elt) != NULL_TREE
4944 || CASE_HIGH (elt) != NULL_TREE
4945 || CASE_CHAIN (elt) != NULL_TREE)
4946 {
4947 error ("invalid default case label in switch statement");
4948 debug_generic_expr (elt);
4949 return true;
4950 }
4951
4952  n = gimple_switch_num_labels (stmt);
4953 for (i = 1; i < n; i++)
4954 {
4955      elt = gimple_switch_label (stmt, i);
4956
4957 if (CASE_CHAIN (elt))
4958 {
4959 error ("invalid %<CASE_CHAIN%>");
4960 debug_generic_expr (elt);
4961 return true;
4962 }
4963 if (! CASE_LOW (elt))
4964 {
4965 error ("invalid case label in switch statement");
4966 debug_generic_expr (elt);
4967 return true;
4968 }
4969 if (CASE_HIGH (elt)
4970 && ! tree_int_cst_lt (CASE_LOW (elt), CASE_HIGH (elt)))
4971 {
4972 error ("invalid case range in switch statement");
4973 debug_generic_expr (elt);
4974 return true;
4975 }
4976
4977 if (! elt_type)
4978 {
4979 elt_type = TREE_TYPE (CASE_LOW (elt));
4980 if (TYPE_PRECISION (index_type) < TYPE_PRECISION (elt_type))
4981 {
4982 error ("type precision mismatch in switch statement");
4983 return true;
4984 }
4985 }
4986 if (TREE_TYPE (CASE_LOW (elt)) != elt_type
4987 || (CASE_HIGH (elt) && TREE_TYPE (CASE_HIGH (elt)) != elt_type))
4988 {
4989 error ("type mismatch for case label in switch statement");
4990 debug_generic_expr (elt);
4991 return true;
4992 }
4993
4994 if (prev_upper_bound)
4995 {
4996	  if (! tree_int_cst_lt (prev_upper_bound, CASE_LOW (elt)))
4997 {
4998 error ("case labels not sorted in switch statement");
4999 return true;
5000 }
5001 }
5002
5003 prev_upper_bound = CASE_HIGH (elt);
5004 if (! prev_upper_bound)
5005 prev_upper_bound = CASE_LOW (elt);
5006 }
5007
5008 return false;
5009}
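
/* The label layout this enforces, e.g. for "switch (x)" with an int
   index (illustration only):

     switch (x_1) <default: L0, case 1: L1, case 5 ... 9: L2>

   the default label comes first with no CASE_LOW/CASE_HIGH, every
   other label has a CASE_LOW of one common type, and the
   (non-overlapping) ranges appear in increasing order.  */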
5010
5011/* Verify a gimple debug statement STMT.
5012 Returns true if anything is wrong. */
5013
5014static bool
5015verify_gimple_debug (gimple *stmt ATTRIBUTE_UNUSED)
5016{
5017 /* There isn't much that could be wrong in a gimple debug stmt. A
5018 gimple debug bind stmt, for example, maps a tree, that's usually
5019 a VAR_DECL or a PARM_DECL, but that could also be some scalarized
5020 component or member of an aggregate type, to another tree, that
5021 can be an arbitrary expression. These stmts expand into debug
5022 insns, and are converted to debug notes by var-tracking.cc. */
5023 return false;
5024}
5025
5026/* Verify a gimple label statement STMT.
5027 Returns true if anything is wrong. */
5028
5029static bool
5030verify_gimple_label (glabel *stmt)
5031{
5032  tree decl = gimple_label_label (stmt);
5033 int uid;
5034 bool err = false;
5035
5036 if (TREE_CODE (decl) != LABEL_DECL)
5037 return true;
5038 if (!DECL_NONLOCAL (decl) && !FORCED_LABEL (decl)
5039 && DECL_CONTEXT (decl) != current_function_decl)
5040 {
5041 error ("label context is not the current function declaration");
5042 err |= true;
5043 }
5044
5045 uid = LABEL_DECL_UID (decl);
5046 if (cfun->cfg
5047 && (uid == -1
5048	  || (*label_to_block_map_for_fn (cfun))[uid] != gimple_bb (stmt)))
5049 {
5050 error ("incorrect entry in %<label_to_block_map%>");
5051 err |= true;
5052 }
5053
5054 uid = EH_LANDING_PAD_NR (decl);
5055 if (uid)
5056 {
5057 eh_landing_pad lp = get_eh_landing_pad_from_number (uid);
5058 if (decl != lp->post_landing_pad)
5059 {
5060 error ("incorrect setting of landing pad number");
5061 err |= true;
5062 }
5063 }
5064
5065 return err;
5066}
5067
5068/* Verify a gimple cond statement STMT.
5069 Returns true if anything is wrong. */
5070
5071static bool
5072verify_gimple_cond (gcond *stmt)
5073{
5074 if (TREE_CODE_CLASS (gimple_cond_code (stmt)) != tcc_comparison)
5075 {
5076 error ("invalid comparison code in gimple cond");
5077 return true;
5078 }
5079  if (!(!gimple_cond_true_label (stmt)
5080	|| TREE_CODE (gimple_cond_true_label (stmt)) == LABEL_DECL)
5081      || !(!gimple_cond_false_label (stmt)
5082 || TREE_CODE (gimple_cond_false_label (stmt)) == LABEL_DECL))
5083 {
5084 error ("invalid labels in gimple cond");
5085 return true;
5086 }
5087
5088  return verify_gimple_comparison (boolean_type_node,
5089				   gimple_cond_lhs (stmt),
5090				   gimple_cond_rhs (stmt),
5091				   gimple_cond_code (stmt));
5092}
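
/* A well-formed GIMPLE_COND thus looks like (illustration only)

     if (a_1 < b_2) goto <bb 3>; else goto <bb 4>;

   i.e. a tcc_comparison code whose operands additionally satisfy
   verify_gimple_comparison with a boolean result type.  */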
5093
5094/* Verify the GIMPLE statement STMT. Returns true if there is an
5095 error, otherwise false. */
5096
5097static bool
5098verify_gimple_stmt (gimple *stmt)
5099{
5100  switch (gimple_code (stmt))
5101 {
5102 case GIMPLE_ASSIGN:
5103      return verify_gimple_assign (as_a <gassign *> (stmt));
5104
5105 case GIMPLE_LABEL:
5106      return verify_gimple_label (as_a <glabel *> (stmt));
5107
5108 case GIMPLE_CALL:
5109      return verify_gimple_call (as_a <gcall *> (stmt));
5110
5111 case GIMPLE_COND:
5112      return verify_gimple_cond (as_a <gcond *> (stmt));
5113
5114 case GIMPLE_GOTO:
5115      return verify_gimple_goto (as_a <ggoto *> (stmt));
5116
5117 case GIMPLE_SWITCH:
5118      return verify_gimple_switch (as_a <gswitch *> (stmt));
5119
5120 case GIMPLE_RETURN:
5121      return verify_gimple_return (as_a <greturn *> (stmt));
5122
5123 case GIMPLE_ASM:
5124 return false;
5125
5126 case GIMPLE_TRANSACTION:
5127      return verify_gimple_transaction (as_a <gtransaction *> (stmt));
5128
5129 /* Tuples that do not have tree operands. */
5130 case GIMPLE_NOP:
5131 case GIMPLE_PREDICT:
5132 case GIMPLE_RESX:
5133 case GIMPLE_EH_DISPATCH:
5134 case GIMPLE_EH_MUST_NOT_THROW:
5135 return false;
5136
5137 CASE_GIMPLE_OMP:
5138 /* OpenMP directives are validated by the FE and never operated
5139 on by the optimizers. Furthermore, GIMPLE_OMP_FOR may contain
5140 non-gimple expressions when the main index variable has had
5141 its address taken. This does not affect the loop itself
5142	 because the header of a GIMPLE_OMP_FOR is merely used to determine
5143	 how to set up the parallel iteration.  */
5144 return false;
5145
5146 case GIMPLE_ASSUME:
5147 return false;
5148
5149 case GIMPLE_DEBUG:
5150 return verify_gimple_debug (stmt);
5151
5152 default:
5153 gcc_unreachable ();
5154 }
5155}
5156
5157/* Verify the contents of a GIMPLE_PHI. Returns true if there is a problem,
5158 and false otherwise. */
5159
5160static bool
5161verify_gimple_phi (gphi *phi)
5162{
5163 bool err = false;
5164 unsigned i;
5165  tree phi_result = gimple_phi_result (phi);
5166 bool virtual_p;
5167
5168 if (!phi_result)
5169 {
5170 error ("invalid %<PHI%> result");
5171 return true;
5172 }
5173
5174  virtual_p = virtual_operand_p (phi_result);
5175 if (TREE_CODE (phi_result) != SSA_NAME
5176 || (virtual_p
5177 && SSA_NAME_VAR (phi_result) != gimple_vop (cfun)))
5178 {
5179 error ("invalid %<PHI%> result");
5180 err = true;
5181 }
5182
5183  for (i = 0; i < gimple_phi_num_args (phi); i++)
5184    {
5185      tree t = gimple_phi_arg_def (phi, i);
5186
5187 if (!t)
5188 {
5189 error ("missing %<PHI%> def");
5190 err |= true;
5191 continue;
5192 }
5193 /* Addressable variables do have SSA_NAMEs but they
5194 are not considered gimple values. */
5195 else if ((TREE_CODE (t) == SSA_NAME
5196		&& virtual_p != virtual_operand_p (t))
5197 || (virtual_p
5198 && (TREE_CODE (t) != SSA_NAME
5199 || SSA_NAME_VAR (t) != gimple_vop (cfun)))
5200 || (!virtual_p
5201 && !is_gimple_val (t)))
5202 {
5203 error ("invalid %<PHI%> argument");
5204 debug_generic_expr (t);
5205 err |= true;
5206 }
5207#ifdef ENABLE_TYPES_CHECKING
5208 if (!useless_type_conversion_p (TREE_TYPE (phi_result), TREE_TYPE (t)))
5209 {
5210 error ("incompatible types in %<PHI%> argument %u", i);
5211 debug_generic_stmt (TREE_TYPE (phi_result));
5212 debug_generic_stmt (TREE_TYPE (t));
5213 err |= true;
5214 }
5215#endif
5216 }
5217
5218 return err;
5219}
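
/* Example of PHIs this accepts at a join of blocks 3 and 4
   (illustration only):

     # x_1 = PHI <x_2(3), 0(4)>			// scalar PHI, args are values
     # .MEM_7 = PHI <.MEM_5(3), .MEM_6(4)>	// virtual PHI

   the result is always an SSA name, and a virtual PHI must use the
   function's single virtual operand in the result and in every
   argument.  */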
5220
5221/* Verify the GIMPLE statements inside the sequence STMTS. */
5222
5223static bool
5224verify_gimple_in_seq_2 (gimple_seq stmts)
5225{
5226 gimple_stmt_iterator ittr;
5227 bool err = false;
5228
5229  for (ittr = gsi_start (stmts); !gsi_end_p (ittr); gsi_next (&ittr))
5230 {
5231      gimple *stmt = gsi_stmt (ittr);
5232
5233      switch (gimple_code (stmt))
5234 {
5235 case GIMPLE_BIND:
5236	  err |= verify_gimple_in_seq_2 (
5237		   gimple_bind_body (as_a <gbind *> (stmt)));
5238 break;
5239
5240 case GIMPLE_TRY:
5241	  err |= verify_gimple_in_seq_2 (gimple_try_eval (stmt));
5242	  err |= verify_gimple_in_seq_2 (gimple_try_cleanup (stmt));
5243 break;
5244
5245 case GIMPLE_EH_FILTER:
5246	  err |= verify_gimple_in_seq_2 (gimple_eh_filter_failure (stmt));
5247 break;
5248
5249 case GIMPLE_EH_ELSE:
5250 {
5251	    geh_else *eh_else = as_a <geh_else *> (stmt);
5252	    err |= verify_gimple_in_seq_2 (gimple_eh_else_n_body (eh_else));
5253	    err |= verify_gimple_in_seq_2 (gimple_eh_else_e_body (eh_else));
5254 }
5255 break;
5256
5257 case GIMPLE_CATCH:
5258	  err |= verify_gimple_in_seq_2 (gimple_catch_handler (
5259					   as_a <gcatch *> (stmt)));
5260 break;
5261
5262 case GIMPLE_ASSUME:
5263	  err |= verify_gimple_in_seq_2 (gimple_assume_body (stmt));
5264 break;
5265
5266 case GIMPLE_TRANSACTION:
5267	  err |= verify_gimple_transaction (as_a <gtransaction *> (stmt));
5268 break;
5269
5270 default:
5271 {
5272 bool err2 = verify_gimple_stmt (stmt);
5273 if (err2)
5274 debug_gimple_stmt (stmt);
5275 err |= err2;
5276 }
5277 }
5278 }
5279
5280 return err;
5281}
5282
5283/* Verify the contents of a GIMPLE_TRANSACTION. Returns true if there
5284 is a problem, otherwise false. */
5285
5286static bool
5287verify_gimple_transaction (gtransaction *stmt)
5288{
5289 tree lab;
5290
5291  lab = gimple_transaction_label_norm (stmt);
5292  if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5293    return true;
5294  lab = gimple_transaction_label_uninst (stmt);
5295  if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5296    return true;
5297  lab = gimple_transaction_label_over (stmt);
5298  if (lab != NULL && TREE_CODE (lab) != LABEL_DECL)
5299    return true;
5300
5301  return verify_gimple_in_seq_2 (gimple_transaction_body (stmt));
5302}
5303
5304
5305/* Verify the GIMPLE statements inside the statement list STMTS. */
5306
5307DEBUG_FUNCTION bool
5308verify_gimple_in_seq (gimple_seq stmts, bool ice)
5309{
5310  timevar_push (TV_TREE_STMT_VERIFY);
5311  bool res = verify_gimple_in_seq_2 (stmts);
5312  if (res && ice)
5313    internal_error ("%<verify_gimple%> failed");
5314  timevar_pop (TV_TREE_STMT_VERIFY);
5315 return res;
5316}
5317
5318/* Return true when T can be shared.  */
5319
5320static bool
5321tree_node_can_be_shared (tree t)
5322{
5323 if (IS_TYPE_OR_DECL_P (t)
5324 || TREE_CODE (t) == SSA_NAME
5325 || TREE_CODE (t) == IDENTIFIER_NODE
5326 || TREE_CODE (t) == CASE_LABEL_EXPR
5327 || is_gimple_min_invariant (t))
5328 return true;
5329
5330 if (t == error_mark_node)
5331 return true;
5332
5333 return false;
5334}
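
/* E.g. an SSA name or an INTEGER_CST may safely appear as operand of
   many statements, while a reference tree like a COMPONENT_REF must
   be unshared: each statement needs its own copy, or an update made
   through one statement would silently change the others.  The
   walkers below detect such accidental sharing.  */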
5335
5336/* Called via walk_tree. Verify tree sharing. */
5337
5338static tree
5339verify_node_sharing_1 (tree *tp, int *walk_subtrees, void *data)
5340{
5341 hash_set<void *> *visited = (hash_set<void *> *) data;
5342
5343  if (tree_node_can_be_shared (*tp))
5344 {
5345 *walk_subtrees = false;
5346 return NULL;
5347 }
5348
5349  if (visited->add (*tp))
5350 return *tp;
5351
5352 return NULL;
5353}
5354
5355/* Called via walk_gimple_stmt. Verify tree sharing. */
5356
5357static tree
5358verify_node_sharing (tree *tp, int *walk_subtrees, void *data)
5359{
5360 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5361  return verify_node_sharing_1 (tp, walk_subtrees, wi->info);
5362}
5363
5364static bool eh_error_found;
5365bool
5366verify_eh_throw_stmt_node (gimple *const &stmt, const int &,
5367 hash_set<gimple *> *visited)
5368{
5369  if (!visited->contains (stmt))
5370 {
5371 error ("dead statement in EH table");
5372 debug_gimple_stmt (stmt);
5373 eh_error_found = true;
5374 }
5375 return true;
5376}
5377
5378/* Verify that the block of location LOC is in BLOCKS.  */
5379
5380static bool
5381verify_location (hash_set<tree> *blocks, location_t loc)
5382{
5383 tree block = LOCATION_BLOCK (loc);
5384 if (block != NULL_TREE
5385      && !blocks->contains (block))
5386 {
5387 error ("location references block not in block tree");
5388 return true;
5389 }
5390 if (block != NULL_TREE)
5391 return verify_location (blocks, BLOCK_SOURCE_LOCATION (block));
5392 return false;
5393}
5394
5395/* Called via walk_tree. Verify that expressions have no blocks. */
5396
5397static tree
5398verify_expr_no_block (tree *tp, int *walk_subtrees, void *)
5399{
5400 if (!EXPR_P (*tp))
5401 {
5402 *walk_subtrees = false;
5403 return NULL;
5404 }
5405
5406 location_t loc = EXPR_LOCATION (*tp);
5407 if (LOCATION_BLOCK (loc) != NULL)
5408 return *tp;
5409
5410 return NULL;
5411}
5412
5413/* Called via walk_tree. Verify locations of expressions. */
5414
5415static tree
5416verify_expr_location_1 (tree *tp, int *walk_subtrees, void *data)
5417{
5418 hash_set<tree> *blocks = (hash_set<tree> *) data;
5419 tree t = *tp;
5420
5421 /* ??? This doesn't really belong here but there's no good place to
5422 stick this remainder of old verify_expr. */
5423 /* ??? This barfs on debug stmts which contain binds to vars with
5424 different function context. */
5425#if 0
5426 if (VAR_P (t)
5427 || TREE_CODE (t) == PARM_DECL
5428 || TREE_CODE (t) == RESULT_DECL)
5429 {
5430 tree context = decl_function_context (t);
5431 if (context != cfun->decl
5432 && !SCOPE_FILE_SCOPE_P (context)
5433 && !TREE_STATIC (t)
5434 && !DECL_EXTERNAL (t))
5435 {
5436 error ("local declaration from a different function");
5437 return t;
5438 }
5439 }
5440#endif
5441
5442 if (VAR_P (t) && DECL_HAS_DEBUG_EXPR_P (t))
5443 {
5444 tree x = DECL_DEBUG_EXPR (t);
5445 tree addr = walk_tree (&x, verify_expr_no_block, NULL, NULL);
5446 if (addr)
5447 return addr;
5448 }
5449 if ((VAR_P (t)
5450 || TREE_CODE (t) == PARM_DECL
5451 || TREE_CODE (t) == RESULT_DECL)
5452 && DECL_HAS_VALUE_EXPR_P (t))
5453 {
5454 tree x = DECL_VALUE_EXPR (t);
5455 tree addr = walk_tree (&x, verify_expr_no_block, NULL, NULL);
5456 if (addr)
5457 return addr;
5458 }
5459
5460 if (!EXPR_P (t))
5461 {
5462 *walk_subtrees = false;
5463 return NULL;
5464 }
5465
5466 location_t loc = EXPR_LOCATION (t);
5467 if (verify_location (blocks, loc))
5468 return t;
5469
5470 return NULL;
5471}
5472
5473/* Called via walk_gimple_op. Verify locations of expressions. */
5474
5475static tree
5476verify_expr_location (tree *tp, int *walk_subtrees, void *data)
5477{
5478 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
5479  return verify_expr_location_1 (tp, walk_subtrees, wi->info);
5480}
5481
5482/* Insert all subblocks of BLOCK into BLOCKS and recurse. */
5483
5484static void
5485collect_subblocks (hash_set<tree> *blocks, tree block)
5486{
5487 tree t;
5488 for (t = BLOCK_SUBBLOCKS (block); t; t = BLOCK_CHAIN (t))
5489 {
5490      blocks->add (t);
5491      collect_subblocks (blocks, t);
5492 }
5493}
5494
5495/* Disable warnings about missing quoting in GCC diagnostics for
5496 the verification errors. Their format strings don't follow
5497 GCC diagnostic conventions and trigger an ICE in the end. */
5498#if __GNUC__ >= 10
5499# pragma GCC diagnostic push
5500# pragma GCC diagnostic ignored "-Wformat-diag"
5501#endif
5502
5503/* Verify the GIMPLE statements in the CFG of FN. */
5504
5505DEBUG_FUNCTION bool
5506verify_gimple_in_cfg (struct function *fn, bool verify_nothrow, bool ice)
5507{
5508 basic_block bb;
5509 bool err = false;
5510
5511  timevar_push (TV_TREE_STMT_VERIFY);
5512 hash_set<void *> visited;
5513 hash_set<gimple *> visited_throwing_stmts;
5514
5515 /* Collect all BLOCKs referenced by the BLOCK tree of FN. */
5516 hash_set<tree> blocks;
5517 if (DECL_INITIAL (fn->decl))
5518 {
5519 blocks.add (DECL_INITIAL (fn->decl));
5520      collect_subblocks (&blocks, DECL_INITIAL (fn->decl));
5521 }
5522
5523 FOR_EACH_BB_FN (bb, fn)
5524 {
5525 gimple_stmt_iterator gsi;
5526 edge_iterator ei;
5527 edge e;
5528
5529      for (gphi_iterator gpi = gsi_start_phis (bb);
5530	   !gsi_end_p (gpi);
5531	   gsi_next (&gpi))
5532 {
5533 gphi *phi = gpi.phi ();
5534 bool err2 = false;
5535 unsigned i;
5536
5537	  if (gimple_bb (phi) != bb)
5538 {
5539 error ("gimple_bb (phi) is set to a wrong basic block");
5540 err2 = true;
5541 }
5542
5543 err2 |= verify_gimple_phi (phi);
5544
5545 /* Only PHI arguments have locations. */
5546	  if (gimple_location (phi) != UNKNOWN_LOCATION)
5547 {
5548 error ("PHI node with location");
5549 err2 = true;
5550 }
5551
5552	  for (i = 0; i < gimple_phi_num_args (phi); i++)
5553	    {
5554	      tree arg = gimple_phi_arg_def (phi, i);
5555 tree addr = walk_tree (&arg, verify_node_sharing_1,
5556 &visited, NULL);
5557 if (addr)
5558 {
5559 error ("incorrect sharing of tree nodes");
5560 debug_generic_expr (addr);
5561 err2 |= true;
5562 }
5563 location_t loc = gimple_phi_arg_location (phi, i);
5564	      if (virtual_operand_p (gimple_phi_result (phi))
5565 && loc != UNKNOWN_LOCATION)
5566 {
5567 error ("virtual PHI with argument locations");
5568 err2 = true;
5569 }
5570 addr = walk_tree (&arg, verify_expr_location_1, &blocks, NULL);
5571 if (addr)
5572 {
5573 debug_generic_expr (addr);
5574 err2 = true;
5575 }
5576	      err2 |= verify_location (&blocks, loc);
5577 }
5578
5579 if (err2)
5580 debug_gimple_stmt (phi);
5581 err |= err2;
5582 }
5583
5584      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
5585	{
5586	  gimple *stmt = gsi_stmt (gsi);
5587 bool err2 = false;
5588 struct walk_stmt_info wi;
5589 tree addr;
5590 int lp_nr;
5591
5592	  if (gimple_bb (stmt) != bb)
5593 {
5594 error ("gimple_bb (stmt) is set to a wrong basic block");
5595 err2 = true;
5596 }
5597
5598 err2 |= verify_gimple_stmt (stmt);
5599	  err2 |= verify_location (&blocks, gimple_location (stmt));
5600
5601	  memset (&wi, 0, sizeof (wi));
5602 wi.info = (void *) &visited;
5603 addr = walk_gimple_op (stmt, verify_node_sharing, &wi);
5604 if (addr)
5605 {
5606 error ("incorrect sharing of tree nodes");
5607 debug_generic_expr (addr);
5608 err2 |= true;
5609 }
5610
5611	  memset (&wi, 0, sizeof (wi));
5612 wi.info = (void *) &blocks;
5613 addr = walk_gimple_op (stmt, verify_expr_location, &wi);
5614 if (addr)
5615 {
5616 debug_generic_expr (addr);
5617 err2 |= true;
5618 }
5619
5620 /* If the statement is marked as part of an EH region, then it is
5621 expected that the statement could throw. Verify that when
5622 optimizations simplify a statement to the point that it provably
5623 cannot throw, the other data structures are updated to match. */
5625 lp_nr = lookup_stmt_eh_lp (stmt);
5626 if (lp_nr != 0)
5627 visited_throwing_stmts.add (k: stmt);
5628 if (lp_nr > 0)
5629 {
5630 if (!stmt_could_throw_p (cfun, stmt))
5631 {
5632 if (verify_nothrow)
5633 {
5634 error ("statement marked for throw, but doesn%'t");
5635 err2 |= true;
5636 }
5637 }
5638 else if (!gsi_one_before_end_p (i: gsi))
5639 {
5640 error ("statement marked for throw in middle of block");
5641 err2 |= true;
5642 }
5643 }
5644
5645 if (err2)
5646 debug_gimple_stmt (stmt);
5647 err |= err2;
5648 }
5649
5650 FOR_EACH_EDGE (e, ei, bb->succs)
5651 if (e->goto_locus != UNKNOWN_LOCATION)
5652 err |= verify_location (blocks: &blocks, loc: e->goto_locus);
5653 }
5654
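  /* Every statement recorded in the EH throw table should have been
     seen during the walk above; traversing the table with
     verify_eh_throw_stmt_node flags any entry that was not.  */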
5655 hash_map<gimple *, int> *eh_table = get_eh_throw_stmt_table (cfun);
5656 eh_error_found = false;
5657 if (eh_table)
5658 eh_table->traverse<hash_set<gimple *> *, verify_eh_throw_stmt_node>
5659 (a: &visited_throwing_stmts);
5660
5661 if (ice && (err || eh_error_found))
5662 internal_error ("verify_gimple failed");
5663
5664 verify_histograms ();
5665 timevar_pop (tv: TV_TREE_STMT_VERIFY);
5666
5667 return (err || eh_error_found);
5668}
5669
5670
5671/* Verifies that the flow information is OK. */
5672
5673static bool
5674gimple_verify_flow_info (void)
5675{
5676 bool err = false;
5677 basic_block bb;
5678 gimple_stmt_iterator gsi;
5679 gimple *stmt;
5680 edge e;
5681 edge_iterator ei;
5682
5683 if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
5684 || ENTRY_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
5685 {
5686 error ("ENTRY_BLOCK has IL associated with it");
5687 err = true;
5688 }
5689
5690 if (EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.seq
5691 || EXIT_BLOCK_PTR_FOR_FN (cfun)->il.gimple.phi_nodes)
5692 {
5693 error ("EXIT_BLOCK has IL associated with it");
5694 err = true;
5695 }
5696
5697 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
5698 if (e->flags & EDGE_FALLTHRU)
5699 {
5700 error ("fallthru to exit from bb %d", e->src->index);
5701 err = true;
5702 }
5703 if (cfun->cfg->full_profile
5704 && !ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.initialized_p ())
5705 {
5706 error ("entry block count not initialized");
5707 err = true;
5708 }
5709 if (cfun->cfg->full_profile
5710 && !EXIT_BLOCK_PTR_FOR_FN (cfun)->count.initialized_p ())
5711 {
5712 error ("exit block count not initialized");
5713 err = true;
5714 }
5715 if (cfun->cfg->full_profile
5716 && !single_succ_edge
5717 (ENTRY_BLOCK_PTR_FOR_FN (cfun))->probability.initialized_p ())
5718 {
5719 error ("probability of edge from entry block not initialized");
5720 err = true;
5721 }
5722
5724 FOR_EACH_BB_FN (bb, cfun)
5725 {
5726 bool found_ctrl_stmt = false;
5727
5728 stmt = NULL;
5729
5730 if (cfun->cfg->full_profile)
5731 {
5732 if (!bb->count.initialized_p ())
5733 {
5734 error ("count of bb %d not initialized", bb->index);
5735 err = true;
5736 }
5737 FOR_EACH_EDGE (e, ei, bb->succs)
5738 if (!e->probability.initialized_p ())
5739 {
5740 error ("probability of edge %d->%d not initialized",
5741 bb->index, e->dest->index);
5742 err = true;
5743 }
5744 }
5745
5746 /* Skip the labels at the start of the basic block. */
5747 for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
5748 {
5749 tree label;
5750 gimple *prev_stmt = stmt;
5751
5752 stmt = gsi_stmt (i: gsi);
5753
5754 if (gimple_code (g: stmt) != GIMPLE_LABEL)
5755 break;
5756
5757 label = gimple_label_label (gs: as_a <glabel *> (p: stmt));
5758 if (prev_stmt && DECL_NONLOCAL (label))
5759 {
5760 error ("nonlocal label %qD is not first in a sequence "
5761 "of labels in bb %d", label, bb->index);
5762 err = true;
5763 }
5764
5765 if (prev_stmt && EH_LANDING_PAD_NR (label) != 0)
5766 {
5767 error ("EH landing pad label %qD is not first in a sequence "
5768 "of labels in bb %d", label, bb->index);
5769 err = true;
5770 }
5771
5772 if (label_to_block (cfun, dest: label) != bb)
5773 {
5774 error ("label %qD to block does not match in bb %d",
5775 label, bb->index);
5776 err = true;
5777 }
5778
5779 if (decl_function_context (label) != current_function_decl)
5780 {
5781 error ("label %qD has incorrect context in bb %d",
5782 label, bb->index);
5783 err = true;
5784 }
5785 }
5786
5787 /* Verify that the body of basic block BB is free of control flow. */
5788 bool seen_nondebug_stmt = false;
5789 for (; !gsi_end_p (i: gsi); gsi_next (i: &gsi))
5790 {
5791 gimple *stmt = gsi_stmt (i: gsi);
5792
5793 if (found_ctrl_stmt)
5794 {
5795 error ("control flow in the middle of basic block %d",
5796 bb->index);
5797 err = true;
5798 }
5799
5800 if (stmt_ends_bb_p (t: stmt))
5801 found_ctrl_stmt = true;
5802
5803 if (glabel *label_stmt = dyn_cast <glabel *> (p: stmt))
5804 {
5805 error ("label %qD in the middle of basic block %d",
5806 gimple_label_label (gs: label_stmt), bb->index);
5807 err = true;
5808 }
5809
5810 /* Check that no statements appear between a returns_twice call
5811 and its associated abnormal edge. */
5812 if (gimple_code (g: stmt) == GIMPLE_CALL
5813 && gimple_call_flags (stmt) & ECF_RETURNS_TWICE)
5814 {
5815 const char *misplaced = NULL;
5816 /* TM is an exception: it adds the abnormal edge just after the
5817 call that starts the transaction, i.e. the call must end its BB. */
5818 if (gimple_call_builtin_p (stmt, BUILT_IN_TM_START))
5819 {
5820 if (single_succ_p (bb)
5821 && bb_has_abnormal_pred (bb: single_succ (bb))
5822 && !gsi_one_nondebug_before_end_p (i: gsi))
5823 misplaced = "not last";
5824 }
5825 else
5826 {
5827 if (seen_nondebug_stmt
5828 && bb_has_abnormal_pred (bb))
5829 misplaced = "not first";
5830 }
5831 if (misplaced)
5832 {
5833 error ("returns_twice call is %s in basic block %d",
5834 misplaced, bb->index);
5835 print_gimple_stmt (stderr, stmt, 0, TDF_SLIM);
5836 err = true;
5837 }
5838 }
5839 if (!is_gimple_debug (gs: stmt))
5840 seen_nondebug_stmt = true;
5841 }
5842
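      /* The remaining checks relate the last real statement of the
	 block to the flags on its outgoing edges.  */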
5843 gsi = gsi_last_nondebug_bb (bb);
5844 if (gsi_end_p (i: gsi))
5845 continue;
5846
5847 stmt = gsi_stmt (i: gsi);
5848
5849 if (gimple_code (g: stmt) == GIMPLE_LABEL)
5850 continue;
5851
5852 if (verify_eh_edges (stmt))
5853 err = true;
5854
5855 if (is_ctrl_stmt (t: stmt))
5856 {
5857 FOR_EACH_EDGE (e, ei, bb->succs)
5858 if (e->flags & EDGE_FALLTHRU)
5859 {
5860 error ("fallthru edge after a control statement in bb %d",
5861 bb->index);
5862 err = true;
5863 }
5864 }
5865
5866 if (gimple_code (g: stmt) != GIMPLE_COND)
5867 {
5868 /* Verify that no edge has EDGE_TRUE_VALUE or EDGE_FALSE_VALUE set
5869 after anything other than a GIMPLE_COND. */
5870 FOR_EACH_EDGE (e, ei, bb->succs)
5871 if (e->flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE))
5872 {
5873 error ("true/false edge after a non-GIMPLE_COND in bb %d",
5874 bb->index);
5875 err = true;
5876 }
5877 }
5878
5879 switch (gimple_code (g: stmt))
5880 {
5881 case GIMPLE_COND:
5882 {
5883 edge true_edge;
5884 edge false_edge;
5885
5886 extract_true_false_edges_from_block (bb, &true_edge, &false_edge);
5887
5888 if (!true_edge
5889 || !false_edge
5890 || !(true_edge->flags & EDGE_TRUE_VALUE)
5891 || !(false_edge->flags & EDGE_FALSE_VALUE)
5892 || (true_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL))
5893 || (false_edge->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL))
5894 || EDGE_COUNT (bb->succs) >= 3)
5895 {
5896 error ("wrong outgoing edge flags at end of bb %d",
5897 bb->index);
5898 err = true;
5899 }
5900 }
5901 break;
5902
5903 case GIMPLE_GOTO:
5904 if (simple_goto_p (t: stmt))
5905 {
5906 error ("explicit goto at end of bb %d", bb->index);
5907 err = true;
5908 }
5909 else
5910 {
5911 /* FIXME. We should double check that the labels in the
5912 destination blocks have their address taken. */
5913 FOR_EACH_EDGE (e, ei, bb->succs)
5914 if ((e->flags & (EDGE_FALLTHRU | EDGE_TRUE_VALUE
5915 | EDGE_FALSE_VALUE))
5916 || !(e->flags & EDGE_ABNORMAL))
5917 {
5918 error ("wrong outgoing edge flags at end of bb %d",
5919 bb->index);
5920 err = true;
5921 }
5922 }
5923 break;
5924
5925 case GIMPLE_CALL:
5926 if (!gimple_call_builtin_p (stmt, BUILT_IN_RETURN))
5927 break;
5928 /* fallthru */
5929 case GIMPLE_RETURN:
5930 if (!single_succ_p (bb)
5931 || (single_succ_edge (bb)->flags
5932 & (EDGE_FALLTHRU | EDGE_ABNORMAL
5933 | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
5934 {
5935 error ("wrong outgoing edge flags at end of bb %d", bb->index);
5936 err = true;
5937 }
5938 if (single_succ (bb) != EXIT_BLOCK_PTR_FOR_FN (cfun))
5939 {
5940 error ("return edge does not point to exit in bb %d",
5941 bb->index);
5942 err = true;
5943 }
5944 break;
5945
5946 case GIMPLE_SWITCH:
5947 {
5948 gswitch *switch_stmt = as_a <gswitch *> (p: stmt);
5949 tree prev;
5950 edge e;
5951 size_t i, n;
5952
5953 n = gimple_switch_num_labels (gs: switch_stmt);
5954
5955 /* Mark all the destination basic blocks. */
5956 for (i = 0; i < n; ++i)
5957 {
5958 basic_block label_bb = gimple_switch_label_bb (cfun, switch_stmt, i);
5959 gcc_assert (!label_bb->aux || label_bb->aux == (void *)1);
5960 label_bb->aux = (void *)1;
5961 }
5962
5963 /* Verify that the case labels are sorted. */
5964 prev = gimple_switch_label (gs: switch_stmt, index: 0);
5965 for (i = 1; i < n; ++i)
5966 {
5967 tree c = gimple_switch_label (gs: switch_stmt, index: i);
5968 if (!CASE_LOW (c))
5969 {
5970 error ("found default case not at the start of "
5971 "case vector");
5972 err = true;
5973 continue;
5974 }
5975 if (CASE_LOW (prev)
5976 && !tree_int_cst_lt (CASE_LOW (prev), CASE_LOW (c)))
5977 {
5978 error ("case labels not sorted: ");
5979 print_generic_expr (stderr, prev);
5980 fprintf (stderr, " is greater than ");
5981 print_generic_expr (stderr, c);
5982 fprintf (stderr, " but comes before it.\n");
5983 err = true;
5984 }
5985 prev = c;
5986 }
5987 /* VRP will remove the default case if it can prove it will
5988 never be executed. So do not verify there always exists
5989 a default case here. */
5990
5991 FOR_EACH_EDGE (e, ei, bb->succs)
5992 {
5993 if (!e->dest->aux)
5994 {
5995 error ("extra outgoing edge %d->%d",
5996 bb->index, e->dest->index);
5997 err = true;
5998 }
5999
6000 e->dest->aux = (void *)2;
6001 if ((e->flags & (EDGE_FALLTHRU | EDGE_ABNORMAL
6002 | EDGE_TRUE_VALUE | EDGE_FALSE_VALUE)))
6003 {
6004 error ("wrong outgoing edge flags at end of bb %d",
6005 bb->index);
6006 err = true;
6007 }
6008 }
6009
6010 /* Check that every case label has a corresponding edge. */
6011 for (i = 0; i < n; ++i)
6012 {
6013 basic_block label_bb = gimple_switch_label_bb (cfun,
6014 switch_stmt, i);
6015
6016 if (label_bb->aux != (void *)2)
6017 {
6018 error ("missing edge %i->%i", bb->index, label_bb->index);
6019 err = true;
6020 }
6021 }
6022
6023 FOR_EACH_EDGE (e, ei, bb->succs)
6024 e->dest->aux = (void *)0;
6025 }
6026 break;
6027
6028 case GIMPLE_EH_DISPATCH:
6029 if (verify_eh_dispatch_edge (as_a <geh_dispatch *> (p: stmt)))
6030 err = true;
6031 break;
6032
6033 default:
6034 break;
6035 }
6036 }
6037
6038 if (dom_info_state (CDI_DOMINATORS) >= DOM_NO_FAST_QUERY)
6039 verify_dominators (CDI_DOMINATORS);
6040
6041 return err;
6042}
6043
6044#if __GNUC__ >= 10
6045# pragma GCC diagnostic pop
6046#endif
6047
6048/* Updates phi nodes after creating a forwarder block joined
6049 by edge FALLTHRU. */
6050
6051static void
6052gimple_make_forwarder_block (edge fallthru)
6053{
6054 edge e;
6055 edge_iterator ei;
6056 basic_block dummy, bb;
6057 tree var;
6058 gphi_iterator gsi;
6059 bool forward_location_p;
6060
6061 dummy = fallthru->src;
6062 bb = fallthru->dest;
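  /* At this point DUMMY (the source of FALLTHRU) still holds the PHI
     nodes of the original block.  Each of them is re-created in BB so
     that its result name stays defined where its uses expect it, and
     the PHI left in DUMMY receives a fresh name that feeds the new
     PHI along FALLTHRU.  */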
6063
6064 if (single_pred_p (bb))
6065 return;
6066
6067 /* We can forward location info if the dummy block has only one predecessor. */
6068 forward_location_p = single_pred_p (bb: dummy);
6069
6070 /* If we redirected a branch we must create new PHI nodes at the
6071 start of BB. */
6072 for (gsi = gsi_start_phis (dummy); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
6073 {
6074 gphi *phi, *new_phi;
6075
6076 phi = gsi.phi ();
6077 var = gimple_phi_result (gs: phi);
6078 new_phi = create_phi_node (var, bb);
6079 gimple_phi_set_result (phi, result: copy_ssa_name (var, stmt: phi));
6080 add_phi_arg (new_phi, gimple_phi_result (gs: phi), fallthru,
6081 forward_location_p
6082 ? gimple_phi_arg_location (phi, i: 0) : UNKNOWN_LOCATION);
6083 }
6084
6085 /* Add the arguments we have stored on edges. */
6086 FOR_EACH_EDGE (e, ei, bb->preds)
6087 {
6088 if (e == fallthru)
6089 continue;
6090
6091 flush_pending_stmts (e);
6092 }
6093}
6094
6095
6096/* Return a non-special label at the head of basic block BB.
6097 Create one if it doesn't exist. */
6098
6099tree
6100gimple_block_label (basic_block bb)
6101{
6102 gimple_stmt_iterator i, s = gsi_start_bb (bb);
6103 bool first = true;
6104 tree label;
6105 glabel *stmt;
6106
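  /* Prefer reusing an existing label: take the first leading label
     that is not nonlocal and move it to the head of the block;
     otherwise fall through and create a fresh artificial one.  */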
6107 for (i = s; !gsi_end_p (i); first = false, gsi_next (i: &i))
6108 {
6109 stmt = dyn_cast <glabel *> (p: gsi_stmt (i));
6110 if (!stmt)
6111 break;
6112 label = gimple_label_label (gs: stmt);
6113 if (!DECL_NONLOCAL (label))
6114 {
6115 if (!first)
6116 gsi_move_before (&i, &s);
6117 return label;
6118 }
6119 }
6120
6121 label = create_artificial_label (UNKNOWN_LOCATION);
6122 stmt = gimple_build_label (label);
6123 gsi_insert_before (&s, stmt, GSI_NEW_STMT);
6124 return label;
6125}
6126
6127
6128/* Attempt to perform edge redirection by replacing a possibly complex
6129 jump instruction by a goto or by removing the jump completely.
6130 This can apply only if all edges now point to the same block. The
6131 parameters and return values are equivalent to
6132 redirect_edge_and_branch. */
6133
6134static edge
6135gimple_try_redirect_by_replacing_jump (edge e, basic_block target)
6136{
6137 basic_block src = e->src;
6138 gimple_stmt_iterator i;
6139 gimple *stmt;
6140
6141 /* We can replace or remove a complex jump only when we have exactly
6142 two edges. */
6143 if (EDGE_COUNT (src->succs) != 2
6144 /* Verify that all targets will be TARGET. Specifically, the
6145 edge that is not E must also go to TARGET; the index expression
     EDGE_SUCC (src, 0) == e is 1 exactly when edge 0 is E, so it
     selects the other edge. */
6146 || EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target)
6147 return NULL;
6148
6149 i = gsi_last_bb (bb: src);
6150 if (gsi_end_p (i))
6151 return NULL;
6152
6153 stmt = gsi_stmt (i);
6154
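  /* Only a trailing GIMPLE_COND or GIMPLE_SWITCH can be removed; the
     block is then left to fall through to TARGET unconditionally.  */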
6155 if (gimple_code (g: stmt) == GIMPLE_COND || gimple_code (g: stmt) == GIMPLE_SWITCH)
6156 {
6157 gsi_remove (&i, true);
6158 e = ssa_redirect_edge (e, target);
6159 e->flags = EDGE_FALLTHRU;
6160 return e;
6161 }
6162
6163 return NULL;
6164}
6165
6166
6167/* Redirect E to DEST. Return NULL on failure. Otherwise, return the
6168 edge representing the redirected branch. */
6169
6170static edge
6171gimple_redirect_edge_and_branch (edge e, basic_block dest)
6172{
6173 basic_block bb = e->src;
6174 gimple_stmt_iterator gsi;
6175 edge ret;
6176 gimple *stmt;
6177
6178 if (e->flags & EDGE_ABNORMAL)
6179 return NULL;
6180
6181 if (e->dest == dest)
6182 return NULL;
6183
6184 if (e->flags & EDGE_EH)
6185 return redirect_eh_edge (e, dest);
6186
6187 if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
6188 {
6189 ret = gimple_try_redirect_by_replacing_jump (e, target: dest);
6190 if (ret)
6191 return ret;
6192 }
6193
6194 gsi = gsi_last_nondebug_bb (bb);
6195 stmt = gsi_end_p (i: gsi) ? NULL : gsi_stmt (i: gsi);
6196
6197 switch (stmt ? gimple_code (g: stmt) : GIMPLE_ERROR_MARK)
6198 {
6199 case GIMPLE_COND:
6200 /* For COND_EXPR, we only need to redirect the edge. */
6201 break;
6202
6203 case GIMPLE_GOTO:
6204 /* No non-abnormal edges should lead from a non-simple goto, and
6205 simple ones should be represented implicitly. */
6206 gcc_unreachable ();
6207
6208 case GIMPLE_SWITCH:
6209 {
6210 gswitch *switch_stmt = as_a <gswitch *> (p: stmt);
6211 tree label = gimple_block_label (bb: dest);
6212 tree cases = get_cases_for_edge (e, t: switch_stmt);
6213
6214 /* If we have a list of cases associated with E, then use it
6215 as it's a lot faster than walking the entire case vector. */
6216 if (cases)
6217 {
6218 edge e2 = find_edge (e->src, dest);
6219 tree last, first;
6220
6221 first = cases;
6222 while (cases)
6223 {
6224 last = cases;
6225 CASE_LABEL (cases) = label;
6226 cases = CASE_CHAIN (cases);
6227 }
6228
6229 /* If there was already an edge in the CFG, then we need
6230 to move all the cases associated with E to E2. */
6231 if (e2)
6232 {
6233 tree cases2 = get_cases_for_edge (e: e2, t: switch_stmt);
6234
6235 CASE_CHAIN (last) = CASE_CHAIN (cases2);
6236 CASE_CHAIN (cases2) = first;
6237 }
6238 bitmap_set_bit (touched_switch_bbs, gimple_bb (g: stmt)->index);
6239 }
6240 else
6241 {
6242 size_t i, n = gimple_switch_num_labels (gs: switch_stmt);
6243
6244 for (i = 0; i < n; i++)
6245 {
6246 tree elt = gimple_switch_label (gs: switch_stmt, index: i);
6247 if (label_to_block (cfun, CASE_LABEL (elt)) == e->dest)
6248 CASE_LABEL (elt) = label;
6249 }
6250 }
6251 }
6252 break;
6253
6254 case GIMPLE_ASM:
6255 {
6256 gasm *asm_stmt = as_a <gasm *> (p: stmt);
6257 int i, n = gimple_asm_nlabels (asm_stmt);
6258 tree label = NULL;
6259
6260 for (i = 0; i < n; ++i)
6261 {
6262 tree cons = gimple_asm_label_op (asm_stmt, index: i);
6263 if (label_to_block (cfun, TREE_VALUE (cons)) == e->dest)
6264 {
6265 if (!label)
6266 label = gimple_block_label (bb: dest);
6267 TREE_VALUE (cons) = label;
6268 }
6269 }
6270
6271 /* If we didn't find any label matching the former edge in the
6272 asm labels, we must be redirecting the fallthrough
6273 edge. */
6274 gcc_assert (label || (e->flags & EDGE_FALLTHRU));
6275 }
6276 break;
6277
6278 case GIMPLE_RETURN:
6279 gsi_remove (&gsi, true);
6280 e->flags |= EDGE_FALLTHRU;
6281 break;
6282
6283 case GIMPLE_OMP_RETURN:
6284 case GIMPLE_OMP_CONTINUE:
6285 case GIMPLE_OMP_SECTIONS_SWITCH:
6286 case GIMPLE_OMP_FOR:
6287 /* The edges from OMP constructs can be simply redirected. */
6288 break;
6289
6290 case GIMPLE_EH_DISPATCH:
6291 if (!(e->flags & EDGE_FALLTHRU))
6292 redirect_eh_dispatch_edge (as_a <geh_dispatch *> (p: stmt), e, dest);
6293 break;
6294
6295 case GIMPLE_TRANSACTION:
6296 if (e->flags & EDGE_TM_ABORT)
6297 gimple_transaction_set_label_over (transaction_stmt: as_a <gtransaction *> (p: stmt),
6298 label: gimple_block_label (bb: dest));
6299 else if (e->flags & EDGE_TM_UNINSTRUMENTED)
6300 gimple_transaction_set_label_uninst (transaction_stmt: as_a <gtransaction *> (p: stmt),
6301 label: gimple_block_label (bb: dest));
6302 else
6303 gimple_transaction_set_label_norm (transaction_stmt: as_a <gtransaction *> (p: stmt),
6304 label: gimple_block_label (bb: dest));
6305 break;
6306
6307 default:
6308 /* Otherwise it must be a fallthru edge, and we don't need to
6309 do anything besides redirecting it. */
6310 gcc_assert (e->flags & EDGE_FALLTHRU);
6311 break;
6312 }
6313
6314 /* Now update the edges in the CFG; ssa_redirect_edge removes the
     PHI arguments on E's old destination and queues them as pending
     statements on the returned edge. */
6317 e = ssa_redirect_edge (e, dest);
6318
6319 return e;
6320}
6321
6322/* Returns true if it is possible to remove edge E by redirecting
6323 it to the destination of the other edge from E->src. */
6324
6325static bool
6326gimple_can_remove_branch_p (const_edge e)
6327{
6328 if (e->flags & (EDGE_ABNORMAL | EDGE_EH))
6329 return false;
6330
6331 return true;
6332}
6333
6334/* Simple wrapper, as we can always redirect fallthru edges. */
6335
6336static basic_block
6337gimple_redirect_edge_and_branch_force (edge e, basic_block dest)
6338{
6339 e = gimple_redirect_edge_and_branch (e, dest);
6340 gcc_assert (e);
6341
6342 return NULL;
6343}
6344
6345
6346/* Splits basic block BB after statement STMT (but at least after the
6347 labels). If STMT is NULL, BB is split just after the labels. */
6348
6349static basic_block
6350gimple_split_block (basic_block bb, void *stmt)
6351{
6352 gimple_stmt_iterator gsi;
6353 gimple_stmt_iterator gsi_tgt;
6354 gimple_seq list;
6355 basic_block new_bb;
6356 edge e;
6357 edge_iterator ei;
6358
6359 new_bb = create_empty_bb (bb);
6360
6361 /* Redirect the outgoing edges. */
6362 new_bb->succs = bb->succs;
6363 bb->succs = NULL;
6364 FOR_EACH_EDGE (e, ei, new_bb->succs)
6365 e->src = new_bb;
6366
6367 /* Get a stmt iterator pointing to the first stmt to move. */
6368 if (!stmt || gimple_code (g: (gimple *) stmt) == GIMPLE_LABEL)
6369 gsi = gsi_after_labels (bb);
6370 else
6371 {
6372 gsi = gsi_for_stmt ((gimple *) stmt);
6373 gsi_next (i: &gsi);
6374 }
6375
6376 /* Move everything from GSI to the new basic block. */
6377 if (gsi_end_p (i: gsi))
6378 return new_bb;
6379
6380 /* Split the statement list - avoid re-creating new containers as this
6381 brings ugly quadratic memory consumption in the inliner.
6382 (We are still quadratic since we need to update stmt BB pointers,
6383 sadly.) */
6384 gsi_split_seq_before (&gsi, &list);
6385 set_bb_seq (bb: new_bb, seq: list);
6386 for (gsi_tgt = gsi_start (seq&: list);
6387 !gsi_end_p (i: gsi_tgt); gsi_next (i: &gsi_tgt))
6388 gimple_set_bb (gsi_stmt (i: gsi_tgt), new_bb);
6389
6390 return new_bb;
6391}
6392
6393
6394/* Moves basic block BB after block AFTER. */
6395
6396static bool
6397gimple_move_block_after (basic_block bb, basic_block after)
6398{
6399 if (bb->prev_bb == after)
6400 return true;
6401
6402 unlink_block (bb);
6403 link_block (bb, after);
6404
6405 return true;
6406}
6407
6408
6409/* Return TRUE if block BB has no executable statements, otherwise return
6410 FALSE. */
6411
6412static bool
6413gimple_empty_block_p (basic_block bb)
6414{
6415 /* BB must have no executable statements. */
6416 gimple_stmt_iterator gsi = gsi_after_labels (bb);
6417 if (phi_nodes (bb))
6418 return false;
6419 while (!gsi_end_p (i: gsi))
6420 {
6421 gimple *stmt = gsi_stmt (i: gsi);
6422 if (is_gimple_debug (gs: stmt))
6423 ;
6424 else if (gimple_code (g: stmt) == GIMPLE_NOP
6425 || gimple_code (g: stmt) == GIMPLE_PREDICT)
6426 ;
6427 else
6428 return false;
6429 gsi_next (i: &gsi);
6430 }
6431 return true;
6432}
6433
6434
6435/* Split a basic block if it ends with a conditional branch (GIMPLE_COND
6436 or GIMPLE_SWITCH) and the other part of the block is not empty. */
6437
6438static basic_block
6439gimple_split_block_before_cond_jump (basic_block bb)
6440{
6441 gimple *last, *split_point;
6442 gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
6443 if (gsi_end_p (i: gsi))
6444 return NULL;
6445 last = gsi_stmt (i: gsi);
6446 if (gimple_code (g: last) != GIMPLE_COND
6447 && gimple_code (g: last) != GIMPLE_SWITCH)
6448 return NULL;
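  /* Split after the statement immediately preceding the branch so
     that the branch starts the new basic block.  */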
6449 gsi_prev (i: &gsi);
6450 split_point = gsi_stmt (i: gsi);
6451 return split_block (bb, split_point)->dest;
6452}
6453
6454
6455/* Return true if basic block BB can be duplicated. */
6456
6457static bool
6458gimple_can_duplicate_bb_p (const_basic_block bb)
6459{
6460 gimple *last = last_nondebug_stmt (CONST_CAST_BB (bb));
6461
6462 /* Do checks that can only fail for the last stmt, to minimize the work in the
6463 stmt loop. */
6464 if (last)
    {
6465 /* A transaction is a single entry multiple exit region. It
6466 must be duplicated in its entirety or not at all. */
6467 if (gimple_code (last) == GIMPLE_TRANSACTION)
6468 return false;
6469
6470 /* An IFN_UNIQUE call must be duplicated as part of its group,
6471 or not at all. */
6472 if (is_gimple_call (last)
6473 && gimple_call_internal_p (last)
6474 && gimple_call_internal_unique_p (last))
6475 return false;
6476 }
6477
6478 for (gimple_stmt_iterator gsi = gsi_start_bb (CONST_CAST_BB (bb));
6479 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
6480 {
6481 gimple *g = gsi_stmt (i: gsi);
6482
6483 /* Prohibit duplication of returns_twice calls, otherwise associated
6484 abnormal edges also need to be duplicated properly.
6485 An IFN_GOMP_SIMT_ENTER_ALLOC/IFN_GOMP_SIMT_EXIT call must be
6486 duplicated as part of its group, or not at all.
6487 The IFN_GOMP_SIMT_VOTE_ANY and IFN_GOMP_SIMT_XCHG_* are part of such a
6488 group, so the same holds there. */
6489 if (is_gimple_call (gs: g)
6490 && (gimple_call_flags (g) & ECF_RETURNS_TWICE
6491 || gimple_call_internal_p (gs: g, fn: IFN_GOMP_SIMT_ENTER_ALLOC)
6492 || gimple_call_internal_p (gs: g, fn: IFN_GOMP_SIMT_EXIT)
6493 || gimple_call_internal_p (gs: g, fn: IFN_GOMP_SIMT_VOTE_ANY)
6494 || gimple_call_internal_p (gs: g, fn: IFN_GOMP_SIMT_XCHG_BFLY)
6495 || gimple_call_internal_p (gs: g, fn: IFN_GOMP_SIMT_XCHG_IDX)))
6496 return false;
6497 }
6498
6499 return true;
6500}
6501
6502/* Create a duplicate of the basic block BB. NOTE: This does not
6503 preserve SSA form. */
6504
6505static basic_block
6506gimple_duplicate_bb (basic_block bb, copy_bb_data *id)
6507{
6508 basic_block new_bb;
6509 gimple_stmt_iterator gsi_tgt;
6510
6511 new_bb = create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
6512
6513 /* Copy the PHI nodes. We ignore PHI node arguments here because
6514 the incoming edges have not been set up yet. */
6515 for (gphi_iterator gpi = gsi_start_phis (bb);
6516 !gsi_end_p (i: gpi);
6517 gsi_next (i: &gpi))
6518 {
6519 gphi *phi, *copy;
6520 phi = gpi.phi ();
6521 copy = create_phi_node (NULL_TREE, new_bb);
6522 create_new_def_for (gimple_phi_result (gs: phi), copy,
6523 gimple_phi_result_ptr (gs: copy));
6524 gimple_set_uid (g: copy, uid: gimple_uid (g: phi));
6525 }
6526
6527 gsi_tgt = gsi_start_bb (bb: new_bb);
6528 for (gimple_stmt_iterator gsi = gsi_start_bb (bb);
6529 !gsi_end_p (i: gsi);
6530 gsi_next (i: &gsi))
6531 {
6532 def_operand_p def_p;
6533 ssa_op_iter op_iter;
6534 tree lhs;
6535 gimple *stmt, *copy;
6536
6537 stmt = gsi_stmt (i: gsi);
6538 if (gimple_code (g: stmt) == GIMPLE_LABEL)
6539 continue;
6540
6541 /* Don't duplicate label debug stmts. */
6542 if (gimple_debug_bind_p (s: stmt)
6543 && TREE_CODE (gimple_debug_bind_get_var (stmt))
6544 == LABEL_DECL)
6545 continue;
6546
6547 /* Create a new copy of STMT and duplicate STMT's virtual
6548 operands. */
6549 copy = gimple_copy (stmt);
6550 gsi_insert_after (&gsi_tgt, copy, GSI_NEW_STMT);
6551
6552 maybe_duplicate_eh_stmt (copy, stmt);
6553 gimple_duplicate_stmt_histograms (cfun, copy, cfun, stmt);
6554
6555 /* When copying around a stmt writing into a local non-user
6556 aggregate, make sure it won't share a stack slot with other
6557 vars. */
6558 lhs = gimple_get_lhs (stmt);
6559 if (lhs && TREE_CODE (lhs) != SSA_NAME)
6560 {
6561 tree base = get_base_address (t: lhs);
6562 if (base
6563 && (VAR_P (base) || TREE_CODE (base) == RESULT_DECL)
6564 && DECL_IGNORED_P (base)
6565 && !TREE_STATIC (base)
6566 && !DECL_EXTERNAL (base)
6567 && (!VAR_P (base) || !DECL_HAS_VALUE_EXPR_P (base)))
6568 DECL_NONSHAREABLE (base) = 1;
6569 }
6570
6571 /* If requested, remap dependence info of cliques brought in
6572 via inlining. */
6573 if (id)
6574 for (unsigned i = 0; i < gimple_num_ops (gs: copy); ++i)
6575 {
6576 tree op = gimple_op (gs: copy, i);
6577 if (!op)
6578 continue;
6579 if (TREE_CODE (op) == ADDR_EXPR
6580 || TREE_CODE (op) == WITH_SIZE_EXPR)
6581 op = TREE_OPERAND (op, 0);
6582 while (handled_component_p (t: op))
6583 op = TREE_OPERAND (op, 0);
6584 if ((TREE_CODE (op) == MEM_REF
6585 || TREE_CODE (op) == TARGET_MEM_REF)
6586 && MR_DEPENDENCE_CLIQUE (op) > 1
6587 && MR_DEPENDENCE_CLIQUE (op) != bb->loop_father->owned_clique)
6588 {
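		  /* Lazily create the clique map; every clique copied
		     into this function gets one fresh clique number,
		     reused for all references to the same old clique.  */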
6589 if (!id->dependence_map)
6590 id->dependence_map = new hash_map<dependence_hash,
6591 unsigned short>;
6592 bool existed;
6593 unsigned short &newc = id->dependence_map->get_or_insert
6594 (MR_DEPENDENCE_CLIQUE (op), existed: &existed);
6595 if (!existed)
6596 {
6597 gcc_assert (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
6598 newc = ++cfun->last_clique;
6599 }
6600 MR_DEPENDENCE_CLIQUE (op) = newc;
6601 }
6602 }
6603
6604 /* Create new names for all the definitions created by COPY and
6605 add replacement mappings for each new name. */
6606 FOR_EACH_SSA_DEF_OPERAND (def_p, copy, op_iter, SSA_OP_ALL_DEFS)
6607 create_new_def_for (DEF_FROM_PTR (def_p), copy, def_p);
6608 }
6609
6610 return new_bb;
6611}
6612
6613/* Adds phi node arguments for edge E_COPY after basic block duplication. */
6614
6615static void
6616add_phi_args_after_copy_edge (edge e_copy)
6617{
6618 basic_block bb, bb_copy = e_copy->src, dest;
6619 edge e;
6620 edge_iterator ei;
6621 gphi *phi, *phi_copy;
6622 tree def;
6623 gphi_iterator psi, psi_copy;
6624
6625 if (gimple_seq_empty_p (s: phi_nodes (bb: e_copy->dest)))
6626 return;
6627
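  /* Work with the original blocks when looking up the edge that
     corresponds to E_COPY in the pre-duplication CFG.  */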
6628 bb = bb_copy->flags & BB_DUPLICATED ? get_bb_original (bb_copy) : bb_copy;
6629
6630 if (e_copy->dest->flags & BB_DUPLICATED)
6631 dest = get_bb_original (e_copy->dest);
6632 else
6633 dest = e_copy->dest;
6634
6635 e = find_edge (bb, dest);
6636 if (!e)
6637 {
6638 /* During loop unrolling the target of the latch edge is copied.
6639 In this case we are not looking for the edge to DEST, but for
6640 the edge to the duplicated block whose original was DEST. */
6641 FOR_EACH_EDGE (e, ei, bb->succs)
6642 {
6643 if ((e->dest->flags & BB_DUPLICATED)
6644 && get_bb_original (e->dest) == dest)
6645 break;
6646 }
6647
6648 gcc_assert (e != NULL);
6649 }
6650
6651 for (psi = gsi_start_phis (e->dest),
6652 psi_copy = gsi_start_phis (e_copy->dest);
6653 !gsi_end_p (i: psi);
6654 gsi_next (i: &psi), gsi_next (i: &psi_copy))
6655 {
6656 phi = psi.phi ();
6657 phi_copy = psi_copy.phi ();
6658 def = PHI_ARG_DEF_FROM_EDGE (phi, e);
6659 add_phi_arg (phi_copy, def, e_copy,
6660 gimple_phi_arg_location_from_edge (phi, e));
6661 }
6662}
6663
6664
6665/* Basic block BB_COPY was created by code duplication. Add phi node
6666 arguments for edges going out of BB_COPY. The blocks that were
6667 duplicated have BB_DUPLICATED set. */
6668
6669void
6670add_phi_args_after_copy_bb (basic_block bb_copy)
6671{
6672 edge e_copy;
6673 edge_iterator ei;
6674
6675 FOR_EACH_EDGE (e_copy, ei, bb_copy->succs)
6676 {
6677 add_phi_args_after_copy_edge (e_copy);
6678 }
6679}
6680
6681/* Blocks in REGION_COPY array of length N_REGION were created by
6682 duplication of basic blocks. Add phi node arguments for edges
6683 going from these blocks. If E_COPY is not NULL, also add
6684 phi node arguments for its destination. */
6685
6686void
6687add_phi_args_after_copy (basic_block *region_copy, unsigned n_region,
6688 edge e_copy)
6689{
6690 unsigned i;
6691
6692 for (i = 0; i < n_region; i++)
6693 region_copy[i]->flags |= BB_DUPLICATED;
6694
6695 for (i = 0; i < n_region; i++)
6696 add_phi_args_after_copy_bb (bb_copy: region_copy[i]);
6697 if (e_copy)
6698 add_phi_args_after_copy_edge (e_copy);
6699
6700 for (i = 0; i < n_region; i++)
6701 region_copy[i]->flags &= ~BB_DUPLICATED;
6702}
6703
6704/* Duplicates a REGION (set of N_REGION basic blocks) with just a single
6705 important exit edge EXIT. By important we mean that no SSA name defined
6706 inside region is live over the other exit edges of the region. All entry
6707 edges to the region must go to ENTRY->dest. The edge ENTRY is redirected
6708 to the duplicate of the region. Dominance and loop information is
6709 updated if UPDATE_DOMINANCE is true, but not the SSA web. If
6710 UPDATE_DOMINANCE is false then we assume that the caller will update the
6711 dominance information after calling this function. The new basic
6712 blocks are stored to REGION_COPY in the same order as they had in REGION,
6713 provided that REGION_COPY is not NULL.
6714 The function returns false if it is unable to copy the region,
6715 true otherwise.
6716
6717 It is the caller's responsibility to update the profile. */
6718
6719bool
6720gimple_duplicate_seme_region (edge entry, edge exit,
6721 basic_block *region, unsigned n_region,
6722 basic_block *region_copy,
6723 bool update_dominance)
6724{
6725 unsigned i;
6726 bool free_region_copy = false, copying_header = false;
6727 class loop *loop = entry->dest->loop_father;
6728 edge exit_copy;
6729 edge redirected;
6730
6731 if (!can_copy_bbs_p (region, n_region))
6732 return false;
6733
6734 /* Some sanity checking. Note that we do not check for all possible
6735 misuses of the function. I.e. if you ask to copy something weird,
6736 it will work, but the state of the structures probably will not be
6737 correct. */
6738 for (i = 0; i < n_region; i++)
6739 {
6740 /* We do not handle subloops, i.e. all the blocks must belong to the
6741 same loop. */
6742 if (region[i]->loop_father != loop)
6743 return false;
6744
6745 if (region[i] != entry->dest
6746 && region[i] == loop->header)
6747 return false;
6748 }
6749
6750 /* In case the function is used for loop header copying (which is the primary
6751 use), ensure that EXIT and its copy will be the new latch and entry edges. */
6752 if (loop->header == entry->dest)
6753 {
6754 copying_header = true;
6755
6756 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, exit->src))
6757 return false;
6758
6759 for (i = 0; i < n_region; i++)
6760 if (region[i] != exit->src
6761 && dominated_by_p (CDI_DOMINATORS, region[i], exit->src))
6762 return false;
6763 }
6764
6765 initialize_original_copy_tables ();
6766
6767 if (copying_header)
6768 set_loop_copy (loop, loop_outer (loop));
6769 else
6770 set_loop_copy (loop, loop);
6771
6772 if (!region_copy)
6773 {
6774 region_copy = XNEWVEC (basic_block, n_region);
6775 free_region_copy = true;
6776 }
6777
6778 /* Record blocks outside the region that are dominated by something
6779 inside. */
6780 auto_vec<basic_block> doms;
6781 if (update_dominance)
6782 doms = get_dominated_by_region (CDI_DOMINATORS, region, n_region);
6783
6784 copy_bbs (region, n_region, region_copy, &exit, 1, &exit_copy, loop,
6785 split_edge_bb_loc (edge_in: entry), update_dominance);
6786
6787 if (copying_header)
6788 {
6789 loop->header = exit->dest;
6790 loop->latch = exit->src;
6791 }
6792
6793 /* Redirect the entry and add the phi node arguments. */
6794 redirected = redirect_edge_and_branch (entry, get_bb_copy (entry->dest));
6795 gcc_assert (redirected != NULL);
6796 flush_pending_stmts (entry);
6797
6798 /* Concerning updating of dominators: We must recount dominators
6799 for entry block and its copy. Anything that is outside of the
6800 region, but was dominated by something inside needs recounting as
6801 well. */
6802 if (update_dominance)
6803 {
6804 set_immediate_dominator (CDI_DOMINATORS, entry->dest, entry->src);
6805 doms.safe_push (obj: get_bb_original (entry->dest));
6806 iterate_fix_dominators (CDI_DOMINATORS, doms, false);
6807 }
6808
6809 /* Add the other PHI node arguments. */
6810 add_phi_args_after_copy (region_copy, n_region, NULL);
6811
6812 if (free_region_copy)
6813 free (ptr: region_copy);
6814
6815 free_original_copy_tables ();
6816 return true;
6817}
6818
6819/* Checks if BB is part of the region defined by N_REGION BBS. */
6820static bool
6821bb_part_of_region_p (basic_block bb, basic_block* bbs, unsigned n_region)
6822{
6823 unsigned int n;
6824
6825 for (n = 0; n < n_region; n++)
6826 {
6827 if (bb == bbs[n])
6828 return true;
6829 }
6830 return false;
6831}
6832
6833
6834/* For each PHI in BB, copy the argument associated with SRC_E to TGT_E.
6835 The argument is assumed to exist; it merely has no value yet. */
6836
6837void
6838copy_phi_arg_into_existing_phi (edge src_e, edge tgt_e)
6839{
6840 int src_idx = src_e->dest_idx;
6841 int tgt_idx = tgt_e->dest_idx;
6842
6843 /* Iterate over each PHI in e->dest. */
6844 for (gphi_iterator gsi = gsi_start_phis (src_e->dest),
6845 gsi2 = gsi_start_phis (tgt_e->dest);
6846 !gsi_end_p (i: gsi);
6847 gsi_next (i: &gsi), gsi_next (i: &gsi2))
6848 {
6849 gphi *src_phi = gsi.phi ();
6850 gphi *dest_phi = gsi2.phi ();
6851 tree val = gimple_phi_arg_def (gs: src_phi, index: src_idx);
6852 location_t locus = gimple_phi_arg_location (phi: src_phi, i: src_idx);
6853
6854 SET_PHI_ARG_DEF (dest_phi, tgt_idx, val);
6855 gimple_phi_arg_set_location (phi: dest_phi, i: tgt_idx, loc: locus);
6856 }
6857}
6858
6859/* Duplicates REGION consisting of N_REGION blocks. The new blocks
6860 are stored to REGION_COPY in the same order as they appear
6861 in REGION, if REGION_COPY is not NULL. ENTRY is the entry to
6862 the region, EXIT an exit from it. The condition guarding EXIT
6863 is moved to ENTRY. Returns true if duplication succeeds, false
6864 otherwise.
6865
6866 For example,
6867
6868 some_code;
6869 if (cond)
6870 A;
6871 else
6872 B;
6873
6874 is transformed to
6875
6876 if (cond)
6877 {
6878 some_code;
6879 A;
6880 }
6881 else
6882 {
6883 some_code;
6884 B;
6885 }
6886*/
6887
6888bool
6889gimple_duplicate_sese_tail (edge entry, edge exit,
6890 basic_block *region, unsigned n_region,
6891 basic_block *region_copy)
6892{
6893 unsigned i;
6894 bool free_region_copy = false;
6895 class loop *loop = exit->dest->loop_father;
6896 class loop *orig_loop = entry->dest->loop_father;
6897 basic_block switch_bb, entry_bb, nentry_bb;
6898 profile_count total_count = profile_count::uninitialized (),
6899 exit_count = profile_count::uninitialized ();
6900 edge exits[2], nexits[2], e;
6901 gimple_stmt_iterator gsi;
6902 edge sorig, snew;
6903 basic_block exit_bb;
6904 class loop *target, *aloop, *cloop;
6905
6906 gcc_assert (EDGE_COUNT (exit->src->succs) == 2);
6907 exits[0] = exit;
6908 exits[1] = EDGE_SUCC (exit->src, EDGE_SUCC (exit->src, 0) == exit);
6909
6910 if (!can_copy_bbs_p (region, n_region))
6911 return false;
6912
6913 initialize_original_copy_tables ();
6914 set_loop_copy (orig_loop, loop);
6915
6916 target = loop;
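  /* Duplicate the loop tree of every subloop whose header lies inside
     the region, attaching the copies to TARGET.  */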
6917 for (aloop = orig_loop->inner; aloop; aloop = aloop->next)
6918 {
6919 if (bb_part_of_region_p (bb: aloop->header, bbs: region, n_region))
6920 {
6921 cloop = duplicate_loop (aloop, target);
6922 duplicate_subloops (aloop, cloop);
6923 }
6924 }
6925
6926 if (!region_copy)
6927 {
6928 region_copy = XNEWVEC (basic_block, n_region);
6929 free_region_copy = true;
6930 }
6931
6932 gcc_assert (!need_ssa_update_p (cfun));
6933
6934 /* Record blocks outside the region that are dominated by something
6935 inside. */
6936 auto_vec<basic_block> doms = get_dominated_by_region (CDI_DOMINATORS, region,
6937 n_region);
6938
6939 total_count = exit->src->count;
6940 exit_count = exit->count ();
6941 /* Fix up corner cases, to avoid division by zero or creation of negative
6942 frequencies. */
6943 if (exit_count > total_count)
6944 exit_count = total_count;
6945
6946 copy_bbs (region, n_region, region_copy, exits, 2, nexits, orig_loop,
6947 split_edge_bb_loc (edge_in: exit), true);
6948 if (total_count.initialized_p () && exit_count.initialized_p ())
6949 {
6950 scale_bbs_frequencies_profile_count (region, n_region,
6951 total_count - exit_count,
6952 total_count);
6953 scale_bbs_frequencies_profile_count (region_copy, n_region, exit_count,
6954 total_count);
6955 }
6956
6957 /* Create the switch block, and put the exit condition to it. */
6958 entry_bb = entry->dest;
6959 nentry_bb = get_bb_copy (entry_bb);
6960 if (!*gsi_last_bb (bb: entry->src)
6961 || !stmt_ends_bb_p (t: *gsi_last_bb (bb: entry->src)))
6962 switch_bb = entry->src;
6963 else
6964 switch_bb = split_edge (entry);
6965 set_immediate_dominator (CDI_DOMINATORS, nentry_bb, switch_bb);
6966
6967 gcond *cond_stmt = as_a <gcond *> (p: *gsi_last_bb (bb: exit->src));
6968 cond_stmt = as_a <gcond *> (p: gimple_copy (cond_stmt));
6969
6970 gsi = gsi_last_bb (bb: switch_bb);
6971 gsi_insert_after (&gsi, cond_stmt, GSI_NEW_STMT);
6972
6973 sorig = single_succ_edge (bb: switch_bb);
6974 sorig->flags = exits[1]->flags;
6975 sorig->probability = exits[1]->probability;
6976 snew = make_edge (switch_bb, nentry_bb, exits[0]->flags);
6977 snew->probability = exits[0]->probability;
6978
6980 /* Register the new edge from SWITCH_BB in loop exit lists. */
6981 rescan_loop_exit (snew, true, false);
6982
6983 /* Add the PHI node arguments. */
6984 add_phi_args_after_copy (region_copy, n_region, e_copy: snew);
6985
6986 /* Get rid of now superfluous conditions and associated edges (and phi node
6987 arguments). */
6988 exit_bb = exit->dest;
6989
6990 e = redirect_edge_and_branch (exits[0], exits[1]->dest);
6991 PENDING_STMT (e) = NULL;
6992
6993 /* The latch of ORIG_LOOP was copied, and so was the backedge
6994 to the original header. We redirect this backedge to EXIT_BB. */
6995 for (i = 0; i < n_region; i++)
6996 if (get_bb_original (region_copy[i]) == orig_loop->latch)
6997 {
6998 gcc_assert (single_succ_edge (region_copy[i]));
6999 e = redirect_edge_and_branch (single_succ_edge (bb: region_copy[i]), exit_bb);
7000 PENDING_STMT (e) = NULL;
7001 copy_phi_arg_into_existing_phi (src_e: nexits[0], tgt_e: e);
7002 }
7003 e = redirect_edge_and_branch (nexits[1], nexits[0]->dest);
7004 PENDING_STMT (e) = NULL;
7005
7006 /* Anything outside of the region that was dominated by something
7007 inside needs its dominance info updated. */
7008 iterate_fix_dominators (CDI_DOMINATORS, doms, false);
7009
7010 if (free_region_copy)
7011 free (ptr: region_copy);
7012
7013 free_original_copy_tables ();
7014 return true;
7015}
7016
7017/* Add all the blocks dominated by ENTRY to the array BBS_P. Stop
7018 adding blocks when the dominator traversal reaches EXIT. This
7019 function silently assumes that ENTRY strictly dominates EXIT. */
7020
7021void
7022gather_blocks_in_sese_region (basic_block entry, basic_block exit,
7023 vec<basic_block> *bbs_p)
7024{
7025 basic_block son;
7026
7027 for (son = first_dom_son (CDI_DOMINATORS, entry);
7028 son;
7029 son = next_dom_son (CDI_DOMINATORS, son))
7030 {
7031 bbs_p->safe_push (obj: son);
7032 if (son != exit)
7033 gather_blocks_in_sese_region (entry: son, exit, bbs_p);
7034 }
7035}
7036
7037/* Replaces *TP with a duplicate (belonging to function TO_CONTEXT).
7038 The duplicates are recorded in VARS_MAP. */
7039
7040static void
7041replace_by_duplicate_decl (tree *tp, hash_map<tree, tree> *vars_map,
7042 tree to_context)
7043{
7044 tree t = *tp, new_t;
7045 struct function *f = DECL_STRUCT_FUNCTION (to_context);
7046
7047 if (DECL_CONTEXT (t) == to_context)
7048 return;
7049
7050 bool existed;
7051 tree &loc = vars_map->get_or_insert (k: t, existed: &existed);
7052
7053 if (!existed)
7054 {
7055 if (SSA_VAR_P (t))
7056 {
7057 new_t = copy_var_decl (t, DECL_NAME (t), TREE_TYPE (t));
7058 add_local_decl (fun: f, d: new_t);
7059 }
7060 else
7061 {
7062 gcc_assert (TREE_CODE (t) == CONST_DECL);
7063 new_t = copy_node (t);
7064 }
7065 DECL_CONTEXT (new_t) = to_context;
7066
7067 loc = new_t;
7068 }
7069 else
7070 new_t = loc;
7071
7072 *tp = new_t;
7073}
7074
7075
7076/* Creates an ssa name in TO_CONTEXT equivalent to NAME.
7077 VARS_MAP maps old ssa names and var_decls to the new ones. */
7078
7079static tree
7080replace_ssa_name (tree name, hash_map<tree, tree> *vars_map,
7081 tree to_context)
7082{
7083 tree new_name;
7084
7085 gcc_assert (!virtual_operand_p (name));
7086
7087 tree *loc = vars_map->get (k: name);
7088
7089 if (!loc)
7090 {
7091 tree decl = SSA_NAME_VAR (name);
7092 if (decl)
7093 {
7094 gcc_assert (!SSA_NAME_IS_DEFAULT_DEF (name));
7095 replace_by_duplicate_decl (tp: &decl, vars_map, to_context);
7096 new_name = make_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
7097 decl, SSA_NAME_DEF_STMT (name));
7098 }
7099 else
7100 new_name = copy_ssa_name_fn (DECL_STRUCT_FUNCTION (to_context),
7101 name, SSA_NAME_DEF_STMT (name));
7102
7103 /* Now that we've used the def stmt to define new_name, make sure it
7104 doesn't define name anymore. */
7105 SSA_NAME_DEF_STMT (name) = NULL;
7106
7107 vars_map->put (k: name, v: new_name);
7108 }
7109 else
7110 new_name = *loc;
7111
7112 return new_name;
7113}
7114
7115struct move_stmt_d
7116{
7117 tree orig_block;
7118 tree new_block;
7119 tree from_context;
7120 tree to_context;
7121 hash_map<tree, tree> *vars_map;
7122 htab_t new_label_map;
7123 hash_map<void *, void *> *eh_map;
7124 bool remap_decls_p;
7125};
7126
7127/* Helper for move_block_to_fn. Set TREE_BLOCK in every expression
7128 contained in *TP if it was previously ORIG_BLOCK, and change the
7129 DECL_CONTEXT of every local variable referenced in *TP. */
7130
7131static tree
7132move_stmt_op (tree *tp, int *walk_subtrees, void *data)
7133{
7134 struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
7135 struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
7136 tree t = *tp;
7137
7138 if (EXPR_P (t))
7139 {
7140 tree block = TREE_BLOCK (t);
7141 if (block == NULL_TREE)
7142 ;
7143 else if (block == p->orig_block
7144 || p->orig_block == NULL_TREE)
7145 {
7146 /* tree_node_can_be_shared says we can share invariant
7147 addresses but unshare_expr copies them anyway. Make sure
7148 to unshare before adjusting the block in place - we do not
7149 always see a copy here. */
7150 if (TREE_CODE (t) == ADDR_EXPR
7151 && is_gimple_min_invariant (t))
7152 *tp = t = unshare_expr (t);
7153 TREE_SET_BLOCK (t, p->new_block);
7154 }
7155 else if (flag_checking)
7156 {
7157 while (block && TREE_CODE (block) == BLOCK && block != p->orig_block)
7158 block = BLOCK_SUPERCONTEXT (block);
7159 gcc_assert (block == p->orig_block);
7160 }
7161 }
7162 else if (DECL_P (t) || TREE_CODE (t) == SSA_NAME)
7163 {
7164 if (TREE_CODE (t) == SSA_NAME)
7165 *tp = replace_ssa_name (name: t, vars_map: p->vars_map, to_context: p->to_context);
7166 else if (TREE_CODE (t) == PARM_DECL
7167 && gimple_in_ssa_p (cfun))
7168 *tp = *(p->vars_map->get (k: t));
7169 else if (TREE_CODE (t) == LABEL_DECL)
7170 {
7171 if (p->new_label_map)
7172 {
7173 struct tree_map in, *out;
7174 in.base.from = t;
7175 out = (struct tree_map *)
7176 htab_find_with_hash (p->new_label_map, &in, DECL_UID (t));
7177 if (out)
7178 *tp = t = out->to;
7179 }
7180
7181 /* For FORCED_LABELs we can end up with references from other
7182 functions if some SESE regions are outlined. It is UB to
7183 jump in between them, but they could be used just for printing
7184 addresses etc. In that case, DECL_CONTEXT on the label should
7185 be the function containing the glabel stmt with that LABEL_DECL,
7186 rather than whatever function last happened to reference the
7187 label. */
7188 if (!FORCED_LABEL (t) && !DECL_NONLOCAL (t))
7189 DECL_CONTEXT (t) = p->to_context;
7190 }
7191 else if (p->remap_decls_p)
7192 {
7193 /* Replace T with its duplicate. T should no longer appear in the
7194 parent function, so this looks wasteful; however, it may appear
7195 in referenced_vars, and more importantly, as virtual operands of
7196 statements, and in alias lists of other variables. It would be
7197 quite difficult to expunge it from all those places. ??? It might
7198 suffice to do this for addressable variables. */
7199 if ((VAR_P (t) && !is_global_var (t))
7200 || TREE_CODE (t) == CONST_DECL)
7201 replace_by_duplicate_decl (tp, vars_map: p->vars_map, to_context: p->to_context);
7202 }
7203 *walk_subtrees = 0;
7204 }
7205 else if (TYPE_P (t))
7206 *walk_subtrees = 0;
7207
7208 return NULL_TREE;
7209}
7210
7211/* Helper for move_stmt_r. Given an EH region number for the source
7212 function, map that to the duplicate EH region number in the dest. */
7213
7214static int
7215move_stmt_eh_region_nr (int old_nr, struct move_stmt_d *p)
7216{
7217 eh_region old_r, new_r;
7218
7219 old_r = get_eh_region_from_number (old_nr);
7220 new_r = static_cast<eh_region> (*p->eh_map->get (k: old_r));
7221
7222 return new_r->index;
7223}
7224
7225/* Similar, but operate on INTEGER_CSTs. */
7226
7227static tree
7228move_stmt_eh_region_tree_nr (tree old_t_nr, struct move_stmt_d *p)
7229{
7230 int old_nr, new_nr;
7231
7232 old_nr = tree_to_shwi (old_t_nr);
7233 new_nr = move_stmt_eh_region_nr (old_nr, p);
7234
7235 return build_int_cst (integer_type_node, new_nr);
7236}
7237
7238/* Like move_stmt_op, but for gimple statements.
7239
7240 Helper for move_block_to_fn. Set GIMPLE_BLOCK in every expression
7241 contained in the current statement in *GSI_P and change the
7242 DECL_CONTEXT of every local variable referenced in the current
7243 statement. */
7244
7245static tree
7246move_stmt_r (gimple_stmt_iterator *gsi_p, bool *handled_ops_p,
7247 struct walk_stmt_info *wi)
7248{
7249 struct move_stmt_d *p = (struct move_stmt_d *) wi->info;
7250 gimple *stmt = gsi_stmt (i: *gsi_p);
7251 tree block = gimple_block (g: stmt);
7252
7253 if (block == p->orig_block
7254 || (p->orig_block == NULL_TREE
7255 && block != NULL_TREE))
7256 gimple_set_block (g: stmt, block: p->new_block);
7257
7258 switch (gimple_code (g: stmt))
7259 {
7260 case GIMPLE_CALL:
7261 /* Remap the region numbers for __builtin_eh_{pointer,filter}. */
7262 {
7263 tree r, fndecl = gimple_call_fndecl (gs: stmt);
7264 if (fndecl && fndecl_built_in_p (node: fndecl, klass: BUILT_IN_NORMAL))
7265 switch (DECL_FUNCTION_CODE (decl: fndecl))
7266 {
7267 case BUILT_IN_EH_COPY_VALUES:
7268 r = gimple_call_arg (gs: stmt, index: 1);
7269 r = move_stmt_eh_region_tree_nr (old_t_nr: r, p);
7270 gimple_call_set_arg (gs: stmt, index: 1, arg: r);
7271 /* FALLTHRU */
7272
7273 case BUILT_IN_EH_POINTER:
7274 case BUILT_IN_EH_FILTER:
7275 r = gimple_call_arg (gs: stmt, index: 0);
7276 r = move_stmt_eh_region_tree_nr (old_t_nr: r, p);
7277 gimple_call_set_arg (gs: stmt, index: 0, arg: r);
7278 break;
7279
7280 default:
7281 break;
7282 }
7283 }
7284 break;
7285
7286 case GIMPLE_RESX:
7287 {
7288 gresx *resx_stmt = as_a <gresx *> (p: stmt);
7289 int r = gimple_resx_region (resx_stmt);
7290 r = move_stmt_eh_region_nr (old_nr: r, p);
7291 gimple_resx_set_region (resx_stmt, region: r);
7292 }
7293 break;
7294
7295 case GIMPLE_EH_DISPATCH:
7296 {
7297 geh_dispatch *eh_dispatch_stmt = as_a <geh_dispatch *> (p: stmt);
7298 int r = gimple_eh_dispatch_region (eh_dispatch_stmt);
7299 r = move_stmt_eh_region_nr (old_nr: r, p);
7300 gimple_eh_dispatch_set_region (eh_dispatch_stmt, region: r);
7301 }
7302 break;
7303
7304 case GIMPLE_OMP_RETURN:
7305 case GIMPLE_OMP_CONTINUE:
7306 break;
7307
7308 case GIMPLE_LABEL:
7309 {
7310 /* For FORCED_LABEL, move_stmt_op doesn't adjust DECL_CONTEXT,
7311 so that such labels can be referenced from other regions.
7312 Make sure to update it when seeing a GIMPLE_LABEL though, since
7313 that statement is the owner of the label. */
7314 walk_gimple_op (stmt, move_stmt_op, wi);
7315 *handled_ops_p = true;
7316 tree label = gimple_label_label (gs: as_a <glabel *> (p: stmt));
7317 if (FORCED_LABEL (label) || DECL_NONLOCAL (label))
7318 DECL_CONTEXT (label) = p->to_context;
7319 }
7320 break;
7321
7322 default:
7323 if (is_gimple_omp (stmt))
7324 {
7325 /* Do not remap variables inside OMP directives. Variables
7326 referenced in clauses and directive header belong to the
7327 parent function and should not be moved into the child
7328 function. */
7329 bool save_remap_decls_p = p->remap_decls_p;
7330 p->remap_decls_p = false;
7331 *handled_ops_p = true;
7332
7333 walk_gimple_seq_mod (gimple_omp_body_ptr (gs: stmt), move_stmt_r,
7334 move_stmt_op, wi);
7335
7336 p->remap_decls_p = save_remap_decls_p;
7337 }
7338 break;
7339 }
7340
7341 return NULL_TREE;
7342}
7343
7344/* Move basic block BB from function CFUN to function DEST_FN. The
7345 block is moved out of the original linked list and placed after
7346 block AFTER in the new list. Also, the block is removed from the
7347 original array of blocks and placed in DEST_FN's array of blocks.
7348 If UPDATE_EDGE_COUNT_P is true, the edge counts on both CFGs are
7349 updated to reflect the moved edges.
7350
7351 The local variables are remapped to new instances, VARS_MAP is used
7352 to record the mapping. */
7353
7354static void
7355move_block_to_fn (struct function *dest_cfun, basic_block bb,
7356 basic_block after, bool update_edge_count_p,
7357 struct move_stmt_d *d)
7358{
7359 struct control_flow_graph *cfg;
7360 edge_iterator ei;
7361 edge e;
7362 gimple_stmt_iterator si;
7363 unsigned old_len;
7364
7365 /* Remove BB from dominance structures. */
7366 delete_from_dominance_info (CDI_DOMINATORS, bb);
7367
7368 /* Move BB from its current loop to the copy in the new function. */
7369 if (current_loops)
7370 {
7371 class loop *new_loop = (class loop *)bb->loop_father->aux;
7372 if (new_loop)
7373 bb->loop_father = new_loop;
7374 }
7375
7376 /* Link BB to the new linked list. */
7377 move_block_after (bb, after);
7378
7379 /* Update the edge count in the corresponding flowgraphs. */
7380 if (update_edge_count_p)
7381 FOR_EACH_EDGE (e, ei, bb->succs)
7382 {
7383 cfun->cfg->x_n_edges--;
7384 dest_cfun->cfg->x_n_edges++;
7385 }
7386
7387 /* Remove BB from the original basic block array. */
7388 (*cfun->cfg->x_basic_block_info)[bb->index] = NULL;
7389 cfun->cfg->x_n_basic_blocks--;
7390
7391 /* Grow DEST_CFUN's basic block array if needed. */
7392 cfg = dest_cfun->cfg;
7393 cfg->x_n_basic_blocks++;
7394 if (bb->index >= cfg->x_last_basic_block)
7395 cfg->x_last_basic_block = bb->index + 1;
7396
7397 old_len = vec_safe_length (v: cfg->x_basic_block_info);
7398 if ((unsigned) cfg->x_last_basic_block >= old_len)
7399 vec_safe_grow_cleared (v&: cfg->x_basic_block_info,
7400 len: cfg->x_last_basic_block + 1);
7401
7402 (*cfg->x_basic_block_info)[bb->index] = bb;
7403
7404 /* Remap the variables in phi nodes. */
7405 for (gphi_iterator psi = gsi_start_phis (bb);
7406 !gsi_end_p (i: psi); )
7407 {
7408 gphi *phi = psi.phi ();
7409 use_operand_p use;
7410 tree op = PHI_RESULT (phi);
7411 ssa_op_iter oi;
7412 unsigned i;
7413
7414 if (virtual_operand_p (op))
7415 {
7416 /* Remove the phi nodes for virtual operands (alias analysis will be
7417 run for the new function, anyway). But replace all uses that
7418 might be outside of the region we move. */
7419 use_operand_p use_p;
7420 imm_use_iterator iter;
7421 gimple *use_stmt;
7422 FOR_EACH_IMM_USE_STMT (use_stmt, iter, op)
7423 FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
7424 SET_USE (use_p, SSA_NAME_VAR (op));
7425 remove_phi_node (&psi, true);
7426 continue;
7427 }
7428
7429 SET_PHI_RESULT (phi,
7430 replace_ssa_name (op, d->vars_map, dest_cfun->decl));
7431 FOR_EACH_PHI_ARG (use, phi, oi, SSA_OP_USE)
7432 {
7433 op = USE_FROM_PTR (use);
7434 if (TREE_CODE (op) == SSA_NAME)
7435 SET_USE (use, replace_ssa_name (op, d->vars_map, dest_cfun->decl));
7436 }
7437
7438 for (i = 0; i < EDGE_COUNT (bb->preds); i++)
7439 {
7440 location_t locus = gimple_phi_arg_location (phi, i);
7441 tree block = LOCATION_BLOCK (locus);
7442
7443 if (locus == UNKNOWN_LOCATION)
7444 continue;
7445 if (d->orig_block == NULL_TREE || block == d->orig_block)
7446 {
7447 locus = set_block (loc: locus, block: d->new_block);
7448 gimple_phi_arg_set_location (phi, i, loc: locus);
7449 }
7450 }
7451
7452 gsi_next (i: &psi);
7453 }
7454
7455 for (si = gsi_start_bb (bb); !gsi_end_p (i: si); gsi_next (i: &si))
7456 {
7457 gimple *stmt = gsi_stmt (i: si);
7458 struct walk_stmt_info wi;
7459
7460 memset (s: &wi, c: 0, n: sizeof (wi));
7461 wi.info = d;
7462 walk_gimple_stmt (&si, move_stmt_r, move_stmt_op, &wi);
7463
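      /* Labels defined in BB move with it: transfer the uid-to-block
	 mapping to DEST_CFUN's table and clear the source's entry.  */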
7464 if (glabel *label_stmt = dyn_cast <glabel *> (p: stmt))
7465 {
7466 tree label = gimple_label_label (gs: label_stmt);
7467 int uid = LABEL_DECL_UID (label);
7468
7469 gcc_assert (uid > -1);
7470
7471 old_len = vec_safe_length (v: cfg->x_label_to_block_map);
7472 if (old_len <= (unsigned) uid)
7473 vec_safe_grow_cleared (v&: cfg->x_label_to_block_map, len: uid + 1);
7474
7475 (*cfg->x_label_to_block_map)[uid] = bb;
7476 (*cfun->cfg->x_label_to_block_map)[uid] = NULL;
7477
7478 gcc_assert (DECL_CONTEXT (label) == dest_cfun->decl);
7479
7480 if (uid >= dest_cfun->cfg->last_label_uid)
7481 dest_cfun->cfg->last_label_uid = uid + 1;
7482 }
7483
7484 maybe_duplicate_eh_stmt_fn (dest_cfun, stmt, cfun, stmt, d->eh_map, 0);
7485 remove_stmt_from_eh_lp_fn (cfun, stmt);
7486
7487 gimple_duplicate_stmt_histograms (dest_cfun, stmt, cfun, stmt);
7488 gimple_remove_stmt_histograms (cfun, stmt);
7489
7490 /* We cannot leave any operands allocated from the operand caches of
7491 the current function. */
7492 free_stmt_operands (cfun, stmt);
7493 push_cfun (new_cfun: dest_cfun);
7494 update_stmt (s: stmt);
7495 if (is_gimple_call (gs: stmt))
7496 notice_special_calls (call: as_a <gcall *> (p: stmt));
7497 pop_cfun ();
7498 }
7499
7500 FOR_EACH_EDGE (e, ei, bb->succs)
7501 if (e->goto_locus != UNKNOWN_LOCATION)
7502 {
7503 tree block = LOCATION_BLOCK (e->goto_locus);
7504 if (d->orig_block == NULL_TREE
7505 || block == d->orig_block)
7506 e->goto_locus = set_block (loc: e->goto_locus, block: d->new_block);
7507 }
7508}

/* Examine the statements in BB (which is in SRC_CFUN); find and return
   the outermost EH region.  Use REGION as the incoming base EH region.
   If there is no single outermost region, return NULL and set *ALL to
   true.  */

static eh_region
find_outermost_region_in_block (struct function *src_cfun,
				basic_block bb, eh_region region,
				bool *all)
{
  gimple_stmt_iterator si;

  for (si = gsi_start_bb (bb); !gsi_end_p (si); gsi_next (&si))
    {
      gimple *stmt = gsi_stmt (si);
      eh_region stmt_region;
      int lp_nr;

      lp_nr = lookup_stmt_eh_lp_fn (src_cfun, stmt);
      stmt_region = get_eh_region_from_lp_number_fn (src_cfun, lp_nr);
      if (stmt_region)
	{
	  if (region == NULL)
	    region = stmt_region;
	  else if (stmt_region != region)
	    {
	      region = eh_region_outermost (src_cfun, stmt_region, region);
	      if (region == NULL)
		{
		  *all = true;
		  return NULL;
		}
	    }
	}
    }

  return region;
}

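/* Callback for duplicate_eh_regions: whenever an EH label DECL is
   duplicated, create a fresh artificial label in the current function,
   record the DECL -> new-label mapping in the tree_map hash table
   passed in DATA, and return the new label.  */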
static tree
new_label_mapper (tree decl, void *data)
{
  htab_t hash = (htab_t) data;
  struct tree_map *m;
  void **slot;

  gcc_assert (TREE_CODE (decl) == LABEL_DECL);

  m = XNEW (struct tree_map);
  m->hash = DECL_UID (decl);
  m->base.from = decl;
  m->to = create_artificial_label (UNKNOWN_LOCATION);
  LABEL_DECL_UID (m->to) = LABEL_DECL_UID (decl);
  if (LABEL_DECL_UID (m->to) >= cfun->cfg->last_label_uid)
    cfun->cfg->last_label_uid = LABEL_DECL_UID (m->to) + 1;

  slot = htab_find_slot_with_hash (hash, m, m->hash, INSERT);
  gcc_assert (*slot == NULL);

  *slot = m;

  return m->to;
}

/* Tree walker to replace the decls used inside value expressions by
   duplicates.  */

static tree
replace_block_vars_by_duplicates_1 (tree *tp, int *walk_subtrees, void *data)
{
  struct replace_decls_d *rd = (struct replace_decls_d *)data;

  switch (TREE_CODE (*tp))
    {
    case VAR_DECL:
    case PARM_DECL:
    case RESULT_DECL:
      replace_by_duplicate_decl (tp, rd->vars_map, rd->to_context);
      break;
    default:
      break;
    }

  if (IS_TYPE_OR_DECL_P (*tp))
    *walk_subtrees = false;

  return NULL;
}

/* Change DECL_CONTEXT of all BLOCK_VARS in block, including
   subblocks.  */

static void
replace_block_vars_by_duplicates (tree block, hash_map<tree, tree> *vars_map,
				  tree to_context)
{
  tree *tp, t;

  for (tp = &BLOCK_VARS (block); *tp; tp = &DECL_CHAIN (*tp))
    {
      t = *tp;
      if (!VAR_P (t) && TREE_CODE (t) != CONST_DECL)
	continue;
      replace_by_duplicate_decl (&t, vars_map, to_context);
      if (t != *tp)
	{
	  if (VAR_P (*tp) && DECL_HAS_VALUE_EXPR_P (*tp))
	    {
	      tree x = DECL_VALUE_EXPR (*tp);
	      struct replace_decls_d rd = { vars_map, to_context };
	      x = unshare_expr (x);
	      walk_tree (&x, replace_block_vars_by_duplicates_1, &rd, NULL);
	      SET_DECL_VALUE_EXPR (t, x);
	      DECL_HAS_VALUE_EXPR_P (t) = 1;
	    }
	  DECL_CHAIN (t) = DECL_CHAIN (*tp);
	  *tp = t;
	}
    }

  for (block = BLOCK_SUBBLOCKS (block); block; block = BLOCK_CHAIN (block))
    replace_block_vars_by_duplicates (block, vars_map, to_context);
}

/* Fixup the loop arrays and numbers after moving LOOP and its subloops
   from FN1 to FN2.  */

static void
fixup_loop_arrays_after_move (struct function *fn1, struct function *fn2,
			      class loop *loop)
{
  /* Discard it from the old loop array.  */
  (*get_loops (fn1))[loop->num] = NULL;

  /* Place it in the new loop array, assigning it a new number.  */
  loop->num = number_of_loops (fn2);
  vec_safe_push (loops_for_fn (fn2)->larray, loop);

  /* Recurse to children.  */
  for (loop = loop->inner; loop; loop = loop->next)
    fixup_loop_arrays_after_move (fn1, fn2, loop);
}
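
/* For example, if FN2 already contains three loops (numbers 0 to 2),
   a loop moved from FN1 is appended to FN2's loop array and becomes
   loop number 3, while its old slot in FN1's array is left as NULL.  */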

/* Verify that the blocks in BBS_P are a single-entry, single-exit region
   delimited by ENTRY_BB and EXIT_BB, possibly containing noreturn blocks.  */

DEBUG_FUNCTION void
verify_sese (basic_block entry, basic_block exit, vec<basic_block> *bbs_p)
{
  basic_block bb;
  edge_iterator ei;
  edge e;
  bitmap bbs = BITMAP_ALLOC (NULL);
  int i;

  gcc_assert (entry != NULL);
  gcc_assert (entry != exit);
  gcc_assert (bbs_p != NULL);

  gcc_assert (bbs_p->length () > 0);

  FOR_EACH_VEC_ELT (*bbs_p, i, bb)
    bitmap_set_bit (bbs, bb->index);

  gcc_assert (bitmap_bit_p (bbs, entry->index));
  gcc_assert (exit == NULL || bitmap_bit_p (bbs, exit->index));

  FOR_EACH_VEC_ELT (*bbs_p, i, bb)
    {
      if (bb == entry)
	{
	  gcc_assert (single_pred_p (entry));
	  gcc_assert (!bitmap_bit_p (bbs, single_pred (entry)->index));
	}
      else
	for (ei = ei_start (bb->preds); !ei_end_p (ei); ei_next (&ei))
	  {
	    e = ei_edge (ei);
	    gcc_assert (bitmap_bit_p (bbs, e->src->index));
	  }

      if (bb == exit)
	{
	  gcc_assert (single_succ_p (exit));
	  gcc_assert (!bitmap_bit_p (bbs, single_succ (exit)->index));
	}
      else
	for (ei = ei_start (bb->succs); !ei_end_p (ei); ei_next (&ei))
	  {
	    e = ei_edge (ei);
	    gcc_assert (bitmap_bit_p (bbs, e->dest->index));
	  }
    }

  BITMAP_FREE (bbs);
}

/* If FROM is an SSA_NAME, mark the version in bitmap DATA.  */

bool
gather_ssa_name_hash_map_from (tree const &from, tree const &, void *data)
{
  bitmap release_names = (bitmap)data;

  if (TREE_CODE (from) != SSA_NAME)
    return true;

  bitmap_set_bit (release_names, SSA_NAME_VERSION (from));
  return true;
}

/* Return LOOP_DIST_ALIAS call if present in BB.  */

static gimple *
find_loop_dist_alias (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  if (!safe_is_a <gcond *> (*gsi))
    return NULL;

  gsi_prev (&gsi);
  if (gsi_end_p (gsi))
    return NULL;

  gimple *g = gsi_stmt (gsi);
  if (gimple_call_internal_p (g, IFN_LOOP_DIST_ALIAS))
    return g;
  return NULL;
}

/* Fold loop internal call G like IFN_LOOP_VECTORIZED/IFN_LOOP_DIST_ALIAS
   to VALUE and update any immediate uses of its LHS.  */

void
fold_loop_internal_call (gimple *g, tree value)
{
  tree lhs = gimple_call_lhs (g);
  use_operand_p use_p;
  imm_use_iterator iter;
  gimple *use_stmt;
  gimple_stmt_iterator gsi = gsi_for_stmt (g);

  replace_call_with_value (&gsi, value);
  FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
    {
      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
	SET_USE (use_p, value);
      update_stmt (use_stmt);
      /* If we turn a conditional into a constant, scale the profile
	 counts.  We know that the conditional was created by loop
	 distribution and that all basic blocks dominated by the taken
	 edge are part of the distributed loop.  */
      if (gimple_code (use_stmt) == GIMPLE_COND)
	{
	  edge true_edge, false_edge;
	  extract_true_false_edges_from_block (gimple_bb (use_stmt),
					       &true_edge, &false_edge);
	  edge taken_edge = NULL, other_edge = NULL;
	  if (gimple_cond_true_p (as_a <gcond *> (use_stmt)))
	    {
	      taken_edge = true_edge;
	      other_edge = false_edge;
	    }
	  else if (gimple_cond_false_p (as_a <gcond *> (use_stmt)))
	    {
	      taken_edge = false_edge;
	      other_edge = true_edge;
	    }
	  if (taken_edge
	      && !(taken_edge->probability == profile_probability::always ()))
	    {
	      profile_count old_count = taken_edge->count ();
	      profile_count new_count = taken_edge->src->count;
	      taken_edge->probability = profile_probability::always ();
	      other_edge->probability = profile_probability::never ();
	      /* If we have multiple predecessors, we can't use the
		 dominance test.  This should not happen as the guarded
		 code should start with a preheader.  */
	      gcc_assert (single_pred_edge (taken_edge->dest));
	      if (old_count.nonzero_p ())
		{
		  taken_edge->dest->count
		    = taken_edge->dest->count.apply_scale (new_count,
							   old_count);
		  scale_strictly_dominated_blocks (taken_edge->dest,
						   new_count, old_count);
		}
	    }
	}
    }
}
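
/* For example, move_sese_region_to_fn below resolves a stale
   LOOP_DIST_ALIAS call G with

     fold_loop_internal_call (g, gimple_call_arg (g, 1));

   which folds the call to its second argument and rewrites all
   immediate uses of its LHS accordingly.  */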

/* Move a single-entry, single-exit region delimited by ENTRY_BB and
   EXIT_BB to function DEST_CFUN.  The whole region is replaced by a
   single basic block in the original CFG and the new basic block is
   returned.  DEST_CFUN must not have a CFG yet.

   Note that the region need not be a pure SESE region.  Blocks inside
   the region may contain calls to abort/exit.  The only restriction
   is that ENTRY_BB should be the only entry point and it must
   dominate EXIT_BB.

   Change TREE_BLOCK of all statements in ORIG_BLOCK to the new
   function's outermost BLOCK, and move all subblocks of ORIG_BLOCK
   to the new function.

   All local variables referenced in the region are assumed to be in
   the corresponding BLOCK_VARS and unexpanded variable lists
   associated with DEST_CFUN.

   TODO: investigate whether we can reuse gimple_duplicate_sese_region to
   reimplement move_sese_region_to_fn by duplicating the region rather than
   moving it.  */

basic_block
move_sese_region_to_fn (struct function *dest_cfun, basic_block entry_bb,
			basic_block exit_bb, tree orig_block)
{
  vec<basic_block> bbs;
  basic_block dom_entry = get_immediate_dominator (CDI_DOMINATORS, entry_bb);
  basic_block after, bb, *entry_pred, *exit_succ, abb;
  struct function *saved_cfun = cfun;
  int *entry_flag, *exit_flag;
  profile_probability *entry_prob, *exit_prob;
  unsigned i, num_entry_edges, num_exit_edges, num_nodes;
  edge e;
  edge_iterator ei;
  htab_t new_label_map;
  hash_map<void *, void *> *eh_map;
  class loop *loop = entry_bb->loop_father;
  class loop *loop0 = get_loop (saved_cfun, 0);
  struct move_stmt_d d;

  /* If ENTRY does not strictly dominate EXIT, this cannot be an SESE
     region.  */
  gcc_assert (entry_bb != exit_bb
	      && (!exit_bb
		  || dominated_by_p (CDI_DOMINATORS, exit_bb, entry_bb)));

  /* Collect all the blocks in the region.  Manually add ENTRY_BB
     because it won't be added by dfs_enumerate_from.  */
  bbs.create (0);
  bbs.safe_push (entry_bb);
  gather_blocks_in_sese_region (entry_bb, exit_bb, &bbs);

  if (flag_checking)
    verify_sese (entry_bb, exit_bb, &bbs);

  /* The blocks that used to be dominated by something in BBS will now be
     dominated by the new block.  */
  auto_vec<basic_block> dom_bbs = get_dominated_by_region (CDI_DOMINATORS,
							   bbs.address (),
							   bbs.length ());

  /* Detach ENTRY_BB and EXIT_BB from CFUN->CFG.  We need to remember
     the predecessor edges to ENTRY_BB and the successor edges to
     EXIT_BB so that we can re-attach them to the new basic block that
     will replace the region.  */
  num_entry_edges = EDGE_COUNT (entry_bb->preds);
  entry_pred = XNEWVEC (basic_block, num_entry_edges);
  entry_flag = XNEWVEC (int, num_entry_edges);
  entry_prob = XNEWVEC (profile_probability, num_entry_edges);
  i = 0;
  for (ei = ei_start (entry_bb->preds); (e = ei_safe_edge (ei)) != NULL;)
    {
      entry_prob[i] = e->probability;
      entry_flag[i] = e->flags;
      entry_pred[i++] = e->src;
      remove_edge (e);
    }

  if (exit_bb)
    {
      num_exit_edges = EDGE_COUNT (exit_bb->succs);
      exit_succ = XNEWVEC (basic_block, num_exit_edges);
      exit_flag = XNEWVEC (int, num_exit_edges);
      exit_prob = XNEWVEC (profile_probability, num_exit_edges);
      i = 0;
      for (ei = ei_start (exit_bb->succs); (e = ei_safe_edge (ei)) != NULL;)
	{
	  exit_prob[i] = e->probability;
	  exit_flag[i] = e->flags;
	  exit_succ[i++] = e->dest;
	  remove_edge (e);
	}
    }
  else
    {
      num_exit_edges = 0;
      exit_succ = NULL;
      exit_flag = NULL;
      exit_prob = NULL;
    }

  /* Switch context to the child function to initialize DEST_FN's CFG.  */
  gcc_assert (dest_cfun->cfg == NULL);
  push_cfun (dest_cfun);

  init_empty_tree_cfg ();

  /* Initialize EH information for the new function.  */
  eh_map = NULL;
  new_label_map = NULL;
  if (saved_cfun->eh)
    {
      eh_region region = NULL;
      bool all = false;

      FOR_EACH_VEC_ELT (bbs, i, bb)
	{
	  region = find_outermost_region_in_block (saved_cfun, bb, region,
						   &all);
	  if (all)
	    break;
	}

      init_eh_for_function ();
      if (region != NULL || all)
	{
	  new_label_map = htab_create (17, tree_map_hash, tree_map_eq, free);
	  eh_map = duplicate_eh_regions (saved_cfun, region, 0,
					 new_label_mapper, new_label_map);
	}
    }

  /* Initialize an empty loop tree.  */
  struct loops *loops = ggc_cleared_alloc<struct loops> ();
  init_loops_structure (dest_cfun, loops, 1);
  loops->state = LOOPS_MAY_HAVE_MULTIPLE_LATCHES;
  set_loops_for_fn (dest_cfun, loops);

  vec<loop_p, va_gc> *larray = get_loops (saved_cfun)->copy ();

  /* Move the outlined loop tree part.  */
  num_nodes = bbs.length ();
  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      if (bb->loop_father->header == bb)
	{
	  class loop *this_loop = bb->loop_father;
	  /* Avoid the need to remap SSA names used in nb_iterations.  */
	  free_numbers_of_iterations_estimates (this_loop);
	  class loop *outer = loop_outer (this_loop);
	  if (outer == loop
	      /* If the SESE region contains some bbs ending with
		 a noreturn call, those are considered to belong
		 to the outermost loop in saved_cfun, rather than
		 the entry_bb's loop_father.  */
	      || outer == loop0)
	    {
	      if (outer != loop)
		num_nodes -= this_loop->num_nodes;
	      flow_loop_tree_node_remove (bb->loop_father);
	      flow_loop_tree_node_add (get_loop (dest_cfun, 0), this_loop);
	      fixup_loop_arrays_after_move (saved_cfun, cfun, this_loop);
	    }
	}
      else if (bb->loop_father == loop0 && loop0 != loop)
	num_nodes--;

      /* Remove loop exits from the outlined region.  */
      if (loops_for_fn (saved_cfun)->exits)
	FOR_EACH_EDGE (e, ei, bb->succs)
	  {
	    struct loops *l = loops_for_fn (saved_cfun);
	    loop_exit **slot
	      = l->exits->find_slot_with_hash (e, htab_hash_pointer (e),
					       NO_INSERT);
	    if (slot)
	      l->exits->clear_slot (slot);
	  }
    }

  /* Adjust the number of blocks in the tree root of the outlined part.  */
  get_loop (dest_cfun, 0)->num_nodes = bbs.length () + 2;

  /* Setup a mapping to be used by move_block_to_fn.  */
  loop->aux = current_loops->tree_root;
  loop0->aux = current_loops->tree_root;

  /* Fix up orig_loop_num.  If the block referenced in it has been moved
     to dest_cfun, update orig_loop_num field, otherwise clear it.  */
  signed char *moved_orig_loop_num = NULL;
  for (auto dloop : loops_list (dest_cfun, 0))
    if (dloop->orig_loop_num)
      {
	if (moved_orig_loop_num == NULL)
	  moved_orig_loop_num
	    = XCNEWVEC (signed char, vec_safe_length (larray));
	if ((*larray)[dloop->orig_loop_num] != NULL
	    && get_loop (saved_cfun, dloop->orig_loop_num) == NULL)
	  {
	    if (moved_orig_loop_num[dloop->orig_loop_num] >= 0
		&& moved_orig_loop_num[dloop->orig_loop_num] < 2)
	      moved_orig_loop_num[dloop->orig_loop_num]++;
	    dloop->orig_loop_num = (*larray)[dloop->orig_loop_num]->num;
	  }
	else
	  {
	    moved_orig_loop_num[dloop->orig_loop_num] = -1;
	    dloop->orig_loop_num = 0;
	  }
      }
  pop_cfun ();

  if (moved_orig_loop_num)
    {
      FOR_EACH_VEC_ELT (bbs, i, bb)
	{
	  gimple *g = find_loop_dist_alias (bb);
	  if (g == NULL)
	    continue;

	  int orig_loop_num = tree_to_shwi (gimple_call_arg (g, 0));
	  gcc_assert (orig_loop_num
		      && (unsigned) orig_loop_num < vec_safe_length (larray));
	  if (moved_orig_loop_num[orig_loop_num] == 2)
	    {
	      /* If we have moved both loops with this orig_loop_num into
		 dest_cfun and the LOOP_DIST_ALIAS call is being moved there
		 too, update the first argument.  */
	      gcc_assert ((*larray)[orig_loop_num] != NULL
			  && (get_loop (saved_cfun, orig_loop_num) == NULL));
	      tree t = build_int_cst (integer_type_node,
				      (*larray)[orig_loop_num]->num);
	      gimple_call_set_arg (g, 0, t);
	      update_stmt (g);
	      /* Make sure the following loop will not update it.  */
	      moved_orig_loop_num[orig_loop_num] = 0;
	    }
	  else
	    /* Otherwise at least one of the loops stayed in saved_cfun.
	       Remove the LOOP_DIST_ALIAS call.  */
	    fold_loop_internal_call (g, gimple_call_arg (g, 1));
	}
      FOR_EACH_BB_FN (bb, saved_cfun)
	{
	  gimple *g = find_loop_dist_alias (bb);
	  if (g == NULL)
	    continue;
	  int orig_loop_num = tree_to_shwi (gimple_call_arg (g, 0));
	  gcc_assert (orig_loop_num
		      && (unsigned) orig_loop_num < vec_safe_length (larray));
	  if (moved_orig_loop_num[orig_loop_num])
	    /* This LOOP_DIST_ALIAS call remained in saved_cfun; if at least
	       one of the corresponding loops was moved, remove it.  */
	    fold_loop_internal_call (g, gimple_call_arg (g, 1));
	}
      XDELETEVEC (moved_orig_loop_num);
    }
  ggc_free (larray);

  /* Move blocks from BBS into DEST_CFUN.  */
  gcc_assert (bbs.length () >= 2);
  after = dest_cfun->cfg->x_entry_block_ptr;
  hash_map<tree, tree> vars_map;

  memset (&d, 0, sizeof (d));
  d.orig_block = orig_block;
  d.new_block = DECL_INITIAL (dest_cfun->decl);
  d.from_context = cfun->decl;
  d.to_context = dest_cfun->decl;
  d.vars_map = &vars_map;
  d.new_label_map = new_label_map;
  d.eh_map = eh_map;
  d.remap_decls_p = true;

  if (gimple_in_ssa_p (cfun))
    for (tree arg = DECL_ARGUMENTS (d.to_context); arg; arg = DECL_CHAIN (arg))
      {
	tree narg = make_ssa_name_fn (dest_cfun, arg, gimple_build_nop ());
	set_ssa_default_def (dest_cfun, arg, narg);
	vars_map.put (arg, narg);
      }

  FOR_EACH_VEC_ELT (bbs, i, bb)
    {
      /* No need to update edge counts on the last block.  It has
	 already been updated earlier when we detached the region from
	 the original CFG.  */
      move_block_to_fn (dest_cfun, bb, after, bb != exit_bb, &d);
      after = bb;
    }

  /* Adjust the maximum clique used.  */
  dest_cfun->last_clique = saved_cfun->last_clique;

  loop->aux = NULL;
  loop0->aux = NULL;
  /* Loop sizes are no longer correct, fix them up.  */
  loop->num_nodes -= num_nodes;
  for (class loop *outer = loop_outer (loop);
       outer; outer = loop_outer (outer))
    outer->num_nodes -= num_nodes;
  loop0->num_nodes -= bbs.length () - num_nodes;

  if (saved_cfun->has_simduid_loops || saved_cfun->has_force_vectorize_loops)
    {
      class loop *aloop;
      for (i = 0; vec_safe_iterate (loops->larray, i, &aloop); i++)
	if (aloop != NULL)
	  {
	    if (aloop->simduid)
	      {
		replace_by_duplicate_decl (&aloop->simduid, d.vars_map,
					   d.to_context);
		dest_cfun->has_simduid_loops = true;
	      }
	    if (aloop->force_vectorize)
	      dest_cfun->has_force_vectorize_loops = true;
	  }
    }

  /* Rewire BLOCK_SUBBLOCKS of orig_block.  */
  if (orig_block)
    {
      tree block;
      gcc_assert (BLOCK_SUBBLOCKS (DECL_INITIAL (dest_cfun->decl))
		  == NULL_TREE);
      BLOCK_SUBBLOCKS (DECL_INITIAL (dest_cfun->decl))
	= BLOCK_SUBBLOCKS (orig_block);
      for (block = BLOCK_SUBBLOCKS (orig_block);
	   block; block = BLOCK_CHAIN (block))
	BLOCK_SUPERCONTEXT (block) = DECL_INITIAL (dest_cfun->decl);
      BLOCK_SUBBLOCKS (orig_block) = NULL_TREE;
    }

  replace_block_vars_by_duplicates (DECL_INITIAL (dest_cfun->decl),
				    &vars_map, dest_cfun->decl);

  if (new_label_map)
    htab_delete (new_label_map);
  if (eh_map)
    delete eh_map;

  /* We need to release ssa-names in a defined order, so first find them,
     and then iterate in ascending version order.  */
  bitmap release_names = BITMAP_ALLOC (NULL);
  vars_map.traverse<void *, gather_ssa_name_hash_map_from> (release_names);
  bitmap_iterator bi;
  EXECUTE_IF_SET_IN_BITMAP (release_names, 0, i, bi)
    release_ssa_name (ssa_name (i));
  BITMAP_FREE (release_names);

  /* Rewire the entry and exit blocks.  The successor to the entry
     block turns into the successor of DEST_FN's ENTRY_BLOCK_PTR in
     the child function.  Similarly, the predecessor of DEST_FN's
     EXIT_BLOCK_PTR turns into the predecessor of EXIT_BLOCK_PTR.  We
     need to switch CFUN between DEST_CFUN and SAVED_CFUN so that the
     various CFG manipulation functions get to the right CFG.

     FIXME, this is silly.  The CFG ought to become a parameter to
     these helpers.  */
  push_cfun (dest_cfun);
  ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = entry_bb->count;
  make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), entry_bb, EDGE_FALLTHRU);
  if (exit_bb)
    {
      make_single_succ_edge (exit_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), 0);
      EXIT_BLOCK_PTR_FOR_FN (cfun)->count = exit_bb->count;
    }
  else
    EXIT_BLOCK_PTR_FOR_FN (cfun)->count = profile_count::zero ();
  pop_cfun ();

  /* Back in the original function, the SESE region has disappeared,
     create a new basic block in its place.  */
  bb = create_empty_bb (entry_pred[0]);
  if (current_loops)
    add_bb_to_loop (bb, loop);
  profile_count count = profile_count::zero ();
  for (i = 0; i < num_entry_edges; i++)
    {
      e = make_edge (entry_pred[i], bb, entry_flag[i]);
      e->probability = entry_prob[i];
      count += e->count ();
    }
  bb->count = count;

  for (i = 0; i < num_exit_edges; i++)
    {
      e = make_edge (bb, exit_succ[i], exit_flag[i]);
      e->probability = exit_prob[i];
    }

  set_immediate_dominator (CDI_DOMINATORS, bb, dom_entry);
  FOR_EACH_VEC_ELT (dom_bbs, i, abb)
    set_immediate_dominator (CDI_DOMINATORS, abb, bb);

  if (exit_bb)
    {
      free (exit_prob);
      free (exit_flag);
      free (exit_succ);
    }
  free (entry_prob);
  free (entry_flag);
  free (entry_pred);
  bbs.release ();

  return bb;
}
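
/* move_sese_region_to_fn is the workhorse of function outlining; for
   instance, OpenMP expansion uses it to move the body of a parallel
   region into the child function created for that region.  */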

/* Dump default def DEF to file FILE using FLAGS and indentation
   SPC.  */

static void
dump_default_def (FILE *file, tree def, int spc, dump_flags_t flags)
{
  for (int i = 0; i < spc; ++i)
    fprintf (file, " ");
  dump_ssaname_info_to_file (file, def, spc);

  print_generic_expr (file, TREE_TYPE (def), flags);
  fprintf (file, " ");
  print_generic_expr (file, def, flags);
  fprintf (file, " = ");
  print_generic_expr (file, SSA_NAME_VAR (def), flags);
  fprintf (file, ";\n");
}
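
/* For a default definition of, say, a parameter i of type int, the
   above prints a line along the lines of

     int i_1(D) = i;

   (the SSA version number depends on the function being dumped).  */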

/* Print no_sanitize attribute to FILE for a given attribute VALUE.  */

static void
print_no_sanitize_attr_value (FILE *file, tree value)
{
  unsigned int flags = tree_to_uhwi (value);
  bool first = true;
  for (int i = 0; sanitizer_opts[i].name != NULL; ++i)
    {
      if ((sanitizer_opts[i].flag & flags) == sanitizer_opts[i].flag)
	{
	  if (!first)
	    fprintf (file, " | ");
	  fprintf (file, "%s", sanitizer_opts[i].name);
	  first = false;
	}
    }
}
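
/* E.g. for a no_sanitize attribute covering the address and undefined
   sanitizers, this prints "address | undefined".  */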

/* Dump FUNCTION_DECL FN to file FILE using FLAGS (see TDF_* in
   dumpfile.h).  */

void
dump_function_to_file (tree fndecl, FILE *file, dump_flags_t flags)
{
  tree arg, var, old_current_fndecl = current_function_decl;
  struct function *dsf;
  bool ignore_topmost_bind = false, any_var = false;
  basic_block bb;
  tree chain;
  bool tmclone = (TREE_CODE (fndecl) == FUNCTION_DECL
		  && decl_is_tm_clone (fndecl));
  struct function *fun = DECL_STRUCT_FUNCTION (fndecl);

  tree fntype = TREE_TYPE (fndecl);
  tree attrs[] = { DECL_ATTRIBUTES (fndecl), TYPE_ATTRIBUTES (fntype) };

  for (int i = 0; i != 2; ++i)
    {
      if (!attrs[i])
	continue;

      fprintf (file, "__attribute__((");

      bool first = true;
      tree chain;
      for (chain = attrs[i]; chain; first = false, chain = TREE_CHAIN (chain))
	{
	  if (!first)
	    fprintf (file, ", ");

	  tree name = get_attribute_name (chain);
	  print_generic_expr (file, name, dump_flags);
	  if (TREE_VALUE (chain) != NULL_TREE)
	    {
	      fprintf (file, " (");

	      if (strstr (IDENTIFIER_POINTER (name), "no_sanitize"))
		print_no_sanitize_attr_value (file, TREE_VALUE (chain));
	      else
		print_generic_expr (file, TREE_VALUE (chain), dump_flags);
	      fprintf (file, ")");
	    }
	}

      fprintf (file, "))\n");
    }

  current_function_decl = fndecl;
  if (flags & TDF_GIMPLE)
    {
      static bool hotness_bb_param_printed = false;
      if (profile_info != NULL
	  && !hotness_bb_param_printed)
	{
	  hotness_bb_param_printed = true;
	  fprintf (file,
		   "/* --param=gimple-fe-computed-hot-bb-threshold=%" PRId64
		   " */\n", get_hot_bb_threshold ());
	}

      print_generic_expr (file, TREE_TYPE (TREE_TYPE (fndecl)),
			  dump_flags | TDF_SLIM);
      fprintf (file, " __GIMPLE (%s",
	       (fun->curr_properties & PROP_ssa) ? "ssa"
	       : (fun->curr_properties & PROP_cfg) ? "cfg"
	       : "");

      if (fun && fun->cfg)
	{
	  basic_block bb = ENTRY_BLOCK_PTR_FOR_FN (fun);
	  if (bb->count.initialized_p ())
	    fprintf (file, ",%s(%" PRIu64 ")",
		     profile_quality_as_string (bb->count.quality ()),
		     bb->count.value ());
	  if (dump_flags & TDF_UID)
	    fprintf (file, ")\n%sD_%u (", function_name (fun),
		     DECL_UID (fndecl));
	  else
	    fprintf (file, ")\n%s (", function_name (fun));
	}
    }
  else
    {
      print_generic_expr (file, TREE_TYPE (fntype), dump_flags);
      if (dump_flags & TDF_UID)
	fprintf (file, " %sD.%u %s(", function_name (fun), DECL_UID (fndecl),
		 tmclone ? "[tm-clone] " : "");
      else
	fprintf (file, " %s %s(", function_name (fun),
		 tmclone ? "[tm-clone] " : "");
    }

  arg = DECL_ARGUMENTS (fndecl);
  while (arg)
    {
      print_generic_expr (file, TREE_TYPE (arg), dump_flags);
      fprintf (file, " ");
      print_generic_expr (file, arg, dump_flags);
      if (DECL_CHAIN (arg))
	fprintf (file, ", ");
      arg = DECL_CHAIN (arg);
    }
  fprintf (file, ")\n");

  dsf = DECL_STRUCT_FUNCTION (fndecl);
  if (dsf && (flags & TDF_EH))
    dump_eh_tree (file, dsf);

  if (flags & TDF_RAW && !gimple_has_body_p (fndecl))
    {
      dump_node (fndecl, TDF_SLIM | flags, file);
      current_function_decl = old_current_fndecl;
      return;
    }

  /* When GIMPLE is lowered, the variables are no longer available in
     BIND_EXPRs, so display them separately.  */
  if (fun && fun->decl == fndecl && (fun->curr_properties & PROP_gimple_lcf))
    {
      unsigned ix;
      ignore_topmost_bind = true;

      fprintf (file, "{\n");
      if (gimple_in_ssa_p (fun)
	  && (flags & TDF_ALIAS))
	{
	  for (arg = DECL_ARGUMENTS (fndecl); arg != NULL;
	       arg = DECL_CHAIN (arg))
	    {
	      tree def = ssa_default_def (fun, arg);
	      if (def)
		dump_default_def (file, def, 2, flags);
	    }

	  tree res = DECL_RESULT (fun->decl);
	  if (res != NULL_TREE
	      && DECL_BY_REFERENCE (res))
	    {
	      tree def = ssa_default_def (fun, res);
	      if (def)
		dump_default_def (file, def, 2, flags);
	    }

	  tree static_chain = fun->static_chain_decl;
	  if (static_chain != NULL_TREE)
	    {
	      tree def = ssa_default_def (fun, static_chain);
	      if (def)
		dump_default_def (file, def, 2, flags);
	    }
	}

      if (!vec_safe_is_empty (fun->local_decls))
	FOR_EACH_LOCAL_DECL (fun, ix, var)
	  {
	    print_generic_decl (file, var, flags);
	    fprintf (file, "\n");

	    any_var = true;
	  }

      tree name;

      if (gimple_in_ssa_p (fun))
	FOR_EACH_SSA_NAME (ix, name, fun)
	  {
	    if (!SSA_NAME_VAR (name)
		/* SSA names with decls without a name still get
		   dumped as _N; list those explicitly as well even
		   though we've dumped the decl declaration as D.xxx
		   above.  */
		|| !SSA_NAME_IDENTIFIER (name))
	      {
		fprintf (file, " ");
		print_generic_expr (file, TREE_TYPE (name), flags);
		fprintf (file, " ");
		print_generic_expr (file, name, flags);
		fprintf (file, ";\n");

		any_var = true;
	      }
	  }
    }

  if (fun && fun->decl == fndecl
      && fun->cfg
      && basic_block_info_for_fn (fun))
    {
      /* If the CFG has been built, emit a CFG-based dump.  */
      if (!ignore_topmost_bind)
	fprintf (file, "{\n");

      if (any_var && n_basic_blocks_for_fn (fun))
	fprintf (file, "\n");

      FOR_EACH_BB_FN (bb, fun)
	dump_bb (file, bb, 2, flags);

      fprintf (file, "}\n");
    }
  else if (fun && (fun->curr_properties & PROP_gimple_any))
    {
      /* The function is now in GIMPLE form but the CFG has not been
	 built yet.  Emit the single sequence of GIMPLE statements
	 that make up its body.  */
      gimple_seq body = gimple_body (fndecl);

      if (gimple_seq_first_stmt (body)
	  && gimple_seq_first_stmt (body) == gimple_seq_last_stmt (body)
	  && gimple_code (gimple_seq_first_stmt (body)) == GIMPLE_BIND)
	print_gimple_seq (file, body, 0, flags);
      else
	{
	  if (!ignore_topmost_bind)
	    fprintf (file, "{\n");

	  if (any_var)
	    fprintf (file, "\n");

	  print_gimple_seq (file, body, 2, flags);
	  fprintf (file, "}\n");
	}
    }
  else
    {
      int indent;

      /* Make a tree based dump.  */
      chain = DECL_SAVED_TREE (fndecl);
      if (chain && TREE_CODE (chain) == BIND_EXPR)
	{
	  if (ignore_topmost_bind)
	    {
	      chain = BIND_EXPR_BODY (chain);
	      indent = 2;
	    }
	  else
	    indent = 0;
	}
      else
	{
	  if (!ignore_topmost_bind)
	    {
	      fprintf (file, "{\n");
	      /* No topmost bind, pretend it's ignored for later.  */
	      ignore_topmost_bind = true;
	    }
	  indent = 2;
	}

      if (any_var)
	fprintf (file, "\n");

      print_generic_stmt_indented (file, chain, flags, indent);
      if (ignore_topmost_bind)
	fprintf (file, "}\n");
    }

  if (flags & TDF_ENUMERATE_LOCALS)
    dump_enumerated_decls (file, flags);
  fprintf (file, "\n\n");

  current_function_decl = old_current_fndecl;
}

/* Dump FUNCTION_DECL FN to stderr using FLAGS (see TDF_* in tree.h).  */

DEBUG_FUNCTION void
debug_function (tree fn, dump_flags_t flags)
{
  dump_function_to_file (fn, stderr, flags);
}


/* Print on FILE the indexes for the predecessors of basic_block BB.  */

static void
print_pred_bbs (FILE *file, basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    fprintf (file, "bb_%d ", e->src->index);
}


/* Print on FILE the indexes for the successors of basic_block BB.  */

static void
print_succ_bbs (FILE *file, basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    fprintf (file, "bb_%d ", e->dest->index);
}

/* Print to FILE the basic block BB following the VERBOSITY level.  */

void
print_loops_bb (FILE *file, basic_block bb, int indent, int verbosity)
{
  char *s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  /* Print basic_block's header.  */
  if (verbosity >= 2)
    {
      fprintf (file, "%s bb_%d (preds = {", s_indent, bb->index);
      print_pred_bbs (file, bb);
      fprintf (file, "}, succs = {");
      print_succ_bbs (file, bb);
      fprintf (file, "})\n");
    }

  /* Print basic_block's body.  */
  if (verbosity >= 3)
    {
      fprintf (file, "%s {\n", s_indent);
      dump_bb (file, bb, indent + 4, TDF_VOPS|TDF_MEMSYMS);
      fprintf (file, "%s }\n", s_indent);
    }
}

/* Print loop information.  */

void
print_loop_info (FILE *file, const class loop *loop, const char *prefix)
{
  if (loop->can_be_parallel)
    fprintf (file, ", can_be_parallel");
  if (loop->warned_aggressive_loop_optimizations)
    fprintf (file, ", warned_aggressive_loop_optimizations");
  if (loop->dont_vectorize)
    fprintf (file, ", dont_vectorize");
  if (loop->force_vectorize)
    fprintf (file, ", force_vectorize");
  if (loop->in_oacc_kernels_region)
    fprintf (file, ", in_oacc_kernels_region");
  if (loop->finite_p)
    fprintf (file, ", finite_p");
  if (loop->unroll)
    fprintf (file, "\n%sunroll %d", prefix, loop->unroll);
  if (loop->nb_iterations)
    {
      fprintf (file, "\n%sniter ", prefix);
      print_generic_expr (file, loop->nb_iterations);
    }

  if (loop->any_upper_bound)
    {
      fprintf (file, "\n%supper_bound ", prefix);
      print_decu (loop->nb_iterations_upper_bound, file);
    }
  if (loop->any_likely_upper_bound)
    {
      fprintf (file, "\n%slikely_upper_bound ", prefix);
      print_decu (loop->nb_iterations_likely_upper_bound, file);
    }

  if (loop->any_estimate)
    {
      fprintf (file, "\n%sestimate ", prefix);
      print_decu (loop->nb_iterations_estimate, file);
    }
  bool reliable;
  sreal iterations;
  if (loop->num
      && expected_loop_iterations_by_profile (loop, &iterations, &reliable))
    {
      fprintf (file, "\n%siterations by profile: %f (%s%s) entry count:",
	       prefix, iterations.to_double (),
	       reliable ? "reliable" : "unreliable",
	       maybe_flat_loop_profile (loop) ? ", maybe flat" : "");
      loop_count_in (loop).dump (file, cfun);
    }
}

static void print_loop_and_siblings (FILE *, class loop *, int, int);

/* Pretty print LOOP on FILE, indented INDENT spaces.  Following
   VERBOSITY level this outputs the contents of the loop, or just its
   structure.  */

static void
print_loop (FILE *file, class loop *loop, int indent, int verbosity)
{
  char *s_indent;
  basic_block bb;

  if (loop == NULL)
    return;

  s_indent = (char *) alloca ((size_t) indent + 1);
  memset ((void *) s_indent, ' ', (size_t) indent);
  s_indent[indent] = '\0';

  /* Print loop's header.  */
  fprintf (file, "%sloop_%d (", s_indent, loop->num);
  if (loop->header)
    fprintf (file, "header = %d", loop->header->index);
  else
    {
      fprintf (file, "deleted)\n");
      return;
    }
  if (loop->latch)
    fprintf (file, ", latch = %d", loop->latch->index);
  else
    fprintf (file, ", multiple latches");
  print_loop_info (file, loop, s_indent);
  fprintf (file, ")\n");

  /* Print loop's body.  */
  if (verbosity >= 1)
    {
      fprintf (file, "%s{\n", s_indent);
      FOR_EACH_BB_FN (bb, cfun)
	if (bb->loop_father == loop)
	  print_loops_bb (file, bb, indent, verbosity);

      print_loop_and_siblings (file, loop->inner, indent + 2, verbosity);
      fprintf (file, "%s}\n", s_indent);
    }
}

/* Print the LOOP and its sibling loops on FILE, indented INDENT
   spaces.  Following VERBOSITY level this outputs the contents of the
   loop, or just its structure.  */

static void
print_loop_and_siblings (FILE *file, class loop *loop, int indent,
			 int verbosity)
{
  if (loop == NULL)
    return;

  print_loop (file, loop, indent, verbosity);
  print_loop_and_siblings (file, loop->next, indent, verbosity);
}

/* Follow a CFG edge from the entry point of the program, and on entry
   of a loop, pretty print the loop structure on FILE.  */

void
print_loops (FILE *file, int verbosity)
{
  basic_block bb;

  bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  fprintf (file, "\nLoops in function: %s\n", current_function_name ());
  if (bb && bb->loop_father)
    print_loop_and_siblings (file, bb->loop_father, 0, verbosity);
}

/* Dump a loop.  */

DEBUG_FUNCTION void
debug (class loop &ref)
{
  print_loop (stderr, &ref, 0, /*verbosity*/0);
}

DEBUG_FUNCTION void
debug (class loop *ptr)
{
  if (ptr)
    debug (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}

/* Dump a loop verbosely.  */

DEBUG_FUNCTION void
debug_verbose (class loop &ref)
{
  print_loop (stderr, &ref, 0, /*verbosity*/3);
}

DEBUG_FUNCTION void
debug_verbose (class loop *ptr)
{
  if (ptr)
    debug_verbose (*ptr);
  else
    fprintf (stderr, "<nil>\n");
}


/* Debugging loops structure at tree level, at some VERBOSITY level.  */

DEBUG_FUNCTION void
debug_loops (int verbosity)
{
  print_loops (stderr, verbosity);
}

/* Print on stderr the code of LOOP, at some VERBOSITY level.  */

DEBUG_FUNCTION void
debug_loop (class loop *loop, int verbosity)
{
  print_loop (stderr, loop, 0, verbosity);
}

/* Print on stderr the code of loop number NUM, at some VERBOSITY
   level.  */

DEBUG_FUNCTION void
debug_loop_num (unsigned num, int verbosity)
{
  debug_loop (get_loop (cfun, num), verbosity);
}

/* Return true if BB ends with a call, possibly followed by some
   instructions that must stay with the call.  Return false
   otherwise.  */

static bool
gimple_block_ends_with_call_p (basic_block bb)
{
  gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
  return !gsi_end_p (gsi) && is_gimple_call (gsi_stmt (gsi));
}


/* Return true if BB ends with a conditional branch.  Return false
   otherwise.  */

static bool
gimple_block_ends_with_condjump_p (const_basic_block bb)
{
  return safe_is_a <gcond *> (*gsi_last_bb (const_cast <basic_block> (bb)));
}


/* Return true if statement T may terminate execution of BB in ways not
   explicitly represented in the CFG.  */

bool
stmt_can_terminate_bb_p (gimple *t)
{
  tree fndecl = NULL_TREE;
  int call_flags = 0;

  /* An EH exception not handled internally terminates execution of the
     whole function.  */
  if (stmt_can_throw_external (cfun, t))
    return true;

  /* NORETURN and LONGJMP calls already have an edge to exit.
     CONST and PURE calls do not need one.
     We don't currently check for CONST and PURE here, although
     it would be a good idea, because those attributes are
     figured out from the RTL in mark_constant_function, and
     the counter incrementation code from -fprofile-arcs
     leads to different results from -fbranch-probabilities.  */
  if (is_gimple_call (t))
    {
      fndecl = gimple_call_fndecl (t);
      call_flags = gimple_call_flags (t);
    }

  if (is_gimple_call (t)
      && fndecl
      && fndecl_built_in_p (fndecl)
      && (call_flags & ECF_NOTHROW)
      && !(call_flags & ECF_RETURNS_TWICE)
      /* fork() doesn't really return twice, but the effect of
	 wrapping it in __gcov_fork() which calls __gcov_dump() and
	 __gcov_reset() and clears the counters before forking has the same
	 effect as returning twice.  Force a fake edge.  */
      && !fndecl_built_in_p (fndecl, BUILT_IN_FORK))
    return false;

  if (is_gimple_call (t))
    {
      edge_iterator ei;
      edge e;
      basic_block bb;

      if (call_flags & (ECF_PURE | ECF_CONST)
	  && !(call_flags & ECF_LOOPING_CONST_OR_PURE))
	return false;

      /* A function call may do a longjmp, terminate the program or do
	 other things.  Special-case noreturn calls that have non-abnormal
	 edges out, as in this case the fact is sufficiently represented
	 by the lack of edges out of T.  */
      if (!(call_flags & ECF_NORETURN))
	return true;

      bb = gimple_bb (t);
      FOR_EACH_EDGE (e, ei, bb->succs)
	if ((e->flags & EDGE_FAKE) == 0)
	  return true;
    }

  if (gasm *asm_stmt = dyn_cast <gasm *> (t))
    if (gimple_asm_volatile_p (asm_stmt) || gimple_asm_input_p (asm_stmt))
      return true;

  return false;
}
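
/* Typical statements for which the above returns true: calls that may
   throw externally, calls that may longjmp or call exit, and volatile
   asms, all of which may transfer control in ways not represented by
   the outgoing CFG edges of BB.  */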


/* Add fake edges to the function exit for any non-constant and
   non-noreturn calls (or noreturn calls with EH/abnormal edges), and
   for volatile inline assembly, in the bitmap of blocks specified by
   BLOCKS or in the whole CFG if BLOCKS is zero.  Return the number of
   blocks that were split.

   The goal is to expose cases in which entering a basic block does
   not imply that all subsequent instructions must be executed.  */

static int
gimple_flow_call_edges_add (sbitmap blocks)
{
  int i;
  int blocks_split = 0;
  int last_bb = last_basic_block_for_fn (cfun);
  bool check_last_block = false;

  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return 0;

  if (! blocks)
    check_last_block = true;
  else
    check_last_block = bitmap_bit_p (blocks,
				     EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);

  /* In the last basic block, before epilogue generation, there will be
     a fallthru edge to EXIT.  Special care is required if the last insn
     of the last basic block is a call because make_edge folds duplicate
     edges, which would result in the fallthru edge also being marked
     fake, which would result in the fallthru edge being removed by
     remove_fake_edges, which would result in an invalid CFG.

     Moreover, we can't elide the outgoing fake edge, since the block
     profiler needs to take this into account in order to solve the minimal
     spanning tree in the case that the call doesn't return.

     Handle this by adding a dummy instruction in a new last basic block.  */
  if (check_last_block)
    {
      basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
      gimple_stmt_iterator gsi = gsi_last_nondebug_bb (bb);
      gimple *t = NULL;

      if (!gsi_end_p (gsi))
	t = gsi_stmt (gsi);

      if (t && stmt_can_terminate_bb_p (t))
	{
	  edge e;

	  e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
	  if (e)
	    {
	      gsi_insert_on_edge (e, gimple_build_nop ());
	      gsi_commit_edge_inserts ();
	    }
	}
    }

  /* Now add fake edges to the function exit for any non constant
     calls since there is no way that we can determine if they will
     return or not...  */
  for (i = 0; i < last_bb; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      gimple_stmt_iterator gsi;
      gimple *stmt, *last_stmt;

      if (!bb)
	continue;

      if (blocks && !bitmap_bit_p (blocks, i))
	continue;

      gsi = gsi_last_nondebug_bb (bb);
      if (!gsi_end_p (gsi))
	{
	  last_stmt = gsi_stmt (gsi);
	  do
	    {
	      stmt = gsi_stmt (gsi);
	      if (stmt_can_terminate_bb_p (stmt))
		{
		  edge e;

		  /* The handling above of the final block before the
		     epilogue should be enough to verify that there is
		     no edge to the exit block in CFG already.
		     Calling make_edge in such case would cause us to
		     mark that edge as fake and remove it later.  */
		  if (flag_checking && stmt == last_stmt)
		    {
		      e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
		      gcc_assert (e == NULL);
		    }

		  /* Note that the following may create a new basic block
		     and renumber the existing basic blocks.  */
		  if (stmt != last_stmt)
		    {
		      e = split_block (bb, stmt);
		      if (e)
			blocks_split++;
		    }
		  e = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
		  e->probability = profile_probability::guessed_never ();
		}
	      gsi_prev (&gsi);
	    }
	  while (!gsi_end_p (gsi));
	}
    }

  if (blocks_split)
    checking_verify_flow_info ();

  return blocks_split;
}
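
/* E.g. for a block containing

     foo ();
     x_1 = 1;

   where foo may not return, the block is split after the call and the
   first half gets a fake edge to EXIT, so that entering the block no
   longer implies that the assignment to x_1 executes.  */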

/* Removes edge E and all the blocks dominated by it, and updates dominance
   information.  The IL in E->src needs to be updated separately.
   If dominance info is not available, only the edge E is removed.  */

void
remove_edge_and_dominated_blocks (edge e)
{
  vec<basic_block> bbs_to_fix_dom = vNULL;
  edge f;
  edge_iterator ei;
  bool none_removed = false;
  unsigned i;
  basic_block bb, dbb;
  bitmap_iterator bi;

  /* If we are removing a path inside a non-root loop, that may change
     loop ownership of blocks or remove loops.  Mark loops for fixup.  */
  if (current_loops
      && loop_outer (e->src->loop_father) != NULL
      && e->src->loop_father == e->dest->loop_father)
    loops_state_set (LOOPS_NEED_FIXUP);

  if (!dom_info_available_p (CDI_DOMINATORS))
    {
      remove_edge (e);
      return;
    }

  /* No updating is needed for edges to exit.  */
  if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (cfgcleanup_altered_bbs)
	bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
      remove_edge (e);
      return;
    }

  /* First, we find the basic blocks to remove.  If E->dest has a predecessor
     that is not dominated by E->dest, then this set is empty.  Otherwise,
     all the basic blocks dominated by E->dest are removed.

     Also, to DF_IDOM we store the immediate dominators of the blocks in
     the dominance frontier of E (i.e., of the successors of the
     removed blocks, if there are any, and of E->dest otherwise).  */
  FOR_EACH_EDGE (f, ei, e->dest->preds)
    {
      if (f == e)
	continue;

      if (!dominated_by_p (CDI_DOMINATORS, f->src, e->dest))
	{
	  none_removed = true;
	  break;
	}
    }

  auto_bitmap df, df_idom;
  auto_vec<basic_block> bbs_to_remove;
  if (none_removed)
    bitmap_set_bit (df_idom,
		    get_immediate_dominator (CDI_DOMINATORS, e->dest)->index);
  else
    {
      bbs_to_remove = get_all_dominated_blocks (CDI_DOMINATORS, e->dest);
      FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
	{
	  FOR_EACH_EDGE (f, ei, bb->succs)
	    {
	      if (f->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
		bitmap_set_bit (df, f->dest->index);
	    }
	}
      FOR_EACH_VEC_ELT (bbs_to_remove, i, bb)
	bitmap_clear_bit (df, bb->index);

      EXECUTE_IF_SET_IN_BITMAP (df, 0, i, bi)
	{
	  bb = BASIC_BLOCK_FOR_FN (cfun, i);
	  bitmap_set_bit (df_idom,
			  get_immediate_dominator (CDI_DOMINATORS, bb)->index);
	}
    }

  if (cfgcleanup_altered_bbs)
    {
      /* Record the set of the altered basic blocks.  */
      bitmap_set_bit (cfgcleanup_altered_bbs, e->src->index);
      bitmap_ior_into (cfgcleanup_altered_bbs, df);
    }

  /* Remove E and the cancelled blocks.  */
  if (none_removed)
    remove_edge (e);
  else
    {
      /* Walk backwards so as to get a chance to substitute all
	 released DEFs into debug stmts.  See
	 eliminate_unnecessary_stmts() in tree-ssa-dce.cc for more
	 details.  */
      for (i = bbs_to_remove.length (); i-- > 0; )
	delete_basic_block (bbs_to_remove[i]);
    }

  /* Update the dominance information.  The immediate dominator may change
     only for blocks whose immediate dominator belongs to DF_IDOM:

     Suppose that idom(X) = Y before removal of E and idom(X) != Y after the
     removal.  Let Z be an arbitrary block such that idom(Z) = Y and
     Z dominates X after the removal.  Before removal, there exists a path P
     from Y to X that avoids Z.  Let F be the last edge on P that is
     removed, and let W = F->dest.  Before removal, idom(W) = Y (since Y
     dominates W, and because of P, Z does not dominate W), and W belongs to
     the dominance frontier of E.  Therefore, Y belongs to DF_IDOM.  */
  EXECUTE_IF_SET_IN_BITMAP (df_idom, 0, i, bi)
    {
      bb = BASIC_BLOCK_FOR_FN (cfun, i);
      for (dbb = first_dom_son (CDI_DOMINATORS, bb);
	   dbb;
	   dbb = next_dom_son (CDI_DOMINATORS, dbb))
	bbs_to_fix_dom.safe_push (dbb);
    }

  iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);

  bbs_to_fix_dom.release ();
}

/* Purge dead EH edges from basic block BB.  */

bool
gimple_purge_dead_eh_edges (basic_block bb)
{
  bool changed = false;
  edge e;
  edge_iterator ei;
  gimple *stmt = *gsi_last_bb (bb);

  if (stmt && stmt_can_throw_internal (cfun, stmt))
    return false;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->flags & EDGE_EH)
	{
	  remove_edge_and_dominated_blocks (e);
	  changed = true;
	}
      else
	ei_next (&ei);
    }

  return changed;
}

/* Purge dead EH edges from the basic blocks listed in BLOCKS.  */

bool
gimple_purge_all_dead_eh_edges (const_bitmap blocks)
{
  bool changed = false;
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);

      /* Earlier gimple_purge_dead_eh_edges could have removed
	 this basic block already.  */
      gcc_assert (bb || changed);
      if (bb != NULL)
	changed |= gimple_purge_dead_eh_edges (bb);
    }

  return changed;
}

/* Purge dead abnormal call edges from basic block BB.  */

bool
gimple_purge_dead_abnormal_call_edges (basic_block bb)
{
  bool changed = false;
  edge e;
  edge_iterator ei;
  gimple *stmt = *gsi_last_bb (bb);

  if (stmt && stmt_can_make_abnormal_goto (stmt))
    return false;

  for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
    {
      if (e->flags & EDGE_ABNORMAL)
	{
	  if (e->flags & EDGE_FALLTHRU)
	    e->flags &= ~EDGE_ABNORMAL;
	  else
	    remove_edge_and_dominated_blocks (e);
	  changed = true;
	}
      else
	ei_next (&ei);
    }

  return changed;
}

/* Purge dead abnormal call edges from the basic blocks listed in
   BLOCKS.  */

bool
gimple_purge_all_dead_abnormal_call_edges (const_bitmap blocks)
{
  bool changed = false;
  unsigned i;
  bitmap_iterator bi;

  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, i, bi)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);

      /* Earlier gimple_purge_dead_abnormal_call_edges could have removed
	 this basic block already.  */
      gcc_assert (bb || changed);
      if (bb != NULL)
	changed |= gimple_purge_dead_abnormal_call_edges (bb);
    }

  return changed;
}

/* This function is called whenever a new edge is created or
   redirected.  */

static void
gimple_execute_on_growing_pred (edge e)
{
  basic_block bb = e->dest;

  if (!gimple_seq_empty_p (phi_nodes (bb)))
    reserve_phi_args_for_new_edge (bb);
}

/* This function is called immediately before edge E is removed from
   the edge vector E->dest->preds.  */

static void
gimple_execute_on_shrinking_pred (edge e)
{
  if (!gimple_seq_empty_p (phi_nodes (e->dest)))
    remove_phi_args (e);
}

/*---------------------------------------------------------------------------
  Helper functions for Loop versioning
  ---------------------------------------------------------------------------*/

/* Adjust phi nodes for 'first' basic block.  'second' basic block is a copy
   of 'first'.  Both of them are dominated by 'new_head' basic block.  When
   'new_head' was created by splitting 'second's incoming edge, it received
   phi arguments on that edge from split_edge().  Later, an additional edge
   'e' was created to connect 'new_head' and 'first'.  Now this routine adds
   on edge 'e' the phi args that the new_head-to-second edge received as
   part of edge splitting.  */

static void
gimple_lv_adjust_loop_header_phi (basic_block first, basic_block second,
				  basic_block new_head, edge e)
{
  gphi *phi1, *phi2;
  gphi_iterator psi1, psi2;
  tree def;
  edge e2 = find_edge (new_head, second);

  /* Because NEW_HEAD has been created by splitting SECOND's incoming
     edge, we should always have an edge from NEW_HEAD to SECOND.  */
  gcc_assert (e2 != NULL);

  /* Browse all 'second' basic block phi nodes and add phi args to
     edge 'e' for 'first' head.  PHI args are always in correct order.  */

  for (psi2 = gsi_start_phis (second),
       psi1 = gsi_start_phis (first);
       !gsi_end_p (psi2) && !gsi_end_p (psi1);
       gsi_next (&psi2), gsi_next (&psi1))
    {
      phi1 = psi1.phi ();
      phi2 = psi2.phi ();
      def = PHI_ARG_DEF (phi2, e2->dest_idx);
      add_phi_arg (phi1, def, e, gimple_phi_arg_location_from_edge (phi2, e2));
    }
}


/* Adds an if-else statement to COND_BB with condition COND_EXPR.
   SECOND_HEAD is the destination of the THEN part and FIRST_HEAD is
   the destination of the ELSE part.  */

static void
gimple_lv_add_condition_to_bb (basic_block first_head ATTRIBUTE_UNUSED,
			       basic_block second_head ATTRIBUTE_UNUSED,
			       basic_block cond_bb, void *cond_e)
{
  gimple_stmt_iterator gsi;
  gimple *new_cond_expr;
  tree cond_expr = (tree) cond_e;
  edge e0;

  /* Build new conditional expr.  */
  gsi = gsi_last_bb (cond_bb);

  cond_expr = force_gimple_operand_gsi_1 (&gsi, cond_expr,
					  is_gimple_condexpr_for_cond,
					  NULL_TREE, false,
					  GSI_CONTINUE_LINKING);
  new_cond_expr = gimple_build_cond_from_tree (cond_expr,
					       NULL_TREE, NULL_TREE);

  /* Add new cond in cond_bb.  */
  gsi_insert_after (&gsi, new_cond_expr, GSI_NEW_STMT);

  /* Adjust edges appropriately to connect new head with first head
     as well as second head.  */
  e0 = single_succ_edge (cond_bb);
  e0->flags &= ~EDGE_FALLTHRU;
  e0->flags |= EDGE_FALSE_VALUE;
}


/* Do book-keeping of basic block BB for the profile consistency checker.
   Store the counting in RECORD.  */

static void
gimple_account_profile_record (basic_block bb,
			       struct profile_record *record)
{
  gimple_stmt_iterator i;
  for (i = gsi_start_nondebug_after_labels_bb (bb); !gsi_end_p (i);
       gsi_next_nondebug (&i))
    {
      record->size
	+= estimate_num_insns (gsi_stmt (i), &eni_size_weights);
      if (profile_info)
	{
	  if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().initialized_p ()
	      && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().nonzero_p ()
	      && bb->count.ipa ().initialized_p ())
	    record->time
	      += estimate_num_insns (gsi_stmt (i),
				     &eni_time_weights)
		 * bb->count.ipa ().to_gcov_type ();
	}
      else if (bb->count.initialized_p ()
	       && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.initialized_p ())
	record->time
	  += estimate_num_insns (gsi_stmt (i),
				 &eni_time_weights)
	     * bb->count.to_sreal_scale
		 (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count).to_double ();
      else
	record->time
	  += estimate_num_insns (gsi_stmt (i), &eni_time_weights);
    }
}
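
/* In other words, RECORD->time accumulates, for each statement,
   estimate_num_insns (stmt, &eni_time_weights) weighted by the
   block's execution count relative to the function entry whenever
   profile information permits, and unweighted otherwise.  */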

struct cfg_hooks gimple_cfg_hooks = {
  "gimple",
  gimple_verify_flow_info,
  gimple_dump_bb, /* dump_bb  */
  gimple_dump_bb_for_graph, /* dump_bb_for_graph  */
  create_bb, /* create_basic_block  */
  gimple_redirect_edge_and_branch, /* redirect_edge_and_branch  */
  gimple_redirect_edge_and_branch_force, /* redirect_edge_and_branch_force  */
  gimple_can_remove_branch_p, /* can_remove_branch_p  */
  remove_bb, /* delete_basic_block  */
  gimple_split_block, /* split_block  */
  gimple_move_block_after, /* move_block_after  */
  gimple_can_merge_blocks_p, /* can_merge_blocks_p  */
  gimple_merge_blocks, /* merge_blocks  */
  gimple_predict_edge, /* predict_edge  */
  gimple_predicted_by_p, /* predicted_by_p  */
  gimple_can_duplicate_bb_p, /* can_duplicate_block_p  */
  gimple_duplicate_bb, /* duplicate_block  */
  gimple_split_edge, /* split_edge  */
  gimple_make_forwarder_block, /* make_forwarder_block  */
  NULL, /* tidy_fallthru_edge  */
  NULL, /* force_nonfallthru  */
  gimple_block_ends_with_call_p, /* block_ends_with_call_p  */
  gimple_block_ends_with_condjump_p, /* block_ends_with_condjump_p  */
  gimple_flow_call_edges_add, /* flow_call_edges_add  */
  gimple_execute_on_growing_pred, /* execute_on_growing_pred  */
  gimple_execute_on_shrinking_pred, /* execute_on_shrinking_pred  */
  gimple_duplicate_loop_body_to_header_edge, /* duplicate loop for trees  */
  gimple_lv_add_condition_to_bb, /* lv_add_condition_to_bb  */
  gimple_lv_adjust_loop_header_phi, /* lv_adjust_loop_header_phi  */
  extract_true_false_edges_from_block, /* extract_cond_bb_edges  */
  flush_pending_stmts, /* flush_pending_stmts  */
  gimple_empty_block_p, /* block_empty_p  */
  gimple_split_block_before_cond_jump, /* split_block_before_cond_jump  */
  gimple_account_profile_record,
};


/* Split all critical edges.  Split some extra (not necessarily critical)
   edges if FOR_EDGE_INSERTION_P is true.  */

unsigned int
split_critical_edges (bool for_edge_insertion_p /* = false */)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* split_edge can redirect edges out of SWITCH_EXPRs, which can get
     expensive.  So we want to enable recording of edge to CASE_LABEL_EXPR
     mappings around the calls to split_edge.  */
  start_recording_case_labels ();
  FOR_ALL_BB_FN (bb, cfun)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
	{
	  if (EDGE_CRITICAL_P (e) && !(e->flags & EDGE_ABNORMAL))
	    split_edge (e);
	  /* PRE inserts statements on edges and expects that,
	     since split_critical_edges was done beforehand, committing edge
	     insertions will not split more edges.  In addition to critical
	     edges we must split edges that have multiple successors and
	     end in control flow statements, such as RESX.
	     Go ahead and split them too.  This matches the logic in
	     gimple_find_edge_insert_loc.  */
	  else if (for_edge_insertion_p
		   && (!single_pred_p (e->dest)
		       || !gimple_seq_empty_p (phi_nodes (e->dest))
		       || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
		   && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
		   && !(e->flags & EDGE_ABNORMAL))
	    {
	      gimple_stmt_iterator gsi;

	      gsi = gsi_last_bb (e->src);
	      if (!gsi_end_p (gsi)
		  && stmt_ends_bb_p (gsi_stmt (gsi))
		  && (gimple_code (gsi_stmt (gsi)) != GIMPLE_RETURN
		      && !gimple_call_builtin_p (gsi_stmt (gsi),
						 BUILT_IN_RETURN)))
		split_edge (e);
	    }
	}
    }
  end_recording_case_labels ();
  return 0;
}
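
/* For reference, a critical edge is one whose source has multiple
   successors and whose destination has multiple predecessors, such
   as A->C below:

	  A     B
	 / \   /
	X   \ /
	     C

   Splitting A->C inserts a new empty block between A and C, giving a
   place where statements can be inserted so that they execute only
   when control flows along that particular edge.  */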

namespace {

const pass_data pass_data_split_crit_edges =
{
  GIMPLE_PASS, /* type */
  "crited", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SPLIT_EDGES, /* tv_id */
  PROP_cfg, /* properties_required */
  PROP_no_crit_edges, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_split_crit_edges : public gimple_opt_pass
{
public:
  pass_split_crit_edges (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_split_crit_edges, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute (function *) final override
  {
    return split_critical_edges ();
  }

  opt_pass * clone () final override
  {
    return new pass_split_crit_edges (m_ctxt);
  }
}; // class pass_split_crit_edges

} // anon namespace

gimple_opt_pass *
make_pass_split_crit_edges (gcc::context *ctxt)
{
  return new pass_split_crit_edges (ctxt);
}


/* Insert COND expression, which is a GIMPLE_COND, after STMT
   in basic block BB, splitting the block as appropriate and
   creating a new conditionally executed basic block.
   Update the profile so the new bb is visited with probability PROB.
   Return the created basic block.  */

basic_block
insert_cond_bb (basic_block bb, gimple *stmt, gimple *cond,
		profile_probability prob)
{
  edge fall = split_block (bb, stmt);
  gimple_stmt_iterator iter = gsi_last_bb (bb);
  basic_block new_bb;

  /* Insert the cond statement.  */
  gcc_assert (gimple_code (cond) == GIMPLE_COND);
  if (gsi_end_p (iter))
    gsi_insert_before (&iter, cond, GSI_CONTINUE_LINKING);
  else
    gsi_insert_after (&iter, cond, GSI_CONTINUE_LINKING);

  /* Create the conditionally executed block.  */
  new_bb = create_empty_bb (bb);
  edge e = make_edge (bb, new_bb, EDGE_TRUE_VALUE);
  e->probability = prob;
  new_bb->count = e->count ();
  make_single_succ_edge (new_bb, fall->dest, EDGE_FALLTHRU);

  /* Fix the edge for the split bb.  */
  fall->flags = EDGE_FALSE_VALUE;
  fall->probability -= e->probability;

  /* Update dominance info.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      set_immediate_dominator (CDI_DOMINATORS, new_bb, bb);
      set_immediate_dominator (CDI_DOMINATORS, fall->dest, bb);
    }

  /* Update loop info.  */
  if (current_loops)
    add_bb_to_loop (new_bb, bb->loop_father);

  return new_bb;
}
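
/* For example (a sketch; block names are illustrative), given

	BB: s1; STMT; s2;

   insert_cond_bb (BB, STMT, COND, PROB) produces

	BB:	s1; STMT; COND		(true edge taken with PROB)
	NEW_BB:	empty, conditionally executed
	REST:	s2;

   with edges BB->NEW_BB (true), BB->REST (false) and NEW_BB->REST;
   the caller then typically fills NEW_BB with statements.  */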


/* Given a basic block B which ends with a conditional and has
   precisely two successors, determine which of the edges is taken if
   the conditional is true and which is taken if the conditional is
   false.  Set TRUE_EDGE and FALSE_EDGE appropriately.  */

void
extract_true_false_edges_from_block (basic_block b,
				     edge *true_edge,
				     edge *false_edge)
{
  edge e = EDGE_SUCC (b, 0);

  if (e->flags & EDGE_TRUE_VALUE)
    {
      *true_edge = e;
      *false_edge = EDGE_SUCC (b, 1);
    }
  else
    {
      *false_edge = e;
      *true_edge = EDGE_SUCC (b, 1);
    }
}


/* From a controlling predicate in the immediate dominator DOM of
   PHIBLOCK determine the edges into PHIBLOCK that are chosen if the
   predicate evaluates to true and false, and store them to
   *TRUE_CONTROLLED_EDGE and *FALSE_CONTROLLED_EDGE if they are
   non-NULL.  Return true if the edges can be determined, false
   otherwise.  */

bool
extract_true_false_controlled_edges (basic_block dom, basic_block phiblock,
				     edge *true_controlled_edge,
				     edge *false_controlled_edge)
{
  basic_block bb = phiblock;
  edge true_edge, false_edge, tem;
  edge e0 = NULL, e1 = NULL;

  /* We have to verify that one edge into the PHI node is dominated
     by the true edge of the predicate block and the other edge
     dominated by the false edge.  This ensures that the PHI argument
     we are going to take is completely determined by the path we
     take from the predicate block.
     We can only use BB dominance checks below if the destination of
     the true/false edges are dominated by their edge, thus only
     have a single predecessor.  */
  extract_true_false_edges_from_block (dom, &true_edge, &false_edge);
  tem = EDGE_PRED (bb, 0);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    e0 = tem;
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    e1 = tem;
  else
    return false;
  tem = EDGE_PRED (bb, 1);
  if (tem == true_edge
      || (single_pred_p (true_edge->dest)
	  && (tem->src == true_edge->dest
	      || dominated_by_p (CDI_DOMINATORS,
				 tem->src, true_edge->dest))))
    e0 = tem;
  else if (tem == false_edge
	   || (single_pred_p (false_edge->dest)
	       && (tem->src == false_edge->dest
		   || dominated_by_p (CDI_DOMINATORS,
				      tem->src, false_edge->dest))))
    e1 = tem;
  else
    return false;
  if (!e0 || !e1)
    return false;

  if (true_controlled_edge)
    *true_controlled_edge = e0;
  if (false_controlled_edge)
    *false_controlled_edge = e1;

  return true;
}
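
/* The shape handled above is the usual diamond

	      DOM
	    t/   \f
	    /     \
	  BB1     BB2
	    \     /
	   PHIBLOCK

   where *TRUE_CONTROLLED_EDGE becomes BB1->PHIBLOCK and
   *FALSE_CONTROLLED_EDGE becomes BB2->PHIBLOCK, or the direct edges
   out of DOM when an arm of the diamond is empty.  */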

/* Generate a range test LHS CODE RHS that determines whether INDEX is in the
   range [LOW, HIGH].  Place associated stmts before the last statement of
   BB.  */

void
generate_range_test (basic_block bb, tree index, tree low, tree high,
		     tree *lhs, tree *rhs)
{
  tree type = TREE_TYPE (index);
  tree utype = range_check_type (type);

  low = fold_convert (utype, low);
  high = fold_convert (utype, high);

  gimple_seq seq = NULL;
  index = gimple_convert (&seq, utype, index);
  *lhs = gimple_build (&seq, MINUS_EXPR, utype, index, low);
  *rhs = const_binop (MINUS_EXPR, utype, high, low);

  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
}
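
/* For example, for a test of INDEX against the range [3, 10], with
   INDEX a signed int, this emits something like (the temporaries are
   illustrative)

	_1 = (unsigned int) INDEX;
	_2 = _1 - 3;		<-- stored to *LHS

   and sets *RHS to the constant 7, so the caller can implement the
   range check as the single unsigned comparison _2 <= 7.  */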

/* Return the basic block that belongs to label numbered INDEX
   of a switch statement.  */

basic_block
gimple_switch_label_bb (function *ifun, gswitch *gs, unsigned index)
{
  return label_to_block (ifun, CASE_LABEL (gimple_switch_label (gs, index)));
}

/* Return the default basic block of a switch statement.  By GIMPLE_SWITCH
   convention the default case label is at index 0.  */

basic_block
gimple_switch_default_bb (function *ifun, gswitch *gs)
{
  return gimple_switch_label_bb (ifun, gs, 0);
}

/* Return the edge that belongs to label numbered INDEX
   of a switch statement.  */

edge
gimple_switch_edge (function *ifun, gswitch *gs, unsigned index)
{
  return find_edge (gimple_bb (gs), gimple_switch_label_bb (ifun, gs, index));
}

/* Return the default edge of a switch statement.  */

edge
gimple_switch_default_edge (function *ifun, gswitch *gs)
{
  return gimple_switch_edge (ifun, gs, 0);
}

/* Return true if the only executable statement in BB is a GIMPLE_COND.  */

bool
cond_only_block_p (basic_block bb)
{
  /* BB must have no executable statements.  */
  gimple_stmt_iterator gsi = gsi_after_labels (bb);
  if (phi_nodes (bb))
    return false;
  while (!gsi_end_p (gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (is_gimple_debug (stmt))
	;
      else if (gimple_code (stmt) == GIMPLE_NOP
	       || gimple_code (stmt) == GIMPLE_PREDICT
	       || gimple_code (stmt) == GIMPLE_COND)
	;
      else
	return false;
      gsi_next (&gsi);
    }
  return true;
}
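
/* For example, a block containing only

	if (a_1 > b_2) goto <bb 4>; else goto <bb 5>;

   (possibly preceded by labels, debug stmts or GIMPLE_PREDICTs)
   satisfies cond_only_block_p, while any assignment or call makes it
   return false.  */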


/* Emit return warnings.  */

namespace {

const pass_data pass_data_warn_function_return =
{
  GIMPLE_PASS, /* type */
  "*warn_function_return", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_warn_function_return : public gimple_opt_pass
{
public:
  pass_warn_function_return (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_warn_function_return, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute (function *) final override;

}; // class pass_warn_function_return

unsigned int
pass_warn_function_return::execute (function *fun)
{
  location_t location;
  gimple *last;
  edge e;
  edge_iterator ei;

  if (!targetm.warn_func_return (fun->decl))
    return 0;

  /* If we have a path to EXIT, then we do return.  */
  if (TREE_THIS_VOLATILE (fun->decl)
      && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) > 0)
    {
      location = UNKNOWN_LOCATION;
      for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (fun)->preds);
	   (e = ei_safe_edge (ei)); )
	{
	  last = *gsi_last_bb (e->src);
	  if ((gimple_code (last) == GIMPLE_RETURN
	       || gimple_call_builtin_p (last, BUILT_IN_RETURN))
	      && location == UNKNOWN_LOCATION
	      && ((location = LOCATION_LOCUS (gimple_location (last)))
		  != UNKNOWN_LOCATION)
	      && !optimize)
	    break;
	  /* When optimizing, replace return stmts in noreturn functions
	     with a __builtin_unreachable () call.  */
	  if (optimize && gimple_code (last) == GIMPLE_RETURN)
	    {
	      location_t loc = gimple_location (last);
	      gimple *new_stmt = gimple_build_builtin_unreachable (loc);
	      gimple_stmt_iterator gsi = gsi_for_stmt (last);
	      gsi_replace (&gsi, new_stmt, true);
	      remove_edge (e);
	    }
	  else
	    ei_next (&ei);
	}
      if (location == UNKNOWN_LOCATION)
	location = cfun->function_end_locus;
      warning_at (location, 0, "%<noreturn%> function does return");
    }

  /* If we see "return;" in some basic block, then we do reach the end
     without returning a value.  */
  else if (warn_return_type > 0
	   && !warning_suppressed_p (fun->decl, OPT_Wreturn_type)
	   && !VOID_TYPE_P (TREE_TYPE (TREE_TYPE (fun->decl))))
    {
      FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (fun)->preds)
	{
	  greturn *return_stmt = dyn_cast <greturn *> (*gsi_last_bb (e->src));
	  if (return_stmt
	      && gimple_return_retval (return_stmt) == NULL
	      && !warning_suppressed_p (return_stmt, OPT_Wreturn_type))
	    {
	      location = gimple_location (return_stmt);
	      if (LOCATION_LOCUS (location) == UNKNOWN_LOCATION)
		location = fun->function_end_locus;
	      if (warning_at (location, OPT_Wreturn_type,
			      "control reaches end of non-void function"))
		suppress_warning (fun->decl, OPT_Wreturn_type);
	      break;
	    }
	}
      /* The C++ FE turns fallthrough from the end of a non-void function
	 into a __builtin_unreachable () call with BUILTINS_LOCATION.
	 Recognize those as well as calls from ubsan_instrument_return.  */
      basic_block bb;
      if (!warning_suppressed_p (fun->decl, OPT_Wreturn_type))
	FOR_EACH_BB_FN (bb, fun)
	  if (EDGE_COUNT (bb->succs) == 0)
	    {
	      gimple *last = *gsi_last_bb (bb);
	      const enum built_in_function ubsan_missing_ret
		= BUILT_IN_UBSAN_HANDLE_MISSING_RETURN;
	      if (last
		  && ((LOCATION_LOCUS (gimple_location (last))
		       == BUILTINS_LOCATION
		       && (gimple_call_builtin_p (last, BUILT_IN_UNREACHABLE)
			   || gimple_call_builtin_p (last,
						     BUILT_IN_UNREACHABLE_TRAP)
			   || gimple_call_builtin_p (last, BUILT_IN_TRAP)))
		      || gimple_call_builtin_p (last, ubsan_missing_ret)))
		{
		  gimple_stmt_iterator gsi = gsi_for_stmt (last);
		  gsi_prev_nondebug (&gsi);
		  gimple *prev = gsi_stmt (gsi);
		  if (prev == NULL)
		    location = UNKNOWN_LOCATION;
		  else
		    location = gimple_location (prev);
		  if (LOCATION_LOCUS (location) == UNKNOWN_LOCATION)
		    location = fun->function_end_locus;
		  if (warning_at (location, OPT_Wreturn_type,
				  "control reaches end of non-void function"))
		    suppress_warning (fun->decl, OPT_Wreturn_type);
		  break;
		}
	    }
    }
  return 0;
}

} // anon namespace

gimple_opt_pass *
make_pass_warn_function_return (gcc::context *ctxt)
{
  return new pass_warn_function_return (ctxt);
}

/* Walk a gimplified function and warn for functions whose return value is
   ignored and attribute((warn_unused_result)) is set.  This is done before
   inlining, so we don't have to worry about that.  */

static void
do_warn_unused_result (gimple_seq seq)
{
  tree fdecl, ftype;
  gimple_stmt_iterator i;

  for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
    {
      gimple *g = gsi_stmt (i);

      switch (gimple_code (g))
	{
	case GIMPLE_BIND:
	  do_warn_unused_result (gimple_bind_body (as_a <gbind *> (g)));
	  break;
	case GIMPLE_TRY:
	  do_warn_unused_result (gimple_try_eval (g));
	  do_warn_unused_result (gimple_try_cleanup (g));
	  break;
	case GIMPLE_CATCH:
	  do_warn_unused_result (gimple_catch_handler (as_a <gcatch *> (g)));
	  break;
	case GIMPLE_EH_FILTER:
	  do_warn_unused_result (gimple_eh_filter_failure (g));
	  break;

	case GIMPLE_CALL:
	  if (gimple_call_lhs (g))
	    break;
	  if (gimple_call_internal_p (g))
	    break;

	  /* This is a naked call, as opposed to a GIMPLE_CALL with an
	     LHS.  All calls whose value is ignored should be
	     represented like this.  Look for the attribute.  */
	  fdecl = gimple_call_fndecl (g);
	  ftype = gimple_call_fntype (g);

	  if (lookup_attribute ("warn_unused_result", TYPE_ATTRIBUTES (ftype)))
	    {
	      location_t loc = gimple_location (g);

	      if (fdecl)
		warning_at (loc, OPT_Wunused_result,
			    "ignoring return value of %qD "
			    "declared with attribute %<warn_unused_result%>",
			    fdecl);
	      else
		warning_at (loc, OPT_Wunused_result,
			    "ignoring return value of function "
			    "declared with attribute %<warn_unused_result%>");
	    }
	  break;

	default:
	  /* Not a container, not a call, or a call whose value is used.  */
	  break;
	}
    }
}
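
/* As an example, compiling

	__attribute__ ((warn_unused_result)) int f (void);
	void g (void) { f (); }

   warns "ignoring return value of 'f' declared with attribute
   'warn_unused_result'" for the naked call in g, whereas a call whose
   value is used, such as "int x = f ();", does not warn.  */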

namespace {

const pass_data pass_data_warn_unused_result =
{
  GIMPLE_PASS, /* type */
  "*warn_unused_result", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_gimple_any, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_warn_unused_result : public gimple_opt_pass
{
public:
  pass_warn_unused_result (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_warn_unused_result, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override { return flag_warn_unused_result; }
  unsigned int execute (function *) final override
  {
    do_warn_unused_result (gimple_body (current_function_decl));
    return 0;
  }

}; // class pass_warn_unused_result

} // anon namespace

gimple_opt_pass *
make_pass_warn_unused_result (gcc::context *ctxt)
{
  return new pass_warn_unused_result (ctxt);
}

/* Maybe remove stores to variables we marked write-only.
   Return true if a store was removed.  */

static bool
maybe_remove_writeonly_store (gimple_stmt_iterator &gsi, gimple *stmt,
			      bitmap dce_ssa_names)
{
  /* Keep the store when it has side effects, i.e. when its source
     is volatile.  */
  if (!gimple_store_p (stmt)
      || gimple_has_side_effects (stmt)
      || optimize_debug)
    return false;

  tree lhs = get_base_address (gimple_get_lhs (stmt));

  if (!VAR_P (lhs)
      || (!TREE_STATIC (lhs) && !DECL_EXTERNAL (lhs))
      || !varpool_node::get (lhs)->writeonly)
    return false;

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Removing statement, writes"
	       " to write-only var:\n");
      print_gimple_stmt (dump_file, stmt, 0,
			 TDF_VOPS|TDF_MEMSYMS);
    }

  /* Mark the SSA name on the RHS so its definition is checked by
     simple DCE afterwards.  */
  if (gimple_assign_single_p (stmt))
    {
      tree rhs = gimple_assign_rhs1 (stmt);
      if (TREE_CODE (rhs) == SSA_NAME
	  && !SSA_NAME_IS_DEFAULT_DEF (rhs))
	bitmap_set_bit (dce_ssa_names, SSA_NAME_VERSION (rhs));
    }
  unlink_stmt_vdef (stmt);
  gsi_remove (&gsi, true);
  release_defs (stmt);
  return true;
}
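
/* As a sketch of when this triggers: given

	static int last_seen;
	void note (int v) { last_seen = v; }

   if LAST_SEEN is never read anywhere, the varpool marks it
   write-only and the store above is removed; an SSA name that only
   fed such a store is queued in DCE_SSA_NAMES for clean-up.  */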

/* IPA passes, compilation of earlier functions or inlining
   might have changed some properties, such as marking functions nothrow,
   pure, const or noreturn.
   Remove redundant edges and basic blocks, and create new ones if
   necessary.  */

unsigned int
execute_fixup_cfg (void)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  int todo = 0;
  cgraph_node *node = cgraph_node::get (current_function_decl);
  /* The same scaling is also done by ipa_merge_profiles.  */
  profile_count num = node->count;
  profile_count den = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;
  bool scale = num.initialized_p () && !(num == den);
  auto_bitmap dce_ssa_names;

  if (scale)
    {
      profile_count::adjust_for_ipa_scaling (&num, &den);
      ENTRY_BLOCK_PTR_FOR_FN (cfun)->count = node->count;
      EXIT_BLOCK_PTR_FOR_FN (cfun)->count
	= EXIT_BLOCK_PTR_FOR_FN (cfun)->count.apply_scale (num, den);
    }

  FOR_EACH_BB_FN (bb, cfun)
    {
      if (scale)
	bb->count = bb->count.apply_scale (num, den);
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	{
	  gimple *stmt = gsi_stmt (gsi);
	  tree decl = is_gimple_call (stmt)
		      ? gimple_call_fndecl (stmt)
		      : NULL;
	  if (decl)
	    {
	      int flags = gimple_call_flags (stmt);
	      if (flags & (ECF_CONST | ECF_PURE | ECF_LOOPING_CONST_OR_PURE))
		{
		  if (gimple_in_ssa_p (cfun))
		    {
		      todo |= TODO_update_ssa | TODO_cleanup_cfg;
		      update_stmt (stmt);
		    }
		}
	      if (flags & ECF_NORETURN
		  && fixup_noreturn_call (stmt))
		todo |= TODO_cleanup_cfg;
	    }

	  /* Remove stores to variables we marked write-only.  */
	  if (maybe_remove_writeonly_store (gsi, stmt, dce_ssa_names))
	    {
	      todo |= TODO_update_ssa | TODO_cleanup_cfg;
	      continue;
	    }

	  /* For calls we can simply remove the LHS when it is known
	     to be write-only.  */
	  if (is_gimple_call (stmt)
	      && gimple_get_lhs (stmt))
	    {
	      tree lhs = get_base_address (gimple_get_lhs (stmt));

	      if (VAR_P (lhs)
		  && (TREE_STATIC (lhs) || DECL_EXTERNAL (lhs))
		  && varpool_node::get (lhs)->writeonly)
		{
		  gimple_call_set_lhs (stmt, NULL);
		  update_stmt (stmt);
		  todo |= TODO_update_ssa | TODO_cleanup_cfg;
		}
	    }

	  gsi_next (&gsi);
	}
      if (gimple *last = *gsi_last_bb (bb))
	{
	  if (maybe_clean_eh_stmt (last)
	      && gimple_purge_dead_eh_edges (bb))
	    todo |= TODO_cleanup_cfg;
	  if (gimple_purge_dead_abnormal_call_edges (bb))
	    todo |= TODO_cleanup_cfg;
	}

      /* If we have a basic block with no successors that does not
	 end with a control statement or a noreturn call, end it with
	 a call to __builtin_unreachable.  This situation can occur
	 when inlining a noreturn call that does in fact return.  */
      if (EDGE_COUNT (bb->succs) == 0)
	{
	  gimple *stmt = last_nondebug_stmt (bb);
	  if (!stmt
	      || (!is_ctrl_stmt (stmt)
		  && (!is_gimple_call (stmt)
		      || !gimple_call_noreturn_p (stmt))))
	    {
	      if (stmt && is_gimple_call (stmt))
		gimple_call_set_ctrl_altering (stmt, false);
	      stmt = gimple_build_builtin_unreachable (UNKNOWN_LOCATION);
	      gimple_stmt_iterator gsi = gsi_last_bb (bb);
	      gsi_insert_after (&gsi, stmt, GSI_NEW_STMT);
	      if (!cfun->after_inlining)
		if (tree fndecl = gimple_call_fndecl (stmt))
		  {
		    gcall *call_stmt = dyn_cast <gcall *> (stmt);
		    node->create_edge (cgraph_node::get_create (fndecl),
				       call_stmt, bb->count);
		  }
	    }
	}
    }
  if (scale)
    {
      update_max_bb_count ();
      compute_function_frequency ();
    }

  if (current_loops
      && (todo & TODO_cleanup_cfg))
    loops_state_set (LOOPS_NEED_FIXUP);

  simple_dce_from_worklist (dce_ssa_names);

  return todo;
}
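
/* As an example of the unreachable-block fixup above: if a function
   declared noreturn actually falls off its end and gets inlined, the
   inlined body leaves a basic block with no successors whose last
   statement is an ordinary one; the loop terminates such a block
   with a call to __builtin_unreachable (), adding a matching cgraph
   edge when this runs before inlining.  */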

namespace {

const pass_data pass_data_fixup_cfg =
{
  GIMPLE_PASS, /* type */
  "fixup_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  PROP_cfg, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_fixup_cfg : public gimple_opt_pass
{
public:
  pass_fixup_cfg (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_fixup_cfg, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_fixup_cfg (m_ctxt); }
  unsigned int execute (function *) final override
  {
    return execute_fixup_cfg ();
  }

}; // class pass_fixup_cfg

} // anon namespace

gimple_opt_pass *
make_pass_fixup_cfg (gcc::context *ctxt)
{
  return new pass_fixup_cfg (ctxt);
}

/* Garbage collection support for edge_def.  */

extern void gt_ggc_mx (tree&);
extern void gt_ggc_mx (gimple *&);
extern void gt_ggc_mx (rtx&);
extern void gt_ggc_mx (basic_block&);

static void
gt_ggc_mx (rtx_insn *& x)
{
  if (x)
    gt_ggc_mx_rtx_def ((void *) x);
}

void
gt_ggc_mx (edge_def *e)
{
  tree block = LOCATION_BLOCK (e->goto_locus);
  gt_ggc_mx (e->src);
  gt_ggc_mx (e->dest);
  if (current_ir_type () == IR_GIMPLE)
    gt_ggc_mx (e->insns.g);
  else
    gt_ggc_mx (e->insns.r);
  gt_ggc_mx (block);
}

/* PCH support for edge_def.  */

extern void gt_pch_nx (tree&);
extern void gt_pch_nx (gimple *&);
extern void gt_pch_nx (rtx&);
extern void gt_pch_nx (basic_block&);

static void
gt_pch_nx (rtx_insn *& x)
{
  if (x)
    gt_pch_nx_rtx_def ((void *) x);
}

void
gt_pch_nx (edge_def *e)
{
  tree block = LOCATION_BLOCK (e->goto_locus);
  gt_pch_nx (e->src);
  gt_pch_nx (e->dest);
  if (current_ir_type () == IR_GIMPLE)
    gt_pch_nx (e->insns.g);
  else
    gt_pch_nx (e->insns.r);
  gt_pch_nx (block);
}

void
gt_pch_nx (edge_def *e, gt_pointer_operator op, void *cookie)
{
  tree block = LOCATION_BLOCK (e->goto_locus);
  op (&(e->src), NULL, cookie);
  op (&(e->dest), NULL, cookie);
  if (current_ir_type () == IR_GIMPLE)
    op (&(e->insns.g), NULL, cookie);
  else
    op (&(e->insns.r), NULL, cookie);
  op (&(block), &(block), cookie);
}

#if CHECKING_P

namespace selftest {

/* Helper function for CFG selftests: create a dummy function decl
   and push it as cfun.  */

static tree
push_fndecl (const char *name)
{
  tree fn_type = build_function_type_array (integer_type_node, 0, NULL);
  /* FIXME: this uses input_location: */
  tree fndecl = build_fn_decl (name, fn_type);
  tree retval = build_decl (UNKNOWN_LOCATION, RESULT_DECL,
			    NULL_TREE, integer_type_node);
  DECL_RESULT (fndecl) = retval;
  push_struct_function (fndecl);
  function *fun = DECL_STRUCT_FUNCTION (fndecl);
  ASSERT_TRUE (fun != NULL);
  init_empty_tree_cfg_for_function (fun);
  ASSERT_EQ (2, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));
  return fndecl;
}

/* These tests directly create CFGs.
   Compare with the static fns within tree-cfg.cc:
     - build_gimple_cfg
     - make_blocks: calls create_basic_block (seq, bb);
     - make_edges.  */

/* Verify a simple cfg of the form:
     ENTRY -> A -> B -> C -> EXIT.  */

static void
test_linear_chain ()
{
  gimple_register_cfg_hooks ();

  tree fndecl = push_fndecl ("cfg_test_linear_chain");
  function *fun = DECL_STRUCT_FUNCTION (fndecl);

  /* Create some empty blocks.  */
  basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
  basic_block bb_b = create_empty_bb (bb_a);
  basic_block bb_c = create_empty_bb (bb_b);

  ASSERT_EQ (5, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));

  /* Create some edges: a simple linear chain of BBs.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
  make_edge (bb_a, bb_b, 0);
  make_edge (bb_b, bb_c, 0);
  make_edge (bb_c, EXIT_BLOCK_PTR_FOR_FN (fun), 0);

  /* Verify the edges.  */
  ASSERT_EQ (4, n_edges_for_fn (fun));
  ASSERT_EQ (NULL, ENTRY_BLOCK_PTR_FOR_FN (fun)->preds);
  ASSERT_EQ (1, ENTRY_BLOCK_PTR_FOR_FN (fun)->succs->length ());
  ASSERT_EQ (1, bb_a->preds->length ());
  ASSERT_EQ (1, bb_a->succs->length ());
  ASSERT_EQ (1, bb_b->preds->length ());
  ASSERT_EQ (1, bb_b->succs->length ());
  ASSERT_EQ (1, bb_c->preds->length ());
  ASSERT_EQ (1, bb_c->succs->length ());
  ASSERT_EQ (1, EXIT_BLOCK_PTR_FOR_FN (fun)->preds->length ());
  ASSERT_EQ (NULL, EXIT_BLOCK_PTR_FOR_FN (fun)->succs);

  /* Verify the dominance information.
     Each BB in our simple chain should be dominated by the one before
     it.  */
  calculate_dominance_info (CDI_DOMINATORS);
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
  ASSERT_EQ (bb_b, get_immediate_dominator (CDI_DOMINATORS, bb_c));
  auto_vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
  ASSERT_EQ (1, dom_by_b.length ());
  ASSERT_EQ (bb_c, dom_by_b[0]);
  free_dominance_info (CDI_DOMINATORS);

  /* Similarly for post-dominance: each BB in our chain is post-dominated
     by the one after it.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  ASSERT_EQ (bb_b, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
  ASSERT_EQ (bb_c, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
  auto_vec<basic_block> postdom_by_b
    = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
  ASSERT_EQ (1, postdom_by_b.length ());
  ASSERT_EQ (bb_a, postdom_by_b[0]);
  free_dominance_info (CDI_POST_DOMINATORS);

  pop_cfun ();
}

/* Verify a simple CFG of the form:
     ENTRY
       |
       A
      / \
    t/   \f
    B     C
     \   /
      \ /
       D
       |
     EXIT.  */

static void
test_diamond ()
{
  gimple_register_cfg_hooks ();

  tree fndecl = push_fndecl ("cfg_test_diamond");
  function *fun = DECL_STRUCT_FUNCTION (fndecl);

  /* Create some empty blocks.  */
  basic_block bb_a = create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun));
  basic_block bb_b = create_empty_bb (bb_a);
  basic_block bb_c = create_empty_bb (bb_a);
  basic_block bb_d = create_empty_bb (bb_b);

  ASSERT_EQ (6, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));

  /* Create the edges.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), bb_a, EDGE_FALLTHRU);
  make_edge (bb_a, bb_b, EDGE_TRUE_VALUE);
  make_edge (bb_a, bb_c, EDGE_FALSE_VALUE);
  make_edge (bb_b, bb_d, 0);
  make_edge (bb_c, bb_d, 0);
  make_edge (bb_d, EXIT_BLOCK_PTR_FOR_FN (fun), 0);

  /* Verify the edges.  */
  ASSERT_EQ (6, n_edges_for_fn (fun));
  ASSERT_EQ (1, bb_a->preds->length ());
  ASSERT_EQ (2, bb_a->succs->length ());
  ASSERT_EQ (1, bb_b->preds->length ());
  ASSERT_EQ (1, bb_b->succs->length ());
  ASSERT_EQ (1, bb_c->preds->length ());
  ASSERT_EQ (1, bb_c->succs->length ());
  ASSERT_EQ (2, bb_d->preds->length ());
  ASSERT_EQ (1, bb_d->succs->length ());

  /* Verify the dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_b));
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_c));
  ASSERT_EQ (bb_a, get_immediate_dominator (CDI_DOMINATORS, bb_d));
  auto_vec<basic_block> dom_by_a = get_dominated_by (CDI_DOMINATORS, bb_a);
  ASSERT_EQ (3, dom_by_a.length ()); /* B, C, D, in some order.  */
  dom_by_a.release ();
  auto_vec<basic_block> dom_by_b = get_dominated_by (CDI_DOMINATORS, bb_b);
  ASSERT_EQ (0, dom_by_b.length ());
  dom_by_b.release ();
  free_dominance_info (CDI_DOMINATORS);

  /* Similarly for post-dominance.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_a));
  ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_b));
  ASSERT_EQ (bb_d, get_immediate_dominator (CDI_POST_DOMINATORS, bb_c));
  auto_vec<basic_block> postdom_by_d
    = get_dominated_by (CDI_POST_DOMINATORS, bb_d);
  ASSERT_EQ (3, postdom_by_d.length ()); /* A, B, C in some order.  */
  postdom_by_d.release ();
  auto_vec<basic_block> postdom_by_b
    = get_dominated_by (CDI_POST_DOMINATORS, bb_b);
  ASSERT_EQ (0, postdom_by_b.length ());
  postdom_by_b.release ();
  free_dominance_info (CDI_POST_DOMINATORS);

  pop_cfun ();
}

/* Verify that we can handle a CFG containing a "complete" aka
   fully-connected subgraph (where A B C D below all have edges
   pointing to each other node, also to themselves).
   e.g.:
     ENTRY  EXIT
       |    ^
       |   /
       |  /
       | /
       V/
       A<--->B
       ^^   ^^
       | \ / |
       |  X  |
       | / \ |
       VV   VV
       C<--->D
*/

static void
test_fully_connected ()
{
  gimple_register_cfg_hooks ();

  tree fndecl = push_fndecl ("cfg_fully_connected");
  function *fun = DECL_STRUCT_FUNCTION (fndecl);

  const int n = 4;

  /* Create some empty blocks.  */
  auto_vec <basic_block> subgraph_nodes;
  for (int i = 0; i < n; i++)
    subgraph_nodes.safe_push (create_empty_bb (ENTRY_BLOCK_PTR_FOR_FN (fun)));

  ASSERT_EQ (n + 2, n_basic_blocks_for_fn (fun));
  ASSERT_EQ (0, n_edges_for_fn (fun));

  /* Create the edges.  */
  make_edge (ENTRY_BLOCK_PTR_FOR_FN (fun), subgraph_nodes[0], EDGE_FALLTHRU);
  make_edge (subgraph_nodes[0], EXIT_BLOCK_PTR_FOR_FN (fun), 0);
  for (int i = 0; i < n; i++)
    for (int j = 0; j < n; j++)
      make_edge (subgraph_nodes[i], subgraph_nodes[j], 0);

  /* Verify the edges.  */
  ASSERT_EQ (2 + (n * n), n_edges_for_fn (fun));
  /* The first one is linked to ENTRY/EXIT as well as itself and
     everything else.  */
  ASSERT_EQ (n + 1, subgraph_nodes[0]->preds->length ());
  ASSERT_EQ (n + 1, subgraph_nodes[0]->succs->length ());
  /* The other ones in the subgraph are linked to everything in
     the subgraph (including themselves).  */
  for (int i = 1; i < n; i++)
    {
      ASSERT_EQ (n, subgraph_nodes[i]->preds->length ());
      ASSERT_EQ (n, subgraph_nodes[i]->succs->length ());
    }

  /* Verify the dominance information.  */
  calculate_dominance_info (CDI_DOMINATORS);
  /* The initial block in the subgraph should be dominated by ENTRY.  */
  ASSERT_EQ (ENTRY_BLOCK_PTR_FOR_FN (fun),
	     get_immediate_dominator (CDI_DOMINATORS,
				      subgraph_nodes[0]));
  /* Every other block in the subgraph should be dominated by the
     initial block.  */
  for (int i = 1; i < n; i++)
    ASSERT_EQ (subgraph_nodes[0],
	       get_immediate_dominator (CDI_DOMINATORS,
					subgraph_nodes[i]));
  free_dominance_info (CDI_DOMINATORS);

  /* Similarly for post-dominance.  */
  calculate_dominance_info (CDI_POST_DOMINATORS);
  /* The initial block in the subgraph should be postdominated by EXIT.  */
  ASSERT_EQ (EXIT_BLOCK_PTR_FOR_FN (fun),
	     get_immediate_dominator (CDI_POST_DOMINATORS,
				      subgraph_nodes[0]));
  /* Every other block in the subgraph should be postdominated by the
     initial block, since that leads to EXIT.  */
  for (int i = 1; i < n; i++)
    ASSERT_EQ (subgraph_nodes[0],
	       get_immediate_dominator (CDI_POST_DOMINATORS,
					subgraph_nodes[i]));
  free_dominance_info (CDI_POST_DOMINATORS);

  pop_cfun ();
}

/* Run all of the selftests within this file.  */

void
tree_cfg_cc_tests ()
{
  test_linear_chain ();
  test_diamond ();
  test_fully_connected ();
}

} // namespace selftest

/* TODO: test the dominator/postdominator logic with various graphs/nodes:
   - loop
   - nested loops
   - switch statement (a block with many out-edges)
   - something that jumps to itself
   - etc.  */

#endif /* CHECKING_P */
