1/* Control flow redundancy hardening.
2 Copyright (C) 2022-2024 Free Software Foundation, Inc.
3 Contributed by Alexandre Oliva <oliva@adacore.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#define INCLUDE_ALGORITHM /* find */
23#include "system.h"
24#include "coretypes.h"
25#include "backend.h"
26#include "memmodel.h"
27#include "tm_p.h"
28#include "tree.h"
29#include "fold-const.h"
30#include "gimple.h"
31#include "gimplify.h"
32#include "tree-pass.h"
33#include "ssa.h"
34#include "gimple-iterator.h"
35#include "gimple-pretty-print.h"
36#include "tree-cfg.h"
37#include "tree-cfgcleanup.h"
38#include "tree-eh.h"
39#include "except.h"
40#include "sbitmap.h"
41#include "basic-block.h"
42#include "cfghooks.h"
43#include "cfgloop.h"
44#include "cgraph.h"
45#include "alias.h"
46#include "varasm.h"
47#include "output.h"
48#include "langhooks.h"
49#include "diagnostic.h"
50#include "intl.h"
51
52namespace {
53
54/* This pass introduces verification, at function exits, that booleans
55 set in each basic block during function execution reflect the
56 control flow graph: for each visited block, check that at least one
57 predecessor and at least one successor were also visited. This
58 sort of hardening may detect various kinds of attacks. */
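
/* As a rough sketch (illustrative only, with made-up names): the
   instrumentation amounts to giving each function a local bit array,
   with one bit set per basic block as it is entered, e.g.

     vword visited[(NBLOCKS + VWORD_BITS - 1) / VWORD_BITS] = { 0 };
     ...
     visited[BB_WORD] |= BB_MASK;   // on entry to each block
     ...
     __hardcfr_check (NBLOCKS, visited, rtcfg);  // or inline checks

   where the check, run at exit points, verifies that each visited
   block has at least one visited predecessor and at least one
   visited successor according to the statically-encoded CFG.  */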
59
60/* Define a pass to harden code through control flow redundancy. */
61
const pass_data pass_data_harden_control_flow_redundancy = {
  GIMPLE_PASS, // type
  "hardcfr", // name
  OPTGROUP_NONE, // optinfo_flags
  TV_NONE, // tv_id
  PROP_cfg | PROP_ssa, // properties_required
  0, // properties_provided
  0, // properties_destroyed
  TODO_cleanup_cfg, // todo_flags_start
  0, // todo_flags_finish
};
73
74class pass_harden_control_flow_redundancy : public gimple_opt_pass
75{
76public:
77 pass_harden_control_flow_redundancy (gcc::context *ctxt)
78 : gimple_opt_pass (pass_data_harden_control_flow_redundancy, ctxt)
79 {}
80 opt_pass *clone () { return new pass_harden_control_flow_redundancy (m_ctxt); }
81 virtual bool gate (function *fun) {
82 /* Return quickly if the pass is disabled, without checking any of
83 the conditions that might give rise to warnings that would only
84 be appropriate if hardening was requested. */
85 if (!flag_harden_control_flow_redundancy)
86 return false;
87
88 /* Functions that return more than once, like setjmp and vfork
89 (that also gets this flag set), will start recording a path
90 after the first return, and then may take another path when
91 they return again. The unterminated path may then be flagged
92 as an error. ??? We could save the visited array before the
93 call and restore it if it returns again. */
94 if (fun->calls_setjmp)
95 {
96 warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
97 "%qD calls %<setjmp%> or similar,"
98 " %<-fharden-control-flow-redundancy%> is not supported",
99 fun->decl);
100 return false;
101 }
102
103 /* Some targets bypass the abnormal dispatcher block in nonlocal
104 gotos, and then we'd miss its visited bit. It might be doable
105 to make it work uniformly, but this feature is not used often
106 enough to make it worthwhile. */
107 if (fun->has_nonlocal_label)
108 {
109 warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
110 "%qD receives nonlocal gotos,"
111 " %<-fharden-control-flow-redundancy%> is not supported",
112 fun->decl);
113 return false;
114 }
115
116 if (fun->cfg && param_hardcfr_max_blocks > 0
117 && (n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS
118 > param_hardcfr_max_blocks))
119 {
120 warning_at (DECL_SOURCE_LOCATION (fun->decl), 0,
121 "%qD has more than %u blocks, the requested"
122 " maximum for %<-fharden-control-flow-redundancy%>",
123 fun->decl, param_hardcfr_max_blocks);
124 return false;
125 }
126
127 return true;
128 }
129 virtual unsigned int execute (function *);
130};
131
132}
133
134/* Return TRUE iff CFR checks should be inserted before returning
135 calls. */
136
137static bool
138check_returning_calls_p ()
139{
140 return
141 flag_harden_control_flow_redundancy_check_returning_calls > 0
142 || (flag_harden_control_flow_redundancy_check_returning_calls < 0
143 /* Gates pass_tail_calls. */
144 && flag_optimize_sibling_calls
145 /* Gates pass_all_optimizations. */
146 && optimize >= 1 && !optimize_debug);
147}
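
/* In other words (a summary of the flags read above): checks before
   returning calls are enabled either explicitly with
   -fharden-control-flow-redundancy-check-returning-calls, or, when
   that flag is negative (its "auto" setting), whenever tail-call
   discovery could kick in, i.e. with -foptimize-sibling-calls at -O1
   or higher, excluding -Og.  */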
148
/* Scan BB from the end, updating *RETPTR, if given, as return stmts
   and copies are found.  Return a call or a stmt that cannot appear
   after a tail call, or NULL if the top of the block is reached
   without finding any.  */
153
154static gimple *
155hardcfr_scan_block (basic_block bb, tree **retptr)
156{
157 gimple_stmt_iterator gsi;
158 for (gsi = gsi_last_bb (bb); !gsi_end_p (i: gsi); gsi_prev (i: &gsi))
159 {
160 gimple *stmt = gsi_stmt (i: gsi);
161
162 /* Ignore labels, returns, nops, clobbers and debug stmts. */
163 if (gimple_code (g: stmt) == GIMPLE_LABEL
164 || gimple_code (g: stmt) == GIMPLE_NOP
165 || gimple_code (g: stmt) == GIMPLE_PREDICT
166 || gimple_clobber_p (s: stmt)
167 || is_gimple_debug (gs: stmt))
168 continue;
169
170 if (gimple_code (g: stmt) == GIMPLE_RETURN)
171 {
172 greturn *gret = as_a <greturn *> (p: stmt);
173 if (retptr)
174 {
175 gcc_checking_assert (!*retptr);
176 *retptr = gimple_return_retval_ptr (gs: gret);
177 }
178 continue;
179 }
180
181 /* Check for a call. */
182 if (is_gimple_call (gs: stmt))
183 return stmt;
184
185 /* Allow simple copies to the return value, updating the return
186 value to be found in earlier assignments. */
187 if (retptr && *retptr && gimple_assign_single_p (gs: stmt)
188 && **retptr == gimple_assign_lhs (gs: stmt))
189 {
190 *retptr = gimple_assign_rhs1_ptr (gs: stmt);
191 continue;
192 }
193
194 return stmt;
195 }
196
197 /* Any other kind of stmt will prevent a tail call. */
198 return NULL;
199}
200
201/* Return TRUE iff CALL is to be preceded by a CFR checkpoint, i.e.,
202 if it's a returning call (one whose result is ultimately returned
203 without intervening non-copy statements) and we're checking
204 returning calls, a __builtin_return call (noreturn with a path to
205 the exit block), a must-tail call, or a tail call. */
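
/* E.g. (an illustrative GIMPLE fragment, not from any testcase):

     _1 = bar ();      // returning call: gets a check right before it
     D.2345 = _1;      // simple copy, accepted by hardcfr_scan_block
     return D.2345;

   when checking of returning calls is enabled; copies and PHIs
   between the call and the return stmt are traced through RETPTR.  */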
206
207static bool
208returning_call_p (gcall *call)
209{
210 if (!(gimple_call_noreturn_p (s: call)
211 || gimple_call_must_tail_p (s: call)
212 || gimple_call_tail_p (s: call)
213 || check_returning_calls_p ()))
214 return false;
215
216 /* Quickly check that there's a path to exit compatible with a
217 returning call. Detect infinite loops by limiting the path
218 length to the basic block count, and by looking for duplicate
219 blocks before allocating more memory for the path, for amortized
220 O(n). */
221 auto_vec<basic_block, 10> path;
222 for (basic_block bb = gimple_bb (g: call);
223 bb != EXIT_BLOCK_PTR_FOR_FN (cfun);
224 bb = single_succ (bb))
225 if (!single_succ_p (bb)
226 || (single_succ_edge (bb)->flags & EDGE_EH) != 0
227 || n_basic_blocks_for_fn (cfun) - path.length () <= NUM_FIXED_BLOCKS
228 || (path.length () == path.allocated ()
229 && std::find (first: path.begin (), last: path.end (), val: bb) != path.end ()))
230 return false;
231 else
232 path.safe_push (obj: bb);
233
234 /* Check the stmts in the blocks and trace the return value. */
235 tree *retptr = NULL;
236 for (;;)
237 {
238 gcc_checking_assert (!path.is_empty ());
239 basic_block bb = path.pop ();
240 gimple *stop = hardcfr_scan_block (bb, retptr: &retptr);
241 if (stop)
242 {
243 if (stop != call)
244 return false;
245 gcc_checking_assert (path.is_empty ());
246 break;
247 }
248
249 gphi *retphi = NULL;
250 if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME
251 && !SSA_NAME_IS_DEFAULT_DEF (*retptr)
252 && SSA_NAME_DEF_STMT (*retptr)
253 && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr))
254 && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb)
255 {
256 retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr));
257 gcc_checking_assert (gimple_phi_result (retphi) == *retptr);
258 }
259 else
260 continue;
261
262 gcc_checking_assert (!path.is_empty ());
263 edge e = single_succ_edge (bb: path.last ());
264 int i = EDGE_COUNT (bb->preds);
265 while (i--)
266 if (EDGE_PRED (bb, i) == e)
267 break;
268 gcc_checking_assert (i >= 0);
269 retptr = gimple_phi_arg_def_ptr (phi: retphi, index: i);
270 }
271
272 return (gimple_call_noreturn_p (s: call)
273 || gimple_call_must_tail_p (s: call)
274 || gimple_call_tail_p (s: call)
275 || (gimple_call_lhs (gs: call) == (retptr ? *retptr : NULL)
276 && check_returning_calls_p ()));
277}
278
279typedef auto_vec<edge, 10> chk_edges_t;
280
281/* Declare for mutual recursion. */
282static bool hardcfr_sibcall_search_preds (basic_block bb,
283 chk_edges_t &chk_edges,
284 int &count_chkcall,
285 auto_sbitmap &chkcall_blocks,
286 int &count_postchk,
287 auto_sbitmap &postchk_blocks,
288 tree *retptr);
289
290/* Search backwards from the end of BB for a mandatory or potential
291 sibcall. Schedule the block to be handled sort-of like noreturn if
292 so. Recurse to preds, with updated RETPTR, if the block only
293 contains stmts that may follow such a call, scheduling checking at
294 edges and marking blocks as post-check as needed. Return true iff,
295 at the end of the block, a check will have already been
296 performed. */
297
298static bool
299hardcfr_sibcall_search_block (basic_block bb,
300 chk_edges_t &chk_edges,
301 int &count_chkcall,
302 auto_sbitmap &chkcall_blocks,
303 int &count_postchk,
304 auto_sbitmap &postchk_blocks,
305 tree *retptr)
306{
307 /* Conditionals and internal exceptions rule out tail calls. */
308 if (!single_succ_p (bb)
309 || (single_succ_edge (bb)->flags & EDGE_EH) != 0)
310 return false;
311
312 gimple *stmt = hardcfr_scan_block (bb, retptr: &retptr);
313 if (!stmt)
314 return hardcfr_sibcall_search_preds (bb, chk_edges,
315 count_chkcall, chkcall_blocks,
316 count_postchk, postchk_blocks,
317 retptr);
318
319 if (!is_a <gcall *> (p: stmt))
320 return false;
321
322 /* Avoid disrupting mandatory or early-marked tail calls,
323 inserting the check before them. This works for
324 must-tail calls, but tail calling as an optimization is
325 detected too late for us.
326
327 Also check for noreturn calls here. Noreturn calls won't
328 normally have edges to exit, so they won't be found here,
329 but __builtin_return does, and we must check before
330 it, so handle it like a tail call. */
331 gcall *call = as_a <gcall *> (p: stmt);
332 if (!(gimple_call_noreturn_p (s: call)
333 || gimple_call_must_tail_p (s: call)
334 || gimple_call_tail_p (s: call)
335 || (gimple_call_lhs (gs: call) == (retptr ? *retptr : NULL)
336 && check_returning_calls_p ())))
337 return false;
338
339 gcc_checking_assert (returning_call_p (call));
340
341 /* We found a call that is to be preceded by checking. */
342 if (bitmap_set_bit (map: chkcall_blocks, bitno: bb->index))
343 ++count_chkcall;
344 else
345 gcc_unreachable ();
346 return true;
347}
348
349
350/* Search preds of BB for a mandatory or potential sibcall or
351 returning call, and arrange for the blocks containing them to have
352 a check inserted before the call, like noreturn calls. If any
353 preds are found to perform checking, schedule checks at the edges
   of those that don't, and mark BB as postcheck.  */
355
356static bool
357hardcfr_sibcall_search_preds (basic_block bb,
358 chk_edges_t &chk_edges,
359 int &count_chkcall,
360 auto_sbitmap &chkcall_blocks,
361 int &count_postchk,
362 auto_sbitmap &postchk_blocks,
363 tree *retptr)
364{
365 /* For the exit block, we wish to force a check at every
366 predecessor, so pretend we've already found a pred that had
367 checking, so that we schedule checking at every one of its pred
368 edges. */
369 bool first = bb->index >= NUM_FIXED_BLOCKS;
370 bool postchecked = true;
371
372 gphi *retphi = NULL;
373 if (retptr && *retptr && TREE_CODE (*retptr) == SSA_NAME
374 && !SSA_NAME_IS_DEFAULT_DEF (*retptr)
375 && SSA_NAME_DEF_STMT (*retptr)
376 && is_a <gphi *> (SSA_NAME_DEF_STMT (*retptr))
377 && gimple_bb (SSA_NAME_DEF_STMT (*retptr)) == bb)
378 {
379 retphi = as_a <gphi *> (SSA_NAME_DEF_STMT (*retptr));
380 gcc_checking_assert (gimple_phi_result (retphi) == *retptr);
381 }
382
383 for (int i = EDGE_COUNT (bb->preds); i--; first = false)
384 {
385 edge e = EDGE_PRED (bb, i);
386
387 bool checked
388 = hardcfr_sibcall_search_block (bb: e->src, chk_edges,
389 count_chkcall, chkcall_blocks,
390 count_postchk, postchk_blocks,
391 retptr: !retphi ? retptr
392 : gimple_phi_arg_def_ptr (phi: retphi, index: i));
393
394 if (first)
395 {
396 postchecked = checked;
397 continue;
398 }
399
400 /* When we first find a checked block, force a check at every
401 other incoming edge we've already visited, and those we
402 visit afterwards that don't have their own check, so that
403 when we reach BB, the check has already been performed. */
404 if (!postchecked && checked)
405 {
406 for (int j = EDGE_COUNT (bb->preds); --j > i; )
407 chk_edges.safe_push (EDGE_PRED (bb, j));
408 postchecked = true;
409 }
410 if (postchecked && !checked)
411 chk_edges.safe_push (EDGE_PRED (bb, i));
412 }
413
414 if (postchecked && bb->index >= NUM_FIXED_BLOCKS)
415 {
416 if (bitmap_set_bit (map: postchk_blocks, bitno: bb->index))
417 count_postchk++;
418 else
419 gcc_unreachable ();
420 }
421
422 return postchecked;
423}
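
/* Roughly, the net effect of the two functions above: each path to
   the exit ends up checked exactly once, either right before a
   returning, must-tail or noreturn call, or on an edge into the
   block where an as-yet-unchecked path first merges with a path that
   already performs its check; blocks downstream of a check are
   marked postcheck, so they neither set visited bits nor get checked
   again.  (This is a summary of the logic above, not a separate
   invariant enforced elsewhere.)  */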
424
425
426class rt_bb_visited
427{
428 /* Use a sufficiently wide unsigned type to hold basic block numbers. */
429 typedef size_t blknum;
430
431 /* Record the original block count of the function. */
432 blknum nblocks;
433 /* Record the number of bits per VWORD (short for VISITED WORD), an
434 efficient mode to set and test bits for blocks we visited, and to
435 encode the CFG in case out-of-line verification is used. */
436 unsigned vword_bits;
437
438 /* Hold the unsigned integral VWORD type. */
439 tree vword_type;
440 /* Hold a pointer-to-VWORD type. */
441 tree vword_ptr;
442
443 /* Hold a growing sequence used to check, inline or out-of-line,
444 that VISITED encodes an expected execution path. */
445 gimple_seq ckseq;
  /* If non-NULL, hold a growing representation of the CFG for
447 out-of-line testing. */
448 tree rtcfg;
449
450 /* Hold the declaration of an array of VWORDs, used as an array of
451 NBLOCKS-2 bits. */
452 tree visited;
453
  /* If performing inline checking, hold declarations of boolean
     variables used for inline checking.  CKBLK holds the result of
     testing whether the VISITED bit corresponding to a predecessor or
     successor is set, CKINV inverts that bit, CKPART gets cleared if
     a block was not visited or if the VISITED bit (CKBLK) of any of
     its predecessors or successors is set, and CKFAIL gets set if
     CKPART remains set at the end of a block's predecessors or
     successors list.  */
461 tree ckfail, ckpart, ckinv, ckblk;
462
463 /* If we need to deal with abnormal edges, we insert SSA_NAMEs for
464 boolean true and false. */
465 tree vfalse, vtrue;
466
467 /* Convert a block index N to a block vindex, the index used to
468 identify it in the VISITED array. Check that it's in range:
469 neither ENTRY nor EXIT, but maybe one-past-the-end, to compute
470 the visited array length. */
471 blknum num2idx (blknum n) {
472 gcc_checking_assert (n >= NUM_FIXED_BLOCKS && n <= nblocks);
473 return (n - NUM_FIXED_BLOCKS);
474 }
475 /* Return the block vindex for BB, that must not be ENTRY or
476 EXIT. */
477 blknum bb2idx (basic_block bb) {
478 gcc_checking_assert (bb != ENTRY_BLOCK_PTR_FOR_FN (cfun)
479 && bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
480 gcc_checking_assert (blknum (bb->index) < nblocks);
481 return num2idx (n: bb->index);
482 }
483
484 /* Compute the type to be used for the VISITED array. */
485 tree vtype ()
486 {
487 blknum n = num2idx (n: nblocks);
488 return build_array_type_nelts (vword_type,
489 (n + vword_bits - 1) / vword_bits);
490 }
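
  /* E.g. (illustrative numbers): a function with 100 blocks has 98
     once ENTRY and EXIT are discarded, so with 32-bit visited words
     the array type built above gets (98 + 31) / 32 == 4 elements.  */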
491
492 /* Compute and return the index into VISITED for block BB. If BITP
493 is non-NULL, also compute and store the bit mask corresponding to
494 block BB in *BITP, so that (visited[index] & mask) tells whether
495 BB was visited. */
496 tree vwordidx (basic_block bb, tree *bitp = NULL)
497 {
498 blknum idx = bb2idx (bb);
499 if (bitp)
500 {
501 unsigned bit = idx % vword_bits;
502 /* We don't need to adjust shifts to follow native bit
503 endianness here, all of our uses of the CFG and visited
504 bitmaps, whether at compile or runtime, are shifted bits on
505 full words. This adjustment here would require a
506 corresponding adjustment at runtime, which would be nothing
507 but undesirable overhead for us. */
508 if (0 /* && BITS_BIG_ENDIAN */)
509 bit = vword_bits - bit - 1;
510 wide_int wbit = wi::set_bit_in_zero (bit, precision: vword_bits);
511 *bitp = wide_int_to_tree (type: vword_type, cst: wbit);
512 }
513 return build_int_cst (vword_ptr, idx / vword_bits);
514 }
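
  /* E.g. (illustrative numbers): with vword_bits == 64, the block
     with vindex 70 lives in visited word 70 / 64 == 1, under mask
     1 << (70 % 64), i.e. bit 6 of that word.  */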
515
  /* Return an expr that accesses the visited element that holds
517 information about BB. If BITP is non-NULL, set it to the mask to
518 tell which bit in that expr refers to BB. */
519 tree vword (basic_block bb, tree *bitp = NULL)
520 {
521 return build2 (MEM_REF, vword_type,
522 build1 (ADDR_EXPR, vword_ptr, visited),
523 int_const_binop (MULT_EXPR, vwordidx (bb, bitp),
524 fold_convert (vword_ptr,
525 TYPE_SIZE_UNIT
526 (vword_type))));
527 }
528
529 /* Return an expr that evaluates to true iff BB was marked as
530 VISITED. Add any gimple stmts to SEQP. */
531 tree vindex (basic_block bb, gimple_seq *seqp)
532 {
533 if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
534 || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
535 return boolean_true_node;
536
537 tree bit, setme = vword (bb, bitp: &bit);
538 tree temp = create_tmp_var (vword_type, ".cfrtemp");
539
540 gassign *vload = gimple_build_assign (temp, setme);
541 gimple_seq_add_stmt (seqp, vload);
542
543 gassign *vmask = gimple_build_assign (temp, BIT_AND_EXPR, temp, bit);
544 gimple_seq_add_stmt (seqp, vmask);
545
546 return build2 (NE_EXPR, boolean_type_node,
547 temp, build_int_cst (vword_type, 0));
548 }
549
550 /* Set the bit corresponding to BB in VISITED. Add to SEQ any
551 required gimple stmts, and return SEQ, possibly modified. */
552 gimple_seq vset (basic_block bb, gimple_seq seq = NULL)
553 {
554 tree bit, setme = vword (bb, bitp: &bit);
555 tree temp = create_tmp_var (vword_type, ".cfrtemp");
556
557 gassign *vload = gimple_build_assign (temp, setme);
558 gimple_seq_add_stmt (&seq, vload);
559
560 gassign *vbitset = gimple_build_assign (temp, BIT_IOR_EXPR, temp, bit);
561 gimple_seq_add_stmt (&seq, vbitset);
562
563 gassign *vstore = gimple_build_assign (unshare_expr (setme), temp);
564 gimple_seq_add_stmt (&seq, vstore);
565
566 /* Prevent stores into visited from being deferred, forcing
567 subsequent bitsets to reload the word rather than reusing
568 values already in register. The purpose is threefold: make the
569 bitset get to memory in this block, so that control flow
570 attacks in functions called in this block don't easily bypass
571 the bitset; prevent the bitset word from being retained in a
572 register across blocks, which could, in an attack scenario,
573 make a later block set more than one bit; and prevent hoisting
574 or sinking loads or stores of bitset words out of loops or even
575 throughout functions, which could significantly weaken the
576 verification. This is equivalent to making the bitsetting
577 volatile within the function body, but without changing its
578 type; making the bitset volatile would make inline checking far
579 less optimizable for no reason. */
580 vec<tree, va_gc> *inputs = NULL;
581 vec<tree, va_gc> *outputs = NULL;
582 vec_safe_push (v&: outputs,
583 obj: build_tree_list
584 (build_tree_list
585 (NULL_TREE, build_string (2, "=m")),
586 visited));
587 vec_safe_push (v&: inputs,
588 obj: build_tree_list
589 (build_tree_list
590 (NULL_TREE, build_string (1, "m")),
591 visited));
592 gasm *stabilize = gimple_build_asm_vec ("", inputs, outputs,
593 NULL, NULL);
594 gimple_seq_add_stmt (&seq, stabilize);
595
596 return seq;
597 }
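
  /* The sequence built above amounts to GIMPLE along these lines
     (a sketch; temporary and variable names are made up):

       .cfrtemp = MEM <vword> [(vword *)&visited + WORDIDX];
       .cfrtemp = .cfrtemp | BB_MASK;
       MEM <vword> [(vword *)&visited + WORDIDX] = .cfrtemp;
       __asm__ ("" : "=m" visited : "m" visited);  */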
598
599public:
600 /* Prepare to add control flow redundancy testing to CFUN. */
601 rt_bb_visited (int checkpoints)
602 : nblocks (n_basic_blocks_for_fn (cfun)),
603 vword_type (NULL), ckseq (NULL), rtcfg (NULL),
604 vfalse (NULL), vtrue (NULL)
605 {
606 /* If we've already added a declaration for the builtin checker,
607 extract vword_type and vword_bits from its declaration. */
608 if (tree checkfn = builtin_decl_explicit (fncode: BUILT_IN___HARDCFR_CHECK))
609 {
610 tree check_arg_list = TYPE_ARG_TYPES (TREE_TYPE (checkfn));
611 tree vword_const_ptr_type = TREE_VALUE (TREE_CHAIN (check_arg_list));
612 vword_type = TYPE_MAIN_VARIANT (TREE_TYPE (vword_const_ptr_type));
613 vword_bits = tree_to_shwi (TYPE_SIZE (vword_type));
614 }
615 /* Otherwise, select vword_bits, vword_type et al, and use it to
616 declare the builtin checker. */
617 else
618 {
619 /* This setting needs to be kept in sync with libgcc/hardcfr.c.
620 We aim for at least 28 bits, which enables us to refer to as
621 many as 28 << 28 blocks in a function's CFG. That's way over
622 4G blocks. */
623 machine_mode VWORDmode;
624 if (BITS_PER_UNIT >= 28)
625 {
626 VWORDmode = QImode;
627 vword_bits = BITS_PER_UNIT;
628 }
629 else if (BITS_PER_UNIT >= 14)
630 {
631 VWORDmode = HImode;
632 vword_bits = 2 * BITS_PER_UNIT;
633 }
634 else
635 {
636 VWORDmode = SImode;
637 vword_bits = 4 * BITS_PER_UNIT;
638 }
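
	/* On typical targets, where BITS_PER_UNIT is 8, this selects
	   SImode, i.e. 32-bit visited words.  */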
639
640 vword_type = lang_hooks.types.type_for_mode (VWORDmode, 1);
641 gcc_checking_assert (vword_bits == tree_to_shwi (TYPE_SIZE
642 (vword_type)));
643
644 vword_type = build_variant_type_copy (vword_type);
645 TYPE_ALIAS_SET (vword_type) = new_alias_set ();
646
647 tree vword_const = build_qualified_type (vword_type, TYPE_QUAL_CONST);
648 tree vword_const_ptr = build_pointer_type (vword_const);
649 tree type = build_function_type_list (void_type_node, sizetype,
650 vword_const_ptr, vword_const_ptr,
651 NULL_TREE);
652 tree decl = add_builtin_function_ext_scope
653 (name: "__builtin___hardcfr_check",
654 type, function_code: BUILT_IN___HARDCFR_CHECK, cl: BUILT_IN_NORMAL,
655 library_name: "__hardcfr_check", NULL_TREE);
656 TREE_NOTHROW (decl) = true;
657 set_builtin_decl (fncode: BUILT_IN___HARDCFR_CHECK, decl, implicit_p: true);
658 }
659
    /* The checker uses a const-qualified pointer type, which we
       can't reuse here, so build a new, unqualified one.  */
662 vword_ptr = build_pointer_type (vword_type);
663
664 tree visited_type = vtype ();
665 visited = create_tmp_var (visited_type, ".cfrvisited");
666
667 if (nblocks - NUM_FIXED_BLOCKS > blknum (param_hardcfr_max_inline_blocks)
668 || checkpoints > 1)
669 {
670 /* Make sure vword_bits is wide enough for the representation
671 of nblocks in rtcfg. Compare with vword_bits << vword_bits,
672 but avoiding overflows, shifting nblocks right instead. If
673 vword_bits is wider than HOST_WIDE_INT, assume it fits, so
674 as to avoid undefined shifts. */
675 gcc_assert (HOST_BITS_PER_WIDE_INT <= vword_bits
676 || (((unsigned HOST_WIDE_INT)(num2idx (nblocks))
677 >> vword_bits) < vword_bits));
678
679 /* Build a terminator for the constructor list. */
680 rtcfg = build_tree_list (NULL_TREE, NULL_TREE);
681 return;
682 }
683
684 ckfail = create_tmp_var (boolean_type_node, ".cfrfail");
685 ckpart = create_tmp_var (boolean_type_node, ".cfrpart");
686 ckinv = create_tmp_var (boolean_type_node, ".cfrinv");
687 ckblk = create_tmp_var (boolean_type_node, ".cfrblk");
688
689 gassign *ckfail_init = gimple_build_assign (ckfail, boolean_false_node);
690 gimple_seq_add_stmt (&ckseq, ckfail_init);
691 }
692
693 /* Insert SEQ before a resx or a call in INSBB. */
694 void insert_exit_check_in_block (gimple_seq seq, basic_block insbb)
695 {
696 gimple_stmt_iterator gsi = gsi_last_bb (bb: insbb);
697
698 while (!gsi_end_p (i: gsi))
699 if (is_a <gresx *> (p: gsi_stmt (i: gsi))
700 || is_a <gcall *> (p: gsi_stmt (i: gsi)))
701 break;
702 else
703 gsi_prev (i: &gsi);
704
705 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
706 }
707
708 /* Insert SEQ on E. */
709 void insert_exit_check_on_edge (gimple_seq seq, edge e)
710 {
711 if (!(e->flags & EDGE_ABNORMAL))
712 {
713 gsi_insert_seq_on_edge_immediate (e, seq);
714 return;
715 }
716
717 /* Initialize SSA boolean constants for use in abnormal PHIs. */
718 if (!vfalse)
719 {
720 vfalse = make_ssa_name (boolean_type_node);
721 vtrue = make_ssa_name (boolean_type_node);
722
723 gimple_seq vft_seq = NULL;
724 gassign *vfalse_init = gimple_build_assign (vfalse, boolean_false_node);
725 gimple_seq_add_stmt (&vft_seq, vfalse_init);
726 gassign *vtrue_init = gimple_build_assign (vtrue, boolean_true_node);
727 gimple_seq_add_stmt (&vft_seq, vtrue_init);
728
729 gsi_insert_seq_on_edge_immediate (single_succ_edge
730 (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
731 vft_seq);
732 }
733
734 /* We can't insert on abnormal edges, but we can arrange for SEQ
735 to execute conditionally at dest. Add a PHI boolean with TRUE
736 from E and FALSE from other preds, split the whole block, add a
737 test for the PHI to run a new block with SEQ or skip straight
738 to the original block. If there are multiple incoming abnormal
739 edges, we'll do this multiple times. ??? Unless there are
740 multiple abnormal edges with different postcheck status, we
741 could split the block and redirect other edges, rearranging the
742 PHI nodes. Optimizers already know how to do this, so we can
743 keep things simple here. */
744 basic_block bb = e->dest;
745 basic_block bb_postcheck = split_block_after_labels (bb)->dest;
746
747 basic_block bb_check = create_empty_bb (e->dest);
748 bb_check->count = e->count ();
749 if (dom_info_available_p (CDI_DOMINATORS))
750 set_immediate_dominator (CDI_DOMINATORS, bb_check, bb);
751 if (current_loops)
752 add_bb_to_loop (bb_check, current_loops->tree_root);
753
754 gimple_stmt_iterator chkpt = gsi_after_labels (bb: bb_check);
755 gsi_insert_seq_before_without_update (&chkpt, seq, GSI_SAME_STMT);
756 edge edge_postcheck = make_edge (bb_check, bb_postcheck, EDGE_FALLTHRU);
757 edge_postcheck->probability = profile_probability::always ();
758
759 tree cond_var = make_ssa_name (boolean_type_node);
760 gcond *cond = gimple_build_cond (NE_EXPR, cond_var, boolean_false_node,
761 NULL, NULL);
762 gimple_stmt_iterator condpt = gsi_after_labels (bb);
763 gsi_insert_before (&condpt, cond, GSI_SAME_STMT);
764 edge edge_nocheck = single_succ_edge (bb);
765 edge_nocheck->flags &= ~EDGE_FALLTHRU;
766 edge_nocheck->flags |= EDGE_FALSE_VALUE;
767 edge edge_check = make_edge (bb, bb_check, EDGE_TRUE_VALUE);
768 edge_check->probability = e->count ().probability_in (overall: bb->count);
769 edge_nocheck->probability = edge_check->probability.invert ();
770
771 gphi *cond_phi = create_phi_node (cond_var, bb);
772 for (int i = 0, ei = EDGE_COUNT (bb->preds); i < ei; i++)
773 {
774 edge pred = EDGE_PRED (bb, i);
775 bool check_edge = pred == e;
776 tree val = check_edge ? vtrue : vfalse;
777 add_phi_arg (cond_phi, val, pred, UNKNOWN_LOCATION);
778 }
779 }
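
  /* Sketch of the transformation above for an abnormal edge E into
     BB (block names made up): BB is split after its labels into BB
     and BB_POSTCHECK, a new BB_CHECK holding SEQ is created, and BB
     ends up with

       cond_var = PHI <true (E), false (other preds)...>;
       if (cond_var != false) goto BB_CHECK; else goto BB_POSTCHECK;

     with BB_CHECK falling through to BB_POSTCHECK.  */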
780
781 /* Add checking code to CHK_EDGES and CHKCALL_BLOCKS, and
782 initialization code on the entry edge. Before this point, the
783 CFG has been undisturbed, and all the needed data has been
784 collected and safely stowed. */
785 void check (chk_edges_t &chk_edges,
786 int count_chkcall, auto_sbitmap const &chkcall_blocks)
787 {
788 /* If we're using out-of-line checking, create and statically
789 initialize the CFG checking representation, generate the
790 checker call for the checking sequence, and insert it in all
791 exit edges, if there's more than one. If there's only one, we
792 use the same logic as the inline case to insert the check
793 sequence. */
794 if (rtcfg)
795 {
796 /* Unreverse the list, and drop the tail node turned into head. */
797 rtcfg = TREE_CHAIN (nreverse (rtcfg));
798
799 /* Turn the indices stored in TREE_PURPOSE into separate
800 nodes. It was useful to keep them together to enable
801 combination of masks and for clear separation of
802 terminators while constructing it, but now we have to turn
803 it into a sequence of words. */
804 for (tree node = rtcfg; node; node = TREE_CHAIN (node))
805 {
806 tree wordidx = TREE_PURPOSE (node);
807 if (!wordidx)
808 continue;
809
810 TREE_PURPOSE (node) = NULL_TREE;
811 TREE_CHAIN (node) = tree_cons (NULL_TREE,
812 fold_convert (vword_type, wordidx),
813 TREE_CHAIN (node));
814 }
815
816 /* Build the static initializer for the array with the CFG
817 representation for out-of-line checking. */
818 tree init = build_constructor_from_list (NULL_TREE, rtcfg);
819 TREE_TYPE (init) = build_array_type_nelts (vword_type,
820 CONSTRUCTOR_NELTS (init));
821 char buf[32];
822 ASM_GENERATE_INTERNAL_LABEL (buf, "Lhardcfg",
823 current_function_funcdef_no);
824 rtcfg = build_decl (UNKNOWN_LOCATION, VAR_DECL,
825 get_identifier (buf),
826 TREE_TYPE (init));
827 TREE_READONLY (rtcfg) = 1;
828 TREE_STATIC (rtcfg) = 1;
829 TREE_ADDRESSABLE (rtcfg) = 1;
830 TREE_USED (rtcfg) = 1;
831 DECL_ARTIFICIAL (rtcfg) = 1;
832 DECL_IGNORED_P (rtcfg) = 1;
833 DECL_INITIAL (rtcfg) = init;
834 make_decl_rtl (rtcfg);
835 varpool_node::finalize_decl (decl: rtcfg);
836
837 /* Add the checker call to ckseq. */
838 gcall *call_chk = gimple_build_call (builtin_decl_explicit
839 (fncode: BUILT_IN___HARDCFR_CHECK), 3,
840 build_int_cst (sizetype,
841 num2idx (n: nblocks)),
842 build1 (ADDR_EXPR, vword_ptr,
843 visited),
844 build1 (ADDR_EXPR, vword_ptr,
845 rtcfg));
846 gimple_seq_add_stmt (&ckseq, call_chk);
847
848 gimple *clobber = gimple_build_assign (visited,
849 build_clobber
850 (TREE_TYPE (visited)));
851 gimple_seq_add_stmt (&ckseq, clobber);
852
853 /* If we have multiple exit edges, insert (copies of)
854 ckseq in all of them. */
855 for (int i = chk_edges.length (); i--; )
856 {
857 gimple_seq seq = ckseq;
858 /* Copy the sequence, unless we're dealing with the
859 last edge (we're counting down to zero). */
860 if (i || count_chkcall)
861 seq = gimple_seq_copy (seq);
862
863 edge e = chk_edges[i];
864
865 if (dump_file)
866 {
867 if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
868 fprintf (stream: dump_file,
869 format: "Inserting out-of-line check in"
870 " block %i's edge to exit.\n",
871 e->src->index);
872 else
873 fprintf (stream: dump_file,
874 format: "Inserting out-of-line check in"
875 " block %i's edge to postcheck block %i.\n",
876 e->src->index, e->dest->index);
877 }
878
879 insert_exit_check_on_edge (seq, e);
880
881 gcc_checking_assert (!bitmap_bit_p (chkcall_blocks, e->src->index));
882 }
883
884 sbitmap_iterator it;
885 unsigned i;
886 EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it)
887 {
888 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
889
890 gimple_seq seq = ckseq;
891 gcc_checking_assert (count_chkcall > 0);
892 if (--count_chkcall)
893 seq = gimple_seq_copy (seq);
894
895 if (dump_file)
896 fprintf (stream: dump_file,
897 format: "Inserting out-of-line check before stmt in block %i.\n",
898 bb->index);
899
900 insert_exit_check_in_block (seq, insbb: bb);
901 }
902
903 gcc_checking_assert (count_chkcall == 0);
904 }
905 else
906 {
907 /* Inline checking requires a single exit edge. */
908 gimple *last = gimple_build_assign (visited,
909 build_clobber
910 (TREE_TYPE (visited)));
911 gimple_seq_add_stmt (&ckseq, last);
912
913 if (!count_chkcall)
914 {
915 edge e = single_pred_edge (EXIT_BLOCK_PTR_FOR_FN (cfun));
916
	      if (dump_file)
		{
		  if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
		    fprintf (dump_file,
			     "Inserting inline check in"
			     " block %i's edge to exit.\n",
			     e->src->index);
		  else
		    fprintf (dump_file,
			     "Inserting inline check in"
			     " block %i's edge to postcheck block %i.\n",
			     e->src->index, e->dest->index);
		}
930
931 insert_exit_check_on_edge (seq: ckseq, e);
932 }
933 else
934 {
935 gcc_checking_assert (count_chkcall == 1);
936
937 sbitmap_iterator it;
938 unsigned i;
939 EXECUTE_IF_SET_IN_BITMAP (chkcall_blocks, 0, i, it)
940 {
941 basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
942
943 gimple_seq seq = ckseq;
944 gcc_checking_assert (count_chkcall > 0);
945 if (--count_chkcall)
946 seq = gimple_seq_copy (seq);
947
948 if (dump_file)
949 fprintf (stream: dump_file,
950 format: "Inserting inline check before stmt in block %i.\n",
951 bb->index);
952
953 insert_exit_check_in_block (seq, insbb: bb);
954 }
955
956 gcc_checking_assert (count_chkcall == 0);
957 }
958
959 /* The inserted ckseq computes CKFAIL at LAST. Now we have to
960 conditionally trap on it. */
961 basic_block insbb = gimple_bb (g: last);
962
963 /* Create a block with the unconditional trap. */
964 basic_block trp = create_empty_bb (insbb);
965 gimple_stmt_iterator gsit = gsi_after_labels (bb: trp);
966
967 gcall *trap = gimple_build_call (builtin_decl_explicit
968 (fncode: BUILT_IN_TRAP), 0);
969 gsi_insert_before (&gsit, trap, GSI_SAME_STMT);
970
971 if (BB_PARTITION (insbb))
972 BB_SET_PARTITION (trp, BB_COLD_PARTITION);
973
974 if (current_loops)
975 add_bb_to_loop (trp, current_loops->tree_root);
976
977 /* Insert a conditional branch to the trap block. If the
978 conditional wouldn't be the last stmt, split the block. */
979 gimple_stmt_iterator gsi = gsi_for_stmt (last);
980 if (!gsi_one_before_end_p (i: gsi))
981 split_block (gsi_bb (i: gsi), gsi_stmt (i: gsi));
982
983 gcond *cond = gimple_build_cond (NE_EXPR, ckfail,
984 fold_convert (TREE_TYPE (ckfail),
985 boolean_false_node),
986 NULL, NULL);
987 gsi_insert_after (&gsi, cond, GSI_SAME_STMT);
988
989 /* Adjust the edges. */
990 single_succ_edge (bb: gsi_bb (i: gsi))->flags &= ~EDGE_FALLTHRU;
991 single_succ_edge (bb: gsi_bb (i: gsi))->flags |= EDGE_FALSE_VALUE;
992 single_succ_edge (bb: gsi_bb (i: gsi))->probability
993 = profile_probability::always ();
994 edge e = make_edge (gsi_bb (i: gsi), trp, EDGE_TRUE_VALUE);
995 e->probability = profile_probability::never ();
996 gcc_checking_assert (e->dest == trp);
997 gcc_checking_assert (!e->dest->count.initialized_p ());
998 e->dest->count = e->count ();
999
1000 /* Set the trap's dominator after splitting. */
1001 if (dom_info_available_p (CDI_DOMINATORS))
1002 set_immediate_dominator (CDI_DOMINATORS, trp, gimple_bb (g: last));
1003 }
1004
1005 /* Insert initializers for visited at the entry. Do this after
1006 other insertions, to avoid messing with block numbers. */
1007 gimple_seq iseq = NULL;
1008
1009 gcall *vinit = gimple_build_call (builtin_decl_explicit
1010 (fncode: BUILT_IN_MEMSET), 3,
1011 build1 (ADDR_EXPR,
1012 build_pointer_type
1013 (TREE_TYPE (visited)),
1014 visited),
1015 integer_zero_node,
1016 TYPE_SIZE_UNIT (TREE_TYPE (visited)));
1017 gimple_seq_add_stmt (&iseq, vinit);
1018
1019 gsi_insert_seq_on_edge_immediate (single_succ_edge
1020 (ENTRY_BLOCK_PTR_FOR_FN (cfun)),
1021 iseq);
1022 }
1023
1024 /* Push onto RTCFG a (mask, index) pair to test for IBB when BB is
1025 visited. XSELF is to be the ENTRY or EXIT block (depending on
1026 whether we're looking at preds or succs), to be remapped to BB
1027 because we can't represent them, and there's no point in testing
1028 them anyway. Return true if no further blocks need to be visited
1029 in the list, because we've already encountered a
1030 self-reference. */
1031 bool
1032 push_rtcfg_pair (basic_block ibb, basic_block bb,
1033 basic_block xself)
1034 {
1035 /* We don't have a bit to test for the entry and exit
1036 blocks, but it is always visited, so we test for the
1037 block itself, which gets us the right result and
1038 enables the self-test optimization below. */
1039 if (ibb == xself)
1040 ibb = bb;
1041
1042 tree mask, idx = vwordidx (bb: ibb, bitp: &mask);
1043 /* Combine masks with the same idx, but not if we're going
1044 to optimize for self-test. */
1045 if (ibb != bb && TREE_PURPOSE (rtcfg)
1046 && tree_int_cst_equal (idx, TREE_PURPOSE (rtcfg)))
1047 TREE_VALUE (rtcfg) = int_const_binop (BIT_IOR_EXPR, mask,
1048 TREE_VALUE (rtcfg));
1049 else
1050 rtcfg = tree_cons (idx, mask, rtcfg);
1051
1052 /* For self-tests (i.e., tests that the block itself was
1053 also visited), testing anything else is pointless,
1054 because it's a tautology, so just drop other edges. */
1055 if (ibb == bb)
1056 {
1057 while (TREE_PURPOSE (TREE_CHAIN (rtcfg)))
1058 TREE_CHAIN (rtcfg) = TREE_CHAIN (TREE_CHAIN (rtcfg));
1059 return true;
1060 }
1061
1062 return false;
1063 }
1064
1065 /* Add to CKSEQ stmts to clear CKPART if OBB is visited. */
1066 void
1067 build_block_check (basic_block obb)
1068 {
1069 tree vobb = fold_convert (TREE_TYPE (ckblk),
1070 vindex (obb, &ckseq));
1071 gassign *blkrunp = gimple_build_assign (ckblk, vobb);
1072 gimple_seq_add_stmt (&ckseq, blkrunp);
1073
1074 gassign *blknotrunp = gimple_build_assign (ckinv,
1075 EQ_EXPR,
1076 ckblk,
1077 fold_convert
1078 (TREE_TYPE (ckblk),
1079 boolean_false_node));
1080 gimple_seq_add_stmt (&ckseq, blknotrunp);
1081
1082 gassign *andblk = gimple_build_assign (ckpart,
1083 BIT_AND_EXPR,
1084 ckpart, ckinv);
1085 gimple_seq_add_stmt (&ckseq, andblk);
1086 }
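
  /* Together with visit below, for each instrumented block BB the
     inline checking sequence amounts to (a C-like sketch; visited ()
     stands for the bit test built by vindex):

       ckfail |= visited (BB) && !visited (pred_1) && ... && !visited (pred_N);
       ckfail |= visited (BB) && !visited (succ_1) && ... && !visited (succ_M);

     so CKFAIL ends up set iff some visited block had no visited
     predecessor or no visited successor.  */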
1087
1088 /* Add to BB code to set its bit in VISITED, and add to RTCFG or
1089 CKSEQ the data or code needed to check BB's predecessors and
1090 successors. If CHECKPOINT, assume the block is a checkpoint,
1091 whether or not it has an edge to EXIT. If POSTCHECK, assume the
1092 block post-dominates checkpoints and therefore no bitmap setting
1093 or checks are to be performed in or for it. Do NOT change the
1094 CFG. */
1095 void visit (basic_block bb, bool checkpoint, bool postcheck)
1096 {
1097 /* Set the bit in VISITED when entering the block. */
1098 gimple_stmt_iterator gsi = gsi_after_labels (bb);
1099 if (!postcheck)
1100 gsi_insert_seq_before (&gsi, vset (bb), GSI_SAME_STMT);
1101
1102 if (rtcfg)
1103 {
1104 if (!postcheck)
1105 {
1106 /* Build a list of (index, mask) terminated by (NULL, 0).
1107 Consolidate masks with the same index when they're
1108 adjacent. First, predecessors. Count backwards, because
1109 we're going to reverse the list. The order shouldn't
1110 matter, but let's not make it surprising. */
1111 for (int i = EDGE_COUNT (bb->preds); i--; )
1112 if (push_rtcfg_pair (EDGE_PRED (bb, i)->src, bb,
1113 ENTRY_BLOCK_PTR_FOR_FN (cfun)))
1114 break;
1115 }
1116 rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg);
1117
1118 if (!postcheck)
1119 {
1120 /* Then, successors. */
1121 if (!checkpoint
1122 || !push_rtcfg_pair (EXIT_BLOCK_PTR_FOR_FN (cfun),
1123 bb, EXIT_BLOCK_PTR_FOR_FN (cfun)))
1124 for (int i = EDGE_COUNT (bb->succs); i--; )
1125 if (push_rtcfg_pair (EDGE_SUCC (bb, i)->dest, bb,
1126 EXIT_BLOCK_PTR_FOR_FN (cfun)))
1127 break;
1128 }
1129 rtcfg = tree_cons (NULL_TREE, build_int_cst (vword_type, 0), rtcfg);
1130 }
1131 else if (!postcheck)
1132 {
1133 /* Schedule test to fail if the block was reached but somehow none
1134 of its predecessors were. */
1135 tree bit = fold_convert (TREE_TYPE (ckpart), vindex (bb, &ckseq));
1136 gassign *blkrunp = gimple_build_assign (ckpart, bit);
1137 gimple_seq_add_stmt (&ckseq, blkrunp);
1138
1139 for (int i = 0, e = EDGE_COUNT (bb->preds); i < e; i++)
1140 build_block_check (EDGE_PRED (bb, i)->src);
1141 gimple *orfailp = gimple_build_assign (ckfail, BIT_IOR_EXPR,
1142 ckfail, ckpart);
1143 gimple_seq_add_stmt (&ckseq, orfailp);
1144
1145 /* Likewise for successors. */
1146 gassign *blkruns = gimple_build_assign (ckpart, unshare_expr (bit));
1147 gimple_seq_add_stmt (&ckseq, blkruns);
1148
1149 if (checkpoint)
1150 build_block_check (EXIT_BLOCK_PTR_FOR_FN (cfun));
1151 for (int i = 0, e = EDGE_COUNT (bb->succs); i < e; i++)
1152 build_block_check (EDGE_SUCC (bb, i)->dest);
1153
1154 gimple *orfails = gimple_build_assign (ckfail, BIT_IOR_EXPR,
1155 ckfail, ckpart);
1156 gimple_seq_add_stmt (&ckseq, orfails);
1157 }
1158 }
1159};
1160
1161/* Avoid checking before noreturn calls that are known (expected,
1162 really) to finish by throwing an exception, rather than by ending
1163 the program or looping forever. Such functions have to be
1164 annotated, with an attribute (expected_throw) or flag (ECF_XTHROW),
1165 so that exception-raising functions, such as C++'s __cxa_throw,
1166 __cxa_rethrow, and Ada's gnat_rcheck_*, gnat_reraise*,
1167 ada.exception.raise_exception*, and the language-independent
1168 unwinders could be detected here and handled differently from other
1169 noreturn functions. */
1170static bool
1171always_throwing_noreturn_call_p (gimple *stmt)
1172{
1173 if (!is_a <gcall *> (p: stmt))
1174 return is_a <gresx *> (p: stmt);
1175
1176 gcall *call = as_a <gcall *> (p: stmt);
1177 return (gimple_call_noreturn_p (s: call)
1178 && gimple_call_expected_throw_p (s: call));
1179}
1180
/* Control flow redundancy hardening: record the execution path, and
   verify at exit that an expected path was taken.  */
1183
1184unsigned int
1185pass_harden_control_flow_redundancy::execute (function *fun)
1186{
1187 bool const check_at_escaping_exceptions
1188 = (flag_exceptions
1189 && flag_harden_control_flow_redundancy_check_exceptions);
1190 bool const check_before_noreturn_calls
1191 = flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NEVER;
1192 bool const check_before_nothrow_noreturn_calls
1193 = (check_before_noreturn_calls
1194 && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_NOTHROW);
1195 bool const check_before_throwing_noreturn_calls
1196 = (flag_exceptions
1197 && check_before_noreturn_calls
1198 && flag_harden_control_flow_redundancy_check_noreturn > HCFRNR_NOTHROW);
1199 bool const check_before_always_throwing_noreturn_calls
1200 = (flag_exceptions
1201 && check_before_noreturn_calls
1202 && flag_harden_control_flow_redundancy_check_noreturn >= HCFRNR_ALWAYS);
1203 basic_block bb;
1204 basic_block bb_eh_cleanup = NULL;
1205
1206 if (flag_harden_control_flow_redundancy_skip_leaf)
1207 {
1208 bool found_calls_p = false;
1209
1210 FOR_EACH_BB_FN (bb, fun)
1211 {
1212 for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
1213 !gsi_end_p (i: gsi); gsi_prev (i: &gsi))
1214 if (is_a <gcall *> (p: gsi_stmt (i: gsi)))
1215 {
1216 found_calls_p = true;
1217 break;
1218 }
1219 if (found_calls_p)
1220 break;
1221 }
1222
1223 if (!found_calls_p)
1224 {
1225 if (dump_file)
1226 fprintf (stream: dump_file,
1227 format: "Disabling CFR for leaf function, as requested\n");
1228
1229 return 0;
1230 }
1231 }
1232
1233 if (check_at_escaping_exceptions)
1234 {
1235 int lp_eh_cleanup = -1;
1236
1237 /* Record the preexisting blocks, to avoid visiting newly-created
1238 blocks. */
1239 auto_sbitmap to_visit (last_basic_block_for_fn (fun));
1240 bitmap_clear (to_visit);
1241
1242 FOR_EACH_BB_FN (bb, fun)
1243 bitmap_set_bit (map: to_visit, bitno: bb->index);
1244
      /* Scan the blocks for stmts with escaping exceptions, that
	 wouldn't be denoted in the CFG, and associate them with an
	 empty cleanup handler around the whole function.  Walk
	 backwards, so that even when we split the block, stmts yet
	 to be scanned remain in the current block and the walk is
	 not disturbed.  */
1249 sbitmap_iterator it;
1250 unsigned i;
1251 EXECUTE_IF_SET_IN_BITMAP (to_visit, 0, i, it)
1252 {
1253 bb = BASIC_BLOCK_FOR_FN (fun, i);
1254
1255 for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
1256 !gsi_end_p (i: gsi); gsi_prev (i: &gsi))
1257 {
1258 gimple *stmt = gsi_stmt (i: gsi);
1259 if (!stmt_could_throw_p (fun, stmt))
1260 continue;
1261
1262 /* If it must not throw, or if it already has a handler,
1263 we need not worry about it. */
1264 if (lookup_stmt_eh_lp (stmt) != 0)
1265 continue;
1266
1267 /* Don't split blocks at, nor add EH edges to, tail
1268 calls, we will add verification before the call
1269 anyway. */
1270 if (is_a <gcall *> (p: stmt)
1271 && (gimple_call_must_tail_p (s: as_a <gcall *> (p: stmt))
1272 || gimple_call_tail_p (s: as_a <gcall *> (p: stmt))
1273 || returning_call_p (call: as_a <gcall *> (p: stmt))))
1274 continue;
1275
1276 if (!gsi_one_before_end_p (i: gsi))
1277 split_block (bb, stmt);
	    /* A resx or noreturn call need not be associated with
1279 the cleanup handler if we're going to add checking
1280 before it. We only test cases that didn't require
1281 block splitting because noreturn calls would always
1282 be at the end of blocks, and we test for zero
1283 successors because if there is an edge, it's not
1284 noreturn, as any EH edges would have already been
1285 caught by the lookup_stmt_eh_lp test above. */
1286 else if (check_before_noreturn_calls
1287 && EDGE_COUNT (bb->succs) == 0
1288 && (is_a <gresx *> (p: stmt)
1289 ? check_before_always_throwing_noreturn_calls
1290 : (!is_a <gcall *> (p: stmt)
1291 || !gimple_call_noreturn_p (s: stmt))
1292 ? (gcc_unreachable (), false)
1293 : (!flag_exceptions
1294 || gimple_call_nothrow_p (s: as_a <gcall *> (p: stmt)))
1295 ? check_before_nothrow_noreturn_calls
1296 : always_throwing_noreturn_call_p (stmt)
1297 ? check_before_always_throwing_noreturn_calls
1298 : check_before_throwing_noreturn_calls))
1299 {
1300 if (dump_file)
1301 {
1302 fprintf (stream: dump_file,
1303 format: "Bypassing cleanup for noreturn stmt"
1304 " in block %i:\n",
1305 bb->index);
1306 print_gimple_stmt (dump_file, stmt, 0);
1307 }
1308 continue;
1309 }
1310
1311 if (!bb_eh_cleanup)
1312 {
1313 bb_eh_cleanup = create_empty_bb (bb);
1314 if (dom_info_available_p (CDI_DOMINATORS))
1315 set_immediate_dominator (CDI_DOMINATORS, bb_eh_cleanup, bb);
1316 if (current_loops)
1317 add_bb_to_loop (bb_eh_cleanup, current_loops->tree_root);
1318
1319 /* Make the new block an EH cleanup for the call. */
1320 eh_region new_r = gen_eh_region_cleanup (NULL);
1321 eh_landing_pad lp = gen_eh_landing_pad (new_r);
1322 tree label = gimple_block_label (bb_eh_cleanup);
1323 lp->post_landing_pad = label;
1324 EH_LANDING_PAD_NR (label) = lp_eh_cleanup = lp->index;
1325
1326 /* Just propagate the exception.
1327 We will later insert the verifier call. */
1328 gimple_stmt_iterator ehgsi;
1329 ehgsi = gsi_after_labels (bb: bb_eh_cleanup);
1330 gresx *resx = gimple_build_resx (new_r->index);
1331 gsi_insert_before (&ehgsi, resx, GSI_SAME_STMT);
1332
1333 if (dump_file)
1334 fprintf (stream: dump_file,
1335 format: "Created cleanup block %i:\n",
1336 bb_eh_cleanup->index);
1337 }
1338 else if (dom_info_available_p (CDI_DOMINATORS))
1339 {
1340 basic_block immdom;
1341 immdom = get_immediate_dominator (CDI_DOMINATORS,
1342 bb_eh_cleanup);
1343 if (!dominated_by_p (CDI_DOMINATORS, bb, immdom))
1344 {
1345 immdom = nearest_common_dominator (CDI_DOMINATORS,
1346 immdom, bb);
1347 set_immediate_dominator (CDI_DOMINATORS,
1348 bb_eh_cleanup, immdom);
1349 }
1350 }
1351
1352 if (dump_file)
1353 {
1354 fprintf (stream: dump_file,
1355 format: "Associated cleanup block with stmt in block %i:\n",
1356 bb->index);
1357 print_gimple_stmt (dump_file, stmt, 0);
1358 }
1359
1360 add_stmt_to_eh_lp (stmt, lp_eh_cleanup);
1361 /* Finally, wire the EH cleanup block into the CFG. */
1362 edge neeh = make_eh_edge (stmt);
1363 neeh->probability = profile_probability::never ();
1364 gcc_checking_assert (neeh->dest == bb_eh_cleanup);
1365 if (neeh->dest->count.initialized_p ())
1366 neeh->dest->count += neeh->count ();
1367 else
1368 neeh->dest->count = neeh->count ();
1369 }
1370 }
1371
1372 if (bb_eh_cleanup)
1373 {
1374 /* A cfg_cleanup after bb_eh_cleanup makes for a more compact
1375 rtcfg, and it avoids bb numbering differences when we split
1376 blocks because of trailing debug insns only. */
1377 cleanup_tree_cfg ();
1378 gcc_checking_assert (EDGE_COUNT (bb_eh_cleanup->succs) == 0);
1379 }
1380 }
1381
1382 /* These record blocks with calls that are to be preceded by
1383 checkpoints, such as noreturn calls (if so chosen), must-tail
1384 calls, potential early-marked tail calls, and returning calls (if
1385 so chosen). */
1386 int count_chkcall = 0;
1387 auto_sbitmap chkcall_blocks (last_basic_block_for_fn (fun));
1388 bitmap_clear (chkcall_blocks);
1389
1390 /* We wish to add verification at blocks without successors, such as
1391 noreturn calls (raising or not) and the reraise at the cleanup
1392 block, but not other reraises: they will go through the cleanup
1393 block. */
1394 if (check_before_noreturn_calls)
1395 FOR_EACH_BB_FN (bb, fun)
1396 {
1397 gimple_stmt_iterator gsi = gsi_last_bb (bb);
1398 if (gsi_end_p (i: gsi))
1399 continue;
1400 gimple *stmt = gsi_stmt (i: gsi);
1401
1402 if (EDGE_COUNT (bb->succs) == 0)
1403 {
1404 /* A stmt at the end of a block without any successors is
1405 either a resx or a noreturn call without a local
1406 handler. Check that it's one of the desired
1407 checkpoints. */
1408 if (flag_exceptions && is_a <gresx *> (p: stmt)
1409 ? (check_before_always_throwing_noreturn_calls
1410 || bb == bb_eh_cleanup)
1411 : (!is_a <gcall *> (p: stmt)
1412 || !gimple_call_noreturn_p (s: stmt))
1413 ? (stmt_can_make_abnormal_goto (stmt)
1414 /* ??? Check before indirect nonlocal goto, or
1415 calls thereof? */
1416 ? false
1417 /* Catch cases in which successors would be
1418 expected. */
1419 : (gcc_unreachable (), false))
1420 : (!flag_exceptions
1421 || gimple_call_nothrow_p (s: as_a <gcall *> (p: stmt)))
1422 ? check_before_nothrow_noreturn_calls
1423 : always_throwing_noreturn_call_p (stmt)
1424 ? check_before_always_throwing_noreturn_calls
1425 : check_before_throwing_noreturn_calls)
1426 {
1427 if (dump_file)
1428 {
1429 fprintf (stream: dump_file,
1430 format: "Scheduling check before stmt"
1431 " in succ-less block %i:\n",
1432 bb->index);
1433 print_gimple_stmt (dump_file, stmt, 0);
1434 }
1435
1436 if (bitmap_set_bit (map: chkcall_blocks, bitno: bb->index))
1437 count_chkcall++;
1438 else
1439 gcc_unreachable ();
1440 }
1441 continue;
1442 }
1443
1444 /* If there are no exceptions, it would seem like any noreturn
1445 call must have zero successor edges, but __builtin_return
1446 gets successor edges. We don't want to handle it here, it
1447 will be dealt with in sibcall_search_preds. Otherwise,
1448 check for blocks without non-EH successors, but skip those
1449 with resx stmts and edges (i.e., those other than that in
1450 bb_eh_cleanup), since those will go through bb_eh_cleanup,
1451 that will have been counted as noreturn above because it
1452 has no successors. */
1453 gcc_checking_assert (bb != bb_eh_cleanup
1454 || !check_at_escaping_exceptions);
1455 if (flag_exceptions && is_a <gresx *> (p: stmt)
1456 ? check_before_always_throwing_noreturn_calls
1457 : (!is_a <gcall *> (p: stmt)
1458 || !gimple_call_noreturn_p (s: stmt))
1459 ? false
1460 : (!flag_exceptions
1461 || gimple_call_nothrow_p (s: as_a <gcall *> (p: stmt)))
1462 ? false /* rather than check_before_nothrow_noreturn_calls */
1463 : always_throwing_noreturn_call_p (stmt)
1464 ? check_before_always_throwing_noreturn_calls
1465 : check_before_throwing_noreturn_calls)
1466 {
1467 gcc_checking_assert (single_succ_p (bb)
1468 && (single_succ_edge (bb)->flags & EDGE_EH));
1469
1470 if (dump_file)
1471 {
1472 fprintf (stream: dump_file,
1473 format: "Scheduling check before stmt"
1474 " in EH-succ block %i:\n",
1475 bb->index);
1476 print_gimple_stmt (dump_file, stmt, 0);
1477 }
1478
1479 if (bitmap_set_bit (map: chkcall_blocks, bitno: bb->index))
1480 count_chkcall++;
1481 else
1482 gcc_unreachable ();
1483 }
1484 }
1485 else if (bb_eh_cleanup)
1486 {
1487 if (bitmap_set_bit (map: chkcall_blocks, bitno: bb_eh_cleanup->index))
1488 count_chkcall++;
1489 else
1490 gcc_unreachable ();
1491 }
1492
1493 gcc_checking_assert (!bb_eh_cleanup
1494 || bitmap_bit_p (chkcall_blocks, bb_eh_cleanup->index));
1495
1496 /* If we don't have edges to exit nor noreturn calls (including the
1497 cleanup reraise), then we may skip instrumentation: that would
1498 amount to a function that ends with an infinite loop. */
1499 if (!count_chkcall
1500 && EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (fun)->preds) == 0)
1501 {
1502 if (dump_file)
1503 fprintf (stream: dump_file,
1504 format: "Disabling CFR, no exit paths to check\n");
1505
1506 return 0;
1507 }
1508
  /* Search for must-tail calls, early-marked potential tail calls,
     and, if requested, returning calls.  As we introduce early
     checks before these calls, also record the edges that still need
     checks, and the blocks past those checks that need neither
     bit-setting nor checking (post-check blocks).  */
1512 int count_postchk = 0;
1513 auto_sbitmap postchk_blocks (last_basic_block_for_fn (fun));
1514 bitmap_clear (postchk_blocks);
1515 chk_edges_t chk_edges;
1516 hardcfr_sibcall_search_preds (EXIT_BLOCK_PTR_FOR_FN (fun), chk_edges,
1517 count_chkcall, chkcall_blocks,
1518 count_postchk, postchk_blocks,
1519 NULL);
1520
1521 rt_bb_visited vstd (chk_edges.length () + count_chkcall);
1522
1523 auto_sbitmap combined_blocks (last_basic_block_for_fn (fun));
1524 bitmap_copy (combined_blocks, chkcall_blocks);
1525 int i;
1526 edge *e;
1527 FOR_EACH_VEC_ELT (chk_edges, i, e)
1528 if (!bitmap_set_bit (map: combined_blocks, bitno: (*e)->src->index))
1529 /* There may be multiple chk_edges with the same src block;
       guard against overlaps with chkcall_blocks only.  */
1531 gcc_assert (!bitmap_bit_p (chkcall_blocks, (*e)->src->index));
1532
  /* Visit blocks in index order, because building rtcfg depends on
     that.  Blocks must be compact, which the cleanup_cfg requirement
     ensures.  This would also enable FOR_EACH_BB_FN to be used to
     iterate in index order, but bb_eh_cleanup block splits and
     insertions change that.  */
1538 gcc_checking_assert (n_basic_blocks_for_fn (fun)
1539 == last_basic_block_for_fn (fun));
1540 for (int i = NUM_FIXED_BLOCKS; i < n_basic_blocks_for_fn (fun); i++)
1541 {
1542 bb = BASIC_BLOCK_FOR_FN (fun, i);
1543 gcc_checking_assert (bb->index == i);
1544 vstd.visit (bb, checkpoint: bitmap_bit_p (map: combined_blocks, bitno: i),
1545 postcheck: bitmap_bit_p (map: postchk_blocks, bitno: i));
1546 }
1547
1548 vstd.check (chk_edges, count_chkcall, chkcall_blocks);
1549
1550 return
1551 TODO_update_ssa
1552 | TODO_cleanup_cfg
1553 | TODO_verify_il;
1554}
1555
1556/* Instantiate a hardcfr pass. */
1557
1558gimple_opt_pass *
1559make_pass_harden_control_flow_redundancy (gcc::context *ctxt)
1560{
1561 return new pass_harden_control_flow_redundancy (ctxt);
1562}
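
/* The pass is requested with -fharden-control-flow-redundancy; since
   the pass name above is "hardcfr", its decisions and the messages
   printed throughout this file can be inspected in the dump produced
   by -fdump-tree-hardcfr.  */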