1 | /* Harden conditionals. |
2 | Copyright (C) 2021-2023 Free Software Foundation, Inc. |
3 | Contributed by Alexandre Oliva <oliva@adacore.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free |
9 | Software Foundation; either version 3, or (at your option) any later |
10 | version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "backend.h" |
25 | #include "target.h" |
26 | #include "rtl.h" |
27 | #include "tree.h" |
28 | #include "fold-const.h" |
29 | #include "gimple.h" |
30 | #include "gimplify.h" |
31 | #include "tree-pass.h" |
32 | #include "ssa.h" |
33 | #include "gimple-iterator.h" |
34 | #include "tree-cfg.h" |
35 | #include "basic-block.h" |
36 | #include "cfghooks.h" |
37 | #include "cfgloop.h" |
38 | #include "tree-eh.h" |
39 | #include "sbitmap.h" |
40 | #include "diagnostic.h" |
41 | #include "intl.h" |
42 | |
43 | namespace { |
44 | |
/* These passes introduce redundant, but reversed conditionals at
   compares, such as those used in conditional branches, and those
   that compute boolean results.  This doesn't make much sense for
   abstract CPUs, but this kind of hardening may avoid undesirable
   execution paths on actual CPUs under such attacks as power
   deprivation.  */
51 | |
52 | /* Define a pass to harden conditionals other than branches. */ |
53 | |
54 | const pass_data pass_data_harden_compares = { |
55 | .type: GIMPLE_PASS, |
56 | .name: "hardcmp" , |
57 | .optinfo_flags: OPTGROUP_NONE, |
58 | .tv_id: TV_NONE, |
59 | PROP_cfg | PROP_ssa, // properties_required |
60 | .properties_provided: 0, // properties_provided |
61 | .properties_destroyed: 0, // properties_destroyed |
62 | .todo_flags_start: 0, // properties_start |
63 | TODO_update_ssa |
64 | | TODO_cleanup_cfg |
65 | | TODO_verify_il, // properties_finish |
66 | }; |
67 | |
68 | class pass_harden_compares : public gimple_opt_pass |
69 | { |
70 | public: |
71 | pass_harden_compares (gcc::context *ctxt) |
72 | : gimple_opt_pass (pass_data_harden_compares, ctxt) |
73 | {} |
74 | opt_pass *clone () final override |
75 | { |
76 | return new pass_harden_compares (m_ctxt); |
77 | } |
78 | bool gate (function *) final override |
79 | { |
80 | return flag_harden_compares; |
81 | } |
82 | unsigned int execute (function *) final override; |
83 | }; |
84 | |
85 | /* Define a pass to harden conditionals in branches. This pass must |
86 | run after the above, otherwise it will re-harden the checks |
87 | introduced by the above. */ |
88 | |
89 | const pass_data pass_data_harden_conditional_branches = { |
90 | .type: GIMPLE_PASS, |
91 | .name: "hardcbr" , |
92 | .optinfo_flags: OPTGROUP_NONE, |
93 | .tv_id: TV_NONE, |
94 | PROP_cfg | PROP_ssa, // properties_required |
95 | .properties_provided: 0, // properties_provided |
96 | .properties_destroyed: 0, // properties_destroyed |
97 | .todo_flags_start: 0, // properties_start |
98 | TODO_update_ssa |
99 | | TODO_cleanup_cfg |
100 | | TODO_verify_il, // properties_finish |
101 | }; |
102 | |
103 | class pass_harden_conditional_branches : public gimple_opt_pass |
104 | { |
105 | public: |
106 | pass_harden_conditional_branches (gcc::context *ctxt) |
107 | : gimple_opt_pass (pass_data_harden_conditional_branches, ctxt) |
108 | {} |
109 | opt_pass *clone () final override |
110 | { |
111 | return new pass_harden_conditional_branches (m_ctxt); |
112 | } |
113 | bool gate (function *) final override |
114 | { |
115 | return flag_harden_conditional_branches; |
116 | } |
117 | unsigned int execute (function *) final override; |
118 | }; |
119 | |
120 | } |
121 | |
122 | /* If VAL is an SSA name, return an SSA name holding the same value, |
123 | but without the compiler's knowing that it holds the same value, so |
124 | that uses thereof can't be optimized the way VAL might. Insert |
125 | stmts that initialize it before *GSIP, with LOC. |
126 | |
127 | Otherwise, VAL must be an invariant, returned unchanged. */ |
128 | |
static inline tree
detach_value (location_t loc, gimple_stmt_iterator *gsip, tree val)
{
  /* Invariants are already opaque to further optimization of their
     uses, so there is nothing to detach.  */
  if (TREE_CONSTANT (val) || TREE_CODE (val) != SSA_NAME)
    {
      gcc_checking_assert (is_gimple_min_invariant (val));
      return val;
    }

  /* Create a SSA "copy" of VAL.  It would be nice to have it named
     after the corresponding variable, but sharing the same decl is
     problematic when VAL is a DECL_BY_REFERENCE RESULT_DECL, and
     copying just the identifier hits -fcompare-debug failures.  */
  tree ret = make_ssa_name (TREE_TYPE (val));

  /* Some modes won't fit in general regs, so we fall back to memory
     for them.  ??? It would be ideal to try to identify an alternate,
     wider or more suitable register class, and use the corresponding
     constraint, but there's no logic to go from register class to
     constraint, even if there is a corresponding constraint, and even
     if we could enumerate constraints, we can't get to their string
     either.  So this will do for now.  */
  bool need_memory = true;
  enum machine_mode mode = TYPE_MODE (TREE_TYPE (val));
  if (mode != BLKmode)
    /* Probe every hard register in GENERAL_REGS; any one that can
       hold MODE means the "g"/"0" register constraints will do.  */
    for (int i = 0; i < FIRST_PSEUDO_REGISTER; i++)
      if (TEST_HARD_REG_BIT (reg_class_contents[GENERAL_REGS], bit: i)
	  && targetm.hard_regno_mode_ok (i, mode))
	{
	  need_memory = false;
	  break;
	}

  tree asminput = val;
  tree asmoutput = ret;
  const char *constraint_out = need_memory ? "=m" : "=g";
  const char *constraint_in = need_memory ? "m" : "0";

  if (need_memory)
    {
      /* Route the value through an addressable temporary so that the
	 asm below can use "m" constraints on both operands.  */
      tree temp = create_tmp_var (TREE_TYPE (val), "dtch");
      mark_addressable (temp);

      gassign *copyin = gimple_build_assign (temp, asminput);
      gimple_set_location (g: copyin, location: loc);
      gsi_insert_before (gsip, copyin, GSI_SAME_STMT);

      asminput = asmoutput = temp;
    }

  /* Output an asm statement with matching input and output.  It does
     nothing, but after it the compiler no longer knows the output
     still holds the same value as the input.  */
  vec<tree, va_gc> *inputs = NULL;
  vec<tree, va_gc> *outputs = NULL;
  vec_safe_push (v&: outputs,
		 obj: build_tree_list
		 (build_tree_list
		  (NULL_TREE, build_string (strlen (s: constraint_out),
					    constraint_out)),
		  asmoutput));
  vec_safe_push (v&: inputs,
		 obj: build_tree_list
		 (build_tree_list
		  (NULL_TREE, build_string (strlen (s: constraint_in),
					    constraint_in)),
		  asminput));
  gasm *detach = gimple_build_asm_vec ("", inputs, outputs,
				       NULL, NULL);
  gimple_set_location (g: detach, location: loc);
  gsi_insert_before (gsip, detach, GSI_SAME_STMT);

  if (need_memory)
    {
      /* Copy the detached value back out of the temporary into RET,
	 and make that copy RET's defining stmt.  */
      gassign *copyout = gimple_build_assign (ret, asmoutput);
      gimple_set_location (g: copyout, location: loc);
      gsi_insert_before (gsip, copyout, GSI_SAME_STMT);
      SSA_NAME_DEF_STMT (ret) = copyout;

      /* The temporary is dead after the copy-out; clobber it so its
	 stack slot can be reused.  */
      gassign *clobber = gimple_build_assign (asmoutput,
					      build_clobber
					      (TREE_TYPE (asmoutput)));
      gimple_set_location (g: clobber, location: loc);
      gsi_insert_before (gsip, clobber, GSI_SAME_STMT);
    }
  else
    /* In the register case RET is the asm's output, so the asm itself
       is its defining stmt.  */
    SSA_NAME_DEF_STMT (ret) = detach;

  return ret;
}
219 | |
/* Build a cond stmt out of COP, LHS, RHS, insert it before *GSIP with
   location LOC.  *GSIP must be at the end of a basic block.  The succ
   edge out of the block becomes the true or false edge opposite to
   that in FLAGS.  Create a new block with a single trap stmt, in the
   cold partition if the function is partitioned, and a new edge to
   it as the other edge for the cond.  */
226 | |
static inline void
insert_check_and_trap (location_t loc, gimple_stmt_iterator *gsip,
		       int flags, enum tree_code cop, tree lhs, tree rhs)
{
  basic_block chk = gsi_bb (i: *gsip);

  /* The reversed compare becomes the controlling stmt of CHK; the
     true/false labels of the cond are implied by the edge flags set
     below.  */
  gcond *cond = gimple_build_cond (cop, lhs, rhs, NULL, NULL);
  gimple_set_location (g: cond, location: loc);
  gsi_insert_before (gsip, cond, GSI_SAME_STMT);

  /* The trap block is never expected to execute, so give it a zero
     profile count.  */
  basic_block trp = create_empty_bb (chk);
  trp->count = profile_count::zero ();

  gimple_stmt_iterator gsit = gsi_after_labels (bb: trp);
  gcall *trap = gimple_build_call (builtin_decl_explicit (fncode: BUILT_IN_TRAP), 0);
  /* __builtin_trap does not return, so the call alters control flow.  */
  gimple_call_set_ctrl_altering (s: trap, ctrl_altering_p: true);
  gimple_set_location (g: trap, location: loc);
  gsi_insert_before (&gsit, trap, GSI_SAME_STMT);

  if (dump_file)
    fprintf (stream: dump_file,
	     format: "Adding reversed compare to block %i, and trap to block %i\n",
	     chk->index, trp->index);

  /* If the function is partitioned, keep the never-executed trap
     block in the cold partition.  */
  if (BB_PARTITION (chk))
    BB_SET_PARTITION (trp, BB_COLD_PARTITION);

  int true_false_flag = flags & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);
  gcc_assert (true_false_flag);
  int neg_true_false_flag = (~flags) & (EDGE_TRUE_VALUE | EDGE_FALSE_VALUE);

  /* Remove the fallthru bit, and set the truth value for the
     preexisting edge and for the newly-created one.  In hardcbr,
     FLAGS is taken from the edge of the original cond expr that we're
     dealing with, so the reversed compare is expected to yield the
     negated result, and the same result calls for a trap.  In
     hardcmp, we're comparing the boolean results of the original and
     of the reversed compare, so we're passed FLAGS to trap on
     equality.  */
  single_succ_edge (bb: chk)->flags &= ~EDGE_FALLTHRU;
  single_succ_edge (bb: chk)->flags |= neg_true_false_flag;
  single_succ_edge (bb: chk)->probability = profile_probability::always ();
  edge e = make_edge (chk, trp, true_false_flag);
  e->goto_locus = loc;
  e->probability = profile_probability::never ();

  /* Keep dominator and loop information consistent: TRP is reached
     only from CHK, and it belongs to the outermost loop.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    set_immediate_dominator (CDI_DOMINATORS, trp, chk);
  if (current_loops)
    add_bb_to_loop (trp, current_loops->tree_root);
}
278 | |
279 | /* Split edge E, and insert_check_and_trap (see above) in the |
280 | newly-created block, using already-detached copies of LHS's and |
281 | RHS's values (see detach_value above) for the COP compare. */ |
282 | |
283 | static inline void |
284 | insert_edge_check_and_trap (location_t loc, edge e, |
285 | enum tree_code cop, tree lhs, tree rhs) |
286 | { |
287 | int flags = e->flags; |
288 | basic_block src = e->src; |
289 | basic_block dest = e->dest; |
290 | location_t eloc = e->goto_locus; |
291 | |
292 | basic_block chk = split_edge (e); |
293 | e = NULL; |
294 | |
295 | single_pred_edge (bb: chk)->goto_locus = loc; |
296 | single_succ_edge (bb: chk)->goto_locus = eloc; |
297 | |
298 | if (dump_file) |
299 | fprintf (stream: dump_file, |
300 | format: "Splitting edge %i->%i into block %i\n" , |
301 | src->index, dest->index, chk->index); |
302 | |
303 | gimple_stmt_iterator gsik = gsi_after_labels (bb: chk); |
304 | |
305 | insert_check_and_trap (loc, gsip: &gsik, flags, cop, lhs, rhs); |
306 | } |
307 | |
308 | /* Harden cond stmts at the end of FUN's blocks. */ |
309 | |
unsigned int
pass_harden_conditional_branches::execute (function *fun)
{
  /* Record the preexisting blocks, to avoid visiting newly-created
     blocks.  */
  auto_sbitmap to_visit (last_basic_block_for_fn (fun));
  bitmap_clear (to_visit);

  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    bitmap_set_bit (map: to_visit, bitno: bb->index);

  sbitmap_iterator it;
  unsigned i;
  EXECUTE_IF_SET_IN_BITMAP (to_visit, 0, i, it)
    {
      bb = BASIC_BLOCK_FOR_FN (fun, i);

      /* Only the last stmt of a block can be a cond, so look there.  */
      gimple_stmt_iterator gsi = gsi_last_bb (bb);

      if (gsi_end_p (i: gsi))
	continue;

      gcond *cond = dyn_cast <gcond *> (p: gsi_stmt (i: gsi));
      if (!cond)
	continue;

      /* Turn:

	 if (x op y) goto l1; else goto l2;

	 into:

	 if (x op y) goto l1'; else goto l2';
	 l1': if (x' cop y') goto l1'trap; else goto l1;
	 l1'trap: __builtin_trap ();
	 l2': if (x' cop y') goto l2; else goto l2'trap;
	 l2'trap: __builtin_trap ();

	 where cop is a complementary boolean operation to op; l1', l1'trap,
	 l2' and l2'trap are newly-created labels; and x' and y' hold the same
	 value as x and y, but in a way that does not enable the compiler to
	 optimize the redundant compare away.
      */

      enum tree_code op = gimple_cond_code (gs: cond);
      tree lhs = gimple_cond_lhs (gs: cond);
      tree rhs = gimple_cond_rhs (gs: cond);
      location_t loc = gimple_location (g: cond);

      enum tree_code cop = invert_tree_comparison (op, HONOR_NANS (lhs));

      /* ERROR_MARK means the comparison has no exact inverse (e.g. in
	 the presence of NaNs), so we can't build a reversed check.  */
      if (cop == ERROR_MARK)
	/* ??? Can we do better? */
	continue;

      /* Detach the values before the compares.  If we do so later,
	 the compiler may use values inferred from the compares.  */
      bool same_p = (lhs == rhs);
      lhs = detach_value (loc, gsip: &gsi, val: lhs);
      rhs = same_p ? lhs : detach_value (loc, gsip: &gsi, val: rhs);

      /* Harden both outgoing edges of the cond: each gets the
	 reversed compare and its own trap block.  */
      insert_edge_check_and_trap (loc, EDGE_SUCC (bb, 0), cop, lhs, rhs);
      insert_edge_check_and_trap (loc, EDGE_SUCC (bb, 1), cop, lhs, rhs);
    }

  return 0;
}
378 | |
379 | /* Instantiate a hardcbr pass. */ |
380 | |
381 | gimple_opt_pass * |
382 | make_pass_harden_conditional_branches (gcc::context *ctxt) |
383 | { |
384 | return new pass_harden_conditional_branches (ctxt); |
385 | } |
386 | |
387 | /* Return the fallthru edge of a block whose other edge is an EH |
388 | edge. If EHP is not NULL, store the EH edge in it. */ |
389 | static inline edge |
390 | non_eh_succ_edge (basic_block bb, edge *ehp = NULL) |
391 | { |
392 | gcc_checking_assert (EDGE_COUNT (bb->succs) == 2); |
393 | |
394 | edge ret = find_fallthru_edge (edges: bb->succs); |
395 | |
396 | int eh_idx = EDGE_SUCC (bb, 0) == ret; |
397 | edge eh = EDGE_SUCC (bb, eh_idx); |
398 | |
399 | gcc_checking_assert (!(ret->flags & EDGE_EH) |
400 | && (eh->flags & EDGE_EH)); |
401 | |
402 | if (ehp) |
403 | *ehp = eh; |
404 | |
405 | return ret; |
406 | } |
407 | |
408 | /* Harden boolean-yielding compares in FUN. */ |
409 | |
unsigned int
pass_harden_compares::execute (function *fun)
{
  /* Record the preexisting blocks, to avoid visiting newly-created
     blocks.  */
  auto_sbitmap to_visit (last_basic_block_for_fn (fun));
  bitmap_clear (to_visit);

  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    bitmap_set_bit (map: to_visit, bitno: bb->index);

  sbitmap_iterator it;
  unsigned i;
  EXECUTE_IF_SET_IN_BITMAP (to_visit, 0, i, it)
    {
      bb = BASIC_BLOCK_FOR_FN (fun, i);

      /* Walk stmts backwards, so that stmts and blocks inserted after
	 the current stmt by the hardening below are not revisited.  */
      for (gimple_stmt_iterator gsi = gsi_last_bb (bb);
	   !gsi_end_p (i: gsi); gsi_prev (i: &gsi))
	{
	  gassign *asgn = dyn_cast <gassign *> (p: gsi_stmt (i: gsi));
	  if (!asgn)
	    continue;

	  /* Turn:

	     z = x op y;

	     into:

	     z = x op y;
	     z' = x' cop y';
	     if (z == z') __builtin_trap ();

	     where cop is a complementary boolean operation to op; and x'
	     and y' hold the same value as x and y, but in a way that does
	     not enable the compiler to optimize the redundant compare
	     away.
	  */

	  enum tree_code op = gimple_assign_rhs_code (gs: asgn);

	  enum tree_code cop;

	  /* Only boolean-yielding comparisons are hardened; anything
	     else is skipped.  */
	  switch (op)
	    {
	    case EQ_EXPR:
	    case NE_EXPR:
	    case GT_EXPR:
	    case GE_EXPR:
	    case LT_EXPR:
	    case LE_EXPR:
	    case LTGT_EXPR:
	    case UNEQ_EXPR:
	    case UNGT_EXPR:
	    case UNGE_EXPR:
	    case UNLT_EXPR:
	    case UNLE_EXPR:
	    case ORDERED_EXPR:
	    case UNORDERED_EXPR:
	      cop = invert_tree_comparison (op,
					    HONOR_NANS
					    (gimple_assign_rhs1 (gs: asgn)));

	      /* ERROR_MARK means the comparison has no exact inverse,
		 so we can't build a reversed check.  */
	      if (cop == ERROR_MARK)
		/* ??? Can we do better? */
		continue;

	      break;

	      /* ??? Maybe handle these too? */
	    case TRUTH_NOT_EXPR:
	      /* ??? The code below assumes binary ops, it would have to
		 be adjusted for TRUTH_NOT_EXPR, since it's unary.  */
	    case TRUTH_ANDIF_EXPR:
	    case TRUTH_ORIF_EXPR:
	    case TRUTH_AND_EXPR:
	    case TRUTH_OR_EXPR:
	    case TRUTH_XOR_EXPR:
	    default:
	      continue;
	    }

	  /* These are the operands for the verification.  */
	  tree lhs = gimple_assign_lhs (gs: asgn);
	  tree op1 = gimple_assign_rhs1 (gs: asgn);
	  tree op2 = gimple_assign_rhs2 (gs: asgn);
	  location_t loc = gimple_location (g: asgn);

	  /* Vector booleans can't be used in conditional branches.  ???
	     Can we do better?  How to reduce compare and
	     reversed-compare result vectors to a single boolean?  */
	  if (VECTOR_TYPE_P (TREE_TYPE (op1)))
	    continue;

	  /* useless_type_conversion_p enables conversions from 1-bit
	     integer types to boolean to be discarded.  */
	  gcc_checking_assert (TREE_CODE (TREE_TYPE (lhs)) == BOOLEAN_TYPE
			       || (INTEGRAL_TYPE_P (TREE_TYPE (lhs))
				   && TYPE_PRECISION (TREE_TYPE (lhs)) == 1));

	  /* RHS will hold the result of the reversed compare, to be
	     tested for equality with LHS.  */
	  tree rhs = copy_ssa_name (var: lhs);

	  /* Detach the values before the compares, so that the
	     compiler infers nothing from them, not even from a
	     throwing compare that didn't throw.  */
	  bool same_p = (op1 == op2);
	  op1 = detach_value (loc, gsip: &gsi, val: op1);
	  op2 = same_p ? op1 : detach_value (loc, gsip: &gsi, val: op2);

	  gimple_stmt_iterator gsi_split = gsi;
	  /* Don't separate the original assignment from debug stmts
	     that might be associated with it, and arrange to split the
	     block after debug stmts, so as to make sure the split block
	     won't be debug stmts only.  */
	  gsi_next_nondebug (i: &gsi_split);

	  /* With -fnon-call-exceptions, a compare may throw; such a
	     stmt must end its block, so the check has to go in a new
	     block reached on the non-EH path.  */
	  bool throwing_compare_p = stmt_ends_bb_p (asgn);
	  if (throwing_compare_p)
	    {
	      basic_block nbb = split_edge (non_eh_succ_edge
					    (bb: gimple_bb (g: asgn)));
	      gsi_split = gsi_start_bb (bb: nbb);

	      if (dump_file)
		fprintf (stream: dump_file,
			 format: "Splitting non-EH edge from block %i into %i"
			 " after a throwing compare\n",
			 gimple_bb (g: asgn)->index, nbb->index);
	    }

	  /* Insert the reversed compare RHS = OP1 cop OP2.  */
	  gassign *asgnck = gimple_build_assign (rhs, cop, op1, op2);
	  gimple_set_location (g: asgnck, location: loc);
	  gsi_insert_before (&gsi_split, asgnck, GSI_SAME_STMT);

	  /* We wish to insert a cond_expr after the compare, so arrange
	     for it to be at the end of a block if it isn't, and for it
	     to have a single successor in case there's more than
	     one, as in PR104975.  */
	  if (!gsi_end_p (i: gsi_split)
	      || !single_succ_p (bb: gsi_bb (i: gsi_split)))
	    {
	      if (!gsi_end_p (i: gsi_split))
		gsi_prev (i: &gsi_split);
	      else
		gsi_split = gsi_last_bb (bb: gsi_bb (i: gsi_split));
	      basic_block obb = gsi_bb (i: gsi_split);
	      basic_block nbb = split_block (obb, gsi_stmt (i: gsi_split))->dest;
	      gsi_next (i: &gsi_split);
	      gcc_checking_assert (gsi_end_p (gsi_split));

	      single_succ_edge (bb)->goto_locus = loc;

	      if (dump_file)
		fprintf (stream: dump_file,
			 format: "Splitting block %i into %i"
			 " before the conditional trap branch\n",
			 obb->index, nbb->index);
	    }

	  /* If the check assignment must end a basic block, we can't
	     insert the conditional branch in the same block, so split
	     the block again, and prepare to insert the conditional
	     branch in the new block.

	     Also assign an EH region to the compare.  Even though it's
	     unlikely that the hardening compare will throw after the
	     original compare didn't, the compiler won't even know that
	     it's the same compare operands, so add the EH edge anyway.  */
	  if (throwing_compare_p)
	    {
	      add_stmt_to_eh_lp (asgnck, lookup_stmt_eh_lp (asgn));
	      edge eh = make_eh_edge (asgnck);
	      /* This compare looks like it could raise an exception,
		 but it's dominated by the original compare, that
		 would raise an exception first, so the EH edge from
		 this one is never really taken.  */
	      eh->probability = profile_probability::never ();
	      if (eh->dest->count.initialized_p ())
		eh->dest->count += eh->count ();
	      else
		eh->dest->count = eh->count ();

	      edge ckeh;
	      basic_block nbb = split_edge (non_eh_succ_edge
					    (bb: gimple_bb (g: asgnck), ehp: &ckeh));
	      gcc_checking_assert (eh == ckeh);
	      gsi_split = gsi_start_bb (bb: nbb);

	      if (dump_file)
		fprintf (stream: dump_file,
			 format: "Splitting non-EH edge from block %i into %i after"
			 " the newly-inserted reversed throwing compare\n",
			 gimple_bb (g: asgnck)->index, nbb->index);

	      /* The EH landing pad may have PHI nodes expecting an
		 argument from the original compare's block; give them
		 matching arguments for the new EH edge as well.  */
	      if (!gimple_seq_empty_p (s: phi_nodes (bb: ckeh->dest)))
		{
		  edge aseh;
		  non_eh_succ_edge (bb: gimple_bb (g: asgn), ehp: &aseh);

		  gcc_checking_assert (aseh->dest == ckeh->dest);

		  for (gphi_iterator psi = gsi_start_phis (ckeh->dest);
		       !gsi_end_p (i: psi); gsi_next (i: &psi))
		    {
		      gphi *phi = psi.phi ();
		      add_phi_arg (phi, PHI_ARG_DEF_FROM_EDGE (phi, aseh), ckeh,
				   gimple_phi_arg_location_from_edge (phi, e: aseh));
		    }

		  if (dump_file)
		    fprintf (stream: dump_file,
			     format: "Copying PHI args in EH block %i from %i to %i\n",
			     aseh->dest->index, aseh->src->index,
			     ckeh->src->index);
		}
	    }

	  gcc_checking_assert (single_succ_p (gsi_bb (gsi_split)));

	  /* Trap if the original and reversed compares agree: they
	     should always yield opposite results.  */
	  insert_check_and_trap (loc, gsip: &gsi_split, flags: EDGE_TRUE_VALUE,
				 cop: EQ_EXPR, lhs, rhs);
	}
    }

  return 0;
}
638 | |
639 | /* Instantiate a hardcmp pass. */ |
640 | |
641 | gimple_opt_pass * |
642 | make_pass_harden_compares (gcc::context *ctxt) |
643 | { |
644 | return new pass_harden_compares (ctxt); |
645 | } |
646 | |