/* Exception handling semantics and decomposition for trees.
   Copyright (C) 2003-2023 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "calls.h"
#include "except.h"
#include "cfganal.h"
#include "cfgcleanup.h"
#include "tree-eh.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "cfgloop.h"
#include "gimple-low.h"
#include "stringpool.h"
#include "attribs.h"
#include "asan.h"
#include "gimplify.h"

/* In some instances a tree and a gimple need to be stored in the same
   table, e.g. in hash tables.  This union makes that possible.  */
typedef union {tree *tp; tree t; gimple *g;} treemple;

/* Misc functions used in this file.  */

/* Remember and lookup EH landing pad data for arbitrary statements.
   Really this means any statement that could_throw_p.  We could
   stuff this information into the stmt_ann data structure, but:

   (1) We absolutely rely on this information being kept until
   we get to rtl.  Once we're done with lowering here, if we lose
   the information there's no way to recover it!

   (2) There are many more statements that *cannot* throw as
   compared to those that can.  We should be saving some amount
   of space by only allocating memory for those that can throw.  */

/* Add statement T in function IFUN to landing pad NUM.  */

static void
add_stmt_to_eh_lp_fn (struct function *ifun, gimple *t, int num)
{
  gcc_assert (num != 0);

  if (!get_eh_throw_stmt_table (ifun))
    set_eh_throw_stmt_table (ifun, hash_map<gimple *, int>::create_ggc (31));

  gcc_assert (!get_eh_throw_stmt_table (ifun)->put (t, num));
}

/* Add statement T in the current function (cfun) to EH landing pad NUM.  */

void
add_stmt_to_eh_lp (gimple *t, int num)
{
  add_stmt_to_eh_lp_fn (cfun, t, num);
}

/* Add statement T to the single EH landing pad in REGION.  */

static void
record_stmt_eh_region (eh_region region, gimple *t)
{
  if (region == NULL)
    return;
  if (region->type == ERT_MUST_NOT_THROW)
    add_stmt_to_eh_lp_fn (cfun, t, -region->index);
  else
    {
      eh_landing_pad lp = region->landing_pads;
      if (lp == NULL)
        lp = gen_eh_landing_pad (region);
      else
        gcc_assert (lp->next_lp == NULL);
      add_stmt_to_eh_lp_fn (cfun, t, lp->index);
    }
}


/* Remove statement T in function IFUN from its EH landing pad.  */

bool
remove_stmt_from_eh_lp_fn (struct function *ifun, gimple *t)
{
  if (!get_eh_throw_stmt_table (ifun))
    return false;

  if (!get_eh_throw_stmt_table (ifun)->get (t))
    return false;

  get_eh_throw_stmt_table (ifun)->remove (t);
  return true;
}


/* Remove statement T in the current function (cfun) from its
   EH landing pad.  */

bool
remove_stmt_from_eh_lp (gimple *t)
{
  return remove_stmt_from_eh_lp_fn (cfun, t);
}

/* Determine if statement T is inside an EH region in function IFUN.
   Positive numbers indicate a landing pad index; negative numbers
   indicate a MUST_NOT_THROW region index; zero indicates that the
   statement is not recorded in the region table.  */

int
lookup_stmt_eh_lp_fn (struct function *ifun, const gimple *t)
{
  if (ifun->eh->throw_stmt_table == NULL)
    return 0;

  int *lp_nr = ifun->eh->throw_stmt_table->get (const_cast <gimple *> (t));
  return lp_nr ? *lp_nr : 0;
}

/* Likewise, but always use the current function.  */

int
lookup_stmt_eh_lp (const gimple *t)
{
  /* We can get called from initialized data when -fnon-call-exceptions
     is on; prevent crash.  */
  if (!cfun)
    return 0;
  return lookup_stmt_eh_lp_fn (cfun, t);
}
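
/* For illustration only, a sketch of how a caller typically interprets
   the value returned above (the encoding is documented at
   lookup_stmt_eh_lp_fn; the surrounding code is hypothetical):

     int lp_nr = lookup_stmt_eh_lp (stmt);
     if (lp_nr > 0)
       ...  STMT may throw, transferring to landing pad LP_NR  ...
     else if (lp_nr < 0)
       ...  STMT lies within MUST_NOT_THROW region -LP_NR  ...
     else
       ...  STMT is not recorded and is not an EH transfer point  ...  */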

/* First pass of EH node decomposition.  Build up a tree of GIMPLE_TRY_FINALLY
   nodes and LABEL_DECL nodes.  We will use this during the second phase to
   determine if a goto leaves the body of a TRY_FINALLY_EXPR node.  */

struct finally_tree_node
{
  /* When storing a GIMPLE_TRY, we have to record a gimple.  However
     when deciding whether a GOTO to a certain LABEL_DECL (which is a
     tree) leaves the TRY block, it's necessary to record a tree in
     this field.  Thus a treemple is used.  */
  treemple child;
  gtry *parent;
};

/* Hashtable helpers.  */

struct finally_tree_hasher : free_ptr_hash <finally_tree_node>
{
  static inline hashval_t hash (const finally_tree_node *);
  static inline bool equal (const finally_tree_node *,
                            const finally_tree_node *);
};

inline hashval_t
finally_tree_hasher::hash (const finally_tree_node *v)
{
  return (intptr_t)v->child.t >> 4;
}

inline bool
finally_tree_hasher::equal (const finally_tree_node *v,
                            const finally_tree_node *c)
{
  return v->child.t == c->child.t;
}

/* Note that this table is *not* marked GTY.  It is short-lived.  */
static hash_table<finally_tree_hasher> *finally_tree;

static void
record_in_finally_tree (treemple child, gtry *parent)
{
  struct finally_tree_node *n;
  finally_tree_node **slot;

  n = XNEW (struct finally_tree_node);
  n->child = child;
  n->parent = parent;

  slot = finally_tree->find_slot (n, INSERT);
  gcc_assert (!*slot);
  *slot = n;
}

static void
collect_finally_tree (gimple *stmt, gtry *region);

/* Go through the gimple sequence.  Works with collect_finally_tree to
   record all GIMPLE_LABEL and GIMPLE_TRY statements.  */

static void
collect_finally_tree_1 (gimple_seq seq, gtry *region)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start (seq); !gsi_end_p (gsi); gsi_next (&gsi))
    collect_finally_tree (gsi_stmt (gsi), region);
}

static void
collect_finally_tree (gimple *stmt, gtry *region)
{
  treemple temp;

  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
      temp.t = gimple_label_label (as_a <glabel *> (stmt));
      record_in_finally_tree (temp, region);
      break;

    case GIMPLE_TRY:
      if (gimple_try_kind (stmt) == GIMPLE_TRY_FINALLY)
        {
          temp.g = stmt;
          record_in_finally_tree (temp, region);
          collect_finally_tree_1 (gimple_try_eval (stmt),
                                  as_a <gtry *> (stmt));
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      else if (gimple_try_kind (stmt) == GIMPLE_TRY_CATCH)
        {
          collect_finally_tree_1 (gimple_try_eval (stmt), region);
          collect_finally_tree_1 (gimple_try_cleanup (stmt), region);
        }
      break;

    case GIMPLE_CATCH:
      collect_finally_tree_1 (gimple_catch_handler (as_a <gcatch *> (stmt)),
                              region);
      break;

    case GIMPLE_EH_FILTER:
      collect_finally_tree_1 (gimple_eh_filter_failure (stmt), region);
      break;

    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        collect_finally_tree_1 (gimple_eh_else_n_body (eh_else_stmt), region);
        collect_finally_tree_1 (gimple_eh_else_e_body (eh_else_stmt), region);
      }
      break;

    default:
      /* A type, a decl, or some kind of statement that we're not
         interested in.  Don't walk them.  */
      break;
    }
}


/* Use the finally tree to determine if a jump from START to TARGET
   would leave the try_finally node that START lives in.  */

static bool
outside_finally_tree (treemple start, gimple *target)
{
  struct finally_tree_node n, *p;

  do
    {
      n.child = start;
      p = finally_tree->find (&n);
      if (!p)
        return true;
      start.g = p->parent;
    }
  while (start.g != target);

  return false;
}
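
/* For illustration only, a hypothetical sketch of when the walk above
   answers true.  Given nested try/finally statements

     try {              <-- T1
       try {            <-- T2
         L: ...
       } finally { ... }
     } finally { ... }

   a goto to L queried against TARGET == T2 finds that L's recorded
   parent is T2 and answers false (the jump stays inside), whereas a
   query against a try_finally not on L's parent chain walks off the
   root, finds no parent, and answers true (the jump escapes).  */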

/* Second pass of EH node decomposition.  Actually transform the GIMPLE_TRY
   nodes into a set of gotos, magic labels, and eh regions.
   The eh region creation is straight-forward, but frobbing all the gotos
   and such into shape isn't.  */

/* The sequence into which we record all EH stuff.  This will be
   placed at the end of the function when we're all done.  */
static gimple_seq eh_seq;

/* Record whether an EH region contains something that can throw,
   indexed by EH region number.  */
static bitmap eh_region_may_contain_throw_map;

/* The GOTO_QUEUE is an array of GIMPLE_GOTO and GIMPLE_RETURN
   statements that are seen to escape this GIMPLE_TRY_FINALLY node.
   The idea is to record a gimple statement for everything except for
   the conditionals, which get their labels recorded.  Since labels are
   of type 'tree', we need this node to store both gimple and tree
   objects.  REPL_STMT is the sequence used to replace the goto/return
   statement.  CONT_STMT is used to store the statement that allows
   the return/goto to jump to the original destination.  */

struct goto_queue_node
{
  treemple stmt;
  location_t location;
  gimple_seq repl_stmt;
  gimple *cont_stmt;
  int index;
  /* This is used when index >= 0 to indicate that stmt is a label (as
     opposed to a goto stmt).  */
  int is_label;
};
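
/* For illustration only, a hypothetical example of the rewrite the queue
   drives.  A return that escapes a try/finally, as in

     try { ... return; ... } finally { F; }

   is queued while the try body is lowered, and is later replaced by its
   REPL_STMT sequence (a goto to the finally code), with CONT_STMT (the
   original return) re-emitted after the finally block:

     ... goto FINALLY; ...
     FINALLY: F;
     return;  */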

/* State of the world while lowering.  */

struct leh_state
{
  /* What's "current" while constructing the eh region tree.  These
     correspond to variables of the same name in cfun->eh, which we
     don't have easy access to.  */
  eh_region cur_region;

  /* What's "current" for the purposes of __builtin_eh_pointer.  For
     a CATCH, this is the associated TRY.  For an EH_FILTER, this is
     the associated ALLOWED_EXCEPTIONS, etc.  */
  eh_region ehp_region;

  /* Processing of TRY_FINALLY requires a bit more state.  This is
     split out into a separate structure so that we don't have to
     copy so much when processing other nodes.  */
  struct leh_tf_state *tf;

  /* Outer non-cleanup region.  */
  eh_region outer_non_cleanup;
};

struct leh_tf_state
{
  /* Pointer to the GIMPLE_TRY_FINALLY node under discussion.  The
     try_finally_expr is the original GIMPLE_TRY_FINALLY.  We need to retain
     this so that outside_finally_tree can reliably reference the tree used
     in the collect_finally_tree data structures.  */
  gtry *try_finally_expr;
  gtry *top_p;

  /* While lowering, top_p is usually expanded into multiple statements;
     the following field stores them.  */
  gimple_seq top_p_seq;

  /* The state outside this try_finally node.  */
  struct leh_state *outer;

  /* The exception region created for it.  */
  eh_region region;

  /* The goto queue.  */
  struct goto_queue_node *goto_queue;
  size_t goto_queue_size;
  size_t goto_queue_active;

  /* Pointer map to help in searching goto_queue when it is large.  */
  hash_map<gimple *, goto_queue_node *> *goto_queue_map;

  /* The set of unique labels seen as entries in the goto queue.  */
  vec<tree> dest_array;

  /* A label to be added at the end of the completed transformed
     sequence.  It will be set if may_fallthru was true *at one time*,
     though subsequent transformations may have cleared that flag.  */
  tree fallthru_label;

  /* True if it is possible to fall out the bottom of the try block.
     Cleared if the fallthru is converted to a goto.  */
  bool may_fallthru;

  /* True if any entry in goto_queue is a GIMPLE_RETURN.  */
  bool may_return;

  /* True if the finally block can receive an exception edge.
     Cleared if the exception case is handled by code duplication.  */
  bool may_throw;
};

static gimple_seq lower_eh_must_not_throw (struct leh_state *, gtry *);

/* Search for STMT in the goto queue.  Return the replacement,
   or null if the statement isn't in the queue.  */

#define LARGE_GOTO_QUEUE 20

static void lower_eh_constructs_1 (struct leh_state *state, gimple_seq *seq);

static gimple_seq
find_goto_replacement (struct leh_tf_state *tf, treemple stmt)
{
  unsigned int i;

  if (tf->goto_queue_active < LARGE_GOTO_QUEUE)
    {
      for (i = 0; i < tf->goto_queue_active; i++)
        if (tf->goto_queue[i].stmt.g == stmt.g)
          return tf->goto_queue[i].repl_stmt;
      return NULL;
    }

  /* If we have a large number of entries in the goto_queue, create a
     pointer map and use that for searching.  */

  if (!tf->goto_queue_map)
    {
      tf->goto_queue_map = new hash_map<gimple *, goto_queue_node *>;
      for (i = 0; i < tf->goto_queue_active; i++)
        {
          bool existed = tf->goto_queue_map->put (tf->goto_queue[i].stmt.g,
                                                  &tf->goto_queue[i]);
          gcc_assert (!existed);
        }
    }

  goto_queue_node **slot = tf->goto_queue_map->get (stmt.g);
  if (slot != NULL)
    return ((*slot)->repl_stmt);

  return NULL;
}

/* A subroutine of replace_goto_queue_1.  Handles the sub-clauses of a
   lowered GIMPLE_COND.  If, by chance, the replacement is a simple goto,
   then we can just splat it in, otherwise we add the new stmts immediately
   after the GIMPLE_COND and redirect.  */

static void
replace_goto_queue_cond_clause (tree *tp, struct leh_tf_state *tf,
                                gimple_stmt_iterator *gsi)
{
  tree label;
  gimple_seq new_seq;
  treemple temp;
  location_t loc = gimple_location (gsi_stmt (*gsi));

  temp.tp = tp;
  new_seq = find_goto_replacement (tf, temp);
  if (!new_seq)
    return;

  if (gimple_seq_singleton_p (new_seq)
      && gimple_code (gimple_seq_first_stmt (new_seq)) == GIMPLE_GOTO)
    {
      *tp = gimple_goto_dest (gimple_seq_first_stmt (new_seq));
      return;
    }

  label = create_artificial_label (loc);
  /* Set the new label for the GIMPLE_COND.  */
  *tp = label;

  gsi_insert_after (gsi, gimple_build_label (label), GSI_CONTINUE_LINKING);
  gsi_insert_seq_after (gsi, gimple_seq_copy (new_seq), GSI_CONTINUE_LINKING);
}

/* The real work of replace_goto_queue.  Returns with TSI updated to
   point to the next statement.  */

static void replace_goto_queue_stmt_list (gimple_seq *, struct leh_tf_state *);

static void
replace_goto_queue_1 (gimple *stmt, struct leh_tf_state *tf,
                      gimple_stmt_iterator *gsi)
{
  gimple_seq seq;
  treemple temp;
  temp.g = NULL;

  switch (gimple_code (stmt))
    {
    case GIMPLE_GOTO:
    case GIMPLE_RETURN:
      temp.g = stmt;
      seq = find_goto_replacement (tf, temp);
      if (seq)
        {
          gimple_stmt_iterator i;
          seq = gimple_seq_copy (seq);
          for (i = gsi_start (seq); !gsi_end_p (i); gsi_next (&i))
            gimple_set_location (gsi_stmt (i), gimple_location (stmt));
          gsi_insert_seq_before (gsi, seq, GSI_SAME_STMT);
          gsi_remove (gsi, false);
          return;
        }
      break;

    case GIMPLE_COND:
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 2), tf, gsi);
      replace_goto_queue_cond_clause (gimple_op_ptr (stmt, 3), tf, gsi);
      break;

    case GIMPLE_TRY:
      replace_goto_queue_stmt_list (gimple_try_eval_ptr (stmt), tf);
      replace_goto_queue_stmt_list (gimple_try_cleanup_ptr (stmt), tf);
      break;
    case GIMPLE_CATCH:
      replace_goto_queue_stmt_list (gimple_catch_handler_ptr (
                                      as_a <gcatch *> (stmt)),
                                    tf);
      break;
    case GIMPLE_EH_FILTER:
      replace_goto_queue_stmt_list (gimple_eh_filter_failure_ptr (stmt), tf);
      break;
    case GIMPLE_EH_ELSE:
      {
        geh_else *eh_else_stmt = as_a <geh_else *> (stmt);
        replace_goto_queue_stmt_list (gimple_eh_else_n_body_ptr (eh_else_stmt),
                                      tf);
        replace_goto_queue_stmt_list (gimple_eh_else_e_body_ptr (eh_else_stmt),
                                      tf);
      }
      break;

    default:
      /* These won't have gotos in them.  */
      break;
    }

  gsi_next (gsi);
}

/* A subroutine of replace_goto_queue.  Handles GIMPLE_SEQ.  */

static void
replace_goto_queue_stmt_list (gimple_seq *seq, struct leh_tf_state *tf)
{
  gimple_stmt_iterator gsi = gsi_start (*seq);

  while (!gsi_end_p (gsi))
    replace_goto_queue_1 (gsi_stmt (gsi), tf, &gsi);
}

/* Replace all goto queue members.  */

static void
replace_goto_queue (struct leh_tf_state *tf)
{
  if (tf->goto_queue_active == 0)
    return;
  replace_goto_queue_stmt_list (&tf->top_p_seq, tf);
  replace_goto_queue_stmt_list (&eh_seq, tf);
}

/* Add a new record to the goto queue contained in TF.  NEW_STMT is the
   data to be added, IS_LABEL indicates whether NEW_STMT is a label or
   a gimple return.  */

static void
record_in_goto_queue (struct leh_tf_state *tf,
                      treemple new_stmt,
                      int index,
                      bool is_label,
                      location_t location)
{
  size_t active, size;
  struct goto_queue_node *q;

  gcc_assert (!tf->goto_queue_map);

  active = tf->goto_queue_active;
  size = tf->goto_queue_size;
  if (active >= size)
    {
      size = (size ? size * 2 : 32);
      tf->goto_queue_size = size;
      tf->goto_queue
        = XRESIZEVEC (struct goto_queue_node, tf->goto_queue, size);
    }

  q = &tf->goto_queue[active];
  tf->goto_queue_active = active + 1;

  memset (q, 0, sizeof (*q));
  q->stmt = new_stmt;
  q->index = index;
  q->location = location;
  q->is_label = is_label;
}

/* Record the LABEL label in the goto queue contained in TF.
   TF is not null.  */

static void
record_in_goto_queue_label (struct leh_tf_state *tf, treemple stmt, tree label,
                            location_t location)
{
  int index;
  treemple temp, new_stmt;

  if (!label)
    return;

  /* Computed and non-local gotos do not get processed.  Given
     their nature we can neither tell whether we've escaped the
     finally block nor redirect them if we knew.  */
  if (TREE_CODE (label) != LABEL_DECL)
    return;

  /* No need to record gotos that don't leave the try block.  */
  temp.t = label;
  if (!outside_finally_tree (temp, tf->try_finally_expr))
    return;

  if (! tf->dest_array.exists ())
    {
      tf->dest_array.create (10);
      tf->dest_array.quick_push (label);
      index = 0;
    }
  else
    {
      int n = tf->dest_array.length ();
      for (index = 0; index < n; ++index)
        if (tf->dest_array[index] == label)
          break;
      if (index == n)
        tf->dest_array.safe_push (label);
    }

  /* In the case of a GOTO we want to record the destination label,
     since with a GIMPLE_COND we have an easy access to the then/else
     labels.  */
  new_stmt = stmt;
  record_in_goto_queue (tf, new_stmt, index, true, location);
}

/* For any GIMPLE_GOTO or GIMPLE_RETURN, decide whether it leaves a try_finally
   node, and if so record that fact in the goto queue associated with that
   try_finally node.  */

static void
maybe_record_in_goto_queue (struct leh_state *state, gimple *stmt)
{
  struct leh_tf_state *tf = state->tf;
  treemple new_stmt;

  if (!tf)
    return;

  switch (gimple_code (stmt))
    {
    case GIMPLE_COND:
      {
        gcond *cond_stmt = as_a <gcond *> (stmt);
        new_stmt.tp = gimple_op_ptr (cond_stmt, 2);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_true_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
        new_stmt.tp = gimple_op_ptr (cond_stmt, 3);
        record_in_goto_queue_label (tf, new_stmt,
                                    gimple_cond_false_label (cond_stmt),
                                    EXPR_LOCATION (*new_stmt.tp));
      }
      break;
    case GIMPLE_GOTO:
      new_stmt.g = stmt;
      record_in_goto_queue_label (tf, new_stmt, gimple_goto_dest (stmt),
                                  gimple_location (stmt));
      break;

    case GIMPLE_RETURN:
      tf->may_return = true;
      new_stmt.g = stmt;
      record_in_goto_queue (tf, new_stmt, -1, false, gimple_location (stmt));
      break;

    default:
      gcc_unreachable ();
    }
}


#if CHECKING_P
/* We do not process GIMPLE_SWITCHes for now.  As long as the original source
   was in fact structured, and we've not yet done jump threading, then none
   of the labels will leave outer GIMPLE_TRY_FINALLY nodes.  Verify this.  */

static void
verify_norecord_switch_expr (struct leh_state *state,
                             gswitch *switch_expr)
{
  struct leh_tf_state *tf = state->tf;
  size_t i, n;

  if (!tf)
    return;

  n = gimple_switch_num_labels (switch_expr);

  for (i = 0; i < n; ++i)
    {
      treemple temp;
      tree lab = CASE_LABEL (gimple_switch_label (switch_expr, i));
      temp.t = lab;
      gcc_assert (!outside_finally_tree (temp, tf->try_finally_expr));
    }
}
#else
#define verify_norecord_switch_expr(state, switch_expr)
#endif

/* Redirect a RETURN_EXPR pointed to by Q to FINLAB.  If MOD is
   non-null, insert it before the new branch.  */

static void
do_return_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod)
{
  gimple *x;

  /* In the case of a return, the queue node must be a gimple statement.  */
  gcc_assert (!q->is_label);

  /* Note that the return value may have already been computed, e.g.,

        int x;
        int foo (void)
        {
          x = 0;
          try {
            return x;
          } finally {
            x++;
          }
        }

     should return 0, not 1.  We don't have to do anything to make
     this happen because the return value has been placed in the
     RESULT_DECL already.  */

  q->cont_stmt = q->stmt.g;

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Similar, but easier, for GIMPLE_GOTO.  */

static void
do_goto_redirection (struct goto_queue_node *q, tree finlab, gimple_seq mod,
                     struct leh_tf_state *tf)
{
  ggoto *x;

  gcc_assert (q->is_label);

  q->cont_stmt = gimple_build_goto (tf->dest_array[q->index]);

  if (mod)
    gimple_seq_add_seq (&q->repl_stmt, mod);

  x = gimple_build_goto (finlab);
  gimple_set_location (x, q->location);
  gimple_seq_add_stmt (&q->repl_stmt, x);
}

/* Emit a standard landing pad sequence into SEQ for REGION.  */

static void
emit_post_landing_pad (gimple_seq *seq, eh_region region)
{
  eh_landing_pad lp = region->landing_pads;
  glabel *x;

  if (lp == NULL)
    lp = gen_eh_landing_pad (region);

  lp->post_landing_pad = create_artificial_label (UNKNOWN_LOCATION);
  EH_LANDING_PAD_NR (lp->post_landing_pad) = lp->index;

  x = gimple_build_label (lp->post_landing_pad);
  gimple_seq_add_stmt (seq, x);
}

/* Emit a RESX statement into SEQ for REGION.  */

static void
emit_resx (gimple_seq *seq, eh_region region)
{
  gresx *x = gimple_build_resx (region->index);
  gimple_seq_add_stmt (seq, x);
  if (region->outer)
    record_stmt_eh_region (region->outer, x);
}

/* Note that the current EH region may contain a throw, or a
   call to a function which itself may contain a throw.  */

static void
note_eh_region_may_contain_throw (eh_region region)
{
  while (bitmap_set_bit (eh_region_may_contain_throw_map, region->index))
    {
      if (region->type == ERT_MUST_NOT_THROW)
        break;
      region = region->outer;
      if (region == NULL)
        break;
    }
}
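
/* For illustration only: with regions R3 nested in R2 nested in R1,
   noting a throw in R3 sets the bits for R3, R2 and R1 in turn.  The
   walk stops early either at a region whose bit is already set
   (bitmap_set_bit returns false then, and everything above it is
   already marked) or at a MUST_NOT_THROW region, past which an
   exception cannot propagate.  */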

/* Check if REGION has been marked as containing a throw.  If REGION is
   NULL, this predicate is false.  */

static inline bool
eh_region_may_contain_throw (eh_region r)
{
  return r && bitmap_bit_p (eh_region_may_contain_throw_map, r->index);
}

/* We want to transform
        try { body; } catch { stuff; }
   to
        normal_sequence:
          body;
          over:
        eh_sequence:
          landing_pad:
          stuff;
          goto over;

   TP is a GIMPLE_TRY node.  REGION is the region whose post_landing_pad
   should be placed before the second operand, or NULL.  OVER is
   an existing label that should be put at the exit, or NULL.  */

static gimple_seq
frob_into_branch_around (gtry *tp, eh_region region, tree over)
{
  gimple *x;
  gimple_seq cleanup, result;
  location_t loc = gimple_location (tp);

  cleanup = gimple_try_cleanup (tp);
  result = gimple_try_eval (tp);

  if (region)
    emit_post_landing_pad (&eh_seq, region);

  if (gimple_seq_may_fallthru (cleanup))
    {
      if (!over)
        over = create_artificial_label (loc);
      x = gimple_build_goto (over);
      gimple_set_location (x, loc);
      gimple_seq_add_stmt (&cleanup, x);
    }
  gimple_seq_add_seq (&eh_seq, cleanup);

  if (over)
    {
      x = gimple_build_label (over);
      gimple_seq_add_stmt (&result, x);
    }
  return result;
}

/* A subroutine of lower_try_finally.  Duplicate the tree rooted at T.
   Make sure to record all new labels found.  */

static gimple_seq
lower_try_finally_dup_block (gimple_seq seq, struct leh_state *outer_state,
                             location_t loc)
{
  gtry *region = NULL;
  gimple_seq new_seq;
  gimple_stmt_iterator gsi;

  new_seq = copy_gimple_seq_and_replace_locals (seq);

  for (gsi = gsi_start (new_seq); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, loc);
          gimple_set_block (stmt, block);
        }
    }

  if (outer_state->tf)
    region = outer_state->tf->try_finally_expr;
  collect_finally_tree_1 (new_seq, region);

  return new_seq;
}

/* A subroutine of lower_try_finally.  Create a fallthru label for
   the given try_finally state.  The only tricky bit here is that
   we have to make sure to record the label in our outer context.  */

static tree
lower_try_finally_fallthru_label (struct leh_tf_state *tf)
{
  tree label = tf->fallthru_label;
  treemple temp;

  if (!label)
    {
      label = create_artificial_label (gimple_location (tf->try_finally_expr));
      tf->fallthru_label = label;
      if (tf->outer->tf)
        {
          temp.t = label;
          record_in_finally_tree (temp, tf->outer->tf->try_finally_expr);
        }
    }
  return label;
}

/* A subroutine of lower_try_finally.  If FINALLY consists of a
   GIMPLE_EH_ELSE node, return it.  */

static inline geh_else *
get_eh_else (gimple_seq finally)
{
  gimple *x = gimple_seq_first_stmt (finally);
  if (gimple_code (x) == GIMPLE_EH_ELSE)
    {
      gcc_assert (gimple_seq_singleton_p (finally));
      return as_a <geh_else *> (x);
    }
  return NULL;
}
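
/* For illustration only: a GIMPLE_EH_ELSE cleanup behaves roughly like

     try { BODY; } finally {
       if (this is the exception path) E_BODY; else N_BODY;
     }

   with the N (normal) body run on fallthru/goto/return exits and the
   E (exception) body run when unwinding; the lowering routines below
   split the two paths apart statically instead of testing at runtime.  */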

/* A subroutine of lower_try_finally.  If the eh_protect_cleanup_actions
   langhook returns non-null, then the language requires that the exception
   path out of a try_finally be treated specially.  To wit: the code within
   the finally block may not itself throw an exception.  We have two choices
   here.  First we can duplicate the finally block and wrap it in a
   must_not_throw region.  Second, we can generate code like

        try {
          finally_block;
        } catch {
          if (fintmp == eh_edge)
            protect_cleanup_actions;
        }

   where "fintmp" is the temporary used in the switch statement generation
   alternative considered below.  For the nonce, we always choose the first
   option.

   THIS_STATE may be null if this is a try-cleanup, not a try-finally.  */

static void
honor_protect_cleanup_actions (struct leh_state *outer_state,
                               struct leh_state *this_state,
                               struct leh_tf_state *tf)
{
  gimple_seq finally = gimple_try_cleanup (tf->top_p);

  /* EH_ELSE doesn't come from user code; only compiler generated stuff.
     It does need to be handled here, so as to separate the (different)
     EH path from the normal path.  But we should not attempt to wrap
     it with a must-not-throw node (which indeed gets in the way).  */
  if (geh_else *eh_else = get_eh_else (finally))
    {
      gimple_try_set_cleanup (tf->top_p, gimple_eh_else_n_body (eh_else));
      finally = gimple_eh_else_e_body (eh_else);

      /* Let the ELSE see the exception that's being processed, but
         since the cleanup is outside the try block, process it with
         outer_state, otherwise it may be used as a cleanup for
         itself, and Bad Things (TM) ensue.  */
      eh_region save_ehp = outer_state->ehp_region;
      outer_state->ehp_region = this_state->cur_region;
      lower_eh_constructs_1 (outer_state, &finally);
      outer_state->ehp_region = save_ehp;
    }
  else
    {
      /* First check for nothing to do.  */
      if (lang_hooks.eh_protect_cleanup_actions == NULL)
        return;
      tree actions = lang_hooks.eh_protect_cleanup_actions ();
      if (actions == NULL)
        return;

      if (this_state)
        finally = lower_try_finally_dup_block (finally, outer_state,
                                               gimple_location (tf->try_finally_expr));

      /* If this cleanup consists of a TRY_CATCH_EXPR with TRY_CATCH_IS_CLEANUP
         set, the handler of the TRY_CATCH_EXPR is another cleanup which ought
         to be in an enclosing scope, but needs to be implemented at this level
         to avoid a nesting violation (see wrap_temporary_cleanups in
         cp/decl.cc).  Since it's logically at an outer level, we should call
         terminate before we get to it, so strip it away before adding the
         MUST_NOT_THROW filter.  */
      gimple_stmt_iterator gsi = gsi_start (finally);
      gimple *x = gsi_stmt (gsi);
      if (gimple_code (x) == GIMPLE_TRY
          && gimple_try_kind (x) == GIMPLE_TRY_CATCH
          && gimple_try_catch_is_cleanup (x))
        {
          gsi_insert_seq_before (&gsi, gimple_try_eval (x), GSI_SAME_STMT);
          gsi_remove (&gsi, false);
        }

      /* Wrap the block with protect_cleanup_actions as the action.  */
      geh_mnt *eh_mnt = gimple_build_eh_must_not_throw (actions);
      gtry *try_stmt = gimple_build_try (finally,
                                         gimple_seq_alloc_with_stmt (eh_mnt),
                                         GIMPLE_TRY_CATCH);
      finally = lower_eh_must_not_throw (outer_state, try_stmt);
    }

  /* Drop all of this into the exception sequence.  */
  emit_post_landing_pad (&eh_seq, tf->region);
  gimple_seq_add_seq (&eh_seq, finally);
  if (gimple_seq_may_fallthru (finally))
    emit_resx (&eh_seq, tf->region);

  /* Having now been handled, EH isn't to be considered with
     the rest of the outgoing edges.  */
  tf->may_throw = false;
}

/* A subroutine of lower_try_finally.  We have determined that there is
   no fallthru edge out of the finally block.  This means that there is
   no outgoing edge corresponding to any incoming edge.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_nofallthru (struct leh_state *state,
                              struct leh_tf_state *tf)
{
  tree lab;
  gimple *x;
  geh_else *eh_else;
  gimple_seq finally;
  struct goto_queue_node *q, *qe;

  lab = create_artificial_label (gimple_location (tf->try_finally_expr));

  /* We expect that tf->top_p is a GIMPLE_TRY.  */
  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  x = gimple_build_label (lab);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  for (; q < qe; ++q)
    if (q->index < 0)
      do_return_redirection (q, lab, NULL);
    else
      do_goto_redirection (q, lab, NULL, tf);

  replace_goto_queue (tf);

  /* Emit the finally block into the stream.  Lower EH_ELSE at this time.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      finally = gimple_eh_else_n_body (eh_else);
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
        }
    }
  else
    {
      lower_eh_constructs_1 (state, &finally);
      gimple_seq_add_seq (&tf->top_p_seq, finally);

      if (tf->may_throw)
        {
          emit_post_landing_pad (&eh_seq, tf->region);

          x = gimple_build_goto (lab);
          gimple_set_location (x, gimple_location (tf->try_finally_expr));
          gimple_seq_add_stmt (&eh_seq, x);
        }
    }
}
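
/* For illustration only, a hypothetical sketch of the nofallthru shape.
   Given

     try { body; return; } finally { F; }

   where F cannot fall through (say it ends in a noreturn call), all
   escaping edges are redirected to a single label ahead of one inline
   copy of F:

     body; goto L;
     L: F;

   and if the region may throw, the EH path simply becomes

     <post landing pad>: goto L;

   so the one copy of F serves every incoming edge.  */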

/* A subroutine of lower_try_finally.  We have determined that there is
   exactly one destination of the finally block.  Restructure the
   try_finally node for this special case.  */

static void
lower_try_finally_onedest (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  geh_else *eh_else;
  glabel *label_stmt;
  gimple *x;
  gimple_seq finally;
  gimple_stmt_iterator gsi;
  tree finally_label;
  location_t loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* Since there's only one destination, and the destination edge can only
     either be EH or non-EH, that implies that all of our incoming edges
     are of the same type.  Therefore we can lower EH_ELSE immediately.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    {
      if (tf->may_throw)
        finally = gimple_eh_else_e_body (eh_else);
      else
        finally = gimple_eh_else_n_body (eh_else);
    }

  lower_eh_constructs_1 (state, &finally);

  for (gsi = gsi_start (finally); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);
      if (LOCATION_LOCUS (gimple_location (stmt)) == UNKNOWN_LOCATION)
        {
          tree block = gimple_block (stmt);
          gimple_set_location (stmt, gimple_location (tf->try_finally_expr));
          gimple_set_block (stmt, block);
        }
    }

  if (tf->may_throw)
    {
      /* Only reachable via the exception edge.  Add the given label to
         the head of the FINALLY block.  Append a RESX at the end.  */
      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, finally);
      emit_resx (&eh_seq, tf->region);
      return;
    }

  if (tf->may_fallthru)
    {
      /* Only reachable via the fallthru edge.  Do nothing but let
         the two blocks run together; we'll fall out the bottom.  */
      gimple_seq_add_seq (&tf->top_p_seq, finally);
      return;
    }

  finally_label = create_artificial_label (loc);
  label_stmt = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, label_stmt);

  gimple_seq_add_seq (&tf->top_p_seq, finally);

  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;

  if (tf->may_return)
    {
      /* Reachable by return expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_return_redirection (q, finally_label, NULL);
      replace_goto_queue (tf);
    }
  else
    {
      /* Reachable by goto expressions only.  Redirect them.  */
      for (; q < qe; ++q)
        do_goto_redirection (q, finally_label, NULL, tf);
      replace_goto_queue (tf);

      if (tf->dest_array[0] == tf->fallthru_label)
        {
          /* Reachable by goto to fallthru label only.  Redirect it
             to the new label (already created, sadly), and do not
             emit the final branch out, or the fallthru label.  */
          tf->fallthru_label = NULL;
          return;
        }
    }

  /* Place the original return/goto to the original destination
     immediately after the finally block.  */
  x = tf->goto_queue[0].cont_stmt;
  gimple_seq_add_stmt (&tf->top_p_seq, x);
  maybe_record_in_goto_queue (state, x);
}
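
/* For illustration only, a hypothetical sketch of the onedest shape.
   When every escaping edge is, say, the same return,

     try { ... return; ... } finally { F; }

   becomes

     ... goto FINALLY; ...
     FINALLY: F;
     return;

   with one inline copy of F and the single continuation re-emitted
   right after it.  */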

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by duplicating the
   finally block for every destination.  */

static void
lower_try_finally_copy (struct leh_state *state, struct leh_tf_state *tf)
{
  gimple_seq finally;
  gimple_seq new_stmt;
  gimple_seq seq;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  location_t tf_loc = gimple_location (tf->try_finally_expr);

  finally = gimple_try_cleanup (tf->top_p);

  /* Notice EH_ELSE, and simplify some of the remaining code
     by considering FINALLY to be the normal return path only.  */
  eh_else = get_eh_else (finally);
  if (eh_else)
    finally = gimple_eh_else_n_body (eh_else);

  tf->top_p_seq = gimple_try_eval (tf->top_p);
  new_stmt = NULL;

  if (tf->may_fallthru)
    {
      seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);
      gimple_seq_add_seq (&new_stmt, seq);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&new_stmt, x);
    }

  if (tf->may_throw)
    {
      /* We don't need to copy the EH path of EH_ELSE,
         since it is only emitted once.  */
      if (eh_else)
        seq = gimple_eh_else_e_body (eh_else);
      else
        seq = lower_try_finally_dup_block (finally, state, tf_loc);
      lower_eh_constructs_1 (state, &seq);

      emit_post_landing_pad (&eh_seq, tf->region);
      gimple_seq_add_seq (&eh_seq, seq);
      emit_resx (&eh_seq, tf->region);
    }

  if (tf->goto_queue)
    {
      struct goto_queue_node *q, *qe;
      int return_index, index;
      struct labels_s
      {
        struct goto_queue_node *q;
        tree label;
      } *labels;

      return_index = tf->dest_array.length ();
      labels = XCNEWVEC (struct labels_s, return_index + 1);

      q = tf->goto_queue;
      qe = q + tf->goto_queue_active;
      for (; q < qe; q++)
        {
          index = q->index < 0 ? return_index : q->index;

          if (!labels[index].q)
            labels[index].q = q;
        }

      for (index = 0; index < return_index + 1; index++)
        {
          tree lab;

          q = labels[index].q;
          if (! q)
            continue;

          lab = labels[index].label
            = create_artificial_label (tf_loc);

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);

          x = gimple_build_label (lab);
          gimple_seq_add_stmt (&new_stmt, x);

          seq = lower_try_finally_dup_block (finally, state, q->location);
          lower_eh_constructs_1 (state, &seq);
          gimple_seq_add_seq (&new_stmt, seq);

          gimple_seq_add_stmt (&new_stmt, q->cont_stmt);
          maybe_record_in_goto_queue (state, q->cont_stmt);
        }

      for (q = tf->goto_queue; q < qe; q++)
        {
          tree lab;

          index = q->index < 0 ? return_index : q->index;

          if (labels[index].q == q)
            continue;

          lab = labels[index].label;

          if (index == return_index)
            do_return_redirection (q, lab, NULL);
          else
            do_goto_redirection (q, lab, NULL, tf);
        }

      replace_goto_queue (tf);
      free (labels);
    }

  /* Need to link new stmts after running replace_goto_queue due
     to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_seq (&tf->top_p_seq, new_stmt);
}
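
/* For illustration only, a hypothetical sketch of the copying strategy.
   With two escaping gotos,

     try { ... goto A; ... goto B; ... } finally { F; }

   each destination gets its own copy of F followed by the original jump:

     ... goto LA; ... goto LB; ...
     LA: F; goto A;
     LB: F; goto B;

   trading code size for straight-line control flow with no dispatch
   variable.  */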

/* A subroutine of lower_try_finally.  There are multiple edges incoming
   and outgoing from the finally block.  Implement this by instrumenting
   each incoming edge and creating a switch statement at the end of the
   finally block that branches to the appropriate destination.  */

static void
lower_try_finally_switch (struct leh_state *state, struct leh_tf_state *tf)
{
  struct goto_queue_node *q, *qe;
  tree finally_tmp, finally_label;
  int return_index, eh_index, fallthru_index;
  int nlabels, ndests, j, last_case_index;
  tree last_case;
  auto_vec<tree> case_label_vec;
  gimple_seq switch_body = NULL;
  gimple *x;
  geh_else *eh_else;
  tree tmp;
  gimple *switch_stmt;
  gimple_seq finally;
  hash_map<tree, gimple *> *cont_map = NULL;
  /* The location of the TRY_FINALLY stmt.  */
  location_t tf_loc = gimple_location (tf->try_finally_expr);
  /* The location of the finally block.  */
  location_t finally_loc;

  finally = gimple_try_cleanup (tf->top_p);
  eh_else = get_eh_else (finally);

  /* Mash the TRY block to the head of the chain.  */
  tf->top_p_seq = gimple_try_eval (tf->top_p);

  /* The location of the finally is either the last stmt in the finally
     block or the location of the TRY_FINALLY itself.  */
  x = gimple_seq_last_stmt (finally);
  finally_loc = x ? gimple_location (x) : tf_loc;

  /* Prepare for switch statement generation.  */
  nlabels = tf->dest_array.length ();
  return_index = nlabels;
  eh_index = return_index + tf->may_return;
  fallthru_index = eh_index + (tf->may_throw && !eh_else);
  ndests = fallthru_index + tf->may_fallthru;

  finally_tmp = create_tmp_var (integer_type_node, "finally_tmp");
  finally_label = create_artificial_label (finally_loc);

  /* We use vec::quick_push on case_label_vec throughout this function,
     since we know the size in advance and allocate precisely as much
     space as needed.  */
  case_label_vec.create (ndests);
  last_case = NULL;
  last_case_index = 0;

  /* Begin inserting code for getting to the finally block.  Things
     are done in this order to correspond to the sequence the code is
     laid out.  */

  if (tf->may_fallthru)
    {
      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node,
                                              fallthru_index));
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&tf->top_p_seq, x);

      tmp = build_int_cst (integer_type_node, fallthru_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (finally_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);

      tmp = lower_try_finally_fallthru_label (tf);
      x = gimple_build_goto (tmp);
      gimple_set_location (x, finally_loc);
      gimple_seq_add_stmt (&switch_body, x);
    }

  /* For EH_ELSE, emit the exception path (plus resx) now, then
     subsequently we only need consider the normal path.  */
  if (eh_else)
    {
      if (tf->may_throw)
        {
          finally = gimple_eh_else_e_body (eh_else);
          lower_eh_constructs_1 (state, &finally);

          emit_post_landing_pad (&eh_seq, tf->region);
          gimple_seq_add_seq (&eh_seq, finally);
          emit_resx (&eh_seq, tf->region);
        }

      finally = gimple_eh_else_n_body (eh_else);
    }
  else if (tf->may_throw)
    {
      emit_post_landing_pad (&eh_seq, tf->region);

      x = gimple_build_assign (finally_tmp,
                               build_int_cst (integer_type_node, eh_index));
      gimple_seq_add_stmt (&eh_seq, x);

      x = gimple_build_goto (finally_label);
      gimple_set_location (x, tf_loc);
      gimple_seq_add_stmt (&eh_seq, x);

      tmp = build_int_cst (integer_type_node, eh_index);
      last_case = build_case_label (tmp, NULL,
                                    create_artificial_label (tf_loc));
      case_label_vec.quick_push (last_case);
      last_case_index++;

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&eh_seq, x);
      emit_resx (&eh_seq, tf->region);
    }

  x = gimple_build_label (finally_label);
  gimple_seq_add_stmt (&tf->top_p_seq, x);

  lower_eh_constructs_1 (state, &finally);
  gimple_seq_add_seq (&tf->top_p_seq, finally);

  /* Redirect each incoming goto edge.  */
  q = tf->goto_queue;
  qe = q + tf->goto_queue_active;
  j = last_case_index + tf->may_return;
  /* Prepare the assignments to finally_tmp that are executed upon the
     entrance through a particular edge.  */
  for (; q < qe; ++q)
    {
      gimple_seq mod = NULL;
      int switch_id;
      unsigned int case_index;

      if (q->index < 0)
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node,
                                                  return_index));
          gimple_seq_add_stmt (&mod, x);
          do_return_redirection (q, finally_label, mod);
          switch_id = return_index;
        }
      else
        {
          x = gimple_build_assign (finally_tmp,
                                   build_int_cst (integer_type_node, q->index));
          gimple_seq_add_stmt (&mod, x);
          do_goto_redirection (q, finally_label, mod, tf);
          switch_id = q->index;
        }

      case_index = j + q->index;
      if (case_label_vec.length () <= case_index || !case_label_vec[case_index])
        {
          tree case_lab;
          tmp = build_int_cst (integer_type_node, switch_id);
          case_lab = build_case_label (tmp, NULL,
                                       create_artificial_label (tf_loc));
          /* We store the cont_stmt in the pointer map, so that we can recover
             it in the loop below.  */
          if (!cont_map)
            cont_map = new hash_map<tree, gimple *>;
          cont_map->put (case_lab, q->cont_stmt);
          case_label_vec.quick_push (case_lab);
        }
    }
  for (j = last_case_index; j < last_case_index + nlabels; j++)
    {
      gimple *cont_stmt;

      last_case = case_label_vec[j];

      gcc_assert (last_case);
      gcc_assert (cont_map);

      cont_stmt = *cont_map->get (last_case);

      x = gimple_build_label (CASE_LABEL (last_case));
      gimple_seq_add_stmt (&switch_body, x);
      gimple_seq_add_stmt (&switch_body, cont_stmt);
      maybe_record_in_goto_queue (state, cont_stmt);
    }
  if (cont_map)
    delete cont_map;

  replace_goto_queue (tf);

  /* Make sure that the last case is the default label, as one is required.
     Then sort the labels, which is also required in GIMPLE.  */
  CASE_LOW (last_case) = NULL;
  tree tem = case_label_vec.pop ();
  gcc_assert (tem == last_case);
  sort_case_labels (case_label_vec);

  /* Build the switch statement, setting last_case to be the default
     label.  */
  switch_stmt = gimple_build_switch (finally_tmp, last_case,
                                     case_label_vec);
  gimple_set_location (switch_stmt, finally_loc);

  /* Need to link SWITCH_STMT after running replace_goto_queue
     due to not wanting to process the same goto stmts twice.  */
  gimple_seq_add_stmt (&tf->top_p_seq, switch_stmt);
  gimple_seq_add_seq (&tf->top_p_seq, switch_body);
}
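
/* For illustration only, a hypothetical sketch of the switch strategy.
   With a return and a goto escaping,

     try { ... return; ... goto A; ... } finally { F; }

   each incoming edge records its destination in a temporary before
   entering the single copy of F, and a switch dispatches afterwards:

     ... finally_tmp = 1; goto FIN; ... finally_tmp = 0; goto FIN; ...
     FIN: F;
     switch (finally_tmp) { case 0: goto A; default: return; }

   trading a dispatch variable and branch for a single copy of F.  */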

/* Decide whether or not we are going to duplicate the finally block.
   The main consideration is that we'd like to prevent egregious code
   growth.  One way to do this is to estimate the size of the finally
   block, multiply that by the number of copies we'd need to make, and
   compare against the estimate of the size of the switch machinery
   we'd have to add.  */

1572 | static bool |
1573 | decide_copy_try_finally (int ndests, bool may_throw, gimple_seq finally) |
1574 | { |
1575 | int f_estimate, sw_estimate; |
1576 | geh_else *eh_else; |
1577 | |
1578 | /* If there's an EH_ELSE involved, the exception path is separate |
1579 | and really doesn't come into play for this computation. */ |
1580 | eh_else = get_eh_else (finally); |
1581 | if (eh_else) |
1582 | { |
1583 | ndests -= may_throw; |
1584 | finally = gimple_eh_else_n_body (eh_else_stmt: eh_else); |
1585 | } |
1586 | |
1587 | if (!optimize) |
1588 | { |
1589 | gimple_stmt_iterator gsi; |
1590 | |
1591 | if (ndests == 1) |
1592 | return true; |
1593 | |
1594 | for (gsi = gsi_start (seq&: finally); !gsi_end_p (i: gsi); gsi_next (i: &gsi)) |
1595 | { |
1596 | /* Duplicate __builtin_stack_restore in the hope of eliminating it |
1597 | on the EH paths and, consequently, useless cleanups. */ |
1598 | gimple *stmt = gsi_stmt (i: gsi); |
1599 | if (!is_gimple_debug (gs: stmt) |
1600 | && !gimple_clobber_p (s: stmt) |
1601 | && !gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) |
1602 | return false; |
1603 | } |
1604 | return true; |
1605 | } |
1606 | |
1607 | /* Finally estimate N times, plus N gotos. */ |
1608 | f_estimate = estimate_num_insns_seq (finally, &eni_size_weights); |
1609 | f_estimate = (f_estimate + 1) * ndests; |
1610 | |
1611 | /* Switch statement (cost 10), N variable assignments, N gotos. */ |
1612 | sw_estimate = 10 + 2 * ndests; |
1613 | |
  /* When optimizing for size, simply compare the two size estimates.  */
1615 | if (optimize_function_for_size_p (cfun)) |
1616 | return f_estimate < sw_estimate; |
1617 | |
1618 | /* ??? These numbers are completely made up so far. */ |
1619 | if (optimize > 1) |
1620 | return f_estimate < 100 || f_estimate < sw_estimate * 2; |
1621 | else |
1622 | return f_estimate < 40 || f_estimate * 2 < sw_estimate * 3; |
1623 | } |
1624 | |
/* STATE is the current leh_state; its outer_non_cleanup field is the
   enclosing region for a possible cleanup region, or the region itself
   when it is not a cleanup.  Returns TRUE if such a cleanup region
   would be unreachable.

   Cleanup regions within a must-not-throw region aren't actually reachable
   even if there are throwing stmts within them, because the personality
   routine will call terminate before unwinding.  */
1632 | |
1633 | static bool |
1634 | cleanup_is_dead_in (leh_state *state) |
1635 | { |
1636 | if (flag_checking) |
1637 | { |
1638 | eh_region reg = state->cur_region; |
1639 | while (reg && reg->type == ERT_CLEANUP) |
1640 | reg = reg->outer; |
1641 | |
1642 | gcc_assert (reg == state->outer_non_cleanup); |
1643 | } |
1644 | |
1645 | eh_region reg = state->outer_non_cleanup; |
1646 | return (reg && reg->type == ERT_MUST_NOT_THROW); |
1647 | } |
1648 | |
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_FINALLY node
   to a sequence of labels and blocks, plus the exception region trees
   that record all the magic.  This is complicated by the need to
   arrange for the FINALLY block to be executed on all exits.  */
1653 | |
1654 | static gimple_seq |
1655 | lower_try_finally (struct leh_state *state, gtry *tp) |
1656 | { |
1657 | struct leh_tf_state this_tf; |
1658 | struct leh_state this_state; |
1659 | int ndests; |
1660 | gimple_seq old_eh_seq; |
1661 | |
1662 | /* Process the try block. */ |
1663 | |
1664 | memset (s: &this_tf, c: 0, n: sizeof (this_tf)); |
1665 | this_tf.try_finally_expr = tp; |
1666 | this_tf.top_p = tp; |
1667 | this_tf.outer = state; |
1668 | if (using_eh_for_cleanups_p () && !cleanup_is_dead_in (state)) |
1669 | { |
1670 | this_tf.region = gen_eh_region_cleanup (state->cur_region); |
1671 | this_state.cur_region = this_tf.region; |
1672 | } |
1673 | else |
1674 | { |
1675 | this_tf.region = NULL; |
1676 | this_state.cur_region = state->cur_region; |
1677 | } |
1678 | |
1679 | this_state.outer_non_cleanup = state->outer_non_cleanup; |
1680 | this_state.ehp_region = state->ehp_region; |
1681 | this_state.tf = &this_tf; |
1682 | |
1683 | old_eh_seq = eh_seq; |
1684 | eh_seq = NULL; |
1685 | |
1686 | lower_eh_constructs_1 (state: &this_state, seq: gimple_try_eval_ptr (gs: tp)); |
1687 | |
  /* Determine whether the try block can fall through out the bottom.  */
1689 | this_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (gs: tp)); |
1690 | |
1691 | /* Determine if any exceptions are possible within the try block. */ |
1692 | if (this_tf.region) |
1693 | this_tf.may_throw = eh_region_may_contain_throw (r: this_tf.region); |
1694 | if (this_tf.may_throw) |
1695 | honor_protect_cleanup_actions (outer_state: state, this_state: &this_state, tf: &this_tf); |
1696 | |
1697 | /* Determine how many edges (still) reach the finally block. Or rather, |
1698 | how many destinations are reached by the finally block. Use this to |
1699 | determine how we process the finally block itself. */ |
1700 | |
1701 | ndests = this_tf.dest_array.length (); |
1702 | ndests += this_tf.may_fallthru; |
1703 | ndests += this_tf.may_return; |
1704 | ndests += this_tf.may_throw; |
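
  /* For example, "try { if (p) goto out; } finally { F (); }" inside an
     EH-active region gives dest_array = { out } with may_fallthru and
     may_throw both set, hence ndests == 3.  */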
1705 | |
1706 | /* If the FINALLY block is not reachable, dike it out. */ |
1707 | if (ndests == 0) |
1708 | { |
1709 | gimple_seq_add_seq (&this_tf.top_p_seq, gimple_try_eval (gs: tp)); |
1710 | gimple_try_set_cleanup (try_stmt: tp, NULL); |
1711 | } |
1712 | /* If the finally block doesn't fall through, then any destination |
1713 | we might try to impose there isn't reached either. There may be |
1714 | some minor amount of cleanup and redirection still needed. */ |
1715 | else if (!gimple_seq_may_fallthru (gimple_try_cleanup (gs: tp))) |
1716 | lower_try_finally_nofallthru (state, tf: &this_tf); |
1717 | |
1718 | /* We can easily special-case redirection to a single destination. */ |
1719 | else if (ndests == 1) |
1720 | lower_try_finally_onedest (state, tf: &this_tf); |
1721 | else if (decide_copy_try_finally (ndests, may_throw: this_tf.may_throw, |
1722 | finally: gimple_try_cleanup (gs: tp))) |
1723 | lower_try_finally_copy (state, tf: &this_tf); |
1724 | else |
1725 | lower_try_finally_switch (state, tf: &this_tf); |
1726 | |
1727 | /* If someone requested we add a label at the end of the transformed |
1728 | block, do so. */ |
1729 | if (this_tf.fallthru_label) |
1730 | { |
1731 | /* This must be reached only if ndests == 0. */ |
1732 | gimple *x = gimple_build_label (label: this_tf.fallthru_label); |
1733 | gimple_seq_add_stmt (&this_tf.top_p_seq, x); |
1734 | } |
1735 | |
1736 | this_tf.dest_array.release (); |
1737 | free (ptr: this_tf.goto_queue); |
1738 | if (this_tf.goto_queue_map) |
1739 | delete this_tf.goto_queue_map; |
1740 | |
1741 | /* If there was an old (aka outer) eh_seq, append the current eh_seq. |
1742 | If there was no old eh_seq, then the append is trivially already done. */ |
1743 | if (old_eh_seq) |
1744 | { |
1745 | if (eh_seq == NULL) |
1746 | eh_seq = old_eh_seq; |
1747 | else |
1748 | { |
1749 | gimple_seq new_eh_seq = eh_seq; |
1750 | eh_seq = old_eh_seq; |
1751 | gimple_seq_add_seq (&eh_seq, new_eh_seq); |
1752 | } |
1753 | } |
1754 | |
1755 | return this_tf.top_p_seq; |
1756 | } |
1757 | |
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY_CATCH with a
   list of GIMPLE_CATCH to a sequence of labels and blocks, plus the
   exception region trees that record all the magic.  */
1761 | |
1762 | static gimple_seq |
1763 | lower_catch (struct leh_state *state, gtry *tp) |
1764 | { |
1765 | eh_region try_region = NULL; |
1766 | struct leh_state this_state = *state; |
1767 | gimple_stmt_iterator gsi; |
1768 | tree out_label; |
1769 | gimple_seq new_seq, cleanup; |
1770 | gimple *x; |
1771 | geh_dispatch *eh_dispatch; |
1772 | location_t try_catch_loc = gimple_location (g: tp); |
1773 | location_t catch_loc = UNKNOWN_LOCATION; |
1774 | |
1775 | if (flag_exceptions) |
1776 | { |
1777 | try_region = gen_eh_region_try (state->cur_region); |
1778 | this_state.cur_region = try_region; |
1779 | this_state.outer_non_cleanup = this_state.cur_region; |
1780 | } |
1781 | |
1782 | lower_eh_constructs_1 (state: &this_state, seq: gimple_try_eval_ptr (gs: tp)); |
1783 | |
1784 | if (!eh_region_may_contain_throw (r: try_region)) |
1785 | return gimple_try_eval (gs: tp); |
1786 | |
1787 | new_seq = NULL; |
1788 | eh_dispatch = gimple_build_eh_dispatch (try_region->index); |
1789 | gimple_seq_add_stmt (&new_seq, eh_dispatch); |
1790 | emit_resx (seq: &new_seq, region: try_region); |
1791 | |
1792 | this_state.cur_region = state->cur_region; |
1793 | this_state.outer_non_cleanup = state->outer_non_cleanup; |
1794 | this_state.ehp_region = try_region; |
1795 | |
1796 | /* Add eh_seq from lowering EH in the cleanup sequence after the cleanup |
1797 | itself, so that e.g. for coverage purposes the nested cleanups don't |
1798 | appear before the cleanup body. See PR64634 for details. */ |
1799 | gimple_seq old_eh_seq = eh_seq; |
1800 | eh_seq = NULL; |
1801 | |
1802 | out_label = NULL; |
1803 | cleanup = gimple_try_cleanup (gs: tp); |
1804 | for (gsi = gsi_start (seq&: cleanup); |
1805 | !gsi_end_p (i: gsi); |
1806 | gsi_next (i: &gsi)) |
1807 | { |
1808 | eh_catch c; |
1809 | gcatch *catch_stmt; |
1810 | gimple_seq handler; |
1811 | |
1812 | catch_stmt = as_a <gcatch *> (p: gsi_stmt (i: gsi)); |
1813 | if (catch_loc == UNKNOWN_LOCATION) |
1814 | catch_loc = gimple_location (g: catch_stmt); |
1815 | c = gen_eh_region_catch (try_region, gimple_catch_types (catch_stmt)); |
1816 | |
1817 | handler = gimple_catch_handler (catch_stmt); |
1818 | lower_eh_constructs_1 (state: &this_state, seq: &handler); |
1819 | |
1820 | c->label = create_artificial_label (UNKNOWN_LOCATION); |
1821 | x = gimple_build_label (label: c->label); |
1822 | gimple_seq_add_stmt (&new_seq, x); |
1823 | |
1824 | gimple_seq_add_seq (&new_seq, handler); |
1825 | |
1826 | if (gimple_seq_may_fallthru (new_seq)) |
1827 | { |
1828 | if (!out_label) |
1829 | out_label = create_artificial_label (try_catch_loc); |
1830 | |
1831 | x = gimple_build_goto (dest: out_label); |
1832 | gimple_seq_add_stmt (&new_seq, x); |
1833 | } |
1834 | if (!c->type_list) |
1835 | break; |
1836 | } |
1837 | |
1838 | /* Try to set a location on the dispatching construct to avoid inheriting |
1839 | the location of the previous statement. */ |
1840 | gimple_set_location (g: eh_dispatch, location: catch_loc); |
1841 | |
1842 | gimple_try_set_cleanup (try_stmt: tp, cleanup: new_seq); |
1843 | |
1844 | gimple_seq new_eh_seq = eh_seq; |
1845 | eh_seq = old_eh_seq; |
1846 | gimple_seq ret_seq = frob_into_branch_around (tp, region: try_region, over: out_label); |
1847 | gimple_seq_add_seq (&eh_seq, new_eh_seq); |
1848 | return ret_seq; |
1849 | } |
1850 | |
1851 | /* A subroutine of lower_eh_constructs_1. Lower a GIMPLE_TRY with a |
1852 | GIMPLE_EH_FILTER to a sequence of labels and blocks, plus the exception |
1853 | region trees that record all the magic. */ |
1854 | |
1855 | static gimple_seq |
1856 | lower_eh_filter (struct leh_state *state, gtry *tp) |
1857 | { |
1858 | struct leh_state this_state = *state; |
1859 | eh_region this_region = NULL; |
1860 | gimple *inner, *x; |
1861 | gimple_seq new_seq; |
1862 | |
1863 | inner = gimple_seq_first_stmt (s: gimple_try_cleanup (gs: tp)); |
1864 | |
1865 | if (flag_exceptions) |
1866 | { |
1867 | this_region = gen_eh_region_allowed (state->cur_region, |
1868 | gimple_eh_filter_types (gs: inner)); |
1869 | this_state.cur_region = this_region; |
1870 | this_state.outer_non_cleanup = this_state.cur_region; |
1871 | } |
1872 | |
1873 | lower_eh_constructs_1 (state: &this_state, seq: gimple_try_eval_ptr (gs: tp)); |
1874 | |
1875 | if (!eh_region_may_contain_throw (r: this_region)) |
1876 | return gimple_try_eval (gs: tp); |
1877 | |
1878 | this_state.cur_region = state->cur_region; |
1879 | this_state.ehp_region = this_region; |
1880 | |
1881 | new_seq = NULL; |
1882 | x = gimple_build_eh_dispatch (this_region->index); |
1883 | gimple_set_location (g: x, location: gimple_location (g: tp)); |
1884 | gimple_seq_add_stmt (&new_seq, x); |
1885 | emit_resx (seq: &new_seq, region: this_region); |
1886 | |
1887 | this_region->u.allowed.label = create_artificial_label (UNKNOWN_LOCATION); |
1888 | x = gimple_build_label (label: this_region->u.allowed.label); |
1889 | gimple_seq_add_stmt (&new_seq, x); |
1890 | |
1891 | lower_eh_constructs_1 (state: &this_state, seq: gimple_eh_filter_failure_ptr (gs: inner)); |
1892 | gimple_seq_add_seq (&new_seq, gimple_eh_filter_failure (gs: inner)); |
1893 | |
1894 | gimple_try_set_cleanup (try_stmt: tp, cleanup: new_seq); |
1895 | |
1896 | return frob_into_branch_around (tp, region: this_region, NULL); |
1897 | } |
1898 | |
/* A subroutine of lower_eh_constructs_1.  Lower a GIMPLE_TRY with
   a GIMPLE_EH_MUST_NOT_THROW to a sequence of labels and blocks,
   plus the exception region trees that record all the magic.  */
1902 | |
1903 | static gimple_seq |
1904 | lower_eh_must_not_throw (struct leh_state *state, gtry *tp) |
1905 | { |
1906 | struct leh_state this_state = *state; |
1907 | |
1908 | if (flag_exceptions) |
1909 | { |
1910 | gimple *inner = gimple_seq_first_stmt (s: gimple_try_cleanup (gs: tp)); |
1911 | eh_region this_region; |
1912 | |
1913 | this_region = gen_eh_region_must_not_throw (state->cur_region); |
1914 | this_region->u.must_not_throw.failure_decl |
1915 | = gimple_eh_must_not_throw_fndecl ( |
1916 | eh_mnt_stmt: as_a <geh_mnt *> (p: inner)); |
1917 | this_region->u.must_not_throw.failure_loc |
1918 | = LOCATION_LOCUS (gimple_location (tp)); |
1919 | |
1920 | /* In order to get mangling applied to this decl, we must mark it |
1921 | used now. Otherwise, pass_ipa_free_lang_data won't think it |
1922 | needs to happen. */ |
1923 | TREE_USED (this_region->u.must_not_throw.failure_decl) = 1; |
1924 | |
1925 | this_state.cur_region = this_region; |
1926 | this_state.outer_non_cleanup = this_state.cur_region; |
1927 | } |
1928 | |
1929 | lower_eh_constructs_1 (state: &this_state, seq: gimple_try_eval_ptr (gs: tp)); |
1930 | |
1931 | return gimple_try_eval (gs: tp); |
1932 | } |
1933 | |
1934 | /* Implement a cleanup expression. This is similar to try-finally, |
1935 | except that we only execute the cleanup block for exception edges. */ |
1936 | |
1937 | static gimple_seq |
1938 | lower_cleanup (struct leh_state *state, gtry *tp) |
1939 | { |
1940 | struct leh_state this_state = *state; |
1941 | eh_region this_region = NULL; |
1942 | struct leh_tf_state fake_tf; |
1943 | gimple_seq result; |
1944 | bool cleanup_dead = cleanup_is_dead_in (state); |
1945 | |
1946 | if (flag_exceptions && !cleanup_dead) |
1947 | { |
1948 | this_region = gen_eh_region_cleanup (state->cur_region); |
1949 | this_state.cur_region = this_region; |
1950 | this_state.outer_non_cleanup = state->outer_non_cleanup; |
1951 | } |
1952 | |
1953 | lower_eh_constructs_1 (state: &this_state, seq: gimple_try_eval_ptr (gs: tp)); |
1954 | |
1955 | if (cleanup_dead || !eh_region_may_contain_throw (r: this_region)) |
1956 | return gimple_try_eval (gs: tp); |
1957 | |
1958 | /* Build enough of a try-finally state so that we can reuse |
1959 | honor_protect_cleanup_actions. */ |
1960 | memset (s: &fake_tf, c: 0, n: sizeof (fake_tf)); |
1961 | fake_tf.top_p = fake_tf.try_finally_expr = tp; |
1962 | fake_tf.outer = state; |
1963 | fake_tf.region = this_region; |
1964 | fake_tf.may_fallthru = gimple_seq_may_fallthru (gimple_try_eval (gs: tp)); |
1965 | fake_tf.may_throw = true; |
1966 | |
1967 | honor_protect_cleanup_actions (outer_state: state, NULL, tf: &fake_tf); |
1968 | |
1969 | if (fake_tf.may_throw) |
1970 | { |
1971 | /* In this case honor_protect_cleanup_actions had nothing to do, |
1972 | and we should process this normally. */ |
1973 | lower_eh_constructs_1 (state, seq: gimple_try_cleanup_ptr (gs: tp)); |
1974 | result = frob_into_branch_around (tp, region: this_region, |
1975 | over: fake_tf.fallthru_label); |
1976 | } |
1977 | else |
1978 | { |
1979 | /* In this case honor_protect_cleanup_actions did nearly all of |
1980 | the work. All we have left is to append the fallthru_label. */ |
1981 | |
1982 | result = gimple_try_eval (gs: tp); |
1983 | if (fake_tf.fallthru_label) |
1984 | { |
1985 | gimple *x = gimple_build_label (label: fake_tf.fallthru_label); |
1986 | gimple_seq_add_stmt (&result, x); |
1987 | } |
1988 | } |
1989 | return result; |
1990 | } |
1991 | |
1992 | /* Main loop for lowering eh constructs. Also moves gsi to the next |
1993 | statement. */ |
1994 | |
1995 | static void |
1996 | lower_eh_constructs_2 (struct leh_state *state, gimple_stmt_iterator *gsi) |
1997 | { |
1998 | gimple_seq replace; |
1999 | gimple *x; |
2000 | gimple *stmt = gsi_stmt (i: *gsi); |
2001 | |
2002 | switch (gimple_code (g: stmt)) |
2003 | { |
2004 | case GIMPLE_CALL: |
2005 | { |
2006 | tree fndecl = gimple_call_fndecl (gs: stmt); |
2007 | tree rhs, lhs; |
2008 | |
2009 | if (fndecl && fndecl_built_in_p (node: fndecl, klass: BUILT_IN_NORMAL)) |
2010 | switch (DECL_FUNCTION_CODE (decl: fndecl)) |
2011 | { |
2012 | case BUILT_IN_EH_POINTER: |
2013 | /* The front end may have generated a call to |
2014 | __builtin_eh_pointer (0) within a catch region. Replace |
2015 | this zero argument with the current catch region number. */ |
2016 | if (state->ehp_region) |
2017 | { |
2018 | tree nr = build_int_cst (integer_type_node, |
2019 | state->ehp_region->index); |
2020 | gimple_call_set_arg (gs: stmt, index: 0, arg: nr); |
2021 | } |
2022 | else |
2023 | { |
		/* The user has done something silly.  Remove it.  */
2025 | rhs = null_pointer_node; |
2026 | goto do_replace; |
2027 | } |
2028 | break; |
2029 | |
2030 | case BUILT_IN_EH_FILTER: |
2031 | /* ??? This should never appear, but since it's a builtin it |
2032 | is accessible to abuse by users. Just remove it and |
2033 | replace the use with the arbitrary value zero. */ |
2034 | rhs = build_int_cst (TREE_TYPE (TREE_TYPE (fndecl)), 0); |
2035 | do_replace: |
2036 | lhs = gimple_call_lhs (gs: stmt); |
2037 | x = gimple_build_assign (lhs, rhs); |
2038 | gsi_insert_before (gsi, x, GSI_SAME_STMT); |
2039 | /* FALLTHRU */ |
2040 | |
2041 | case BUILT_IN_EH_COPY_VALUES: |
2042 | /* Likewise this should not appear. Remove it. */ |
2043 | gsi_remove (gsi, true); |
2044 | return; |
2045 | |
2046 | default: |
2047 | break; |
2048 | } |
2049 | } |
2050 | /* FALLTHRU */ |
2051 | |
2052 | case GIMPLE_ASSIGN: |
2053 | /* If the stmt can throw, use a new temporary for the assignment |
2054 | to a LHS. This makes sure the old value of the LHS is |
2055 | available on the EH edge. Only do so for statements that |
	 potentially fall through (e.g. no noreturn calls), otherwise
2057 | this new assignment might create fake fallthru regions. */ |
2058 | if (stmt_could_throw_p (cfun, stmt) |
2059 | && gimple_has_lhs (stmt) |
2060 | && gimple_stmt_may_fallthru (stmt) |
2061 | && !tree_could_throw_p (gimple_get_lhs (stmt)) |
2062 | && is_gimple_reg_type (TREE_TYPE (gimple_get_lhs (stmt)))) |
2063 | { |
2064 | tree lhs = gimple_get_lhs (stmt); |
2065 | tree tmp = create_tmp_var (TREE_TYPE (lhs)); |
2066 | gimple *s = gimple_build_assign (lhs, tmp); |
2067 | gimple_set_location (g: s, location: gimple_location (g: stmt)); |
2068 | gimple_set_block (g: s, block: gimple_block (g: stmt)); |
2069 | gimple_set_lhs (stmt, tmp); |
2070 | gsi_insert_after (gsi, s, GSI_SAME_STMT); |
2071 | } |
2072 | /* Look for things that can throw exceptions, and record them. */ |
2073 | if (state->cur_region && stmt_could_throw_p (cfun, stmt)) |
2074 | { |
2075 | record_stmt_eh_region (region: state->cur_region, t: stmt); |
2076 | note_eh_region_may_contain_throw (region: state->cur_region); |
2077 | } |
2078 | break; |
2079 | |
2080 | case GIMPLE_COND: |
2081 | case GIMPLE_GOTO: |
2082 | case GIMPLE_RETURN: |
2083 | maybe_record_in_goto_queue (state, stmt); |
2084 | break; |
2085 | |
2086 | case GIMPLE_SWITCH: |
2087 | verify_norecord_switch_expr (state, switch_expr: as_a <gswitch *> (p: stmt)); |
2088 | break; |
2089 | |
2090 | case GIMPLE_TRY: |
2091 | { |
2092 | gtry *try_stmt = as_a <gtry *> (p: stmt); |
2093 | if (gimple_try_kind (gs: try_stmt) == GIMPLE_TRY_FINALLY) |
2094 | replace = lower_try_finally (state, tp: try_stmt); |
2095 | else |
2096 | { |
2097 | x = gimple_seq_first_stmt (s: gimple_try_cleanup (gs: try_stmt)); |
2098 | if (!x) |
2099 | { |
2100 | replace = gimple_try_eval (gs: try_stmt); |
2101 | lower_eh_constructs_1 (state, seq: &replace); |
2102 | } |
2103 | else |
2104 | switch (gimple_code (g: x)) |
2105 | { |
2106 | case GIMPLE_CATCH: |
2107 | replace = lower_catch (state, tp: try_stmt); |
2108 | break; |
2109 | case GIMPLE_EH_FILTER: |
2110 | replace = lower_eh_filter (state, tp: try_stmt); |
2111 | break; |
2112 | case GIMPLE_EH_MUST_NOT_THROW: |
2113 | replace = lower_eh_must_not_throw (state, tp: try_stmt); |
2114 | break; |
2115 | case GIMPLE_EH_ELSE: |
2116 | /* This code is only valid with GIMPLE_TRY_FINALLY. */ |
2117 | gcc_unreachable (); |
2118 | default: |
2119 | replace = lower_cleanup (state, tp: try_stmt); |
2120 | break; |
2121 | } |
2122 | } |
2123 | } |
2124 | |
2125 | /* Remove the old stmt and insert the transformed sequence |
2126 | instead. */ |
2127 | gsi_insert_seq_before (gsi, replace, GSI_SAME_STMT); |
2128 | gsi_remove (gsi, true); |
2129 | |
      /* Return, since gsi_remove has already advanced the iterator and
	 calling gsi_next () here would skip a statement.  */
2131 | return; |
2132 | |
2133 | case GIMPLE_EH_ELSE: |
2134 | /* We should be eliminating this in lower_try_finally et al. */ |
2135 | gcc_unreachable (); |
2136 | |
2137 | default: |
2138 | /* A type, a decl, or some kind of statement that we're not |
2139 | interested in. Don't walk them. */ |
2140 | break; |
2141 | } |
2142 | |
2143 | gsi_next (i: gsi); |
2144 | } |
2145 | |
2146 | /* A helper to unwrap a gimple_seq and feed stmts to lower_eh_constructs_2. */ |
2147 | |
2148 | static void |
2149 | lower_eh_constructs_1 (struct leh_state *state, gimple_seq *pseq) |
2150 | { |
2151 | gimple_stmt_iterator gsi; |
2152 | for (gsi = gsi_start (seq&: *pseq); !gsi_end_p (i: gsi);) |
2153 | lower_eh_constructs_2 (state, gsi: &gsi); |
2154 | } |
2155 | |
2156 | namespace { |
2157 | |
2158 | const pass_data pass_data_lower_eh = |
2159 | { |
2160 | .type: GIMPLE_PASS, /* type */ |
2161 | .name: "eh" , /* name */ |
2162 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
2163 | .tv_id: TV_TREE_EH, /* tv_id */ |
2164 | PROP_gimple_lcf, /* properties_required */ |
2165 | PROP_gimple_leh, /* properties_provided */ |
2166 | .properties_destroyed: 0, /* properties_destroyed */ |
2167 | .todo_flags_start: 0, /* todo_flags_start */ |
2168 | .todo_flags_finish: 0, /* todo_flags_finish */ |
2169 | }; |
2170 | |
2171 | class pass_lower_eh : public gimple_opt_pass |
2172 | { |
2173 | public: |
2174 | pass_lower_eh (gcc::context *ctxt) |
2175 | : gimple_opt_pass (pass_data_lower_eh, ctxt) |
2176 | {} |
2177 | |
2178 | /* opt_pass methods: */ |
2179 | unsigned int execute (function *) final override; |
2180 | |
2181 | }; // class pass_lower_eh |
2182 | |
2183 | unsigned int |
2184 | pass_lower_eh::execute (function *fun) |
2185 | { |
2186 | struct leh_state null_state; |
2187 | gimple_seq bodyp; |
2188 | |
2189 | bodyp = gimple_body (current_function_decl); |
2190 | if (bodyp == NULL) |
2191 | return 0; |
2192 | |
2193 | finally_tree = new hash_table<finally_tree_hasher> (31); |
2194 | eh_region_may_contain_throw_map = BITMAP_ALLOC (NULL); |
2195 | memset (s: &null_state, c: 0, n: sizeof (null_state)); |
2196 | |
2197 | collect_finally_tree_1 (seq: bodyp, NULL); |
2198 | lower_eh_constructs_1 (state: &null_state, pseq: &bodyp); |
2199 | gimple_set_body (current_function_decl, bodyp); |
2200 | |
  /* We assume there's a return statement, or something, at the end of
     the function, and thus plopping the EH sequence afterward won't
     change anything.  */
2204 | gcc_assert (!gimple_seq_may_fallthru (bodyp)); |
2205 | gimple_seq_add_seq (&bodyp, eh_seq); |
2206 | |
2207 | /* We assume that since BODYP already existed, adding EH_SEQ to it |
2208 | didn't change its value, and we don't have to re-set the function. */ |
2209 | gcc_assert (bodyp == gimple_body (current_function_decl)); |
2210 | |
2211 | delete finally_tree; |
2212 | finally_tree = NULL; |
2213 | BITMAP_FREE (eh_region_may_contain_throw_map); |
2214 | eh_seq = NULL; |
2215 | |
  /* If this function needs a language-specific EH personality routine
     and the frontend didn't already set one, do so now.  */
2218 | if (function_needs_eh_personality (fun) == eh_personality_lang |
2219 | && !DECL_FUNCTION_PERSONALITY (current_function_decl)) |
2220 | DECL_FUNCTION_PERSONALITY (current_function_decl) |
2221 | = lang_hooks.eh_personality (); |
2222 | |
2223 | return 0; |
2224 | } |
2225 | |
2226 | } // anon namespace |
2227 | |
2228 | gimple_opt_pass * |
2229 | make_pass_lower_eh (gcc::context *ctxt) |
2230 | { |
2231 | return new pass_lower_eh (ctxt); |
2232 | } |
2233 | |
/* Create the multiple edges from an EH_DISPATCH statement to all of
   the possible handlers for its EH region.  Return true if a fallthru
   edge to the resx following the dispatch is still needed; false if
   every exception is necessarily dispatched to some handler.  */
2237 | |
2238 | bool |
2239 | make_eh_dispatch_edges (geh_dispatch *stmt) |
2240 | { |
2241 | eh_region r; |
2242 | eh_catch c; |
2243 | basic_block src, dst; |
2244 | |
2245 | r = get_eh_region_from_number (gimple_eh_dispatch_region (eh_dispatch_stmt: stmt)); |
2246 | src = gimple_bb (g: stmt); |
2247 | |
2248 | switch (r->type) |
2249 | { |
2250 | case ERT_TRY: |
2251 | for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
2252 | { |
2253 | dst = label_to_block (cfun, c->label); |
2254 | make_edge (src, dst, 0); |
2255 | |
2256 | /* A catch-all handler doesn't have a fallthru. */ |
2257 | if (c->type_list == NULL) |
2258 | return false; |
2259 | } |
2260 | break; |
2261 | |
2262 | case ERT_ALLOWED_EXCEPTIONS: |
2263 | dst = label_to_block (cfun, r->u.allowed.label); |
2264 | make_edge (src, dst, 0); |
2265 | break; |
2266 | |
2267 | default: |
2268 | gcc_unreachable (); |
2269 | } |
2270 | |
2271 | return true; |
2272 | } |
2273 | |
2274 | /* Create the single EH edge from STMT to its nearest landing pad, |
2275 | if there is such a landing pad within the current function. */ |
2276 | |
2277 | edge |
2278 | make_eh_edge (gimple *stmt) |
2279 | { |
2280 | basic_block src, dst; |
2281 | eh_landing_pad lp; |
2282 | int lp_nr; |
2283 | |
2284 | lp_nr = lookup_stmt_eh_lp (t: stmt); |
2285 | if (lp_nr <= 0) |
2286 | return NULL; |
2287 | |
2288 | lp = get_eh_landing_pad_from_number (lp_nr); |
2289 | gcc_assert (lp != NULL); |
2290 | |
2291 | src = gimple_bb (g: stmt); |
2292 | dst = label_to_block (cfun, lp->post_landing_pad); |
2293 | return make_edge (src, dst, EDGE_EH); |
2294 | } |
2295 | |
2296 | /* Do the work in redirecting EDGE_IN to NEW_BB within the EH region tree; |
2297 | do not actually perform the final edge redirection. |
2298 | |
2299 | CHANGE_REGION is true when we're being called from cleanup_empty_eh and |
2300 | we intend to change the destination EH region as well; this means |
2301 | EH_LANDING_PAD_NR must already be set on the destination block label. |
2302 | If false, we're being called from generic cfg manipulation code and we |
2303 | should preserve our place within the region tree. */ |
2304 | |
2305 | static void |
2306 | redirect_eh_edge_1 (edge edge_in, basic_block new_bb, bool change_region) |
2307 | { |
2308 | eh_landing_pad old_lp, new_lp; |
2309 | basic_block old_bb; |
2310 | gimple *throw_stmt; |
2311 | int old_lp_nr, new_lp_nr; |
2312 | tree old_label, new_label; |
2313 | edge_iterator ei; |
2314 | edge e; |
2315 | |
2316 | old_bb = edge_in->dest; |
2317 | old_label = gimple_block_label (old_bb); |
2318 | old_lp_nr = EH_LANDING_PAD_NR (old_label); |
2319 | gcc_assert (old_lp_nr > 0); |
2320 | old_lp = get_eh_landing_pad_from_number (old_lp_nr); |
2321 | |
2322 | throw_stmt = *gsi_last_bb (bb: edge_in->src); |
2323 | gcc_checking_assert (lookup_stmt_eh_lp (throw_stmt) == old_lp_nr); |
2324 | |
2325 | new_label = gimple_block_label (new_bb); |
2326 | |
2327 | /* Look for an existing region that might be using NEW_BB already. */ |
2328 | new_lp_nr = EH_LANDING_PAD_NR (new_label); |
2329 | if (new_lp_nr) |
2330 | { |
2331 | new_lp = get_eh_landing_pad_from_number (new_lp_nr); |
2332 | gcc_assert (new_lp); |
2333 | |
2334 | /* Unless CHANGE_REGION is true, the new and old landing pad |
2335 | had better be associated with the same EH region. */ |
2336 | gcc_assert (change_region || new_lp->region == old_lp->region); |
2337 | } |
2338 | else |
2339 | { |
2340 | new_lp = NULL; |
2341 | gcc_assert (!change_region); |
2342 | } |
2343 | |
2344 | /* Notice when we redirect the last EH edge away from OLD_BB. */ |
2345 | FOR_EACH_EDGE (e, ei, old_bb->preds) |
2346 | if (e != edge_in && (e->flags & EDGE_EH)) |
2347 | break; |
2348 | |
2349 | if (new_lp) |
2350 | { |
2351 | /* NEW_LP already exists. If there are still edges into OLD_LP, |
2352 | there's nothing to do with the EH tree. If there are no more |
2353 | edges into OLD_LP, then we want to remove OLD_LP as it is unused. |
2354 | If CHANGE_REGION is true, then our caller is expecting to remove |
2355 | the landing pad. */ |
2356 | if (e == NULL && !change_region) |
2357 | remove_eh_landing_pad (old_lp); |
2358 | } |
2359 | else |
2360 | { |
2361 | /* No correct landing pad exists. If there are no more edges |
2362 | into OLD_LP, then we can simply re-use the existing landing pad. |
2363 | Otherwise, we have to create a new landing pad. */ |
2364 | if (e == NULL) |
2365 | { |
2366 | EH_LANDING_PAD_NR (old_lp->post_landing_pad) = 0; |
2367 | new_lp = old_lp; |
2368 | } |
2369 | else |
2370 | new_lp = gen_eh_landing_pad (old_lp->region); |
2371 | new_lp->post_landing_pad = new_label; |
2372 | EH_LANDING_PAD_NR (new_label) = new_lp->index; |
2373 | } |
2374 | |
2375 | /* Maybe move the throwing statement to the new region. */ |
2376 | if (old_lp != new_lp) |
2377 | { |
2378 | remove_stmt_from_eh_lp (t: throw_stmt); |
2379 | add_stmt_to_eh_lp (t: throw_stmt, num: new_lp->index); |
2380 | } |
2381 | } |
2382 | |
2383 | /* Redirect EH edge E to NEW_BB. */ |
2384 | |
2385 | edge |
2386 | redirect_eh_edge (edge edge_in, basic_block new_bb) |
2387 | { |
2388 | redirect_eh_edge_1 (edge_in, new_bb, change_region: false); |
2389 | return ssa_redirect_edge (edge_in, new_bb); |
2390 | } |
2391 | |
2392 | /* This is a subroutine of gimple_redirect_edge_and_branch. Update the |
2393 | labels for redirecting a non-fallthru EH_DISPATCH edge E to NEW_BB. |
2394 | The actual edge update will happen in the caller. */ |
2395 | |
2396 | void |
2397 | redirect_eh_dispatch_edge (geh_dispatch *stmt, edge e, basic_block new_bb) |
2398 | { |
2399 | tree new_lab = gimple_block_label (new_bb); |
2400 | bool any_changed = false; |
2401 | basic_block old_bb; |
2402 | eh_region r; |
2403 | eh_catch c; |
2404 | |
2405 | r = get_eh_region_from_number (gimple_eh_dispatch_region (eh_dispatch_stmt: stmt)); |
2406 | switch (r->type) |
2407 | { |
2408 | case ERT_TRY: |
2409 | for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
2410 | { |
2411 | old_bb = label_to_block (cfun, c->label); |
2412 | if (old_bb == e->dest) |
2413 | { |
2414 | c->label = new_lab; |
2415 | any_changed = true; |
2416 | } |
2417 | } |
2418 | break; |
2419 | |
2420 | case ERT_ALLOWED_EXCEPTIONS: |
2421 | old_bb = label_to_block (cfun, r->u.allowed.label); |
2422 | gcc_assert (old_bb == e->dest); |
2423 | r->u.allowed.label = new_lab; |
2424 | any_changed = true; |
2425 | break; |
2426 | |
2427 | default: |
2428 | gcc_unreachable (); |
2429 | } |
2430 | |
2431 | gcc_assert (any_changed); |
2432 | } |
2433 | |
2434 | /* Helper function for operation_could_trap_p and stmt_could_throw_p. */ |
2435 | |
2436 | bool |
2437 | operation_could_trap_helper_p (enum tree_code op, |
2438 | bool fp_operation, |
2439 | bool honor_trapv, |
2440 | bool honor_nans, |
2441 | bool honor_snans, |
2442 | tree divisor, |
2443 | bool *handled) |
2444 | { |
2445 | *handled = true; |
2446 | switch (op) |
2447 | { |
2448 | case TRUNC_DIV_EXPR: |
2449 | case CEIL_DIV_EXPR: |
2450 | case FLOOR_DIV_EXPR: |
2451 | case ROUND_DIV_EXPR: |
2452 | case EXACT_DIV_EXPR: |
2453 | case CEIL_MOD_EXPR: |
2454 | case FLOOR_MOD_EXPR: |
2455 | case ROUND_MOD_EXPR: |
2456 | case TRUNC_MOD_EXPR: |
2457 | if (!TREE_CONSTANT (divisor) || integer_zerop (divisor)) |
2458 | return true; |
2459 | if (TREE_CODE (divisor) == VECTOR_CST) |
2460 | { |
2461 | /* Inspired by initializer_each_zero_or_onep. */ |
2462 | unsigned HOST_WIDE_INT nelts = vector_cst_encoded_nelts (t: divisor); |
2463 | if (VECTOR_CST_STEPPED_P (divisor) |
2464 | && !TYPE_VECTOR_SUBPARTS (TREE_TYPE (divisor)) |
2465 | .is_constant (const_value: &nelts)) |
2466 | return true; |
2467 | for (unsigned int i = 0; i < nelts; ++i) |
2468 | { |
2469 | tree elt = vector_cst_elt (divisor, i); |
2470 | if (integer_zerop (elt)) |
2471 | return true; |
2472 | } |
2473 | } |
2474 | return false; |
2475 | |
2476 | case RDIV_EXPR: |
2477 | if (fp_operation) |
2478 | { |
2479 | if (honor_snans) |
2480 | return true; |
2481 | return flag_trapping_math; |
2482 | } |
2483 | /* Fixed point operations also use RDIV_EXPR. */ |
2484 | if (!TREE_CONSTANT (divisor) || fixed_zerop (divisor)) |
2485 | return true; |
2486 | return false; |
2487 | |
2488 | case LT_EXPR: |
2489 | case LE_EXPR: |
2490 | case GT_EXPR: |
2491 | case GE_EXPR: |
2492 | case LTGT_EXPR: |
2493 | /* MIN/MAX similar as LT/LE/GT/GE. */ |
2494 | case MIN_EXPR: |
2495 | case MAX_EXPR: |
2496 | /* Some floating point comparisons may trap. */ |
2497 | return honor_nans; |
2498 | |
2499 | case EQ_EXPR: |
2500 | case NE_EXPR: |
2501 | case UNORDERED_EXPR: |
2502 | case ORDERED_EXPR: |
2503 | case UNLT_EXPR: |
2504 | case UNLE_EXPR: |
2505 | case UNGT_EXPR: |
2506 | case UNGE_EXPR: |
2507 | case UNEQ_EXPR: |
2508 | return honor_snans; |
2509 | |
2510 | case NEGATE_EXPR: |
2511 | case ABS_EXPR: |
2512 | case CONJ_EXPR: |
2513 | /* These operations don't trap with floating point. */ |
2514 | if (honor_trapv) |
2515 | return true; |
2516 | return false; |
2517 | |
2518 | case ABSU_EXPR: |
2519 | /* ABSU_EXPR never traps. */ |
2520 | return false; |
2521 | |
2522 | case PLUS_EXPR: |
2523 | case MINUS_EXPR: |
2524 | case MULT_EXPR: |
2525 | /* Any floating arithmetic may trap. */ |
2526 | if (fp_operation && flag_trapping_math) |
2527 | return true; |
2528 | if (honor_trapv) |
2529 | return true; |
2530 | return false; |
2531 | |
2532 | case COMPLEX_EXPR: |
2533 | case CONSTRUCTOR: |
2534 | /* Constructing an object cannot trap. */ |
2535 | return false; |
2536 | |
2537 | case COND_EXPR: |
2538 | case VEC_COND_EXPR: |
2539 | /* Whether *COND_EXPR can trap depends on whether the |
2540 | first argument can trap, so signal it as not handled. |
2541 | Whether lhs is floating or not doesn't matter. */ |
2542 | *handled = false; |
2543 | return false; |
2544 | |
2545 | default: |
2546 | /* Any floating arithmetic may trap. */ |
2547 | if (fp_operation && flag_trapping_math) |
2548 | return true; |
2549 | |
2550 | *handled = false; |
2551 | return false; |
2552 | } |
2553 | } |
2554 | |
/* Return true if operation OP may trap.  FP_OPERATION is true if OP is
   applied to floating-point values.  HONOR_TRAPV is true if OP is applied
   to operands of an integer type whose overflow traps (-ftrapv).  If OP is
   a division operator, DIVISOR contains the value of the divisor.  */
2559 | |
2560 | bool |
2561 | operation_could_trap_p (enum tree_code op, bool fp_operation, bool honor_trapv, |
2562 | tree divisor) |
2563 | { |
2564 | bool honor_nans = (fp_operation && flag_trapping_math |
2565 | && !flag_finite_math_only); |
2566 | bool honor_snans = fp_operation && flag_signaling_nans != 0; |
2567 | bool handled; |
2568 | |
2569 | /* This function cannot tell whether or not COND_EXPR could trap, |
2570 | because that depends on its condition op. */ |
2571 | gcc_assert (op != COND_EXPR); |
2572 | |
2573 | if (TREE_CODE_CLASS (op) != tcc_comparison |
2574 | && TREE_CODE_CLASS (op) != tcc_unary |
2575 | && TREE_CODE_CLASS (op) != tcc_binary) |
2576 | return false; |
2577 | |
2578 | return operation_could_trap_helper_p (op, fp_operation, honor_trapv, |
2579 | honor_nans, honor_snans, divisor, |
2580 | handled: &handled); |
2581 | } |
2582 | |
2583 | |
2584 | /* Returns true if it is possible to prove that the index of |
2585 | an array access REF (an ARRAY_REF expression) falls into the |
2586 | array bounds. */ |
2587 | |
2588 | static bool |
2589 | in_array_bounds_p (tree ref) |
2590 | { |
2591 | tree idx = TREE_OPERAND (ref, 1); |
2592 | tree min, max; |
2593 | |
2594 | if (TREE_CODE (idx) != INTEGER_CST) |
2595 | return false; |
2596 | |
2597 | min = array_ref_low_bound (ref); |
2598 | max = array_ref_up_bound (ref); |
2599 | if (!min |
2600 | || !max |
2601 | || TREE_CODE (min) != INTEGER_CST |
2602 | || TREE_CODE (max) != INTEGER_CST) |
2603 | return false; |
2604 | |
2605 | if (tree_int_cst_lt (t1: idx, t2: min) |
2606 | || tree_int_cst_lt (t1: max, t2: idx)) |
2607 | return false; |
2608 | |
2609 | return true; |
2610 | } |
2611 | |
2612 | /* Returns true if it is possible to prove that the range of |
2613 | an array access REF (an ARRAY_RANGE_REF expression) falls |
2614 | into the array bounds. */ |
2615 | |
2616 | static bool |
2617 | range_in_array_bounds_p (tree ref) |
2618 | { |
2619 | tree domain_type = TYPE_DOMAIN (TREE_TYPE (ref)); |
2620 | tree range_min, range_max, min, max; |
2621 | |
2622 | range_min = TYPE_MIN_VALUE (domain_type); |
2623 | range_max = TYPE_MAX_VALUE (domain_type); |
2624 | if (!range_min |
2625 | || !range_max |
2626 | || TREE_CODE (range_min) != INTEGER_CST |
2627 | || TREE_CODE (range_max) != INTEGER_CST) |
2628 | return false; |
2629 | |
2630 | min = array_ref_low_bound (ref); |
2631 | max = array_ref_up_bound (ref); |
2632 | if (!min |
2633 | || !max |
2634 | || TREE_CODE (min) != INTEGER_CST |
2635 | || TREE_CODE (max) != INTEGER_CST) |
2636 | return false; |
2637 | |
2638 | if (tree_int_cst_lt (t1: range_min, t2: min) |
2639 | || tree_int_cst_lt (t1: max, t2: range_max)) |
2640 | return false; |
2641 | |
2642 | return true; |
2643 | } |
2644 | |
/* Return true if EXPR can trap, as in dereferencing an invalid pointer
   location or performing floating-point arithmetic.  Cf. the RTL version,
   may_trap_p.  This routine expects only GIMPLE lhs or rhs input.  */
2648 | |
2649 | bool |
2650 | tree_could_trap_p (tree expr) |
2651 | { |
2652 | enum tree_code code; |
2653 | bool fp_operation = false; |
2654 | bool honor_trapv = false; |
2655 | tree t, base, div = NULL_TREE; |
2656 | |
2657 | if (!expr) |
2658 | return false; |
2659 | |
  /* In COND_EXPR and VEC_COND_EXPR only the condition may trap, and
     conditions never appear as operands in GIMPLE form, so this case
     arises only for the GENERIC uses, where the caller recurses on the
     operands itself; the *COND_EXPR as such doesn't trap.  */
2664 | if (TREE_CODE (expr) == COND_EXPR || TREE_CODE (expr) == VEC_COND_EXPR) |
2665 | return false; |
2666 | |
2667 | code = TREE_CODE (expr); |
2668 | t = TREE_TYPE (expr); |
2669 | |
2670 | if (t) |
2671 | { |
2672 | if (COMPARISON_CLASS_P (expr)) |
2673 | fp_operation = FLOAT_TYPE_P (TREE_TYPE (TREE_OPERAND (expr, 0))); |
2674 | else |
2675 | fp_operation = FLOAT_TYPE_P (t); |
2676 | honor_trapv = INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t); |
2677 | } |
2678 | |
2679 | if (TREE_CODE_CLASS (code) == tcc_binary) |
2680 | div = TREE_OPERAND (expr, 1); |
2681 | if (operation_could_trap_p (op: code, fp_operation, honor_trapv, divisor: div)) |
2682 | return true; |
2683 | |
2684 | restart: |
2685 | switch (code) |
2686 | { |
2687 | case COMPONENT_REF: |
2688 | case REALPART_EXPR: |
2689 | case IMAGPART_EXPR: |
2690 | case BIT_FIELD_REF: |
2691 | case VIEW_CONVERT_EXPR: |
2692 | case WITH_SIZE_EXPR: |
2693 | expr = TREE_OPERAND (expr, 0); |
2694 | code = TREE_CODE (expr); |
2695 | goto restart; |
2696 | |
2697 | case ARRAY_RANGE_REF: |
2698 | base = TREE_OPERAND (expr, 0); |
2699 | if (tree_could_trap_p (expr: base)) |
2700 | return true; |
2701 | if (TREE_THIS_NOTRAP (expr)) |
2702 | return false; |
2703 | return !range_in_array_bounds_p (ref: expr); |
2704 | |
2705 | case ARRAY_REF: |
2706 | base = TREE_OPERAND (expr, 0); |
2707 | if (tree_could_trap_p (expr: base)) |
2708 | return true; |
2709 | if (TREE_THIS_NOTRAP (expr)) |
2710 | return false; |
2711 | return !in_array_bounds_p (ref: expr); |
2712 | |
2713 | case TARGET_MEM_REF: |
2714 | case MEM_REF: |
2715 | if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR |
2716 | && tree_could_trap_p (TREE_OPERAND (TREE_OPERAND (expr, 0), 0))) |
2717 | return true; |
2718 | if (TREE_THIS_NOTRAP (expr)) |
2719 | return false; |
2720 | /* We cannot prove that the access is in-bounds when we have |
2721 | variable-index TARGET_MEM_REFs. */ |
2722 | if (code == TARGET_MEM_REF |
2723 | && (TMR_INDEX (expr) || TMR_INDEX2 (expr))) |
2724 | return true; |
2725 | if (TREE_CODE (TREE_OPERAND (expr, 0)) == ADDR_EXPR) |
2726 | { |
2727 | tree base = TREE_OPERAND (TREE_OPERAND (expr, 0), 0); |
2728 | poly_offset_int off = mem_ref_offset (expr); |
2729 | if (maybe_lt (a: off, b: 0)) |
2730 | return true; |
2731 | if (TREE_CODE (base) == STRING_CST) |
2732 | return maybe_le (TREE_STRING_LENGTH (base), b: off); |
2733 | tree size = DECL_SIZE_UNIT (base); |
2734 | if (size == NULL_TREE |
2735 | || !poly_int_tree_p (t: size) |
2736 | || maybe_le (a: wi::to_poly_offset (t: size), b: off)) |
2737 | return true; |
2738 | /* Now we are sure the first byte of the access is inside |
2739 | the object. */ |
2740 | return false; |
2741 | } |
2742 | return true; |
2743 | |
2744 | case INDIRECT_REF: |
2745 | return !TREE_THIS_NOTRAP (expr); |
2746 | |
2747 | case ASM_EXPR: |
2748 | return TREE_THIS_VOLATILE (expr); |
2749 | |
2750 | case CALL_EXPR: |
2751 | /* Internal function calls do not trap. */ |
2752 | if (CALL_EXPR_FN (expr) == NULL_TREE) |
2753 | return false; |
2754 | t = get_callee_fndecl (expr); |
    /* Assume that indirect calls and calls to weak functions may trap.  */
2756 | if (!t || !DECL_P (t)) |
2757 | return true; |
2758 | if (DECL_WEAK (t)) |
2759 | return tree_could_trap_p (expr: t); |
2760 | return false; |
2761 | |
2762 | case FUNCTION_DECL: |
2763 | /* Assume that accesses to weak functions may trap, unless we know |
2764 | they are certainly defined in current TU or in some other |
2765 | LTO partition. */ |
2766 | if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr)) |
2767 | { |
2768 | cgraph_node *node = cgraph_node::get (decl: expr); |
2769 | if (node) |
2770 | node = node->function_symbol (); |
2771 | return !(node && node->in_other_partition); |
2772 | } |
2773 | return false; |
2774 | |
2775 | case VAR_DECL: |
2776 | /* Assume that accesses to weak vars may trap, unless we know |
2777 | they are certainly defined in current TU or in some other |
2778 | LTO partition. */ |
2779 | if (DECL_WEAK (expr) && !DECL_COMDAT (expr) && DECL_EXTERNAL (expr)) |
2780 | { |
2781 | varpool_node *node = varpool_node::get (decl: expr); |
2782 | if (node) |
2783 | node = node->ultimate_alias_target (); |
2784 | return !(node && node->in_other_partition); |
2785 | } |
2786 | return false; |
2787 | |
2788 | default: |
2789 | return false; |
2790 | } |
2791 | } |
2792 | |
2793 | /* Return non-NULL if there is an integer operation with trapping overflow |
2794 | we can rewrite into non-trapping. Called via walk_tree from |
2795 | rewrite_to_non_trapping_overflow. */ |
2796 | |
2797 | static tree |
2798 | find_trapping_overflow (tree *tp, int *walk_subtrees, void *data) |
2799 | { |
2800 | if (EXPR_P (*tp) |
2801 | && ANY_INTEGRAL_TYPE_P (TREE_TYPE (*tp)) |
2802 | && !operation_no_trapping_overflow (TREE_TYPE (*tp), TREE_CODE (*tp))) |
2803 | return *tp; |
2804 | if (IS_TYPE_OR_DECL_P (*tp) |
2805 | || (TREE_CODE (*tp) == SAVE_EXPR && data == NULL)) |
2806 | *walk_subtrees = 0; |
2807 | return NULL_TREE; |
2808 | } |
2809 | |
/* Rewrite selected operations into unsigned arithmetic, so that they
   don't trap on overflow.  */
2812 | |
2813 | static tree |
2814 | replace_trapping_overflow (tree *tp, int *walk_subtrees, void *data) |
2815 | { |
2816 | if (find_trapping_overflow (tp, walk_subtrees, data)) |
2817 | { |
2818 | tree type = TREE_TYPE (*tp); |
2819 | tree utype = unsigned_type_for (type); |
2820 | *walk_subtrees = 0; |
2821 | int len = TREE_OPERAND_LENGTH (*tp); |
2822 | for (int i = 0; i < len; ++i) |
2823 | walk_tree (&TREE_OPERAND (*tp, i), replace_trapping_overflow, |
2824 | data, (hash_set<tree> *) data); |
2825 | |
2826 | if (TREE_CODE (*tp) == ABS_EXPR) |
2827 | { |
2828 | TREE_SET_CODE (*tp, ABSU_EXPR); |
2829 | TREE_TYPE (*tp) = utype; |
2830 | *tp = fold_convert (type, *tp); |
2831 | } |
2832 | else |
2833 | { |
2834 | TREE_TYPE (*tp) = utype; |
2835 | len = TREE_OPERAND_LENGTH (*tp); |
2836 | for (int i = 0; i < len; ++i) |
2837 | TREE_OPERAND (*tp, i) |
2838 | = fold_convert (utype, TREE_OPERAND (*tp, i)); |
2839 | *tp = fold_convert (type, *tp); |
2840 | } |
2841 | } |
2842 | return NULL_TREE; |
2843 | } |
2844 | |
/* If any subexpression of EXPR can trap due to -ftrapv, rewrite it
   using unsigned arithmetic to avoid traps in it.  */
2847 | |
2848 | tree |
2849 | rewrite_to_non_trapping_overflow (tree expr) |
2850 | { |
2851 | if (!flag_trapv) |
2852 | return expr; |
2853 | hash_set<tree> pset; |
2854 | if (!walk_tree (&expr, find_trapping_overflow, &pset, &pset)) |
2855 | return expr; |
2856 | expr = unshare_expr (expr); |
2857 | pset.empty (); |
2858 | walk_tree (&expr, replace_trapping_overflow, &pset, &pset); |
2859 | return expr; |
2860 | } |
2861 | |
/* Helper for stmt_could_throw_p.  Return true if the assignment STMT
   may throw.  */
2864 | |
2865 | static bool |
2866 | stmt_could_throw_1_p (gassign *stmt) |
2867 | { |
2868 | enum tree_code code = gimple_assign_rhs_code (gs: stmt); |
2869 | bool honor_nans = false; |
2870 | bool honor_snans = false; |
2871 | bool fp_operation = false; |
2872 | bool honor_trapv = false; |
2873 | tree t; |
2874 | size_t i; |
2875 | bool handled, ret; |
2876 | |
2877 | if (TREE_CODE_CLASS (code) == tcc_comparison |
2878 | || TREE_CODE_CLASS (code) == tcc_unary |
2879 | || TREE_CODE_CLASS (code) == tcc_binary) |
2880 | { |
2881 | if (TREE_CODE_CLASS (code) == tcc_comparison) |
2882 | t = TREE_TYPE (gimple_assign_rhs1 (stmt)); |
2883 | else |
2884 | t = TREE_TYPE (gimple_assign_lhs (stmt)); |
2885 | fp_operation = FLOAT_TYPE_P (t); |
2886 | if (fp_operation) |
2887 | { |
2888 | honor_nans = flag_trapping_math && !flag_finite_math_only; |
2889 | honor_snans = flag_signaling_nans != 0; |
2890 | } |
2891 | else if (INTEGRAL_TYPE_P (t) && TYPE_OVERFLOW_TRAPS (t)) |
2892 | honor_trapv = true; |
2893 | } |
2894 | |
2895 | /* First check the LHS. */ |
2896 | if (tree_could_trap_p (expr: gimple_assign_lhs (gs: stmt))) |
2897 | return true; |
2898 | |
2899 | /* Check if the main expression may trap. */ |
2900 | ret = operation_could_trap_helper_p (op: code, fp_operation, honor_trapv, |
2901 | honor_nans, honor_snans, |
2902 | divisor: gimple_assign_rhs2 (gs: stmt), |
2903 | handled: &handled); |
2904 | if (handled) |
2905 | return ret; |
2906 | |
2907 | /* If the expression does not trap, see if any of the individual operands may |
2908 | trap. */ |
2909 | for (i = 1; i < gimple_num_ops (gs: stmt); i++) |
2910 | if (tree_could_trap_p (expr: gimple_op (gs: stmt, i))) |
2911 | return true; |
2912 | |
2913 | return false; |
2914 | } |
2915 | |
2916 | |
2917 | /* Return true if statement STMT within FUN could throw an exception. */ |
2918 | |
2919 | bool |
2920 | stmt_could_throw_p (function *fun, gimple *stmt) |
2921 | { |
2922 | if (!flag_exceptions) |
2923 | return false; |
2924 | |
2925 | /* The only statements that can throw an exception are assignments, |
2926 | conditionals, calls, resx, and asms. */ |
2927 | switch (gimple_code (g: stmt)) |
2928 | { |
2929 | case GIMPLE_RESX: |
2930 | return true; |
2931 | |
2932 | case GIMPLE_CALL: |
2933 | return !gimple_call_nothrow_p (s: as_a <gcall *> (p: stmt)); |
2934 | |
2935 | case GIMPLE_COND: |
2936 | { |
2937 | if (fun && !fun->can_throw_non_call_exceptions) |
2938 | return false; |
2939 | gcond *cond = as_a <gcond *> (p: stmt); |
2940 | tree lhs = gimple_cond_lhs (gs: cond); |
2941 | return operation_could_trap_p (op: gimple_cond_code (gs: cond), |
2942 | FLOAT_TYPE_P (TREE_TYPE (lhs)), |
2943 | honor_trapv: false, NULL_TREE); |
2944 | } |
2945 | |
2946 | case GIMPLE_ASSIGN: |
2947 | if ((fun && !fun->can_throw_non_call_exceptions) |
2948 | || gimple_clobber_p (s: stmt)) |
2949 | return false; |
2950 | return stmt_could_throw_1_p (stmt: as_a <gassign *> (p: stmt)); |
2951 | |
2952 | case GIMPLE_ASM: |
2953 | if (fun && !fun->can_throw_non_call_exceptions) |
2954 | return false; |
2955 | return gimple_asm_volatile_p (asm_stmt: as_a <gasm *> (p: stmt)); |
2956 | |
2957 | default: |
2958 | return false; |
2959 | } |
2960 | } |
2961 | |
2962 | /* Return true if STMT in function FUN must be assumed necessary because of |
2963 | non-call exceptions. */ |
2964 | |
2965 | bool |
2966 | stmt_unremovable_because_of_non_call_eh_p (function *fun, gimple *stmt) |
2967 | { |
2968 | return (fun->can_throw_non_call_exceptions |
2969 | && !fun->can_delete_dead_exceptions |
2970 | && stmt_could_throw_p (fun, stmt)); |
2971 | } |
2972 | |
2973 | /* Return true if expression T could throw an exception. */ |
2974 | |
2975 | bool |
2976 | tree_could_throw_p (tree t) |
2977 | { |
2978 | if (!flag_exceptions) |
2979 | return false; |
2980 | if (TREE_CODE (t) == MODIFY_EXPR) |
2981 | { |
2982 | if (cfun->can_throw_non_call_exceptions |
2983 | && tree_could_trap_p (TREE_OPERAND (t, 0))) |
2984 | return true; |
2985 | t = TREE_OPERAND (t, 1); |
2986 | } |
2987 | |
2988 | if (TREE_CODE (t) == WITH_SIZE_EXPR) |
2989 | t = TREE_OPERAND (t, 0); |
2990 | if (TREE_CODE (t) == CALL_EXPR) |
2991 | return (call_expr_flags (t) & ECF_NOTHROW) == 0; |
2992 | if (cfun->can_throw_non_call_exceptions) |
2993 | return tree_could_trap_p (expr: t); |
2994 | return false; |
2995 | } |
2996 | |
/* Return true if STMT can throw an exception that is not caught within its
   function FUN.  FUN can be NULL, in which case the answer is extra
   conservative.  */
3000 | |
3001 | bool |
3002 | stmt_can_throw_external (function *fun, gimple *stmt) |
3003 | { |
3004 | int lp_nr; |
3005 | |
3006 | if (!stmt_could_throw_p (fun, stmt)) |
3007 | return false; |
3008 | if (!fun) |
3009 | return true; |
3010 | |
3011 | lp_nr = lookup_stmt_eh_lp_fn (ifun: fun, t: stmt); |
3012 | return lp_nr == 0; |
3013 | } |
3014 | |
3015 | /* Return true if STMT can throw an exception that is caught within its |
3016 | function FUN. */ |
3017 | |
3018 | bool |
3019 | stmt_can_throw_internal (function *fun, gimple *stmt) |
3020 | { |
3021 | int lp_nr; |
3022 | |
3023 | gcc_checking_assert (fun); |
3024 | if (!stmt_could_throw_p (fun, stmt)) |
3025 | return false; |
3026 | |
3027 | lp_nr = lookup_stmt_eh_lp_fn (ifun: fun, t: stmt); |
3028 | return lp_nr > 0; |
3029 | } |
3030 | |
3031 | /* Given a statement STMT in IFUN, if STMT can no longer throw, then |
3032 | remove any entry it might have from the EH table. Return true if |
3033 | any change was made. */ |
3034 | |
3035 | bool |
3036 | maybe_clean_eh_stmt_fn (struct function *ifun, gimple *stmt) |
3037 | { |
3038 | if (stmt_could_throw_p (fun: ifun, stmt)) |
3039 | return false; |
3040 | return remove_stmt_from_eh_lp_fn (ifun, t: stmt); |
3041 | } |
3042 | |
3043 | /* Likewise, but always use the current function. */ |
3044 | |
3045 | bool |
3046 | maybe_clean_eh_stmt (gimple *stmt) |
3047 | { |
3048 | return maybe_clean_eh_stmt_fn (cfun, stmt); |
3049 | } |
3050 | |
3051 | /* Given a statement OLD_STMT and a new statement NEW_STMT that has replaced |
3052 | OLD_STMT in the function, remove OLD_STMT from the EH table and put NEW_STMT |
3053 | in the table if it should be in there. Return TRUE if a replacement was |
   done that may require an EH edge purge.  */
3055 | |
3056 | bool |
3057 | maybe_clean_or_replace_eh_stmt (gimple *old_stmt, gimple *new_stmt) |
3058 | { |
3059 | int lp_nr = lookup_stmt_eh_lp (t: old_stmt); |
3060 | |
3061 | if (lp_nr != 0) |
3062 | { |
3063 | bool new_stmt_could_throw = stmt_could_throw_p (cfun, stmt: new_stmt); |
3064 | |
3065 | if (new_stmt == old_stmt && new_stmt_could_throw) |
3066 | return false; |
3067 | |
3068 | remove_stmt_from_eh_lp (t: old_stmt); |
3069 | if (new_stmt_could_throw) |
3070 | { |
3071 | add_stmt_to_eh_lp (t: new_stmt, num: lp_nr); |
3072 | return false; |
3073 | } |
3074 | else |
3075 | return true; |
3076 | } |
3077 | |
3078 | return false; |
3079 | } |
3080 | |
3081 | /* Given a statement OLD_STMT in OLD_FUN and a duplicate statement NEW_STMT |
3082 | in NEW_FUN, copy the EH table data from OLD_STMT to NEW_STMT. The MAP |
3083 | operand is the return value of duplicate_eh_regions. */ |
3084 | |
3085 | bool |
3086 | maybe_duplicate_eh_stmt_fn (struct function *new_fun, gimple *new_stmt, |
3087 | struct function *old_fun, gimple *old_stmt, |
3088 | hash_map<void *, void *> *map, |
3089 | int default_lp_nr) |
3090 | { |
3091 | int old_lp_nr, new_lp_nr; |
3092 | |
3093 | if (!stmt_could_throw_p (fun: new_fun, stmt: new_stmt)) |
3094 | return false; |
3095 | |
3096 | old_lp_nr = lookup_stmt_eh_lp_fn (ifun: old_fun, t: old_stmt); |
3097 | if (old_lp_nr == 0) |
3098 | { |
3099 | if (default_lp_nr == 0) |
3100 | return false; |
3101 | new_lp_nr = default_lp_nr; |
3102 | } |
3103 | else if (old_lp_nr > 0) |
3104 | { |
3105 | eh_landing_pad old_lp, new_lp; |
3106 | |
3107 | old_lp = (*old_fun->eh->lp_array)[old_lp_nr]; |
3108 | new_lp = static_cast<eh_landing_pad> (*map->get (k: old_lp)); |
3109 | new_lp_nr = new_lp->index; |
3110 | } |
3111 | else |
3112 | { |
3113 | eh_region old_r, new_r; |
3114 | |
3115 | old_r = (*old_fun->eh->region_array)[-old_lp_nr]; |
3116 | new_r = static_cast<eh_region> (*map->get (k: old_r)); |
3117 | new_lp_nr = -new_r->index; |
3118 | } |
3119 | |
3120 | add_stmt_to_eh_lp_fn (ifun: new_fun, t: new_stmt, num: new_lp_nr); |
3121 | return true; |
3122 | } |
3123 | |
3124 | /* Similar, but both OLD_STMT and NEW_STMT are within the current function, |
3125 | and thus no remapping is required. */ |
3126 | |
3127 | bool |
3128 | maybe_duplicate_eh_stmt (gimple *new_stmt, gimple *old_stmt) |
3129 | { |
3130 | int lp_nr; |
3131 | |
3132 | if (!stmt_could_throw_p (cfun, stmt: new_stmt)) |
3133 | return false; |
3134 | |
3135 | lp_nr = lookup_stmt_eh_lp (t: old_stmt); |
3136 | if (lp_nr == 0) |
3137 | return false; |
3138 | |
3139 | add_stmt_to_eh_lp (t: new_stmt, num: lp_nr); |
3140 | return true; |
3141 | } |
3142 | |
/* Returns TRUE if ONEH and TWOH are exception handlers (gimple_try_cleanup of
   GIMPLE_TRY) that are similar enough to be considered the same.  Currently
3145 | this only handles handlers consisting of a single call, as that's the |
3146 | important case for C++: a destructor call for a particular object showing |
3147 | up in multiple handlers. */ |
3148 | |
3149 | static bool |
3150 | same_handler_p (gimple_seq oneh, gimple_seq twoh) |
3151 | { |
3152 | gimple_stmt_iterator gsi; |
3153 | gimple *ones, *twos; |
3154 | unsigned int ai; |
3155 | |
3156 | gsi = gsi_start (seq&: oneh); |
3157 | if (!gsi_one_before_end_p (i: gsi)) |
3158 | return false; |
3159 | ones = gsi_stmt (i: gsi); |
3160 | |
3161 | gsi = gsi_start (seq&: twoh); |
3162 | if (!gsi_one_before_end_p (i: gsi)) |
3163 | return false; |
3164 | twos = gsi_stmt (i: gsi); |
3165 | |
3166 | if (!is_gimple_call (gs: ones) |
3167 | || !is_gimple_call (gs: twos) |
3168 | || gimple_call_lhs (gs: ones) |
3169 | || gimple_call_lhs (gs: twos) |
3170 | || gimple_call_chain (gs: ones) |
3171 | || gimple_call_chain (gs: twos) |
3172 | || !gimple_call_same_target_p (ones, twos) |
3173 | || gimple_call_num_args (gs: ones) != gimple_call_num_args (gs: twos)) |
3174 | return false; |
3175 | |
3176 | for (ai = 0; ai < gimple_call_num_args (gs: ones); ++ai) |
3177 | if (!operand_equal_p (gimple_call_arg (gs: ones, index: ai), |
3178 | gimple_call_arg (gs: twos, index: ai), flags: 0)) |
3179 | return false; |
3180 | |
3181 | return true; |
3182 | } |
3183 | |
3184 | /* Optimize |
3185 | try { A() } finally { try { ~B() } catch { ~A() } } |
3186 | try { ... } finally { ~A() } |
3187 | into |
3188 | try { A() } catch { ~B() } |
3189 | try { ~B() ... } finally { ~A() } |
3190 | |
3191 | This occurs frequently in C++, where A is a local variable and B is a |
3192 | temporary used in the initializer for A. */ |
3193 | |
3194 | static void |
3195 | optimize_double_finally (gtry *one, gtry *two) |
3196 | { |
3197 | gimple *oneh; |
3198 | gimple_stmt_iterator gsi; |
3199 | gimple_seq cleanup; |
3200 | |
3201 | cleanup = gimple_try_cleanup (gs: one); |
3202 | gsi = gsi_start (seq&: cleanup); |
3203 | if (!gsi_one_before_end_p (i: gsi)) |
3204 | return; |
3205 | |
3206 | oneh = gsi_stmt (i: gsi); |
3207 | if (gimple_code (g: oneh) != GIMPLE_TRY |
3208 | || gimple_try_kind (gs: oneh) != GIMPLE_TRY_CATCH) |
3209 | return; |
3210 | |
3211 | if (same_handler_p (oneh: gimple_try_cleanup (gs: oneh), twoh: gimple_try_cleanup (gs: two))) |
3212 | { |
3213 | gimple_seq seq = gimple_try_eval (gs: oneh); |
3214 | |
3215 | gimple_try_set_cleanup (try_stmt: one, cleanup: seq); |
3216 | gimple_try_set_kind (gs: one, kind: GIMPLE_TRY_CATCH); |
3217 | seq = copy_gimple_seq_and_replace_locals (seq); |
3218 | gimple_seq_add_seq (&seq, gimple_try_eval (gs: two)); |
3219 | gimple_try_set_eval (try_stmt: two, eval: seq); |
3220 | } |
3221 | } |
3222 | |
3223 | /* Perform EH refactoring optimizations that are simpler to do when code |
3224 | flow has been lowered but EH structures haven't. */ |
3225 | |
3226 | static void |
3227 | refactor_eh_r (gimple_seq seq) |
3228 | { |
3229 | gimple_stmt_iterator gsi; |
3230 | gimple *one, *two; |
3231 | |
3232 | one = NULL; |
3233 | two = NULL; |
3234 | gsi = gsi_start (seq); |
3235 | while (1) |
3236 | { |
3237 | one = two; |
3238 | if (gsi_end_p (i: gsi)) |
3239 | two = NULL; |
3240 | else |
3241 | two = gsi_stmt (i: gsi); |
3242 | if (one && two) |
3243 | if (gtry *try_one = dyn_cast <gtry *> (p: one)) |
3244 | if (gtry *try_two = dyn_cast <gtry *> (p: two)) |
3245 | if (gimple_try_kind (gs: try_one) == GIMPLE_TRY_FINALLY |
3246 | && gimple_try_kind (gs: try_two) == GIMPLE_TRY_FINALLY) |
3247 | optimize_double_finally (one: try_one, two: try_two); |
3248 | if (one) |
3249 | switch (gimple_code (g: one)) |
3250 | { |
3251 | case GIMPLE_TRY: |
3252 | refactor_eh_r (seq: gimple_try_eval (gs: one)); |
3253 | refactor_eh_r (seq: gimple_try_cleanup (gs: one)); |
3254 | break; |
3255 | case GIMPLE_CATCH: |
3256 | refactor_eh_r (seq: gimple_catch_handler (catch_stmt: as_a <gcatch *> (p: one))); |
3257 | break; |
3258 | case GIMPLE_EH_FILTER: |
3259 | refactor_eh_r (seq: gimple_eh_filter_failure (gs: one)); |
3260 | break; |
3261 | case GIMPLE_EH_ELSE: |
3262 | { |
3263 | geh_else *eh_else_stmt = as_a <geh_else *> (p: one); |
3264 | refactor_eh_r (seq: gimple_eh_else_n_body (eh_else_stmt)); |
3265 | refactor_eh_r (seq: gimple_eh_else_e_body (eh_else_stmt)); |
3266 | } |
3267 | break; |
3268 | default: |
3269 | break; |
3270 | } |
3271 | if (two) |
3272 | gsi_next (i: &gsi); |
3273 | else |
3274 | break; |
3275 | } |
3276 | } |
3277 | |
3278 | namespace { |
3279 | |
3280 | const pass_data pass_data_refactor_eh = |
3281 | { |
3282 | .type: GIMPLE_PASS, /* type */ |
3283 | .name: "ehopt" , /* name */ |
3284 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
3285 | .tv_id: TV_TREE_EH, /* tv_id */ |
3286 | PROP_gimple_lcf, /* properties_required */ |
3287 | .properties_provided: 0, /* properties_provided */ |
3288 | .properties_destroyed: 0, /* properties_destroyed */ |
3289 | .todo_flags_start: 0, /* todo_flags_start */ |
3290 | .todo_flags_finish: 0, /* todo_flags_finish */ |
3291 | }; |
3292 | |
3293 | class pass_refactor_eh : public gimple_opt_pass |
3294 | { |
3295 | public: |
3296 | pass_refactor_eh (gcc::context *ctxt) |
3297 | : gimple_opt_pass (pass_data_refactor_eh, ctxt) |
3298 | {} |
3299 | |
3300 | /* opt_pass methods: */ |
3301 | bool gate (function *) final override { return flag_exceptions != 0; } |
3302 | unsigned int execute (function *) final override |
3303 | { |
3304 | refactor_eh_r (seq: gimple_body (current_function_decl)); |
3305 | return 0; |
3306 | } |
3307 | |
3308 | }; // class pass_refactor_eh |
3309 | |
3310 | } // anon namespace |
3311 | |
3312 | gimple_opt_pass * |
3313 | make_pass_refactor_eh (gcc::context *ctxt) |
3314 | { |
3315 | return new pass_refactor_eh (ctxt); |
3316 | } |
3317 | |
3318 | /* At the end of gimple optimization, we can lower RESX. */ |
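
/* An illustrative sketch of the main lowerings done below (GIMPLE
   shown approximately, not verbatim dump output):

     resx 1;   source region 1 was already deleted:
       -> __builtin_trap ();

     resx 1;   resuming into the landing pad of region 2 in this function:
       -> __builtin_eh_copy_values (2, 1);  then fall thru to the pad

     resx 1;   the exception escapes the function:
       -> ptr = __builtin_eh_pointer (1);
          _Unwind_Resume (ptr);  */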
3319 | |
3320 | static bool |
3321 | lower_resx (basic_block bb, gresx *stmt, |
3322 | hash_map<eh_region, tree> *mnt_map) |
3323 | { |
3324 | int lp_nr; |
3325 | eh_region src_r, dst_r; |
3326 | gimple_stmt_iterator gsi; |
3327 | gcall *x; |
3328 | tree fn, src_nr; |
3329 | bool ret = false; |
3330 | |
3331 | lp_nr = lookup_stmt_eh_lp (t: stmt); |
3332 | if (lp_nr != 0) |
3333 | dst_r = get_eh_region_from_lp_number (lp_nr); |
3334 | else |
3335 | dst_r = NULL; |
3336 | |
3337 | src_r = get_eh_region_from_number (gimple_resx_region (resx_stmt: stmt)); |
3338 | gsi = gsi_last_bb (bb); |
3339 | |
3340 | if (src_r == NULL) |
3341 | { |
3342 | /* We can wind up with no source region when pass_cleanup_eh shows |
3343 | that there are no entries into an eh region and deletes it, but |
3344 | then the block that contains the resx isn't removed. This can |
3345 | happen without optimization when the switch statement created by |
3346 | lower_try_finally_switch isn't simplified to remove the eh case. |
3347 | |
3348 | Resolve this by expanding the resx node to an abort. */ |
3349 | |
3350 | fn = builtin_decl_implicit (fncode: BUILT_IN_TRAP); |
3351 | x = gimple_build_call (fn, 0); |
3352 | gimple_call_set_ctrl_altering (s: x, ctrl_altering_p: true); |
3353 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3354 | |
3355 | while (EDGE_COUNT (bb->succs) > 0) |
3356 | remove_edge (EDGE_SUCC (bb, 0)); |
3357 | } |
3358 | else if (dst_r) |
3359 | { |
/* When we have a destination region, we resolve this by copying
the exception pointer and filter values into place, and changing
the edge to immediately after the landing pad.  */
3363 | edge e; |
3364 | |
3365 | if (lp_nr < 0) |
3366 | { |
3367 | basic_block new_bb; |
3368 | tree lab; |
3369 | |
/* We are resuming into a MUST_NOT_THROW region.  Expand a call to
the failure decl into a new block, if needed.  */
3372 | gcc_assert (dst_r->type == ERT_MUST_NOT_THROW); |
3373 | |
3374 | tree *slot = mnt_map->get (k: dst_r); |
3375 | if (slot == NULL) |
3376 | { |
3377 | gimple_stmt_iterator gsi2; |
3378 | |
3379 | new_bb = create_empty_bb (bb); |
3380 | new_bb->count = bb->count; |
3381 | add_bb_to_loop (new_bb, bb->loop_father); |
3382 | lab = gimple_block_label (new_bb); |
3383 | gsi2 = gsi_start_bb (bb: new_bb); |
3384 | |
3385 | /* Handle failure fns that expect either no arguments or the |
3386 | exception pointer. */ |
3387 | fn = dst_r->u.must_not_throw.failure_decl; |
3388 | if (TYPE_ARG_TYPES (TREE_TYPE (fn)) != void_list_node) |
3389 | { |
3390 | tree epfn = builtin_decl_implicit (fncode: BUILT_IN_EH_POINTER); |
3391 | src_nr = build_int_cst (integer_type_node, src_r->index); |
3392 | x = gimple_build_call (epfn, 1, src_nr); |
3393 | tree var = create_tmp_var (ptr_type_node); |
3394 | var = make_ssa_name (var, stmt: x); |
3395 | gimple_call_set_lhs (gs: x, lhs: var); |
3396 | gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING); |
3397 | x = gimple_build_call (fn, 1, var); |
3398 | } |
3399 | else |
3400 | x = gimple_build_call (fn, 0); |
3401 | gimple_set_location (g: x, location: dst_r->u.must_not_throw.failure_loc); |
3402 | gsi_insert_after (&gsi2, x, GSI_CONTINUE_LINKING); |
3403 | |
3404 | mnt_map->put (k: dst_r, v: lab); |
3405 | } |
3406 | else |
3407 | { |
3408 | lab = *slot; |
3409 | new_bb = label_to_block (cfun, lab); |
3410 | } |
3411 | |
3412 | gcc_assert (EDGE_COUNT (bb->succs) == 0); |
3413 | e = make_single_succ_edge (bb, new_bb, EDGE_FALLTHRU); |
3414 | } |
3415 | else |
3416 | { |
3417 | edge_iterator ei; |
3418 | tree dst_nr = build_int_cst (integer_type_node, dst_r->index); |
3419 | |
3420 | fn = builtin_decl_implicit (fncode: BUILT_IN_EH_COPY_VALUES); |
3421 | src_nr = build_int_cst (integer_type_node, src_r->index); |
3422 | x = gimple_build_call (fn, 2, dst_nr, src_nr); |
3423 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3424 | |
3425 | /* Update the flags for the outgoing edge. */ |
3426 | e = single_succ_edge (bb); |
3427 | gcc_assert (e->flags & EDGE_EH); |
3428 | e->flags = (e->flags & ~EDGE_EH) | EDGE_FALLTHRU; |
3429 | e->probability = profile_probability::always (); |
3430 | |
3431 | /* If there are no more EH users of the landing pad, delete it. */ |
3432 | FOR_EACH_EDGE (e, ei, e->dest->preds) |
3433 | if (e->flags & EDGE_EH) |
3434 | break; |
3435 | if (e == NULL) |
3436 | { |
3437 | eh_landing_pad lp = get_eh_landing_pad_from_number (lp_nr); |
3438 | remove_eh_landing_pad (lp); |
3439 | } |
3440 | } |
3441 | |
3442 | ret = true; |
3443 | } |
3444 | else |
3445 | { |
3446 | tree var; |
3447 | |
3448 | /* When we don't have a destination region, this exception escapes |
3449 | up the call chain. We resolve this by generating a call to the |
3450 | _Unwind_Resume library function. */ |
3451 | |
3452 | /* The ARM EABI redefines _Unwind_Resume as __cxa_end_cleanup |
3453 | with no arguments for C++. Check for that. */ |
3454 | if (src_r->use_cxa_end_cleanup) |
3455 | { |
3456 | fn = builtin_decl_implicit (fncode: BUILT_IN_CXA_END_CLEANUP); |
3457 | x = gimple_build_call (fn, 0); |
3458 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3459 | } |
3460 | else |
3461 | { |
3462 | fn = builtin_decl_implicit (fncode: BUILT_IN_EH_POINTER); |
3463 | src_nr = build_int_cst (integer_type_node, src_r->index); |
3464 | x = gimple_build_call (fn, 1, src_nr); |
3465 | var = create_tmp_var (ptr_type_node); |
3466 | var = make_ssa_name (var, stmt: x); |
3467 | gimple_call_set_lhs (gs: x, lhs: var); |
3468 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3469 | |
/* When exception handling is delegated to a caller function, we
have to guarantee that shadow memory for variables living on the
stack is cleaned before control is given to the parent function.  */
3473 | if (sanitize_flags_p (flag: SANITIZE_ADDRESS)) |
3474 | { |
3475 | tree decl |
3476 | = builtin_decl_implicit (fncode: BUILT_IN_ASAN_HANDLE_NO_RETURN); |
3477 | gimple *g = gimple_build_call (decl, 0); |
3478 | gimple_set_location (g, location: gimple_location (g: stmt)); |
3479 | gsi_insert_before (&gsi, g, GSI_SAME_STMT); |
3480 | } |
3481 | |
3482 | fn = builtin_decl_implicit (fncode: BUILT_IN_UNWIND_RESUME); |
3483 | x = gimple_build_call (fn, 1, var); |
3484 | gimple_call_set_ctrl_altering (s: x, ctrl_altering_p: true); |
3485 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3486 | } |
3487 | |
3488 | gcc_assert (EDGE_COUNT (bb->succs) == 0); |
3489 | } |
3490 | |
3491 | gsi_remove (&gsi, true); |
3492 | |
3493 | return ret; |
3494 | } |
3495 | |
3496 | namespace { |
3497 | |
3498 | const pass_data pass_data_lower_resx = |
3499 | { |
3500 | .type: GIMPLE_PASS, /* type */ |
3501 | .name: "resx" , /* name */ |
3502 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
3503 | .tv_id: TV_TREE_EH, /* tv_id */ |
3504 | PROP_gimple_lcf, /* properties_required */ |
3505 | .properties_provided: 0, /* properties_provided */ |
3506 | .properties_destroyed: 0, /* properties_destroyed */ |
3507 | .todo_flags_start: 0, /* todo_flags_start */ |
3508 | .todo_flags_finish: 0, /* todo_flags_finish */ |
3509 | }; |
3510 | |
3511 | class pass_lower_resx : public gimple_opt_pass |
3512 | { |
3513 | public: |
3514 | pass_lower_resx (gcc::context *ctxt) |
3515 | : gimple_opt_pass (pass_data_lower_resx, ctxt) |
3516 | {} |
3517 | |
3518 | /* opt_pass methods: */ |
3519 | bool gate (function *) final override { return flag_exceptions != 0; } |
3520 | unsigned int execute (function *) final override; |
3521 | |
3522 | }; // class pass_lower_resx |
3523 | |
3524 | unsigned |
3525 | pass_lower_resx::execute (function *fun) |
3526 | { |
3527 | basic_block bb; |
3528 | bool dominance_invalidated = false; |
3529 | bool any_rewritten = false; |
3530 | |
3531 | hash_map<eh_region, tree> mnt_map; |
3532 | |
3533 | FOR_EACH_BB_FN (bb, fun) |
3534 | { |
3535 | if (gresx *last = safe_dyn_cast <gresx *> (p: *gsi_last_bb (bb))) |
3536 | { |
3537 | dominance_invalidated |= lower_resx (bb, stmt: last, mnt_map: &mnt_map); |
3538 | any_rewritten = true; |
3539 | } |
3540 | } |
3541 | |
3542 | if (dominance_invalidated) |
3543 | { |
3544 | free_dominance_info (CDI_DOMINATORS); |
3545 | free_dominance_info (CDI_POST_DOMINATORS); |
3546 | } |
3547 | |
3548 | return any_rewritten ? TODO_update_ssa_only_virtuals : 0; |
3549 | } |
3550 | |
3551 | } // anon namespace |
3552 | |
3553 | gimple_opt_pass * |
3554 | make_pass_lower_resx (gcc::context *ctxt) |
3555 | { |
3556 | return new pass_lower_resx (ctxt); |
3557 | } |
3558 | |
3559 | /* Try to optimize var = {v} {CLOBBER} stmts followed just by |
3560 | external throw. */ |
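
/* For example (hypothetical GIMPLE), a block of the form

     <bb 7>:          reached only via EH edges
     a = {v} {CLOBBER};
     b = {v} {CLOBBER};
     resx 1;          the exception leaves the function

   can have the clobbers deleted: they only mark the death of the
   variables' storage, and the whole frame is about to be unwound
   anyway.  */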
3561 | |
3562 | static void |
3563 | optimize_clobbers (basic_block bb) |
3564 | { |
3565 | gimple_stmt_iterator gsi = gsi_last_bb (bb); |
3566 | bool any_clobbers = false; |
3567 | bool seen_stack_restore = false; |
3568 | edge_iterator ei; |
3569 | edge e; |
3570 | |
/* Only optimize anything if the bb contains at least one clobber,
ends with resx (checked by the caller), otherwise contains only
debug stmts or labels plus at most one __builtin_stack_restore
call, and has an incoming EH edge.  */
3575 | for (gsi_prev (i: &gsi); !gsi_end_p (i: gsi); gsi_prev (i: &gsi)) |
3576 | { |
3577 | gimple *stmt = gsi_stmt (i: gsi); |
3578 | if (is_gimple_debug (gs: stmt)) |
3579 | continue; |
3580 | if (gimple_clobber_p (s: stmt)) |
3581 | { |
3582 | any_clobbers = true; |
3583 | continue; |
3584 | } |
3585 | if (!seen_stack_restore |
3586 | && gimple_call_builtin_p (stmt, BUILT_IN_STACK_RESTORE)) |
3587 | { |
3588 | seen_stack_restore = true; |
3589 | continue; |
3590 | } |
3591 | if (gimple_code (g: stmt) == GIMPLE_LABEL) |
3592 | break; |
3593 | return; |
3594 | } |
3595 | if (!any_clobbers) |
3596 | return; |
3597 | FOR_EACH_EDGE (e, ei, bb->preds) |
3598 | if (e->flags & EDGE_EH) |
3599 | break; |
3600 | if (e == NULL) |
3601 | return; |
3602 | gsi = gsi_last_bb (bb); |
3603 | for (gsi_prev (i: &gsi); !gsi_end_p (i: gsi); gsi_prev (i: &gsi)) |
3604 | { |
3605 | gimple *stmt = gsi_stmt (i: gsi); |
3606 | if (!gimple_clobber_p (s: stmt)) |
3607 | continue; |
3608 | unlink_stmt_vdef (stmt); |
3609 | gsi_remove (&gsi, true); |
3610 | release_defs (stmt); |
3611 | } |
3612 | } |
3613 | |
/* Try to sink var = {v} {CLOBBER} stmts followed just by
internal throw to successor BB.
SUNK, if not NULL, is an array of sequences, indexed by basic-block
index, to sink to and to pick up sinking opportunities from.
If FOUND_OPPORTUNITY is not NULL then do not perform the optimization
but set *FOUND_OPPORTUNITY to true.  */
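
/* For example (hypothetical GIMPLE), given

     <bb 7>:          all incoming edges are EH edges
     a = {v} {CLOBBER};
     resx 1;          resumes into another region of this function

   the clobber can be sunk into the single EH successor, preserving
   the storage-death information along the unwinding path that stays
   within the function.  */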
3620 | |
3621 | static int |
3622 | sink_clobbers (basic_block bb, |
3623 | gimple_seq *sunk = NULL, bool *found_opportunity = NULL) |
3624 | { |
3625 | edge e; |
3626 | edge_iterator ei; |
3627 | gimple_stmt_iterator gsi, dgsi; |
3628 | basic_block succbb; |
3629 | bool any_clobbers = false; |
3630 | unsigned todo = 0; |
3631 | |
3632 | /* Only optimize if BB has a single EH successor and |
3633 | all predecessor edges are EH too. */ |
3634 | if (!single_succ_p (bb) |
3635 | || (single_succ_edge (bb)->flags & EDGE_EH) == 0) |
3636 | return 0; |
3637 | |
3638 | FOR_EACH_EDGE (e, ei, bb->preds) |
3639 | { |
3640 | if ((e->flags & EDGE_EH) == 0) |
3641 | return 0; |
3642 | } |
3643 | |
3644 | /* And BB contains only CLOBBER stmts before the final |
3645 | RESX. */ |
3646 | gsi = gsi_last_bb (bb); |
3647 | for (gsi_prev (i: &gsi); !gsi_end_p (i: gsi); gsi_prev (i: &gsi)) |
3648 | { |
3649 | gimple *stmt = gsi_stmt (i: gsi); |
3650 | if (is_gimple_debug (gs: stmt)) |
3651 | continue; |
3652 | if (gimple_code (g: stmt) == GIMPLE_LABEL) |
3653 | break; |
3654 | if (!gimple_clobber_p (s: stmt)) |
3655 | return 0; |
3656 | any_clobbers = true; |
3657 | } |
3658 | if (!any_clobbers && (!sunk || gimple_seq_empty_p (s: sunk[bb->index]))) |
3659 | return 0; |
3660 | |
3661 | /* If this was a dry run, tell it we found clobbers to sink. */ |
3662 | if (found_opportunity) |
3663 | { |
3664 | *found_opportunity = true; |
3665 | return 0; |
3666 | } |
3667 | |
3668 | edge succe = single_succ_edge (bb); |
3669 | succbb = succe->dest; |
3670 | |
3671 | /* See if there is a virtual PHI node to take an updated virtual |
3672 | operand from. */ |
3673 | gphi *vphi = NULL; |
3674 | for (gphi_iterator gpi = gsi_start_phis (succbb); |
3675 | !gsi_end_p (i: gpi); gsi_next (i: &gpi)) |
3676 | { |
3677 | tree res = gimple_phi_result (gs: gpi.phi ()); |
3678 | if (virtual_operand_p (op: res)) |
3679 | { |
3680 | vphi = gpi.phi (); |
3681 | break; |
3682 | } |
3683 | } |
3684 | |
3685 | gimple *first_sunk = NULL; |
3686 | gimple *last_sunk = NULL; |
3687 | if (sunk && !(succbb->flags & BB_VISITED)) |
3688 | dgsi = gsi_start (seq&: sunk[succbb->index]); |
3689 | else |
3690 | dgsi = gsi_after_labels (bb: succbb); |
3691 | gsi = gsi_last_bb (bb); |
3692 | for (gsi_prev (i: &gsi); !gsi_end_p (i: gsi); gsi_prev (i: &gsi)) |
3693 | { |
3694 | gimple *stmt = gsi_stmt (i: gsi); |
3695 | tree lhs; |
3696 | if (is_gimple_debug (gs: stmt)) |
3697 | continue; |
3698 | if (gimple_code (g: stmt) == GIMPLE_LABEL) |
3699 | break; |
3700 | lhs = gimple_assign_lhs (gs: stmt); |
3701 | /* Unfortunately we don't have dominance info updated at this |
3702 | point, so checking if |
3703 | dominated_by_p (CDI_DOMINATORS, succbb, |
3704 | gimple_bb (SSA_NAME_DEF_STMT (TREE_OPERAND (lhs, 0))) |
3705 | would be too costly. Thus, avoid sinking any clobbers that |
3706 | refer to non-(D) SSA_NAMEs. */ |
3707 | if (TREE_CODE (lhs) == MEM_REF |
3708 | && TREE_CODE (TREE_OPERAND (lhs, 0)) == SSA_NAME |
3709 | && !SSA_NAME_IS_DEFAULT_DEF (TREE_OPERAND (lhs, 0))) |
3710 | { |
3711 | unlink_stmt_vdef (stmt); |
3712 | gsi_remove (&gsi, true); |
3713 | release_defs (stmt); |
3714 | continue; |
3715 | } |
3716 | |
3717 | /* As we do not change stmt order when sinking across a |
3718 | forwarder edge we can keep virtual operands in place. */ |
3719 | gsi_remove (&gsi, false); |
3720 | gsi_insert_before (&dgsi, stmt, GSI_NEW_STMT); |
3721 | if (!first_sunk) |
3722 | first_sunk = stmt; |
3723 | last_sunk = stmt; |
3724 | } |
3725 | if (sunk && !gimple_seq_empty_p (s: sunk[bb->index])) |
3726 | { |
3727 | if (!first_sunk) |
3728 | first_sunk = gsi_stmt (i: gsi_last (seq&: sunk[bb->index])); |
3729 | last_sunk = gsi_stmt (i: gsi_start (seq&: sunk[bb->index])); |
3730 | gsi_insert_seq_before_without_update (&dgsi, |
3731 | sunk[bb->index], GSI_NEW_STMT); |
3732 | sunk[bb->index] = NULL; |
3733 | } |
3734 | if (first_sunk) |
3735 | { |
3736 | /* Adjust virtual operands if we sunk across a virtual PHI. */ |
3737 | if (vphi) |
3738 | { |
3739 | imm_use_iterator iter; |
3740 | use_operand_p use_p; |
3741 | gimple *use_stmt; |
3742 | tree phi_def = gimple_phi_result (gs: vphi); |
3743 | FOR_EACH_IMM_USE_STMT (use_stmt, iter, phi_def) |
3744 | FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
3745 | SET_USE (use_p, gimple_vdef (first_sunk)); |
3746 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def)) |
3747 | { |
3748 | SSA_NAME_OCCURS_IN_ABNORMAL_PHI (gimple_vdef (first_sunk)) = 1; |
3749 | SSA_NAME_OCCURS_IN_ABNORMAL_PHI (phi_def) = 0; |
3750 | } |
3751 | SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (vphi, succe), |
3752 | gimple_vuse (last_sunk)); |
3753 | SET_USE (gimple_vuse_op (last_sunk), phi_def); |
3754 | } |
/* If succbb has multiple predecessors but no virtual PHI node,
arrange for the virtual operands to be renamed.  */
3757 | else if (!single_pred_p (bb: succbb) |
3758 | && TREE_CODE (gimple_vuse (last_sunk)) == SSA_NAME) |
3759 | { |
3760 | mark_virtual_operand_for_renaming (gimple_vuse (g: last_sunk)); |
3761 | todo |= TODO_update_ssa_only_virtuals; |
3762 | } |
3763 | } |
3764 | |
3765 | return todo; |
3766 | } |
3767 | |
3768 | /* At the end of inlining, we can lower EH_DISPATCH. Return true when |
3769 | we have found some duplicate labels and removed some edges. */ |
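
/* A rough sketch of the two lowerings done below (hypothetical GIMPLE):

   For an ERT_TRY region with several catches,

     eh_dispatch 1;

   becomes a switch on the filter value:

     filter = __builtin_eh_filter (1);
     switch (filter) <default: L0, case 1: L1, case 2: L2>

   For an ERT_ALLOWED_EXCEPTIONS region, it instead becomes a
   conditional comparing the filter value against the region's
   allowed filter.  */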
3770 | |
3771 | static bool |
3772 | lower_eh_dispatch (basic_block src, geh_dispatch *stmt) |
3773 | { |
3774 | gimple_stmt_iterator gsi; |
3775 | int region_nr; |
3776 | eh_region r; |
3777 | tree filter, fn; |
3778 | gimple *x; |
3779 | bool redirected = false; |
3780 | |
3781 | region_nr = gimple_eh_dispatch_region (eh_dispatch_stmt: stmt); |
3782 | r = get_eh_region_from_number (region_nr); |
3783 | |
3784 | gsi = gsi_last_bb (bb: src); |
3785 | |
3786 | switch (r->type) |
3787 | { |
3788 | case ERT_TRY: |
3789 | { |
3790 | auto_vec<tree> labels; |
3791 | tree default_label = NULL; |
3792 | eh_catch c; |
3793 | edge_iterator ei; |
3794 | edge e; |
3795 | hash_set<tree> seen_values; |
3796 | |
/* Collect the labels for a switch.  Zero the post_landing_pad
field because we'll no longer have anything keeping these labels
in existence and the optimizer will be free to merge these
blocks at will.  */
3801 | for (c = r->u.eh_try.first_catch; c ; c = c->next_catch) |
3802 | { |
3803 | tree tp_node, flt_node, lab = c->label; |
3804 | bool have_label = false; |
3805 | |
3806 | c->label = NULL; |
3807 | tp_node = c->type_list; |
3808 | flt_node = c->filter_list; |
3809 | |
3810 | if (tp_node == NULL) |
3811 | { |
3812 | default_label = lab; |
3813 | break; |
3814 | } |
3815 | do |
3816 | { |
3817 | /* Filter out duplicate labels that arise when this handler |
3818 | is shadowed by an earlier one. When no labels are |
3819 | attached to the handler anymore, we remove |
3820 | the corresponding edge and then we delete unreachable |
3821 | blocks at the end of this pass. */ |
3822 | if (! seen_values.contains (TREE_VALUE (flt_node))) |
3823 | { |
3824 | tree t = build_case_label (TREE_VALUE (flt_node), |
3825 | NULL, lab); |
3826 | labels.safe_push (obj: t); |
3827 | seen_values.add (TREE_VALUE (flt_node)); |
3828 | have_label = true; |
3829 | } |
3830 | |
3831 | tp_node = TREE_CHAIN (tp_node); |
3832 | flt_node = TREE_CHAIN (flt_node); |
3833 | } |
3834 | while (tp_node); |
3835 | if (! have_label) |
3836 | { |
3837 | remove_edge (find_edge (src, label_to_block (cfun, lab))); |
3838 | redirected = true; |
3839 | } |
3840 | } |
3841 | |
3842 | /* Clean up the edge flags. */ |
3843 | FOR_EACH_EDGE (e, ei, src->succs) |
3844 | { |
3845 | if (e->flags & EDGE_FALLTHRU) |
3846 | { |
3847 | /* If there was no catch-all, use the fallthru edge. */ |
3848 | if (default_label == NULL) |
3849 | default_label = gimple_block_label (e->dest); |
3850 | e->flags &= ~EDGE_FALLTHRU; |
3851 | } |
3852 | } |
3853 | gcc_assert (default_label != NULL); |
3854 | |
3855 | /* Don't generate a switch if there's only a default case. |
3856 | This is common in the form of try { A; } catch (...) { B; }. */ |
3857 | if (!labels.exists ()) |
3858 | { |
3859 | e = single_succ_edge (bb: src); |
3860 | e->flags |= EDGE_FALLTHRU; |
3861 | } |
3862 | else |
3863 | { |
3864 | fn = builtin_decl_implicit (fncode: BUILT_IN_EH_FILTER); |
3865 | x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, |
3866 | region_nr)); |
3867 | filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn))); |
3868 | filter = make_ssa_name (var: filter, stmt: x); |
3869 | gimple_call_set_lhs (gs: x, lhs: filter); |
3870 | gimple_set_location (g: x, location: gimple_location (g: stmt)); |
3871 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3872 | |
3873 | /* Turn the default label into a default case. */ |
3874 | default_label = build_case_label (NULL, NULL, default_label); |
3875 | sort_case_labels (labels); |
3876 | |
3877 | x = gimple_build_switch (filter, default_label, labels); |
3878 | gimple_set_location (g: x, location: gimple_location (g: stmt)); |
3879 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3880 | } |
3881 | } |
3882 | break; |
3883 | |
3884 | case ERT_ALLOWED_EXCEPTIONS: |
3885 | { |
3886 | edge b_e = BRANCH_EDGE (src); |
3887 | edge f_e = FALLTHRU_EDGE (src); |
3888 | |
3889 | fn = builtin_decl_implicit (fncode: BUILT_IN_EH_FILTER); |
3890 | x = gimple_build_call (fn, 1, build_int_cst (integer_type_node, |
3891 | region_nr)); |
3892 | filter = create_tmp_var (TREE_TYPE (TREE_TYPE (fn))); |
3893 | filter = make_ssa_name (var: filter, stmt: x); |
3894 | gimple_call_set_lhs (gs: x, lhs: filter); |
3895 | gimple_set_location (g: x, location: gimple_location (g: stmt)); |
3896 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3897 | |
3898 | r->u.allowed.label = NULL; |
3899 | x = gimple_build_cond (EQ_EXPR, filter, |
3900 | build_int_cst (TREE_TYPE (filter), |
3901 | r->u.allowed.filter), |
3902 | NULL_TREE, NULL_TREE); |
3903 | gsi_insert_before (&gsi, x, GSI_SAME_STMT); |
3904 | |
3905 | b_e->flags = b_e->flags | EDGE_TRUE_VALUE; |
3906 | f_e->flags = (f_e->flags & ~EDGE_FALLTHRU) | EDGE_FALSE_VALUE; |
3907 | } |
3908 | break; |
3909 | |
3910 | default: |
3911 | gcc_unreachable (); |
3912 | } |
3913 | |
3914 | /* Replace the EH_DISPATCH with the SWITCH or COND generated above. */ |
3915 | gsi_remove (&gsi, true); |
3916 | return redirected; |
3917 | } |
3918 | |
3919 | namespace { |
3920 | |
3921 | const pass_data pass_data_lower_eh_dispatch = |
3922 | { |
3923 | .type: GIMPLE_PASS, /* type */ |
3924 | .name: "ehdisp" , /* name */ |
3925 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
3926 | .tv_id: TV_TREE_EH, /* tv_id */ |
3927 | PROP_gimple_lcf, /* properties_required */ |
3928 | .properties_provided: 0, /* properties_provided */ |
3929 | .properties_destroyed: 0, /* properties_destroyed */ |
3930 | .todo_flags_start: 0, /* todo_flags_start */ |
3931 | .todo_flags_finish: 0, /* todo_flags_finish */ |
3932 | }; |
3933 | |
3934 | class pass_lower_eh_dispatch : public gimple_opt_pass |
3935 | { |
3936 | public: |
3937 | pass_lower_eh_dispatch (gcc::context *ctxt) |
3938 | : gimple_opt_pass (pass_data_lower_eh_dispatch, ctxt) |
3939 | {} |
3940 | |
3941 | /* opt_pass methods: */ |
3942 | bool gate (function *fun) final override |
3943 | { |
3944 | return fun->eh->region_tree != NULL; |
3945 | } |
3946 | unsigned int execute (function *) final override; |
3947 | |
3948 | }; // class pass_lower_eh_dispatch |
3949 | |
3950 | unsigned |
3951 | pass_lower_eh_dispatch::execute (function *fun) |
3952 | { |
3953 | basic_block bb; |
3954 | int flags = 0; |
3955 | bool redirected = false; |
3956 | bool any_resx_to_process = false; |
3957 | |
3958 | assign_filter_values (); |
3959 | |
3960 | FOR_EACH_BB_FN (bb, fun) |
3961 | { |
3962 | gimple *last = *gsi_last_bb (bb); |
3963 | if (last == NULL) |
3964 | continue; |
3965 | if (gimple_code (g: last) == GIMPLE_EH_DISPATCH) |
3966 | { |
3967 | redirected |= lower_eh_dispatch (src: bb, |
3968 | stmt: as_a <geh_dispatch *> (p: last)); |
3969 | flags |= TODO_update_ssa_only_virtuals; |
3970 | } |
3971 | else if (gimple_code (g: last) == GIMPLE_RESX) |
3972 | { |
3973 | if (stmt_can_throw_external (fun, stmt: last)) |
3974 | optimize_clobbers (bb); |
3975 | else if (!any_resx_to_process) |
3976 | sink_clobbers (bb, NULL, found_opportunity: &any_resx_to_process); |
3977 | } |
3978 | bb->flags &= ~BB_VISITED; |
3979 | } |
3980 | if (redirected) |
3981 | { |
3982 | free_dominance_info (CDI_DOMINATORS); |
3983 | delete_unreachable_blocks (); |
3984 | } |
3985 | |
3986 | if (any_resx_to_process) |
3987 | { |
3988 | /* Make sure to catch all secondary sinking opportunities by processing |
3989 | blocks in RPO order and after all CFG modifications from lowering |
3990 | and unreachable block removal. */ |
3991 | int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (fun)); |
3992 | int rpo_n = pre_and_rev_post_order_compute_fn (fun, NULL, rpo, false); |
3993 | gimple_seq *sunk = XCNEWVEC (gimple_seq, last_basic_block_for_fn (fun)); |
3994 | for (int i = 0; i < rpo_n; ++i) |
3995 | { |
3996 | bb = BASIC_BLOCK_FOR_FN (fun, rpo[i]); |
3997 | gimple *last = *gsi_last_bb (bb); |
3998 | if (last |
3999 | && gimple_code (g: last) == GIMPLE_RESX |
4000 | && !stmt_can_throw_external (fun, stmt: last)) |
4001 | flags |= sink_clobbers (bb, sunk); |
4002 | /* If there were any clobbers sunk into this BB, insert them now. */ |
4003 | if (!gimple_seq_empty_p (s: sunk[bb->index])) |
4004 | { |
4005 | gimple_stmt_iterator gsi = gsi_after_labels (bb); |
4006 | gsi_insert_seq_before (&gsi, sunk[bb->index], GSI_NEW_STMT); |
4007 | sunk[bb->index] = NULL; |
4008 | } |
4009 | bb->flags |= BB_VISITED; |
4010 | } |
4011 | free (ptr: rpo); |
4012 | free (ptr: sunk); |
4013 | } |
4014 | |
4015 | return flags; |
4016 | } |
4017 | |
4018 | } // anon namespace |
4019 | |
4020 | gimple_opt_pass * |
4021 | make_pass_lower_eh_dispatch (gcc::context *ctxt) |
4022 | { |
4023 | return new pass_lower_eh_dispatch (ctxt); |
4024 | } |
4025 | |
4026 | /* Walk statements, see what regions and, optionally, landing pads |
4027 | are really referenced. |
4028 | |
Returns in R_REACHABLEP an sbitmap with bits set for reachable regions,
and in LP_REACHABLEP an sbitmap with bits set for reachable landing pads.

Passing NULL for LP_REACHABLEP is valid; in this case only reachable
regions are marked.
4034 | |
4035 | The caller is responsible for freeing the returned sbitmaps. */ |
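
/* Typical use, as in remove_unreachable_handlers below:

     sbitmap r_reachable, lp_reachable;
     mark_reachable_handlers (&r_reachable, &lp_reachable);
     ...
     sbitmap_free (r_reachable);
     sbitmap_free (lp_reachable);  */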
4036 | |
4037 | static void |
4038 | mark_reachable_handlers (sbitmap *r_reachablep, sbitmap *lp_reachablep) |
4039 | { |
4040 | sbitmap r_reachable, lp_reachable; |
4041 | basic_block bb; |
4042 | bool mark_landing_pads = (lp_reachablep != NULL); |
4043 | gcc_checking_assert (r_reachablep != NULL); |
4044 | |
4045 | r_reachable = sbitmap_alloc (cfun->eh->region_array->length ()); |
4046 | bitmap_clear (r_reachable); |
4047 | *r_reachablep = r_reachable; |
4048 | |
4049 | if (mark_landing_pads) |
4050 | { |
4051 | lp_reachable = sbitmap_alloc (cfun->eh->lp_array->length ()); |
4052 | bitmap_clear (lp_reachable); |
4053 | *lp_reachablep = lp_reachable; |
4054 | } |
4055 | else |
4056 | lp_reachable = NULL; |
4057 | |
4058 | FOR_EACH_BB_FN (bb, cfun) |
4059 | { |
4060 | gimple_stmt_iterator gsi; |
4061 | |
4062 | for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi)) |
4063 | { |
4064 | gimple *stmt = gsi_stmt (i: gsi); |
4065 | |
4066 | if (mark_landing_pads) |
4067 | { |
4068 | int lp_nr = lookup_stmt_eh_lp (t: stmt); |
4069 | |
4070 | /* Negative LP numbers are MUST_NOT_THROW regions which |
4071 | are not considered BB enders. */ |
4072 | if (lp_nr < 0) |
4073 | bitmap_set_bit (map: r_reachable, bitno: -lp_nr); |
4074 | |
4075 | /* Positive LP numbers are real landing pads, and BB enders. */ |
4076 | else if (lp_nr > 0) |
4077 | { |
4078 | gcc_assert (gsi_one_before_end_p (gsi)); |
4079 | eh_region region = get_eh_region_from_lp_number (lp_nr); |
4080 | bitmap_set_bit (map: r_reachable, bitno: region->index); |
4081 | bitmap_set_bit (map: lp_reachable, bitno: lp_nr); |
4082 | } |
4083 | } |
4084 | |
4085 | /* Avoid removing regions referenced from RESX/EH_DISPATCH. */ |
4086 | switch (gimple_code (g: stmt)) |
4087 | { |
4088 | case GIMPLE_RESX: |
4089 | bitmap_set_bit (map: r_reachable, |
4090 | bitno: gimple_resx_region (resx_stmt: as_a <gresx *> (p: stmt))); |
4091 | break; |
4092 | case GIMPLE_EH_DISPATCH: |
4093 | bitmap_set_bit (map: r_reachable, |
4094 | bitno: gimple_eh_dispatch_region ( |
4095 | eh_dispatch_stmt: as_a <geh_dispatch *> (p: stmt))); |
4096 | break; |
4097 | case GIMPLE_CALL: |
4098 | if (gimple_call_builtin_p (stmt, BUILT_IN_EH_COPY_VALUES)) |
4099 | for (int i = 0; i < 2; ++i) |
4100 | { |
4101 | tree rt = gimple_call_arg (gs: stmt, index: i); |
4102 | HOST_WIDE_INT ri = tree_to_shwi (rt); |
4103 | |
4104 | gcc_assert (ri == (int)ri); |
4105 | bitmap_set_bit (map: r_reachable, bitno: ri); |
4106 | } |
4107 | break; |
4108 | default: |
4109 | break; |
4110 | } |
4111 | } |
4112 | } |
4113 | } |
4114 | |
4115 | /* Remove unreachable handlers and unreachable landing pads. */ |
4116 | |
4117 | static void |
4118 | remove_unreachable_handlers (void) |
4119 | { |
4120 | sbitmap r_reachable, lp_reachable; |
4121 | eh_region region; |
4122 | eh_landing_pad lp; |
4123 | unsigned i; |
4124 | |
4125 | mark_reachable_handlers (r_reachablep: &r_reachable, lp_reachablep: &lp_reachable); |
4126 | |
4127 | if (dump_file) |
4128 | { |
4129 | fprintf (stream: dump_file, format: "Before removal of unreachable regions:\n" ); |
4130 | dump_eh_tree (dump_file, cfun); |
4131 | fprintf (stream: dump_file, format: "Reachable regions: " ); |
4132 | dump_bitmap_file (dump_file, r_reachable); |
4133 | fprintf (stream: dump_file, format: "Reachable landing pads: " ); |
4134 | dump_bitmap_file (dump_file, lp_reachable); |
4135 | } |
4136 | |
4137 | if (dump_file) |
4138 | { |
4139 | FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region) |
4140 | if (region && !bitmap_bit_p (map: r_reachable, bitno: region->index)) |
fprintf (stream: dump_file,
format: "Removing unreachable region %d\n",
region->index);
4144 | } |
4145 | |
4146 | remove_unreachable_eh_regions (r_reachable); |
4147 | |
4148 | FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp) |
4149 | if (lp && !bitmap_bit_p (map: lp_reachable, bitno: lp->index)) |
4150 | { |
4151 | if (dump_file) |
fprintf (stream: dump_file,
format: "Removing unreachable landing pad %d\n",
lp->index);
4155 | remove_eh_landing_pad (lp); |
4156 | } |
4157 | |
4158 | if (dump_file) |
4159 | { |
4160 | fprintf (stream: dump_file, format: "\n\nAfter removal of unreachable regions:\n" ); |
4161 | dump_eh_tree (dump_file, cfun); |
4162 | fprintf (stream: dump_file, format: "\n\n" ); |
4163 | } |
4164 | |
4165 | sbitmap_free (map: r_reachable); |
4166 | sbitmap_free (map: lp_reachable); |
4167 | |
4168 | if (flag_checking) |
4169 | verify_eh_tree (cfun); |
4170 | } |
4171 | |
/* Remove unreachable handlers if any landing pads have been removed
after the last ehcleanup pass (due to gimple_purge_dead_eh_edges).  */
4174 | |
4175 | void |
4176 | maybe_remove_unreachable_handlers (void) |
4177 | { |
4178 | eh_landing_pad lp; |
4179 | unsigned i; |
4180 | |
4181 | if (cfun->eh == NULL) |
4182 | return; |
4183 | |
4184 | FOR_EACH_VEC_SAFE_ELT (cfun->eh->lp_array, i, lp) |
4185 | if (lp |
4186 | && (lp->post_landing_pad == NULL_TREE |
4187 | || label_to_block (cfun, lp->post_landing_pad) == NULL)) |
4188 | { |
4189 | remove_unreachable_handlers (); |
4190 | return; |
4191 | } |
4192 | } |
4193 | |
4194 | /* Remove regions that do not have landing pads. This assumes |
4195 | that remove_unreachable_handlers has already been run, and |
4196 | that we've just manipulated the landing pads since then. |
4197 | |
4198 | Preserve regions with landing pads and regions that prevent |
4199 | exceptions from propagating further, even if these regions |
4200 | are not reachable. */ |
4201 | |
4202 | static void |
4203 | remove_unreachable_handlers_no_lp (void) |
4204 | { |
4205 | eh_region region; |
4206 | sbitmap r_reachable; |
4207 | unsigned i; |
4208 | |
4209 | mark_reachable_handlers (r_reachablep: &r_reachable, /*lp_reachablep=*/NULL); |
4210 | |
4211 | FOR_EACH_VEC_SAFE_ELT (cfun->eh->region_array, i, region) |
4212 | { |
4213 | if (! region) |
4214 | continue; |
4215 | |
4216 | if (region->landing_pads != NULL |
4217 | || region->type == ERT_MUST_NOT_THROW) |
4218 | bitmap_set_bit (map: r_reachable, bitno: region->index); |
4219 | |
4220 | if (dump_file |
4221 | && !bitmap_bit_p (map: r_reachable, bitno: region->index)) |
fprintf (stream: dump_file,
format: "Removing unreachable region %d\n",
region->index);
4225 | } |
4226 | |
4227 | remove_unreachable_eh_regions (r_reachable); |
4228 | |
4229 | sbitmap_free (map: r_reachable); |
4230 | } |
4231 | |
/* Undo critical edge splitting on an EH landing pad.  Earlier, we
optimistically split all sorts of edges, including EH edges.  The
optimization passes in between may not have needed them; if not,
we should undo the split.
4236 | |
4237 | Recognize this case by having one EH edge incoming to the BB and |
4238 | one normal edge outgoing; BB should be empty apart from the |
4239 | post_landing_pad label. |
4240 | |
4241 | Note that this is slightly different from the empty handler case |
4242 | handled by cleanup_empty_eh, in that the actual handler may yet |
4243 | have actual code but the landing pad has been separated from the |
4244 | handler. As such, cleanup_empty_eh relies on this transformation |
4245 | having been done first. */ |
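
/* Schematically (a hypothetical CFG fragment):

     throwing stmt --EH--> BB (post_landing_pad label only) ---> DEST

   is turned back into

     throwing stmt --EH--> DEST

   leaving the forwarder block BB unreachable.  */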
4246 | |
4247 | static bool |
4248 | unsplit_eh (eh_landing_pad lp) |
4249 | { |
4250 | basic_block bb = label_to_block (cfun, lp->post_landing_pad); |
4251 | gimple_stmt_iterator gsi; |
4252 | edge e_in, e_out; |
4253 | |
4254 | /* Quickly check the edge counts on BB for singularity. */ |
4255 | if (!single_pred_p (bb) || !single_succ_p (bb)) |
4256 | return false; |
4257 | e_in = single_pred_edge (bb); |
4258 | e_out = single_succ_edge (bb); |
4259 | |
4260 | /* Input edge must be EH and output edge must be normal. */ |
4261 | if ((e_in->flags & EDGE_EH) == 0 || (e_out->flags & EDGE_EH) != 0) |
4262 | return false; |
4263 | |
4264 | /* The block must be empty except for the labels and debug insns. */ |
4265 | gsi = gsi_after_labels (bb); |
4266 | if (!gsi_end_p (i: gsi) && is_gimple_debug (gs: gsi_stmt (i: gsi))) |
4267 | gsi_next_nondebug (i: &gsi); |
4268 | if (!gsi_end_p (i: gsi)) |
4269 | return false; |
4270 | |
4271 | /* The destination block must not already have a landing pad |
4272 | for a different region. */ |
4273 | for (gsi = gsi_start_bb (bb: e_out->dest); !gsi_end_p (i: gsi); gsi_next (i: &gsi)) |
4274 | { |
4275 | glabel *label_stmt = dyn_cast <glabel *> (p: gsi_stmt (i: gsi)); |
4276 | tree lab; |
4277 | int lp_nr; |
4278 | |
4279 | if (!label_stmt) |
4280 | break; |
4281 | lab = gimple_label_label (gs: label_stmt); |
4282 | lp_nr = EH_LANDING_PAD_NR (lab); |
4283 | if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region) |
4284 | return false; |
4285 | } |
4286 | |
/* The new destination block must not already be a destination of
the source block, lest we merge fallthru and eh edges and get
all sorts of confusion.  */
4290 | if (find_edge (e_in->src, e_out->dest)) |
4291 | return false; |
4292 | |
4293 | /* ??? We can get degenerate phis due to cfg cleanups. I would have |
4294 | thought this should have been cleaned up by a phicprop pass, but |
4295 | that doesn't appear to handle virtuals. Propagate by hand. */ |
4296 | if (!gimple_seq_empty_p (s: phi_nodes (bb))) |
4297 | { |
4298 | for (gphi_iterator gpi = gsi_start_phis (bb); !gsi_end_p (i: gpi); ) |
4299 | { |
4300 | gimple *use_stmt; |
4301 | gphi *phi = gpi.phi (); |
4302 | tree lhs = gimple_phi_result (gs: phi); |
4303 | tree rhs = gimple_phi_arg_def (gs: phi, index: 0); |
4304 | use_operand_p use_p; |
4305 | imm_use_iterator iter; |
4306 | |
4307 | FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs) |
4308 | { |
4309 | FOR_EACH_IMM_USE_ON_STMT (use_p, iter) |
4310 | SET_USE (use_p, rhs); |
4311 | } |
4312 | |
4313 | if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs)) |
4314 | SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1; |
4315 | |
4316 | remove_phi_node (&gpi, true); |
4317 | } |
4318 | } |
4319 | |
4320 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4321 | fprintf (stream: dump_file, format: "Unsplit EH landing pad %d to block %i.\n" , |
4322 | lp->index, e_out->dest->index); |
4323 | |
4324 | /* Redirect the edge. Since redirect_eh_edge_1 expects to be moving |
4325 | a successor edge, humor it. But do the real CFG change with the |
4326 | predecessor of E_OUT in order to preserve the ordering of arguments |
4327 | to the PHI nodes in E_OUT->DEST. */ |
4328 | redirect_eh_edge_1 (edge_in: e_in, new_bb: e_out->dest, change_region: false); |
4329 | redirect_edge_pred (e_out, e_in->src); |
4330 | e_out->flags = e_in->flags; |
4331 | e_out->probability = e_in->probability; |
4332 | remove_edge (e_in); |
4333 | |
4334 | return true; |
4335 | } |
4336 | |
4337 | /* Examine each landing pad block and see if it matches unsplit_eh. */ |
4338 | |
4339 | static bool |
4340 | unsplit_all_eh (void) |
4341 | { |
4342 | bool changed = false; |
4343 | eh_landing_pad lp; |
4344 | int i; |
4345 | |
4346 | for (i = 1; vec_safe_iterate (cfun->eh->lp_array, ix: i, ptr: &lp); ++i) |
4347 | if (lp) |
4348 | changed |= unsplit_eh (lp); |
4349 | |
4350 | return changed; |
4351 | } |
4352 | |
4353 | /* Wrapper around unsplit_all_eh that makes it usable everywhere. */ |
4354 | |
4355 | void |
4356 | unsplit_eh_edges (void) |
4357 | { |
4358 | bool changed; |
4359 | |
4360 | /* unsplit_all_eh can die looking up unreachable landing pads. */ |
4361 | maybe_remove_unreachable_handlers (); |
4362 | |
4363 | changed = unsplit_all_eh (); |
4364 | |
4365 | /* If EH edges have been unsplit, delete unreachable forwarder blocks. */ |
4366 | if (changed) |
4367 | { |
4368 | free_dominance_info (CDI_DOMINATORS); |
4369 | free_dominance_info (CDI_POST_DOMINATORS); |
4370 | delete_unreachable_blocks (); |
4371 | } |
4372 | } |
4373 | |
4374 | /* A subroutine of cleanup_empty_eh. Redirect all EH edges incoming |
4375 | to OLD_BB to NEW_BB; return true on success, false on failure. |
4376 | |
4377 | OLD_BB_OUT is the edge into NEW_BB from OLD_BB, so if we miss any |
4378 | PHI variables from OLD_BB we can pick them up from OLD_BB_OUT. |
4379 | Virtual PHIs may be deleted and marked for renaming. */ |
4380 | |
4381 | static bool |
4382 | cleanup_empty_eh_merge_phis (basic_block new_bb, basic_block old_bb, |
4383 | edge old_bb_out, bool change_region) |
4384 | { |
4385 | gphi_iterator ngsi, ogsi; |
4386 | edge_iterator ei; |
4387 | edge e; |
4388 | bitmap ophi_handled; |
4389 | |
/* The destination block must not be a regular successor for any
of the preds of the landing pad.  Thus, avoid turning
     <..>
     |  \ EH
     |  <..>
     |  /
     <..>
into
     <..>
     |  | EH
     <..>
which CFG verification would choke on.  See PR45172 and PR51089.  */
4402 | if (!single_pred_p (bb: new_bb)) |
4403 | FOR_EACH_EDGE (e, ei, old_bb->preds) |
4404 | if (find_edge (e->src, new_bb)) |
4405 | return false; |
4406 | |
4407 | FOR_EACH_EDGE (e, ei, old_bb->preds) |
4408 | redirect_edge_var_map_clear (e); |
4409 | |
4410 | ophi_handled = BITMAP_ALLOC (NULL); |
4411 | |
4412 | /* First, iterate through the PHIs on NEW_BB and set up the edge_var_map |
4413 | for the edges we're going to move. */ |
4414 | for (ngsi = gsi_start_phis (new_bb); !gsi_end_p (i: ngsi); gsi_next (i: &ngsi)) |
4415 | { |
4416 | gphi *ophi, *nphi = ngsi.phi (); |
4417 | tree nresult, nop; |
4418 | |
4419 | nresult = gimple_phi_result (gs: nphi); |
4420 | nop = gimple_phi_arg_def (gs: nphi, index: old_bb_out->dest_idx); |
4421 | |
4422 | /* Find the corresponding PHI in OLD_BB so we can forward-propagate |
4423 | the source ssa_name. */ |
4424 | ophi = NULL; |
4425 | for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (i: ogsi); gsi_next (i: &ogsi)) |
4426 | { |
4427 | ophi = ogsi.phi (); |
4428 | if (gimple_phi_result (gs: ophi) == nop) |
4429 | break; |
4430 | ophi = NULL; |
4431 | } |
4432 | |
4433 | /* If we did find the corresponding PHI, copy those inputs. */ |
4434 | if (ophi) |
4435 | { |
4436 | /* If NOP is used somewhere else beyond phis in new_bb, give up. */ |
4437 | if (!has_single_use (var: nop)) |
4438 | { |
4439 | imm_use_iterator imm_iter; |
4440 | use_operand_p use_p; |
4441 | |
4442 | FOR_EACH_IMM_USE_FAST (use_p, imm_iter, nop) |
4443 | { |
4444 | if (!gimple_debug_bind_p (USE_STMT (use_p)) |
4445 | && (gimple_code (USE_STMT (use_p)) != GIMPLE_PHI |
4446 | || gimple_bb (USE_STMT (use_p)) != new_bb)) |
4447 | goto fail; |
4448 | } |
4449 | } |
4450 | bitmap_set_bit (ophi_handled, SSA_NAME_VERSION (nop)); |
4451 | FOR_EACH_EDGE (e, ei, old_bb->preds) |
4452 | { |
4453 | location_t oloc; |
4454 | tree oop; |
4455 | |
4456 | if ((e->flags & EDGE_EH) == 0) |
4457 | continue; |
4458 | oop = gimple_phi_arg_def (gs: ophi, index: e->dest_idx); |
4459 | oloc = gimple_phi_arg_location (phi: ophi, i: e->dest_idx); |
4460 | redirect_edge_var_map_add (e, nresult, oop, oloc); |
4461 | } |
4462 | } |
4463 | /* If we didn't find the PHI, if it's a real variable or a VOP, we know |
4464 | from the fact that OLD_BB is tree_empty_eh_handler_p that the |
4465 | variable is unchanged from input to the block and we can simply |
4466 | re-use the input to NEW_BB from the OLD_BB_OUT edge. */ |
4467 | else |
4468 | { |
4469 | location_t nloc |
4470 | = gimple_phi_arg_location (phi: nphi, i: old_bb_out->dest_idx); |
4471 | FOR_EACH_EDGE (e, ei, old_bb->preds) |
4472 | redirect_edge_var_map_add (e, nresult, nop, nloc); |
4473 | } |
4474 | } |
4475 | |
4476 | /* Second, verify that all PHIs from OLD_BB have been handled. If not, |
4477 | we don't know what values from the other edges into NEW_BB to use. */ |
4478 | for (ogsi = gsi_start_phis (old_bb); !gsi_end_p (i: ogsi); gsi_next (i: &ogsi)) |
4479 | { |
4480 | gphi *ophi = ogsi.phi (); |
4481 | tree oresult = gimple_phi_result (gs: ophi); |
4482 | if (!bitmap_bit_p (ophi_handled, SSA_NAME_VERSION (oresult))) |
4483 | goto fail; |
4484 | } |
4485 | |
4486 | /* Finally, move the edges and update the PHIs. */ |
4487 | for (ei = ei_start (old_bb->preds); (e = ei_safe_edge (i: ei)); ) |
4488 | if (e->flags & EDGE_EH) |
4489 | { |
/* ??? CFG manipulation routines do not try to update loop
form on edge redirection.  Do so manually here for now.  */
/* If we redirect a loop entry or latch edge, that will either create
a multiple-entry loop or rotate the loop.  If the loops merge,
we may have created a loop with multiple latches.
All of this isn't easily fixed, thus cancel the affected loop
and mark the other loop as possibly having multiple latches.  */
4497 | if (e->dest == e->dest->loop_father->header) |
4498 | { |
4499 | mark_loop_for_removal (e->dest->loop_father); |
4500 | new_bb->loop_father->latch = NULL; |
4501 | loops_state_set (flags: LOOPS_MAY_HAVE_MULTIPLE_LATCHES); |
4502 | } |
4503 | redirect_eh_edge_1 (edge_in: e, new_bb, change_region); |
4504 | redirect_edge_succ (e, new_bb); |
4505 | flush_pending_stmts (e); |
4506 | } |
4507 | else |
4508 | ei_next (i: &ei); |
4509 | |
4510 | BITMAP_FREE (ophi_handled); |
4511 | return true; |
4512 | |
4513 | fail: |
4514 | FOR_EACH_EDGE (e, ei, old_bb->preds) |
4515 | redirect_edge_var_map_clear (e); |
4516 | BITMAP_FREE (ophi_handled); |
4517 | return false; |
4518 | } |
4519 | |
4520 | /* A subroutine of cleanup_empty_eh. Move a landing pad LP from its |
4521 | old region to NEW_REGION at BB. */ |
4522 | |
4523 | static void |
4524 | cleanup_empty_eh_move_lp (basic_block bb, edge e_out, |
4525 | eh_landing_pad lp, eh_region new_region) |
4526 | { |
4527 | gimple_stmt_iterator gsi; |
4528 | eh_landing_pad *pp; |
4529 | |
4530 | for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp) |
4531 | continue; |
4532 | *pp = lp->next_lp; |
4533 | |
4534 | lp->region = new_region; |
4535 | lp->next_lp = new_region->landing_pads; |
4536 | new_region->landing_pads = lp; |
4537 | |
4538 | /* Delete the RESX that was matched within the empty handler block. */ |
4539 | gsi = gsi_last_bb (bb); |
4540 | unlink_stmt_vdef (gsi_stmt (i: gsi)); |
4541 | gsi_remove (&gsi, true); |
4542 | |
4543 | /* Clean up E_OUT for the fallthru. */ |
4544 | e_out->flags = (e_out->flags & ~EDGE_EH) | EDGE_FALLTHRU; |
4545 | e_out->probability = profile_probability::always (); |
4546 | } |
4547 | |
4548 | /* A subroutine of cleanup_empty_eh. Handle more complex cases of |
4549 | unsplitting than unsplit_eh was prepared to handle, e.g. when |
4550 | multiple incoming edges and phis are involved. */ |
4551 | |
4552 | static bool |
4553 | cleanup_empty_eh_unsplit (basic_block bb, edge e_out, eh_landing_pad lp) |
4554 | { |
4555 | gimple_stmt_iterator gsi; |
4556 | tree lab; |
4557 | |
4558 | /* We really ought not have totally lost everything following |
4559 | a landing pad label. Given that BB is empty, there had better |
4560 | be a successor. */ |
4561 | gcc_assert (e_out != NULL); |
4562 | |
4563 | /* The destination block must not already have a landing pad |
4564 | for a different region. */ |
4565 | lab = NULL; |
4566 | for (gsi = gsi_start_bb (bb: e_out->dest); !gsi_end_p (i: gsi); gsi_next (i: &gsi)) |
4567 | { |
4568 | glabel *stmt = dyn_cast <glabel *> (p: gsi_stmt (i: gsi)); |
4569 | int lp_nr; |
4570 | |
4571 | if (!stmt) |
4572 | break; |
4573 | lab = gimple_label_label (gs: stmt); |
4574 | lp_nr = EH_LANDING_PAD_NR (lab); |
4575 | if (lp_nr && get_eh_region_from_lp_number (lp_nr) != lp->region) |
4576 | return false; |
4577 | } |
4578 | |
4579 | /* Attempt to move the PHIs into the successor block. */ |
4580 | if (cleanup_empty_eh_merge_phis (new_bb: e_out->dest, old_bb: bb, old_bb_out: e_out, change_region: false)) |
4581 | { |
4582 | if (dump_file && (dump_flags & TDF_DETAILS)) |
fprintf (stream: dump_file,
format: "Unsplit EH landing pad %d to block %i "
"(via cleanup_empty_eh).\n",
lp->index, e_out->dest->index);
4587 | return true; |
4588 | } |
4589 | |
4590 | return false; |
4591 | } |
4592 | |
4593 | /* Return true if edge E_FIRST is part of an empty infinite loop |
4594 | or leads to such a loop through a series of single successor |
4595 | empty bbs. */ |
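
/* E.g. (a hypothetical CFG) if E_FIRST leads to empty BB1, whose single
   successor is empty BB2, whose single successor is BB1 again, this
   returns true.  */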
4596 | |
4597 | static bool |
4598 | infinite_empty_loop_p (edge e_first) |
4599 | { |
4600 | bool inf_loop = false; |
4601 | edge e; |
4602 | |
4603 | if (e_first->dest == e_first->src) |
4604 | return true; |
4605 | |
4606 | e_first->src->aux = (void *) 1; |
4607 | for (e = e_first; single_succ_p (bb: e->dest); e = single_succ_edge (bb: e->dest)) |
4608 | { |
4609 | gimple_stmt_iterator gsi; |
4610 | if (e->dest->aux) |
4611 | { |
4612 | inf_loop = true; |
4613 | break; |
4614 | } |
4615 | e->dest->aux = (void *) 1; |
4616 | gsi = gsi_after_labels (bb: e->dest); |
4617 | if (!gsi_end_p (i: gsi) && is_gimple_debug (gs: gsi_stmt (i: gsi))) |
4618 | gsi_next_nondebug (i: &gsi); |
4619 | if (!gsi_end_p (i: gsi)) |
4620 | break; |
4621 | } |
4622 | e_first->src->aux = NULL; |
4623 | for (e = e_first; e->dest->aux; e = single_succ_edge (bb: e->dest)) |
4624 | e->dest->aux = NULL; |
4625 | |
4626 | return inf_loop; |
4627 | } |
4628 | |
/* Examine the block associated with LP to determine if it's an empty
handler for its EH region.  If so, attempt to redirect EH edges to
an outer region.  Return true if the CFG was updated in any way.
This is similar to jump forwarding, just across EH edges.  */
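
/* For example (hypothetical GIMPLE), an empty handler

     <bb 9>:          landing pad of region 3
     L3:
     resx 2;          immediately rethrows into region 2

   does no useful work, so its incoming EH edges can be redirected
   towards region 2, or the landing pad itself moved to region 2.  */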
4633 | |
4634 | static bool |
4635 | cleanup_empty_eh (eh_landing_pad lp) |
4636 | { |
4637 | basic_block bb = label_to_block (cfun, lp->post_landing_pad); |
4638 | gimple_stmt_iterator gsi; |
4639 | gimple *resx; |
4640 | eh_region new_region; |
4641 | edge_iterator ei; |
4642 | edge e, e_out; |
4643 | bool has_non_eh_pred; |
4644 | bool ret = false; |
4645 | int new_lp_nr; |
4646 | |
4647 | /* There can be zero or one edges out of BB. This is the quickest test. */ |
4648 | switch (EDGE_COUNT (bb->succs)) |
4649 | { |
4650 | case 0: |
4651 | e_out = NULL; |
4652 | break; |
4653 | case 1: |
4654 | e_out = single_succ_edge (bb); |
4655 | break; |
4656 | default: |
4657 | return false; |
4658 | } |
4659 | |
4660 | gsi = gsi_last_nondebug_bb (bb); |
4661 | resx = gsi_stmt (i: gsi); |
4662 | if (resx && is_gimple_resx (gs: resx)) |
4663 | { |
4664 | if (stmt_can_throw_external (cfun, stmt: resx)) |
4665 | optimize_clobbers (bb); |
4666 | else if (sink_clobbers (bb)) |
4667 | ret = true; |
4668 | } |
4669 | |
4670 | gsi = gsi_after_labels (bb); |
4671 | |
4672 | /* Make sure to skip debug statements. */ |
4673 | if (!gsi_end_p (i: gsi) && is_gimple_debug (gs: gsi_stmt (i: gsi))) |
4674 | gsi_next_nondebug (i: &gsi); |
4675 | |
4676 | /* If the block is totally empty, look for more unsplitting cases. */ |
4677 | if (gsi_end_p (i: gsi)) |
4678 | { |
/* Bail out for the degenerate case of an infinite loop.  Also bail
out if bb has no successors and is totally empty, which can happen
e.g. because of an incorrect noreturn attribute.  */
4682 | if (e_out == NULL |
4683 | || infinite_empty_loop_p (e_first: e_out)) |
4684 | return ret; |
4685 | |
4686 | return ret | cleanup_empty_eh_unsplit (bb, e_out, lp); |
4687 | } |
4688 | |
4689 | /* The block should consist only of a single RESX statement, modulo a |
4690 | preceding call to __builtin_stack_restore if there is no outgoing |
4691 | edge, since the call can be eliminated in this case. */ |
4692 | resx = gsi_stmt (i: gsi); |
4693 | if (!e_out && gimple_call_builtin_p (resx, BUILT_IN_STACK_RESTORE)) |
4694 | { |
4695 | gsi_next_nondebug (i: &gsi); |
4696 | resx = gsi_stmt (i: gsi); |
4697 | } |
4698 | if (!is_gimple_resx (gs: resx)) |
4699 | return ret; |
4700 | gcc_assert (gsi_one_nondebug_before_end_p (gsi)); |
4701 | |
4702 | /* Determine if there are non-EH edges, or resx edges into the handler. */ |
4703 | has_non_eh_pred = false; |
4704 | FOR_EACH_EDGE (e, ei, bb->preds) |
4705 | if (!(e->flags & EDGE_EH)) |
4706 | has_non_eh_pred = true; |
4707 | |
4708 | /* Find the handler that's outer of the empty handler by looking at |
4709 | where the RESX instruction was vectored. */ |
4710 | new_lp_nr = lookup_stmt_eh_lp (t: resx); |
4711 | new_region = get_eh_region_from_lp_number (new_lp_nr); |
4712 | |
4713 | /* If there's no destination region within the current function, |
4714 | redirection is trivial via removing the throwing statements from |
4715 | the EH region, removing the EH edges, and allowing the block |
4716 | to go unreachable. */ |
4717 | if (new_region == NULL) |
4718 | { |
4719 | gcc_assert (e_out == NULL); |
4720 | for (ei = ei_start (bb->preds); (e = ei_safe_edge (i: ei)); ) |
4721 | if (e->flags & EDGE_EH) |
4722 | { |
4723 | gimple *stmt = *gsi_last_bb (bb: e->src); |
4724 | remove_stmt_from_eh_lp (t: stmt); |
4725 | remove_edge (e); |
4726 | } |
4727 | else |
4728 | ei_next (i: &ei); |
4729 | goto succeed; |
4730 | } |
4731 | |
4732 | /* If the destination region is a MUST_NOT_THROW, allow the runtime |
4733 | to handle the abort and allow the blocks to go unreachable. */ |
4734 | if (new_region->type == ERT_MUST_NOT_THROW) |
4735 | { |
4736 | for (ei = ei_start (bb->preds); (e = ei_safe_edge (i: ei)); ) |
4737 | if (e->flags & EDGE_EH) |
4738 | { |
4739 | gimple *stmt = *gsi_last_bb (bb: e->src); |
4740 | remove_stmt_from_eh_lp (t: stmt); |
4741 | add_stmt_to_eh_lp (t: stmt, num: new_lp_nr); |
4742 | remove_edge (e); |
4743 | } |
4744 | else |
4745 | ei_next (i: &ei); |
4746 | goto succeed; |
4747 | } |
4748 | |
4749 | /* Try to redirect the EH edges and merge the PHIs into the destination |
4750 | landing pad block. If the merge succeeds, we'll already have redirected |
4751 | all the EH edges. The handler itself will go unreachable if there were |
4752 | no normal edges. */ |
4753 | if (cleanup_empty_eh_merge_phis (new_bb: e_out->dest, old_bb: bb, old_bb_out: e_out, change_region: true)) |
4754 | goto succeed; |
4755 | |
4756 | /* Finally, if all input edges are EH edges, then we can (potentially) |
4757 | reduce the number of transfers from the runtime by moving the landing |
4758 | pad from the original region to the new region. This is a win when |
4759 | we remove the last CLEANUP region along a particular exception |
4760 | propagation path. Since nothing changes except for the region with |
4761 | which the landing pad is associated, the PHI nodes do not need to be |
4762 | adjusted at all. */ |
4763 | if (!has_non_eh_pred) |
4764 | { |
4765 | cleanup_empty_eh_move_lp (bb, e_out, lp, new_region); |
4766 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4767 | fprintf (stream: dump_file, format: "Empty EH handler %i moved to EH region %i.\n" , |
4768 | lp->index, new_region->index); |
4769 | |
4770 | /* ??? The CFG didn't change, but we may have rendered the |
4771 | old EH region unreachable. Trigger a cleanup there. */ |
4772 | return true; |
4773 | } |
4774 | |
4775 | return ret; |
4776 | |
4777 | succeed: |
4778 | if (dump_file && (dump_flags & TDF_DETAILS)) |
4779 | fprintf (stream: dump_file, format: "Empty EH handler %i removed.\n" , lp->index); |
4780 | remove_eh_landing_pad (lp); |
4781 | return true; |
4782 | } |
4783 | |
4784 | /* Do a post-order traversal of the EH region tree. Examine each |
4785 | post_landing_pad block and see if we can eliminate it as empty. */ |
4786 | |
4787 | static bool |
4788 | cleanup_all_empty_eh (void) |
4789 | { |
4790 | bool changed = false; |
4791 | eh_landing_pad lp; |
4792 | int i; |
4793 | |
/* The post-order traversal may lead to quadratic behavior in the
redirection of incoming EH edges from inner LPs, so first try to walk
the region tree from inner to outer LPs in order to eliminate these
edges.  */
4797 | for (i = vec_safe_length (cfun->eh->lp_array) - 1; i >= 1; --i) |
4798 | { |
4799 | lp = (*cfun->eh->lp_array)[i]; |
4800 | if (lp) |
4801 | changed |= cleanup_empty_eh (lp); |
4802 | } |
4803 | |
4804 | /* Now do the post-order traversal to eliminate outer empty LPs. */ |
4805 | for (i = 1; vec_safe_iterate (cfun->eh->lp_array, ix: i, ptr: &lp); ++i) |
4806 | if (lp) |
4807 | changed |= cleanup_empty_eh (lp); |
4808 | |
4809 | return changed; |
4810 | } |
4811 | |
/* Perform cleanups and lowering of exception handling
1) cleanup regions whose handlers do nothing are optimized out,
2) MUST_NOT_THROW regions that became dead because of 1) are optimized out,
3) info about which regions contain instructions, and which regions
are reachable via local EH edges, is collected,
4) the EH tree is pruned of regions that are no longer necessary.

TODO: Push MUST_NOT_THROW regions to the root of the EH tree.
Unify those that have the same failure decl and locus.
*/

static unsigned int
execute_cleanup_eh_1 (void)
{
  /* Do this first: unsplit_all_eh and cleanup_all_empty_eh can die
     looking up unreachable landing pads.  */
  remove_unreachable_handlers ();

  /* Watch out for the region tree vanishing because all of its
     regions turned out to be unreachable.  */
  if (cfun->eh->region_tree)
    {
      bool changed = false;

      if (optimize)
	changed |= unsplit_all_eh ();
      changed |= cleanup_all_empty_eh ();

      if (changed)
	{
	  free_dominance_info (CDI_DOMINATORS);
	  free_dominance_info (CDI_POST_DOMINATORS);

	  /* We delayed all basic block deletion, as we may have performed
	     cleanups on EH edges while non-EH edges were still present.  */
	  delete_unreachable_blocks ();

	  /* We manipulated the landing pads.  Remove any region that no
	     longer has a landing pad.  */
	  remove_unreachable_handlers_no_lp ();

	  return TODO_cleanup_cfg | TODO_update_ssa_only_virtuals;
	}
    }

  return 0;
}

namespace {

const pass_data pass_data_cleanup_eh =
{
  GIMPLE_PASS, /* type */
  "ehcleanup", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EH, /* tv_id */
  PROP_gimple_lcf, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_cleanup_eh : public gimple_opt_pass
{
public:
  pass_cleanup_eh (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_cleanup_eh, ctxt)
  {}

  /* opt_pass methods: */
  opt_pass * clone () final override { return new pass_cleanup_eh (m_ctxt); }
  bool gate (function *fun) final override
    {
      return fun->eh != NULL && fun->eh->region_tree != NULL;
    }

  unsigned int execute (function *) final override;

}; // class pass_cleanup_eh

unsigned int
pass_cleanup_eh::execute (function *fun)
{
  int ret = execute_cleanup_eh_1 ();

  /* If the function no longer needs an EH personality routine,
     clear it.  This exposes cross-language inlining opportunities
     and avoids references to a never-defined personality routine.  */
  if (DECL_FUNCTION_PERSONALITY (current_function_decl)
      && function_needs_eh_personality (fun) != eh_personality_lang)
    DECL_FUNCTION_PERSONALITY (current_function_decl) = NULL_TREE;

  return ret;
}

} // anon namespace

gimple_opt_pass *
make_pass_cleanup_eh (gcc::context *ctxt)
{
  return new pass_cleanup_eh (ctxt);
}
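
/* A hedged note, not part of the original sources: like other pass
   factories, make_pass_cleanup_eh is meant to be referenced from the
   pass schedule in passes.def (along the lines of
   NEXT_PASS (pass_cleanup_eh)), and the pass may be scheduled at several
   points in the pipeline; the gate above makes it a no-op for functions
   that have no EH region tree.  */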

/* Disable warnings about missing quoting in GCC diagnostics for
   the verification errors.  Their format strings don't follow GCC
   diagnostic conventions but are only used for debugging.  */
#if __GNUC__ >= 10
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wformat-diag"
#endif

/* Verify that BB, which contains STMT as its last statement, has precisely
   the EH edge that make_eh_edge would create.  */

DEBUG_FUNCTION bool
verify_eh_edges (gimple *stmt)
{
  basic_block bb = gimple_bb (stmt);
  eh_landing_pad lp = NULL;
  int lp_nr;
  edge_iterator ei;
  edge e, eh_edge;

  /* Find the landing pad, if any, recorded for STMT.  */
  lp_nr = lookup_stmt_eh_lp (stmt);
  if (lp_nr > 0)
    lp = get_eh_landing_pad_from_number (lp_nr);

  /* There must be at most one EH successor edge.  */
  eh_edge = NULL;
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      if (e->flags & EDGE_EH)
	{
	  if (eh_edge)
	    {
	      error ("BB %i has multiple EH edges", bb->index);
	      return true;
	    }
	  else
	    eh_edge = e;
	}
    }

  if (lp == NULL)
    {
      if (eh_edge)
	{
	  error ("BB %i cannot throw but has an EH edge", bb->index);
	  return true;
	}
      return false;
    }

  if (!stmt_could_throw_p (cfun, stmt))
    {
      error ("BB %i last statement has incorrectly set lp", bb->index);
      return true;
    }

  if (eh_edge == NULL)
    {
      error ("BB %i is missing an EH edge", bb->index);
      return true;
    }

  /* The single EH edge must lead to the landing pad's post-landing-pad
     block.  */
  if (eh_edge->dest != label_to_block (cfun, lp->post_landing_pad))
    {
      error ("Incorrect EH edge %i->%i", bb->index, eh_edge->dest->index);
      return true;
    }

  return false;
}
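
/* A hedged usage note, not part of the original sources: verify_eh_edges
   reports a problem via error and returns true on failure.  It is
   intended to be driven by the GIMPLE CFG verifier (gimple_verify_flow_info
   in tree-cfg.cc), so a mismatch between the EH throw-statement table and
   the CFG surfaces as a consistency failure during internal checking.  */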

/* Similarly, but handle GIMPLE_EH_DISPATCH specifically.  */

DEBUG_FUNCTION bool
verify_eh_dispatch_edge (geh_dispatch *stmt)
{
  eh_region r;
  eh_catch c;
  basic_block src, dst;
  bool want_fallthru = true;
  edge_iterator ei;
  edge e, fall_edge;

  r = get_eh_region_from_number (gimple_eh_dispatch_region (stmt));
  src = gimple_bb (stmt);

  FOR_EACH_EDGE (e, ei, src->succs)
    gcc_assert (e->aux == NULL);

  /* Mark the edge expected for each handler by setting its AUX field;
     the sweep over the successors below clears the marks again.  */
  switch (r->type)
    {
    case ERT_TRY:
      for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	{
	  dst = label_to_block (cfun, c->label);
	  e = find_edge (src, dst);
	  if (e == NULL)
	    {
	      error ("BB %i is missing an edge", src->index);
	      return true;
	    }
	  e->aux = (void *)e;

	  /* A catch-all handler doesn't have a fallthru.  */
	  if (c->type_list == NULL)
	    {
	      want_fallthru = false;
	      break;
	    }
	}
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      dst = label_to_block (cfun, r->u.allowed.label);
      e = find_edge (src, dst);
      if (e == NULL)
	{
	  error ("BB %i is missing an edge", src->index);
	  return true;
	}
      e->aux = (void *)e;
      break;

    default:
      gcc_unreachable ();
    }

  /* Sweep the successors: each edge must be either the single fallthru
     edge or one of the handler edges marked above.  */
  fall_edge = NULL;
  FOR_EACH_EDGE (e, ei, src->succs)
    {
      if (e->flags & EDGE_FALLTHRU)
	{
	  if (fall_edge != NULL)
	    {
	      error ("BB %i too many fallthru edges", src->index);
	      return true;
	    }
	  fall_edge = e;
	}
      else if (e->aux)
	e->aux = NULL;
      else
	{
	  error ("BB %i has incorrect edge", src->index);
	  return true;
	}
    }
  if ((fall_edge != NULL) ^ want_fallthru)
    {
      error ("BB %i has incorrect fallthru edge", src->index);
      return true;
    }

  return false;
}
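
/* A hedged illustrative note, not part of the original sources: for C++
   input such as

	try { f (); }
	catch (A &) { ... }
	catch (...) { ... }

   the GIMPLE_EH_DISPATCH block of the ERT_TRY region is expected to have
   one edge per catch label.  The trailing catch-all has a NULL type_list,
   so no fallthru edge is wanted; without a catch-all, exactly one
   fallthru edge must remain, on which the exception continues to the
   enclosing region.  */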

#if __GNUC__ >= 10
# pragma GCC diagnostic pop
#endif