/* Implements exception handling.
   Copyright (C) 1989-2023 Free Software Foundation, Inc.
   Contributed by Mike Stump <mrs@cygnus.com>.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */


/* An exception is an event that can be "thrown" from within a
   function.  This event can then be "caught" by the callers of
   the function.

   The representation of exceptions changes several times during
   the compilation process:

   In the beginning, in the front end, we have the GENERIC trees
   TRY_CATCH_EXPR, TRY_FINALLY_EXPR, EH_ELSE_EXPR, WITH_CLEANUP_EXPR,
   CLEANUP_POINT_EXPR, CATCH_EXPR, and EH_FILTER_EXPR.

   During initial gimplification (gimplify.cc) these are lowered to the
   GIMPLE_TRY, GIMPLE_CATCH, GIMPLE_EH_ELSE, and GIMPLE_EH_FILTER
   nodes.  The WITH_CLEANUP_EXPR and CLEANUP_POINT_EXPR nodes are
   converted into GIMPLE_TRY_FINALLY nodes; the others are a more
   direct 1-1 conversion.

   During pass_lower_eh (tree-eh.cc) we record the nested structure
   of the TRY nodes in EH_REGION nodes in CFUN->EH->REGION_TREE.
   We expand the eh_protect_cleanup_actions langhook into MUST_NOT_THROW
   regions at this time.  We can then flatten the statements within
   the TRY nodes to straight-line code.  Statements that had been within
   TRY nodes that can throw are recorded within CFUN->EH->THROW_STMT_TABLE,
   so that we may remember what action is supposed to be taken if
   a given statement does throw.  During this lowering process,
   we create an EH_LANDING_PAD node for each EH_REGION that has
   some code within the function that needs to be executed if a
   throw does happen.  We also create RESX statements that are
   used to transfer control from an inner EH_REGION to an outer
   EH_REGION.  We also create EH_DISPATCH statements as placeholders
   for a runtime type comparison that should be made in order to
   select the action to perform among different CATCH and EH_FILTER
   regions.
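
   For illustration (schematically; exact details vary), given

	try { foo (); } catch (int) { bar (); }

   this pass records a TRY region for the call to foo and gives it a
   landing pad; "foo ();" gets an entry in THROW_STMT_TABLE pointing
   at that region; the landing pad leads to an EH_DISPATCH for the
   region; and a handler that does not terminate the exception ends
   in a RESX transferring control to the enclosing region.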

   During pass_lower_eh_dispatch (tree-eh.cc), which is run after
   all inlining is complete, we are able to run assign_filter_values,
   which allows us to map the set of types manipulated by all of the
   CATCH and EH_FILTER regions to a set of integers.  This set of integers
   will be how the exception runtime communicates with the code generated
   within the function.  We then expand the GIMPLE_EH_DISPATCH statements
   to a switch or conditional branches that use the argument provided by
   the runtime (__builtin_eh_filter) and the set of integers we computed
   in assign_filter_values.
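
   Schematically, the dispatch for the example above might expand to

	_1 = __builtin_eh_filter (<region number>);
	if (_1 == <filter value assigned to int>) goto <handler>;
	else <resx, or the next comparison>;

   where the filter value is one of the integers computed by
   assign_filter_values.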

   During pass_lower_resx (tree-eh.cc), which is run near the end
   of optimization, we expand RESX statements.  If the eh region
   that is outer to the RESX statement is a MUST_NOT_THROW, then
   the RESX expands to some form of abort statement.  If the eh
   region that is outer to the RESX statement is within the current
   function, then the RESX expands to a bookkeeping call
   (__builtin_eh_copy_values) and a goto.  Otherwise, the next
   handler for the exception must be within a function somewhere
   up the call chain, so we call back into the exception runtime
   (__builtin_unwind_resume).

   During pass_expand (cfgexpand.cc), we generate REG_EH_REGION notes
   that create an rtl to eh_region mapping that corresponds to the
   gimple to eh_region mapping that had been recorded in the
   THROW_STMT_TABLE.

   Then, via finish_eh_generation, we generate the real landing pads
   to which the runtime will actually transfer control.  These new
   landing pads perform whatever bookkeeping is needed by the target
   backend in order to resume execution within the current function.
   Each of these new landing pads falls through into the post_landing_pad
   label which had been used within the CFG up to this point.  All
   exception edges within the CFG are redirected to the new landing pads.
   If the target uses setjmp to implement exceptions, the various extra
   calls into the runtime to register and unregister the current stack
   frame are emitted at this time.

   During pass_convert_to_eh_region_ranges (except.cc), we transform
   the REG_EH_REGION notes attached to individual insns into
   non-overlapping ranges of insns bounded by NOTE_INSN_EH_REGION_BEG
   and NOTE_INSN_EH_REGION_END.  Each insn within such ranges has the
   same associated action within the exception region tree, meaning
   that (1) the exception is caught by the same landing pad within the
   current function, (2) the exception is blocked by the runtime with
   a MUST_NOT_THROW region, or (3) the exception is not handled at all
   within the current function.

   Finally, during assembly generation, we call
   output_function_exception_table (except.cc) to emit the tables with
   which the exception runtime can determine if a given stack frame
   handles a given exception, and if so what filter value to provide
   to the function when the non-local control transfer is effected.
   If the target uses dwarf2 unwinding to implement exceptions, then
   output_call_frame_info (dwarf2out.cc) emits the required unwind data.  */


#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "expmed.h"
#include "optabs.h"
#include "emit-rtl.h"
#include "cgraph.h"
#include "diagnostic.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "explow.h"
#include "stmt.h"
#include "expr.h"
#include "calls.h"
#include "libfuncs.h"
#include "except.h"
#include "output.h"
#include "dwarf2asm.h"
#include "dwarf2.h"
#include "common/common-target.h"
#include "langhooks.h"
#include "cfgrtl.h"
#include "tree-pretty-print.h"
#include "cfgloop.h"
#include "builtins.h"
#include "tree-hash-traits.h"
#include "flags.h"

static GTY(()) int call_site_base;

static GTY(()) hash_map<tree_hash, tree> *type_to_runtime_map;

static GTY(()) tree setjmp_fn;

/* Describe the SjLj_Function_Context structure.  */
static GTY(()) tree sjlj_fc_type_node;
static int sjlj_fc_call_site_ofs;
static int sjlj_fc_data_ofs;
static int sjlj_fc_personality_ofs;
static int sjlj_fc_lsda_ofs;
static int sjlj_fc_jbuf_ofs;


struct GTY(()) call_site_record_d
{
  rtx landing_pad;
  int action;
};

/* In the following structure and associated functions,
   we represent entries in the action table as 1-based indices.
   Special cases are:

	 0:	null action record, non-null landing pad; implies cleanups
	-1:	null action record, null landing pad; implies no action
	-2:	no call-site entry; implies must_not_throw
	-3:	we have yet to process outer regions

   Further, no special cases apply to the "next" field of the record.
   For next, 0 means end of list.  */
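/* For example (a sketch at the level of this struct only): a call site
   inside a catch with filter value 1, nested inside a catch with filter
   value 2, could use the chain

	entry 1: { filter = 2, next = 0 }	outermost action, end of list
	entry 2: { filter = 1, next = 1 }	inner action, chains to entry 1

   with the call site referring to entry 2.  */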

struct action_record
{
  int offset;
  int filter;
  int next;
};

/* Hashtable helpers.  */

struct action_record_hasher : free_ptr_hash <action_record>
{
  static inline hashval_t hash (const action_record *);
  static inline bool equal (const action_record *, const action_record *);
};

inline hashval_t
action_record_hasher::hash (const action_record *entry)
{
  return entry->next * 1009 + entry->filter;
}

inline bool
action_record_hasher::equal (const action_record *entry,
			     const action_record *data)
{
  return entry->filter == data->filter && entry->next == data->next;
}

typedef hash_table<action_record_hasher> action_hash_type;

static bool get_eh_region_and_lp_from_rtx (const_rtx, eh_region *,
					   eh_landing_pad *);

static void dw2_build_landing_pads (void);

static int collect_one_action_chain (action_hash_type *, eh_region);
static int add_call_site (rtx, int, int);

static void push_uleb128 (vec<uchar, va_gc> **, unsigned int);
static void push_sleb128 (vec<uchar, va_gc> **, int);
static int dw2_size_of_call_site_table (int);
static int sjlj_size_of_call_site_table (void);
static void dw2_output_call_site_table (int, int);
static void sjlj_output_call_site_table (void);


void
init_eh (void)
{
  if (! flag_exceptions)
    return;

  type_to_runtime_map = hash_map<tree_hash, tree>::create_ggc (31);

  /* Create the SjLj_Function_Context structure.  This should match
     the definition in unwind-sjlj.c.  */
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    {
      tree f_jbuf, f_per, f_lsda, f_prev, f_cs, f_data, tmp;

      sjlj_fc_type_node = lang_hooks.types.make_type (RECORD_TYPE);

      f_prev = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__prev"),
			   build_pointer_type (sjlj_fc_type_node));
      DECL_FIELD_CONTEXT (f_prev) = sjlj_fc_type_node;

      f_cs = build_decl (BUILTINS_LOCATION,
			 FIELD_DECL, get_identifier ("__call_site"),
			 integer_type_node);
      DECL_FIELD_CONTEXT (f_cs) = sjlj_fc_type_node;

      tmp = build_index_type (size_int (4 - 1));
      tmp = build_array_type (lang_hooks.types.type_for_mode
			      (targetm.unwind_word_mode (), 1),
			      tmp);
      f_data = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__data"), tmp);
      DECL_FIELD_CONTEXT (f_data) = sjlj_fc_type_node;

      f_per = build_decl (BUILTINS_LOCATION,
			  FIELD_DECL, get_identifier ("__personality"),
			  ptr_type_node);
      DECL_FIELD_CONTEXT (f_per) = sjlj_fc_type_node;

      f_lsda = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__lsda"),
			   ptr_type_node);
      DECL_FIELD_CONTEXT (f_lsda) = sjlj_fc_type_node;

#ifdef DONT_USE_BUILTIN_SETJMP
#ifdef JMP_BUF_SIZE
      tmp = size_int (JMP_BUF_SIZE - 1);
#else
      /* This should be large enough for most systems; if it is not,
	 JMP_BUF_SIZE should be defined with the proper value.  It will
	 also tend to be larger than necessary for most systems; a more
	 optimal port will define JMP_BUF_SIZE.  */
      tmp = size_int (FIRST_PSEUDO_REGISTER + 2 - 1);
#endif
#else
      /* Compute a minimally sized jump buffer.  We need room to store at
	 least 3 pointers - stack pointer, frame pointer and return address.
	 Plus for some targets we need room for an extra pointer - in the
	 case of MIPS this is the global pointer.  This makes a total of four
	 pointers, but to be safe we actually allocate room for 5.

	 If pointers are smaller than words then we allocate enough room for
	 5 words, just in case the backend needs this much room.  For more
	 discussion on this issue see:
	 http://gcc.gnu.org/ml/gcc-patches/2014-05/msg00313.html.  */
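      /* E.g., with 64-bit words and 32-bit pointers, the else arm below
	 reserves 5 * 64 / 32 = 10 pointers' worth of room.  */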
      if (POINTER_SIZE > BITS_PER_WORD)
	tmp = size_int (5 - 1);
      else
	tmp = size_int ((5 * BITS_PER_WORD / POINTER_SIZE) - 1);
#endif

      tmp = build_index_type (tmp);
      tmp = build_array_type (ptr_type_node, tmp);
      f_jbuf = build_decl (BUILTINS_LOCATION,
			   FIELD_DECL, get_identifier ("__jbuf"), tmp);
#ifdef DONT_USE_BUILTIN_SETJMP
      /* We don't know what alignment requirements the runtime's
	 jmp_buf has.  Overestimate.  */
      SET_DECL_ALIGN (f_jbuf, BIGGEST_ALIGNMENT);
      DECL_USER_ALIGN (f_jbuf) = 1;
#endif
      DECL_FIELD_CONTEXT (f_jbuf) = sjlj_fc_type_node;

      TYPE_FIELDS (sjlj_fc_type_node) = f_prev;
      TREE_CHAIN (f_prev) = f_cs;
      TREE_CHAIN (f_cs) = f_data;
      TREE_CHAIN (f_data) = f_per;
      TREE_CHAIN (f_per) = f_lsda;
      TREE_CHAIN (f_lsda) = f_jbuf;

      layout_type (sjlj_fc_type_node);

      /* Cache the interesting field offsets so that we have
	 easy access from rtl.  */
      sjlj_fc_call_site_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_cs))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_cs)) / BITS_PER_UNIT);
      sjlj_fc_data_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_data))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_data)) / BITS_PER_UNIT);
      sjlj_fc_personality_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_per))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_per)) / BITS_PER_UNIT);
      sjlj_fc_lsda_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_lsda))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_lsda)) / BITS_PER_UNIT);
      sjlj_fc_jbuf_ofs
	= (tree_to_uhwi (DECL_FIELD_OFFSET (f_jbuf))
	   + tree_to_uhwi (DECL_FIELD_BIT_OFFSET (f_jbuf)) / BITS_PER_UNIT);

#ifdef DONT_USE_BUILTIN_SETJMP
      tmp = build_function_type_list (integer_type_node, TREE_TYPE (f_jbuf),
				      NULL);
      setjmp_fn = build_decl (BUILTINS_LOCATION, FUNCTION_DECL,
			      get_identifier ("setjmp"), tmp);
      TREE_PUBLIC (setjmp_fn) = 1;
      DECL_EXTERNAL (setjmp_fn) = 1;
      DECL_ASSEMBLER_NAME (setjmp_fn);
#endif
    }
}

void
init_eh_for_function (void)
{
  cfun->eh = ggc_cleared_alloc<eh_status> ();

  /* Make sure zeroth entries are used.  */
  vec_safe_push (cfun->eh->region_array, (eh_region)0);
  vec_safe_push (cfun->eh->lp_array, (eh_landing_pad)0);
}

/* Routines to generate the exception tree somewhat directly.
   These are used from tree-eh.cc when processing exception related
   nodes during tree optimization.  */

static eh_region
gen_eh_region (enum eh_region_type type, eh_region outer)
{
  eh_region new_eh;

  /* Insert a new blank region as a leaf in the tree.  */
  new_eh = ggc_cleared_alloc<eh_region_d> ();
  new_eh->type = type;
  new_eh->outer = outer;
  if (outer)
    {
      new_eh->next_peer = outer->inner;
      outer->inner = new_eh;
    }
  else
    {
      new_eh->next_peer = cfun->eh->region_tree;
      cfun->eh->region_tree = new_eh;
    }

  new_eh->index = vec_safe_length (cfun->eh->region_array);
  vec_safe_push (cfun->eh->region_array, new_eh);

  /* Copy the language's notion of whether to use __cxa_end_cleanup.  */
  if (targetm.arm_eabi_unwinder && lang_hooks.eh_use_cxa_end_cleanup)
    new_eh->use_cxa_end_cleanup = true;

  return new_eh;
}

eh_region
gen_eh_region_cleanup (eh_region outer)
{
  return gen_eh_region (ERT_CLEANUP, outer);
}

eh_region
gen_eh_region_try (eh_region outer)
{
  return gen_eh_region (ERT_TRY, outer);
}

eh_catch
gen_eh_region_catch (eh_region t, tree type_or_list)
{
  eh_catch c, l;
  tree type_list, type_node;

  gcc_assert (t->type == ERT_TRY);

  /* Ensure we always end up with a type list to normalize further
     processing; then register each type against the runtime types
     map.  */
  type_list = type_or_list;
  if (type_or_list)
    {
      if (TREE_CODE (type_or_list) != TREE_LIST)
	type_list = tree_cons (NULL_TREE, type_or_list, NULL_TREE);

      type_node = type_list;
      for (; type_node; type_node = TREE_CHAIN (type_node))
	add_type_for_runtime (TREE_VALUE (type_node));
    }

  c = ggc_cleared_alloc<eh_catch_d> ();
  c->type_list = type_list;
  l = t->u.eh_try.last_catch;
  c->prev_catch = l;
  if (l)
    l->next_catch = c;
  else
    t->u.eh_try.first_catch = c;
  t->u.eh_try.last_catch = c;

  return c;
}

eh_region
gen_eh_region_allowed (eh_region outer, tree allowed)
{
  eh_region region = gen_eh_region (ERT_ALLOWED_EXCEPTIONS, outer);
  region->u.allowed.type_list = allowed;

  for (; allowed ; allowed = TREE_CHAIN (allowed))
    add_type_for_runtime (TREE_VALUE (allowed));

  return region;
}

eh_region
gen_eh_region_must_not_throw (eh_region outer)
{
  return gen_eh_region (ERT_MUST_NOT_THROW, outer);
}

eh_landing_pad
gen_eh_landing_pad (eh_region region)
{
  eh_landing_pad lp = ggc_cleared_alloc<eh_landing_pad_d> ();

  lp->next_lp = region->landing_pads;
  lp->region = region;
  lp->index = vec_safe_length (cfun->eh->lp_array);
  region->landing_pads = lp;

  vec_safe_push (cfun->eh->lp_array, lp);

  return lp;
}

eh_region
get_eh_region_from_number_fn (struct function *ifun, int i)
{
  return (*ifun->eh->region_array)[i];
}

eh_region
get_eh_region_from_number (int i)
{
  return get_eh_region_from_number_fn (cfun, i);
}

eh_landing_pad
get_eh_landing_pad_from_number_fn (struct function *ifun, int i)
{
  return (*ifun->eh->lp_array)[i];
}

eh_landing_pad
get_eh_landing_pad_from_number (int i)
{
  return get_eh_landing_pad_from_number_fn (cfun, i);
}

eh_region
get_eh_region_from_lp_number_fn (struct function *ifun, int i)
{
  if (i < 0)
    return (*ifun->eh->region_array)[-i];
  else if (i == 0)
    return NULL;
  else
    {
      eh_landing_pad lp;
      lp = (*ifun->eh->lp_array)[i];
      return lp->region;
    }
}

eh_region
get_eh_region_from_lp_number (int i)
{
  return get_eh_region_from_lp_number_fn (cfun, i);
}

/* Returns true if the current function has exception handling regions.  */

bool
current_function_has_exception_handlers (void)
{
  return cfun->eh->region_tree != NULL;
}

/* A subroutine of duplicate_eh_regions.  Copy the eh_region tree
   rooted at OLD_R, rooting the copy at OUTER.  */

struct duplicate_eh_regions_data
{
  duplicate_eh_regions_map label_map;
  void *label_map_data;
  hash_map<void *, void *> *eh_map;
};

static void
duplicate_eh_regions_1 (struct duplicate_eh_regions_data *data,
			eh_region old_r, eh_region outer)
{
  eh_landing_pad old_lp, new_lp;
  eh_region new_r;

  new_r = gen_eh_region (old_r->type, outer);
  gcc_assert (!data->eh_map->put (old_r, new_r));

  switch (old_r->type)
    {
    case ERT_CLEANUP:
      break;

    case ERT_TRY:
      {
	eh_catch oc, nc;
	for (oc = old_r->u.eh_try.first_catch; oc ; oc = oc->next_catch)
	  {
	    /* We should be doing all our region duplication before and
	       during inlining, which is before filter lists are created.  */
	    gcc_assert (oc->filter_list == NULL);
	    nc = gen_eh_region_catch (new_r, oc->type_list);
	    nc->label = data->label_map (oc->label, data->label_map_data);
	  }
      }
      break;

    case ERT_ALLOWED_EXCEPTIONS:
      new_r->u.allowed.type_list = old_r->u.allowed.type_list;
      if (old_r->u.allowed.label)
	new_r->u.allowed.label
	  = data->label_map (old_r->u.allowed.label, data->label_map_data);
      else
	new_r->u.allowed.label = NULL_TREE;
      break;

    case ERT_MUST_NOT_THROW:
      new_r->u.must_not_throw.failure_loc =
	LOCATION_LOCUS (old_r->u.must_not_throw.failure_loc);
      new_r->u.must_not_throw.failure_decl =
	old_r->u.must_not_throw.failure_decl;
      break;
    }

  for (old_lp = old_r->landing_pads; old_lp ; old_lp = old_lp->next_lp)
    {
      /* Don't bother copying unused landing pads.  */
      if (old_lp->post_landing_pad == NULL)
	continue;

      new_lp = gen_eh_landing_pad (new_r);
      gcc_assert (!data->eh_map->put (old_lp, new_lp));

      new_lp->post_landing_pad
	= data->label_map (old_lp->post_landing_pad, data->label_map_data);
      EH_LANDING_PAD_NR (new_lp->post_landing_pad) = new_lp->index;
    }

  /* Make sure to preserve the original use of __cxa_end_cleanup.  */
  new_r->use_cxa_end_cleanup = old_r->use_cxa_end_cleanup;

  for (old_r = old_r->inner; old_r ; old_r = old_r->next_peer)
    duplicate_eh_regions_1 (data, old_r, new_r);
}

/* Duplicate the EH regions from IFUN rooted at COPY_REGION into
   the current function and root the tree below OUTER_REGION.
   The special case of COPY_REGION of NULL means all regions.
   Remap labels using MAP/MAP_DATA callback.  Return a pointer map
   that allows the caller to remap uses of both EH regions and
   EH landing pads.  */

hash_map<void *, void *> *
duplicate_eh_regions (struct function *ifun,
		      eh_region copy_region, int outer_lp,
		      duplicate_eh_regions_map map, void *map_data)
{
  struct duplicate_eh_regions_data data;
  eh_region outer_region;

  if (flag_checking)
    verify_eh_tree (ifun);

  data.label_map = map;
  data.label_map_data = map_data;
  data.eh_map = new hash_map<void *, void *>;

  outer_region = get_eh_region_from_lp_number_fn (cfun, outer_lp);

  /* Copy all the regions in the subtree.  */
  if (copy_region)
    duplicate_eh_regions_1 (&data, copy_region, outer_region);
  else
    {
      eh_region r;
      for (r = ifun->eh->region_tree; r ; r = r->next_peer)
	duplicate_eh_regions_1 (&data, r, outer_region);
    }

  if (flag_checking)
    verify_eh_tree (cfun);

  return data.eh_map;
}

/* Return the region that is outer to both REGION_A and REGION_B in IFUN.  */

eh_region
eh_region_outermost (struct function *ifun, eh_region region_a,
		     eh_region region_b)
{
  gcc_assert (ifun->eh->region_array);
  gcc_assert (ifun->eh->region_tree);

  auto_sbitmap b_outer (ifun->eh->region_array->length ());
  bitmap_clear (b_outer);

  do
    {
      bitmap_set_bit (b_outer, region_b->index);
      region_b = region_b->outer;
    }
  while (region_b);

  do
    {
      if (bitmap_bit_p (b_outer, region_a->index))
	break;
      region_a = region_a->outer;
    }
  while (region_a);

  return region_a;
}

void
add_type_for_runtime (tree type)
{
  /* If TYPE is NOP_EXPR, it means that it already is a runtime type.  */
  if (TREE_CODE (type) == NOP_EXPR)
    return;

  bool existed = false;
  tree *slot = &type_to_runtime_map->get_or_insert (type, &existed);
  if (!existed)
    *slot = lang_hooks.eh_runtime_type (type);
}

tree
lookup_type_for_runtime (tree type)
{
  /* If TYPE is NOP_EXPR, it means that it already is a runtime type.  */
  if (TREE_CODE (type) == NOP_EXPR)
    return type;

  /* We should have always inserted the data earlier.  */
  return *type_to_runtime_map->get (type);
}


/* Represent an entry in @TTypes for either catch actions
   or exception filter actions.  */
struct ttypes_filter {
  tree t;
  int filter;
};

/* Helper for ttypes_filter hashing.  */

struct ttypes_filter_hasher : free_ptr_hash <ttypes_filter>
{
  typedef tree_node *compare_type;
  static inline hashval_t hash (const ttypes_filter *);
  static inline bool equal (const ttypes_filter *, const tree_node *);
};

/* Compare ENTRY (a ttypes_filter entry in the hash table) with DATA
   (a tree) for a @TTypes type node we are thinking about adding.  */

inline bool
ttypes_filter_hasher::equal (const ttypes_filter *entry, const tree_node *data)
{
  return entry->t == data;
}

inline hashval_t
ttypes_filter_hasher::hash (const ttypes_filter *entry)
{
  return TREE_HASH (entry->t);
}

typedef hash_table<ttypes_filter_hasher> ttypes_hash_type;


/* Helper for ehspec hashing.  */

struct ehspec_hasher : free_ptr_hash <ttypes_filter>
{
  static inline hashval_t hash (const ttypes_filter *);
  static inline bool equal (const ttypes_filter *, const ttypes_filter *);
};

/* Compare ENTRY with DATA (both struct ttypes_filter) for a @TTypes
   exception specification list we are thinking about adding.  */
/* ??? Currently we use the type lists in the order given.  Someone
   should put these in some canonical order.  */

inline bool
ehspec_hasher::equal (const ttypes_filter *entry, const ttypes_filter *data)
{
  return type_list_equal (entry->t, data->t);
}

/* Hash function for exception specification lists.  */

inline hashval_t
ehspec_hasher::hash (const ttypes_filter *entry)
{
  hashval_t h = 0;
  tree list;

  for (list = entry->t; list ; list = TREE_CHAIN (list))
    h = (h << 5) + (h >> 27) + TREE_HASH (TREE_VALUE (list));
  return h;
}

typedef hash_table<ehspec_hasher> ehspec_hash_type;


/* Add TYPE (which may be NULL) to cfun->eh->ttype_data, using TYPES_HASH
   to speed up the search.  Return the filter value to be used.  */

static int
add_ttypes_entry (ttypes_hash_type *ttypes_hash, tree type)
{
  struct ttypes_filter **slot, *n;

  slot = ttypes_hash->find_slot_with_hash (type, (hashval_t) TREE_HASH (type),
					   INSERT);

  if ((n = *slot) == NULL)
    {
      /* Filter value is a 1-based table index.  */

      n = XNEW (struct ttypes_filter);
      n->t = type;
      n->filter = vec_safe_length (cfun->eh->ttype_data) + 1;
      *slot = n;

      vec_safe_push (cfun->eh->ttype_data, type);
    }

  return n->filter;
}

/* Add LIST to cfun->eh->ehspec_data, using EHSPEC_HASH and TYPES_HASH
   to speed up the search.  Return the filter value to be used.  */

static int
add_ehspec_entry (ehspec_hash_type *ehspec_hash, ttypes_hash_type *ttypes_hash,
		  tree list)
{
  struct ttypes_filter **slot, *n;
  struct ttypes_filter dummy;

  dummy.t = list;
  slot = ehspec_hash->find_slot (&dummy, INSERT);

  if ((n = *slot) == NULL)
    {
      int len;

      if (targetm.arm_eabi_unwinder)
	len = vec_safe_length (cfun->eh->ehspec_data.arm_eabi);
      else
	len = vec_safe_length (cfun->eh->ehspec_data.other);

      /* Filter value is a -1-based byte index into a uleb128 buffer.  */
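      /* E.g., the first list added gets filter -1; a list whose encoding
	 starts 5 bytes into the buffer gets filter -6.  */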

      n = XNEW (struct ttypes_filter);
      n->t = list;
      n->filter = -(len + 1);
      *slot = n;

      /* Generate a 0 terminated list of filter values.  */
      for (; list ; list = TREE_CHAIN (list))
	{
	  if (targetm.arm_eabi_unwinder)
	    vec_safe_push (cfun->eh->ehspec_data.arm_eabi, TREE_VALUE (list));
	  else
	    {
	      /* Look up each type in the list and encode its filter
		 value as a uleb128.  */
	      push_uleb128 (&cfun->eh->ehspec_data.other,
			    add_ttypes_entry (ttypes_hash, TREE_VALUE (list)));
	    }
	}
      if (targetm.arm_eabi_unwinder)
	vec_safe_push (cfun->eh->ehspec_data.arm_eabi, NULL_TREE);
      else
	vec_safe_push (cfun->eh->ehspec_data.other, (uchar)0);
    }

  return n->filter;
}

/* Generate the action filter values to be used for CATCH and
   ALLOWED_EXCEPTIONS regions.  When using dwarf2 exception regions,
   we use lots of landing pads, and so every type or list can share
   the same filter value, which saves table space.  */

void
assign_filter_values (void)
{
  int i;
  eh_region r;
  eh_catch c;

  vec_alloc (cfun->eh->ttype_data, 16);
  if (targetm.arm_eabi_unwinder)
    vec_alloc (cfun->eh->ehspec_data.arm_eabi, 64);
  else
    vec_alloc (cfun->eh->ehspec_data.other, 64);

  ehspec_hash_type ehspec (31);
  ttypes_hash_type ttypes (31);

  for (i = 1; vec_safe_iterate (cfun->eh->region_array, i, &r); ++i)
    {
      if (r == NULL)
	continue;

      switch (r->type)
	{
	case ERT_TRY:
	  for (c = r->u.eh_try.first_catch; c ; c = c->next_catch)
	    {
	      /* Whatever type_list is (NULL or true list), we build a list
		 of filters for the region.  */
	      c->filter_list = NULL_TREE;

	      if (c->type_list != NULL)
		{
		  /* Get a filter value for each of the types caught and store
		     them in the region's dedicated list.  */
		  tree tp_node = c->type_list;

		  for ( ; tp_node; tp_node = TREE_CHAIN (tp_node))
		    {
		      int flt
			= add_ttypes_entry (&ttypes, TREE_VALUE (tp_node));
		      tree flt_node = build_int_cst (integer_type_node, flt);

		      c->filter_list
			= tree_cons (NULL_TREE, flt_node, c->filter_list);
		    }
		}
	      else
		{
		  /* Get a filter value for the NULL list also since it
		     will need an action record anyway.  */
		  int flt = add_ttypes_entry (&ttypes, NULL);
		  tree flt_node = build_int_cst (integer_type_node, flt);

		  c->filter_list
		    = tree_cons (NULL_TREE, flt_node, NULL);
		}
	    }
	  break;

	case ERT_ALLOWED_EXCEPTIONS:
	  r->u.allowed.filter
	    = add_ehspec_entry (&ehspec, &ttypes, r->u.allowed.type_list);
	  break;

	default:
	  break;
	}
    }
}

/* Emit SEQ into the basic block just before INSN (which is assumed to be
   the first instruction of some existing BB) and return the newly
   produced block.  */
static basic_block
emit_to_new_bb_before (rtx_insn *seq, rtx_insn *insn)
{
  rtx_insn *next, *last;
  basic_block bb;
  edge e;
  edge_iterator ei;

  /* If there happens to be a fallthru edge (possibly created by cleanup_cfg
     call), we don't want it to go into newly created landing pad or other EH
     construct.  */
  for (ei = ei_start (BLOCK_FOR_INSN (insn)->preds); (e = ei_safe_edge (ei)); )
    if (e->flags & EDGE_FALLTHRU)
      force_nonfallthru (e);
    else
      ei_next (&ei);

  /* Make sure to put the location of INSN or a subsequent instruction on SEQ
     to avoid inheriting the location of the previous instruction.  */
  next = insn;
  while (next && !NONDEBUG_INSN_P (next))
    next = NEXT_INSN (next);
  if (next)
    last = emit_insn_before_setloc (seq, insn, INSN_LOCATION (next));
  else
    last = emit_insn_before (seq, insn);
  if (BARRIER_P (last))
    last = PREV_INSN (last);
  bb = create_basic_block (seq, last, BLOCK_FOR_INSN (insn)->prev_bb);
  update_bb_for_insn (bb);
  bb->flags |= BB_SUPERBLOCK;
  return bb;
}

/* A subroutine of dw2_build_landing_pads, also used for edge splitting
   at the rtl level.  Emit the code required by the target at a landing
   pad for the given region.  */

static void
expand_dw2_landing_pad_for_region (eh_region region)
{
  if (targetm.have_exception_receiver ())
    emit_insn (targetm.gen_exception_receiver ());
  else if (targetm.have_nonlocal_goto_receiver ())
    emit_insn (targetm.gen_nonlocal_goto_receiver ());
  else
    { /* Nothing */ }

  if (region->exc_ptr_reg)
    emit_move_insn (region->exc_ptr_reg,
		    gen_rtx_REG (ptr_mode, EH_RETURN_DATA_REGNO (0)));
  if (region->filter_reg)
    emit_move_insn (region->filter_reg,
		    gen_rtx_REG (targetm.eh_return_filter_mode (),
				 EH_RETURN_DATA_REGNO (1)));
}

/* Expand the extra code needed at landing pads for dwarf2 unwinding.  */

static void
dw2_build_landing_pads (void)
{
  int i;
  eh_landing_pad lp;
  int e_flags = EDGE_FALLTHRU;

  /* If we're going to partition blocks, we need to be able to add
     new landing pads later, which means that we need to hold on to
     the post-landing-pad block.  Prevent it from being merged away.
     We'll remove this bit after partitioning.  */
  if (flag_reorder_blocks_and_partition)
    e_flags |= EDGE_PRESERVE;

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    {
      basic_block bb;
      rtx_insn *seq;

      if (lp == NULL || lp->post_landing_pad == NULL)
	continue;

      start_sequence ();

      lp->landing_pad = gen_label_rtx ();
      emit_label (lp->landing_pad);
      LABEL_PRESERVE_P (lp->landing_pad) = 1;

      expand_dw2_landing_pad_for_region (lp->region);

      seq = get_insns ();
      end_sequence ();

      bb = emit_to_new_bb_before (seq, label_rtx (lp->post_landing_pad));
      bb->count = bb->next_bb->count;
      make_single_succ_edge (bb, bb->next_bb, e_flags);
      if (current_loops)
	{
	  class loop *loop = bb->next_bb->loop_father;
	  /* If we created a pre-header block, add the new block to the
	     outer loop, otherwise to the loop itself.  */
	  if (bb->next_bb == loop->header)
	    add_bb_to_loop (bb, loop_outer (loop));
	  else
	    add_bb_to_loop (bb, loop);
	}
    }
}


static vec<int> sjlj_lp_call_site_index;

/* Process all active landing pads.  Assign each one a compact dispatch
   index, and a call-site index.  */

static int
sjlj_assign_call_site_values (void)
{
  action_hash_type ar_hash (31);
  int i, disp_index;
  eh_landing_pad lp;

  vec_alloc (crtl->eh.action_record_data, 64);

  disp_index = 0;
  call_site_base = 1;
  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
	int action, call_site;

	/* First: build the action table.  */
	action = collect_one_action_chain (&ar_hash, lp->region);

	/* Next: assign call-site values.  In dwarf2 terms, this would be
	   the region number assigned by convert_to_eh_region_ranges, but
	   we handle no-action and must-not-throw differently.  */
	/* Map must-not-throw to otherwise unused call-site index 0.  */
	if (action == -2)
	  call_site = 0;
	/* Map no-action to otherwise unused call-site index -1.  */
	else if (action == -1)
	  call_site = -1;
	/* Otherwise, look it up in the table.  */
	else
	  call_site = add_call_site (GEN_INT (disp_index), action, 0);
	sjlj_lp_call_site_index[i] = call_site;

	disp_index++;
      }

  return disp_index;
}

/* Emit code to record the current call-site index before every
   insn that can throw.  */

static void
sjlj_mark_call_sites (void)
{
  int last_call_site = -2;
  rtx_insn *insn;
  rtx mem;

  for (insn = get_insns (); insn ; insn = NEXT_INSN (insn))
    {
      eh_landing_pad lp;
      eh_region r;
      bool nothrow;
      int this_call_site;
      rtx_insn *before, *p;

      /* Reset value tracking at extended basic block boundaries.  */
      if (LABEL_P (insn))
	last_call_site = -2;

      /* If the function allocates dynamic stack space, the context must
	 be updated after every allocation/deallocation accordingly.  */
      if (NOTE_P (insn) && NOTE_KIND (insn) == NOTE_INSN_UPDATE_SJLJ_CONTEXT)
	{
	  rtx buf_addr;

	  start_sequence ();
	  buf_addr = plus_constant (Pmode, XEXP (crtl->eh.sjlj_fc, 0),
				    sjlj_fc_jbuf_ofs);
	  expand_builtin_update_setjmp_buf (buf_addr);
	  p = get_insns ();
	  end_sequence ();
	  emit_insn_before (p, insn);
	}

      if (! INSN_P (insn))
	continue;

      nothrow = get_eh_region_and_lp_from_rtx (insn, &r, &lp);
      if (nothrow)
	continue;
      if (lp)
	this_call_site = sjlj_lp_call_site_index[lp->index];
      else if (r == NULL)
	{
	  /* Calls (and trapping insns) without notes are outside any
	     exception handling region in this function.  Mark them as
	     no action.  */
	  this_call_site = -1;
	}
      else
	{
	  gcc_assert (r->type == ERT_MUST_NOT_THROW);
	  this_call_site = 0;
	}

      if (this_call_site != -1)
	crtl->uses_eh_lsda = 1;

      if (this_call_site == last_call_site)
	continue;

      /* Don't separate a call from its argument loads.  */
      before = insn;
      if (CALL_P (insn))
	before = find_first_parameter_load (insn, NULL);

      start_sequence ();
      mem = adjust_address (crtl->eh.sjlj_fc, TYPE_MODE (integer_type_node),
			    sjlj_fc_call_site_ofs);
      emit_move_insn (mem, gen_int_mode (this_call_site, GET_MODE (mem)));
      p = get_insns ();
      end_sequence ();

      emit_insn_before (p, before);
      last_call_site = this_call_site;
    }
}

/* Construct the SjLj_Function_Context.  */

static void
sjlj_emit_function_enter (rtx_code_label *dispatch_label)
{
  rtx_insn *fn_begin, *seq;
  rtx fc, mem;
  bool fn_begin_outside_block;
  rtx personality = get_personality_function (current_function_decl);

  fc = crtl->eh.sjlj_fc;

  start_sequence ();

  /* We're storing this libcall's address into memory instead of
     calling it directly.  Thus, we must call assemble_external_libcall
     here, as we cannot depend on emit_library_call to do it for us.  */
  assemble_external_libcall (personality);
  mem = adjust_address (fc, Pmode, sjlj_fc_personality_ofs);
  emit_move_insn (mem, personality);

  mem = adjust_address (fc, Pmode, sjlj_fc_lsda_ofs);
  if (crtl->uses_eh_lsda)
    {
      char buf[20];
      rtx sym;

      ASM_GENERATE_INTERNAL_LABEL (buf, "LLSDA", current_function_funcdef_no);
      sym = gen_rtx_SYMBOL_REF (Pmode, ggc_strdup (buf));
      SYMBOL_REF_FLAGS (sym) = SYMBOL_FLAG_LOCAL;
      emit_move_insn (mem, sym);
    }
  else
    emit_move_insn (mem, const0_rtx);

  if (dispatch_label)
    {
      rtx addr = plus_constant (Pmode, XEXP (fc, 0), sjlj_fc_jbuf_ofs);

#ifdef DONT_USE_BUILTIN_SETJMP
      addr = copy_addr_to_reg (addr);
      addr = convert_memory_address (ptr_mode, addr);
      tree addr_tree = make_tree (ptr_type_node, addr);

      tree call_expr = build_call_expr (setjmp_fn, 1, addr_tree);
      rtx x = expand_call (call_expr, NULL_RTX, false);

      emit_cmp_and_jump_insns (x, const0_rtx, NE, 0,
			       TYPE_MODE (integer_type_node), 0,
			       dispatch_label,
			       profile_probability::unlikely ());
#else
      expand_builtin_setjmp_setup (addr, dispatch_label);
#endif
    }

  emit_library_call (unwind_sjlj_register_libfunc, LCT_NORMAL, VOIDmode,
		     XEXP (fc, 0), Pmode);

  seq = get_insns ();
  end_sequence ();

  /* ??? Instead of doing this at the beginning of the function,
     do this in a block that is at loop level 0 and dominates all
     can_throw_internal instructions.  */

  fn_begin_outside_block = true;
  for (fn_begin = get_insns (); ; fn_begin = NEXT_INSN (fn_begin))
    if (NOTE_P (fn_begin))
      {
	if (NOTE_KIND (fn_begin) == NOTE_INSN_FUNCTION_BEG)
	  break;
	else if (NOTE_INSN_BASIC_BLOCK_P (fn_begin))
	  fn_begin_outside_block = false;
      }

#ifdef DONT_USE_BUILTIN_SETJMP
  if (dispatch_label)
    {
      /* The sequence contains a branch in the middle so we need to force
	 the creation of a new basic block by means of BB_SUPERBLOCK.  */
      if (fn_begin_outside_block)
	{
	  basic_block bb
	    = split_edge (single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
	  if (JUMP_P (BB_END (bb)))
	    emit_insn_before (seq, BB_END (bb));
	  else
	    emit_insn_after (seq, BB_END (bb));
	}
      else
	emit_insn_after (seq, fn_begin);

      single_succ (ENTRY_BLOCK_PTR_FOR_FN (cfun))->flags |= BB_SUPERBLOCK;
      return;
    }
#endif

  if (fn_begin_outside_block)
    insert_insn_on_edge (seq, single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun)));
  else
    emit_insn_after (seq, fn_begin);
}

/* Call back from expand_function_end to know where we should put
   the call to unwind_sjlj_unregister_libfunc if needed.  */

void
sjlj_emit_function_exit_after (rtx_insn *after)
{
  crtl->eh.sjlj_exit_after = after;
}

static void
sjlj_emit_function_exit (void)
{
  rtx_insn *seq, *insn;

  start_sequence ();

  emit_library_call (unwind_sjlj_unregister_libfunc, LCT_NORMAL, VOIDmode,
		     XEXP (crtl->eh.sjlj_fc, 0), Pmode);

  seq = get_insns ();
  end_sequence ();

  /* ??? Really this can be done in any block at loop level 0 that
     post-dominates all can_throw_internal instructions.  This is
     the last possible moment.  */

  insn = crtl->eh.sjlj_exit_after;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);

  emit_insn_after (seq, insn);
}

static void
sjlj_emit_dispatch_table (rtx_code_label *dispatch_label, int num_dispatch)
{
  scalar_int_mode unwind_word_mode = targetm.unwind_word_mode ();
  scalar_int_mode filter_mode = targetm.eh_return_filter_mode ();
  eh_landing_pad lp;
  rtx mem, fc, exc_ptr_reg, filter_reg;
  rtx_insn *seq;
  basic_block bb;
  eh_region r;
  int i, disp_index;
  vec<tree> dispatch_labels = vNULL;

  fc = crtl->eh.sjlj_fc;

  start_sequence ();

  emit_label (dispatch_label);

#ifndef DONT_USE_BUILTIN_SETJMP
  expand_builtin_setjmp_receiver (dispatch_label);

  /* The caller of expand_builtin_setjmp_receiver is responsible for
     making sure that the label doesn't vanish.  The only other caller
     is the expander for __builtin_setjmp_receiver, which places this
     label on the nonlocal_goto_label list.  Since we're modeling these
     CFG edges more exactly, we can use the forced_labels list instead.  */
  LABEL_PRESERVE_P (dispatch_label) = 1;
  vec_safe_push<rtx_insn *> (forced_labels, dispatch_label);
#endif

  /* Load up exc_ptr and filter values from the function context.  */
  mem = adjust_address (fc, unwind_word_mode, sjlj_fc_data_ofs);
  if (unwind_word_mode != ptr_mode)
    {
#ifdef POINTERS_EXTEND_UNSIGNED
      mem = convert_memory_address (ptr_mode, mem);
#else
      mem = convert_to_mode (ptr_mode, mem, 0);
#endif
    }
  exc_ptr_reg = force_reg (ptr_mode, mem);

  mem = adjust_address (fc, unwind_word_mode,
			sjlj_fc_data_ofs + GET_MODE_SIZE (unwind_word_mode));
  if (unwind_word_mode != filter_mode)
    mem = convert_to_mode (filter_mode, mem, 0);
  filter_reg = force_reg (filter_mode, mem);

  /* Jump to one of the directly reachable regions.  */

  disp_index = 0;
  rtx_code_label *first_reachable_label = NULL;

  /* If there's exactly one call site in the function, don't bother
     generating a switch statement.  */
  if (num_dispatch > 1)
    dispatch_labels.create (num_dispatch);

  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
    if (lp && lp->post_landing_pad)
      {
	rtx_insn *seq2;
	rtx_code_label *label;

	start_sequence ();

	lp->landing_pad = dispatch_label;

	if (num_dispatch > 1)
	  {
	    tree t_label, case_elt, t;

	    t_label = create_artificial_label (UNKNOWN_LOCATION);
	    t = build_int_cst (integer_type_node, disp_index);
	    case_elt = build_case_label (t, NULL, t_label);
	    dispatch_labels.quick_push (case_elt);
	    label = jump_target_rtx (t_label);
	  }
	else
	  label = gen_label_rtx ();

	if (disp_index == 0)
	  first_reachable_label = label;
	emit_label (label);

	r = lp->region;
	if (r->exc_ptr_reg)
	  emit_move_insn (r->exc_ptr_reg, exc_ptr_reg);
	if (r->filter_reg)
	  emit_move_insn (r->filter_reg, filter_reg);

	seq2 = get_insns ();
	end_sequence ();

	rtx_insn *before = label_rtx (lp->post_landing_pad);
	bb = emit_to_new_bb_before (seq2, before);
	make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
	if (current_loops)
	  {
	    class loop *loop = bb->next_bb->loop_father;
	    /* If we created a pre-header block, add the new block to the
	       outer loop, otherwise to the loop itself.  */
	    if (bb->next_bb == loop->header)
	      add_bb_to_loop (bb, loop_outer (loop));
	    else
	      add_bb_to_loop (bb, loop);
	    /* ??? For multiple dispatches we will end up with edges
	       from the loop tree root into this loop, making it a
	       multiple-entry loop.  Discard all affected loops.  */
	    if (num_dispatch > 1)
	      {
		for (loop = bb->loop_father;
		     loop_outer (loop); loop = loop_outer (loop))
		  mark_loop_for_removal (loop);
	      }
	  }

	disp_index++;
      }
  gcc_assert (disp_index == num_dispatch);

  if (num_dispatch > 1)
    {
      rtx disp = adjust_address (fc, TYPE_MODE (integer_type_node),
				 sjlj_fc_call_site_ofs);
      expand_sjlj_dispatch_table (disp, dispatch_labels);
    }

  seq = get_insns ();
  end_sequence ();

  bb = emit_to_new_bb_before (seq, first_reachable_label);
  if (num_dispatch == 1)
    {
      make_single_succ_edge (bb, bb->next_bb, EDGE_FALLTHRU);
      if (current_loops)
	{
	  class loop *loop = bb->next_bb->loop_father;
	  /* If we created a pre-header block, add the new block to the
	     outer loop, otherwise to the loop itself.  */
	  if (bb->next_bb == loop->header)
	    add_bb_to_loop (bb, loop_outer (loop));
	  else
	    add_bb_to_loop (bb, loop);
	}
    }
  else
    {
      /* We are not wiring up edges here; since the dispatcher call is
	 at the beginning of the function, simply associate the block
	 with the outermost (non-)loop.  */
      if (current_loops)
	add_bb_to_loop (bb, current_loops->tree_root);
    }
}

static void
sjlj_build_landing_pads (void)
{
  int num_dispatch;

  num_dispatch = vec_safe_length (cfun->eh->lp_array);
  if (num_dispatch == 0)
    return;
  sjlj_lp_call_site_index.safe_grow_cleared (num_dispatch, true);

  num_dispatch = sjlj_assign_call_site_values ();
  if (num_dispatch > 0)
    {
      rtx_code_label *dispatch_label = gen_label_rtx ();
      int align = STACK_SLOT_ALIGNMENT (sjlj_fc_type_node,
					TYPE_MODE (sjlj_fc_type_node),
					TYPE_ALIGN (sjlj_fc_type_node));
      crtl->eh.sjlj_fc
	= assign_stack_local (TYPE_MODE (sjlj_fc_type_node),
			      int_size_in_bytes (sjlj_fc_type_node),
			      align);

      sjlj_mark_call_sites ();
      sjlj_emit_function_enter (dispatch_label);
      sjlj_emit_dispatch_table (dispatch_label, num_dispatch);
      sjlj_emit_function_exit ();
    }

  /* If we do not have any landing pads, we may still need to register a
     personality routine and (empty) LSDA to handle must-not-throw regions.  */
  else if (function_needs_eh_personality (cfun) != eh_personality_none)
    {
      int align = STACK_SLOT_ALIGNMENT (sjlj_fc_type_node,
					TYPE_MODE (sjlj_fc_type_node),
					TYPE_ALIGN (sjlj_fc_type_node));
      crtl->eh.sjlj_fc
	= assign_stack_local (TYPE_MODE (sjlj_fc_type_node),
			      int_size_in_bytes (sjlj_fc_type_node),
			      align);

      sjlj_mark_call_sites ();
      sjlj_emit_function_enter (NULL);
      sjlj_emit_function_exit ();
    }

  sjlj_lp_call_site_index.release ();
}

/* Update the sjlj function context.  This function should be called
   whenever we allocate or deallocate dynamic stack space.  */

void
update_sjlj_context (void)
{
  if (!flag_exceptions)
    return;

  emit_note (NOTE_INSN_UPDATE_SJLJ_CONTEXT);
}

/* After initial rtl generation, call back to finish generating
   exception support code.  */

void
finish_eh_generation (void)
{
  basic_block bb;

  /* Construct the landing pads.  */
  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ)
    sjlj_build_landing_pads ();
  else
    dw2_build_landing_pads ();

  break_superblocks ();

  /* Redirect all EH edges from the post_landing_pad to the landing pad.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      eh_landing_pad lp;
      edge_iterator ei;
      edge e;

      lp = get_eh_landing_pad_from_rtx (BB_END (bb));

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_EH)
	  break;

      /* We should not have generated any new throwing insns during this
	 pass, and we should not have lost any EH edges, so we only need
	 to handle two cases here:
	 (1) reachable handler and an existing edge to post-landing-pad,
	 (2) no reachable handler and no edge.  */
      gcc_assert ((lp != NULL) == (e != NULL));
      if (lp != NULL)
	{
	  gcc_assert (BB_HEAD (e->dest) == label_rtx (lp->post_landing_pad));

	  redirect_edge_succ (e, BLOCK_FOR_INSN (lp->landing_pad));
	  e->flags |= (CALL_P (BB_END (bb))
		       ? EDGE_ABNORMAL | EDGE_ABNORMAL_CALL
		       : EDGE_ABNORMAL);
	}
    }

  if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ
      /* Kludge for Alpha (see alpha_gp_save_rtx).  */
      || single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun))->insns.r)
    commit_edge_insertions ();
}
1561 | |
1562 | /* This section handles removing dead code for flow. */ |
1563 | |
1564 | void |
1565 | remove_eh_landing_pad (eh_landing_pad lp) |
1566 | { |
1567 | eh_landing_pad *pp; |
1568 | |
1569 | for (pp = &lp->region->landing_pads; *pp != lp; pp = &(*pp)->next_lp) |
1570 | continue; |
1571 | *pp = lp->next_lp; |
1572 | |
1573 | if (lp->post_landing_pad) |
1574 | EH_LANDING_PAD_NR (lp->post_landing_pad) = 0; |
1575 | (*cfun->eh->lp_array)[lp->index] = NULL; |
1576 | } |
1577 | |
1578 | /* Splice the EH region at PP from the region tree. */ |
1579 | |
1580 | static void |
1581 | remove_eh_handler_splicer (eh_region *pp) |
1582 | { |
1583 | eh_region region = *pp; |
1584 | eh_landing_pad lp; |
1585 | |
1586 | for (lp = region->landing_pads; lp ; lp = lp->next_lp) |
1587 | { |
1588 | if (lp->post_landing_pad) |
1589 | EH_LANDING_PAD_NR (lp->post_landing_pad) = 0; |
1590 | (*cfun->eh->lp_array)[lp->index] = NULL; |
1591 | } |
1592 | |
1593 | if (region->inner) |
1594 | { |
1595 | eh_region p, outer; |
1596 | outer = region->outer; |
1597 | |
1598 | *pp = p = region->inner; |
1599 | do |
1600 | { |
1601 | p->outer = outer; |
1602 | pp = &p->next_peer; |
1603 | p = *pp; |
1604 | } |
1605 | while (p); |
1606 | } |
1607 | *pp = region->next_peer; |
1608 | |
1609 | (*cfun->eh->region_array)[region->index] = NULL; |
1610 | } |
1611 | |
1612 | /* Splice a single EH region REGION from the region tree. |
1613 | |
1614 | To unlink REGION, we need to find the pointer to it with a relatively |
1615 | expensive search in REGION's outer region. If you are going to |
1616 | remove a number of handlers, using remove_unreachable_eh_regions may |
1617 | be a better option. */ |
1618 | |
1619 | void |
1620 | remove_eh_handler (eh_region region) |
1621 | { |
1622 | eh_region *pp, *pp_start, p, outer; |
1623 | |
1624 | outer = region->outer; |
1625 | if (outer) |
1626 | pp_start = &outer->inner; |
1627 | else |
1628 | pp_start = &cfun->eh->region_tree; |
1629 | for (pp = pp_start, p = *pp; p != region; pp = &p->next_peer, p = *pp) |
1630 | continue; |
1631 | |
1632 | remove_eh_handler_splicer (pp); |
1633 | } |
1634 | |
1635 | /* Worker for remove_unreachable_eh_regions. |
1636 | PP is a pointer to the region to start a region tree depth-first |
1637 | search from. R_REACHABLE is the set of regions that have to be |
1638 | preserved. */ |
1639 | |
1640 | static void |
1641 | remove_unreachable_eh_regions_worker (eh_region *pp, sbitmap r_reachable) |
1642 | { |
1643 | while (*pp) |
1644 | { |
1645 | eh_region region = *pp; |
      remove_unreachable_eh_regions_worker (&region->inner, r_reachable);
      if (!bitmap_bit_p (r_reachable, region->index))
	remove_eh_handler_splicer (pp);
      else
	pp = &region->next_peer;
1651 | } |
1652 | } |
1653 | |
1654 | /* Splice all EH regions *not* marked in R_REACHABLE from the region tree. |
1655 | Do this by traversing the EH tree top-down and splice out regions that |
1656 | are not marked. By removing regions from the leaves, we avoid costly |
1657 | searches in the region tree. */ |
1658 | |
1659 | void |
1660 | remove_unreachable_eh_regions (sbitmap r_reachable) |
1661 | { |
  remove_unreachable_eh_regions_worker (&cfun->eh->region_tree, r_reachable);
1663 | } |
1664 | |
1665 | /* Invokes CALLBACK for every exception handler landing pad label. |
1666 | Only used by reload hackery; should not be used by new code. */ |
1667 | |
1668 | void |
1669 | for_each_eh_label (void (*callback) (rtx)) |
1670 | { |
1671 | eh_landing_pad lp; |
1672 | int i; |
1673 | |
  for (i = 1; vec_safe_iterate (cfun->eh->lp_array, i, &lp); ++i)
1675 | { |
1676 | if (lp) |
1677 | { |
1678 | rtx_code_label *lab = lp->landing_pad; |
1679 | if (lab && LABEL_P (lab)) |
1680 | (*callback) (lab); |
1681 | } |
1682 | } |
1683 | } |
1684 | |
1685 | /* Create the REG_EH_REGION note for INSN, given its ECF_FLAGS for a |
1686 | call insn. |
1687 | |
1688 | At the gimple level, we use LP_NR |
1689 | > 0 : The statement transfers to landing pad LP_NR |
1690 | = 0 : The statement is outside any EH region |
1691 | < 0 : The statement is within MUST_NOT_THROW region -LP_NR. |
1692 | |
1693 | At the rtl level, we use LP_NR |
1694 | > 0 : The insn transfers to landing pad LP_NR |
1695 | = 0 : The insn cannot throw |
1696 | < 0 : The insn is within MUST_NOT_THROW region -LP_NR |
1697 | = INT_MIN : The insn cannot throw or execute a nonlocal-goto. |
1698 | missing note: The insn is outside any EH region. |
1699 | |
1700 | ??? This difference probably ought to be avoided. We could stand |
1701 | to record nothrow for arbitrary gimple statements, and so avoid |
1702 | some moderately complex lookups in stmt_could_throw_p. Perhaps |
1703 | NOTHROW should be mapped on both sides to INT_MIN. Perhaps the |
1704 | no-nonlocal-goto property should be recorded elsewhere as a bit |
1705 | on the call_insn directly. Perhaps we should make more use of |
1706 | attaching the trees to call_insns (reachable via symbol_ref in |
1707 | direct call cases) and just pull the data out of the trees. */ |
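
/* For illustration (with hypothetical region numbers): a call whose
   handler is landing pad 3 gets a "REG_EH_REGION 3" note; the same
   call within MUST_NOT_THROW region 2 gets "REG_EH_REGION -2"; a call
   known not to throw gets the value 0; and one that can neither throw
   nor execute a nonlocal goto gets INT_MIN.  */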
1708 | |
1709 | void |
1710 | make_reg_eh_region_note (rtx_insn *insn, int ecf_flags, int lp_nr) |
1711 | { |
1712 | rtx value; |
1713 | if (ecf_flags & ECF_NOTHROW) |
1714 | value = const0_rtx; |
1715 | else if (lp_nr != 0) |
1716 | value = GEN_INT (lp_nr); |
1717 | else |
1718 | return; |
1719 | add_reg_note (insn, REG_EH_REGION, value); |
1720 | } |
1721 | |
1722 | /* Create a REG_EH_REGION note for a CALL_INSN that cannot throw |
1723 | nor perform a non-local goto. Replace the region note if it |
1724 | already exists. */ |
1725 | |
1726 | void |
1727 | make_reg_eh_region_note_nothrow_nononlocal (rtx_insn *insn) |
1728 | { |
1729 | rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); |
1730 | rtx intmin = GEN_INT (INT_MIN); |
1731 | |
1732 | if (note != 0) |
1733 | XEXP (note, 0) = intmin; |
1734 | else |
1735 | add_reg_note (insn, REG_EH_REGION, intmin); |
1736 | } |
1737 | |
1738 | /* Return true if INSN could throw, assuming no REG_EH_REGION note |
1739 | to the contrary. */ |
1740 | |
1741 | bool |
1742 | insn_could_throw_p (const_rtx insn) |
1743 | { |
1744 | if (!flag_exceptions) |
1745 | return false; |
1746 | if (CALL_P (insn)) |
1747 | return true; |
1748 | if (INSN_P (insn) && cfun->can_throw_non_call_exceptions) |
1749 | return may_trap_p (PATTERN (insn)); |
1750 | return false; |
1751 | } |
1752 | |
/* Copy a REG_EH_REGION note to each insn that might throw beginning
1754 | at FIRST and ending at LAST. NOTE_OR_INSN is either the source insn |
1755 | to look for a note, or the note itself. */ |
1756 | |
1757 | void |
1758 | copy_reg_eh_region_note_forward (rtx note_or_insn, rtx_insn *first, rtx last) |
1759 | { |
1760 | rtx_insn *insn; |
1761 | rtx note = note_or_insn; |
1762 | |
1763 | if (INSN_P (note_or_insn)) |
1764 | { |
1765 | note = find_reg_note (note_or_insn, REG_EH_REGION, NULL_RTX); |
1766 | if (note == NULL) |
1767 | return; |
1768 | } |
  else if (is_a <rtx_insn *> (note_or_insn))
1770 | return; |
1771 | note = XEXP (note, 0); |
1772 | |
1773 | for (insn = first; insn != last ; insn = NEXT_INSN (insn)) |
1774 | if (!find_reg_note (insn, REG_EH_REGION, NULL_RTX) |
1775 | && insn_could_throw_p (insn)) |
1776 | add_reg_note (insn, REG_EH_REGION, note); |
1777 | } |
1778 | |
1779 | /* Likewise, but iterate backward. */ |
1780 | |
1781 | void |
1782 | copy_reg_eh_region_note_backward (rtx note_or_insn, rtx_insn *last, rtx first) |
1783 | { |
1784 | rtx_insn *insn; |
1785 | rtx note = note_or_insn; |
1786 | |
1787 | if (INSN_P (note_or_insn)) |
1788 | { |
1789 | note = find_reg_note (note_or_insn, REG_EH_REGION, NULL_RTX); |
1790 | if (note == NULL) |
1791 | return; |
1792 | } |
  else if (is_a <rtx_insn *> (note_or_insn))
1794 | return; |
1795 | note = XEXP (note, 0); |
1796 | |
1797 | for (insn = last; insn != first; insn = PREV_INSN (insn)) |
1798 | if (insn_could_throw_p (insn)) |
1799 | add_reg_note (insn, REG_EH_REGION, note); |
1800 | } |
1801 | |
1802 | |
1803 | /* Extract all EH information from INSN. Return true if the insn |
1804 | was marked NOTHROW. */ |
1805 | |
1806 | static bool |
1807 | get_eh_region_and_lp_from_rtx (const_rtx insn, eh_region *pr, |
1808 | eh_landing_pad *plp) |
1809 | { |
1810 | eh_landing_pad lp = NULL; |
1811 | eh_region r = NULL; |
1812 | bool ret = false; |
1813 | rtx note; |
1814 | int lp_nr; |
1815 | |
1816 | if (! INSN_P (insn)) |
1817 | goto egress; |
1818 | |
1819 | if (NONJUMP_INSN_P (insn) |
1820 | && GET_CODE (PATTERN (insn)) == SEQUENCE) |
1821 | insn = XVECEXP (PATTERN (insn), 0, 0); |
1822 | |
1823 | note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); |
1824 | if (!note) |
1825 | { |
1826 | ret = !insn_could_throw_p (insn); |
1827 | goto egress; |
1828 | } |
1829 | |
1830 | lp_nr = INTVAL (XEXP (note, 0)); |
1831 | if (lp_nr == 0 || lp_nr == INT_MIN) |
1832 | { |
1833 | ret = true; |
1834 | goto egress; |
1835 | } |
1836 | |
1837 | if (lp_nr < 0) |
1838 | r = (*cfun->eh->region_array)[-lp_nr]; |
1839 | else |
1840 | { |
1841 | lp = (*cfun->eh->lp_array)[lp_nr]; |
1842 | r = lp->region; |
1843 | } |
1844 | |
1845 | egress: |
1846 | *plp = lp; |
1847 | *pr = r; |
1848 | return ret; |
1849 | } |
1850 | |
1851 | /* Return the landing pad to which INSN may go, or NULL if it does not |
1852 | have a reachable landing pad within this function. */ |
1853 | |
1854 | eh_landing_pad |
1855 | get_eh_landing_pad_from_rtx (const_rtx insn) |
1856 | { |
1857 | eh_landing_pad lp; |
1858 | eh_region r; |
1859 | |
  get_eh_region_and_lp_from_rtx (insn, &r, &lp);
1861 | return lp; |
1862 | } |
1863 | |
1864 | /* Return the region to which INSN may go, or NULL if it does not |
1865 | have a reachable region within this function. */ |
1866 | |
1867 | eh_region |
1868 | get_eh_region_from_rtx (const_rtx insn) |
1869 | { |
1870 | eh_landing_pad lp; |
1871 | eh_region r; |
1872 | |
  get_eh_region_and_lp_from_rtx (insn, &r, &lp);
1874 | return r; |
1875 | } |
1876 | |
1877 | /* Return true if INSN throws and is caught by something in this function. */ |
1878 | |
1879 | bool |
1880 | can_throw_internal (const_rtx insn) |
1881 | { |
1882 | return get_eh_landing_pad_from_rtx (insn) != NULL; |
1883 | } |
1884 | |
1885 | /* Return true if INSN throws and escapes from the current function. */ |
1886 | |
1887 | bool |
1888 | can_throw_external (const_rtx insn) |
1889 | { |
1890 | eh_landing_pad lp; |
1891 | eh_region r; |
1892 | bool nothrow; |
1893 | |
1894 | if (! INSN_P (insn)) |
1895 | return false; |
1896 | |
1897 | if (NONJUMP_INSN_P (insn) |
1898 | && GET_CODE (PATTERN (insn)) == SEQUENCE) |
1899 | { |
      rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
      int i, n = seq->len ();

      for (i = 0; i < n; i++)
	if (can_throw_external (seq->element (i)))
1905 | return true; |
1906 | |
1907 | return false; |
1908 | } |
1909 | |
  nothrow = get_eh_region_and_lp_from_rtx (insn, &r, &lp);
1911 | |
1912 | /* If we can't throw, we obviously can't throw external. */ |
1913 | if (nothrow) |
1914 | return false; |
1915 | |
1916 | /* If we have an internal landing pad, then we're not external. */ |
1917 | if (lp != NULL) |
1918 | return false; |
1919 | |
1920 | /* If we're not within an EH region, then we are external. */ |
1921 | if (r == NULL) |
1922 | return true; |
1923 | |
1924 | /* The only thing that ought to be left is MUST_NOT_THROW regions, |
1925 | which don't always have landing pads. */ |
1926 | gcc_assert (r->type == ERT_MUST_NOT_THROW); |
1927 | return false; |
1928 | } |
1929 | |
1930 | /* Return true if INSN cannot throw at all. */ |
1931 | |
1932 | bool |
1933 | insn_nothrow_p (const_rtx insn) |
1934 | { |
1935 | eh_landing_pad lp; |
1936 | eh_region r; |
1937 | |
1938 | if (! INSN_P (insn)) |
1939 | return true; |
1940 | |
1941 | if (NONJUMP_INSN_P (insn) |
1942 | && GET_CODE (PATTERN (insn)) == SEQUENCE) |
1943 | { |
      rtx_sequence *seq = as_a <rtx_sequence *> (PATTERN (insn));
      int i, n = seq->len ();

      for (i = 0; i < n; i++)
	if (!insn_nothrow_p (seq->element (i)))
1949 | return false; |
1950 | |
1951 | return true; |
1952 | } |
1953 | |
  return get_eh_region_and_lp_from_rtx (insn, &r, &lp);
1955 | } |
1956 | |
1957 | /* Return true if INSN can perform a non-local goto. */ |
1958 | /* ??? This test is here in this file because it (ab)uses REG_EH_REGION. */ |
1959 | |
1960 | bool |
1961 | can_nonlocal_goto (const rtx_insn *insn) |
1962 | { |
1963 | if (nonlocal_goto_handler_labels && CALL_P (insn)) |
1964 | { |
1965 | rtx note = find_reg_note (insn, REG_EH_REGION, NULL_RTX); |
1966 | if (!note || INTVAL (XEXP (note, 0)) != INT_MIN) |
1967 | return true; |
1968 | } |
1969 | return false; |
1970 | } |
1971 | |
1972 | /* Set TREE_NOTHROW and crtl->all_throwers_are_sibcalls. */ |
1973 | |
1974 | static unsigned int |
1975 | set_nothrow_function_flags (void) |
1976 | { |
1977 | rtx_insn *insn; |
1978 | |
1979 | crtl->nothrow = 1; |
1980 | |
1981 | /* Assume crtl->all_throwers_are_sibcalls until we encounter |
1982 | something that can throw an exception. We specifically exempt |
1983 | CALL_INSNs that are SIBLING_CALL_P, as these are really jumps, |
1984 | and can't throw. Most CALL_INSNs are not SIBLING_CALL_P, so this |
1985 | is optimistic. */ |
1986 | |
1987 | crtl->all_throwers_are_sibcalls = 1; |
1988 | |
1989 | /* If we don't know that this implementation of the function will |
1990 | actually be used, then we must not set TREE_NOTHROW, since |
1991 | callers must not assume that this function does not throw. */ |
1992 | if (TREE_NOTHROW (current_function_decl)) |
1993 | return 0; |
1994 | |
1995 | if (! flag_exceptions) |
1996 | return 0; |
1997 | |
1998 | for (insn = get_insns (); insn; insn = NEXT_INSN (insn)) |
1999 | if (can_throw_external (insn)) |
2000 | { |
2001 | crtl->nothrow = 0; |
2002 | |
2003 | if (!CALL_P (insn) || !SIBLING_CALL_P (insn)) |
2004 | { |
2005 | crtl->all_throwers_are_sibcalls = 0; |
2006 | return 0; |
2007 | } |
2008 | } |
2009 | |
2010 | if (crtl->nothrow |
      && (cgraph_node::get (current_function_decl)->get_availability ()
2012 | >= AVAIL_AVAILABLE)) |
2013 | { |
      struct cgraph_node *node = cgraph_node::get (current_function_decl);
2015 | struct cgraph_edge *e; |
2016 | for (e = node->callers; e; e = e->next_caller) |
2017 | e->can_throw_external = false; |
2018 | node->set_nothrow_flag (true); |
2019 | |
2020 | if (dump_file) |
	fprintf (dump_file, "Marking function nothrow: %s\n\n",
2022 | current_function_name ()); |
2023 | } |
2024 | return 0; |
2025 | } |
2026 | |
2027 | namespace { |
2028 | |
2029 | const pass_data pass_data_set_nothrow_function_flags = |
2030 | { |
  RTL_PASS, /* type */
  "nothrow", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
2040 | }; |
2041 | |
2042 | class pass_set_nothrow_function_flags : public rtl_opt_pass |
2043 | { |
2044 | public: |
2045 | pass_set_nothrow_function_flags (gcc::context *ctxt) |
2046 | : rtl_opt_pass (pass_data_set_nothrow_function_flags, ctxt) |
2047 | {} |
2048 | |
2049 | /* opt_pass methods: */ |
2050 | unsigned int execute (function *) final override |
2051 | { |
2052 | return set_nothrow_function_flags (); |
2053 | } |
2054 | |
2055 | }; // class pass_set_nothrow_function_flags |
2056 | |
2057 | } // anon namespace |
2058 | |
2059 | rtl_opt_pass * |
2060 | make_pass_set_nothrow_function_flags (gcc::context *ctxt) |
2061 | { |
2062 | return new pass_set_nothrow_function_flags (ctxt); |
2063 | } |
2064 | |
2065 | |
2066 | /* Various hooks for unwind library. */ |
2067 | |
2068 | /* Expand the EH support builtin functions: |
2069 | __builtin_eh_pointer and __builtin_eh_filter. */ |
2070 | |
2071 | static eh_region |
2072 | expand_builtin_eh_common (tree region_nr_t) |
2073 | { |
2074 | HOST_WIDE_INT region_nr; |
2075 | eh_region region; |
2076 | |
2077 | gcc_assert (tree_fits_shwi_p (region_nr_t)); |
2078 | region_nr = tree_to_shwi (region_nr_t); |
2079 | |
2080 | region = (*cfun->eh->region_array)[region_nr]; |
2081 | |
  /* ??? We shouldn't have been able to delete an EH region without
     deleting all the code that depended on it.  */
2084 | gcc_assert (region != NULL); |
2085 | |
2086 | return region; |
2087 | } |
2088 | |
2089 | /* Expand to the exc_ptr value from the given eh region. */ |
2090 | |
2091 | rtx |
2092 | expand_builtin_eh_pointer (tree exp) |
2093 | { |
2094 | eh_region region |
2095 | = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0)); |
2096 | if (region->exc_ptr_reg == NULL) |
2097 | region->exc_ptr_reg = gen_reg_rtx (ptr_mode); |
2098 | return region->exc_ptr_reg; |
2099 | } |
2100 | |
2101 | /* Expand to the filter value from the given eh region. */ |
2102 | |
2103 | rtx |
2104 | expand_builtin_eh_filter (tree exp) |
2105 | { |
2106 | eh_region region |
2107 | = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0)); |
2108 | if (region->filter_reg == NULL) |
2109 | region->filter_reg = gen_reg_rtx (targetm.eh_return_filter_mode ()); |
2110 | return region->filter_reg; |
2111 | } |
2112 | |
2113 | /* Copy the exc_ptr and filter values from one landing pad's registers |
2114 | to another. This is used to inline the resx statement. */ |
2115 | |
2116 | rtx |
2117 | expand_builtin_eh_copy_values (tree exp) |
2118 | { |
2119 | eh_region dst |
2120 | = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 0)); |
2121 | eh_region src |
2122 | = expand_builtin_eh_common (CALL_EXPR_ARG (exp, 1)); |
2123 | scalar_int_mode fmode = targetm.eh_return_filter_mode (); |
2124 | |
2125 | if (dst->exc_ptr_reg == NULL) |
2126 | dst->exc_ptr_reg = gen_reg_rtx (ptr_mode); |
2127 | if (src->exc_ptr_reg == NULL) |
2128 | src->exc_ptr_reg = gen_reg_rtx (ptr_mode); |
2129 | |
2130 | if (dst->filter_reg == NULL) |
2131 | dst->filter_reg = gen_reg_rtx (fmode); |
2132 | if (src->filter_reg == NULL) |
2133 | src->filter_reg = gen_reg_rtx (fmode); |
2134 | |
2135 | emit_move_insn (dst->exc_ptr_reg, src->exc_ptr_reg); |
2136 | emit_move_insn (dst->filter_reg, src->filter_reg); |
2137 | |
2138 | return const0_rtx; |
2139 | } |
2140 | |
2141 | /* Do any necessary initialization to access arbitrary stack frames. |
2142 | On the SPARC, this means flushing the register windows. */ |
2143 | |
2144 | void |
2145 | expand_builtin_unwind_init (void) |
2146 | { |
2147 | /* Set this so all the registers get saved in our frame; we need to be |
2148 | able to copy the saved values for any registers from frames we unwind. */ |
2149 | crtl->saves_all_registers = 1; |
2150 | |
2151 | SETUP_FRAME_ADDRESSES (); |
2152 | } |
2153 | |
2154 | /* Map a non-negative number to an eh return data register number; expands |
2155 | to -1 if no return data register is associated with the input number. |
2156 | At least the inputs 0 and 1 must be mapped; the target may provide more. */ |
2157 | |
2158 | rtx |
2159 | expand_builtin_eh_return_data_regno (tree exp) |
2160 | { |
2161 | tree which = CALL_EXPR_ARG (exp, 0); |
2162 | unsigned HOST_WIDE_INT iwhich; |
2163 | |
2164 | if (TREE_CODE (which) != INTEGER_CST) |
2165 | { |
2166 | error ("argument of %<__builtin_eh_return_regno%> must be constant" ); |
2167 | return constm1_rtx; |
2168 | } |
2169 | |
2170 | iwhich = tree_to_uhwi (which); |
2171 | iwhich = EH_RETURN_DATA_REGNO (iwhich); |
2172 | if (iwhich == INVALID_REGNUM) |
2173 | return constm1_rtx; |
2174 | |
2175 | #ifdef DWARF_FRAME_REGNUM |
2176 | iwhich = DWARF_FRAME_REGNUM (iwhich); |
2177 | #else |
2178 | iwhich = DEBUGGER_REGNO (iwhich); |
2179 | #endif |
2180 | |
2181 | return GEN_INT (iwhich); |
2182 | } |
2183 | |
2184 | /* Given a value extracted from the return address register or stack slot, |
2185 | return the actual address encoded in that value. */ |
2186 | |
2187 | rtx |
2188 | expand_builtin_extract_return_addr (tree addr_tree) |
2189 | { |
  rtx addr = expand_expr (addr_tree, NULL_RTX, Pmode, EXPAND_NORMAL);
2191 | |
2192 | if (GET_MODE (addr) != Pmode |
2193 | && GET_MODE (addr) != VOIDmode) |
2194 | { |
2195 | #ifdef POINTERS_EXTEND_UNSIGNED |
2196 | addr = convert_memory_address (Pmode, addr); |
2197 | #else |
2198 | addr = convert_to_mode (Pmode, addr, 0); |
2199 | #endif |
2200 | } |
2201 | |
2202 | /* First mask out any unwanted bits. */ |
2203 | rtx mask = MASK_RETURN_ADDR; |
2204 | if (mask) |
2205 | expand_and (Pmode, addr, mask, addr); |
2206 | |
2207 | /* Then adjust to find the real return address. */ |
2208 | if (RETURN_ADDR_OFFSET) |
2209 | addr = plus_constant (Pmode, addr, RETURN_ADDR_OFFSET); |
2210 | |
2211 | return addr; |
2212 | } |
2213 | |
2214 | /* Given an actual address in addr_tree, do any necessary encoding |
2215 | and return the value to be stored in the return address register or |
2216 | stack slot so the epilogue will return to that address. */ |
2217 | |
2218 | rtx |
2219 | expand_builtin_frob_return_addr (tree addr_tree) |
2220 | { |
  rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, EXPAND_NORMAL);
2222 | |
2223 | addr = convert_memory_address (Pmode, addr); |
2224 | |
2225 | if (RETURN_ADDR_OFFSET) |
2226 | { |
2227 | addr = force_reg (Pmode, addr); |
2228 | addr = plus_constant (Pmode, addr, -RETURN_ADDR_OFFSET); |
2229 | } |
2230 | |
2231 | return addr; |
2232 | } |
2233 | |
2234 | /* Set up the epilogue with the magic bits we'll need to return to the |
2235 | exception handler. */ |
2236 | |
2237 | void |
2238 | expand_builtin_eh_return (tree stackadj_tree ATTRIBUTE_UNUSED, |
2239 | tree handler_tree) |
2240 | { |
2241 | rtx tmp; |
2242 | |
2243 | #ifdef EH_RETURN_STACKADJ_RTX |
  tmp = expand_expr (stackadj_tree, crtl->eh.ehr_stackadj,
		     VOIDmode, EXPAND_NORMAL);
2246 | tmp = convert_memory_address (Pmode, tmp); |
2247 | if (!crtl->eh.ehr_stackadj) |
2248 | crtl->eh.ehr_stackadj = copy_addr_to_reg (tmp); |
2249 | else if (tmp != crtl->eh.ehr_stackadj) |
2250 | emit_move_insn (crtl->eh.ehr_stackadj, tmp); |
2251 | #endif |
2252 | |
  tmp = expand_expr (handler_tree, crtl->eh.ehr_handler,
		     VOIDmode, EXPAND_NORMAL);
2255 | tmp = convert_memory_address (Pmode, tmp); |
2256 | if (!crtl->eh.ehr_handler) |
2257 | crtl->eh.ehr_handler = copy_addr_to_reg (tmp); |
2258 | else if (tmp != crtl->eh.ehr_handler) |
2259 | emit_move_insn (crtl->eh.ehr_handler, tmp); |
2260 | |
2261 | if (!crtl->eh.ehr_label) |
2262 | crtl->eh.ehr_label = gen_label_rtx (); |
2263 | emit_jump (crtl->eh.ehr_label); |
2264 | } |
2265 | |
2266 | /* Expand __builtin_eh_return. This exit path from the function loads up |
2267 | the eh return data registers, adjusts the stack, and branches to a |
2268 | given PC other than the normal return address. */ |
2269 | |
2270 | void |
2271 | expand_eh_return (void) |
2272 | { |
2273 | rtx_code_label *around_label; |
2274 | |
2275 | if (! crtl->eh.ehr_label) |
2276 | return; |
2277 | |
2278 | crtl->calls_eh_return = 1; |
2279 | |
2280 | #ifdef EH_RETURN_STACKADJ_RTX |
2281 | emit_move_insn (EH_RETURN_STACKADJ_RTX, const0_rtx); |
2282 | #endif |
2283 | |
2284 | around_label = gen_label_rtx (); |
2285 | emit_jump (around_label); |
2286 | |
2287 | emit_label (crtl->eh.ehr_label); |
2288 | clobber_return_register (); |
2289 | |
2290 | #ifdef EH_RETURN_STACKADJ_RTX |
2291 | emit_move_insn (EH_RETURN_STACKADJ_RTX, crtl->eh.ehr_stackadj); |
2292 | #endif |
2293 | |
2294 | if (targetm.have_eh_return ()) |
2295 | emit_insn (targetm.gen_eh_return (crtl->eh.ehr_handler)); |
2296 | else |
2297 | { |
2298 | if (rtx handler = EH_RETURN_HANDLER_RTX) |
2299 | emit_move_insn (handler, crtl->eh.ehr_handler); |
2300 | else |
2301 | error ("%<__builtin_eh_return%> not supported on this target" ); |
2302 | } |
2303 | |
2304 | emit_label (around_label); |
2305 | } |
2306 | |
2307 | /* Convert a ptr_mode address ADDR_TREE to a Pmode address controlled by |
2308 | POINTERS_EXTEND_UNSIGNED and return it. */ |
2309 | |
2310 | rtx |
2311 | expand_builtin_extend_pointer (tree addr_tree) |
2312 | { |
  rtx addr = expand_expr (addr_tree, NULL_RTX, ptr_mode, EXPAND_NORMAL);
2314 | int extend; |
2315 | |
2316 | #ifdef POINTERS_EXTEND_UNSIGNED |
2317 | extend = POINTERS_EXTEND_UNSIGNED; |
2318 | #else |
2319 | /* The previous EH code did an unsigned extend by default, so we do this also |
2320 | for consistency. */ |
2321 | extend = 1; |
2322 | #endif |
2323 | |
  return convert_modes (targetm.unwind_word_mode (), ptr_mode, addr, extend);
2325 | } |
2326 | |
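/* Add an action record for FILTER chained to the record at 1-based
   offset NEXT (zero terminates the chain) to AR_HASH, reusing an
   existing identical record if one exists.  Return the record's
   1-based offset within crtl->eh.action_record_data.  As a worked
   example (for illustration): the first record pushed, say (filter 1,
   next 0), lands at offset 1; a second record chaining to it stores
   the self-relative link 1 - 4 = -3, its link byte being at offset 4.  */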
2327 | static int |
2328 | add_action_record (action_hash_type *ar_hash, int filter, int next) |
2329 | { |
2330 | struct action_record **slot, *new_ar, tmp; |
2331 | |
2332 | tmp.filter = filter; |
2333 | tmp.next = next; |
  slot = ar_hash->find_slot (&tmp, INSERT);
2335 | |
2336 | if ((new_ar = *slot) == NULL) |
2337 | { |
2338 | new_ar = XNEW (struct action_record); |
2339 | new_ar->offset = crtl->eh.action_record_data->length () + 1; |
2340 | new_ar->filter = filter; |
2341 | new_ar->next = next; |
2342 | *slot = new_ar; |
2343 | |
2344 | /* The filter value goes in untouched. The link to the next |
2345 | record is a "self-relative" byte offset, or zero to indicate |
	 that there is no next record.  So convert the absolute 1-based
2347 | indices we've been carrying around into a displacement. */ |
2348 | |
2349 | push_sleb128 (&crtl->eh.action_record_data, filter); |
2350 | if (next) |
2351 | next -= crtl->eh.action_record_data->length () + 1; |
2352 | push_sleb128 (&crtl->eh.action_record_data, next); |
2353 | } |
2354 | |
2355 | return new_ar->offset; |
2356 | } |
2357 | |
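/* Compute the chain of action records describing what to do when an
   exception is delivered within REGION, adding records to AR_HASH as
   needed.  Return the 1-based offset of the first record, 0 if the
   path contains only cleanups, -1 if no action (and no landing pad)
   is required, or the magic value -2 for a must-not-throw region.  */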
2358 | static int |
2359 | collect_one_action_chain (action_hash_type *ar_hash, eh_region region) |
2360 | { |
2361 | int next; |
2362 | |
2363 | /* If we've reached the top of the region chain, then we have |
2364 | no actions, and require no landing pad. */ |
2365 | if (region == NULL) |
2366 | return -1; |
2367 | |
2368 | switch (region->type) |
2369 | { |
2370 | case ERT_CLEANUP: |
2371 | { |
2372 | eh_region r; |
2373 | /* A cleanup adds a zero filter to the beginning of the chain, but |
2374 | there are special cases to look out for. If there are *only* |
2375 | cleanups along a path, then it compresses to a zero action. |
2376 | Further, if there are multiple cleanups along a path, we only |
2377 | need to represent one of them, as that is enough to trigger |
2378 | entry to the landing pad at runtime. */ |
	next = collect_one_action_chain (ar_hash, region->outer);
2380 | if (next <= 0) |
2381 | return 0; |
2382 | for (r = region->outer; r ; r = r->outer) |
2383 | if (r->type == ERT_CLEANUP) |
2384 | return next; |
	return add_action_record (ar_hash, 0, next);
2386 | } |
2387 | |
2388 | case ERT_TRY: |
2389 | { |
2390 | eh_catch c; |
2391 | |
2392 | /* Process the associated catch regions in reverse order. |
2393 | If there's a catch-all handler, then we don't need to |
2394 | search outer regions. Use a magic -3 value to record |
2395 | that we haven't done the outer search. */ |
2396 | next = -3; |
2397 | for (c = region->u.eh_try.last_catch; c ; c = c->prev_catch) |
2398 | { |
2399 | if (c->type_list == NULL) |
2400 | { |
2401 | /* Retrieve the filter from the head of the filter list |
2402 | where we have stored it (see assign_filter_values). */ |
2403 | int filter = TREE_INT_CST_LOW (TREE_VALUE (c->filter_list)); |
		next = add_action_record (ar_hash, filter, 0);
2405 | } |
2406 | else |
2407 | { |
2408 | /* Once the outer search is done, trigger an action record for |
2409 | each filter we have. */ |
2410 | tree flt_node; |
2411 | |
2412 | if (next == -3) |
2413 | { |
		    next = collect_one_action_chain (ar_hash, region->outer);
2415 | |
2416 | /* If there is no next action, terminate the chain. */ |
2417 | if (next == -1) |
2418 | next = 0; |
2419 | /* If all outer actions are cleanups or must_not_throw, |
2420 | we'll have no action record for it, since we had wanted |
2421 | to encode these states in the call-site record directly. |
2422 | Add a cleanup action to the chain to catch these. */ |
2423 | else if (next <= 0) |
		      next = add_action_record (ar_hash, 0, 0);
2425 | } |
2426 | |
2427 | flt_node = c->filter_list; |
2428 | for (; flt_node; flt_node = TREE_CHAIN (flt_node)) |
2429 | { |
2430 | int filter = TREE_INT_CST_LOW (TREE_VALUE (flt_node)); |
2431 | next = add_action_record (ar_hash, filter, next); |
2432 | } |
2433 | } |
2434 | } |
2435 | return next; |
2436 | } |
2437 | |
2438 | case ERT_ALLOWED_EXCEPTIONS: |
2439 | /* An exception specification adds its filter to the |
2440 | beginning of the chain. */ |
      next = collect_one_action_chain (ar_hash, region->outer);
2442 | |
2443 | /* If there is no next action, terminate the chain. */ |
2444 | if (next == -1) |
2445 | next = 0; |
2446 | /* If all outer actions are cleanups or must_not_throw, |
2447 | we'll have no action record for it, since we had wanted |
2448 | to encode these states in the call-site record directly. |
2449 | Add a cleanup action to the chain to catch these. */ |
2450 | else if (next <= 0) |
	next = add_action_record (ar_hash, 0, 0);
2452 | |
      return add_action_record (ar_hash, region->u.allowed.filter, next);
2454 | |
2455 | case ERT_MUST_NOT_THROW: |
2456 | /* A must-not-throw region with no inner handlers or cleanups |
2457 | requires no call-site entry. Note that this differs from |
2458 | the no handler or cleanup case in that we do require an lsda |
2459 | to be generated. Return a magic -2 value to record this. */ |
2460 | return -2; |
2461 | } |
2462 | |
2463 | gcc_unreachable (); |
2464 | } |
2465 | |
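/* Append a call-site record with LANDING_PAD and ACTION to the table
   for SECTION and return its index, biased by call_site_base.  */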
2466 | static int |
2467 | add_call_site (rtx landing_pad, int action, int section) |
2468 | { |
2469 | call_site_record record; |
2470 | |
2471 | record = ggc_alloc<call_site_record_d> (); |
2472 | record->landing_pad = landing_pad; |
2473 | record->action = action; |
2474 | |
  vec_safe_push (crtl->eh.call_site_record_v[section], record);
2476 | |
2477 | return call_site_base + crtl->eh.call_site_record_v[section]->length () - 1; |
2478 | } |
2479 | |
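/* Emit a NOTE_INSN_EH_REGION_END note immediately after INSN.  */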
2480 | static rtx_note * |
2481 | emit_note_eh_region_end (rtx_insn *insn) |
2482 | { |
2483 | return emit_note_after (NOTE_INSN_EH_REGION_END, insn); |
2484 | } |
2485 | |
/* Add a NOP after NOTE_INSN_SWITCH_TEXT_SECTIONS when the cold section
   starts with a landing pad.
   With the landing pad at offset 0 from the start label of the section,
   we would miss EH delivery because offset 0 is special and means no
   landing pad.  */
2490 | |
2491 | static bool |
2492 | maybe_add_nop_after_section_switch (void) |
2493 | { |
2494 | if (!crtl->uses_eh_lsda |
2495 | || !crtl->eh.call_site_record_v[1]) |
2496 | return false; |
2497 | int n = vec_safe_length (crtl->eh.call_site_record_v[1]); |
2498 | hash_set<rtx_insn *> visited; |
2499 | |
2500 | for (int i = 0; i < n; ++i) |
2501 | { |
2502 | struct call_site_record_d *cs |
2503 | = (*crtl->eh.call_site_record_v[1])[i]; |
2504 | if (cs->landing_pad) |
2505 | { |
	  rtx_insn *insn = as_a <rtx_insn *> (cs->landing_pad);
2507 | while (true) |
2508 | { |
	      /* Landing pads have the LABEL_PRESERVE_P flag set.  This
		 check makes sure that we do not walk past a landing pad
		 visited earlier, which would result in possibly quadratic
		 behaviour.  */
	      if (LABEL_P (insn) && LABEL_PRESERVE_P (insn)
		  && visited.add (insn))
2514 | break; |
2515 | |
	      /* Conservatively assume that an ASM insn may be empty.  We
		 have no way to tell what it contains.  */
2518 | if (active_insn_p (insn) |
2519 | && GET_CODE (PATTERN (insn)) != ASM_INPUT |
2520 | && GET_CODE (PATTERN (insn)) != ASM_OPERANDS) |
2521 | break; |
2522 | |
	      /* If we reached the note that starts the cold section, then
		 a NOP will be needed at the start of that section.  */
2525 | if (GET_CODE (insn) == NOTE |
2526 | && NOTE_KIND (insn) == NOTE_INSN_SWITCH_TEXT_SECTIONS) |
2527 | { |
2528 | emit_insn_after (gen_nop (), insn); |
2529 | break; |
2530 | } |
2531 | |
	      /* We visit only labels from the cold section.  We should
		 never hit the beginning of the insn stream here.  */
2534 | insn = PREV_INSN (insn); |
2535 | } |
2536 | } |
2537 | } |
2538 | return false; |
2539 | } |
2540 | |
2541 | /* Turn REG_EH_REGION notes back into NOTE_INSN_EH_REGION notes. |
2542 | The new note numbers will not refer to region numbers, but |
2543 | instead to call site entries. */ |
2544 | |
2545 | static unsigned int |
2546 | convert_to_eh_region_ranges (void) |
2547 | { |
2548 | rtx insn; |
2549 | rtx_insn *iter; |
2550 | rtx_note *note; |
2551 | action_hash_type ar_hash (31); |
2552 | int last_action = -3; |
2553 | rtx_insn *last_action_insn = NULL; |
2554 | rtx last_landing_pad = NULL_RTX; |
2555 | rtx_insn *first_no_action_insn = NULL; |
2556 | int call_site = 0; |
2557 | int cur_sec = 0; |
2558 | rtx_insn *section_switch_note = NULL; |
2559 | rtx_insn *first_no_action_insn_before_switch = NULL; |
2560 | rtx_insn *last_no_action_insn_before_switch = NULL; |
2561 | int saved_call_site_base = call_site_base; |
2562 | |
  vec_alloc (crtl->eh.action_record_data, 64);
2564 | |
  for (iter = get_insns (); iter ; iter = NEXT_INSN (iter))
2566 | if (INSN_P (iter)) |
2567 | { |
2568 | eh_landing_pad lp; |
2569 | eh_region region; |
2570 | bool nothrow; |
2571 | int this_action; |
2572 | rtx_code_label *this_landing_pad; |
2573 | |
2574 | insn = iter; |
2575 | if (NONJUMP_INSN_P (insn) |
2576 | && GET_CODE (PATTERN (insn)) == SEQUENCE) |
2577 | insn = XVECEXP (PATTERN (insn), 0, 0); |
2578 | |
2579 | nothrow = get_eh_region_and_lp_from_rtx (insn, pr: ®ion, plp: &lp); |
2580 | if (nothrow) |
2581 | continue; |
2582 | if (region) |
	  this_action = collect_one_action_chain (&ar_hash, region);
2584 | else |
2585 | this_action = -1; |
2586 | |
2587 | /* Existence of catch handlers, or must-not-throw regions |
2588 | implies that an lsda is needed (even if empty). */ |
2589 | if (this_action != -1) |
2590 | crtl->uses_eh_lsda = 1; |
2591 | |
2592 | /* Delay creation of region notes for no-action regions |
2593 | until we're sure that an lsda will be required. */ |
2594 | else if (last_action == -3) |
2595 | { |
2596 | first_no_action_insn = iter; |
2597 | last_action = -1; |
2598 | } |
2599 | |
2600 | if (this_action >= 0) |
2601 | this_landing_pad = lp->landing_pad; |
2602 | else |
2603 | this_landing_pad = NULL; |
2604 | |
2605 | /* Differing actions or landing pads implies a change in call-site |
2606 | info, which implies some EH_REGION note should be emitted. */ |
2607 | if (last_action != this_action |
2608 | || last_landing_pad != this_landing_pad) |
2609 | { |
2610 | /* If there is a queued no-action region in the other section |
2611 | with hot/cold partitioning, emit it now. */ |
2612 | if (first_no_action_insn_before_switch) |
2613 | { |
2614 | gcc_assert (this_action != -1 |
2615 | && last_action == (first_no_action_insn |
2616 | ? -1 : -3)); |
		call_site = add_call_site (NULL_RTX, 0, 0);
2618 | note = emit_note_before (NOTE_INSN_EH_REGION_BEG, |
2619 | first_no_action_insn_before_switch); |
2620 | NOTE_EH_HANDLER (note) = call_site; |
2621 | note |
		  = emit_note_eh_region_end (last_no_action_insn_before_switch);
2623 | NOTE_EH_HANDLER (note) = call_site; |
2624 | gcc_assert (last_action != -3 |
2625 | || (last_action_insn |
2626 | == last_no_action_insn_before_switch)); |
2627 | first_no_action_insn_before_switch = NULL; |
2628 | last_no_action_insn_before_switch = NULL; |
2629 | call_site_base++; |
2630 | } |
2631 | /* If we'd not seen a previous action (-3) or the previous |
2632 | action was must-not-throw (-2), then we do not need an |
2633 | end note. */ |
2634 | if (last_action >= -1) |
2635 | { |
2636 | /* If we delayed the creation of the begin, do it now. */ |
2637 | if (first_no_action_insn) |
2638 | { |
		    call_site = add_call_site (NULL_RTX, 0, cur_sec);
2640 | note = emit_note_before (NOTE_INSN_EH_REGION_BEG, |
2641 | first_no_action_insn); |
2642 | NOTE_EH_HANDLER (note) = call_site; |
2643 | first_no_action_insn = NULL; |
2644 | } |
2645 | |
		note = emit_note_eh_region_end (last_action_insn);
2647 | NOTE_EH_HANDLER (note) = call_site; |
2648 | } |
2649 | |
2650 | /* If the new action is must-not-throw, then no region notes |
2651 | are created. */ |
2652 | if (this_action >= -1) |
2653 | { |
		call_site = add_call_site (this_landing_pad,
					   this_action < 0 ? 0 : this_action,
					   cur_sec);
2657 | note = emit_note_before (NOTE_INSN_EH_REGION_BEG, iter); |
2658 | NOTE_EH_HANDLER (note) = call_site; |
2659 | } |
2660 | |
2661 | last_action = this_action; |
2662 | last_landing_pad = this_landing_pad; |
2663 | } |
2664 | last_action_insn = iter; |
2665 | } |
2666 | else if (NOTE_P (iter) |
2667 | && NOTE_KIND (iter) == NOTE_INSN_SWITCH_TEXT_SECTIONS) |
2668 | { |
2669 | gcc_assert (section_switch_note == NULL_RTX); |
2670 | gcc_assert (flag_reorder_blocks_and_partition); |
2671 | section_switch_note = iter; |
2672 | if (first_no_action_insn) |
2673 | { |
2674 | first_no_action_insn_before_switch = first_no_action_insn; |
2675 | last_no_action_insn_before_switch = last_action_insn; |
2676 | first_no_action_insn = NULL; |
2677 | gcc_assert (last_action == -1); |
2678 | last_action = -3; |
2679 | } |
2680 | /* Force closing of current EH region before section switch and |
2681 | opening a new one afterwards. */ |
2682 | else if (last_action != -3) |
2683 | last_landing_pad = pc_rtx; |
2684 | if (crtl->eh.call_site_record_v[cur_sec]) |
2685 | call_site_base += crtl->eh.call_site_record_v[cur_sec]->length (); |
2686 | cur_sec++; |
2687 | gcc_assert (crtl->eh.call_site_record_v[cur_sec] == NULL); |
	vec_alloc (crtl->eh.call_site_record_v[cur_sec], 10);
2689 | } |
2690 | |
2691 | if (last_action >= -1 && ! first_no_action_insn) |
2692 | { |
      note = emit_note_eh_region_end (last_action_insn);
2694 | NOTE_EH_HANDLER (note) = call_site; |
2695 | } |
2696 | |
2697 | call_site_base = saved_call_site_base; |
2698 | |
2699 | return 0; |
2700 | } |
2701 | |
2702 | namespace { |
2703 | |
2704 | const pass_data pass_data_convert_to_eh_region_ranges = |
2705 | { |
  RTL_PASS, /* type */
  "eh_ranges", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
2715 | }; |
2716 | |
2717 | class pass_convert_to_eh_region_ranges : public rtl_opt_pass |
2718 | { |
2719 | public: |
2720 | pass_convert_to_eh_region_ranges (gcc::context *ctxt) |
2721 | : rtl_opt_pass (pass_data_convert_to_eh_region_ranges, ctxt) |
2722 | {} |
2723 | |
2724 | /* opt_pass methods: */ |
2725 | bool gate (function *) final override; |
2726 | unsigned int execute (function *) final override |
2727 | { |
2728 | int ret = convert_to_eh_region_ranges (); |
2729 | maybe_add_nop_after_section_switch (); |
2730 | return ret; |
2731 | } |
2732 | |
2733 | }; // class pass_convert_to_eh_region_ranges |
2734 | |
2735 | bool |
2736 | pass_convert_to_eh_region_ranges::gate (function *) |
2737 | { |
2738 | /* Nothing to do for SJLJ exceptions or if no regions created. */ |
2739 | if (cfun->eh->region_tree == NULL) |
2740 | return false; |
2741 | if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) |
2742 | return false; |
2743 | return true; |
2744 | } |
2745 | |
2746 | } // anon namespace |
2747 | |
2748 | rtl_opt_pass * |
2749 | make_pass_convert_to_eh_region_ranges (gcc::context *ctxt) |
2750 | { |
2751 | return new pass_convert_to_eh_region_ranges (ctxt); |
2752 | } |
2753 | |
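/* Append VALUE to *DATA_AREA as an unsigned LEB128 datum: seven bits
   per byte, least significant group first, with the high bit set on
   every byte except the last.  For example, 624485 (0x98765) encodes
   as the three bytes 0xe5 0x8e 0x26.  */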
2754 | static void |
2755 | push_uleb128 (vec<uchar, va_gc> **data_area, unsigned int value) |
2756 | { |
2757 | do |
2758 | { |
2759 | unsigned char byte = value & 0x7f; |
2760 | value >>= 7; |
2761 | if (value) |
2762 | byte |= 0x80; |
      vec_safe_push (*data_area, byte);
2764 | } |
2765 | while (value); |
2766 | } |
2767 | |
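/* Likewise, but encode the signed VALUE, using bit 0x40 of the final
   byte as the sign.  For example, -123456 encodes as the three bytes
   0xc0 0xbb 0x78.  */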
2768 | static void |
2769 | push_sleb128 (vec<uchar, va_gc> **data_area, int value) |
2770 | { |
2771 | unsigned char byte; |
2772 | int more; |
2773 | |
2774 | do |
2775 | { |
2776 | byte = value & 0x7f; |
2777 | value >>= 7; |
2778 | more = ! ((value == 0 && (byte & 0x40) == 0) |
2779 | || (value == -1 && (byte & 0x40) != 0)); |
2780 | if (more) |
2781 | byte |= 0x80; |
      vec_safe_push (*data_area, byte);
2783 | } |
2784 | while (more); |
2785 | } |
2786 | |
2787 | |
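/* Return the size in bytes of the call-site table for SECTION in the
   DWARF2 layout used when .uleb128 is not available: three 4-byte
   fields per record plus a uleb128-encoded action offset.  */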
2788 | static int |
2789 | dw2_size_of_call_site_table (int section) |
2790 | { |
2791 | int n = vec_safe_length (crtl->eh.call_site_record_v[section]); |
2792 | int size = n * (4 + 4 + 4); |
2793 | int i; |
2794 | |
2795 | for (i = 0; i < n; ++i) |
2796 | { |
2797 | struct call_site_record_d *cs = |
2798 | (*crtl->eh.call_site_record_v[section])[i]; |
2799 | size += size_of_uleb128 (cs->action); |
2800 | } |
2801 | |
2802 | return size; |
2803 | } |
2804 | |
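/* Likewise for the SJLJ layout, where each record is a uleb128-encoded
   landing-pad value followed by a uleb128-encoded action offset.  */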
2805 | static int |
2806 | sjlj_size_of_call_site_table (void) |
2807 | { |
2808 | int n = vec_safe_length (crtl->eh.call_site_record_v[0]); |
2809 | int size = 0; |
2810 | int i; |
2811 | |
2812 | for (i = 0; i < n; ++i) |
2813 | { |
2814 | struct call_site_record_d *cs = |
2815 | (*crtl->eh.call_site_record_v[0])[i]; |
2816 | size += size_of_uleb128 (INTVAL (cs->landing_pad)); |
2817 | size += size_of_uleb128 (cs->action); |
2818 | } |
2819 | |
2820 | return size; |
2821 | } |
2822 | |
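/* Output the call-site table for SECTION, emitting each record's region
   start, length and landing pad as deltas (in CS_FORMAT) from the
   appropriate section begin label.  */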
2823 | static void |
2824 | dw2_output_call_site_table (int cs_format, int section) |
2825 | { |
2826 | int n = vec_safe_length (crtl->eh.call_site_record_v[section]); |
2827 | int i; |
2828 | const char *begin; |
2829 | |
2830 | if (section == 0) |
2831 | begin = current_function_func_begin_label; |
2832 | else if (first_function_block_is_cold) |
2833 | begin = crtl->subsections.hot_section_label; |
2834 | else |
2835 | begin = crtl->subsections.cold_section_label; |
2836 | |
2837 | for (i = 0; i < n; ++i) |
2838 | { |
2839 | struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[section])[i]; |
2840 | char reg_start_lab[32]; |
2841 | char reg_end_lab[32]; |
2842 | char landing_pad_lab[32]; |
2843 | |
      ASM_GENERATE_INTERNAL_LABEL (reg_start_lab, "LEHB", call_site_base + i);
      ASM_GENERATE_INTERNAL_LABEL (reg_end_lab, "LEHE", call_site_base + i);

      if (cs->landing_pad)
	ASM_GENERATE_INTERNAL_LABEL (landing_pad_lab, "L",
				     CODE_LABEL_NUMBER (cs->landing_pad));
2850 | |
2851 | /* ??? Perhaps use insn length scaling if the assembler supports |
2852 | generic arithmetic. */ |
2853 | /* ??? Perhaps use attr_length to choose data1 or data2 instead of |
2854 | data4 if the function is small enough. */ |
2855 | if (cs_format == DW_EH_PE_uleb128) |
2856 | { |
	  dw2_asm_output_delta_uleb128 (reg_start_lab, begin,
					"region %d start", i);
	  dw2_asm_output_delta_uleb128 (reg_end_lab, reg_start_lab,
					"length");
	  if (cs->landing_pad)
	    dw2_asm_output_delta_uleb128 (landing_pad_lab, begin,
					  "landing pad");
	  else
	    dw2_asm_output_data_uleb128 (0, "landing pad");
	}
      else
	{
	  dw2_asm_output_delta (4, reg_start_lab, begin,
				"region %d start", i);
	  dw2_asm_output_delta (4, reg_end_lab, reg_start_lab, "length");
	  if (cs->landing_pad)
	    dw2_asm_output_delta (4, landing_pad_lab, begin,
				  "landing pad");
	  else
	    dw2_asm_output_data (4, 0, "landing pad");
	}
      dw2_asm_output_data_uleb128 (cs->action, "action");
2879 | } |
2880 | |
2881 | call_site_base += n; |
2882 | } |
2883 | |
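/* Likewise for the SJLJ scheme, emitting just the landing-pad value
   and action offset recorded for each call site.  */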
2884 | static void |
2885 | sjlj_output_call_site_table (void) |
2886 | { |
2887 | int n = vec_safe_length (crtl->eh.call_site_record_v[0]); |
2888 | int i; |
2889 | |
2890 | for (i = 0; i < n; ++i) |
2891 | { |
2892 | struct call_site_record_d *cs = (*crtl->eh.call_site_record_v[0])[i]; |
2893 | |
      dw2_asm_output_data_uleb128 (INTVAL (cs->landing_pad),
				   "region %d landing pad", i);
      dw2_asm_output_data_uleb128 (cs->action, "action");
2897 | } |
2898 | |
2899 | call_site_base += n; |
2900 | } |
2901 | |
2902 | /* Switch to the section that should be used for exception tables. */ |
2903 | |
2904 | static void |
2905 | switch_to_exception_section (const char * ARG_UNUSED (fnname)) |
2906 | { |
2907 | section *s; |
2908 | |
2909 | if (exception_section) |
2910 | s = exception_section; |
2911 | else |
2912 | { |
2913 | int flags; |
2914 | |
2915 | if (EH_TABLES_CAN_BE_READ_ONLY) |
2916 | { |
2917 | int tt_format = |
2918 | ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1); |
2919 | flags = ((! flag_pic |
2920 | || ((tt_format & 0x70) != DW_EH_PE_absptr |
2921 | && (tt_format & 0x70) != DW_EH_PE_aligned)) |
2922 | ? 0 : SECTION_WRITE); |
2923 | } |
2924 | else |
2925 | flags = SECTION_WRITE; |
2926 | |
2927 | /* Compute the section and cache it into exception_section, |
2928 | unless it depends on the function name. */ |
2929 | if (targetm_common.have_named_sections) |
2930 | { |
2931 | #ifdef HAVE_LD_EH_GC_SECTIONS |
2932 | if (flag_function_sections |
2933 | || (DECL_COMDAT_GROUP (current_function_decl) && HAVE_COMDAT_GROUP)) |
2934 | { |
2935 | char *section_name = XNEWVEC (char, strlen (fnname) + 32); |
2936 | /* The EH table must match the code section, so only mark |
2937 | it linkonce if we have COMDAT groups to tie them together. */ |
2938 | if (DECL_COMDAT_GROUP (current_function_decl) && HAVE_COMDAT_GROUP) |
2939 | flags |= SECTION_LINKONCE; |
	      sprintf (section_name, ".gcc_except_table.%s", fnname);
	      s = get_section (section_name, flags, current_function_decl);
	      free (section_name);
2943 | } |
2944 | else |
2945 | #endif |
2946 | exception_section |
2947 | = s = get_section (".gcc_except_table" , flags, NULL); |
2948 | } |
2949 | else |
2950 | exception_section |
2951 | = s = flags == SECTION_WRITE ? data_section : readonly_data_section; |
2952 | } |
2953 | |
2954 | switch_to_section (s); |
2955 | } |
2956 | |
2957 | /* Output a reference from an exception table to the type_info object TYPE. |
2958 | TT_FORMAT and TT_FORMAT_SIZE describe the DWARF encoding method used for |
2959 | the value. */ |
2960 | |
2961 | static void |
2962 | output_ttype (tree type, int tt_format, int tt_format_size) |
2963 | { |
2964 | rtx value; |
2965 | bool is_public = true; |
2966 | |
2967 | if (type == NULL_TREE) |
2968 | value = const0_rtx; |
2969 | else |
2970 | { |
2971 | /* FIXME lto. pass_ipa_free_lang_data changes all types to |
2972 | runtime types so TYPE should already be a runtime type |
2973 | reference. When pass_ipa_free_lang data is made a default |
2974 | pass, we can then remove the call to lookup_type_for_runtime |
2975 | below. */ |
2976 | if (TYPE_P (type)) |
2977 | type = lookup_type_for_runtime (type); |
2978 | |
      value = expand_expr (type, NULL_RTX, VOIDmode, EXPAND_INITIALIZER);
2980 | |
2981 | /* Let cgraph know that the rtti decl is used. Not all of the |
2982 | paths below go through assemble_integer, which would take |
2983 | care of this for us. */ |
2984 | STRIP_NOPS (type); |
2985 | if (TREE_CODE (type) == ADDR_EXPR) |
2986 | { |
2987 | type = TREE_OPERAND (type, 0); |
2988 | if (VAR_P (type)) |
2989 | is_public = TREE_PUBLIC (type); |
2990 | } |
2991 | else |
2992 | gcc_assert (TREE_CODE (type) == INTEGER_CST); |
2993 | } |
2994 | |
2995 | /* Allow the target to override the type table entry format. */ |
2996 | if (targetm.asm_out.ttype (value)) |
2997 | return; |
2998 | |
2999 | if (tt_format == DW_EH_PE_absptr || tt_format == DW_EH_PE_aligned) |
3000 | assemble_integer (value, tt_format_size, |
3001 | tt_format_size * BITS_PER_UNIT, 1); |
3002 | else |
3003 | dw2_asm_output_encoded_addr_rtx (tt_format, value, is_public, NULL); |
3004 | } |
3005 | |
3006 | /* Output an exception table for the current function according to SECTION. |
3007 | |
3008 | If the function has been partitioned into hot and cold parts, value 0 for |
3009 | SECTION refers to the table associated with the hot part while value 1 |
3010 | refers to the table associated with the cold part. If the function has |
3011 | not been partitioned, value 0 refers to the single exception table. */ |
3012 | |
3013 | static void |
3014 | output_one_function_exception_table (int section) |
3015 | { |
3016 | int tt_format, cs_format, lp_format, i; |
3017 | char ttype_label[32]; |
3018 | char cs_after_size_label[32]; |
3019 | char cs_end_label[32]; |
3020 | int call_site_len; |
3021 | int have_tt_data; |
3022 | int tt_format_size = 0; |
3023 | |
3024 | have_tt_data = (vec_safe_length (cfun->eh->ttype_data) |
3025 | || (targetm.arm_eabi_unwinder |
3026 | ? vec_safe_length (cfun->eh->ehspec_data.arm_eabi) |
3027 | : vec_safe_length (cfun->eh->ehspec_data.other))); |
3028 | |
3029 | /* Indicate the format of the @TType entries. */ |
3030 | if (! have_tt_data) |
3031 | tt_format = DW_EH_PE_omit; |
3032 | else |
3033 | { |
3034 | tt_format = ASM_PREFERRED_EH_DATA_FORMAT (/*code=*/0, /*global=*/1); |
3035 | if (HAVE_AS_LEB128) |
3036 | ASM_GENERATE_INTERNAL_LABEL (ttype_label, |
3037 | section ? "LLSDATTC" : "LLSDATT" , |
3038 | current_function_funcdef_no); |
3039 | |
3040 | tt_format_size = size_of_encoded_value (tt_format); |
3041 | |
3042 | assemble_align (tt_format_size * BITS_PER_UNIT); |
3043 | } |
3044 | |
  targetm.asm_out.internal_label (asm_out_file, section ? "LLSDAC" : "LLSDA",
3046 | current_function_funcdef_no); |
3047 | |
3048 | /* The LSDA header. */ |
3049 | |
3050 | /* Indicate the format of the landing pad start pointer. An omitted |
3051 | field implies @LPStart == @Start. */ |
3052 | /* Currently we always put @LPStart == @Start. This field would |
3053 | be most useful in moving the landing pads completely out of |
3054 | line to another section, but it could also be used to minimize |
3055 | the size of uleb128 landing pad offsets. */ |
3056 | lp_format = DW_EH_PE_omit; |
  dw2_asm_output_data (1, lp_format, "@LPStart format (%s)",
3058 | eh_data_format_name (lp_format)); |
3059 | |
3060 | /* @LPStart pointer would go here. */ |
3061 | |
  dw2_asm_output_data (1, tt_format, "@TType format (%s)",
3063 | eh_data_format_name (tt_format)); |
3064 | |
3065 | if (!HAVE_AS_LEB128) |
3066 | { |
3067 | if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) |
3068 | call_site_len = sjlj_size_of_call_site_table (); |
3069 | else |
3070 | call_site_len = dw2_size_of_call_site_table (section); |
3071 | } |
3072 | |
3073 | /* A pc-relative 4-byte displacement to the @TType data. */ |
3074 | if (have_tt_data) |
3075 | { |
3076 | if (HAVE_AS_LEB128) |
3077 | { |
3078 | char ttype_after_disp_label[32]; |
3079 | ASM_GENERATE_INTERNAL_LABEL (ttype_after_disp_label, |
3080 | section ? "LLSDATTDC" : "LLSDATTD" , |
3081 | current_function_funcdef_no); |
3082 | dw2_asm_output_delta_uleb128 (ttype_label, ttype_after_disp_label, |
3083 | "@TType base offset" ); |
3084 | ASM_OUTPUT_LABEL (asm_out_file, ttype_after_disp_label); |
3085 | } |
3086 | else |
3087 | { |
3088 | /* Ug. Alignment queers things. */ |
3089 | unsigned int before_disp, after_disp, last_disp, disp; |
3090 | |
3091 | before_disp = 1 + 1; |
3092 | after_disp = (1 + size_of_uleb128 (call_site_len) |
3093 | + call_site_len |
3094 | + vec_safe_length (crtl->eh.action_record_data) |
3095 | + (vec_safe_length (cfun->eh->ttype_data) |
3096 | * tt_format_size)); |
3097 | |
3098 | disp = after_disp; |
3099 | do |
3100 | { |
3101 | unsigned int disp_size, pad; |
3102 | |
3103 | last_disp = disp; |
3104 | disp_size = size_of_uleb128 (disp); |
3105 | pad = before_disp + disp_size + after_disp; |
3106 | if (pad % tt_format_size) |
3107 | pad = tt_format_size - (pad % tt_format_size); |
3108 | else |
3109 | pad = 0; |
3110 | disp = after_disp + pad; |
3111 | } |
3112 | while (disp != last_disp); |
3113 | |
      dw2_asm_output_data_uleb128 (disp, "@TType base offset");
3115 | } |
3116 | } |
3117 | |
3118 | /* Indicate the format of the call-site offsets. */ |
3119 | if (HAVE_AS_LEB128) |
3120 | cs_format = DW_EH_PE_uleb128; |
3121 | else |
3122 | cs_format = DW_EH_PE_udata4; |
3123 | |
  dw2_asm_output_data (1, cs_format, "call-site format (%s)",
3125 | eh_data_format_name (cs_format)); |
3126 | |
3127 | if (HAVE_AS_LEB128) |
3128 | { |
3129 | ASM_GENERATE_INTERNAL_LABEL (cs_after_size_label, |
3130 | section ? "LLSDACSBC" : "LLSDACSB" , |
3131 | current_function_funcdef_no); |
3132 | ASM_GENERATE_INTERNAL_LABEL (cs_end_label, |
3133 | section ? "LLSDACSEC" : "LLSDACSE" , |
3134 | current_function_funcdef_no); |
3135 | dw2_asm_output_delta_uleb128 (cs_end_label, cs_after_size_label, |
3136 | "Call-site table length" ); |
3137 | ASM_OUTPUT_LABEL (asm_out_file, cs_after_size_label); |
3138 | if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) |
3139 | sjlj_output_call_site_table (); |
3140 | else |
3141 | dw2_output_call_site_table (cs_format, section); |
3142 | ASM_OUTPUT_LABEL (asm_out_file, cs_end_label); |
3143 | } |
3144 | else |
3145 | { |
      dw2_asm_output_data_uleb128 (call_site_len, "Call-site table length");
3147 | if (targetm_common.except_unwind_info (&global_options) == UI_SJLJ) |
3148 | sjlj_output_call_site_table (); |
3149 | else |
3150 | dw2_output_call_site_table (cs_format, section); |
3151 | } |
3152 | |
3153 | /* ??? Decode and interpret the data for flag_debug_asm. */ |
3154 | { |
3155 | uchar uc; |
3156 | FOR_EACH_VEC_ELT (*crtl->eh.action_record_data, i, uc) |
      dw2_asm_output_data (1, uc, i ? NULL : "Action record table");
3158 | } |
3159 | |
3160 | if (have_tt_data) |
3161 | assemble_align (tt_format_size * BITS_PER_UNIT); |
3162 | |
3163 | i = vec_safe_length (cfun->eh->ttype_data); |
3164 | while (i-- > 0) |
3165 | { |
3166 | tree type = (*cfun->eh->ttype_data)[i]; |
3167 | output_ttype (type, tt_format, tt_format_size); |
3168 | } |
3169 | |
3170 | if (HAVE_AS_LEB128 && have_tt_data) |
3171 | ASM_OUTPUT_LABEL (asm_out_file, ttype_label); |
3172 | |
3173 | /* ??? Decode and interpret the data for flag_debug_asm. */ |
3174 | if (targetm.arm_eabi_unwinder) |
3175 | { |
3176 | tree type; |
3177 | for (i = 0; |
	   vec_safe_iterate (cfun->eh->ehspec_data.arm_eabi, i, &type); ++i)
3179 | output_ttype (type, tt_format, tt_format_size); |
3180 | } |
3181 | else |
3182 | { |
3183 | uchar uc; |
3184 | for (i = 0; |
	   vec_safe_iterate (cfun->eh->ehspec_data.other, i, &uc); ++i)
	dw2_asm_output_data (1, uc,
			     i ? NULL : "Exception specification table");
3188 | } |
3189 | } |
3190 | |
3191 | /* Output an exception table for the current function according to SECTION, |
3192 | switching back and forth from the function section appropriately. |
3193 | |
3194 | If the function has been partitioned into hot and cold parts, value 0 for |
3195 | SECTION refers to the table associated with the hot part while value 1 |
3196 | refers to the table associated with the cold part. If the function has |
3197 | not been partitioned, value 0 refers to the single exception table. */ |
3198 | |
3199 | void |
3200 | output_function_exception_table (int section) |
3201 | { |
3202 | const char *fnname = get_fnname_from_decl (current_function_decl); |
3203 | rtx personality = get_personality_function (current_function_decl); |
3204 | |
3205 | /* Not all functions need anything. */ |
3206 | if (!crtl->uses_eh_lsda |
3207 | || targetm_common.except_unwind_info (&global_options) == UI_NONE) |
3208 | return; |
3209 | |
3210 | /* No need to emit any boilerplate stuff for the cold part. */ |
3211 | if (section == 1 && !crtl->eh.call_site_record_v[1]) |
3212 | return; |
3213 | |
3214 | if (personality) |
3215 | { |
3216 | assemble_external_libcall (personality); |
3217 | |
3218 | if (targetm.asm_out.emit_except_personality) |
3219 | targetm.asm_out.emit_except_personality (personality); |
3220 | } |
3221 | |
3222 | switch_to_exception_section (fnname); |
3223 | |
3224 | /* If the target wants a label to begin the table, emit it here. */ |
3225 | targetm.asm_out.emit_except_table_label (asm_out_file); |
3226 | |
3227 | /* Do the real work. */ |
3228 | output_one_function_exception_table (section); |
3229 | |
3230 | switch_to_section (current_function_section ()); |
3231 | } |
3232 | |
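/* Set the throw statement table of function FUN to TABLE.  */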
3233 | void |
3234 | set_eh_throw_stmt_table (function *fun, hash_map<gimple *, int> *table) |
3235 | { |
3236 | fun->eh->throw_stmt_table = table; |
3237 | } |
3238 | |
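/* Return the throw statement table of function FUN.  */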
3239 | hash_map<gimple *, int> * |
3240 | get_eh_throw_stmt_table (struct function *fun) |
3241 | { |
3242 | return fun->eh->throw_stmt_table; |
3243 | } |
3244 | |
3245 | /* Determine if the function needs an EH personality function. */ |
3246 | |
3247 | enum eh_personality_kind |
3248 | function_needs_eh_personality (struct function *fn) |
3249 | { |
3250 | enum eh_personality_kind kind = eh_personality_none; |
3251 | eh_region i; |
3252 | |
3253 | FOR_ALL_EH_REGION_FN (i, fn) |
3254 | { |
3255 | switch (i->type) |
3256 | { |
3257 | case ERT_CLEANUP: |
3258 | /* Can do with any personality including the generic C one. */ |
3259 | kind = eh_personality_any; |
3260 | break; |
3261 | |
3262 | case ERT_TRY: |
3263 | case ERT_ALLOWED_EXCEPTIONS: |
	  /* Always needs an EH personality function.  The generic C
3265 | personality doesn't handle these even for empty type lists. */ |
3266 | return eh_personality_lang; |
3267 | |
3268 | case ERT_MUST_NOT_THROW: |
          /* Always needs an EH personality function. The language may
             specify which abort routine must be used, e.g. std::terminate. */
3271 | return eh_personality_lang; |
3272 | } |
3273 | } |
3274 | |
3275 | return kind; |
3276 | } |
3277 | |
3278 | /* Dump EH information to OUT. */ |
3279 | |
3280 | void |
3281 | dump_eh_tree (FILE * out, struct function *fun) |
3282 | { |
3283 | eh_region i; |
3284 | int depth = 0; |
3285 | static const char *const type_name[] = { |
3286 | "cleanup" , "try" , "allowed_exceptions" , "must_not_throw" |
3287 | }; |
3288 | |
3289 | i = fun->eh->region_tree; |
3290 | if (!i) |
3291 | return; |
3292 | |
3293 | fprintf (stream: out, format: "Eh tree:\n" ); |
3294 | while (1) |
3295 | { |
3296 | fprintf (stream: out, format: " %*s %i %s" , depth * 2, "" , |
3297 | i->index, type_name[(int) i->type]); |
3298 | |
3299 | if (i->landing_pads) |
3300 | { |
3301 | eh_landing_pad lp; |
3302 | |
3303 | fprintf (stream: out, format: " land:" ); |
3304 | if (current_ir_type () == IR_GIMPLE) |
3305 | { |
3306 | for (lp = i->landing_pads; lp ; lp = lp->next_lp) |
3307 | { |
3308 | fprintf (stream: out, format: "{%i," , lp->index); |
3309 | print_generic_expr (out, lp->post_landing_pad); |
3310 | fputc (c: '}', stream: out); |
3311 | if (lp->next_lp) |
3312 | fputc (c: ',', stream: out); |
3313 | } |
3314 | } |
3315 | else |
3316 | { |
3317 | for (lp = i->landing_pads; lp ; lp = lp->next_lp) |
3318 | { |
3319 | fprintf (stream: out, format: "{%i," , lp->index); |
3320 | if (lp->landing_pad) |
3321 | fprintf (stream: out, format: "%i%s," , INSN_UID (insn: lp->landing_pad), |
3322 | NOTE_P (lp->landing_pad) ? "(del)" : "" ); |
3323 | else |
3324 | fprintf (stream: out, format: "(nil)," ); |
3325 | if (lp->post_landing_pad) |
3326 | { |
3327 | rtx_insn *lab = label_rtx (lp->post_landing_pad); |
3328 | fprintf (stream: out, format: "%i%s}" , INSN_UID (insn: lab), |
3329 | NOTE_P (lab) ? "(del)" : "" ); |
3330 | } |
3331 | else |
3332 | fprintf (stream: out, format: "(nil)}" ); |
3333 | if (lp->next_lp) |
3334 | fputc (c: ',', stream: out); |
3335 | } |
3336 | } |
3337 | } |
3338 | |
3339 | switch (i->type) |
3340 | { |
3341 | case ERT_CLEANUP: |
3342 | case ERT_MUST_NOT_THROW: |
3343 | break; |
3344 | |
3345 | case ERT_TRY: |
3346 | { |
3347 | eh_catch c; |
3348 | fprintf (stream: out, format: " catch:" ); |
3349 | for (c = i->u.eh_try.first_catch; c; c = c->next_catch) |
3350 | { |
3351 | fputc (c: '{', stream: out); |
3352 | if (c->label) |
3353 | { |
3354 | fprintf (stream: out, format: "lab:" ); |
3355 | print_generic_expr (out, c->label); |
3356 | fputc (c: ';', stream: out); |
3357 | } |
3358 | print_generic_expr (out, c->type_list); |
3359 | fputc (c: '}', stream: out); |
3360 | if (c->next_catch) |
3361 | fputc (c: ',', stream: out); |
3362 | } |
3363 | } |
3364 | break; |
3365 | |
3366 | case ERT_ALLOWED_EXCEPTIONS: |
3367 | fprintf (stream: out, format: " filter :%i types:" , i->u.allowed.filter); |
3368 | print_generic_expr (out, i->u.allowed.type_list); |
3369 | break; |
3370 | } |
      fputc ('\n', out);
3372 | |
3373 | /* If there are sub-regions, process them. */ |
3374 | if (i->inner) |
3375 | i = i->inner, depth++; |
3376 | /* If there are peers, process them. */ |
3377 | else if (i->next_peer) |
3378 | i = i->next_peer; |
3379 | /* Otherwise, step back up the tree to the next peer. */ |
3380 | else |
3381 | { |
3382 | do |
3383 | { |
3384 | i = i->outer; |
3385 | depth--; |
3386 | if (i == NULL) |
3387 | return; |
3388 | } |
3389 | while (i->next_peer == NULL); |
3390 | i = i->next_peer; |
3391 | } |
3392 | } |
3393 | } |
3394 | |
3395 | /* Dump the EH tree for FN on stderr. */ |
3396 | |
3397 | DEBUG_FUNCTION void |
3398 | debug_eh_tree (struct function *fn) |
3399 | { |
  dump_eh_tree (stderr, fn);
3401 | } |
3402 | |
/* Verify invariants on EH data structures. */
3404 | |
3405 | DEBUG_FUNCTION void |
3406 | verify_eh_tree (struct function *fun) |
3407 | { |
3408 | eh_region r, outer; |
3409 | int nvisited_lp, nvisited_r; |
3410 | int count_lp, count_r, depth, i; |
3411 | eh_landing_pad lp; |
3412 | bool err = false; |
3413 | |
3414 | if (!fun->eh->region_tree) |
3415 | return; |
3416 | |
3417 | count_r = 0; |
  for (i = 1; vec_safe_iterate (fun->eh->region_array, i, &r); ++i)
3419 | if (r) |
3420 | { |
3421 | if (r->index == i) |
3422 | count_r++; |
3423 | else |
3424 | { |
3425 | error ("%<region_array%> is corrupted for region %i" , r->index); |
3426 | err = true; |
3427 | } |
3428 | } |
3429 | |
3430 | count_lp = 0; |
  for (i = 1; vec_safe_iterate (fun->eh->lp_array, i, &lp); ++i)
3432 | if (lp) |
3433 | { |
3434 | if (lp->index == i) |
3435 | count_lp++; |
3436 | else |
3437 | { |
3438 | error ("%<lp_array%> is corrupted for lp %i" , lp->index); |
3439 | err = true; |
3440 | } |
3441 | } |
3442 | |
3443 | depth = nvisited_lp = nvisited_r = 0; |
3444 | outer = NULL; |
3445 | r = fun->eh->region_tree; |
3446 | while (1) |
3447 | { |
3448 | if ((*fun->eh->region_array)[r->index] != r) |
3449 | { |
3450 | error ("%<region_array%> is corrupted for region %i" , r->index); |
3451 | err = true; |
3452 | } |
3453 | if (r->outer != outer) |
3454 | { |
3455 | error ("outer block of region %i is wrong" , r->index); |
3456 | err = true; |
3457 | } |
3458 | if (depth < 0) |
3459 | { |
3460 | error ("negative nesting depth of region %i" , r->index); |
3461 | err = true; |
3462 | } |
3463 | nvisited_r++; |
3464 | |
3465 | for (lp = r->landing_pads; lp ; lp = lp->next_lp) |
3466 | { |
3467 | if ((*fun->eh->lp_array)[lp->index] != lp) |
3468 | { |
3469 | error ("%<lp_array%> is corrupted for lp %i" , lp->index); |
3470 | err = true; |
3471 | } |
3472 | if (lp->region != r) |
3473 | { |
3474 | error ("region of lp %i is wrong" , lp->index); |
3475 | err = true; |
3476 | } |
3477 | nvisited_lp++; |
3478 | } |
3479 | |
3480 | if (r->inner) |
3481 | outer = r, r = r->inner, depth++; |
3482 | else if (r->next_peer) |
3483 | r = r->next_peer; |
3484 | else |
3485 | { |
3486 | do |
3487 | { |
3488 | r = r->outer; |
3489 | if (r == NULL) |
3490 | goto region_done; |
3491 | depth--; |
3492 | outer = r->outer; |
3493 | } |
3494 | while (r->next_peer == NULL); |
3495 | r = r->next_peer; |
3496 | } |
3497 | } |
3498 | region_done: |
3499 | if (depth != 0) |
3500 | { |
3501 | error ("tree list ends on depth %i" , depth); |
3502 | err = true; |
3503 | } |
3504 | if (count_r != nvisited_r) |
3505 | { |
3506 | error ("%<region_array%> does not match %<region_tree%>" ); |
3507 | err = true; |
3508 | } |
3509 | if (count_lp != nvisited_lp) |
3510 | { |
3511 | error ("%<lp_array%> does not match %<region_tree%>" ); |
3512 | err = true; |
3513 | } |
3514 | |
3515 | if (err) |
3516 | { |
3517 | dump_eh_tree (stderr, fun); |
3518 | internal_error ("%qs failed" , __func__); |
3519 | } |
3520 | } |
3521 | |
3522 | #include "gt-except.h" |
3523 | |