/* Search for references that a function loads or stores.
2 | Copyright (C) 2020-2023 Free Software Foundation, Inc. |
3 | Contributed by David Cepelik and Jan Hubicka |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify it under |
8 | the terms of the GNU General Public License as published by the Free |
9 | Software Foundation; either version 3, or (at your option) any later |
10 | version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
13 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
14 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
15 | for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | /* Mod/ref pass records summary about loads and stores performed by the |
22 | function. This is later used by alias analysis to disambiguate memory |
23 | accesses across function calls. |
24 | |
   This file contains a tree pass and an IPA pass.  Both perform the same
   analysis; however, the tree pass is executed during early and late
   optimization passes to propagate info downwards in the compilation order.
   The IPA pass propagates across the callgraph, is able to handle recursion,
   and works on the whole program during link-time analysis.
30 | |
   LTO mode differs from the local mode by not recording alias sets but types
   that are translated to alias sets later.  This is necessary in order to
   stream the information, because the alias sets are rebuilt at stream-in
   time and may not correspond to ones seen during analysis.  For this reason
   part of the analysis is duplicated.
36 | |
37 | The following information is computed |
38 | 1) load/store access tree described in ipa-modref-tree.h |
39 | This is used by tree-ssa-alias to disambiguate load/stores |
40 | 2) EAF flags used by points-to analysis (in tree-ssa-structalias). |
41 | and defined in tree-core.h. |
42 | and stored to optimization_summaries. |
43 | |
44 | There are multiple summaries computed and used during the propagation: |
45 | - summaries holds summaries from analysis to IPA propagation |
46 | time. |
47 | - summaries_lto is same as summaries but holds them in a format |
48 | that can be streamed (as described above). |
     - fnspec_summary holds fnspec strings for calls.  This is
       necessary because gimple_call_fnspec performs additional
       analysis beyond looking at the callee fndecl.
52 | - escape_summary holds escape points for given call edge. |
53 | That is a vector recording what function parameters |
54 | may escape to a function call (and with what parameter index). */ |
55 | |
56 | #include "config.h" |
57 | #include "system.h" |
58 | #include "coretypes.h" |
59 | #include "backend.h" |
60 | #include "tree.h" |
61 | #include "gimple.h" |
62 | #include "alloc-pool.h" |
63 | #include "tree-pass.h" |
64 | #include "gimple-iterator.h" |
65 | #include "tree-dfa.h" |
66 | #include "cgraph.h" |
67 | #include "ipa-utils.h" |
68 | #include "symbol-summary.h" |
69 | #include "gimple-pretty-print.h" |
70 | #include "gimple-walk.h" |
71 | #include "print-tree.h" |
72 | #include "tree-streamer.h" |
73 | #include "alias.h" |
74 | #include "calls.h" |
75 | #include "ipa-modref-tree.h" |
76 | #include "ipa-modref.h" |
77 | #include "value-range.h" |
78 | #include "ipa-prop.h" |
79 | #include "ipa-fnsummary.h" |
80 | #include "attr-fnspec.h" |
81 | #include "symtab-clones.h" |
82 | #include "gimple-ssa.h" |
83 | #include "tree-phinodes.h" |
84 | #include "tree-ssa-operands.h" |
85 | #include "ssa-iterators.h" |
86 | #include "stringpool.h" |
87 | #include "tree-ssanames.h" |
88 | #include "attribs.h" |
89 | #include "tree-cfg.h" |
90 | #include "tree-eh.h" |
91 | |
92 | |
93 | namespace { |
94 | |
/* We record fnspec specifiers for call edges since they depend on actual
   gimple statements.  */
97 | |
/* Per-call-edge summary carrying the fnspec string of the call.
   The summary owns the malloc'd string and releases it on destruction.  */

class fnspec_summary
{
public:
  /* Malloc'd fnspec string, or NULL when none was recorded yet.  */
  char *fnspec = NULL;

  fnspec_summary () = default;

  ~fnspec_summary ()
  {
    /* free (NULL) is a no-op, so no guard is needed.  */
    free (fnspec);
  }
};
113 | |
/* Summary holding fnspec string for a given call.  */

class fnspec_summaries_t : public call_summary <fnspec_summary *>
{
public:
  fnspec_summaries_t (symbol_table *symtab)
  : call_summary <fnspec_summary *> (symtab) {}
  /* Hook that is called by summary when an edge is duplicated.
     Deep-copy the string so each edge owns independent memory.
     NOTE(review): xstrdup does not accept NULL; this relies on summaries
     being created only for edges that actually have a fnspec — confirm.  */
  void duplicate (cgraph_edge *,
		  cgraph_edge *,
		  fnspec_summary *src,
		  fnspec_summary *dst) final override
  {
    dst->fnspec = xstrdup (src->fnspec);
  }
};
130 | |
131 | static fnspec_summaries_t *fnspec_summaries = NULL; |
132 | |
/* Escape summary holds a vector of param indexes that escape to
   a given call.  */
struct escape_entry
{
  /* Parameter that escapes at a given call.  Presumably may also hold the
     special negative MODREF_*_PARM indexes (e.g. static chain) — confirm
     against ipa-modref-tree.h.  */
  int parm_index;
  /* Argument it escapes to.  */
  unsigned int arg;
  /* Minimal flags known about the argument.  */
  eaf_flags_t min_flags;
  /* Does it escape directly or indirectly?  */
  bool direct;
};
146 | |
147 | /* Dump EAF flags. */ |
148 | |
149 | static void |
150 | dump_eaf_flags (FILE *out, int flags, bool newline = true) |
151 | { |
152 | if (flags & EAF_UNUSED) |
153 | fprintf (stream: out, format: " unused" ); |
154 | if (flags & EAF_NO_DIRECT_CLOBBER) |
155 | fprintf (stream: out, format: " no_direct_clobber" ); |
156 | if (flags & EAF_NO_INDIRECT_CLOBBER) |
157 | fprintf (stream: out, format: " no_indirect_clobber" ); |
158 | if (flags & EAF_NO_DIRECT_ESCAPE) |
159 | fprintf (stream: out, format: " no_direct_escape" ); |
160 | if (flags & EAF_NO_INDIRECT_ESCAPE) |
161 | fprintf (stream: out, format: " no_indirect_escape" ); |
162 | if (flags & EAF_NOT_RETURNED_DIRECTLY) |
163 | fprintf (stream: out, format: " not_returned_directly" ); |
164 | if (flags & EAF_NOT_RETURNED_INDIRECTLY) |
165 | fprintf (stream: out, format: " not_returned_indirectly" ); |
166 | if (flags & EAF_NO_DIRECT_READ) |
167 | fprintf (stream: out, format: " no_direct_read" ); |
168 | if (flags & EAF_NO_INDIRECT_READ) |
169 | fprintf (stream: out, format: " no_indirect_read" ); |
170 | if (newline) |
171 | fprintf (stream: out, format: "\n" ); |
172 | } |
173 | |
174 | struct escape_summary |
175 | { |
176 | auto_vec <escape_entry> esc; |
177 | void dump (FILE *out) |
178 | { |
179 | for (unsigned int i = 0; i < esc.length (); i++) |
180 | { |
181 | fprintf (stream: out, format: " parm %i arg %i %s min:" , |
182 | esc[i].parm_index, |
183 | esc[i].arg, |
184 | esc[i].direct ? "(direct)" : "(indirect)" ); |
185 | dump_eaf_flags (out, flags: esc[i].min_flags, newline: false); |
186 | } |
187 | fprintf (stream: out, format: "\n" ); |
188 | } |
189 | }; |
190 | |
/* Call summary mapping each call edge to its escape_summary.  */

class escape_summaries_t : public call_summary <escape_summary *>
{
public:
  escape_summaries_t (symbol_table *symtab)
  : call_summary <escape_summary *> (symtab) {}
  /* Hook that is called by summary when an edge is duplicated.
     Deep-copy the escape vector so both edges own their data.  */
  void duplicate (cgraph_edge *,
		  cgraph_edge *,
		  escape_summary *src,
		  escape_summary *dst) final override
  {
    dst->esc = src->esc.copy ();
  }
};
205 | |
206 | static escape_summaries_t *escape_summaries = NULL; |
207 | |
208 | } /* ANON namespace: GTY annotated summaries can not be anonymous. */ |
209 | |
210 | |
/* Class (from which there is one global instance) that holds modref summaries
   for all analyzed functions.  */

class GTY((user)) modref_summaries
  : public fast_function_summary <modref_summary *, va_gc>
{
public:
  modref_summaries (symbol_table *symtab)
      : fast_function_summary <modref_summary *, va_gc> (symtab) {}
  /* Hook run when a summary is attached to a new node; defined later.  */
  void insert (cgraph_node *, modref_summary *state) final override;
  /* Hook run when a node (and its summary) is duplicated; defined later.  */
  void duplicate (cgraph_node *src_node,
		  cgraph_node *dst_node,
		  modref_summary *src_data,
		  modref_summary *dst_data) final override;
  /* The class is GTY((user)), so instances must be allocated in GGC
     memory via this factory rather than plain operator new.  */
  static modref_summaries *create_ggc (symbol_table *symtab)
  {
    return new (ggc_alloc_no_dtor<modref_summaries> ())
	     modref_summaries (symtab);
  }
};
231 | |
232 | class modref_summary_lto; |
233 | |
/* Class (from which there is one global instance) that holds LTO-streamable
   modref summaries for all analyzed functions.  */

class GTY((user)) modref_summaries_lto
  : public fast_function_summary <modref_summary_lto *, va_gc>
{
public:
  modref_summaries_lto (symbol_table *symtab)
      : fast_function_summary <modref_summary_lto *, va_gc> (symtab),
	propagated (false) {}
  /* Hook run when a summary is attached to a new node; defined later.  */
  void insert (cgraph_node *, modref_summary_lto *state) final override;
  /* Hook run when a node (and its summary) is duplicated; defined later.  */
  void duplicate (cgraph_node *src_node,
		  cgraph_node *dst_node,
		  modref_summary_lto *src_data,
		  modref_summary_lto *dst_data) final override;
  /* GGC factory; see modref_summaries::create_ggc.  */
  static modref_summaries_lto *create_ggc (symbol_table *symtab)
  {
    return new (ggc_alloc_no_dtor<modref_summaries_lto> ())
	     modref_summaries_lto (symtab);
  }
  /* True once IPA propagation has run and the summaries are final.  */
  bool propagated;
};
256 | |
257 | /* Global variable holding all modref summaries |
258 | (from analysis to IPA propagation time). */ |
259 | |
260 | static GTY(()) fast_function_summary <modref_summary *, va_gc> |
261 | *summaries; |
262 | |
263 | /* Global variable holding all modref optimization summaries |
264 | (from IPA propagation time or used by local optimization pass). */ |
265 | |
266 | static GTY(()) fast_function_summary <modref_summary *, va_gc> |
267 | *optimization_summaries; |
268 | |
269 | /* LTO summaries hold info from analysis to LTO streaming or from LTO |
270 | stream-in through propagation to LTO stream-out. */ |
271 | |
272 | static GTY(()) fast_function_summary <modref_summary_lto *, va_gc> |
273 | *summaries_lto; |
274 | |
/* Construct an empty summary: no load/store records and all flags clear.  */

modref_summary::modref_summary ()
  : loads (NULL), stores (NULL), retslot_flags (0), static_chain_flags (0),
    writes_errno (false), side_effects (false), nondeterministic (false),
    calls_interposable (false), global_memory_read (false),
    global_memory_written (false), try_dse (false)
{
}
284 | |
/* Release the GGC-allocated load/store trees owned by the summary.  */

modref_summary::~modref_summary ()
{
  if (loads)
    ggc_delete (loads);
  if (stores)
    ggc_delete (stores);
}
292 | |
/* Remove all flags from EAF_FLAGS that are implied by ECF_FLAGS and not
   useful to track.  If RETURNS_VOID is true moreover clear
   EAF_NOT_RETURNED.  */
static int
remove_useless_eaf_flags (int eaf_flags, int ecf_flags, bool returns_void)
{
  /* The else-if chain is intentional: ECF_CONST implies the pure flags
     too, so only the strongest applicable set is cleared.  */
  if (ecf_flags & (ECF_CONST | ECF_NOVOPS))
    eaf_flags &= ~implicit_const_eaf_flags;
  else if (ecf_flags & ECF_PURE)
    eaf_flags &= ~implicit_pure_eaf_flags;
  else if ((ecf_flags & ECF_NORETURN) || returns_void)
    /* Nothing is ever returned, so "not returned" carries no info.  */
    eaf_flags &= ~(EAF_NOT_RETURNED_DIRECTLY | EAF_NOT_RETURNED_INDIRECTLY);
  return eaf_flags;
}
307 | |
308 | /* Return true if FLAGS holds some useful information. */ |
309 | |
310 | static bool |
311 | eaf_flags_useful_p (vec <eaf_flags_t> &flags, int ecf_flags) |
312 | { |
313 | for (unsigned i = 0; i < flags.length (); i++) |
314 | if (remove_useless_eaf_flags (eaf_flags: flags[i], ecf_flags, returns_void: false)) |
315 | return true; |
316 | return false; |
317 | } |
318 | |
/* Return true if summary is potentially useful for optimization.
   If CHECK_FLAGS is false assume that arg_flags are useful.  */

bool
modref_summary::useful_p (int ecf_flags, bool check_flags)
{
  if (arg_flags.length () && !check_flags)
    return true;
  if (check_flags && eaf_flags_useful_p (arg_flags, ecf_flags))
    return true;
  /* Flags proved useless above; drop them to save memory.  Note this
     mutates the summary as a side effect of the query.  */
  arg_flags.release ();
  if (check_flags && remove_useless_eaf_flags (retslot_flags, ecf_flags, false))
    return true;
  if (check_flags
      && remove_useless_eaf_flags (static_chain_flags, ecf_flags, false))
    return true;
  /* For const functions the load/store records add nothing; the summary is
     only useful if it can refine looping const/pure functions.  */
  if (ecf_flags & (ECF_CONST | ECF_NOVOPS))
    return ((!side_effects || !nondeterministic)
	    && (ecf_flags & ECF_LOOPING_CONST_OR_PURE));
  if (loads && !loads->every_base)
    return true;
  else
    /* If every load is possible, kills are not useful either.  */
    kills.release ();
  if (ecf_flags & ECF_PURE)
    return ((!side_effects || !nondeterministic)
	    && (ecf_flags & ECF_LOOPING_CONST_OR_PURE));
  return stores && !stores->every_base;
}
347 | |
/* Single function summary used for LTO.  */

typedef modref_tree <tree> modref_records_lto;
struct GTY(()) modref_summary_lto
{
  /* Load and stores in functions using types rather then alias sets.

     This is necessary to make the information streamable for LTO but is also
     more verbose and thus more likely to hit the limits.  */
  modref_records_lto *loads;
  modref_records_lto *stores;
  /* Stores known to happen unconditionally (not streamed; GTY-skipped).  */
  auto_vec<modref_access_node> GTY((skip)) kills;
  /* Per-parameter EAF flags (not streamed; GTY-skipped).  */
  auto_vec<eaf_flags_t> GTY((skip)) arg_flags;
  /* EAF flags of the return slot and the static chain, if any.  */
  eaf_flags_t retslot_flags;
  eaf_flags_t static_chain_flags;
  unsigned writes_errno : 1;
  unsigned side_effects : 1;
  unsigned nondeterministic : 1;
  unsigned calls_interposable : 1;

  modref_summary_lto ();
  ~modref_summary_lto ();
  void dump (FILE *);
  bool useful_p (int ecf_flags, bool check_flags = true);
};
373 | |
/* Construct an empty LTO summary: no records and all flags clear.  */

modref_summary_lto::modref_summary_lto ()
  : loads (NULL), stores (NULL), retslot_flags (0), static_chain_flags (0),
    writes_errno (false), side_effects (false), nondeterministic (false),
    calls_interposable (false)
{
}
382 | |
/* Release the GGC-allocated load/store trees owned by the summary.  */

modref_summary_lto::~modref_summary_lto ()
{
  if (loads)
    ggc_delete (loads);
  if (stores)
    ggc_delete (stores);
}
390 | |
391 | |
/* Return true if lto summary is potentially useful for optimization.
   If CHECK_FLAGS is false assume that arg_flags are useful.
   Mirrors modref_summary::useful_p; keep the two in sync.  */

bool
modref_summary_lto::useful_p (int ecf_flags, bool check_flags)
{
  if (arg_flags.length () && !check_flags)
    return true;
  if (check_flags && eaf_flags_useful_p (arg_flags, ecf_flags))
    return true;
  /* Flags proved useless above; drop them to save memory.  */
  arg_flags.release ();
  if (check_flags && remove_useless_eaf_flags (retslot_flags, ecf_flags, false))
    return true;
  if (check_flags
      && remove_useless_eaf_flags (static_chain_flags, ecf_flags, false))
    return true;
  if (ecf_flags & (ECF_CONST | ECF_NOVOPS))
    return ((!side_effects || !nondeterministic)
	    && (ecf_flags & ECF_LOOPING_CONST_OR_PURE));
  if (loads && !loads->every_base)
    return true;
  else
    /* If every load is possible, kills are not useful either.  */
    kills.release ();
  if (ecf_flags & ECF_PURE)
    return ((!side_effects || !nondeterministic)
	    && (ecf_flags & ECF_LOOPING_CONST_OR_PURE));
  return stores && !stores->every_base;
}
420 | |
/* Dump records TT to OUT.  Walks the base -> ref -> access tree of
   alias sets, honoring the "every_*" collapse markers at each level.  */

static void
dump_records (modref_records *tt, FILE *out)
{
  if (tt->every_base)
    {
      fprintf (out, "    Every base\n");
      return;
    }
  size_t i;
  modref_base_node <alias_set_type> *n;
  FOR_EACH_VEC_SAFE_ELT (tt->bases, i, n)
    {
      fprintf (out, "      Base %i: alias set %i\n", (int)i, n->base);
      if (n->every_ref)
	{
	  fprintf (out, "      Every ref\n");
	  continue;
	}
      size_t j;
      modref_ref_node <alias_set_type> *r;
      FOR_EACH_VEC_SAFE_ELT (n->refs, j, r)
	{
	  fprintf (out, "        Ref %i: alias set %i\n", (int)j, r->ref);
	  if (r->every_access)
	    {
	      fprintf (out, "          Every access\n");
	      continue;
	    }
	  size_t k;
	  modref_access_node *a;
	  FOR_EACH_VEC_SAFE_ELT (r->accesses, k, a)
	    {
	      fprintf (out, "          access:");
	      a->dump (out);
	    }
	}
    }
}
461 | |
/* Dump LTO records TT to OUT.  Same shape as dump_records, but the tree
   is keyed by types; the corresponding alias set is printed alongside
   (0 when the type is NULL, i.e. the wildcard entry).  */

static void
dump_lto_records (modref_records_lto *tt, FILE *out)
{
  if (tt->every_base)
    {
      fprintf (out, "    Every base\n");
      return;
    }
  size_t i;
  modref_base_node <tree> *n;
  FOR_EACH_VEC_SAFE_ELT (tt->bases, i, n)
    {
      fprintf (out, "      Base %i:", (int)i);
      print_generic_expr (out, n->base);
      fprintf (out, " (alias set %i)\n",
	       n->base ? get_alias_set (n->base) : 0);
      if (n->every_ref)
	{
	  fprintf (out, "      Every ref\n");
	  continue;
	}
      size_t j;
      modref_ref_node <tree> *r;
      FOR_EACH_VEC_SAFE_ELT (n->refs, j, r)
	{
	  fprintf (out, "        Ref %i:", (int)j);
	  print_generic_expr (out, r->ref);
	  fprintf (out, " (alias set %i)\n",
		   r->ref ? get_alias_set (r->ref) : 0);
	  if (r->every_access)
	    {
	      fprintf (out, "          Every access\n");
	      continue;
	    }
	  size_t k;
	  modref_access_node *a;
	  FOR_EACH_VEC_SAFE_ELT (r->accesses, k, a)
	    {
	      fprintf (out, "          access:");
	      a->dump (out);
	    }
	}
    }
}
508 | |
/* Dump all escape points and fnspec strings of NODE's call edges to OUT,
   recursing into inlined callees; DEPTH controls indentation.  */

static void
dump_modref_edge_summaries (FILE *out, cgraph_node *node, int depth)
{
  int i = 0;
  if (!escape_summaries)
    return;
  for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
    {
      class escape_summary *sum = escape_summaries->get (e);
      if (sum)
	{
	  fprintf (out, "%*sIndirect call %i in %s escapes:",
		   depth, "", i, node->dump_name ());
	  sum->dump (out);
	}
      i++;
    }
  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
    {
      /* !inline_failed means the edge was inlined; dump the inlined
	 body's edges too, indented one level deeper.  */
      if (!e->inline_failed)
	dump_modref_edge_summaries (out, e->callee, depth + 1);
      class escape_summary *sum = escape_summaries->get (e);
      if (sum)
	{
	  fprintf (out, "%*sCall %s->%s escapes:", depth, "",
		   node->dump_name (), e->callee->dump_name ());
	  sum->dump (out);
	}
      /* NOTE(review): fnspec_summaries is assumed non-NULL whenever
	 escape_summaries is; they appear to be created together.  */
      class fnspec_summary *fsum = fnspec_summaries->get (e);
      if (fsum)
	{
	  fprintf (out, "%*sCall %s->%s fnspec: %s\n", depth, "",
		   node->dump_name (), e->callee->dump_name (),
		   fsum->fnspec);
	}
    }
}
548 | |
/* Remove all call edge summaries associated with NODE, recursing into
   inlined callees so their edges are cleaned up as well.  */

static void
remove_modref_edge_summaries (cgraph_node *node)
{
  if (!escape_summaries)
    return;
  for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee)
    escape_summaries->remove (e);
  for (cgraph_edge *e = node->callees; e; e = e->next_callee)
    {
      if (!e->inline_failed)
	remove_modref_edge_summaries (e->callee);
      escape_summaries->remove (e);
      fnspec_summaries->remove (e);
    }
}
566 | |
/* Dump summary to OUT: load/store trees, kills, boolean properties and
   all EAF flag sets that carry information.  */

void
modref_summary::dump (FILE *out) const
{
  if (loads)
    {
      fprintf (out, "  loads:\n");
      dump_records (loads, out);
    }
  if (stores)
    {
      fprintf (out, "  stores:\n");
      dump_records (stores, out);
    }
  if (kills.length ())
    {
      fprintf (out, "  kills:\n");
      for (auto kill : kills)
	{
	  fprintf (out, "    ");
	  kill.dump (out);
	}
    }
  if (writes_errno)
    fprintf (out, "  Writes errno\n");
  if (side_effects)
    fprintf (out, "  Side effects\n");
  if (nondeterministic)
    fprintf (out, "  Nondeterministic\n");
  if (calls_interposable)
    fprintf (out, "  Calls interposable\n");
  if (global_memory_read)
    fprintf (out, "  Global memory read\n");
  if (global_memory_written)
    fprintf (out, "  Global memory written\n");
  if (try_dse)
    fprintf (out, "  Try dse\n");
  if (arg_flags.length ())
    {
      for (unsigned int i = 0; i < arg_flags.length (); i++)
	if (arg_flags[i])
	  {
	    fprintf (out, "  parm %i flags:", i);
	    dump_eaf_flags (out, arg_flags[i]);
	  }
    }
  if (retslot_flags)
    {
      fprintf (out, "  Retslot flags:");
      dump_eaf_flags (out, retslot_flags);
    }
  if (static_chain_flags)
    {
      fprintf (out, "  Static chain flags:");
      dump_eaf_flags (out, static_chain_flags);
    }
}
625 | |
/* Dump LTO summary to OUT.  Unlike modref_summary::dump, loads/stores are
   assumed non-NULL here and are dumped unconditionally.  */

void
modref_summary_lto::dump (FILE *out)
{
  fprintf (out, "  loads:\n");
  dump_lto_records (loads, out);
  fprintf (out, "  stores:\n");
  dump_lto_records (stores, out);
  if (kills.length ())
    {
      fprintf (out, "  kills:\n");
      for (auto kill : kills)
	{
	  fprintf (out, "    ");
	  kill.dump (out);
	}
    }
  if (writes_errno)
    fprintf (out, "  Writes errno\n");
  if (side_effects)
    fprintf (out, "  Side effects\n");
  if (nondeterministic)
    fprintf (out, "  Nondeterministic\n");
  if (calls_interposable)
    fprintf (out, "  Calls interposable\n");
  if (arg_flags.length ())
    {
      for (unsigned int i = 0; i < arg_flags.length (); i++)
	if (arg_flags[i])
	  {
	    fprintf (out, "  parm %i flags:", i);
	    dump_eaf_flags (out, arg_flags[i]);
	  }
    }
  if (retslot_flags)
    {
      fprintf (out, "  Retslot flags:");
      dump_eaf_flags (out, retslot_flags);
    }
  if (static_chain_flags)
    {
      fprintf (out, "  Static chain flags:");
      dump_eaf_flags (out, static_chain_flags);
    }
}
672 | |
673 | /* Called after summary is produced and before it is used by local analysis. |
674 | Can be called multiple times in case summary needs to update signature. |
675 | FUN is decl of function summary is attached to. */ |
676 | void |
677 | modref_summary::finalize (tree fun) |
678 | { |
679 | global_memory_read = !loads || loads->global_access_p (); |
680 | global_memory_written = !stores || stores->global_access_p (); |
681 | |
682 | /* We can do DSE if we know function has no side effects and |
683 | we can analyze all stores. Disable dse if there are too many |
684 | stores to try. */ |
685 | if (side_effects || global_memory_written || writes_errno) |
686 | try_dse = false; |
687 | else |
688 | { |
689 | try_dse = true; |
690 | size_t i, j, k; |
691 | int num_tests = 0, max_tests |
692 | = opt_for_fn (fun, param_modref_max_tests); |
693 | modref_base_node <alias_set_type> *base_node; |
694 | modref_ref_node <alias_set_type> *ref_node; |
695 | modref_access_node *access_node; |
696 | FOR_EACH_VEC_SAFE_ELT (stores->bases, i, base_node) |
697 | { |
698 | if (base_node->every_ref) |
699 | { |
700 | try_dse = false; |
701 | break; |
702 | } |
703 | FOR_EACH_VEC_SAFE_ELT (base_node->refs, j, ref_node) |
704 | { |
705 | if (base_node->every_ref) |
706 | { |
707 | try_dse = false; |
708 | break; |
709 | } |
710 | FOR_EACH_VEC_SAFE_ELT (ref_node->accesses, k, access_node) |
711 | if (num_tests++ > max_tests |
712 | || !access_node->parm_offset_known) |
713 | { |
714 | try_dse = false; |
715 | break; |
716 | } |
717 | if (!try_dse) |
718 | break; |
719 | } |
720 | if (!try_dse) |
721 | break; |
722 | } |
723 | } |
724 | if (loads->every_base) |
725 | load_accesses = 1; |
726 | else |
727 | { |
728 | load_accesses = 0; |
729 | for (auto base_node : loads->bases) |
730 | { |
731 | if (base_node->every_ref) |
732 | load_accesses++; |
733 | else |
734 | for (auto ref_node : base_node->refs) |
735 | if (ref_node->every_access) |
736 | load_accesses++; |
737 | else |
738 | load_accesses += ref_node->accesses->length (); |
739 | } |
740 | } |
741 | } |
742 | |
743 | /* Get function summary for FUNC if it exists, return NULL otherwise. */ |
744 | |
745 | modref_summary * |
746 | get_modref_function_summary (cgraph_node *func) |
747 | { |
748 | /* Avoid creation of the summary too early (e.g. when front-end calls us). */ |
749 | if (!optimization_summaries) |
750 | return NULL; |
751 | |
752 | /* A single function body may be represented by multiple symbols with |
753 | different visibility. For example, if FUNC is an interposable alias, |
754 | we don't want to return anything, even if we have summary for the target |
755 | function. */ |
756 | enum availability avail; |
757 | func = func->ultimate_alias_target |
758 | (availability: &avail, ref: current_function_decl ? |
759 | cgraph_node::get (decl: current_function_decl) : NULL); |
760 | if (avail <= AVAIL_INTERPOSABLE) |
761 | return NULL; |
762 | |
763 | modref_summary *r = optimization_summaries->get (node: func); |
764 | return r; |
765 | } |
766 | |
767 | /* Get function summary for CALL if it exists, return NULL otherwise. |
768 | If non-null set interposed to indicate whether function may not |
769 | bind to current def. In this case sometimes loads from function |
770 | needs to be ignored. */ |
771 | |
772 | modref_summary * |
773 | get_modref_function_summary (gcall *call, bool *interposed) |
774 | { |
775 | tree callee = gimple_call_fndecl (gs: call); |
776 | if (!callee) |
777 | return NULL; |
778 | struct cgraph_node *node = cgraph_node::get (decl: callee); |
779 | if (!node) |
780 | return NULL; |
781 | modref_summary *r = get_modref_function_summary (func: node); |
782 | if (interposed && r) |
783 | *interposed = r->calls_interposable |
784 | || !node->binds_to_current_def_p (); |
785 | return r; |
786 | } |
787 | |
788 | |
789 | namespace { |
790 | |
791 | /* Return true if ECF flags says that nondeterminism can be ignored. */ |
792 | |
793 | static bool |
794 | ignore_nondeterminism_p (tree caller, int flags) |
795 | { |
796 | if (flags & (ECF_CONST | ECF_PURE)) |
797 | return true; |
798 | if ((flags & (ECF_NORETURN | ECF_NOTHROW)) == (ECF_NORETURN | ECF_NOTHROW) |
799 | || (!opt_for_fn (caller, flag_exceptions) && (flags & ECF_NORETURN))) |
800 | return true; |
801 | return false; |
802 | } |
803 | |
804 | /* Return true if ECF flags says that return value can be ignored. */ |
805 | |
806 | static bool |
807 | ignore_retval_p (tree caller, int flags) |
808 | { |
809 | if ((flags & (ECF_NORETURN | ECF_NOTHROW)) == (ECF_NORETURN | ECF_NOTHROW) |
810 | || (!opt_for_fn (caller, flag_exceptions) && (flags & ECF_NORETURN))) |
811 | return true; |
812 | return false; |
813 | } |
814 | |
815 | /* Return true if ECF flags says that stores can be ignored. */ |
816 | |
817 | static bool |
818 | ignore_stores_p (tree caller, int flags) |
819 | { |
820 | if (flags & (ECF_PURE | ECF_CONST | ECF_NOVOPS)) |
821 | return true; |
822 | if ((flags & (ECF_NORETURN | ECF_NOTHROW)) == (ECF_NORETURN | ECF_NOTHROW) |
823 | || (!opt_for_fn (caller, flag_exceptions) && (flags & ECF_NORETURN))) |
824 | return true; |
825 | return false; |
826 | } |
827 | |
/* Determine parm_map for PTR which is supposed to be a pointer: map the
   SSA name back to the caller-visible entity it is based on (a parameter,
   the static chain, provably local/readonly memory, memory from a malloc
   call, or unknown) together with the known constant offset.  */

modref_parm_map
parm_map_for_ptr (tree op)
{
  bool offset_known;
  poly_int64 offset;
  struct modref_parm_map parm_map;
  gcall *call;

  parm_map.parm_offset_known = false;
  parm_map.parm_offset = 0;

  /* Strip offset adjustments, accumulating the constant offset.  */
  offset_known = unadjusted_ptr_and_unit_offset (op, &op, &offset);
  if (TREE_CODE (op) == SSA_NAME
      && SSA_NAME_IS_DEFAULT_DEF (op)
      && TREE_CODE (SSA_NAME_VAR (op)) == PARM_DECL)
    {
      int index = 0;

      if (cfun->static_chain_decl
	  && op == ssa_default_def (cfun, cfun->static_chain_decl))
	index = MODREF_STATIC_CHAIN_PARM;
      else
	/* Compute the positional index of the PARM_DECL by walking
	   DECL_ARGUMENTS until it is found.  */
	for (tree t = DECL_ARGUMENTS (current_function_decl);
	     t != SSA_NAME_VAR (op); t = DECL_CHAIN (t))
	  index++;
      parm_map.parm_index = index;
      parm_map.parm_offset_known = offset_known;
      parm_map.parm_offset = offset;
    }
  else if (points_to_local_or_readonly_memory_p (op))
    parm_map.parm_index = MODREF_LOCAL_MEMORY_PARM;
  /* Memory allocated in the function is not visible to caller before the
     call and thus we do not need to record it as load/stores/kills.  */
  else if (TREE_CODE (op) == SSA_NAME
	   && (call = dyn_cast<gcall *>(SSA_NAME_DEF_STMT (op))) != NULL
	   && gimple_call_flags (call) & ECF_MALLOC)
    parm_map.parm_index = MODREF_LOCAL_MEMORY_PARM;
  else
    parm_map.parm_index = MODREF_UNKNOWN_PARM;
  return parm_map;
}
871 | |
872 | /* Return true if ARG with EAF flags FLAGS can not make any caller's parameter |
873 | used (if LOAD is true we check loads, otherwise stores). */ |
874 | |
875 | static bool |
876 | verify_arg (tree arg, int flags, bool load) |
877 | { |
878 | if (flags & EAF_UNUSED) |
879 | return true; |
880 | if (load && (flags & EAF_NO_DIRECT_READ)) |
881 | return true; |
882 | if (!load |
883 | && (flags & (EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER)) |
884 | == (EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER)) |
885 | return true; |
886 | if (is_gimple_constant (t: arg)) |
887 | return true; |
888 | if (DECL_P (arg) && TREE_READONLY (arg)) |
889 | return true; |
890 | if (TREE_CODE (arg) == ADDR_EXPR) |
891 | { |
892 | tree t = get_base_address (TREE_OPERAND (arg, 0)); |
893 | if (is_gimple_constant (t)) |
894 | return true; |
895 | if (DECL_P (t) |
896 | && (TREE_READONLY (t) || TREE_CODE (t) == FUNCTION_DECL)) |
897 | return true; |
898 | } |
899 | return false; |
900 | } |
901 | |
/* Return true if STMT may access memory that is pointed to by parameters
   of caller and which is not seen as an escape by PTA.
   CALLEE_ECF_FLAGS are ECF flags of callee.  If LOAD is true then by access
   we mean load, otherwise we mean store.  */

static bool
may_access_nonescaping_parm_p (gcall *call, int callee_ecf_flags, bool load)
{
  int implicit_flags = 0;

  /* Fold in the EAF flags implied by the callee's ECF flags so that e.g.
     a pure callee is not treated as possibly storing.  */
  if (ignore_stores_p (current_function_decl, callee_ecf_flags))
    implicit_flags |= ignore_stores_eaf_flags;
  if (callee_ecf_flags & ECF_PURE)
    implicit_flags |= implicit_pure_eaf_flags;
  if (callee_ecf_flags & (ECF_CONST | ECF_NOVOPS))
    implicit_flags |= implicit_const_eaf_flags;
  /* Check the static chain and every argument; any of them failing
     verify_arg means caller memory may be reached.  */
  if (gimple_call_chain (call)
      && !verify_arg (gimple_call_chain (call),
		      gimple_call_static_chain_flags (call) | implicit_flags,
		      load))
    return true;
  for (unsigned int i = 0; i < gimple_call_num_args (call); i++)
    if (!verify_arg (gimple_call_arg (call, i),
		     gimple_call_arg_flags (call, i) | implicit_flags,
		     load))
      return true;
  return false;
}
930 | |
931 | |
/* Analyze memory accesses (loads, stores and kills) performed
   by the function.  Set also side_effects, calls_interposable
   and nondeterminism flags.  */

class modref_access_analysis
{
public:
  modref_access_analysis (bool ipa, modref_summary *summary,
			  modref_summary_lto *summary_lto)
  : m_summary (summary), m_summary_lto (summary_lto), m_ipa (ipa)
  {
  }
  /* Main entry point: walk the current function and fill in the
     summaries.  */
  void analyze ();
private:
  /* Flag setters; each returns true when the flag was newly set.  */
  bool set_side_effects ();
  bool set_nondeterministic ();
  /* Translate an ao_ref into an access node relative to function
     parameters.  */
  static modref_access_node get_access (ao_ref *ref);
  /* Record a single access into the non-LTO/LTO access trees.  */
  static void record_access (modref_records *, ao_ref *, modref_access_node &);
  static void record_access_lto (modref_records_lto *, ao_ref *,
				 modref_access_node &a);
  /* Return true if the access to a given expression should be stored.  */
  bool record_access_p (tree);
  /* Conservative fallbacks that collapse or widen the access trees.  */
  bool record_unknown_load ();
  bool record_unknown_store ();
  bool record_global_memory_load ();
  bool record_global_memory_store ();
  /* Merge a direct callee's summary into ours.  */
  bool merge_call_side_effects (gimple *, modref_summary *,
				cgraph_node *, bool);
  /* Helpers for calls whose behavior is described by a fnspec string.  */
  modref_access_node get_access_for_fnspec (gcall *, attr_fnspec &,
					    unsigned int, modref_parm_map &);
  void process_fnspec (gcall *);
  void analyze_call (gcall *);
  /* Callbacks for walk_stmt_load_store_ops.  */
  static bool analyze_load (gimple *, tree, tree, void *);
  static bool analyze_store (gimple *, tree, tree, void *);
  void analyze_stmt (gimple *, bool);
  /* Iterate self-recursive calls collected during analysis.  */
  void propagate ();

  /* Summary being computed.
     We work either with m_summary or m_summary_lto.  Never on both.  */
  modref_summary *m_summary;
  modref_summary_lto *m_summary_lto;
  /* Recursive calls needs simplistic dataflow after analysis finished.
     Collect all calls into this vector during analysis and later process
     them in propagate.  */
  auto_vec <gimple *, 32> m_recursive_calls;
  /* ECF flags of function being analyzed.  */
  int m_ecf_flags;
  /* True if IPA propagation will be done later.  */
  bool m_ipa;
  /* Set true if statement currently analyze is known to be
     executed each time function is called.  */
  bool m_always_executed;
};
984 | |
985 | /* Set side_effects flag and return if something changed. */ |
986 | |
987 | bool |
988 | modref_access_analysis::set_side_effects () |
989 | { |
990 | bool changed = false; |
991 | |
992 | if (m_summary && !m_summary->side_effects) |
993 | { |
994 | m_summary->side_effects = true; |
995 | changed = true; |
996 | } |
997 | if (m_summary_lto && !m_summary_lto->side_effects) |
998 | { |
999 | m_summary_lto->side_effects = true; |
1000 | changed = true; |
1001 | } |
1002 | return changed; |
1003 | } |
1004 | |
1005 | /* Set nondeterministic flag and return if something changed. */ |
1006 | |
1007 | bool |
1008 | modref_access_analysis::set_nondeterministic () |
1009 | { |
1010 | bool changed = false; |
1011 | |
1012 | if (m_summary && !m_summary->nondeterministic) |
1013 | { |
1014 | m_summary->side_effects = m_summary->nondeterministic = true; |
1015 | changed = true; |
1016 | } |
1017 | if (m_summary_lto && !m_summary_lto->nondeterministic) |
1018 | { |
1019 | m_summary_lto->side_effects = m_summary_lto->nondeterministic = true; |
1020 | changed = true; |
1021 | } |
1022 | return changed; |
1023 | } |
1024 | |
/* Construct modref_access_node from REF.  */

modref_access_node
modref_access_analysis::get_access (ao_ref *ref)
{
  tree base;

  base = ao_ref_base (ref);
  /* Start with an access covering REF's extent that is not associated
     with any parameter.  */
  modref_access_node a = {.offset: ref->offset, .size: ref->size, .max_size: ref->max_size,
			  .parm_offset: 0, .parm_index: MODREF_UNKNOWN_PARM, .parm_offset_known: false, .adjustments: 0};
  if (TREE_CODE (base) == MEM_REF || TREE_CODE (base) == TARGET_MEM_REF)
    {
      tree memref = base;
      /* Try to express the pointer in terms of a parameter of the
	 current function.  */
      modref_parm_map m = parm_map_for_ptr (TREE_OPERAND (base, 0));

      a.parm_index = m.parm_index;
      if (a.parm_index != MODREF_UNKNOWN_PARM && TREE_CODE (memref) == MEM_REF)
	{
	  /* Combine the MEM_REF constant offset with the offset known for
	     the parameter; if either is unknown the result is unknown.  */
	  a.parm_offset_known
	     = wi::to_poly_wide (TREE_OPERAND
				 (memref, 1)).to_shwi (r: &a.parm_offset);
	  if (a.parm_offset_known && m.parm_offset_known)
	    a.parm_offset += m.parm_offset;
	  else
	    a.parm_offset_known = false;
	}
    }
  else
    a.parm_index = MODREF_UNKNOWN_PARM;
  return a;
}
1056 | |
/* Record access into the modref_records data structure.  */

void
modref_access_analysis::record_access (modref_records *tt,
				       ao_ref *ref,
				       modref_access_node &a)
{
  /* When (IPA) strict aliasing is disabled, use alias set 0 which
     conflicts with everything.  */
  alias_set_type base_set = !flag_strict_aliasing
			    || !flag_ipa_strict_aliasing ? 0
			    : ao_ref_base_alias_set (ref);
  alias_set_type ref_set = !flag_strict_aliasing
			   || !flag_ipa_strict_aliasing ? 0
			   : (ao_ref_alias_set (ref));
  if (dump_file)
    {
      fprintf (stream: dump_file, format: " - Recording base_set=%i ref_set=%i ",
	       base_set, ref_set);
      a.dump (out: dump_file);
    }
  tt->insert (fndecl: current_function_decl, base: base_set, ref: ref_set, a, record_adjustments: false);
}
1078 | |
/* IPA version of record_access_tree.  Instead of alias sets it records
   the base and ref types so alias sets can be recomputed at stream-in.  */

void
modref_access_analysis::record_access_lto (modref_records_lto *tt, ao_ref *ref,
					   modref_access_node &a)
{
  /* get_alias_set sometimes use different type to compute the alias set
     than TREE_TYPE (base).  Do same adjustments.  */
  tree base_type = NULL_TREE, ref_type = NULL_TREE;
  if (flag_strict_aliasing && flag_ipa_strict_aliasing)
    {
      tree base;

      /* Strip handled components to get at the innermost base.  */
      base = ref->ref;
      while (handled_component_p (t: base))
	base = TREE_OPERAND (base, 0);

      base_type = reference_alias_ptr_type_1 (&base);

      /* A ref-all pointer type means alias set 0; represent that as
	 NULL_TREE.  */
      if (!base_type)
	base_type = TREE_TYPE (base);
      else
	base_type = TYPE_REF_CAN_ALIAS_ALL (base_type)
		    ? NULL_TREE : TREE_TYPE (base_type);

      tree ref_expr = ref->ref;
      ref_type = reference_alias_ptr_type_1 (&ref_expr);

      if (!ref_type)
	ref_type = TREE_TYPE (ref_expr);
      else
	ref_type = TYPE_REF_CAN_ALIAS_ALL (ref_type)
		   ? NULL_TREE : TREE_TYPE (ref_type);

      /* Sanity check that we are in sync with what get_alias_set does.  */
      gcc_checking_assert ((!base_type && !ao_ref_base_alias_set (ref))
			   || get_alias_set (base_type)
			      == ao_ref_base_alias_set (ref));
      gcc_checking_assert ((!ref_type && !ao_ref_alias_set (ref))
			   || get_alias_set (ref_type)
			      == ao_ref_alias_set (ref));

      /* Do not bother to record types that have no meaningful alias set.
	 Also skip variably modified types since these go to local streams.  */
      if (base_type && (!get_alias_set (base_type)
			|| variably_modified_type_p (base_type, NULL_TREE)))
	base_type = NULL_TREE;
      if (ref_type && (!get_alias_set (ref_type)
		       || variably_modified_type_p (ref_type, NULL_TREE)))
	ref_type = NULL_TREE;
    }
  if (dump_file)
    {
      fprintf (stream: dump_file, format: " - Recording base type:");
      print_generic_expr (dump_file, base_type);
      fprintf (stream: dump_file, format: " (alias set %i) ref type:",
	       base_type ? get_alias_set (base_type) : 0);
      print_generic_expr (dump_file, ref_type);
      fprintf (stream: dump_file, format: " (alias set %i) ",
	       ref_type ? get_alias_set (ref_type) : 0);
      a.dump (out: dump_file);
    }

  tt->insert (fndecl: current_function_decl, base: base_type, ref: ref_type, a, record_adjustments: false);
}
1144 | |
1145 | /* Returns true if and only if we should store the access to EXPR. |
1146 | Some accesses, e.g. loads from automatic variables, are not interesting. */ |
1147 | |
1148 | bool |
1149 | modref_access_analysis::record_access_p (tree expr) |
1150 | { |
1151 | if (TREE_THIS_VOLATILE (expr)) |
1152 | { |
1153 | if (dump_file) |
1154 | fprintf (stream: dump_file, format: " (volatile; marking nondeterministic) " ); |
1155 | set_nondeterministic (); |
1156 | } |
1157 | if (cfun->can_throw_non_call_exceptions |
1158 | && tree_could_throw_p (expr)) |
1159 | { |
1160 | if (dump_file) |
1161 | fprintf (stream: dump_file, format: " (can throw; marking side effects) " ); |
1162 | set_side_effects (); |
1163 | } |
1164 | |
1165 | if (refs_local_or_readonly_memory_p (expr)) |
1166 | { |
1167 | if (dump_file) |
1168 | fprintf (stream: dump_file, format: " - Read-only or local, ignoring.\n" ); |
1169 | return false; |
1170 | } |
1171 | return true; |
1172 | } |
1173 | |
1174 | /* Collapse loads and return true if something changed. */ |
1175 | |
1176 | bool |
1177 | modref_access_analysis::record_unknown_load () |
1178 | { |
1179 | bool changed = false; |
1180 | |
1181 | if (m_summary && !m_summary->loads->every_base) |
1182 | { |
1183 | m_summary->loads->collapse (); |
1184 | changed = true; |
1185 | } |
1186 | if (m_summary_lto && !m_summary_lto->loads->every_base) |
1187 | { |
1188 | m_summary_lto->loads->collapse (); |
1189 | changed = true; |
1190 | } |
1191 | return changed; |
1192 | } |
1193 | |
/* Collapse stores and return true if something changed.  */

bool
modref_access_analysis::record_unknown_store ()
{
  bool changed = false;

  if (m_summary && !m_summary->stores->every_base)
    {
      m_summary->stores->collapse ();
      changed = true;
    }
  if (m_summary_lto && !m_summary_lto->stores->every_base)
    {
      m_summary_lto->stores->collapse ();
      changed = true;
    }
  return changed;
}
1213 | |
/* Record unknown load from global memory.  */

bool
modref_access_analysis::record_global_memory_load ()
{
  bool changed = false;
  /* Access covering all global memory with unknown offset and size.  */
  modref_access_node a = {.offset: 0, .size: -1, .max_size: -1,
			  .parm_offset: 0, .parm_index: MODREF_GLOBAL_MEMORY_PARM, .parm_offset_known: false, .adjustments: 0};

  if (m_summary && !m_summary->loads->every_base)
    changed |= m_summary->loads->insert (fndecl: current_function_decl, base: 0, ref: 0, a, record_adjustments: false);
  if (m_summary_lto && !m_summary_lto->loads->every_base)
    changed |= m_summary_lto->loads->insert (fndecl: current_function_decl,
					     base: 0, ref: 0, a, record_adjustments: false);
  return changed;
}
1230 | |
/* Record unknown store to global memory.  */

bool
modref_access_analysis::record_global_memory_store ()
{
  bool changed = false;
  /* Access covering all global memory with unknown offset and size.  */
  modref_access_node a = {.offset: 0, .size: -1, .max_size: -1,
			  .parm_offset: 0, .parm_index: MODREF_GLOBAL_MEMORY_PARM, .parm_offset_known: false, .adjustments: 0};

  if (m_summary && !m_summary->stores->every_base)
    changed |= m_summary->stores->insert (fndecl: current_function_decl,
					  base: 0, ref: 0, a, record_adjustments: false);
  if (m_summary_lto && !m_summary_lto->stores->every_base)
    changed |= m_summary_lto->stores->insert (fndecl: current_function_decl,
					      base: 0, ref: 0, a, record_adjustments: false);
  return changed;
}
1248 | |
/* Merge side effects of call STMT to function with CALLEE_SUMMARY.
   Return true if something changed.
   If RECORD_ADJUSTMENTS is true cap number of adjustments to
   a given access to make dataflow finite.  */

bool
modref_access_analysis::merge_call_side_effects
	 (gimple *stmt, modref_summary *callee_summary,
	  cgraph_node *callee_node, bool record_adjustments)
{
  gcall *call = as_a <gcall *> (p: stmt);
  int flags = gimple_call_flags (call);

  /* Nothing to do for non-looping const functions.  */
  if ((flags & (ECF_CONST | ECF_NOVOPS))
      && !(flags & ECF_LOOPING_CONST_OR_PURE))
    return false;

  bool changed = false;

  if (dump_file)
    fprintf (stream: dump_file, format: " - Merging side effects of %s\n",
	     callee_node->dump_name ());

  /* Merge side effects and non-determinism.
     PURE/CONST flags makes functions deterministic and if there is
     no LOOPING_CONST_OR_PURE they also have no side effects.  */
  if (!(flags & (ECF_CONST | ECF_NOVOPS | ECF_PURE))
      || (flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (!m_summary->side_effects && callee_summary->side_effects)
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: " - merging side effects.\n");
	  m_summary->side_effects = true;
	  changed = true;
	}
      if (!m_summary->nondeterministic && callee_summary->nondeterministic
	  && !ignore_nondeterminism_p (caller: current_function_decl, flags))
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: " - merging nondeterministic.\n");
	  m_summary->nondeterministic = true;
	  changed = true;
	}
    }

  /* For const functions we are done.  */
  if (flags & (ECF_CONST | ECF_NOVOPS))
    return changed;

  /* Merge calls_interposable flags.  */
  if (!m_summary->calls_interposable && callee_summary->calls_interposable)
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " - merging calls interposable.\n");
      m_summary->calls_interposable = true;
      changed = true;
    }

  /* A callee that may be interposed at link time can behave
     differently from the body we analyzed.  */
  if (!callee_node->binds_to_current_def_p () && !m_summary->calls_interposable)
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " - May be interposed.\n");
      m_summary->calls_interposable = true;
      changed = true;
    }

  /* Now merge the actual load, store and kill vectors.  For this we need
     to compute map translating new parameters to old.  */
  if (dump_file)
    fprintf (stream: dump_file, format: "   Parm map:");

  auto_vec <modref_parm_map, 32> parm_map;
  parm_map.safe_grow_cleared (len: gimple_call_num_args (gs: call), exact: true);
  for (unsigned i = 0; i < gimple_call_num_args (gs: call); i++)
    {
      parm_map[i] = parm_map_for_ptr (op: gimple_call_arg (gs: call, index: i));
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: " %i", parm_map[i].parm_index);
	  if (parm_map[i].parm_offset_known)
	    {
	      fprintf (stream: dump_file, format: " offset:");
	      print_dec (value: (poly_int64)parm_map[i].parm_offset,
			 file: dump_file, sgn: SIGNED);
	    }
	}
    }

  /* The static chain is translated separately.  */
  modref_parm_map chain_map;
  if (gimple_call_chain (gs: call))
    {
      chain_map = parm_map_for_ptr (op: gimple_call_chain (gs: call));
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: "static chain %i", chain_map.parm_index);
	  if (chain_map.parm_offset_known)
	    {
	      fprintf (stream: dump_file, format: " offset:");
	      print_dec (value: (poly_int64)chain_map.parm_offset,
			 file: dump_file, sgn: SIGNED);
	    }
	}
    }
  if (dump_file)
    fprintf (stream: dump_file, format: "\n");

  /* Kills can be merged in only if we know the function is going to be
     always executed.  */
  if (m_always_executed
      && callee_summary->kills.length ()
      && (!cfun->can_throw_non_call_exceptions
	  || !stmt_could_throw_p (cfun, call)))
    {
      /* Watch for self recursive updates.  */
      auto_vec<modref_access_node, 32> saved_kills;

      saved_kills.reserve_exact (nelems: callee_summary->kills.length ());
      saved_kills.splice (src: callee_summary->kills);
      for (auto kill : saved_kills)
	{
	  if (kill.parm_index >= (int)parm_map.length ())
	    continue;
	  modref_parm_map &m
		  = kill.parm_index == MODREF_STATIC_CHAIN_PARM
		    ? chain_map
		    : parm_map[kill.parm_index];
	  /* A kill can be translated only when the parameter maps to a
	     known parameter with known offset.  */
	  if (m.parm_index == MODREF_LOCAL_MEMORY_PARM
	      || m.parm_index == MODREF_UNKNOWN_PARM
	      || m.parm_index == MODREF_RETSLOT_PARM
	      || !m.parm_offset_known)
	    continue;
	  modref_access_node n = kill;
	  n.parm_index = m.parm_index;
	  n.parm_offset += m.parm_offset;
	  if (modref_access_node::insert_kill (kills&: m_summary->kills, a&: n,
					       record_adjustments))
	    changed = true;
	}
    }

  /* Merge in loads.  */
  changed |= m_summary->loads->merge (fndecl: current_function_decl,
				      other: callee_summary->loads,
				      parm_map: &parm_map, static_chain_map: &chain_map,
				      record_accesses: record_adjustments,
				      promote_unknown_to_global: !may_access_nonescaping_parm_p
					      (call, callee_ecf_flags: flags, load: true));
  /* Merge in stores.  */
  if (!ignore_stores_p (caller: current_function_decl, flags))
    {
      changed |= m_summary->stores->merge (fndecl: current_function_decl,
					   other: callee_summary->stores,
					   parm_map: &parm_map, static_chain_map: &chain_map,
					   record_accesses: record_adjustments,
					   promote_unknown_to_global: !may_access_nonescaping_parm_p
						   (call, callee_ecf_flags: flags, load: false));
      if (!m_summary->writes_errno
	  && callee_summary->writes_errno)
	{
	  m_summary->writes_errno = true;
	  changed = true;
	}
    }
  return changed;
}
1417 | |
/* Return access mode for argument I of call STMT with FNSPEC.
   MAP translates the pointer argument to the caller's parameter.  */

modref_access_node
modref_access_analysis::get_access_for_fnspec (gcall *call, attr_fnspec &fnspec,
					       unsigned int i,
					       modref_parm_map &map)
{
  tree size = NULL_TREE;
  unsigned int size_arg;

  /* Determine the access size: either given by another argument or by
     the pointed-to type.  */
  if (!fnspec.arg_specified_p (i))
    ;
  else if (fnspec.arg_max_access_size_given_by_arg_p (i, arg: &size_arg))
    size = gimple_call_arg (gs: call, index: size_arg);
  else if (fnspec.arg_access_size_given_by_type_p (i))
    {
      tree callee = gimple_call_fndecl (gs: call);
      tree t = TYPE_ARG_TYPES (TREE_TYPE (callee));

      /* Walk the parameter type list to argument I.  */
      for (unsigned int p = 0; p < i; p++)
	t = TREE_CHAIN (t);
      size = TYPE_SIZE_UNIT (TREE_TYPE (TREE_VALUE (t)));
    }
  /* Base access: unknown offset and size within the parameter.  */
  modref_access_node a = {.offset: 0, .size: -1, .max_size: -1,
			  .parm_offset: map.parm_offset, .parm_index: map.parm_index,
			  .parm_offset_known: map.parm_offset_known, .adjustments: 0};
  poly_int64 size_hwi;
  /* Refine max_size when a constant byte size that fits in bits is
     known.  */
  if (size
      && poly_int_tree_p (t: size, value: &size_hwi)
      && coeffs_in_range_p (a: size_hwi, b: 0,
			    HOST_WIDE_INT_MAX / BITS_PER_UNIT))
    {
      a.size = -1;
      a.max_size = size_hwi << LOG2_BITS_PER_UNIT;
    }
  return a;
}
/* Apply side effects of call CALL to the summary being computed based on
   the callee's fnspec and ECF flags.  Records loads, stores, side effects
   and nondeterminism conservatively when no fnspec is available.  */

void
modref_access_analysis::process_fnspec (gcall *call)
{
  int flags = gimple_call_flags (call);

  /* PURE/CONST flags makes functions deterministic and if there is
     no LOOPING_CONST_OR_PURE they also have no side effects.  */
  if (!(flags & (ECF_CONST | ECF_NOVOPS | ECF_PURE))
      || (flags & ECF_LOOPING_CONST_OR_PURE)
      || (cfun->can_throw_non_call_exceptions
	  && stmt_could_throw_p (cfun, call)))
    {
      set_side_effects ();
      if (!ignore_nondeterminism_p (caller: current_function_decl, flags))
	set_nondeterministic ();
    }

  /* For const functions we are done.  */
  if (flags & (ECF_CONST | ECF_NOVOPS))
    return;

  attr_fnspec fnspec = gimple_call_fnspec (stmt: call);
  /* If there is no fnspec we know nothing about loads & stores.  */
  if (!fnspec.known_p ())
    {
      if (dump_file && gimple_call_builtin_p (call, BUILT_IN_NORMAL))
	fprintf (stream: dump_file, format: " Builtin with no fnspec: %s\n",
		 IDENTIFIER_POINTER (DECL_NAME (gimple_call_fndecl (call))));
      if (!ignore_stores_p (caller: current_function_decl, flags))
	{
	  /* Account for both stores and loads; use the tighter global
	     memory access when no nonescaping parameter can be touched.  */
	  if (!may_access_nonescaping_parm_p (call, callee_ecf_flags: flags, load: false))
	    record_global_memory_store ();
	  else
	    record_unknown_store ();
	  if (!may_access_nonescaping_parm_p (call, callee_ecf_flags: flags, load: true))
	    record_global_memory_load ();
	  else
	    record_unknown_load ();
	}
      else
	{
	  /* Stores are ignored; only loads matter.  */
	  if (!may_access_nonescaping_parm_p (call, callee_ecf_flags: flags, load: true))
	    record_global_memory_load ();
	  else
	    record_unknown_load ();
	}
      return;
    }
  /* Process fnspec.  */
  if (fnspec.global_memory_read_p ())
    {
      if (may_access_nonescaping_parm_p (call, callee_ecf_flags: flags, load: true))
	record_unknown_load ();
      else
	record_global_memory_load ();
    }
  else
    {
      /* Record loads of individual pointer arguments the fnspec allows
	 to be read.  */
      for (unsigned int i = 0; i < gimple_call_num_args (gs: call); i++)
	if (!POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, i))))
	  ;
	else if (!fnspec.arg_specified_p (i)
		 || fnspec.arg_maybe_read_p (i))
	  {
	    modref_parm_map map = parm_map_for_ptr
				     (op: gimple_call_arg (gs: call, index: i));

	    if (map.parm_index == MODREF_LOCAL_MEMORY_PARM)
	      continue;
	    if (map.parm_index == MODREF_UNKNOWN_PARM)
	      {
		record_unknown_load ();
		break;
	      }
	    modref_access_node a = get_access_for_fnspec (call, fnspec, i, map);
	    if (a.parm_index == MODREF_LOCAL_MEMORY_PARM)
	      continue;
	    if (m_summary)
	      m_summary->loads->insert (fndecl: current_function_decl, base: 0, ref: 0, a, record_adjustments: false);
	    if (m_summary_lto)
	      m_summary_lto->loads->insert (fndecl: current_function_decl, base: 0, ref: 0, a,
					    record_adjustments: false);
	  }
    }
  if (ignore_stores_p (caller: current_function_decl, flags))
    return;
  if (fnspec.global_memory_written_p ())
    {
      if (may_access_nonescaping_parm_p (call, callee_ecf_flags: flags, load: false))
	record_unknown_store ();
      else
	record_global_memory_store ();
    }
  else
    {
      /* Record stores to individual pointer arguments the fnspec allows
	 to be written.  */
      for (unsigned int i = 0; i < gimple_call_num_args (gs: call); i++)
	if (!POINTER_TYPE_P (TREE_TYPE (gimple_call_arg (call, i))))
	  ;
	else if (!fnspec.arg_specified_p (i)
		 || fnspec.arg_maybe_written_p (i))
	  {
	    modref_parm_map map = parm_map_for_ptr
				     (op: gimple_call_arg (gs: call, index: i));

	    if (map.parm_index == MODREF_LOCAL_MEMORY_PARM)
	      continue;
	    if (map.parm_index == MODREF_UNKNOWN_PARM)
	      {
		record_unknown_store ();
		break;
	      }
	    modref_access_node a = get_access_for_fnspec (call, fnspec, i, map);
	    if (a.parm_index == MODREF_LOCAL_MEMORY_PARM)
	      continue;
	    if (m_summary)
	      m_summary->stores->insert (fndecl: current_function_decl, base: 0, ref: 0, a, record_adjustments: false);
	    if (m_summary_lto)
	      m_summary_lto->stores->insert (fndecl: current_function_decl,
					     base: 0, ref: 0, a, record_adjustments: false);
	  }
      /* The fnspec may also indicate that errno is written.  */
      if (fnspec.errno_maybe_written_p () && flag_errno_math)
	{
	  if (m_summary)
	    m_summary->writes_errno = true;
	  if (m_summary_lto)
	    m_summary_lto->writes_errno = true;
	}
    }
}
1588 | |
/* Analyze function call STMT of the current function.
   Self-recursive calls are remembered in m_recursive_calls and handled
   later by propagate.  */

void
modref_access_analysis::analyze_call (gcall *stmt)
{
  /* Check flags on the function call.  In certain cases, analysis can be
     simplified.  */
  int flags = gimple_call_flags (stmt);

  if (dump_file)
    {
      fprintf (stream: dump_file, format: " - Analyzing call:");
      print_gimple_stmt (dump_file, stmt, 0);
    }

  /* Non-looping const/novops calls touch no memory we care about.  */
  if ((flags & (ECF_CONST | ECF_NOVOPS))
      && !(flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (dump_file)
	fprintf (stream: dump_file,
		 format: " - ECF_CONST | ECF_NOVOPS, ignoring all stores and all loads "
		 "except for args.\n");
      return;
    }

  /* Next, we try to get the callee's function declaration.  The goal is to
     merge their summary with ours.  */
  tree callee = gimple_call_fndecl (gs: stmt);

  /* Check if this is an indirect call.  */
  if (!callee)
    {
      if (dump_file)
	fprintf (stream: dump_file, format: gimple_call_internal_p (gs: stmt)
		 ? " - Internal call" : " - Indirect call.\n");
      process_fnspec (call: stmt);
      return;
    }
  /* We only need to handle internal calls in IPA mode.  */
  gcc_checking_assert (!m_summary_lto && !m_ipa);

  struct cgraph_node *callee_node = cgraph_node::get_create (callee);

  /* If this is a recursive call, the target summary is the same as ours, so
     there's nothing to do.  */
  if (recursive_call_p (current_function_decl, callee))
    {
      m_recursive_calls.safe_push (obj: stmt);
      set_side_effects ();
      if (dump_file)
	fprintf (stream: dump_file, format: " - Skipping recursive call.\n");
      return;
    }

  gcc_assert (callee_node != NULL);

  /* Get the function symbol and its availability.  */
  enum availability avail;
  callee_node = callee_node->function_symbol (avail: &avail);
  bool looping;
  /* Builtins safe for const functions access no caller-visible memory.  */
  if (builtin_safe_for_const_function_p (&looping, callee))
    {
      if (looping)
	set_side_effects ();
      if (dump_file)
	fprintf (stream: dump_file, format: " - Builtin is safe for const.\n");
      return;
    }
  /* An interposable callee may be replaced; fall back to fnspec/flags.  */
  if (avail <= AVAIL_INTERPOSABLE)
    {
      if (dump_file)
	fprintf (stream: dump_file,
		 format: " - Function availability <= AVAIL_INTERPOSABLE.\n");
      process_fnspec (call: stmt);
      return;
    }

  /* Get callee's modref summary.  As above, if there's no summary, we either
     have to give up or, if stores are ignored, we can just purge loads.  */
  modref_summary *callee_summary = optimization_summaries->get (node: callee_node);
  if (!callee_summary)
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " - No modref summary available for callee.\n");
      process_fnspec (call: stmt);
      return;
    }

  merge_call_side_effects (stmt, callee_summary, callee_node, record_adjustments: false);

  return;
}
1682 | |
1683 | /* Helper for analyze_stmt. */ |
1684 | |
1685 | bool |
1686 | modref_access_analysis::analyze_load (gimple *, tree, tree op, void *data) |
1687 | { |
1688 | modref_access_analysis *t = (modref_access_analysis *)data; |
1689 | |
1690 | if (dump_file) |
1691 | { |
1692 | fprintf (stream: dump_file, format: " - Analyzing load: " ); |
1693 | print_generic_expr (dump_file, op); |
1694 | fprintf (stream: dump_file, format: "\n" ); |
1695 | } |
1696 | |
1697 | if (!t->record_access_p (expr: op)) |
1698 | return false; |
1699 | |
1700 | ao_ref r; |
1701 | ao_ref_init (&r, op); |
1702 | modref_access_node a = get_access (ref: &r); |
1703 | if (a.parm_index == MODREF_LOCAL_MEMORY_PARM) |
1704 | return false; |
1705 | |
1706 | if (t->m_summary) |
1707 | t->record_access (tt: t->m_summary->loads, ref: &r, a); |
1708 | if (t->m_summary_lto) |
1709 | t->record_access_lto (tt: t->m_summary_lto->loads, ref: &r, a); |
1710 | return false; |
1711 | } |
1712 | |
1713 | /* Helper for analyze_stmt. */ |
1714 | |
1715 | bool |
1716 | modref_access_analysis::analyze_store (gimple *stmt, tree, tree op, void *data) |
1717 | { |
1718 | modref_access_analysis *t = (modref_access_analysis *)data; |
1719 | |
1720 | if (dump_file) |
1721 | { |
1722 | fprintf (stream: dump_file, format: " - Analyzing store: " ); |
1723 | print_generic_expr (dump_file, op); |
1724 | fprintf (stream: dump_file, format: "\n" ); |
1725 | } |
1726 | |
1727 | if (!t->record_access_p (expr: op)) |
1728 | return false; |
1729 | |
1730 | ao_ref r; |
1731 | ao_ref_init (&r, op); |
1732 | modref_access_node a = get_access (ref: &r); |
1733 | if (a.parm_index == MODREF_LOCAL_MEMORY_PARM) |
1734 | return false; |
1735 | |
1736 | if (t->m_summary) |
1737 | t->record_access (tt: t->m_summary->stores, ref: &r, a); |
1738 | if (t->m_summary_lto) |
1739 | t->record_access_lto (tt: t->m_summary_lto->stores, ref: &r, a); |
1740 | if (t->m_always_executed |
1741 | && a.useful_for_kill_p () |
1742 | && (!cfun->can_throw_non_call_exceptions |
1743 | || !stmt_could_throw_p (cfun, stmt))) |
1744 | { |
1745 | if (dump_file) |
1746 | fprintf (stream: dump_file, format: " - Recording kill\n" ); |
1747 | if (t->m_summary) |
1748 | modref_access_node::insert_kill (kills&: t->m_summary->kills, a, record_adjustments: false); |
1749 | if (t->m_summary_lto) |
1750 | modref_access_node::insert_kill (kills&: t->m_summary_lto->kills, a, record_adjustments: false); |
1751 | } |
1752 | return false; |
1753 | } |
1754 | |
/* Analyze statement STMT of the current function (cfun).
   ALWAYS_EXECUTED is true when STMT is known to execute on every invocation
   of the function; only then may stores be recorded as kills.  */

void
modref_access_analysis::analyze_stmt (gimple *stmt, bool always_executed)
{
  m_always_executed = always_executed;
  /* In general we can not ignore clobbers because they are barriers for code
     motion, however after inlining it is safe to do because local optimization
     passes do not consider clobbers from other functions.
     Similar logic is in ipa-pure-const.cc.  */
  if ((m_ipa || cfun->after_inlining) && gimple_clobber_p (s: stmt))
    {
      /* An always-executed clobber kills the underlying memory, which lets
	 earlier stores into it be proven dead.  */
      if (always_executed && record_access_p (expr: gimple_assign_lhs (gs: stmt)))
	{
	  ao_ref r;
	  ao_ref_init (&r, gimple_assign_lhs (gs: stmt));
	  modref_access_node a = get_access (ref: &r);
	  if (a.useful_for_kill_p ())
	    {
	      if (dump_file)
		fprintf (stream: dump_file, format: " - Recording kill\n" );
	      if (m_summary)
		modref_access_node::insert_kill (kills&: m_summary->kills, a, record_adjustments: false);
	      if (m_summary_lto)
		modref_access_node::insert_kill (kills&: m_summary_lto->kills,
						 a, record_adjustments: false);
	    }
	}
      return;
    }

  /* Analyze all loads and stores in STMT.  */
  walk_stmt_load_store_ops (stmt, this,
			    analyze_load, analyze_store);

  switch (gimple_code (g: stmt))
    {
    case GIMPLE_ASM:
      /* Volatile asms can not be optimized away; treat as nondeterminism.  */
      if (gimple_asm_volatile_p (asm_stmt: as_a <gasm *> (p: stmt)))
	set_nondeterministic ();
      if (cfun->can_throw_non_call_exceptions
	  && stmt_could_throw_p (cfun, stmt))
	set_side_effects ();
      /* If the ASM statement does not read nor write memory, there's nothing
	 to do.  Otherwise just give up.  */
      if (!gimple_asm_clobbers_memory_p (as_a <gasm *> (p: stmt)))
	return;
      if (dump_file)
	fprintf (stream: dump_file, format: " - Function contains GIMPLE_ASM statement "
		 "which clobbers memory.\n" );
      record_unknown_load ();
      record_unknown_store ();
      return;
    case GIMPLE_CALL:
      if (!m_ipa || gimple_call_internal_p (gs: stmt))
	analyze_call (stmt: as_a <gcall *> (p: stmt));
      else
	{
	  /* In IPA mode side effects of real calls are merged during IPA
	     propagation; here only remember useful fnspec strings on the
	     call graph edge.  */
	  attr_fnspec fnspec = gimple_call_fnspec (stmt: as_a <gcall *>(p: stmt));

	  if (fnspec.known_p ()
	      && (!fnspec.global_memory_read_p ()
		  || !fnspec.global_memory_written_p ()))
	    {
	      cgraph_edge *e = cgraph_node::get
				  (decl: current_function_decl)->get_edge (call_stmt: stmt);
	      if (e->callee)
		{
		  fnspec_summaries->get_create (edge: e)->fnspec
			  = xstrdup (fnspec.get_str ());
		  if (dump_file)
		    fprintf (stream: dump_file, format: " Recorded fnspec %s\n" ,
			     fnspec.get_str ());
		}
	    }
	}
      return;
    default:
      /* Memory accesses were handled by the operand walk above; only track
	 possible throws as side effects here.  */
      if (cfun->can_throw_non_call_exceptions
	  && stmt_could_throw_p (cfun, stmt))
	set_side_effects ();
      return;
    }
}
1840 | |
/* Propagate load/stores across recursive calls.
   Iterates merging this function's own summary into itself via the
   recorded recursive call sites until a fixed point is reached or the
   summary stops being useful.  */

void
modref_access_analysis::propagate ()
{
  /* NOTE(review): guard looks intended to skip IPA mode where recursion is
     handled by interprocedural propagation — confirm the condition.  */
  if (m_ipa && m_summary)
    return;

  bool changed = true;
  /* Adjustment recording (see merge_call_side_effects last argument) is
     disabled on the first iteration only.  */
  bool first = true;
  cgraph_node *fnode = cgraph_node::get (decl: current_function_decl);

  m_always_executed = false;
  while (changed && m_summary->useful_p (ecf_flags: m_ecf_flags, check_flags: false))
    {
      changed = false;
      for (unsigned i = 0; i < m_recursive_calls.length (); i++)
	{
	  changed |= merge_call_side_effects (stmt: m_recursive_calls[i], callee_summary: m_summary,
					      callee_node: fnode, record_adjustments: !first);
	}
      first = false;
    }
}
1865 | |
/* Analyze the current function (cfun), filling in m_summary and/or
   m_summary_lto.  Walks every statement of every basic block, then runs
   local propagation for recursive calls and accounts for possibly
   infinite execution as a side effect.  */

void
modref_access_analysis::analyze ()
{
  m_ecf_flags = flags_from_decl_or_type (current_function_decl);
  bool summary_useful = true;

  /* Analyze each statement in each basic block of the function.  If the
     statement cannot be analyzed (for any reason), the entire function cannot
     be analyzed by modref.  */
  basic_block bb;
  /* Blocks executed on every invocation let us record kill information.  */
  bitmap always_executed_bbs = find_always_executed_bbs (cfun, assume_return_or_eh: true);
  FOR_EACH_BB_FN (bb, cfun)
    {
      gimple_stmt_iterator si;
      bool always_executed = bitmap_bit_p (always_executed_bbs, bb->index);

      for (si = gsi_start_nondebug_after_labels_bb (bb);
	   !gsi_end_p (i: si); gsi_next_nondebug (i: &si))
	{
	  /* NULL memory accesses terminates BB.  These accesses are known
	     to trip undefined behavior.  gimple-ssa-isolate-paths turns them
	     to volatile accesses and adds builtin_trap call which would
	     confuse us otherwise.  */
	  if (infer_nonnull_range_by_dereference (gsi_stmt (i: si),
						  null_pointer_node))
	    {
	      if (dump_file)
		fprintf (stream: dump_file, format: " - NULL memory access; terminating BB\n" );
	      if (flag_non_call_exceptions)
		set_side_effects ();
	      break;
	    }
	  analyze_stmt (stmt: gsi_stmt (i: si), always_executed);

	  /* Avoid doing useless work.  */
	  if ((!m_summary || !m_summary->useful_p (ecf_flags: m_ecf_flags, check_flags: false))
	      && (!m_summary_lto
		  || !m_summary_lto->useful_p (ecf_flags: m_ecf_flags, check_flags: false)))
	    {
	      summary_useful = false;
	      break;
	    }
	  /* After a statement that may throw externally, later statements in
	     the block are no longer always executed.  */
	  if (always_executed
	      && stmt_can_throw_external (cfun, gsi_stmt (i: si)))
	    always_executed = false;
	}
      if (!summary_useful)
	break;
    }
  /* In non-IPA mode we need to perform iterative dataflow on recursive calls.
     This needs to be done after all other side effects are computed.  */
  if (summary_useful)
    {
      if (!m_ipa)
	propagate ();
      /* A function that may not terminate is considered to have side
	 effects.  */
      if (m_summary && !m_summary->side_effects && !finite_function_p ())
	m_summary->side_effects = true;
      if (m_summary_lto && !m_summary_lto->side_effects
	  && !finite_function_p ())
	m_summary_lto->side_effects = true;
    }
  BITMAP_FREE (always_executed_bbs);
}
1931 | |
1932 | /* Return true if OP accesses memory pointed to by SSA_NAME. */ |
1933 | |
1934 | bool |
1935 | memory_access_to (tree op, tree ssa_name) |
1936 | { |
1937 | tree base = get_base_address (t: op); |
1938 | if (!base) |
1939 | return false; |
1940 | if (TREE_CODE (base) != MEM_REF && TREE_CODE (base) != TARGET_MEM_REF) |
1941 | return false; |
1942 | return TREE_OPERAND (base, 0) == ssa_name; |
1943 | } |
1944 | |
1945 | /* Consider statement val = *arg. |
1946 | return EAF flags of ARG that can be determined from EAF flags of VAL |
1947 | (which are known to be FLAGS). If IGNORE_STORES is true we can ignore |
1948 | all stores to VAL, i.e. when handling noreturn function. */ |
1949 | |
1950 | static int |
1951 | deref_flags (int flags, bool ignore_stores) |
1952 | { |
1953 | /* Dereference is also a direct read but dereferenced value does not |
1954 | yield any other direct use. */ |
1955 | int ret = EAF_NO_DIRECT_CLOBBER | EAF_NO_DIRECT_ESCAPE |
1956 | | EAF_NOT_RETURNED_DIRECTLY; |
1957 | /* If argument is unused just account for |
1958 | the read involved in dereference. */ |
1959 | if (flags & EAF_UNUSED) |
1960 | ret |= EAF_NO_INDIRECT_READ | EAF_NO_INDIRECT_CLOBBER |
1961 | | EAF_NO_INDIRECT_ESCAPE; |
1962 | else |
1963 | { |
1964 | /* Direct or indirect accesses leads to indirect accesses. */ |
1965 | if (((flags & EAF_NO_DIRECT_CLOBBER) |
1966 | && (flags & EAF_NO_INDIRECT_CLOBBER)) |
1967 | || ignore_stores) |
1968 | ret |= EAF_NO_INDIRECT_CLOBBER; |
1969 | if (((flags & EAF_NO_DIRECT_ESCAPE) |
1970 | && (flags & EAF_NO_INDIRECT_ESCAPE)) |
1971 | || ignore_stores) |
1972 | ret |= EAF_NO_INDIRECT_ESCAPE; |
1973 | if ((flags & EAF_NO_DIRECT_READ) |
1974 | && (flags & EAF_NO_INDIRECT_READ)) |
1975 | ret |= EAF_NO_INDIRECT_READ; |
1976 | if ((flags & EAF_NOT_RETURNED_DIRECTLY) |
1977 | && (flags & EAF_NOT_RETURNED_INDIRECTLY)) |
1978 | ret |= EAF_NOT_RETURNED_INDIRECTLY; |
1979 | } |
1980 | return ret; |
1981 | } |
1982 | |
1983 | |
/* Description of an escape point: a call which affects flags of a given
   SSA name.  Escape points are remembered during IPA analysis and merged
   in later, at IPA propagation time, when callee flags are known.  */

struct escape_point
{
  /* Value escapes to this call.  */
  gcall *call;
  /* Index of the argument it escapes to; -1 stands for the static chain.  */
  int arg;
  /* Flags already known about the argument (this can save us from recording
     escape points if local analysis did good job already).  */
  eaf_flags_t min_flags;
  /* Does value escape directly or indirectly?  */
  bool direct;
};
1999 | |
2000 | /* Lattice used during the eaf flags analysis dataflow. For a given SSA name |
2001 | we aim to compute its flags and escape points. We also use the lattice |
2002 | to dynamically build dataflow graph to propagate on. */ |
2003 | |
2004 | class modref_lattice |
2005 | { |
2006 | public: |
2007 | /* EAF flags of the SSA name. */ |
2008 | eaf_flags_t flags; |
2009 | /* Used during DFS walk to mark names where final value was determined |
2010 | without need for dataflow. */ |
2011 | bool known; |
2012 | /* Used during DFS walk to mark open vertices (for cycle detection). */ |
2013 | bool open; |
2014 | /* Set during DFS walk for names that needs dataflow propagation. */ |
2015 | bool do_dataflow; |
2016 | /* Used during the iterative dataflow. */ |
2017 | bool changed; |
2018 | |
2019 | /* When doing IPA analysis we can not merge in callee escape points; |
2020 | Only remember them and do the merging at IPA propagation time. */ |
2021 | vec <escape_point, va_heap, vl_ptr> escape_points; |
2022 | |
2023 | /* Representation of a graph for dataflow. This graph is built on-demand |
2024 | using modref_eaf_analysis::analyze_ssa and later solved by |
2025 | modref_eaf_analysis::propagate. |
2026 | Each edge represents the fact that flags of current lattice should be |
2027 | propagated to lattice of SSA_NAME. */ |
2028 | struct propagate_edge |
2029 | { |
2030 | int ssa_name; |
2031 | bool deref; |
2032 | }; |
2033 | vec <propagate_edge, va_heap, vl_ptr> propagate_to; |
2034 | |
2035 | void init (); |
2036 | void release (); |
2037 | bool merge (const modref_lattice &with); |
2038 | bool merge (int flags); |
2039 | bool merge_deref (const modref_lattice &with, bool ignore_stores); |
2040 | bool merge_direct_load (); |
2041 | bool merge_direct_store (); |
2042 | bool add_escape_point (gcall *call, int arg, int min_flags, bool diret); |
2043 | void dump (FILE *out, int indent = 0) const; |
2044 | }; |
2045 | |
2046 | /* Lattices are saved to vectors, so keep them PODs. */ |
2047 | void |
2048 | modref_lattice::init () |
2049 | { |
2050 | /* All flags we track. */ |
2051 | int f = EAF_NO_DIRECT_CLOBBER | EAF_NO_INDIRECT_CLOBBER |
2052 | | EAF_NO_DIRECT_ESCAPE | EAF_NO_INDIRECT_ESCAPE |
2053 | | EAF_NO_DIRECT_READ | EAF_NO_INDIRECT_READ |
2054 | | EAF_NOT_RETURNED_DIRECTLY | EAF_NOT_RETURNED_INDIRECTLY |
2055 | | EAF_UNUSED; |
2056 | flags = f; |
2057 | /* Check that eaf_flags_t is wide enough to hold all flags. */ |
2058 | gcc_checking_assert (f == flags); |
2059 | open = true; |
2060 | known = false; |
2061 | } |
2062 | |
2063 | /* Release memory. */ |
2064 | void |
2065 | modref_lattice::release () |
2066 | { |
2067 | escape_points.release (); |
2068 | propagate_to.release (); |
2069 | } |
2070 | |
/* Dump lattice to OUT; indent with INDENT spaces.  Prints the flags
   followed by every recorded escape point.  */

void
modref_lattice::dump (FILE *out, int indent) const
{
  dump_eaf_flags (out, flags);
  if (escape_points.length ())
    {
      fprintf (stream: out, format: "%*sEscapes:\n" , indent, "" );
      /* For each escape point show the argument index, directness,
	 minimal known flags and the call statement itself.  */
      for (unsigned int i = 0; i < escape_points.length (); i++)
	{
	  fprintf (stream: out, format: "%*s Arg %i (%s) min flags" , indent, "" ,
		   escape_points[i].arg,
		   escape_points[i].direct ? "direct" : "indirect" );
	  dump_eaf_flags (out, flags: escape_points[i].min_flags, newline: false);
	  fprintf (stream: out, format: " in call " );
	  print_gimple_stmt (out, escape_points[i].call, 0);
	}
    }
}
2091 | |
/* Add escape point CALL, ARG, MIN_FLAGS, DIRECT.  Return false if such escape
   point already exists (with flags at least as strict) or is unnecessary;
   true if the lattice changed.  */

bool
modref_lattice::add_escape_point (gcall *call, int arg, int min_flags,
				  bool direct)
{
  escape_point *ep;
  unsigned int i;

  /* If we already determined flags to be bad enough,
     we do not need to record.  */
  if ((flags & min_flags) == flags || (min_flags & EAF_UNUSED))
    return false;

  /* Reuse an existing entry for the same call/arg/directness, possibly
     relaxing its minimal flags.  */
  FOR_EACH_VEC_ELT (escape_points, i, ep)
    if (ep->call == call && ep->arg == arg && ep->direct == direct)
      {
	if ((ep->min_flags & min_flags) == min_flags)
	  return false;
	ep->min_flags &= min_flags;
	return true;
      }
  /* Give up if max escape points is met.  */
  if ((int)escape_points.length () > param_modref_max_escape_points)
    {
      if (dump_file)
	fprintf (stream: dump_file, format: "--param modref-max-escape-points limit reached\n" );
      /* Merging 0 drops all flags: the value is treated as fully
	 escaping.  */
      merge (flags: 0);
      return true;
    }
  escape_point new_ep = {.call: call, .arg: arg, .min_flags: min_flags, .direct: direct};
  escape_points.safe_push (obj: new_ep);
  return true;
}
2127 | |
2128 | /* Merge in flags from F. */ |
2129 | bool |
2130 | modref_lattice::merge (int f) |
2131 | { |
2132 | if (f & EAF_UNUSED) |
2133 | return false; |
2134 | /* Check that flags seems sane: if function does not read the parameter |
2135 | it can not access it indirectly. */ |
2136 | gcc_checking_assert (!(f & EAF_NO_DIRECT_READ) |
2137 | || ((f & EAF_NO_INDIRECT_READ) |
2138 | && (f & EAF_NO_INDIRECT_CLOBBER) |
2139 | && (f & EAF_NO_INDIRECT_ESCAPE) |
2140 | && (f & EAF_NOT_RETURNED_INDIRECTLY))); |
2141 | if ((flags & f) != flags) |
2142 | { |
2143 | flags &= f; |
2144 | /* Prune obviously useless flags; |
2145 | We do not have ECF_FLAGS handy which is not big problem since |
2146 | we will do final flags cleanup before producing summary. |
2147 | Merging should be fast so it can work well with dataflow. */ |
2148 | flags = remove_useless_eaf_flags (eaf_flags: flags, ecf_flags: 0, returns_void: false); |
2149 | if (!flags) |
2150 | escape_points.release (); |
2151 | return true; |
2152 | } |
2153 | return false; |
2154 | } |
2155 | |
2156 | /* Merge in WITH. Return true if anything changed. */ |
2157 | |
2158 | bool |
2159 | modref_lattice::merge (const modref_lattice &with) |
2160 | { |
2161 | if (!with.known) |
2162 | do_dataflow = true; |
2163 | |
2164 | bool changed = merge (f: with.flags); |
2165 | |
2166 | if (!flags) |
2167 | return changed; |
2168 | for (unsigned int i = 0; i < with.escape_points.length (); i++) |
2169 | changed |= add_escape_point (call: with.escape_points[i].call, |
2170 | arg: with.escape_points[i].arg, |
2171 | min_flags: with.escape_points[i].min_flags, |
2172 | direct: with.escape_points[i].direct); |
2173 | return changed; |
2174 | } |
2175 | |
/* Merge in deref of WITH.  If IGNORE_STORES is true do not consider
   stores.  Return true if anything changed.  */

bool
modref_lattice::merge_deref (const modref_lattice &with, bool ignore_stores)
{
  /* Flags of WITH may still change; the result needs dataflow
     iteration.  */
  if (!with.known)
    do_dataflow = true;

  bool changed = merge (f: deref_flags (flags: with.flags, ignore_stores));

  /* With all flags cleared there is nothing to track.  */
  if (!flags)
    return changed;
  /* WITH's escape points become indirect escape points of this name.  */
  for (unsigned int i = 0; i < with.escape_points.length (); i++)
    {
      int min_flags = with.escape_points[i].min_flags;

      if (with.escape_points[i].direct)
	min_flags = deref_flags (flags: min_flags, ignore_stores);
      else if (ignore_stores)
	min_flags |= ignore_stores_eaf_flags;
      changed |= add_escape_point (call: with.escape_points[i].call,
				   arg: with.escape_points[i].arg,
				   min_flags,
				   direct: false);
    }
  return changed;
}
2204 | |
2205 | /* Merge in flags for direct load. */ |
2206 | |
2207 | bool |
2208 | modref_lattice::merge_direct_load () |
2209 | { |
2210 | return merge (f: ~(EAF_UNUSED | EAF_NO_DIRECT_READ)); |
2211 | } |
2212 | |
2213 | /* Merge in flags for direct store. */ |
2214 | |
2215 | bool |
2216 | modref_lattice::merge_direct_store () |
2217 | { |
2218 | return merge (f: ~(EAF_UNUSED | EAF_NO_DIRECT_CLOBBER)); |
2219 | } |
2220 | |
/* Analyzer of EAF flags.
   This is generally dataflow problem over the SSA graph, however we only
   care about flags of few selected ssa names (arguments, return slot and
   static chain).  So we first call analyze_ssa_name on all relevant names
   and perform a DFS walk to discover SSA names where flags needs to be
   determined.  For acyclic graphs we try to determine final flags during
   this walk.  Once cycles or recursion depth is met we enlist SSA names
   for dataflow which is done by propagate call.

   After propagation the flags can be obtained using get_ssa_name_flags.  */

class modref_eaf_analysis
{
public:
  /* Mark NAME as relevant for analysis.  DEFERRED is set when re-processing
     a name that was earlier postponed due to the recursion depth limit.  */
  void analyze_ssa_name (tree name, bool deferred = false);
  /* Dataflow solver.  */
  void propagate ();
  /* Return flags computed earlier for NAME.  */
  int get_ssa_name_flags (tree name)
  {
    int version = SSA_NAME_VERSION (name);
    gcc_checking_assert (m_lattice[version].known);
    return m_lattice[version].flags;
  }
  /* In IPA mode this will record all escape points
     determined for NAME to PARM_INDEX.  Flags are minimal
     flags known.  */
  void record_escape_points (tree name, int parm_index, int flags);
  modref_eaf_analysis (bool ipa)
  {
    m_ipa = ipa;
    m_depth = 0;
    /* One lattice slot per SSA name version, zero-initialized.  */
    m_lattice.safe_grow_cleared (num_ssa_names, exact: true);
  }
  ~modref_eaf_analysis ()
  {
    gcc_checking_assert (!m_depth);
    /* Lattices may own heap vectors (escape points, dataflow edges);
       release them all.  */
    if (m_ipa || m_names_to_propagate.length ())
      for (unsigned int i = 0; i < num_ssa_names; i++)
	m_lattice[i].release ();
  }
private:
  /* If true, we produce analysis for IPA mode.  In this case escape points are
     collected.  */
  bool m_ipa;
  /* Depth of recursion of analyze_ssa_name.  */
  int m_depth;
  /* Propagation lattice for individual ssa names.  */
  auto_vec<modref_lattice> m_lattice;
  /* Names whose analysis was postponed at the recursion depth limit.  */
  auto_vec<tree> m_deferred_names;
  /* SSA name versions that require the iterative dataflow.  */
  auto_vec<int> m_names_to_propagate;

  void merge_with_ssa_name (tree dest, tree src, bool deref);
  void merge_call_lhs_flags (gcall *call, int arg, tree name, bool direct,
			     bool deref);
};
2278 | |
2279 | |
2280 | /* Call statements may return their parameters. Consider argument number |
2281 | ARG of USE_STMT and determine flags that can needs to be cleared |
2282 | in case pointer possibly indirectly references from ARG I is returned. |
2283 | If DIRECT is true consider direct returns and if INDIRECT consider |
2284 | indirect returns. |
2285 | LATTICE, DEPTH and ipa are same as in analyze_ssa_name. |
2286 | ARG is set to -1 for static chain. */ |
2287 | |
2288 | void |
2289 | modref_eaf_analysis::merge_call_lhs_flags (gcall *call, int arg, |
2290 | tree name, bool direct, |
2291 | bool indirect) |
2292 | { |
2293 | int index = SSA_NAME_VERSION (name); |
2294 | bool returned_directly = false; |
2295 | |
2296 | /* If there is no return value, no flags are affected. */ |
2297 | if (!gimple_call_lhs (gs: call)) |
2298 | return; |
2299 | |
2300 | /* If we know that function returns given argument and it is not ARG |
2301 | we can still be happy. */ |
2302 | if (arg >= 0) |
2303 | { |
2304 | int flags = gimple_call_return_flags (call); |
2305 | if (flags & ERF_RETURNS_ARG) |
2306 | { |
2307 | if ((flags & ERF_RETURN_ARG_MASK) == arg) |
2308 | returned_directly = true; |
2309 | else |
2310 | return; |
2311 | } |
2312 | } |
2313 | /* Make ERF_RETURNS_ARG overwrite EAF_UNUSED. */ |
2314 | if (returned_directly) |
2315 | { |
2316 | direct = true; |
2317 | indirect = false; |
2318 | } |
2319 | /* If value is not returned at all, do nothing. */ |
2320 | else if (!direct && !indirect) |
2321 | return; |
2322 | |
2323 | /* If return value is SSA name determine its flags. */ |
2324 | if (TREE_CODE (gimple_call_lhs (call)) == SSA_NAME) |
2325 | { |
2326 | tree lhs = gimple_call_lhs (gs: call); |
2327 | if (direct) |
2328 | merge_with_ssa_name (dest: name, src: lhs, deref: false); |
2329 | if (indirect) |
2330 | merge_with_ssa_name (dest: name, src: lhs, deref: true); |
2331 | } |
2332 | /* In the case of memory store we can do nothing. */ |
2333 | else if (!direct) |
2334 | m_lattice[index].merge (f: deref_flags (flags: 0, ignore_stores: false)); |
2335 | else |
2336 | m_lattice[index].merge (f: 0); |
2337 | } |
2338 | |
/* CALL_FLAGS are EAF_FLAGS of the argument as seen by the callee.  Turn
   them into flags valid for the caller and update LATTICE of the
   corresponding argument if needed.  */

static int
callee_to_caller_flags (int call_flags, bool ignore_stores,
			modref_lattice &lattice)
{
  /* call_flags is about callee returning a value
     that is not the same as caller returning it.  */
  call_flags |= EAF_NOT_RETURNED_DIRECTLY
		| EAF_NOT_RETURNED_INDIRECTLY;
  if (!ignore_stores && !(call_flags & EAF_UNUSED))
    {
      /* If value escapes we are no longer able to track what happens
	 with it because we can read it from the escaped location
	 anytime.  */
      if (!(call_flags & EAF_NO_DIRECT_ESCAPE))
	lattice.merge (f: 0);
      else if (!(call_flags & EAF_NO_INDIRECT_ESCAPE))
	lattice.merge (f: ~(EAF_NOT_RETURNED_INDIRECTLY
			 | EAF_NO_DIRECT_READ
			 | EAF_NO_INDIRECT_READ
			 | EAF_NO_INDIRECT_CLOBBER
			 | EAF_UNUSED));
    }
  else
    /* When stores can be ignored, escapes by store are irrelevant; add
       the corresponding guarantees.  */
    call_flags |= ignore_stores_eaf_flags;
  return call_flags;
}
2369 | |
2370 | /* Analyze EAF flags for SSA name NAME and store result to LATTICE. |
2371 | LATTICE is an array of modref_lattices. |
2372 | DEPTH is a recursion depth used to make debug output prettier. |
2373 | If IPA is true we analyze for IPA propagation (and thus call escape points |
2374 | are processed later) */ |
2375 | |
2376 | void |
2377 | modref_eaf_analysis::analyze_ssa_name (tree name, bool deferred) |
2378 | { |
2379 | imm_use_iterator ui; |
2380 | gimple *use_stmt; |
2381 | int index = SSA_NAME_VERSION (name); |
2382 | |
2383 | if (!deferred) |
2384 | { |
2385 | /* See if value is already computed. */ |
2386 | if (m_lattice[index].known || m_lattice[index].do_dataflow) |
2387 | return; |
2388 | if (m_lattice[index].open) |
2389 | { |
2390 | if (dump_file) |
2391 | fprintf (stream: dump_file, |
2392 | format: "%*sCycle in SSA graph\n" , |
2393 | m_depth * 4, "" ); |
2394 | return; |
2395 | } |
2396 | /* Recursion guard. */ |
2397 | m_lattice[index].init (); |
2398 | if (m_depth == param_modref_max_depth) |
2399 | { |
2400 | if (dump_file) |
2401 | fprintf (stream: dump_file, |
2402 | format: "%*sMax recursion depth reached; postponing\n" , |
2403 | m_depth * 4, "" ); |
2404 | m_deferred_names.safe_push (obj: name); |
2405 | return; |
2406 | } |
2407 | } |
2408 | |
2409 | if (dump_file) |
2410 | { |
2411 | fprintf (stream: dump_file, |
2412 | format: "%*sAnalyzing flags of ssa name: " , m_depth * 4, "" ); |
2413 | print_generic_expr (dump_file, name); |
2414 | fprintf (stream: dump_file, format: "\n" ); |
2415 | } |
2416 | |
2417 | FOR_EACH_IMM_USE_STMT (use_stmt, ui, name) |
2418 | { |
2419 | if (m_lattice[index].flags == 0) |
2420 | break; |
2421 | if (is_gimple_debug (gs: use_stmt)) |
2422 | continue; |
2423 | if (dump_file) |
2424 | { |
2425 | fprintf (stream: dump_file, format: "%*s Analyzing stmt: " , m_depth * 4, "" ); |
2426 | print_gimple_stmt (dump_file, use_stmt, 0); |
2427 | } |
2428 | /* If we see a direct non-debug use, clear unused bit. |
2429 | All dereferences should be accounted below using deref_flags. */ |
2430 | m_lattice[index].merge (f: ~EAF_UNUSED); |
2431 | |
2432 | /* Gimple return may load the return value. |
2433 | Returning name counts as an use by tree-ssa-structalias.cc */ |
2434 | if (greturn *ret = dyn_cast <greturn *> (p: use_stmt)) |
2435 | { |
2436 | /* Returning through return slot is seen as memory write earlier. */ |
2437 | if (DECL_RESULT (current_function_decl) |
2438 | && DECL_BY_REFERENCE (DECL_RESULT (current_function_decl))) |
2439 | ; |
2440 | else if (gimple_return_retval (gs: ret) == name) |
2441 | m_lattice[index].merge (f: ~(EAF_UNUSED | EAF_NOT_RETURNED_DIRECTLY |
2442 | | EAF_NOT_RETURNED_DIRECTLY)); |
2443 | else if (memory_access_to (op: gimple_return_retval (gs: ret), ssa_name: name)) |
2444 | { |
2445 | m_lattice[index].merge_direct_load (); |
2446 | m_lattice[index].merge (f: ~(EAF_UNUSED |
2447 | | EAF_NOT_RETURNED_INDIRECTLY)); |
2448 | } |
2449 | } |
2450 | /* Account for LHS store, arg loads and flags from callee function. */ |
2451 | else if (gcall *call = dyn_cast <gcall *> (p: use_stmt)) |
2452 | { |
2453 | tree callee = gimple_call_fndecl (gs: call); |
2454 | |
2455 | /* IPA PTA internally it treats calling a function as "writing" to |
2456 | the argument space of all functions the function pointer points to |
2457 | (PR101949). We can not drop EAF_NOCLOBBER only when ipa-pta |
2458 | is on since that would allow propagation of this from -fno-ipa-pta |
2459 | to -fipa-pta functions. */ |
2460 | if (gimple_call_fn (gs: use_stmt) == name) |
2461 | m_lattice[index].merge (f: ~(EAF_NO_DIRECT_CLOBBER | EAF_UNUSED)); |
2462 | |
2463 | /* Recursion would require bit of propagation; give up for now. */ |
2464 | if (callee && !m_ipa && recursive_call_p (current_function_decl, |
2465 | callee)) |
2466 | m_lattice[index].merge (f: 0); |
2467 | else |
2468 | { |
2469 | int ecf_flags = gimple_call_flags (call); |
2470 | bool ignore_stores = ignore_stores_p (caller: current_function_decl, |
2471 | flags: ecf_flags); |
2472 | bool ignore_retval = ignore_retval_p (caller: current_function_decl, |
2473 | flags: ecf_flags); |
2474 | |
2475 | /* Handle *name = func (...). */ |
2476 | if (gimple_call_lhs (gs: call) |
2477 | && memory_access_to (op: gimple_call_lhs (gs: call), ssa_name: name)) |
2478 | { |
2479 | m_lattice[index].merge_direct_store (); |
2480 | /* Return slot optimization passes address of |
2481 | LHS to callee via hidden parameter and this |
2482 | may make LHS to escape. See PR 98499. */ |
2483 | if (gimple_call_return_slot_opt_p (s: call) |
2484 | && TREE_ADDRESSABLE (TREE_TYPE (gimple_call_lhs (call)))) |
2485 | { |
2486 | int call_flags = gimple_call_retslot_flags (call); |
2487 | bool isretslot = false; |
2488 | |
2489 | if (DECL_RESULT (current_function_decl) |
2490 | && DECL_BY_REFERENCE |
2491 | (DECL_RESULT (current_function_decl))) |
2492 | isretslot = ssa_default_def |
2493 | (cfun, |
2494 | DECL_RESULT (current_function_decl)) |
2495 | == name; |
2496 | |
2497 | /* Passing returnslot to return slot is special because |
2498 | not_returned and escape has same meaning. |
2499 | However passing arg to return slot is different. If |
2500 | the callee's return slot is returned it means that |
2501 | arg is written to itself which is an escape. |
2502 | Since we do not track the memory it is written to we |
2503 | need to give up on analyzing it. */ |
2504 | if (!isretslot) |
2505 | { |
2506 | if (!(call_flags & (EAF_NOT_RETURNED_DIRECTLY |
2507 | | EAF_UNUSED))) |
2508 | m_lattice[index].merge (f: 0); |
2509 | else gcc_checking_assert |
2510 | (call_flags & (EAF_NOT_RETURNED_INDIRECTLY |
2511 | | EAF_UNUSED)); |
2512 | call_flags = callee_to_caller_flags |
2513 | (call_flags, ignore_stores: false, |
2514 | lattice&: m_lattice[index]); |
2515 | } |
2516 | m_lattice[index].merge (f: call_flags); |
2517 | } |
2518 | } |
2519 | |
2520 | if (gimple_call_chain (gs: call) |
2521 | && (gimple_call_chain (gs: call) == name)) |
2522 | { |
2523 | int call_flags = gimple_call_static_chain_flags (call); |
2524 | if (!ignore_retval && !(call_flags & EAF_UNUSED)) |
2525 | merge_call_lhs_flags |
2526 | (call, arg: -1, name, |
2527 | direct: !(call_flags & EAF_NOT_RETURNED_DIRECTLY), |
2528 | indirect: !(call_flags & EAF_NOT_RETURNED_INDIRECTLY)); |
2529 | call_flags = callee_to_caller_flags |
2530 | (call_flags, ignore_stores, |
2531 | lattice&: m_lattice[index]); |
2532 | if (!(ecf_flags & (ECF_CONST | ECF_NOVOPS))) |
2533 | m_lattice[index].merge (f: call_flags); |
2534 | } |
2535 | |
2536 | /* Process internal functions and right away. */ |
2537 | bool record_ipa = m_ipa && !gimple_call_internal_p (gs: call); |
2538 | |
2539 | /* Handle all function parameters. */ |
2540 | for (unsigned i = 0; |
2541 | i < gimple_call_num_args (gs: call) |
2542 | && m_lattice[index].flags; i++) |
2543 | /* Name is directly passed to the callee. */ |
2544 | if (gimple_call_arg (gs: call, index: i) == name) |
2545 | { |
2546 | int call_flags = gimple_call_arg_flags (call, i); |
2547 | if (!ignore_retval) |
2548 | merge_call_lhs_flags |
2549 | (call, arg: i, name, |
2550 | direct: !(call_flags & (EAF_NOT_RETURNED_DIRECTLY |
2551 | | EAF_UNUSED)), |
2552 | indirect: !(call_flags & (EAF_NOT_RETURNED_INDIRECTLY |
2553 | | EAF_UNUSED))); |
2554 | if (!(ecf_flags & (ECF_CONST | ECF_NOVOPS))) |
2555 | { |
2556 | call_flags = callee_to_caller_flags |
2557 | (call_flags, ignore_stores, |
2558 | lattice&: m_lattice[index]); |
2559 | if (!record_ipa) |
2560 | m_lattice[index].merge (f: call_flags); |
2561 | else |
2562 | m_lattice[index].add_escape_point (call, arg: i, |
2563 | min_flags: call_flags, direct: true); |
2564 | } |
2565 | } |
2566 | /* Name is dereferenced and passed to a callee. */ |
2567 | else if (memory_access_to (op: gimple_call_arg (gs: call, index: i), ssa_name: name)) |
2568 | { |
2569 | int call_flags = deref_flags |
2570 | (flags: gimple_call_arg_flags (call, i), ignore_stores); |
2571 | if (!ignore_retval && !(call_flags & EAF_UNUSED) |
2572 | && !(call_flags & EAF_NOT_RETURNED_DIRECTLY) |
2573 | && !(call_flags & EAF_NOT_RETURNED_INDIRECTLY)) |
2574 | merge_call_lhs_flags (call, arg: i, name, direct: false, indirect: true); |
2575 | if (ecf_flags & (ECF_CONST | ECF_NOVOPS)) |
2576 | m_lattice[index].merge_direct_load (); |
2577 | else |
2578 | { |
2579 | call_flags = callee_to_caller_flags |
2580 | (call_flags, ignore_stores, |
2581 | lattice&: m_lattice[index]); |
2582 | if (!record_ipa) |
2583 | m_lattice[index].merge (f: call_flags); |
2584 | else |
2585 | m_lattice[index].add_escape_point (call, arg: i, |
2586 | min_flags: call_flags, direct: false); |
2587 | } |
2588 | } |
2589 | } |
2590 | } |
2591 | else if (gimple_assign_load_p (use_stmt)) |
2592 | { |
2593 | gassign *assign = as_a <gassign *> (p: use_stmt); |
2594 | /* Memory to memory copy. */ |
2595 | if (gimple_store_p (gs: assign)) |
2596 | { |
2597 | /* Handle *lhs = *name. |
2598 | |
2599 | We do not track memory locations, so assume that value |
2600 | is used arbitrarily. */ |
2601 | if (memory_access_to (op: gimple_assign_rhs1 (gs: assign), ssa_name: name)) |
2602 | m_lattice[index].merge (f: deref_flags (flags: 0, ignore_stores: false)); |
2603 | /* Handle *name = *exp. */ |
2604 | else if (memory_access_to (op: gimple_assign_lhs (gs: assign), ssa_name: name)) |
2605 | m_lattice[index].merge_direct_store (); |
2606 | } |
2607 | /* Handle lhs = *name. */ |
2608 | else if (memory_access_to (op: gimple_assign_rhs1 (gs: assign), ssa_name: name)) |
2609 | { |
2610 | tree lhs = gimple_assign_lhs (gs: assign); |
2611 | merge_with_ssa_name (dest: name, src: lhs, deref: true); |
2612 | } |
2613 | } |
2614 | else if (gimple_store_p (gs: use_stmt)) |
2615 | { |
2616 | gassign *assign = dyn_cast <gassign *> (p: use_stmt); |
2617 | |
2618 | /* Handle *lhs = name. */ |
2619 | if (assign && gimple_assign_rhs1 (gs: assign) == name) |
2620 | { |
2621 | if (dump_file) |
2622 | fprintf (stream: dump_file, format: "%*s ssa name saved to memory\n" , |
2623 | m_depth * 4, "" ); |
2624 | m_lattice[index].merge (f: 0); |
2625 | } |
2626 | /* Handle *name = exp. */ |
2627 | else if (assign |
2628 | && memory_access_to (op: gimple_assign_lhs (gs: assign), ssa_name: name)) |
2629 | { |
2630 | /* In general we can not ignore clobbers because they are |
2631 | barriers for code motion, however after inlining it is safe to |
2632 | do because local optimization passes do not consider clobbers |
2633 | from other functions. |
2634 | Similar logic is in ipa-pure-const.cc. */ |
2635 | if (!cfun->after_inlining || !gimple_clobber_p (s: assign)) |
2636 | m_lattice[index].merge_direct_store (); |
2637 | } |
2638 | /* ASM statements etc. */ |
2639 | else if (!assign) |
2640 | { |
2641 | if (dump_file) |
2642 | fprintf (stream: dump_file, format: "%*s Unhandled store\n" , m_depth * 4, "" ); |
2643 | m_lattice[index].merge (f: 0); |
2644 | } |
2645 | } |
2646 | else if (gassign *assign = dyn_cast <gassign *> (p: use_stmt)) |
2647 | { |
2648 | enum tree_code code = gimple_assign_rhs_code (gs: assign); |
2649 | |
2650 | /* See if operation is a merge as considered by |
2651 | tree-ssa-structalias.cc:find_func_aliases. */ |
2652 | if (!truth_value_p (code) |
2653 | && code != POINTER_DIFF_EXPR |
2654 | && (code != POINTER_PLUS_EXPR |
2655 | || gimple_assign_rhs1 (gs: assign) == name)) |
2656 | { |
2657 | tree lhs = gimple_assign_lhs (gs: assign); |
2658 | merge_with_ssa_name (dest: name, src: lhs, deref: false); |
2659 | } |
2660 | } |
2661 | else if (gphi *phi = dyn_cast <gphi *> (p: use_stmt)) |
2662 | { |
2663 | tree result = gimple_phi_result (gs: phi); |
2664 | merge_with_ssa_name (dest: name, src: result, deref: false); |
2665 | } |
2666 | /* Conditions are not considered escape points |
2667 | by tree-ssa-structalias. */ |
2668 | else if (gimple_code (g: use_stmt) == GIMPLE_COND) |
2669 | ; |
2670 | else |
2671 | { |
2672 | if (dump_file) |
2673 | fprintf (stream: dump_file, format: "%*s Unhandled stmt\n" , m_depth * 4, "" ); |
2674 | m_lattice[index].merge (f: 0); |
2675 | } |
2676 | |
2677 | if (dump_file) |
2678 | { |
2679 | fprintf (stream: dump_file, format: "%*s current flags of " , m_depth * 4, "" ); |
2680 | print_generic_expr (dump_file, name); |
2681 | m_lattice[index].dump (out: dump_file, indent: m_depth * 4 + 4); |
2682 | } |
2683 | } |
2684 | if (dump_file) |
2685 | { |
2686 | fprintf (stream: dump_file, format: "%*sflags of ssa name " , m_depth * 4, "" ); |
2687 | print_generic_expr (dump_file, name); |
2688 | m_lattice[index].dump (out: dump_file, indent: m_depth * 4 + 2); |
2689 | } |
2690 | m_lattice[index].open = false; |
2691 | if (!m_lattice[index].do_dataflow) |
2692 | m_lattice[index].known = true; |
2693 | } |
2694 | |
2695 | /* Propagate info from SRC to DEST. If DEREF it true, assume that SRC |
2696 | is dereferenced. */ |
2697 | |
2698 | void |
2699 | modref_eaf_analysis::merge_with_ssa_name (tree dest, tree src, bool deref) |
2700 | { |
2701 | int index = SSA_NAME_VERSION (dest); |
2702 | int src_index = SSA_NAME_VERSION (src); |
2703 | |
2704 | /* Merging lattice with itself is a no-op. */ |
2705 | if (!deref && src == dest) |
2706 | return; |
2707 | |
2708 | m_depth++; |
2709 | analyze_ssa_name (name: src); |
2710 | m_depth--; |
2711 | if (deref) |
2712 | m_lattice[index].merge_deref (with: m_lattice[src_index], ignore_stores: false); |
2713 | else |
2714 | m_lattice[index].merge (with: m_lattice[src_index]); |
2715 | |
2716 | /* If we failed to produce final solution add an edge to the dataflow |
2717 | graph. */ |
2718 | if (!m_lattice[src_index].known) |
2719 | { |
2720 | modref_lattice::propagate_edge e = {.ssa_name: index, .deref: deref}; |
2721 | |
2722 | if (!m_lattice[src_index].propagate_to.length ()) |
2723 | m_names_to_propagate.safe_push (obj: src_index); |
2724 | m_lattice[src_index].propagate_to.safe_push (obj: e); |
2725 | m_lattice[src_index].changed = true; |
2726 | m_lattice[src_index].do_dataflow = true; |
2727 | if (dump_file) |
2728 | fprintf (stream: dump_file, |
2729 | format: "%*sWill propgate from ssa_name %i to %i%s\n" , |
2730 | m_depth * 4 + 4, |
2731 | "" , src_index, index, deref ? " (deref)" : "" ); |
2732 | } |
2733 | } |
2734 | |
/* In the case we deferred some SSA names, reprocess them.  In the case some
   dataflow edges were introduced, do the actual iterative dataflow.

   The dataflow graph has one node per SSA name that could not be solved
   directly; edges were added by merge_with_ssa_name.  We first compute a
   reverse postorder of that graph and then iterate merging lattices along
   the edges until a fixed point is reached.  */

void
modref_eaf_analysis::propagate ()
{
  int iterations = 0;
  size_t i;
  int index;
  bool changed = true;

  /* Finish analysis of SSA names whose processing was deferred (to break
     cycles while walking use-def chains).  This may add further dataflow
     edges.  */
  while (m_deferred_names.length ())
    {
      tree name = m_deferred_names.pop ();
      if (dump_file)
	fprintf (stream: dump_file, format: "Analyzing deferred SSA name\n" );
      analyze_ssa_name (name, deferred: true);
    }

  /* No dataflow edges: every lattice is already final.  */
  if (!m_names_to_propagate.length ())
    return;
  if (dump_file)
    fprintf (stream: dump_file, format: "Propagating EAF flags\n" );

  /* Compute reverse postorder.  */
  auto_vec <int> rpo;
  /* Entry of the explicit DFS stack: an SSA name version and the current
     position within its propagate_to edge vector.  */
  struct stack_entry
  {
    int name;
    unsigned pos;
  };
  auto_vec <struct stack_entry> stack;
  int pos = m_names_to_propagate.length () - 1;

  rpo.safe_grow (len: m_names_to_propagate.length (), exact: true);
  stack.reserve_exact (nelems: m_names_to_propagate.length ());

  /* We reuse known flag for RPO DFS walk bookkeeping.  */
  if (flag_checking)
    FOR_EACH_VEC_ELT (m_names_to_propagate, i, index)
      gcc_assert (!m_lattice[index].known && m_lattice[index].changed);

  /* Iterative DFS; names are written into RPO from its tail as they are
     finished, which yields reverse postorder.  */
  FOR_EACH_VEC_ELT (m_names_to_propagate, i, index)
    {
      if (!m_lattice[index].known)
	{
	  stack_entry e = {.name: index, .pos: 0};

	  stack.quick_push (obj: e);
	  m_lattice[index].known = true;
	}
      while (stack.length ())
	{
	  bool found = false;
	  int index1 = stack.last ().name;

	  /* Find the next unvisited successor.  Successors with no
	     outgoing edges need not be pushed: they are leaves of the
	     dataflow graph.  */
	  while (stack.last ().pos < m_lattice[index1].propagate_to.length ())
	    {
	      int index2 = m_lattice[index1]
		      .propagate_to[stack.last ().pos].ssa_name;

	      stack.last ().pos++;
	      if (!m_lattice[index2].known
		  && m_lattice[index2].propagate_to.length ())
		{
		  stack_entry e = {.name: index2, .pos: 0};

		  stack.quick_push (obj: e);
		  m_lattice[index2].known = true;
		  found = true;
		  break;
		}
	    }
	  /* All successors visited: the name is finished; record it.  */
	  if (!found
	      && stack.last ().pos == m_lattice[index1].propagate_to.length ())
	    {
	      rpo[pos--] = index1;
	      stack.pop ();
	    }
	}
    }

  /* Perform iterative dataflow.  */
  while (changed)
    {
      changed = false;
      iterations++;
      if (dump_file)
	fprintf (stream: dump_file, format: " iteration %i\n" , iterations);
      FOR_EACH_VEC_ELT (rpo, i, index)
	{
	  if (m_lattice[index].changed)
	    {
	      size_t j;

	      m_lattice[index].changed = false;
	      if (dump_file)
		fprintf (stream: dump_file, format: " Visiting ssa name %i\n" , index);
	      for (j = 0; j < m_lattice[index].propagate_to.length (); j++)
		{
		  bool ch;
		  int target = m_lattice[index].propagate_to[j].ssa_name;
		  bool deref = m_lattice[index].propagate_to[j].deref;

		  if (dump_file)
		    fprintf (stream: dump_file, format: " Propagating flags of ssa name"
			     " %i to %i%s\n" ,
			     index, target, deref ? " (deref)" : "" );
		  m_lattice[target].known = true;
		  if (!m_lattice[index].propagate_to[j].deref)
		    ch = m_lattice[target].merge (with: m_lattice[index]);
		  else
		    ch = m_lattice[target].merge_deref (with: m_lattice[index],
							ignore_stores: false);
		  if (!ch)
		    continue;
		  if (dump_file)
		    {
		      fprintf (stream: dump_file, format: " New lattice: " );
		      m_lattice[target].dump (out: dump_file);
		    }
		  changed = true;
		  m_lattice[target].changed = true;
		}
	    }
	}
    }
  if (dump_file)
    fprintf (stream: dump_file, format: "EAF flags propagated in %i iterations\n" , iterations);
}
2865 | |
2866 | /* Record escape points of PARM_INDEX according to LATTICE. */ |
2867 | |
2868 | void |
2869 | modref_eaf_analysis::record_escape_points (tree name, int parm_index, int flags) |
2870 | { |
2871 | modref_lattice &lattice = m_lattice[SSA_NAME_VERSION (name)]; |
2872 | |
2873 | if (lattice.escape_points.length ()) |
2874 | { |
2875 | escape_point *ep; |
2876 | unsigned int ip; |
2877 | cgraph_node *node = cgraph_node::get (decl: current_function_decl); |
2878 | |
2879 | gcc_assert (m_ipa); |
2880 | FOR_EACH_VEC_ELT (lattice.escape_points, ip, ep) |
2881 | if ((ep->min_flags & flags) != flags) |
2882 | { |
2883 | cgraph_edge *e = node->get_edge (call_stmt: ep->call); |
2884 | struct escape_entry ee = {.parm_index: parm_index, .arg: ep->arg, |
2885 | .min_flags: ep->min_flags, .direct: ep->direct}; |
2886 | |
2887 | escape_summaries->get_create (edge: e)->esc.safe_push (obj: ee); |
2888 | } |
2889 | } |
2890 | } |
2891 | |
/* Determine EAF flags for function parameters
   and fill in SUMMARY/SUMMARY_LTO.  If IPA is true work in IPA mode
   where we also collect escape points.
   PAST_FLAGS, PAST_RETSLOT_FLAGS, PAST_STATIC_CHAIN_FLAGS can be
   used to preserve flags from previous (IPA) run for cases where
   late optimizations changed code in a way we can no longer analyze
   it easily.

   Besides explicit parameters, flags are also determined for the
   by-reference return slot and the static chain, when present.  */

static void
analyze_parms (modref_summary *summary, modref_summary_lto *summary_lto,
	       bool ipa, vec<eaf_flags_t> &past_flags,
	       int past_retslot_flags, int past_static_chain_flags)
{
  unsigned int parm_index = 0;
  unsigned int count = 0;
  int ecf_flags = flags_from_decl_or_type (current_function_decl);
  tree retslot = NULL;
  tree static_chain = NULL;

  /* If there is return slot, look up its SSA name.  */
  if (DECL_RESULT (current_function_decl)
      && DECL_BY_REFERENCE (DECL_RESULT (current_function_decl)))
    retslot = ssa_default_def (cfun, DECL_RESULT (current_function_decl));
  if (cfun->static_chain_decl)
    static_chain = ssa_default_def (cfun, cfun->static_chain_decl);

  /* Count parameters so the arg_flags vectors can be sized later.  */
  for (tree parm = DECL_ARGUMENTS (current_function_decl); parm;
       parm = TREE_CHAIN (parm))
    count++;

  /* Nothing whose flags we could track.  */
  if (!count && !retslot && !static_chain)
    return;

  modref_eaf_analysis eaf_analysis (ipa);

  /* Determine all SSA names we need to know flags for.  */
  for (tree parm = DECL_ARGUMENTS (current_function_decl); parm;
       parm = TREE_CHAIN (parm))
    {
      tree name = ssa_default_def (cfun, parm);
      if (name)
	eaf_analysis.analyze_ssa_name (name);
    }
  if (retslot)
    eaf_analysis.analyze_ssa_name (name: retslot);
  if (static_chain)
    eaf_analysis.analyze_ssa_name (name: static_chain);

  /* Do the dataflow.  */
  eaf_analysis.propagate ();

  /* The "fn spec" attribute may provide additional flags for arguments
     which are combined with the analyzed ones below.  */
  tree attr = lookup_attribute (attr_name: "fn spec" ,
				TYPE_ATTRIBUTES
				  (TREE_TYPE (current_function_decl)));
  attr_fnspec fnspec (attr
		      ? TREE_STRING_POINTER (TREE_VALUE (TREE_VALUE (attr)))
		      : "" );


  /* Store results to summaries.  */
  for (tree parm = DECL_ARGUMENTS (current_function_decl); parm; parm_index++,
       parm = TREE_CHAIN (parm))
    {
      tree name = ssa_default_def (cfun, parm);
      if (!name || has_zero_uses (var: name))
	{
	  /* We do not track non-SSA parameters,
	     but we want to track unused gimple_regs.  */
	  if (!is_gimple_reg (parm))
	    continue;
	  if (summary)
	    {
	      if (parm_index >= summary->arg_flags.length ())
		summary->arg_flags.safe_grow_cleared (len: count, exact: true);
	      summary->arg_flags[parm_index] = EAF_UNUSED;
	    }
	  else if (summary_lto)
	    {
	      if (parm_index >= summary_lto->arg_flags.length ())
		summary_lto->arg_flags.safe_grow_cleared (len: count, exact: true);
	      summary_lto->arg_flags[parm_index] = EAF_UNUSED;
	    }
	  continue;
	}
      int flags = eaf_analysis.get_ssa_name_flags (name);
      int attr_flags = fnspec.arg_eaf_flags (i: parm_index);

      if (dump_file && (flags | attr_flags) != flags && !(flags & EAF_UNUSED))
	{
	  fprintf (stream: dump_file,
		   format: " Flags for param %i combined with fnspec flags:" ,
		   (int)parm_index);
	  dump_eaf_flags (out: dump_file, flags: attr_flags, newline: false);
	  fprintf (stream: dump_file, format: " determined: " );
	  dump_eaf_flags (out: dump_file, flags, newline: true);
	}
      flags |= attr_flags;

      /* Eliminate useless flags so we do not end up storing unnecessary
	 summaries.  */

      flags = remove_useless_eaf_flags
		 (eaf_flags: flags, ecf_flags,
		  VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
      /* Combine with flags determined by the earlier IPA run, unless the
	 parameter is known to be unused now.  */
      if (past_flags.length () > parm_index)
	{
	  int past = past_flags[parm_index];
	  past = remove_useless_eaf_flags
		   (eaf_flags: past, ecf_flags,
		    VOID_TYPE_P (TREE_TYPE
			 (TREE_TYPE (current_function_decl))));
	  if (dump_file && (flags | past) != flags && !(flags & EAF_UNUSED))
	    {
	      fprintf (stream: dump_file,
		       format: " Flags for param %i combined with IPA pass:" ,
		       (int)parm_index);
	      dump_eaf_flags (out: dump_file, flags: past, newline: false);
	      fprintf (stream: dump_file, format: " determined: " );
	      dump_eaf_flags (out: dump_file, flags, newline: true);
	    }
	  if (!(flags & EAF_UNUSED))
	    flags |= past;
	}

      if (flags)
	{
	  if (summary)
	    {
	      if (parm_index >= summary->arg_flags.length ())
		summary->arg_flags.safe_grow_cleared (len: count, exact: true);
	      summary->arg_flags[parm_index] = flags;
	    }
	  else if (summary_lto)
	    {
	      if (parm_index >= summary_lto->arg_flags.length ())
		summary_lto->arg_flags.safe_grow_cleared (len: count, exact: true);
	      summary_lto->arg_flags[parm_index] = flags;
	    }
	  eaf_analysis.record_escape_points (name, parm_index, flags);
	}
    }
  /* Handle the by-reference return slot the same way as a parameter.  */
  if (retslot)
    {
      int flags = eaf_analysis.get_ssa_name_flags (name: retslot);
      int past = past_retslot_flags;

      flags = remove_useless_eaf_flags (eaf_flags: flags, ecf_flags, returns_void: false);
      past = remove_useless_eaf_flags
	       (eaf_flags: past, ecf_flags,
		VOID_TYPE_P (TREE_TYPE
		     (TREE_TYPE (current_function_decl))));
      if (dump_file && (flags | past) != flags && !(flags & EAF_UNUSED))
	{
	  fprintf (stream: dump_file,
		   format: " Retslot flags combined with IPA pass:" );
	  dump_eaf_flags (out: dump_file, flags: past, newline: false);
	  fprintf (stream: dump_file, format: " determined: " );
	  dump_eaf_flags (out: dump_file, flags, newline: true);
	}
      if (!(flags & EAF_UNUSED))
	flags |= past;
      if (flags)
	{
	  if (summary)
	    summary->retslot_flags = flags;
	  if (summary_lto)
	    summary_lto->retslot_flags = flags;
	  eaf_analysis.record_escape_points (name: retslot,
					     parm_index: MODREF_RETSLOT_PARM, flags);
	}
    }
  /* And likewise for the static chain of nested functions.  */
  if (static_chain)
    {
      int flags = eaf_analysis.get_ssa_name_flags (name: static_chain);
      int past = past_static_chain_flags;

      flags = remove_useless_eaf_flags (eaf_flags: flags, ecf_flags, returns_void: false);
      past = remove_useless_eaf_flags
	       (eaf_flags: past, ecf_flags,
		VOID_TYPE_P (TREE_TYPE
		     (TREE_TYPE (current_function_decl))));
      if (dump_file && (flags | past) != flags && !(flags & EAF_UNUSED))
	{
	  fprintf (stream: dump_file,
		   format: " Static chain flags combined with IPA pass:" );
	  dump_eaf_flags (out: dump_file, flags: past, newline: false);
	  fprintf (stream: dump_file, format: " determined: " );
	  dump_eaf_flags (out: dump_file, flags, newline: true);
	}
      if (!(flags & EAF_UNUSED))
	flags |= past;
      if (flags)
	{
	  if (summary)
	    summary->static_chain_flags = flags;
	  if (summary_lto)
	    summary_lto->static_chain_flags = flags;
	  eaf_analysis.record_escape_points (name: static_chain,
					     parm_index: MODREF_STATIC_CHAIN_PARM,
					     flags);
	}
    }
}
3095 | |
/* Analyze function.  IPA indicates whether we're running in local mode
   (false) or the IPA mode (true).
   Return true if fixup cfg is needed after the pass.

   Computes load/store summaries and EAF flags and stores them into
   optimization_summaries (local mode) or summaries/summaries_lto
   (IPA mode).  Summaries that turn out to carry no useful information
   are removed again before returning.  */

static bool
analyze_function (bool ipa)
{
  bool fixup_cfg = false;
  if (dump_file)
    fprintf (stream: dump_file, format: "\n\nmodref analyzing '%s' (ipa=%i)%s%s\n" ,
	     cgraph_node::get (decl: current_function_decl)->dump_name (), ipa,
	     TREE_READONLY (current_function_decl) ? " (const)" : "" ,
	     DECL_PURE_P (current_function_decl) ? " (pure)" : "" );

  /* Don't analyze this function if it's compiled with -fno-strict-aliasing.  */
  if (!flag_ipa_modref
      || lookup_attribute (attr_name: "noipa" , DECL_ATTRIBUTES (current_function_decl)))
    return false;

  /* Compute no-LTO summaries when local optimization is going to happen.  */
  bool nolto = (!ipa || ((!flag_lto || flag_fat_lto_objects) && !in_lto_p)
		|| (in_lto_p && !flag_wpa
		    && flag_incremental_link != INCREMENTAL_LINK_LTO));
  /* Compute LTO when LTO streaming is going to happen.  */
  bool lto = ipa && ((flag_lto && !in_lto_p)
		     || flag_wpa
		     || flag_incremental_link == INCREMENTAL_LINK_LTO);
  cgraph_node *fnode = cgraph_node::get (decl: current_function_decl);

  modref_summary *summary = NULL;
  modref_summary_lto *summary_lto = NULL;

  /* Flags recorded by a previous (IPA) run of the pass, preserved so they
     can be combined with and compared against the fresh analysis.  */
  bool past_flags_known = false;
  auto_vec <eaf_flags_t> past_flags;
  int past_retslot_flags = 0;
  int past_static_chain_flags = 0;

  /* Initialize the summary.
     If we run in local mode there is possibly pre-existing summary from
     IPA pass.  Dump it so it is easy to compare if mod-ref info has
     improved.  */
  if (!ipa)
    {
      if (!optimization_summaries)
	optimization_summaries = modref_summaries::create_ggc (symtab);
      else /* Remove existing summary if we are re-running the pass.  */
	{
	  summary = optimization_summaries->get (node: fnode);
	  if (summary != NULL
	      && summary->loads)
	    {
	      if (dump_file)
		{
		  fprintf (stream: dump_file, format: "Past summary:\n" );
		  optimization_summaries->get (node: fnode)->dump (out: dump_file);
		}
	      /* Save the old flags before the summary is removed.  */
	      past_flags.reserve_exact (nelems: summary->arg_flags.length ());
	      past_flags.splice (src: summary->arg_flags);
	      past_retslot_flags = summary->retslot_flags;
	      past_static_chain_flags = summary->static_chain_flags;
	      past_flags_known = true;
	    }
	  optimization_summaries->remove (node: fnode);
	}
      summary = optimization_summaries->get_create (node: fnode);
      gcc_checking_assert (nolto && !lto);
    }
  /* In IPA mode we analyze every function precisely once.  Assert that.  */
  else
    {
      if (nolto)
	{
	  if (!summaries)
	    summaries = modref_summaries::create_ggc (symtab);
	  else
	    summaries->remove (node: fnode);
	  summary = summaries->get_create (node: fnode);
	}
      if (lto)
	{
	  if (!summaries_lto)
	    summaries_lto = modref_summaries_lto::create_ggc (symtab);
	  else
	    summaries_lto->remove (node: fnode);
	  summary_lto = summaries_lto->get_create (node: fnode);
	}
      if (!fnspec_summaries)
	fnspec_summaries = new fnspec_summaries_t (symtab);
      if (!escape_summaries)
	escape_summaries = new escape_summaries_t (symtab);
     }


  /* Create and initialize summary for F.
     Note that summaries may be already allocated from previous
     run of the pass.  */
  if (nolto)
    {
      gcc_assert (!summary->loads);
      summary->loads = modref_records::create_ggc ();
      gcc_assert (!summary->stores);
      summary->stores = modref_records::create_ggc ();
      summary->writes_errno = false;
      summary->side_effects = false;
      summary->nondeterministic = false;
      summary->calls_interposable = false;
    }
  if (lto)
    {
      gcc_assert (!summary_lto->loads);
      summary_lto->loads = modref_records_lto::create_ggc ();
      gcc_assert (!summary_lto->stores);
      summary_lto->stores = modref_records_lto::create_ggc ();
      summary_lto->writes_errno = false;
      summary_lto->side_effects = false;
      summary_lto->nondeterministic = false;
      summary_lto->calls_interposable = false;
    }

  /* Compute EAF flags of parameters, return slot and static chain.  */
  analyze_parms (summary, summary_lto, ipa,
		 past_flags, past_retslot_flags, past_static_chain_flags);

  /* Collect loads, stores and kills.  */
  {
    modref_access_analysis analyzer (ipa, summary, summary_lto);
    analyzer.analyze ();
  }

  /* In local mode a function with no stores (and possibly no loads) can be
     promoted to pure or const right away.  */
  if (!ipa && flag_ipa_pure_const)
    {
      if (!summary->stores->every_base && !summary->stores->bases
	  && !summary->nondeterministic)
	{
	  if (!summary->loads->every_base && !summary->loads->bases
	      && !summary->calls_interposable)
	    fixup_cfg = ipa_make_function_const (fnode,
						 summary->side_effects, true);
	  else
	    fixup_cfg = ipa_make_function_pure (fnode,
						summary->side_effects, true);
	}
    }
  /* Drop summaries that carry no information beyond what the ECF flags
     already imply, to save memory.  */
  int ecf_flags = flags_from_decl_or_type (current_function_decl);
  if (summary && !summary->useful_p (ecf_flags))
    {
      if (!ipa)
	optimization_summaries->remove (node: fnode);
      else
	summaries->remove (node: fnode);
      summary = NULL;
    }
  if (summary)
    summary->finalize (fun: current_function_decl);
  if (summary_lto && !summary_lto->useful_p (ecf_flags))
    {
      summaries_lto->remove (node: fnode);
      summary_lto = NULL;
    }

  if (ipa && !summary && !summary_lto)
    remove_modref_edge_summaries (node: fnode);

  if (dump_file)
    {
      fprintf (stream: dump_file, format: " - modref done with result: tracked.\n" );
      if (summary)
	summary->dump (out: dump_file);
      if (summary_lto)
	summary_lto->dump (out: dump_file);
      dump_modref_edge_summaries (out: dump_file, node: fnode, depth: 2);
      /* To simplify debugging, compare IPA and local solutions.  */
      if (past_flags_known && summary)
	{
	  size_t len = summary->arg_flags.length ();

	  if (past_flags.length () > len)
	    len = past_flags.length ();
	  for (size_t i = 0; i < len; i++)
	    {
	      int old_flags = i < past_flags.length () ? past_flags[i] : 0;
	      int new_flags = i < summary->arg_flags.length ()
			      ? summary->arg_flags[i] : 0;
	      old_flags = remove_useless_eaf_flags
		(eaf_flags: old_flags, ecf_flags: flags_from_decl_or_type (current_function_decl),
		 VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
	      if (old_flags != new_flags)
		{
		  /* The local solution must never be weaker than the IPA
		     one; assert that flags only improved.  */
		  if ((old_flags & ~new_flags) == 0
		      || (new_flags & EAF_UNUSED))
		    fprintf (stream: dump_file, format: "  Flags for param %i improved:" ,
			     (int)i);
		  else
		    gcc_unreachable ();
		  dump_eaf_flags (out: dump_file, flags: old_flags, newline: false);
		  fprintf (stream: dump_file, format: " -> " );
		  dump_eaf_flags (out: dump_file, flags: new_flags, newline: true);
		}
	    }
	  past_retslot_flags = remove_useless_eaf_flags
	    (eaf_flags: past_retslot_flags,
	     ecf_flags: flags_from_decl_or_type (current_function_decl),
	     VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
	  if (past_retslot_flags != summary->retslot_flags)
	    {
	      if ((past_retslot_flags & ~summary->retslot_flags) == 0
		  || (summary->retslot_flags & EAF_UNUSED))
		fprintf (stream: dump_file, format: "  Flags for retslot improved:" );
	      else
		gcc_unreachable ();
	      dump_eaf_flags (out: dump_file, flags: past_retslot_flags, newline: false);
	      fprintf (stream: dump_file, format: " -> " );
	      dump_eaf_flags (out: dump_file, flags: summary->retslot_flags, newline: true);
	    }
	  past_static_chain_flags = remove_useless_eaf_flags
	    (eaf_flags: past_static_chain_flags,
	     ecf_flags: flags_from_decl_or_type (current_function_decl),
	     VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
	  if (past_static_chain_flags != summary->static_chain_flags)
	    {
	      if ((past_static_chain_flags & ~summary->static_chain_flags) == 0
		  || (summary->static_chain_flags & EAF_UNUSED))
		fprintf (stream: dump_file, format: "  Flags for static chain improved:" );
	      else
		gcc_unreachable ();
	      dump_eaf_flags (out: dump_file, flags: past_static_chain_flags, newline: false);
	      fprintf (stream: dump_file, format: " -> " );
	      dump_eaf_flags (out: dump_file, flags: summary->static_chain_flags, newline: true);
	    }
	}
      /* The summary was dropped as useless; report any past flags that
	 were lost.  */
      else if (past_flags_known && !summary)
	{
	  for (size_t i = 0; i < past_flags.length (); i++)
	    {
	      int old_flags = past_flags[i];
	      old_flags = remove_useless_eaf_flags
		(eaf_flags: old_flags, ecf_flags: flags_from_decl_or_type (current_function_decl),
		 VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
	      if (old_flags)
		{
		  fprintf (stream: dump_file, format: "  Flags for param %i worsened:" ,
			   (int)i);
		  dump_eaf_flags (out: dump_file, flags: old_flags, newline: false);
		  fprintf (stream: dump_file, format: " -> \n" );
		}
	    }
	  past_retslot_flags = remove_useless_eaf_flags
	    (eaf_flags: past_retslot_flags,
	     ecf_flags: flags_from_decl_or_type (current_function_decl),
	     VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
	  if (past_retslot_flags)
	    {
	      fprintf (stream: dump_file, format: "  Flags for retslot worsened:" );
	      dump_eaf_flags (out: dump_file, flags: past_retslot_flags, newline: false);
	      fprintf (stream: dump_file, format: " ->\n" );
	    }
	  past_static_chain_flags = remove_useless_eaf_flags
	    (eaf_flags: past_static_chain_flags,
	     ecf_flags: flags_from_decl_or_type (current_function_decl),
	     VOID_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl))));
	  if (past_static_chain_flags)
	    {
	      fprintf (stream: dump_file, format: "  Flags for static chain worsened:" );
	      dump_eaf_flags (out: dump_file, flags: past_static_chain_flags, newline: false);
	      fprintf (stream: dump_file, format: " ->\n" );
	    }
	}
    }
  return fixup_cfg;
}
3364 | |
3365 | /* Callback for generate_summary. */ |
3366 | |
3367 | static void |
3368 | modref_generate (void) |
3369 | { |
3370 | struct cgraph_node *node; |
3371 | FOR_EACH_FUNCTION_WITH_GIMPLE_BODY (node) |
3372 | { |
3373 | function *f = DECL_STRUCT_FUNCTION (node->decl); |
3374 | if (!f) |
3375 | continue; |
3376 | push_cfun (new_cfun: f); |
3377 | analyze_function (ipa: true); |
3378 | pop_cfun (); |
3379 | } |
3380 | } |
3381 | |
3382 | } /* ANON namespace. */ |
3383 | |
/* Debugging helper.  Print EAF flags FLAGS to stderr via dump_eaf_flags;
   meant to be called by hand from a debugger.  */

void
debug_eaf_flags (int flags)
{
  dump_eaf_flags (stderr, flags, newline: true);
}
3391 | |
3392 | /* Called when a new function is inserted to callgraph late. */ |
3393 | |
3394 | void |
3395 | modref_summaries::insert (struct cgraph_node *node, modref_summary *) |
3396 | { |
3397 | /* Local passes ought to be executed by the pass manager. */ |
3398 | if (this == optimization_summaries) |
3399 | { |
3400 | optimization_summaries->remove (node); |
3401 | return; |
3402 | } |
3403 | if (!DECL_STRUCT_FUNCTION (node->decl) |
3404 | || !opt_for_fn (node->decl, flag_ipa_modref)) |
3405 | { |
3406 | summaries->remove (node); |
3407 | return; |
3408 | } |
3409 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
3410 | analyze_function (ipa: true); |
3411 | pop_cfun (); |
3412 | } |
3413 | |
3414 | /* Called when a new function is inserted to callgraph late. */ |
3415 | |
3416 | void |
3417 | modref_summaries_lto::insert (struct cgraph_node *node, modref_summary_lto *) |
3418 | { |
3419 | /* We do not support adding new function when IPA information is already |
3420 | propagated. This is done only by SIMD cloning that is not very |
3421 | critical. */ |
3422 | if (!DECL_STRUCT_FUNCTION (node->decl) |
3423 | || !opt_for_fn (node->decl, flag_ipa_modref) |
3424 | || propagated) |
3425 | { |
3426 | summaries_lto->remove (node); |
3427 | return; |
3428 | } |
3429 | push_cfun (DECL_STRUCT_FUNCTION (node->decl)); |
3430 | analyze_function (ipa: true); |
3431 | pop_cfun (); |
3432 | } |
3433 | |
3434 | /* Called when new clone is inserted to callgraph late. */ |
3435 | |
3436 | void |
3437 | modref_summaries::duplicate (cgraph_node *, cgraph_node *dst, |
3438 | modref_summary *src_data, |
3439 | modref_summary *dst_data) |
3440 | { |
3441 | /* Do not duplicate optimization summaries; we do not handle parameter |
3442 | transforms on them. */ |
3443 | if (this == optimization_summaries) |
3444 | { |
3445 | optimization_summaries->remove (node: dst); |
3446 | return; |
3447 | } |
3448 | dst_data->stores = modref_records::create_ggc (); |
3449 | dst_data->stores->copy_from (other: src_data->stores); |
3450 | dst_data->loads = modref_records::create_ggc (); |
3451 | dst_data->loads->copy_from (other: src_data->loads); |
3452 | dst_data->kills.reserve_exact (nelems: src_data->kills.length ()); |
3453 | dst_data->kills.splice (src: src_data->kills); |
3454 | dst_data->writes_errno = src_data->writes_errno; |
3455 | dst_data->side_effects = src_data->side_effects; |
3456 | dst_data->nondeterministic = src_data->nondeterministic; |
3457 | dst_data->calls_interposable = src_data->calls_interposable; |
3458 | if (src_data->arg_flags.length ()) |
3459 | dst_data->arg_flags = src_data->arg_flags.copy (); |
3460 | dst_data->retslot_flags = src_data->retslot_flags; |
3461 | dst_data->static_chain_flags = src_data->static_chain_flags; |
3462 | } |
3463 | |
3464 | /* Called when new clone is inserted to callgraph late. */ |
3465 | |
3466 | void |
3467 | modref_summaries_lto::duplicate (cgraph_node *, cgraph_node *, |
3468 | modref_summary_lto *src_data, |
3469 | modref_summary_lto *dst_data) |
3470 | { |
3471 | /* Be sure that no further cloning happens after ipa-modref. If it does |
3472 | we will need to update signatures for possible param changes. */ |
3473 | gcc_checking_assert (!((modref_summaries_lto *)summaries_lto)->propagated); |
3474 | dst_data->stores = modref_records_lto::create_ggc (); |
3475 | dst_data->stores->copy_from (other: src_data->stores); |
3476 | dst_data->loads = modref_records_lto::create_ggc (); |
3477 | dst_data->loads->copy_from (other: src_data->loads); |
3478 | dst_data->kills.reserve_exact (nelems: src_data->kills.length ()); |
3479 | dst_data->kills.splice (src: src_data->kills); |
3480 | dst_data->writes_errno = src_data->writes_errno; |
3481 | dst_data->side_effects = src_data->side_effects; |
3482 | dst_data->nondeterministic = src_data->nondeterministic; |
3483 | dst_data->calls_interposable = src_data->calls_interposable; |
3484 | if (src_data->arg_flags.length ()) |
3485 | dst_data->arg_flags = src_data->arg_flags.copy (); |
3486 | dst_data->retslot_flags = src_data->retslot_flags; |
3487 | dst_data->static_chain_flags = src_data->static_chain_flags; |
3488 | } |
3489 | |
3490 | namespace |
3491 | { |
/* Definition of the modref pass on GIMPLE.  */
const pass_data pass_data_modref = {
  .type: GIMPLE_PASS,			/* type */
  .name: "modref" ,			/* name */
  .optinfo_flags: OPTGROUP_IPA,		/* optinfo_flags */
  .tv_id: TV_TREE_MODREF,		/* tv_id */
  .properties_required: (PROP_cfg | PROP_ssa),	/* properties_required */
  .properties_provided: 0,		/* properties_provided */
  .properties_destroyed: 0,		/* properties_destroyed */
  .todo_flags_start: 0,			/* todo_flags_start */
  .todo_flags_finish: 0,		/* todo_flags_finish */
};
3504 | |
/* The local (GIMPLE) modref pass; analyzes a single function body.
   Gated on -fipa-modref; the IPA variant is pass_ipa_modref below.  */
class pass_modref : public gimple_opt_pass
{
public:
  pass_modref (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_modref, ctxt) {}

  /* opt_pass methods: */
  /* The pass is scheduled more than once (early and late), so it must
     be clonable.  */
  opt_pass *clone () final override
  {
    return new pass_modref (m_ctxt);
  }
  /* Enabled by -fipa-modref.  */
  bool gate (function *) final override
  {
    return flag_ipa_modref;
  }
  unsigned int execute (function *) final override;
};
3522 | |
3523 | /* Encode TT to the output block OB using the summary streaming API. */ |
3524 | |
3525 | static void |
3526 | write_modref_records (modref_records_lto *tt, struct output_block *ob) |
3527 | { |
3528 | streamer_write_uhwi (ob, tt->every_base); |
3529 | streamer_write_uhwi (ob, vec_safe_length (v: tt->bases)); |
3530 | for (auto base_node : tt->bases) |
3531 | { |
3532 | stream_write_tree (ob, base_node->base, true); |
3533 | |
3534 | streamer_write_uhwi (ob, base_node->every_ref); |
3535 | streamer_write_uhwi (ob, vec_safe_length (v: base_node->refs)); |
3536 | |
3537 | for (auto ref_node : base_node->refs) |
3538 | { |
3539 | stream_write_tree (ob, ref_node->ref, true); |
3540 | streamer_write_uhwi (ob, ref_node->every_access); |
3541 | streamer_write_uhwi (ob, vec_safe_length (v: ref_node->accesses)); |
3542 | |
3543 | for (auto access_node : ref_node->accesses) |
3544 | access_node.stream_out (ob); |
3545 | } |
3546 | } |
3547 | } |
3548 | |
/* Read a modref_tree from the input block IB using the data from DATA_IN.
   This assumes that the tree was encoded using write_modref_records.
   Either NOLTO_RET or LTO_RET (or both) is initialized by the tree depending
   whether LTO streaming is expected or not.  DECL supplies the per-function
   parameters bounding the size of the rebuilt trees.  */

static void
read_modref_records (tree decl,
		     lto_input_block *ib, struct data_in *data_in,
		     modref_records **nolto_ret,
		     modref_records_lto **lto_ret)
{
  /* Size limits for the rebuilt access trees; reads may exceed what the
     writer used, so each insert is bounded again here.  */
  size_t max_bases = opt_for_fn (decl, param_modref_max_bases);
  size_t max_refs = opt_for_fn (decl, param_modref_max_refs);
  size_t max_accesses = opt_for_fn (decl, param_modref_max_accesses);

  if (lto_ret)
    *lto_ret = modref_records_lto::create_ggc ();
  if (nolto_ret)
    *nolto_ret = modref_records::create_ggc ();
  gcc_checking_assert (lto_ret || nolto_ret);

  /* Stream layout (must mirror write_modref_records): collapsed flag,
     count, then the entries.  A collapsed level streams count == 0.  */
  size_t every_base = streamer_read_uhwi (ib);
  size_t nbase = streamer_read_uhwi (ib);

  gcc_assert (!every_base || nbase == 0);
  if (every_base)
    {
      if (nolto_ret)
	(*nolto_ret)->collapse ();
      if (lto_ret)
	(*lto_ret)->collapse ();
    }
  for (size_t i = 0; i < nbase; i++)
    {
      tree base_tree = stream_read_tree (ib, data_in);
      modref_base_node <alias_set_type> *nolto_base_node = NULL;
      modref_base_node <tree> *lto_base_node = NULL;

      /* At stream in time we have LTO alias info.  Check if we streamed in
	 something obviously unnecessary.  Do not glob types by alias sets;
	 it is not 100% clear that ltrans types will get merged same way.
	 Types may get refined based on ODR type conflicts.  */
      if (base_tree && !get_alias_set (base_tree))
	{
	  if (dump_file)
	    {
	      fprintf (stream: dump_file, format: "Streamed in alias set 0 type " );
	      print_generic_expr (dump_file, base_tree);
	      fprintf (stream: dump_file, format: "\n" );
	    }
	  base_tree = NULL;
	}

      /* The non-LTO tree is keyed by alias set (computed now), the LTO
	 tree by the type itself.  */
      if (nolto_ret)
	nolto_base_node = (*nolto_ret)->insert_base (base: base_tree
						     ? get_alias_set (base_tree)
						     : 0, ref: 0, INT_MAX);
      if (lto_ret)
	lto_base_node = (*lto_ret)->insert_base (base: base_tree, ref: 0, max_bases);
      size_t every_ref = streamer_read_uhwi (ib);
      size_t nref = streamer_read_uhwi (ib);

      gcc_assert (!every_ref || nref == 0);
      if (every_ref)
	{
	  if (nolto_base_node)
	    nolto_base_node->collapse ();
	  if (lto_base_node)
	    lto_base_node->collapse ();
	}
      for (size_t j = 0; j < nref; j++)
	{
	  tree ref_tree = stream_read_tree (ib, data_in);

	  /* Same alias-set-0 sanity filtering as for bases above.  */
	  if (ref_tree && !get_alias_set (ref_tree))
	    {
	      if (dump_file)
		{
		  fprintf (stream: dump_file, format: "Streamed in alias set 0 type " );
		  print_generic_expr (dump_file, ref_tree);
		  fprintf (stream: dump_file, format: "\n" );
		}
	      ref_tree = NULL;
	    }

	  modref_ref_node <alias_set_type> *nolto_ref_node = NULL;
	  modref_ref_node <tree> *lto_ref_node = NULL;

	  if (nolto_base_node)
	    nolto_ref_node
	      = nolto_base_node->insert_ref (ref: ref_tree
					     ? get_alias_set (ref_tree) : 0,
					     max_refs);
	  if (lto_base_node)
	    lto_ref_node = lto_base_node->insert_ref (ref: ref_tree, max_refs);

	  size_t every_access = streamer_read_uhwi (ib);
	  size_t naccesses = streamer_read_uhwi (ib);

	  if (nolto_ref_node && every_access)
	    nolto_ref_node->collapse ();
	  if (lto_ref_node && every_access)
	    lto_ref_node->collapse ();

	  /* Accesses must still be consumed from the stream even if the
	     ref node collapsed; insert_access handles a NULL-ish target
	     via the node checks above.  */
	  for (size_t k = 0; k < naccesses; k++)
	    {
	      modref_access_node a = modref_access_node::stream_in (ib);
	      if (nolto_ref_node)
		nolto_ref_node->insert_access (a, max_accesses, record_adjustments: false);
	      if (lto_ref_node)
		lto_ref_node->insert_access (a, max_accesses, record_adjustments: false);
	    }
	}
    }
  /* Drop redundant entries possibly created by bounded inserts.  */
  if (lto_ret)
    (*lto_ret)->cleanup ();
  if (nolto_ret)
    (*nolto_ret)->cleanup ();
}
3668 | |
3669 | /* Write ESUM to BP. */ |
3670 | |
3671 | static void |
3672 | modref_write_escape_summary (struct bitpack_d *bp, escape_summary *esum) |
3673 | { |
3674 | if (!esum) |
3675 | { |
3676 | bp_pack_var_len_unsigned (bp, 0); |
3677 | return; |
3678 | } |
3679 | bp_pack_var_len_unsigned (bp, esum->esc.length ()); |
3680 | unsigned int i; |
3681 | escape_entry *ee; |
3682 | FOR_EACH_VEC_ELT (esum->esc, i, ee) |
3683 | { |
3684 | bp_pack_var_len_int (bp, ee->parm_index); |
3685 | bp_pack_var_len_unsigned (bp, ee->arg); |
3686 | bp_pack_var_len_unsigned (bp, ee->min_flags); |
3687 | bp_pack_value (bp, val: ee->direct, nbits: 1); |
3688 | } |
3689 | } |
3690 | |
3691 | /* Read escape summary for E from BP. */ |
3692 | |
3693 | static void |
3694 | modref_read_escape_summary (struct bitpack_d *bp, cgraph_edge *e) |
3695 | { |
3696 | unsigned int n = bp_unpack_var_len_unsigned (bp); |
3697 | if (!n) |
3698 | return; |
3699 | escape_summary *esum = escape_summaries->get_create (edge: e); |
3700 | esum->esc.reserve_exact (nelems: n); |
3701 | for (unsigned int i = 0; i < n; i++) |
3702 | { |
3703 | escape_entry ee; |
3704 | ee.parm_index = bp_unpack_var_len_int (bp); |
3705 | ee.arg = bp_unpack_var_len_unsigned (bp); |
3706 | ee.min_flags = bp_unpack_var_len_unsigned (bp); |
3707 | ee.direct = bp_unpack_value (bp, nbits: 1); |
3708 | esum->esc.quick_push (obj: ee); |
3709 | } |
3710 | } |
3711 | |
/* Callback for write_summary.  Streams modref summaries of all useful
   definitions in the symtab encoder; layout must stay in sync with
   read_section.  */

static void
modref_write ()
{
  struct output_block *ob = create_output_block (LTO_section_ipa_modref);
  lto_symtab_encoder_t encoder = ob->decl_state->symtab_node_encoder;
  unsigned int count = 0;
  int i;

  /* Without LTO summaries emit an empty section so readers do not fail.  */
  if (!summaries_lto)
    {
      streamer_write_uhwi (ob, 0);
      streamer_write_char_stream (obs: ob->main_stream, c: 0);
      produce_asm (ob, NULL);
      destroy_output_block (ob);
      return;
    }

  /* First pass: count definitions whose summary is useful, since the
     count is streamed before the entries.  */
  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, ref: i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (p: snode);
      modref_summary_lto *r;

      if (cnode && cnode->definition && !cnode->alias
	  && (r = summaries_lto->get (node: cnode))
	  && r->useful_p (ecf_flags: flags_from_decl_or_type (cnode->decl)))
	count++;
    }
  streamer_write_uhwi (ob, count);

  /* Second pass: stream each useful summary.  */
  for (i = 0; i < lto_symtab_encoder_size (encoder); i++)
    {
      symtab_node *snode = lto_symtab_encoder_deref (encoder, ref: i);
      cgraph_node *cnode = dyn_cast <cgraph_node *> (p: snode);

      if (cnode && cnode->definition && !cnode->alias)
	{
	  modref_summary_lto *r = summaries_lto->get (node: cnode);

	  if (!r || !r->useful_p (ecf_flags: flags_from_decl_or_type (cnode->decl)))
	    continue;

	  streamer_write_uhwi (ob, lto_symtab_encoder_encode (encoder, cnode));

	  /* EAF flags for arguments, return slot and static chain.  */
	  streamer_write_uhwi (ob, r->arg_flags.length ());
	  for (unsigned int i = 0; i < r->arg_flags.length (); i++)
	    streamer_write_uhwi (ob, r->arg_flags[i]);
	  streamer_write_uhwi (ob, r->retslot_flags);
	  streamer_write_uhwi (ob, r->static_chain_flags);

	  /* Access trees: loads first, then stores, then kills.  */
	  write_modref_records (tt: r->loads, ob);
	  write_modref_records (tt: r->stores, ob);
	  streamer_write_uhwi (ob, r->kills.length ());
	  for (auto kill : r->kills)
	    kill.stream_out (ob);

	  /* Scalar summary bits, bit-packed.  */
	  struct bitpack_d bp = bitpack_create (s: ob->main_stream);
	  bp_pack_value (bp: &bp, val: r->writes_errno, nbits: 1);
	  bp_pack_value (bp: &bp, val: r->side_effects, nbits: 1);
	  bp_pack_value (bp: &bp, val: r->nondeterministic, nbits: 1);
	  bp_pack_value (bp: &bp, val: r->calls_interposable, nbits: 1);
	  /* Edge summaries (fnspec + escape) are streamed only outside WPA;
	     read side guards the matching reads with !flag_ltrans.  */
	  if (!flag_wpa)
	    {
	      for (cgraph_edge *e = cnode->indirect_calls;
		   e; e = e->next_callee)
		{
		  class fnspec_summary *sum = fnspec_summaries->get (edge: e);
		  bp_pack_value (bp: &bp, val: sum != NULL, nbits: 1);
		  if (sum)
		    bp_pack_string (ob, &bp, sum->fnspec, true);
		  class escape_summary *esum = escape_summaries->get (edge: e);
		  modref_write_escape_summary (bp: &bp,esum);
		}
	      for (cgraph_edge *e = cnode->callees; e; e = e->next_callee)
		{
		  class fnspec_summary *sum = fnspec_summaries->get (edge: e);
		  bp_pack_value (bp: &bp, val: sum != NULL, nbits: 1);
		  if (sum)
		    bp_pack_string (ob, &bp, sum->fnspec, true);
		  class escape_summary *esum = escape_summaries->get (edge: e);
		  modref_write_escape_summary (bp: &bp,esum);
		}
	    }
	  streamer_write_bitpack (bp: &bp);
	}
    }
  streamer_write_char_stream (obs: ob->main_stream, c: 0);
  produce_asm (ob, NULL);
  destroy_output_block (ob);
}
3804 | |
3805 | static void |
3806 | read_section (struct lto_file_decl_data *file_data, const char *data, |
3807 | size_t len) |
3808 | { |
3809 | const struct lto_function_header * |
3810 | = (const struct lto_function_header *) data; |
3811 | const int cfg_offset = sizeof (struct lto_function_header); |
3812 | const int main_offset = cfg_offset + header->cfg_size; |
3813 | const int string_offset = main_offset + header->main_size; |
3814 | struct data_in *data_in; |
3815 | unsigned int i; |
3816 | unsigned int f_count; |
3817 | |
3818 | lto_input_block ib ((const char *) data + main_offset, header->main_size, |
3819 | file_data); |
3820 | |
3821 | data_in |
3822 | = lto_data_in_create (file_data, (const char *) data + string_offset, |
3823 | header->string_size, vNULL); |
3824 | f_count = streamer_read_uhwi (&ib); |
3825 | for (i = 0; i < f_count; i++) |
3826 | { |
3827 | struct cgraph_node *node; |
3828 | lto_symtab_encoder_t encoder; |
3829 | |
3830 | unsigned int index = streamer_read_uhwi (&ib); |
3831 | encoder = file_data->symtab_node_encoder; |
3832 | node = dyn_cast <cgraph_node *> (p: lto_symtab_encoder_deref (encoder, |
3833 | ref: index)); |
3834 | |
3835 | modref_summary *modref_sum = summaries |
3836 | ? summaries->get_create (node) : NULL; |
3837 | modref_summary_lto *modref_sum_lto = summaries_lto |
3838 | ? summaries_lto->get_create (node) |
3839 | : NULL; |
3840 | if (optimization_summaries) |
3841 | modref_sum = optimization_summaries->get_create (node); |
3842 | |
3843 | if (modref_sum) |
3844 | { |
3845 | modref_sum->writes_errno = false; |
3846 | modref_sum->side_effects = false; |
3847 | modref_sum->nondeterministic = false; |
3848 | modref_sum->calls_interposable = false; |
3849 | } |
3850 | if (modref_sum_lto) |
3851 | { |
3852 | modref_sum_lto->writes_errno = false; |
3853 | modref_sum_lto->side_effects = false; |
3854 | modref_sum_lto->nondeterministic = false; |
3855 | modref_sum_lto->calls_interposable = false; |
3856 | } |
3857 | |
3858 | gcc_assert (!modref_sum || (!modref_sum->loads |
3859 | && !modref_sum->stores)); |
3860 | gcc_assert (!modref_sum_lto || (!modref_sum_lto->loads |
3861 | && !modref_sum_lto->stores)); |
3862 | unsigned int args = streamer_read_uhwi (&ib); |
3863 | if (args && modref_sum) |
3864 | modref_sum->arg_flags.reserve_exact (nelems: args); |
3865 | if (args && modref_sum_lto) |
3866 | modref_sum_lto->arg_flags.reserve_exact (nelems: args); |
3867 | for (unsigned int i = 0; i < args; i++) |
3868 | { |
3869 | eaf_flags_t flags = streamer_read_uhwi (&ib); |
3870 | if (modref_sum) |
3871 | modref_sum->arg_flags.quick_push (obj: flags); |
3872 | if (modref_sum_lto) |
3873 | modref_sum_lto->arg_flags.quick_push (obj: flags); |
3874 | } |
3875 | eaf_flags_t flags = streamer_read_uhwi (&ib); |
3876 | if (modref_sum) |
3877 | modref_sum->retslot_flags = flags; |
3878 | if (modref_sum_lto) |
3879 | modref_sum_lto->retslot_flags = flags; |
3880 | |
3881 | flags = streamer_read_uhwi (&ib); |
3882 | if (modref_sum) |
3883 | modref_sum->static_chain_flags = flags; |
3884 | if (modref_sum_lto) |
3885 | modref_sum_lto->static_chain_flags = flags; |
3886 | |
3887 | read_modref_records (decl: node->decl, ib: &ib, data_in, |
3888 | nolto_ret: modref_sum ? &modref_sum->loads : NULL, |
3889 | lto_ret: modref_sum_lto ? &modref_sum_lto->loads : NULL); |
3890 | read_modref_records (decl: node->decl, ib: &ib, data_in, |
3891 | nolto_ret: modref_sum ? &modref_sum->stores : NULL, |
3892 | lto_ret: modref_sum_lto ? &modref_sum_lto->stores : NULL); |
3893 | int j = streamer_read_uhwi (&ib); |
3894 | if (j && modref_sum) |
3895 | modref_sum->kills.reserve_exact (nelems: j); |
3896 | if (j && modref_sum_lto) |
3897 | modref_sum_lto->kills.reserve_exact (nelems: j); |
3898 | for (int k = 0; k < j; k++) |
3899 | { |
3900 | modref_access_node a = modref_access_node::stream_in (ib: &ib); |
3901 | |
3902 | if (modref_sum) |
3903 | modref_sum->kills.quick_push (obj: a); |
3904 | if (modref_sum_lto) |
3905 | modref_sum_lto->kills.quick_push (obj: a); |
3906 | } |
3907 | struct bitpack_d bp = streamer_read_bitpack (ib: &ib); |
3908 | if (bp_unpack_value (bp: &bp, nbits: 1)) |
3909 | { |
3910 | if (modref_sum) |
3911 | modref_sum->writes_errno = true; |
3912 | if (modref_sum_lto) |
3913 | modref_sum_lto->writes_errno = true; |
3914 | } |
3915 | if (bp_unpack_value (bp: &bp, nbits: 1)) |
3916 | { |
3917 | if (modref_sum) |
3918 | modref_sum->side_effects = true; |
3919 | if (modref_sum_lto) |
3920 | modref_sum_lto->side_effects = true; |
3921 | } |
3922 | if (bp_unpack_value (bp: &bp, nbits: 1)) |
3923 | { |
3924 | if (modref_sum) |
3925 | modref_sum->nondeterministic = true; |
3926 | if (modref_sum_lto) |
3927 | modref_sum_lto->nondeterministic = true; |
3928 | } |
3929 | if (bp_unpack_value (bp: &bp, nbits: 1)) |
3930 | { |
3931 | if (modref_sum) |
3932 | modref_sum->calls_interposable = true; |
3933 | if (modref_sum_lto) |
3934 | modref_sum_lto->calls_interposable = true; |
3935 | } |
3936 | if (!flag_ltrans) |
3937 | { |
3938 | for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee) |
3939 | { |
3940 | if (bp_unpack_value (bp: &bp, nbits: 1)) |
3941 | { |
3942 | class fnspec_summary *sum = fnspec_summaries->get_create (edge: e); |
3943 | sum->fnspec = xstrdup (bp_unpack_string (data_in, &bp)); |
3944 | } |
3945 | modref_read_escape_summary (bp: &bp, e); |
3946 | } |
3947 | for (cgraph_edge *e = node->callees; e; e = e->next_callee) |
3948 | { |
3949 | if (bp_unpack_value (bp: &bp, nbits: 1)) |
3950 | { |
3951 | class fnspec_summary *sum = fnspec_summaries->get_create (edge: e); |
3952 | sum->fnspec = xstrdup (bp_unpack_string (data_in, &bp)); |
3953 | } |
3954 | modref_read_escape_summary (bp: &bp, e); |
3955 | } |
3956 | } |
3957 | if (flag_ltrans) |
3958 | modref_sum->finalize (fun: node->decl); |
3959 | if (dump_file) |
3960 | { |
3961 | fprintf (stream: dump_file, format: "Read modref for %s\n" , |
3962 | node->dump_name ()); |
3963 | if (modref_sum) |
3964 | modref_sum->dump (out: dump_file); |
3965 | if (modref_sum_lto) |
3966 | modref_sum_lto->dump (out: dump_file); |
3967 | dump_modref_edge_summaries (out: dump_file, node, depth: 4); |
3968 | } |
3969 | } |
3970 | |
3971 | lto_free_section_data (file_data, LTO_section_ipa_modref, NULL, data, |
3972 | len); |
3973 | lto_data_in_delete (data_in); |
3974 | } |
3975 | |
3976 | /* Callback for read_summary. */ |
3977 | |
3978 | static void |
3979 | modref_read (void) |
3980 | { |
3981 | struct lto_file_decl_data **file_data_vec = lto_get_file_decl_data (); |
3982 | struct lto_file_decl_data *file_data; |
3983 | unsigned int j = 0; |
3984 | |
3985 | gcc_checking_assert (!optimization_summaries && !summaries && !summaries_lto); |
3986 | if (flag_ltrans) |
3987 | optimization_summaries = modref_summaries::create_ggc (symtab); |
3988 | else |
3989 | { |
3990 | if (flag_wpa || flag_incremental_link == INCREMENTAL_LINK_LTO) |
3991 | summaries_lto = modref_summaries_lto::create_ggc (symtab); |
3992 | if (!flag_wpa |
3993 | || (flag_incremental_link == INCREMENTAL_LINK_LTO |
3994 | && flag_fat_lto_objects)) |
3995 | summaries = modref_summaries::create_ggc (symtab); |
3996 | if (!fnspec_summaries) |
3997 | fnspec_summaries = new fnspec_summaries_t (symtab); |
3998 | if (!escape_summaries) |
3999 | escape_summaries = new escape_summaries_t (symtab); |
4000 | } |
4001 | |
4002 | while ((file_data = file_data_vec[j++])) |
4003 | { |
4004 | size_t len; |
4005 | const char *data = lto_get_summary_section_data (file_data, |
4006 | LTO_section_ipa_modref, |
4007 | &len); |
4008 | if (data) |
4009 | read_section (file_data, data, len); |
4010 | else |
4011 | /* Fatal error here. We do not want to support compiling ltrans units |
4012 | with different version of compiler or different flags than the WPA |
4013 | unit, so this should never happen. */ |
4014 | fatal_error (input_location, |
4015 | "IPA modref summary is missing in input file" ); |
4016 | } |
4017 | } |
4018 | |
4019 | /* Recompute arg_flags for param adjustments in INFO. */ |
4020 | |
4021 | static void |
4022 | remap_arg_flags (auto_vec <eaf_flags_t> &arg_flags, clone_info *info) |
4023 | { |
4024 | auto_vec<eaf_flags_t> old = arg_flags.copy (); |
4025 | int max = -1; |
4026 | size_t i; |
4027 | ipa_adjusted_param *p; |
4028 | |
4029 | arg_flags.release (); |
4030 | |
4031 | FOR_EACH_VEC_SAFE_ELT (info->param_adjustments->m_adj_params, i, p) |
4032 | { |
4033 | int o = info->param_adjustments->get_original_index (newidx: i); |
4034 | if (o >= 0 && (int)old.length () > o && old[o]) |
4035 | max = i; |
4036 | } |
4037 | if (max >= 0) |
4038 | arg_flags.safe_grow_cleared (len: max + 1, exact: true); |
4039 | FOR_EACH_VEC_SAFE_ELT (info->param_adjustments->m_adj_params, i, p) |
4040 | { |
4041 | int o = info->param_adjustments->get_original_index (newidx: i); |
4042 | if (o >= 0 && (int)old.length () > o && old[o]) |
4043 | arg_flags[i] = old[o]; |
4044 | } |
4045 | } |
4046 | |
4047 | /* Update kills according to the parm map MAP. */ |
4048 | |
4049 | static void |
4050 | remap_kills (vec <modref_access_node> &kills, const vec <int> &map) |
4051 | { |
4052 | for (size_t i = 0; i < kills.length ();) |
4053 | if (kills[i].parm_index >= 0) |
4054 | { |
4055 | if (kills[i].parm_index < (int)map.length () |
4056 | && map[kills[i].parm_index] != MODREF_UNKNOWN_PARM) |
4057 | { |
4058 | kills[i].parm_index = map[kills[i].parm_index]; |
4059 | i++; |
4060 | } |
4061 | else |
4062 | kills.unordered_remove (ix: i); |
4063 | } |
4064 | else |
4065 | i++; |
4066 | } |
4067 | |
4068 | /* Return true if the V can overlap with KILL. */ |
4069 | |
4070 | static bool |
4071 | ipcp_argagg_and_kill_overlap_p (const ipa_argagg_value &v, |
4072 | const modref_access_node &kill) |
4073 | { |
4074 | if (kill.parm_index == v.index) |
4075 | { |
4076 | gcc_assert (kill.parm_offset_known); |
4077 | gcc_assert (known_eq (kill.max_size, kill.size)); |
4078 | poly_int64 repl_size; |
4079 | bool ok = poly_int_tree_p (TYPE_SIZE (TREE_TYPE (v.value)), |
4080 | value: &repl_size); |
4081 | gcc_assert (ok); |
4082 | poly_int64 repl_offset (v.unit_offset); |
4083 | repl_offset <<= LOG2_BITS_PER_UNIT; |
4084 | poly_int64 combined_offset |
4085 | = (kill.parm_offset << LOG2_BITS_PER_UNIT) + kill.offset; |
4086 | if (ranges_maybe_overlap_p (pos1: repl_offset, size1: repl_size, |
4087 | pos2: combined_offset, size2: kill.size)) |
4088 | return true; |
4089 | } |
4090 | return false; |
4091 | } |
4092 | |
/* If signature of NODE changed (via clone_info param adjustments), remap
   the modref summaries to the new parameter numbering and mark IPA-CP
   aggregate replacements that overlap killed memory.  */

static void
update_signature (struct cgraph_node *node)
{
  modref_summary *r = optimization_summaries
		      ? optimization_summaries->get (node) : NULL;
  modref_summary_lto *r_lto = summaries_lto
			      ? summaries_lto->get (node) : NULL;
  if (!r && !r_lto)
    return;

  /* Propagating constants in killed memory can lead to eliminated stores in
     both callees (because they are considered redundant) and callers, leading
     to missing them altogether.  */
  ipcp_transformation *ipcp_ts = ipcp_get_transformation_summary (node);
  if (ipcp_ts)
    {
      for (auto &v : ipcp_ts->m_agg_values)
	{
	  if (!v.by_ref)
	    continue;
	  /* Mark V killed when it overlaps any kill in either summary.  */
	  if (r)
	    for (const modref_access_node &kill : r->kills)
	      if (ipcp_argagg_and_kill_overlap_p (v, kill))
		{
		  v.killed = true;
		  break;
		}
	  if (!v.killed && r_lto)
	    for (const modref_access_node &kill : r_lto->kills)
	      if (ipcp_argagg_and_kill_overlap_p (v, kill))
		{
		  v.killed = true;
		  break;
		}
	}
    }

  /* Without param adjustments there is no renumbering to do.  */
  clone_info *info = clone_info::get (node);
  if (!info || !info->param_adjustments)
    return;

  if (dump_file)
    {
      fprintf (stream: dump_file, format: "Updating summary for %s from:\n" ,
	       node->dump_name ());
      if (r)
	r->dump (out: dump_file);
      if (r_lto)
	r_lto->dump (out: dump_file);
    }

  size_t i, max = 0;
  ipa_adjusted_param *p;

  /* Find the largest original index referenced so MAP can be sized.  */
  FOR_EACH_VEC_SAFE_ELT (info->param_adjustments->m_adj_params, i, p)
    {
      int idx = info->param_adjustments->get_original_index (newidx: i);
      if (idx > (int)max)
	max = idx;
    }

  auto_vec <int, 32> map;

  /* Build old-index -> new-index map; dropped params stay
     MODREF_UNKNOWN_PARM.  */
  map.reserve (nelems: max + 1);
  for (i = 0; i <= max; i++)
    map.quick_push (obj: MODREF_UNKNOWN_PARM);
  FOR_EACH_VEC_SAFE_ELT (info->param_adjustments->m_adj_params, i, p)
    {
      int idx = info->param_adjustments->get_original_index (newidx: i);
      if (idx >= 0)
	map[idx] = i;
    }
  /* Remap both summaries (loads, stores, kills, arg flags).  */
  if (r)
    {
      r->loads->remap_params (map: &map);
      r->stores->remap_params (map: &map);
      remap_kills (kills&: r->kills, map);
      if (r->arg_flags.length ())
	remap_arg_flags (arg_flags&: r->arg_flags, info);
    }
  if (r_lto)
    {
      r_lto->loads->remap_params (map: &map);
      r_lto->stores->remap_params (map: &map);
      remap_kills (kills&: r_lto->kills, map);
      if (r_lto->arg_flags.length ())
	remap_arg_flags (arg_flags&: r_lto->arg_flags, info);
    }
  if (dump_file)
    {
      fprintf (stream: dump_file, format: "to:\n" );
      if (r)
	r->dump (out: dump_file);
      if (r_lto)
	r_lto->dump (out: dump_file);
    }
  if (r)
    r->finalize (fun: node->decl);
  return;
}
4195 | |
/* Definition of the modref IPA pass; runs at WPA/link time and
   propagates summaries across the callgraph.  */
const pass_data pass_data_ipa_modref =
{
  .type: IPA_PASS, /* type */
  .name: "modref" , /* name */
  .optinfo_flags: OPTGROUP_IPA, /* optinfo_flags */
  .tv_id: TV_IPA_MODREF, /* tv_id */
  .properties_required: 0, /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  .todo_flags_finish: ( TODO_dump_symtab ), /* todo_flags_finish */
};
4209 | |
/* The IPA modref pass.  Streams summaries via modref_write/modref_read;
   no per-function transform phase is registered.  */
class pass_ipa_modref : public ipa_opt_pass_d
{
public:
  pass_ipa_modref (gcc::context *ctxt)
    : ipa_opt_pass_d (pass_data_ipa_modref, ctxt,
		      modref_generate, /* generate_summary */
		      modref_write, /* write_summary */
		      modref_read, /* read_summary */
		      modref_write, /* write_optimization_summary */
		      modref_read, /* read_optimization_summary */
		      NULL, /* stmt_fixup */
		      0, /* function_transform_todo_flags_start */
		      NULL, /* function_transform */
		      NULL) /* variable_transform */
  {}

  /* opt_pass methods: */
  opt_pass *clone () final override { return new pass_ipa_modref (m_ctxt); }
  /* Always enabled; usefulness of individual summaries is decided when
     they are streamed.  */
  bool gate (function *) final override
  {
    return true;
  }
  unsigned int execute (function *) final override;

};
4235 | |
4236 | } |
4237 | |
4238 | unsigned int pass_modref::execute (function *) |
4239 | { |
4240 | if (analyze_function (ipa: false)) |
4241 | return execute_fixup_cfg (); |
4242 | return 0; |
4243 | } |
4244 | |
/* Create an instance of the GIMPLE modref pass.  */
gimple_opt_pass *
make_pass_modref (gcc::context *ctxt)
{
  return new pass_modref (ctxt);
}
4250 | |
/* Create an instance of the IPA modref pass.  */
ipa_opt_pass_d *
make_pass_ipa_modref (gcc::context *ctxt)
{
  return new pass_ipa_modref (ctxt);
}
4256 | |
4257 | namespace { |
4258 | |
4259 | /* Skip edges from and to nodes without ipa_pure_const enabled. |
4260 | Ignore not available symbols. */ |
4261 | |
4262 | static bool |
4263 | ignore_edge (struct cgraph_edge *e) |
4264 | { |
4265 | /* We merge summaries of inline clones into summaries of functions they |
4266 | are inlined to. For that reason the complete function bodies must |
4267 | act as unit. */ |
4268 | if (!e->inline_failed) |
4269 | return false; |
4270 | enum availability avail; |
4271 | cgraph_node *callee = e->callee->ultimate_alias_target |
4272 | (availability: &avail, ref: e->caller); |
4273 | |
4274 | return (avail <= AVAIL_INTERPOSABLE |
4275 | || ((!optimization_summaries || !optimization_summaries->get (node: callee)) |
4276 | && (!summaries_lto || !summaries_lto->get (node: callee)))); |
4277 | } |
4278 | |
/* Compute PARM_MAP for CALLEE_EDGE: for each argument of the call record
   which caller parameter (if any) it comes from and at what offset.
   Returns true when jump functions were available and the map was filled;
   false otherwise.  */

static bool
compute_parm_map (cgraph_edge *callee_edge, vec<modref_parm_map> *parm_map)
{
  class ipa_edge_args *args;
  if (ipa_node_params_sum
      && !callee_edge->call_stmt_cannot_inline_p
      && (args = ipa_edge_args_sum->get (edge: callee_edge)) != NULL)
    {
      int i, count = ipa_get_cs_argument_count (args);
      class ipa_node_params *caller_parms_info, *callee_pi;
      class ipa_call_summary *es
	 = ipa_call_summaries->get (edge: callee_edge);
      cgraph_node *callee
	 = callee_edge->callee->ultimate_alias_target
			      (NULL, ref: callee_edge->caller);

      /* For inline clones use the params of the function the clone was
	 inlined into.  */
      caller_parms_info
	= ipa_node_params_sum->get (node: callee_edge->caller->inlined_to
				    ? callee_edge->caller->inlined_to
				    : callee_edge->caller);
      callee_pi = ipa_node_params_sum->get (node: callee);

      (*parm_map).safe_grow_cleared (len: count, exact: true);

      for (i = 0; i < count; i++)
	{
	  /* Arguments known to point to local or readonly memory cannot
	     produce interesting side effects.  */
	  if (es && es->param[i].points_to_local_or_readonly_memory)
	    {
	      (*parm_map)[i].parm_index = MODREF_LOCAL_MEMORY_PARM;
	      continue;
	    }

	  struct ipa_jump_func *jf
	     = ipa_get_ith_jump_func (args, i);
	  /* A constant jump function may also resolve to local/readonly
	     memory.  */
	  if (jf && callee_pi)
	    {
	      tree cst = ipa_value_from_jfunc (info: caller_parms_info,
					       jfunc: jf,
					       type: ipa_get_type
						 (info: callee_pi, i));
	      if (cst && points_to_local_or_readonly_memory_p (cst))
		{
		  (*parm_map)[i].parm_index = MODREF_LOCAL_MEMORY_PARM;
		  continue;
		}
	    }
	  /* Pass-through: argument is caller's parameter, possibly with a
	     known constant pointer offset.  */
	  if (jf && jf->type == IPA_JF_PASS_THROUGH)
	    {
	      (*parm_map)[i].parm_index
		= ipa_get_jf_pass_through_formal_id (jfunc: jf);
	      if (ipa_get_jf_pass_through_operation (jfunc: jf) == NOP_EXPR)
		{
		  (*parm_map)[i].parm_offset_known = true;
		  (*parm_map)[i].parm_offset = 0;
		}
	      else if (ipa_get_jf_pass_through_operation (jfunc: jf)
		       == POINTER_PLUS_EXPR
		       && ptrdiff_tree_p (ipa_get_jf_pass_through_operand (jfunc: jf),
					  &(*parm_map)[i].parm_offset))
		(*parm_map)[i].parm_offset_known = true;
	      else
		(*parm_map)[i].parm_offset_known = false;
	      continue;
	    }
	  /* Ancestor: caller's parameter adjusted by a byte-aligned
	     offset.  */
	  if (jf && jf->type == IPA_JF_ANCESTOR)
	    {
	      (*parm_map)[i].parm_index = ipa_get_jf_ancestor_formal_id (jfunc: jf);
	      (*parm_map)[i].parm_offset_known = true;
	      gcc_checking_assert
		(!(ipa_get_jf_ancestor_offset (jf) & (BITS_PER_UNIT - 1)));
	      (*parm_map)[i].parm_offset
		 = ipa_get_jf_ancestor_offset (jfunc: jf) >> LOG2_BITS_PER_UNIT;
	    }
	  else
	    /* Unknown origin.  */
	    (*parm_map)[i].parm_index = -1;
	}
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: "  Parm map: " );
	  for (i = 0; i < count; i++)
	    fprintf (stream: dump_file, format: " %i" , (*parm_map)[i].parm_index);
	  fprintf (stream: dump_file, format: "\n" );
	}
      return true;
    }
  return false;
}
4368 | |
4369 | /* Map used to translate escape infos. */ |
4370 | |
struct escape_map
{
  /* Index of the parameter in the (possibly inlined-to) caller that the
     callee argument translates to.  */
  int parm_index;
  /* True when the value escapes directly; when false the escape is through
     a dereference and flags must be weakened by deref_flags (see
     update_escape_summary_1).  */
  bool direct;
};
4376 | |
4377 | /* Update escape map for E. */ |
4378 | |
static void
update_escape_summary_1 (cgraph_edge *e,
			 vec <vec <escape_map>> &map,
			 bool ignore_stores)
{
  escape_summary *sum = escape_summaries->get (edge: e);
  if (!sum)
    return;
  /* Rebuild the escape vector from scratch: translate every recorded entry
     through MAP and push the translated entries back.  */
  auto_vec <escape_entry> old = sum->esc.copy ();
  sum->esc.release ();

  unsigned int i;
  escape_entry *ee;
  FOR_EACH_VEC_ELT (old, i, ee)
    {
      unsigned int j;
      struct escape_map *em;
      /* TODO: We do not have jump functions for return slots, so we
	 never propagate them to outer function.  */
      if (ee->parm_index >= (int)map.length ()
	  || ee->parm_index < 0)
	continue;
      /* One callee parameter may map to several caller parameters; emit a
	 translated entry for each.  */
      FOR_EACH_VEC_ELT (map[ee->parm_index], j, em)
	{
	  int min_flags = ee->min_flags;
	  /* A formerly direct escape that becomes indirect must have its
	     flags weakened by a dereference.  */
	  if (ee->direct && !em->direct)
	    min_flags = deref_flags (flags: min_flags, ignore_stores);
	  struct escape_entry entry = {.parm_index: em->parm_index, .arg: ee->arg,
				       .min_flags: min_flags,
				       .direct: ee->direct & em->direct};
	  sum->esc.safe_push (obj: entry);
	}
    }
  /* If nothing escapes anymore, drop the summary for this edge.  */
  if (!sum->esc.length ())
    escape_summaries->remove (edge: e);
}
4415 | |
4416 | /* Update escape map for NODE. */ |
4417 | |
4418 | static void |
4419 | update_escape_summary (cgraph_node *node, |
4420 | vec <vec <escape_map>> &map, |
4421 | bool ignore_stores) |
4422 | { |
4423 | if (!escape_summaries) |
4424 | return; |
4425 | for (cgraph_edge *e = node->indirect_calls; e; e = e->next_callee) |
4426 | update_escape_summary_1 (e, map, ignore_stores); |
4427 | for (cgraph_edge *e = node->callees; e; e = e->next_callee) |
4428 | { |
4429 | if (!e->inline_failed) |
4430 | update_escape_summary (node: e->callee, map, ignore_stores); |
4431 | else |
4432 | update_escape_summary_1 (e, map, ignore_stores); |
4433 | } |
4434 | } |
4435 | |
4436 | /* Get parameter type from DECL. This is only safe for special cases |
4437 | like builtins we create fnspec for because the type match is checked |
4438 | at fnspec creation time. */ |
4439 | |
4440 | static tree |
4441 | get_parm_type (tree decl, unsigned int i) |
4442 | { |
4443 | tree t = TYPE_ARG_TYPES (TREE_TYPE (decl)); |
4444 | |
4445 | for (unsigned int p = 0; p < i; p++) |
4446 | t = TREE_CHAIN (t); |
4447 | return TREE_VALUE (t); |
4448 | } |
4449 | |
4450 | /* Return access mode for argument I of call E with FNSPEC. */ |
4451 | |
static modref_access_node
get_access_for_fnspec (cgraph_edge *e, attr_fnspec &fnspec,
		       unsigned int i, modref_parm_map &map)
{
  /* SIZE, if determinable, is the maximum number of bytes accessed through
     argument I.  */
  tree size = NULL_TREE;
  unsigned int size_arg;

  if (!fnspec.arg_specified_p (i))
    ;
  /* The access size is given by another argument of the call (as with,
     e.g., memcpy-like builtins); try to resolve its value via the jump
     function of the call edge.  */
  else if (fnspec.arg_max_access_size_given_by_arg_p (i, arg: &size_arg))
    {
      cgraph_node *node = e->caller->inlined_to
			  ? e->caller->inlined_to : e->caller;
      ipa_node_params *caller_parms_info = ipa_node_params_sum->get (node);
      ipa_edge_args *args = ipa_edge_args_sum->get (edge: e);
      struct ipa_jump_func *jf = ipa_get_ith_jump_func (args, i: size_arg);

      if (jf)
	size = ipa_value_from_jfunc (info: caller_parms_info, jfunc: jf,
				     type: get_parm_type (decl: e->callee->decl, i: size_arg));
    }
  /* The access size is implied by the pointed-to type of the argument.  */
  else if (fnspec.arg_access_size_given_by_type_p (i))
    size = TYPE_SIZE_UNIT (get_parm_type (e->callee->decl, i));
  /* Start from an access anywhere within the parameter (unknown size).  */
  modref_access_node a = {.offset: 0, .size: -1, .max_size: -1,
			  .parm_offset: map.parm_offset, .parm_index: map.parm_index,
			  .parm_offset_known: map.parm_offset_known, .adjustments: 0};
  poly_int64 size_hwi;
  /* Refine max_size when a constant byte size is known and converting it
     to bits cannot overflow; access bounds are tracked in bits.  */
  if (size
      && poly_int_tree_p (t: size, value: &size_hwi)
      && coeffs_in_range_p (a: size_hwi, b: 0,
			    HOST_WIDE_INT_MAX / BITS_PER_UNIT))
    {
      a.size = -1;
      a.max_size = size_hwi << LOG2_BITS_PER_UNIT;
    }
  return a;
}
4489 | |
4490 | /* Collapse loads and return true if something changed. */ |
4491 | static bool |
4492 | collapse_loads (modref_summary *cur_summary, |
4493 | modref_summary_lto *cur_summary_lto) |
4494 | { |
4495 | bool changed = false; |
4496 | |
4497 | if (cur_summary && !cur_summary->loads->every_base) |
4498 | { |
4499 | cur_summary->loads->collapse (); |
4500 | changed = true; |
4501 | } |
4502 | if (cur_summary_lto |
4503 | && !cur_summary_lto->loads->every_base) |
4504 | { |
4505 | cur_summary_lto->loads->collapse (); |
4506 | changed = true; |
4507 | } |
4508 | return changed; |
4509 | } |
4510 | |
4511 | /* Collapse loads and return true if something changed. */ |
4512 | |
4513 | static bool |
4514 | collapse_stores (modref_summary *cur_summary, |
4515 | modref_summary_lto *cur_summary_lto) |
4516 | { |
4517 | bool changed = false; |
4518 | |
4519 | if (cur_summary && !cur_summary->stores->every_base) |
4520 | { |
4521 | cur_summary->stores->collapse (); |
4522 | changed = true; |
4523 | } |
4524 | if (cur_summary_lto |
4525 | && !cur_summary_lto->stores->every_base) |
4526 | { |
4527 | cur_summary_lto->stores->collapse (); |
4528 | changed = true; |
4529 | } |
4530 | return changed; |
4531 | } |
4532 | |
4533 | /* Call E in NODE with ECF_FLAGS has no summary; update MODREF_SUMMARY and |
4534 | CUR_SUMMARY_LTO accordingly. Return true if something changed. */ |
4535 | |
static bool
propagate_unknown_call (cgraph_node *node,
			cgraph_edge *e, int ecf_flags,
			modref_summary *cur_summary,
			modref_summary_lto *cur_summary_lto,
			bool nontrivial_scc)
{
  bool changed = false;
  class fnspec_summary *fnspec_sum = fnspec_summaries->get (edge: e);
  auto_vec <modref_parm_map, 32> parm_map;
  bool looping;

  /* Builtins known to be safe for const functions touch no caller-visible
     memory; the only thing to record is a possible infinite loop
     (LOOPING) as a side effect.  */
  if (e->callee
      && builtin_safe_for_const_function_p (&looping, e->callee->decl))
    {
      if (looping && cur_summary && !cur_summary->side_effects)
	{
	  cur_summary->side_effects = true;
	  changed = true;
	}
      if (looping && cur_summary_lto && !cur_summary_lto->side_effects)
	{
	  cur_summary_lto->side_effects = true;
	  changed = true;
	}
      return changed;
    }

  /* Unless the callee is const/pure/novops (and not possibly looping)
     and we are outside a nontrivial SCC, the call may have side effects
     and may be nondeterministic.  */
  if (!(ecf_flags & (ECF_CONST | ECF_NOVOPS | ECF_PURE))
      || (ecf_flags & ECF_LOOPING_CONST_OR_PURE)
      || nontrivial_scc)
    {
      if (cur_summary && !cur_summary->side_effects)
	{
	  cur_summary->side_effects = true;
	  changed = true;
	}
      if (cur_summary_lto && !cur_summary_lto->side_effects)
	{
	  cur_summary_lto->side_effects = true;
	  changed = true;
	}
      if (cur_summary && !cur_summary->nondeterministic
	  && !ignore_nondeterminism_p (caller: node->decl, flags: ecf_flags))
	{
	  cur_summary->nondeterministic = true;
	  changed = true;
	}
      if (cur_summary_lto && !cur_summary_lto->nondeterministic
	  && !ignore_nondeterminism_p (caller: node->decl, flags: ecf_flags))
	{
	  cur_summary_lto->nondeterministic = true;
	  changed = true;
	}
    }
  /* Const/novops calls read and write no memory visible to callers;
     nothing more to merge.  */
  if (ecf_flags & (ECF_CONST | ECF_NOVOPS))
    return changed;

  /* With a known fnspec string and a computable parameter map we can
     record precise per-argument accesses instead of collapsing the
     whole summary.  */
  if (fnspec_sum
      && compute_parm_map (callee_edge: e, parm_map: &parm_map))
    {
      attr_fnspec fnspec (fnspec_sum->fnspec);

      gcc_checking_assert (fnspec.known_p ());
      if (fnspec.global_memory_read_p ())
	collapse_loads (cur_summary, cur_summary_lto);
      else
	{
	  /* Walk pointer arguments; record a load access for each one
	     the fnspec says may be read.  */
	  tree t = TYPE_ARG_TYPES (TREE_TYPE (e->callee->decl));
	  for (unsigned i = 0; i < parm_map.length () && t;
	       i++, t = TREE_CHAIN (t))
	    if (!POINTER_TYPE_P (TREE_VALUE (t)))
	      ;
	    else if (!fnspec.arg_specified_p (i)
		     || fnspec.arg_maybe_read_p (i))
	      {
		modref_parm_map map = parm_map[i];
		/* Loads from local or readonly memory are invisible to
		   callers.  */
		if (map.parm_index == MODREF_LOCAL_MEMORY_PARM)
		  continue;
		/* Unknown argument: give up on tracking loads.  */
		if (map.parm_index == MODREF_UNKNOWN_PARM)
		  {
		    collapse_loads (cur_summary, cur_summary_lto);
		    break;
		  }
		if (cur_summary)
		  changed |= cur_summary->loads->insert
		    (fndecl: node->decl, base: 0, ref: 0,
		     a: get_access_for_fnspec (e, fnspec, i, map), record_adjustments: false);
		if (cur_summary_lto)
		  changed |= cur_summary_lto->loads->insert
		    (fndecl: node->decl, base: 0, ref: 0,
		     a: get_access_for_fnspec (e, fnspec, i, map), record_adjustments: false);
	      }
	}
      if (ignore_stores_p (caller: node->decl, flags: ecf_flags))
	;
      else if (fnspec.global_memory_written_p ())
	collapse_stores (cur_summary, cur_summary_lto);
      else
	{
	  /* Likewise for stores through pointer arguments.  */
	  tree t = TYPE_ARG_TYPES (TREE_TYPE (e->callee->decl));
	  for (unsigned i = 0; i < parm_map.length () && t;
	       i++, t = TREE_CHAIN (t))
	    if (!POINTER_TYPE_P (TREE_VALUE (t)))
	      ;
	    else if (!fnspec.arg_specified_p (i)
		     || fnspec.arg_maybe_written_p (i))
	      {
		modref_parm_map map = parm_map[i];
		if (map.parm_index == MODREF_LOCAL_MEMORY_PARM)
		  continue;
		if (map.parm_index == MODREF_UNKNOWN_PARM)
		  {
		    collapse_stores (cur_summary, cur_summary_lto);
		    break;
		  }
		if (cur_summary)
		  changed |= cur_summary->stores->insert
		    (fndecl: node->decl, base: 0, ref: 0,
		     a: get_access_for_fnspec (e, fnspec, i, map), record_adjustments: false);
		if (cur_summary_lto)
		  changed |= cur_summary_lto->stores->insert
		    (fndecl: node->decl, base: 0, ref: 0,
		     a: get_access_for_fnspec (e, fnspec, i, map), record_adjustments: false);
	      }
	}
      /* errno stores only matter when -fmath-errno is in effect.  */
      if (fnspec.errno_maybe_written_p () && flag_errno_math)
	{
	  if (cur_summary && !cur_summary->writes_errno)
	    {
	      cur_summary->writes_errno = true;
	      changed = true;
	    }
	  if (cur_summary_lto && !cur_summary_lto->writes_errno)
	    {
	      cur_summary_lto->writes_errno = true;
	      changed = true;
	    }
	}
      return changed;
    }
  /* No fnspec: conservatively assume the call reads anything, and unless
     stores are ignorable, writes anything.  */
  if (dump_file)
    fprintf (stream: dump_file, format: " collapsing loads\n" );
  changed |= collapse_loads (cur_summary, cur_summary_lto);
  if (!ignore_stores_p (caller: node->decl, flags: ecf_flags))
    {
      if (dump_file)
	fprintf (stream: dump_file, format: " collapsing stores\n" );
      changed |= collapse_stores (cur_summary, cur_summary_lto);
    }
  return changed;
}
4688 | |
4689 | /* Maybe remove summaries of NODE pointed to by CUR_SUMMARY_PTR |
4690 | and CUR_SUMMARY_LTO_PTR if they are useless according to ECF_FLAGS. */ |
4691 | |
4692 | static void |
4693 | remove_useless_summaries (cgraph_node *node, |
4694 | modref_summary **cur_summary_ptr, |
4695 | modref_summary_lto **cur_summary_lto_ptr, |
4696 | int ecf_flags) |
4697 | { |
4698 | if (*cur_summary_ptr && !(*cur_summary_ptr)->useful_p (ecf_flags, check_flags: false)) |
4699 | { |
4700 | optimization_summaries->remove (node); |
4701 | *cur_summary_ptr = NULL; |
4702 | } |
4703 | if (*cur_summary_lto_ptr |
4704 | && !(*cur_summary_lto_ptr)->useful_p (ecf_flags, check_flags: false)) |
4705 | { |
4706 | summaries_lto->remove (node); |
4707 | *cur_summary_lto_ptr = NULL; |
4708 | } |
4709 | } |
4710 | |
4711 | /* Perform iterative dataflow on SCC component starting in COMPONENT_NODE |
4712 | and propagate loads/stores. */ |
4713 | |
static bool
modref_propagate_in_scc (cgraph_node *component_node)
{
  bool changed = true;
  bool first = true;
  int iteration = 0;

  /* Iterate the whole SCC until a fixpoint is reached (no summary
     changes during a full sweep).  */
  while (changed)
    {
      /* A nontrivial SCC (more than one member) forces conservative
	 side-effect treatment of calls inside the component.  */
      bool nontrivial_scc
		 = ((struct ipa_dfs_info *) component_node->aux)->next_cycle;
      changed = false;
      for (struct cgraph_node *cur = component_node; cur;
	   cur = ((struct ipa_dfs_info *) cur->aux)->next_cycle)
	{
	  /* Summaries of inlined nodes live on the node they were
	     inlined into.  */
	  cgraph_node *node = cur->inlined_to ? cur->inlined_to : cur;
	  modref_summary *cur_summary = optimization_summaries
					? optimization_summaries->get (node)
					: NULL;
	  modref_summary_lto *cur_summary_lto = summaries_lto
						? summaries_lto->get (node)
						: NULL;

	  /* Nothing tracked for this node; nothing to propagate into.  */
	  if (!cur_summary && !cur_summary_lto)
	    continue;

	  int cur_ecf_flags = flags_from_decl_or_type (node->decl);

	  if (dump_file)
	    fprintf (stream: dump_file, format: " Processing %s%s%s\n" ,
		     cur->dump_name (),
		     TREE_READONLY (cur->decl) ? " (const)" : "" ,
		     DECL_PURE_P (cur->decl) ? " (pure)" : "" );

	  /* Indirect calls: we know nothing about the target beyond its
	     ECF flags, so treat them as unknown calls.  */
	  for (cgraph_edge *e = cur->indirect_calls; e; e = e->next_callee)
	    {
	      if (dump_file)
		fprintf (stream: dump_file, format: " Indirect call\n" );
	      if (propagate_unknown_call
			   (node, e, ecf_flags: e->indirect_info->ecf_flags,
			    cur_summary, cur_summary_lto,
			    nontrivial_scc))
		{
		  changed = true;
		  remove_useless_summaries (node, cur_summary_ptr: &cur_summary,
					    cur_summary_lto_ptr: &cur_summary_lto,
					    ecf_flags: cur_ecf_flags);
		  /* Both summaries removed: nothing left to refine.  */
		  if (!cur_summary && !cur_summary_lto)
		    break;
		}
	    }

	  if (!cur_summary && !cur_summary_lto)
	    continue;

	  /* Direct calls: merge callee summaries into CUR's.  */
	  for (cgraph_edge *callee_edge = cur->callees; callee_edge;
	       callee_edge = callee_edge->next_callee)
	    {
	      int flags = flags_from_decl_or_type (callee_edge->callee->decl);
	      modref_summary *callee_summary = NULL;
	      modref_summary_lto *callee_summary_lto = NULL;
	      struct cgraph_node *callee;

	      /* Skip inlined edges (handled via the inlined-to node) and
		 const/novops callees that cannot loop.  */
	      if (!callee_edge->inline_failed
		 || ((flags & (ECF_CONST | ECF_NOVOPS))
		     && !(flags & ECF_LOOPING_CONST_OR_PURE)))
		continue;

	      /* Get the callee and its summary.  */
	      enum availability avail;
	      callee = callee_edge->callee->ultimate_alias_target
			 (availability: &avail, ref: cur);

	      /* It is not necessary to re-process calls outside of the
		 SCC component.  */
	      if (iteration > 0
		  && (!callee->aux
		      || ((struct ipa_dfs_info *)cur->aux)->scc_no
			  != ((struct ipa_dfs_info *)callee->aux)->scc_no))
		continue;

	      if (dump_file)
		fprintf (stream: dump_file, format: " Call to %s\n" ,
			 callee_edge->callee->dump_name ());

	      bool ignore_stores = ignore_stores_p (caller: cur->decl, flags);

	      /* An interposable target cannot be trusted; fall back to
		 unknown-call handling.  */
	      if (avail <= AVAIL_INTERPOSABLE)
		{
		  if (dump_file)
		    fprintf (stream: dump_file, format: " Call target interposable"
			     " or not available\n" );
		  changed |= propagate_unknown_call
			       (node, e: callee_edge, ecf_flags: flags,
				cur_summary, cur_summary_lto,
				nontrivial_scc);
		  if (!cur_summary && !cur_summary_lto)
		    break;
		  continue;
		}

	      /* We don't know anything about CALLEE, hence we cannot tell
		 anything about the entire component.  */

	      if (cur_summary
		  && !(callee_summary = optimization_summaries->get (node: callee)))
		{
		  if (dump_file)
		    fprintf (stream: dump_file, format: " No call target summary\n" );
		  changed |= propagate_unknown_call
			       (node, e: callee_edge, ecf_flags: flags,
				cur_summary, NULL,
				nontrivial_scc);
		}
	      if (cur_summary_lto
		  && !(callee_summary_lto = summaries_lto->get (node: callee)))
		{
		  if (dump_file)
		    fprintf (stream: dump_file, format: " No call target summary\n" );
		  changed |= propagate_unknown_call
			       (node, e: callee_edge, ecf_flags: flags,
				NULL, cur_summary_lto,
				nontrivial_scc);
		}

	      /* Side effects and nondeterminism propagate from callee to
		 caller; recursion counts as a side effect (possible
		 nontermination).  */
	      if (callee_summary && !cur_summary->side_effects
		  && (callee_summary->side_effects
		      || callee_edge->recursive_p ()))
		{
		  cur_summary->side_effects = true;
		  changed = true;
		}
	      if (callee_summary_lto && !cur_summary_lto->side_effects
		  && (callee_summary_lto->side_effects
		      || callee_edge->recursive_p ()))
		{
		  cur_summary_lto->side_effects = true;
		  changed = true;
		}
	      if (callee_summary && !cur_summary->nondeterministic
		  && callee_summary->nondeterministic
		  && !ignore_nondeterminism_p (caller: cur->decl, flags))
		{
		  cur_summary->nondeterministic = true;
		  changed = true;
		}
	      if (callee_summary_lto && !cur_summary_lto->nondeterministic
		  && callee_summary_lto->nondeterministic
		  && !ignore_nondeterminism_p (caller: cur->decl, flags))
		{
		  cur_summary_lto->nondeterministic = true;
		  changed = true;
		}
	      /* Const/novops callees have no loads or stores to merge.  */
	      if (flags & (ECF_CONST | ECF_NOVOPS))
		continue;

	      /* We can not safely optimize based on summary of callee if it
		 does not always bind to current def: it is possible that
		 memory load was optimized out earlier which may not happen in
		 the interposed variant.  */
	      if (!callee_edge->binds_to_current_def_p ())
		{
		  if (cur_summary && !cur_summary->calls_interposable)
		    {
		      cur_summary->calls_interposable = true;
		      changed = true;
		    }
		  if (cur_summary_lto && !cur_summary_lto->calls_interposable)
		    {
		      cur_summary_lto->calls_interposable = true;
		      changed = true;
		    }
		  if (dump_file)
		    fprintf (stream: dump_file, format: " May not bind local;"
			     " collapsing loads\n" );
		}


	      auto_vec <modref_parm_map, 32> parm_map;
	      modref_parm_map chain_map;
	      /* TODO: Once we get jump functions for static chains we could
		 compute this.  */
	      chain_map.parm_index = MODREF_UNKNOWN_PARM;

	      compute_parm_map (callee_edge, parm_map: &parm_map);

	      /* Merge in callee's information.  */
	      if (callee_summary)
		{
		  changed |= cur_summary->loads->merge
				  (fndecl: node->decl, other: callee_summary->loads,
				   parm_map: &parm_map, static_chain_map: &chain_map, record_accesses: !first);
		  if (!ignore_stores)
		    {
		      changed |= cur_summary->stores->merge
				      (fndecl: node->decl, other: callee_summary->stores,
				       parm_map: &parm_map, static_chain_map: &chain_map, record_accesses: !first);
		      if (!cur_summary->writes_errno
			  && callee_summary->writes_errno)
			{
			  cur_summary->writes_errno = true;
			  changed = true;
			}
		    }
		}
	      if (callee_summary_lto)
		{
		  changed |= cur_summary_lto->loads->merge
				  (fndecl: node->decl, other: callee_summary_lto->loads,
				   parm_map: &parm_map, static_chain_map: &chain_map, record_accesses: !first);
		  if (!ignore_stores)
		    {
		      changed |= cur_summary_lto->stores->merge
				      (fndecl: node->decl, other: callee_summary_lto->stores,
				       parm_map: &parm_map, static_chain_map: &chain_map, record_accesses: !first);
		      if (!cur_summary_lto->writes_errno
			  && callee_summary_lto->writes_errno)
			{
			  cur_summary_lto->writes_errno = true;
			  changed = true;
			}
		    }
		}
	      if (changed)
		remove_useless_summaries (node, cur_summary_ptr: &cur_summary,
					  cur_summary_lto_ptr: &cur_summary_lto,
					  ecf_flags: cur_ecf_flags);
	      if (!cur_summary && !cur_summary_lto)
		break;
	      if (dump_file && changed)
		{
		  if (cur_summary)
		    cur_summary->dump (out: dump_file);
		  if (cur_summary_lto)
		    cur_summary_lto->dump (out: dump_file);
		  dump_modref_edge_summaries (out: dump_file, node, depth: 4);
		}
	    }
	}
      iteration++;
      first = false;
    }
  if (dump_file)
    fprintf (stream: dump_file,
	     format: "Propagation finished in %i iterations\n" , iteration);
  /* After the fixpoint, detect functions that turned out to be pure or
     const and update their attributes (when -fipa-pure-const allows).  */
  bool pureconst = false;
  for (struct cgraph_node *cur = component_node; cur;
       cur = ((struct ipa_dfs_info *) cur->aux)->next_cycle)
    if (!cur->inlined_to && opt_for_fn (cur->decl, flag_ipa_pure_const))
      {
	modref_summary *summary = optimization_summaries
				  ? optimization_summaries->get (node: cur)
				  : NULL;
	modref_summary_lto *summary_lto = summaries_lto
					  ? summaries_lto->get (node: cur)
					  : NULL;
	/* No stores and deterministic: candidate for pure or const.
	   No loads either (and no interposable calls) makes it const.  */
	if (summary && !summary->stores->every_base && !summary->stores->bases
	    && !summary->nondeterministic)
	  {
	    if (!summary->loads->every_base && !summary->loads->bases
		&& !summary->calls_interposable)
	      pureconst |= ipa_make_function_const
		     (cur, summary->side_effects, false);
	    else
	      pureconst |= ipa_make_function_pure
		     (cur, summary->side_effects, false);
	  }
	if (summary_lto && !summary_lto->stores->every_base
	    && !summary_lto->stores->bases && !summary_lto->nondeterministic)
	  {
	    if (!summary_lto->loads->every_base && !summary_lto->loads->bases
		&& !summary_lto->calls_interposable)
	      pureconst |= ipa_make_function_const
		     (cur, summary_lto->side_effects, false);
	    else
	      pureconst |= ipa_make_function_pure
		     (cur, summary_lto->side_effects, false);
	  }
      }
  return pureconst;
}
4995 | |
4996 | /* Dump results of propagation in SCC rooted in COMPONENT_NODE. */ |
4997 | |
static void
modref_propagate_dump_scc (cgraph_node *component_node)
{
  /* Walk all members of the SCC; inlined nodes have no summaries of
     their own and are skipped.  */
  for (struct cgraph_node *cur = component_node; cur;
       cur = ((struct ipa_dfs_info *) cur->aux)->next_cycle)
    if (!cur->inlined_to)
      {
	modref_summary *cur_summary = optimization_summaries
				      ? optimization_summaries->get (node: cur)
				      : NULL;
	modref_summary_lto *cur_summary_lto = summaries_lto
					      ? summaries_lto->get (node: cur)
					      : NULL;

	fprintf (stream: dump_file, format: "Propagated modref for %s%s%s\n" ,
		 cur->dump_name (),
		 TREE_READONLY (cur->decl) ? " (const)" : "" ,
		 DECL_PURE_P (cur->decl) ? " (pure)" : "" );
	if (optimization_summaries)
	  {
	    if (cur_summary)
	      cur_summary->dump (out: dump_file);
	    else
	      fprintf (stream: dump_file, format: " Not tracked\n" );
	  }
	if (summaries_lto)
	  {
	    if (cur_summary_lto)
	      cur_summary_lto->dump (out: dump_file);
	    else
	      fprintf (stream: dump_file, format: " Not tracked (lto)\n" );
	  }
      }
}
5032 | |
5033 | /* Determine EAF flags know for call E with CALLEE_ECF_FLAGS and ARG. */ |
5034 | |
5035 | int |
5036 | implicit_eaf_flags_for_edge_and_arg (cgraph_edge *e, int callee_ecf_flags, |
5037 | bool ignore_stores, int arg) |
5038 | { |
5039 | /* Returning the value is already accounted to at local propagation. */ |
5040 | int implicit_flags = EAF_NOT_RETURNED_DIRECTLY |
5041 | | EAF_NOT_RETURNED_INDIRECTLY; |
5042 | if (ignore_stores) |
5043 | implicit_flags |= ignore_stores_eaf_flags; |
5044 | if (callee_ecf_flags & ECF_PURE) |
5045 | implicit_flags |= implicit_pure_eaf_flags; |
5046 | if (callee_ecf_flags & (ECF_CONST | ECF_NOVOPS)) |
5047 | implicit_flags |= implicit_const_eaf_flags; |
5048 | class fnspec_summary *fnspec_sum = fnspec_summaries->get (edge: e); |
5049 | if (fnspec_sum) |
5050 | { |
5051 | attr_fnspec fnspec (fnspec_sum->fnspec); |
5052 | implicit_flags |= fnspec.arg_eaf_flags (i: arg); |
5053 | } |
5054 | return implicit_flags; |
5055 | } |
5056 | |
5057 | /* Process escapes in SUM and merge SUMMARY to CUR_SUMMARY |
5058 | and SUMMARY_LTO to CUR_SUMMARY_LTO. |
5059 | Return true if something changed. */ |
5060 | |
5061 | static bool |
5062 | modref_merge_call_site_flags (escape_summary *sum, |
5063 | modref_summary *cur_summary, |
5064 | modref_summary_lto *cur_summary_lto, |
5065 | modref_summary *summary, |
5066 | modref_summary_lto *summary_lto, |
5067 | tree caller, |
5068 | cgraph_edge *e, |
5069 | int caller_ecf_flags, |
5070 | int callee_ecf_flags, |
5071 | bool binds_to_current_def) |
5072 | { |
5073 | escape_entry *ee; |
5074 | unsigned int i; |
5075 | bool changed = false; |
5076 | bool ignore_stores = ignore_stores_p (caller, flags: callee_ecf_flags); |
5077 | |
5078 | /* Return early if we have no useful info to propagate. */ |
5079 | if ((!cur_summary |
5080 | || (!cur_summary->arg_flags.length () |
5081 | && !cur_summary->static_chain_flags |
5082 | && !cur_summary->retslot_flags)) |
5083 | && (!cur_summary_lto |
5084 | || (!cur_summary_lto->arg_flags.length () |
5085 | && !cur_summary_lto->static_chain_flags |
5086 | && !cur_summary_lto->retslot_flags))) |
5087 | return false; |
5088 | |
5089 | FOR_EACH_VEC_ELT (sum->esc, i, ee) |
5090 | { |
5091 | int flags = 0; |
5092 | int flags_lto = 0; |
5093 | int implicit_flags = implicit_eaf_flags_for_edge_and_arg |
5094 | (e, callee_ecf_flags, ignore_stores, arg: ee->arg); |
5095 | |
5096 | if (summary && ee->arg < summary->arg_flags.length ()) |
5097 | flags = summary->arg_flags[ee->arg]; |
5098 | if (summary_lto |
5099 | && ee->arg < summary_lto->arg_flags.length ()) |
5100 | flags_lto = summary_lto->arg_flags[ee->arg]; |
5101 | if (!ee->direct) |
5102 | { |
5103 | flags = deref_flags (flags, ignore_stores); |
5104 | flags_lto = deref_flags (flags: flags_lto, ignore_stores); |
5105 | } |
5106 | if (ignore_stores) |
5107 | implicit_flags |= ignore_stores_eaf_flags; |
5108 | if (callee_ecf_flags & ECF_PURE) |
5109 | implicit_flags |= implicit_pure_eaf_flags; |
5110 | if (callee_ecf_flags & (ECF_CONST | ECF_NOVOPS)) |
5111 | implicit_flags |= implicit_const_eaf_flags; |
5112 | class fnspec_summary *fnspec_sum = fnspec_summaries->get (edge: e); |
5113 | if (fnspec_sum) |
5114 | { |
5115 | attr_fnspec fnspec (fnspec_sum->fnspec); |
5116 | implicit_flags |= fnspec.arg_eaf_flags (i: ee->arg); |
5117 | } |
5118 | if (!ee->direct) |
5119 | implicit_flags = deref_flags (flags: implicit_flags, ignore_stores); |
5120 | flags |= implicit_flags; |
5121 | flags_lto |= implicit_flags; |
5122 | if (!binds_to_current_def && (flags || flags_lto)) |
5123 | { |
5124 | flags = interposable_eaf_flags (modref_flags: flags, flags: implicit_flags); |
5125 | flags_lto = interposable_eaf_flags (modref_flags: flags_lto, flags: implicit_flags); |
5126 | } |
5127 | if (!(flags & EAF_UNUSED) |
5128 | && cur_summary && ee->parm_index < (int)cur_summary->arg_flags.length ()) |
5129 | { |
5130 | eaf_flags_t &f = ee->parm_index == MODREF_RETSLOT_PARM |
5131 | ? cur_summary->retslot_flags |
5132 | : ee->parm_index == MODREF_STATIC_CHAIN_PARM |
5133 | ? cur_summary->static_chain_flags |
5134 | : cur_summary->arg_flags[ee->parm_index]; |
5135 | if ((f & flags) != f) |
5136 | { |
5137 | f = remove_useless_eaf_flags |
5138 | (eaf_flags: f & flags, ecf_flags: caller_ecf_flags, |
5139 | VOID_TYPE_P (TREE_TYPE (TREE_TYPE (caller)))); |
5140 | changed = true; |
5141 | } |
5142 | } |
5143 | if (!(flags_lto & EAF_UNUSED) |
5144 | && cur_summary_lto |
5145 | && ee->parm_index < (int)cur_summary_lto->arg_flags.length ()) |
5146 | { |
5147 | eaf_flags_t &f = ee->parm_index == MODREF_RETSLOT_PARM |
5148 | ? cur_summary_lto->retslot_flags |
5149 | : ee->parm_index == MODREF_STATIC_CHAIN_PARM |
5150 | ? cur_summary_lto->static_chain_flags |
5151 | : cur_summary_lto->arg_flags[ee->parm_index]; |
5152 | if ((f & flags_lto) != f) |
5153 | { |
5154 | f = remove_useless_eaf_flags |
5155 | (eaf_flags: f & flags_lto, ecf_flags: caller_ecf_flags, |
5156 | VOID_TYPE_P (TREE_TYPE (TREE_TYPE (caller)))); |
5157 | changed = true; |
5158 | } |
5159 | } |
5160 | } |
5161 | return changed; |
5162 | } |
5163 | |
5164 | /* Perform iterative dataflow on SCC component starting in COMPONENT_NODE |
5165 | and propagate arg flags. */ |
5166 | |
static void
modref_propagate_flags_in_scc (cgraph_node *component_node)
{
  bool changed = true;
  int iteration = 0;

  /* Iterate the SCC until flag merging reaches a fixpoint.  */
  while (changed)
    {
      changed = false;
      for (struct cgraph_node *cur = component_node; cur;
	   cur = ((struct ipa_dfs_info *) cur->aux)->next_cycle)
	{
	  /* Summaries of inlined nodes live on the node they were
	     inlined into.  */
	  cgraph_node *node = cur->inlined_to ? cur->inlined_to : cur;
	  modref_summary *cur_summary = optimization_summaries
					? optimization_summaries->get (node)
					: NULL;
	  modref_summary_lto *cur_summary_lto = summaries_lto
						? summaries_lto->get (node)
						: NULL;

	  if (!cur_summary && !cur_summary_lto)
	    continue;
	  int caller_ecf_flags = flags_from_decl_or_type (cur->decl);

	  if (dump_file)
	    fprintf (stream: dump_file, format: " Processing %s%s%s\n" ,
		     cur->dump_name (),
		     TREE_READONLY (cur->decl) ? " (const)" : "" ,
		     DECL_PURE_P (cur->decl) ? " (pure)" : "" );

	  /* Indirect calls: no callee summary is available; merge only
	     the implicit (ECF/fnspec) flags.  */
	  for (cgraph_edge *e = cur->indirect_calls; e; e = e->next_callee)
	    {
	      escape_summary *sum = escape_summaries->get (edge: e);

	      if (!sum || (e->indirect_info->ecf_flags
			   & (ECF_CONST | ECF_NOVOPS)))
		continue;

	      changed |= modref_merge_call_site_flags
			      (sum, cur_summary, cur_summary_lto,
			       NULL, NULL,
			       caller: node->decl,
			       e,
			       caller_ecf_flags,
			       callee_ecf_flags: e->indirect_info->ecf_flags,
			       binds_to_current_def: false);
	    }

	  if (!cur_summary && !cur_summary_lto)
	    continue;

	  /* Direct calls: merge callee argument flags through the escape
	     summary of each call edge.  */
	  for (cgraph_edge *callee_edge = cur->callees; callee_edge;
	       callee_edge = callee_edge->next_callee)
	    {
	      int ecf_flags = flags_from_decl_or_type
				 (callee_edge->callee->decl);
	      modref_summary *callee_summary = NULL;
	      modref_summary_lto *callee_summary_lto = NULL;
	      struct cgraph_node *callee;

	      /* Const/novops callees and inlined edges need no merging.  */
	      if (ecf_flags & (ECF_CONST | ECF_NOVOPS)
		  || !callee_edge->inline_failed)
		continue;

	      /* Get the callee and its summary.  */
	      enum availability avail;
	      callee = callee_edge->callee->ultimate_alias_target
			 (availability: &avail, ref: cur);

	      /* It is not necessary to re-process calls outside of the
		 SCC component.  */
	      if (iteration > 0
		  && (!callee->aux
		      || ((struct ipa_dfs_info *)cur->aux)->scc_no
			  != ((struct ipa_dfs_info *)callee->aux)->scc_no))
		continue;

	      escape_summary *sum = escape_summaries->get (edge: callee_edge);
	      if (!sum)
		continue;

	      if (dump_file)
		fprintf (stream: dump_file, format: " Call to %s\n" ,
			 callee_edge->callee->dump_name ());

	      /* Use callee summaries only if the target is reliable;
		 otherwise merge with NULL summaries (implicit flags
		 only).  */
	      if (avail <= AVAIL_INTERPOSABLE
		  || callee_edge->call_stmt_cannot_inline_p)
		;
	      else
		{
		  if (cur_summary)
		    callee_summary = optimization_summaries->get (node: callee);
		  if (cur_summary_lto)
		    callee_summary_lto = summaries_lto->get (node: callee);
		}
	      changed |= modref_merge_call_site_flags
			      (sum, cur_summary, cur_summary_lto,
			       summary: callee_summary, summary_lto: callee_summary_lto,
			       caller: node->decl,
			       e: callee_edge,
			       caller_ecf_flags,
			       callee_ecf_flags: ecf_flags,
			       binds_to_current_def: callee->binds_to_current_def_p ());
	      if (dump_file && changed)
		{
		  if (cur_summary)
		    cur_summary->dump (out: dump_file);
		  if (cur_summary_lto)
		    cur_summary_lto->dump (out: dump_file);
		}
	    }
	}
      iteration++;
    }
  if (dump_file)
    fprintf (stream: dump_file,
	     format: "Propagation of flags finished in %i iterations\n" , iteration);
}
5285 | |
5286 | } /* ANON namespace. */ |
5287 | |
/* Call EDGE was inlined; merge summary from callee to the caller.

   After inlining the callee's body becomes part of the caller, so the
   summary of the caller (more precisely of its inline root) must absorb
   everything the callee could do: its loads/stores, side effects,
   non-determinism, EAF flags and escape information.  The callee's own
   summaries are removed at the end since they are no longer needed.  */

void
ipa_merge_modref_summary_after_inlining (cgraph_edge *edge)
{
  /* Nothing to do when modref never computed any summaries.  */
  if (!summaries && !summaries_lto)
    return;

  /* Merge into the inline root, not into the (possibly itself already
     inlined) immediate caller.  */
  struct cgraph_node *to = (edge->caller->inlined_to
			    ? edge->caller->inlined_to : edge->caller);
  class modref_summary *to_info = summaries ? summaries->get (node: to) : NULL;
  class modref_summary_lto *to_info_lto = summaries_lto
					  ? summaries_lto->get (node: to) : NULL;

  /* If the caller has no summary to merge into, just drop the callee's
     summaries and the edge summaries hanging off the callee.  */
  if (!to_info && !to_info_lto)
    {
      if (summaries)
	summaries->remove (node: edge->callee);
      if (summaries_lto)
	summaries_lto->remove (node: edge->callee);
      remove_modref_edge_summaries (node: edge->callee);
      return;
    }

  class modref_summary *callee_info = summaries ? summaries->get (node: edge->callee)
					: NULL;
  class modref_summary_lto *callee_info_lto
		 = summaries_lto ? summaries_lto->get (node: edge->callee) : NULL;
  int flags = flags_from_decl_or_type (edge->callee->decl);
  /* Combine in outer flags: OR in the ECF flags of every function on the
     inline chain from the immediate caller up to the inline root.  */
  cgraph_node *n;
  for (n = edge->caller; n->inlined_to; n = n->callers->caller)
    flags |= flags_from_decl_or_type (n->decl);
  /* N is now the inline root itself; include its flags as well.  */
  flags |= flags_from_decl_or_type (n->decl);
  bool ignore_stores = ignore_stores_p (caller: edge->caller->decl, flags);

  /* With no callee summary we must assume the inlined body may access
     anything, so collapse the caller's access trees (unless the combined
     ECF flags already prove there are no loads/stores to account for).  */
  if (!callee_info && to_info)
    {
      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
	to_info->loads->collapse ();
      if (!ignore_stores)
	to_info->stores->collapse ();
    }
  if (!callee_info_lto && to_info_lto)
    {
      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
	to_info_lto->loads->collapse ();
      if (!ignore_stores)
	to_info_lto->stores->collapse ();
    }
  /* Merge side effects and non-determinism.
     PURE/CONST flags makes functions deterministic and if there is
     no LOOPING_CONST_OR_PURE they also have no side effects.  */
  if (!(flags & (ECF_CONST | ECF_NOVOPS | ECF_PURE))
      || (flags & ECF_LOOPING_CONST_OR_PURE))
    {
      if (to_info)
	{
	  /* A missing callee summary is treated conservatively as
	     "has side effects / is nondeterministic".  */
	  if (!callee_info || callee_info->side_effects)
	    to_info->side_effects = true;
	  if ((!callee_info || callee_info->nondeterministic)
	      && !ignore_nondeterminism_p (caller: edge->caller->decl, flags))
	    to_info->nondeterministic = true;
	}
      if (to_info_lto)
	{
	  if (!callee_info_lto || callee_info_lto->side_effects)
	    to_info_lto->side_effects = true;
	  if ((!callee_info_lto || callee_info_lto->nondeterministic)
	      && !ignore_nondeterminism_p (caller: edge->caller->decl, flags))
	    to_info_lto->nondeterministic = true;
	}
    }
  /* Merge the callee's load/store access trees into the caller's,
     remapping callee parameter indices through the call's argument
     list (PARM_MAP).  */
  if (callee_info || callee_info_lto)
    {
      auto_vec <modref_parm_map, 32> parm_map;
      modref_parm_map chain_map;
      /* TODO: Once we get jump functions for static chains we could
	 compute parm_index.  */

      compute_parm_map (callee_edge: edge, parm_map: &parm_map);

      if (!ignore_stores)
	{
	  if (to_info && callee_info)
	    to_info->stores->merge (fndecl: to->decl, other: callee_info->stores, parm_map: &parm_map,
				    static_chain_map: &chain_map, record_accesses: false);
	  if (to_info_lto && callee_info_lto)
	    to_info_lto->stores->merge (fndecl: to->decl, other: callee_info_lto->stores,
					parm_map: &parm_map, static_chain_map: &chain_map, record_accesses: false);
	}
      if (!(flags & (ECF_CONST | ECF_NOVOPS)))
	{
	  if (to_info && callee_info)
	    to_info->loads->merge (fndecl: to->decl, other: callee_info->loads, parm_map: &parm_map,
				   static_chain_map: &chain_map, record_accesses: false);
	  if (to_info_lto && callee_info_lto)
	    to_info_lto->loads->merge (fndecl: to->decl, other: callee_info_lto->loads,
				       parm_map: &parm_map, static_chain_map: &chain_map, record_accesses: false);
	}
    }

  /* Now merge escape summaries.
     For every escape to the callee we need to merge callee flags
     and remap callee's escapes.  */
  class escape_summary *sum = escape_summaries->get (edge);
  int max_escape = -1;
  escape_entry *ee;
  unsigned int i;

  /* Find the highest callee argument index any escape refers to, so EMAP
     below can be sized to MAX_ESCAPE + 1 entries.  */
  if (sum && !(flags & (ECF_CONST | ECF_NOVOPS)))
    FOR_EACH_VEC_ELT (sum->esc, i, ee)
      if ((int)ee->arg > max_escape)
	max_escape = ee->arg;

  /* EMAP[arg] collects the caller-side escape points the callee's
     argument ARG maps to; consumed by update_escape_summary below.  */
  auto_vec <vec <struct escape_map>, 32> emap (max_escape + 1);
  emap.safe_grow (len: max_escape + 1, exact: true);
  for (i = 0; (int)i < max_escape + 1; i++)
    emap[i] = vNULL;

  if (sum && !(flags & (ECF_CONST | ECF_NOVOPS)))
    FOR_EACH_VEC_ELT (sum->esc, i, ee)
      {
	bool needed = false;
	/* EAF flags implied by the call site itself (ECF flags of the
	   edge, stores being ignorable, ...).  */
	int implicit_flags = implicit_eaf_flags_for_edge_and_arg
				(e: edge, callee_ecf_flags: flags, ignore_stores,
				 arg: ee->arg);
	if (!ee->direct)
	  implicit_flags = deref_flags (flags: implicit_flags, ignore_stores);
	if (to_info && (int)to_info->arg_flags.length () > ee->parm_index)
	  {
	    /* Flags the callee's summary knows about this argument
	       (0, i.e. nothing known, when out of range).  */
	    int flags = callee_info
			&& callee_info->arg_flags.length () > ee->arg
			? callee_info->arg_flags[ee->arg] : 0;
	    if (!ee->direct)
	      flags = deref_flags (flags, ignore_stores);
	    flags |= ee->min_flags | implicit_flags;
	    /* Select the caller-side flag slot the escape lands in:
	       return slot, static chain, or a regular argument.  */
	    eaf_flags_t &f = ee->parm_index == MODREF_RETSLOT_PARM
			     ? to_info->retslot_flags
			     : ee->parm_index == MODREF_STATIC_CHAIN_PARM
			     ? to_info->static_chain_flags
			     : to_info->arg_flags[ee->parm_index];
	    /* Flags can only weaken; intersect with what we derived.  */
	    f &= flags;
	    if (f)
	      needed = true;
	  }
	if (to_info_lto
	    && (int)to_info_lto->arg_flags.length () > ee->parm_index)
	  {
	    int flags = callee_info_lto
			&& callee_info_lto->arg_flags.length () > ee->arg
			? callee_info_lto->arg_flags[ee->arg] : 0;
	    if (!ee->direct)
	      flags = deref_flags (flags, ignore_stores);
	    flags |= ee->min_flags | implicit_flags;
	    eaf_flags_t &f = ee->parm_index == MODREF_RETSLOT_PARM
			     ? to_info_lto->retslot_flags
			     : ee->parm_index == MODREF_STATIC_CHAIN_PARM
			     ? to_info_lto->static_chain_flags
			     : to_info_lto->arg_flags[ee->parm_index];
	    f &= flags;
	    if (f)
	      needed = true;
	  }
	/* Record the mapping only if some useful flag survived the
	   intersection; otherwise the escape carries no information.  */
	struct escape_map entry = {.parm_index: ee->parm_index, .direct: ee->direct};
	if (needed)
	  emap[ee->arg].safe_push (obj: entry);
      }
  /* Remap escapes recorded on the callee's own outgoing edges through
     EMAP and drop this call's escape summary.  */
  update_escape_summary (node: edge->callee, map&: emap, ignore_stores);
  for (i = 0; (int)i < max_escape + 1; i++)
    emap[i].release ();
  if (sum)
    escape_summaries->remove (edge);

  /* Finally, drop the merged caller summary if it no longer carries
     useful information, and remove the callee's summaries since its
     body now lives inside TO.  */
  if (summaries)
    {
      if (to_info && !to_info->useful_p (ecf_flags: flags))
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: "Removed mod-ref summary for %s\n",
		     to->dump_name ());
	  summaries->remove (node: to);
	  to_info = NULL;
	}
      else if (to_info && dump_file)
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: "Updated mod-ref summary for %s\n",
		     to->dump_name ());
	  to_info->dump (out: dump_file);
	}
      if (callee_info)
	summaries->remove (node: edge->callee);
    }
  if (summaries_lto)
    {
      if (to_info_lto && !to_info_lto->useful_p (ecf_flags: flags))
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: "Removed mod-ref summary for %s\n",
		     to->dump_name ());
	  summaries_lto->remove (node: to);
	  to_info_lto = NULL;
	}
      else if (to_info_lto && dump_file)
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: "Updated mod-ref summary for %s\n",
		     to->dump_name ());
	  to_info_lto->dump (out: dump_file);
	}
      if (callee_info_lto)
	summaries_lto->remove (node: edge->callee);
    }
  if (!to_info && !to_info_lto)
    remove_modref_edge_summaries (node: to);
  return;
}
5506 | |
/* Run the IPA pass.  This will take a function's summaries and calls and
   construct new summaries which represent a transitive closure.  So that
   summary of an analyzed function contains information about the loads and
   stores that the function or any function that it calls does.  */

unsigned int
pass_ipa_modref::execute (function *)
{
  /* The analysis stage produced no summaries at all; nothing to do.  */
  if (!summaries && !summaries_lto)
    return 0;
  /* Set when propagation possibly made some functions const/pure; in
     that case TODO_remove_functions is requested below.  */
  bool pureconst = false;

  /* The per-function summaries now become the optimization summaries;
     the propagation below updates them in place.  */
  if (optimization_summaries)
    ggc_delete (ptr: optimization_summaries);
  optimization_summaries = summaries;
  summaries = NULL;

  struct cgraph_node **order = XCNEWVEC (struct cgraph_node *,
					 symtab->cgraph_count);
  int order_pos;
  /* Build reduced callgraph postorder (strongly connected components),
     skipping edges rejected by ignore_edge.  */
  order_pos = ipa_reduced_postorder (order, true, ignore_edge);
  int i;

  /* Iterate over all strongly connected components in post-order.  */
  for (i = 0; i < order_pos; i++)
    {
      /* Get the component's representative.  That's just any node in the
	 component from which we can traverse the entire component.  */
      struct cgraph_node *component_node = order[i];

      if (dump_file)
	fprintf (stream: dump_file, format: "\n\nStart of SCC component\n" );

      /* First propagate loads/stores, then EAF flags, across the SCC.  */
      pureconst |= modref_propagate_in_scc (component_node);
      modref_propagate_flags_in_scc (component_node);
      /* Finalize the propagated summary of each member of the component
	 (walk via the next_cycle chain set up by ipa_reduced_postorder).  */
      if (optimization_summaries)
	for (struct cgraph_node *cur = component_node; cur;
	     cur = ((struct ipa_dfs_info *) cur->aux)->next_cycle)
	  if (modref_summary *sum = optimization_summaries->get (node: cur))
	    sum->finalize (fun: cur->decl);
      if (dump_file)
	modref_propagate_dump_scc (component_node);
    }
  cgraph_node *node;
  FOR_EACH_FUNCTION (node)
    update_signature (node);
  /* Remember that LTO summaries now reflect the propagated state.  */
  if (summaries_lto)
    ((modref_summaries_lto *)summaries_lto)->propagated = true;
  ipa_free_postorder_info ();
  free (ptr: order);
  /* The fnspec and escape edge summaries are only needed during
     propagation; release them now.  */
  delete fnspec_summaries;
  fnspec_summaries = NULL;
  delete escape_summaries;
  escape_summaries = NULL;

  /* If we possibly made constructors const/pure we may need to remove
     them.  */
  return pureconst ? TODO_remove_functions : 0;
}
5566 | |
5567 | /* Summaries must stay alive until end of compilation. */ |
5568 | |
5569 | void |
5570 | ipa_modref_cc_finalize () |
5571 | { |
5572 | if (optimization_summaries) |
5573 | ggc_delete (ptr: optimization_summaries); |
5574 | optimization_summaries = NULL; |
5575 | if (summaries_lto) |
5576 | ggc_delete (ptr: summaries_lto); |
5577 | summaries_lto = NULL; |
5578 | if (fnspec_summaries) |
5579 | delete fnspec_summaries; |
5580 | fnspec_summaries = NULL; |
5581 | if (escape_summaries) |
5582 | delete escape_summaries; |
5583 | escape_summaries = NULL; |
5584 | } |
5585 | |
5586 | #include "gt-ipa-modref.h" |
5587 | |