1 | /* SSA Dominator optimizations for trees |
2 | Copyright (C) 2001-2023 Free Software Foundation, Inc. |
3 | Contributed by Diego Novillo <dnovillo@redhat.com> |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify |
8 | it under the terms of the GNU General Public License as published by |
9 | the Free Software Foundation; either version 3, or (at your option) |
10 | any later version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | GNU General Public License for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "backend.h" |
25 | #include "tree.h" |
26 | #include "gimple.h" |
27 | #include "tree-pass.h" |
28 | #include "ssa.h" |
29 | #include "gimple-pretty-print.h" |
30 | #include "fold-const.h" |
31 | #include "cfganal.h" |
32 | #include "cfgloop.h" |
33 | #include "gimple-iterator.h" |
34 | #include "gimple-fold.h" |
35 | #include "tree-eh.h" |
36 | #include "tree-inline.h" |
37 | #include "tree-cfg.h" |
38 | #include "tree-into-ssa.h" |
39 | #include "domwalk.h" |
40 | #include "tree-ssa-propagate.h" |
41 | #include "tree-ssa-threadupdate.h" |
42 | #include "tree-ssa-scopedtables.h" |
43 | #include "tree-ssa-threadedge.h" |
44 | #include "tree-ssa-dom.h" |
45 | #include "gimplify.h" |
46 | #include "tree-cfgcleanup.h" |
47 | #include "dbgcnt.h" |
48 | #include "alloc-pool.h" |
49 | #include "tree-vrp.h" |
50 | #include "vr-values.h" |
51 | #include "gimple-range.h" |
52 | #include "gimple-range-path.h" |
53 | #include "alias.h" |
54 | |
55 | /* This file implements optimizations on the dominator tree. */ |
56 | |
57 | /* Structure for recording edge equivalences. |
58 | |
59 | Computing and storing the edge equivalences instead of creating |
60 | them on-demand can save significant amounts of time, particularly |
61 | for pathological cases involving switch statements. |
62 | |
63 | These structures live for a single iteration of the dominator |
64 | optimizer in the edge's AUX field. At the end of an iteration we |
65 | free each of these structures. */ |
class edge_info
{
public:
  /* A simple LHS/RHS equivalence pair implied by edge traversal.  */
  typedef std::pair <tree, tree> equiv_pair;
  /* Constructing an edge_info attaches it to the edge's AUX field,
     releasing any previously attached instance.  */
  edge_info (edge);
  ~edge_info ();

  /* Record a simple LHS = RHS equivalence.  This may trigger
     calls to derive_equivalences.  */
  void record_simple_equiv (tree, tree);

  /* If traversing this edge creates simple equivalences, we store
     them as LHS/RHS pairs within this vector.  */
  vec<equiv_pair> simple_equivalences;

  /* Traversing an edge may also indicate one or more particular conditions
     are true or false.  */
  vec<cond_equivalence> cond_equivalences;

private:
  /* Derive equivalences by walking the use-def chains.  */
  void derive_equivalences (tree, tree, int);
};
89 | |
/* Track whether or not we have changed the control flow graph.  */
static bool cfg_altered;

/* Bitmap of blocks that have had EH statements cleaned.  We should
   remove their dead edges eventually.  */
static bitmap need_eh_cleanup;
/* Statements queued for noreturn fixup; collected during the walk and
   processed after it (see uses later in this file).  */
static vec<gimple *> need_noreturn_fixup;

/* Statistics for dominator optimizations.  These counters are
   incremented by the optimization routines later in this file and
   reported by dump_dominator_optimization_stats.  */
struct opt_stats_d
{
  long num_stmts;
  long num_exprs_considered;
  long num_re;
  long num_const_prop;
  long num_copy_prop;
};

static struct opt_stats_d opt_stats;

/* Local functions.  Forward declarations for routines defined later in
   this file.  */
static void record_equality (tree, tree, class const_and_copies *);
static void record_equivalences_from_phis (basic_block);
static void record_equivalences_from_incoming_edge (basic_block,
						    class const_and_copies *,
						    class avail_exprs_stack *,
						    bitmap blocks_on_stack);
static void eliminate_redundant_computations (gimple_stmt_iterator *,
					      class const_and_copies *,
					      class avail_exprs_stack *);
static void record_equivalences_from_stmt (gimple *, int,
					   class avail_exprs_stack *);
static void dump_dominator_optimization_stats (FILE *file,
					       hash_table<expr_elt_hasher> *);
static void record_temporary_equivalences (edge, class const_and_copies *,
					   class avail_exprs_stack *, bitmap);
126 | |
127 | /* Constructor for EDGE_INFO. An EDGE_INFO instance is always |
128 | associated with an edge E. */ |
129 | |
130 | edge_info::edge_info (edge e) |
131 | { |
132 | /* Free the old one associated with E, if it exists and |
133 | associate our new object with E. */ |
134 | free_dom_edge_info (e); |
135 | e->aux = this; |
136 | |
137 | /* And initialize the embedded vectors. */ |
138 | simple_equivalences = vNULL; |
139 | cond_equivalences = vNULL; |
140 | } |
141 | |
142 | /* Destructor just needs to release the vectors. */ |
143 | |
144 | edge_info::~edge_info (void) |
145 | { |
146 | this->cond_equivalences.release (); |
147 | this->simple_equivalences.release (); |
148 | } |
149 | |
150 | /* NAME is known to have the value VALUE, which must be a constant. |
151 | |
152 | Walk through its use-def chain to see if there are other equivalences |
153 | we might be able to derive. |
154 | |
155 | RECURSION_LIMIT controls how far back we recurse through the use-def |
156 | chains. */ |
157 | |
158 | void |
159 | edge_info::derive_equivalences (tree name, tree value, int recursion_limit) |
160 | { |
161 | if (TREE_CODE (name) != SSA_NAME || TREE_CODE (value) != INTEGER_CST) |
162 | return; |
163 | |
164 | /* This records the equivalence for the toplevel object. Do |
165 | this before checking the recursion limit. */ |
166 | simple_equivalences.safe_push (obj: equiv_pair (name, value)); |
167 | |
168 | /* Limit how far up the use-def chains we are willing to walk. */ |
169 | if (recursion_limit == 0) |
170 | return; |
171 | |
172 | /* We can walk up the use-def chains to potentially find more |
173 | equivalences. */ |
174 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
175 | if (is_gimple_assign (gs: def_stmt)) |
176 | { |
177 | enum tree_code code = gimple_assign_rhs_code (gs: def_stmt); |
178 | switch (code) |
179 | { |
180 | /* If the result of an OR is zero, then its operands are, too. */ |
181 | case BIT_IOR_EXPR: |
182 | if (integer_zerop (value)) |
183 | { |
184 | tree rhs1 = gimple_assign_rhs1 (gs: def_stmt); |
185 | tree rhs2 = gimple_assign_rhs2 (gs: def_stmt); |
186 | |
187 | value = build_zero_cst (TREE_TYPE (rhs1)); |
188 | derive_equivalences (name: rhs1, value, recursion_limit: recursion_limit - 1); |
189 | value = build_zero_cst (TREE_TYPE (rhs2)); |
190 | derive_equivalences (name: rhs2, value, recursion_limit: recursion_limit - 1); |
191 | } |
192 | break; |
193 | |
194 | /* If the result of an AND is nonzero, then its operands are, too. */ |
195 | case BIT_AND_EXPR: |
196 | if (!integer_zerop (value)) |
197 | { |
198 | tree rhs1 = gimple_assign_rhs1 (gs: def_stmt); |
199 | tree rhs2 = gimple_assign_rhs2 (gs: def_stmt); |
200 | |
201 | /* If either operand has a boolean range, then we |
202 | know its value must be one, otherwise we just know it |
203 | is nonzero. The former is clearly useful, I haven't |
204 | seen cases where the latter is helpful yet. */ |
205 | if (TREE_CODE (rhs1) == SSA_NAME) |
206 | { |
207 | if (ssa_name_has_boolean_range (rhs1)) |
208 | { |
209 | value = build_one_cst (TREE_TYPE (rhs1)); |
210 | derive_equivalences (name: rhs1, value, recursion_limit: recursion_limit - 1); |
211 | } |
212 | } |
213 | if (TREE_CODE (rhs2) == SSA_NAME) |
214 | { |
215 | if (ssa_name_has_boolean_range (rhs2)) |
216 | { |
217 | value = build_one_cst (TREE_TYPE (rhs2)); |
218 | derive_equivalences (name: rhs2, value, recursion_limit: recursion_limit - 1); |
219 | } |
220 | } |
221 | } |
222 | break; |
223 | |
224 | /* If LHS is an SSA_NAME and RHS is a constant integer and LHS was |
225 | set via a widening type conversion, then we may be able to record |
226 | additional equivalences. */ |
227 | CASE_CONVERT: |
228 | { |
229 | tree rhs = gimple_assign_rhs1 (gs: def_stmt); |
230 | tree rhs_type = TREE_TYPE (rhs); |
231 | if (INTEGRAL_TYPE_P (rhs_type) |
232 | && (TYPE_PRECISION (TREE_TYPE (name)) |
233 | >= TYPE_PRECISION (rhs_type)) |
234 | && int_fits_type_p (value, rhs_type)) |
235 | derive_equivalences (name: rhs, |
236 | fold_convert (rhs_type, value), |
237 | recursion_limit: recursion_limit - 1); |
238 | break; |
239 | } |
240 | |
241 | /* We can invert the operation of these codes trivially if |
242 | one of the RHS operands is a constant to produce a known |
243 | value for the other RHS operand. */ |
244 | case POINTER_PLUS_EXPR: |
245 | case PLUS_EXPR: |
246 | { |
247 | tree rhs1 = gimple_assign_rhs1 (gs: def_stmt); |
248 | tree rhs2 = gimple_assign_rhs2 (gs: def_stmt); |
249 | |
250 | /* If either argument is a constant, then we can compute |
251 | a constant value for the nonconstant argument. */ |
252 | if (TREE_CODE (rhs1) == INTEGER_CST |
253 | && TREE_CODE (rhs2) == SSA_NAME) |
254 | derive_equivalences (name: rhs2, |
255 | fold_binary (MINUS_EXPR, TREE_TYPE (rhs1), |
256 | value, rhs1), |
257 | recursion_limit: recursion_limit - 1); |
258 | else if (TREE_CODE (rhs2) == INTEGER_CST |
259 | && TREE_CODE (rhs1) == SSA_NAME) |
260 | derive_equivalences (name: rhs1, |
261 | fold_binary (MINUS_EXPR, TREE_TYPE (rhs1), |
262 | value, rhs2), |
263 | recursion_limit: recursion_limit - 1); |
264 | break; |
265 | } |
266 | |
267 | /* If one of the operands is a constant, then we can compute |
268 | the value of the other operand. If both operands are |
269 | SSA_NAMEs, then they must be equal if the result is zero. */ |
270 | case MINUS_EXPR: |
271 | { |
272 | tree rhs1 = gimple_assign_rhs1 (gs: def_stmt); |
273 | tree rhs2 = gimple_assign_rhs2 (gs: def_stmt); |
274 | |
275 | /* If either argument is a constant, then we can compute |
276 | a constant value for the nonconstant argument. */ |
277 | if (TREE_CODE (rhs1) == INTEGER_CST |
278 | && TREE_CODE (rhs2) == SSA_NAME) |
279 | derive_equivalences (name: rhs2, |
280 | fold_binary (MINUS_EXPR, TREE_TYPE (rhs1), |
281 | rhs1, value), |
282 | recursion_limit: recursion_limit - 1); |
283 | else if (TREE_CODE (rhs2) == INTEGER_CST |
284 | && TREE_CODE (rhs1) == SSA_NAME) |
285 | derive_equivalences (name: rhs1, |
286 | fold_binary (PLUS_EXPR, TREE_TYPE (rhs1), |
287 | value, rhs2), |
288 | recursion_limit: recursion_limit - 1); |
289 | else if (integer_zerop (value)) |
290 | { |
291 | tree cond = build2 (EQ_EXPR, boolean_type_node, |
292 | gimple_assign_rhs1 (gs: def_stmt), |
293 | gimple_assign_rhs2 (gs: def_stmt)); |
294 | tree inverted = invert_truthvalue (cond); |
295 | record_conditions (p: &this->cond_equivalences, cond, inverted); |
296 | } |
297 | break; |
298 | } |
299 | |
300 | case EQ_EXPR: |
301 | case NE_EXPR: |
302 | { |
303 | if ((code == EQ_EXPR && integer_onep (value)) |
304 | || (code == NE_EXPR && integer_zerop (value))) |
305 | { |
306 | tree rhs1 = gimple_assign_rhs1 (gs: def_stmt); |
307 | tree rhs2 = gimple_assign_rhs2 (gs: def_stmt); |
308 | |
309 | /* If either argument is a constant, then record the |
310 | other argument as being the same as that constant. |
311 | |
312 | If neither operand is a constant, then we have a |
313 | conditional name == name equivalence. */ |
314 | if (TREE_CODE (rhs1) == INTEGER_CST) |
315 | derive_equivalences (name: rhs2, value: rhs1, recursion_limit: recursion_limit - 1); |
316 | else if (TREE_CODE (rhs2) == INTEGER_CST) |
317 | derive_equivalences (name: rhs1, value: rhs2, recursion_limit: recursion_limit - 1); |
318 | } |
319 | else |
320 | { |
321 | tree cond = build2 (code, boolean_type_node, |
322 | gimple_assign_rhs1 (gs: def_stmt), |
323 | gimple_assign_rhs2 (gs: def_stmt)); |
324 | tree inverted = invert_truthvalue (cond); |
325 | if (integer_zerop (value)) |
326 | std::swap (a&: cond, b&: inverted); |
327 | record_conditions (p: &this->cond_equivalences, cond, inverted); |
328 | } |
329 | break; |
330 | } |
331 | |
332 | /* For BIT_NOT and NEGATE, we can just apply the operation to the |
333 | VALUE to get the new equivalence. It will always be a constant |
334 | so we can recurse. */ |
335 | case BIT_NOT_EXPR: |
336 | case NEGATE_EXPR: |
337 | { |
338 | tree rhs = gimple_assign_rhs1 (gs: def_stmt); |
339 | tree res; |
340 | /* If this is a NOT and the operand has a boolean range, then we |
341 | know its value must be zero or one. We are not supposed to |
342 | have a BIT_NOT_EXPR for boolean types with precision > 1 in |
343 | the general case, see e.g. the handling of TRUTH_NOT_EXPR in |
344 | the gimplifier, but it can be generated by match.pd out of |
345 | a BIT_XOR_EXPR wrapped in a BIT_AND_EXPR. Now the handling |
346 | of BIT_AND_EXPR above already forces a specific semantics for |
347 | boolean types with precision > 1 so we must do the same here, |
348 | otherwise we could change the semantics of TRUTH_NOT_EXPR for |
349 | boolean types with precision > 1. */ |
350 | if (code == BIT_NOT_EXPR |
351 | && TREE_CODE (rhs) == SSA_NAME |
352 | && ssa_name_has_boolean_range (rhs)) |
353 | { |
354 | if ((TREE_INT_CST_LOW (value) & 1) == 0) |
355 | res = build_one_cst (TREE_TYPE (rhs)); |
356 | else |
357 | res = build_zero_cst (TREE_TYPE (rhs)); |
358 | } |
359 | else |
360 | res = fold_build1 (code, TREE_TYPE (rhs), value); |
361 | derive_equivalences (name: rhs, value: res, recursion_limit: recursion_limit - 1); |
362 | break; |
363 | } |
364 | |
365 | default: |
366 | { |
367 | if (TREE_CODE_CLASS (code) == tcc_comparison) |
368 | { |
369 | tree cond = build2 (code, boolean_type_node, |
370 | gimple_assign_rhs1 (gs: def_stmt), |
371 | gimple_assign_rhs2 (gs: def_stmt)); |
372 | tree inverted = invert_truthvalue (cond); |
373 | if (integer_zerop (value)) |
374 | std::swap (a&: cond, b&: inverted); |
375 | record_conditions (p: &this->cond_equivalences, cond, inverted); |
376 | break; |
377 | } |
378 | break; |
379 | } |
380 | } |
381 | } |
382 | } |
383 | |
384 | void |
385 | edge_info::record_simple_equiv (tree lhs, tree rhs) |
386 | { |
387 | /* If the RHS is a constant, then we may be able to derive |
388 | further equivalences. Else just record the name = name |
389 | equivalence. */ |
390 | if (TREE_CODE (rhs) == INTEGER_CST) |
391 | derive_equivalences (name: lhs, value: rhs, recursion_limit: 4); |
392 | else |
393 | simple_equivalences.safe_push (obj: equiv_pair (lhs, rhs)); |
394 | } |
395 | |
396 | /* Free the edge_info data attached to E, if it exists and |
397 | clear e->aux. */ |
398 | |
399 | void |
400 | free_dom_edge_info (edge e) |
401 | { |
402 | class edge_info *edge_info = (class edge_info *)e->aux; |
403 | |
404 | if (edge_info) |
405 | delete edge_info; |
406 | e->aux = NULL; |
407 | } |
408 | |
409 | /* Free all EDGE_INFO structures associated with edges in the CFG. |
410 | If a particular edge can be threaded, copy the redirection |
411 | target from the EDGE_INFO structure into the edge's AUX field |
412 | as required by code to update the CFG and SSA graph for |
413 | jump threading. */ |
414 | |
415 | static void |
416 | free_all_edge_infos (void) |
417 | { |
418 | basic_block bb; |
419 | edge_iterator ei; |
420 | edge e; |
421 | |
422 | FOR_EACH_BB_FN (bb, cfun) |
423 | { |
424 | FOR_EACH_EDGE (e, ei, bb->preds) |
425 | free_dom_edge_info (e); |
426 | } |
427 | } |
428 | |
429 | /* Return TRUE if BB has precisely two preds, one of which |
430 | is a backedge from a forwarder block where the forwarder |
431 | block is a direct successor of BB. Being a forwarder |
432 | block, it has no side effects other than transfer of |
433 | control. Otherwise return FALSE. */ |
434 | |
435 | static bool |
436 | single_block_loop_p (basic_block bb) |
437 | { |
438 | /* Two preds. */ |
439 | if (EDGE_COUNT (bb->preds) != 2) |
440 | return false; |
441 | |
442 | /* One and only one of the edges must be marked with |
443 | EDGE_DFS_BACK. */ |
444 | basic_block pred = NULL; |
445 | unsigned int count = 0; |
446 | if (EDGE_PRED (bb, 0)->flags & EDGE_DFS_BACK) |
447 | { |
448 | pred = EDGE_PRED (bb, 0)->src; |
449 | count++; |
450 | } |
451 | if (EDGE_PRED (bb, 1)->flags & EDGE_DFS_BACK) |
452 | { |
453 | pred = EDGE_PRED (bb, 1)->src; |
454 | count++; |
455 | } |
456 | |
457 | if (count != 1) |
458 | return false; |
459 | |
460 | /* Now examine PRED. It should have a single predecessor which |
461 | is BB and a single successor that is also BB. */ |
462 | if (EDGE_COUNT (pred->preds) != 1 |
463 | || EDGE_COUNT (pred->succs) != 1 |
464 | || EDGE_PRED (pred, 0)->src != bb |
465 | || EDGE_SUCC (pred, 0)->dest != bb) |
466 | return false; |
467 | |
468 | /* This looks good from a CFG standpoint. Now look at the guts |
469 | of PRED. Basically we want to verify there are no PHI nodes |
470 | and no real statements. */ |
471 | if (! gimple_seq_empty_p (s: phi_nodes (bb: pred))) |
472 | return false; |
473 | |
474 | gimple_stmt_iterator gsi; |
475 | for (gsi = gsi_last_bb (bb: pred); !gsi_end_p (i: gsi); gsi_prev (i: &gsi)) |
476 | { |
477 | gimple *stmt = gsi_stmt (i: gsi); |
478 | |
479 | switch (gimple_code (g: stmt)) |
480 | { |
481 | case GIMPLE_LABEL: |
482 | if (DECL_NONLOCAL (gimple_label_label (as_a <glabel *> (stmt)))) |
483 | return false; |
484 | break; |
485 | |
486 | case GIMPLE_DEBUG: |
487 | break; |
488 | |
489 | default: |
490 | return false; |
491 | } |
492 | } |
493 | |
494 | return true; |
495 | } |
496 | |
497 | /* We have finished optimizing BB, record any information implied by |
498 | taking a specific outgoing edge from BB. */ |
499 | |
500 | static void |
501 | record_edge_info (basic_block bb) |
502 | { |
503 | gimple_stmt_iterator gsi = gsi_last_bb (bb); |
504 | class edge_info *edge_info; |
505 | |
506 | /* Free all the outgoing edge info data associated with |
507 | BB's outgoing edges. */ |
508 | edge e; |
509 | edge_iterator ei; |
510 | FOR_EACH_EDGE (e, ei, bb->succs) |
511 | free_dom_edge_info (e); |
512 | |
513 | if (! gsi_end_p (i: gsi)) |
514 | { |
515 | gimple *stmt = gsi_stmt (i: gsi); |
516 | location_t loc = gimple_location (g: stmt); |
517 | |
518 | if (gimple_code (g: stmt) == GIMPLE_SWITCH) |
519 | { |
520 | gswitch *switch_stmt = as_a <gswitch *> (p: stmt); |
521 | tree index = gimple_switch_index (gs: switch_stmt); |
522 | |
523 | if (TREE_CODE (index) == SSA_NAME) |
524 | { |
525 | int i; |
526 | int n_labels = gimple_switch_num_labels (gs: switch_stmt); |
527 | tree *info = XCNEWVEC (tree, last_basic_block_for_fn (cfun)); |
528 | |
529 | for (i = 0; i < n_labels; i++) |
530 | { |
531 | tree label = gimple_switch_label (gs: switch_stmt, index: i); |
532 | basic_block target_bb |
533 | = label_to_block (cfun, CASE_LABEL (label)); |
534 | if (CASE_HIGH (label) |
535 | || !CASE_LOW (label) |
536 | || info[target_bb->index]) |
537 | info[target_bb->index] = error_mark_node; |
538 | else |
539 | info[target_bb->index] = label; |
540 | } |
541 | |
542 | FOR_EACH_EDGE (e, ei, bb->succs) |
543 | { |
544 | basic_block target_bb = e->dest; |
545 | tree label = info[target_bb->index]; |
546 | |
547 | if (label != NULL && label != error_mark_node) |
548 | { |
549 | tree x = fold_convert_loc (loc, TREE_TYPE (index), |
550 | CASE_LOW (label)); |
551 | edge_info = new class edge_info (e); |
552 | edge_info->record_simple_equiv (lhs: index, rhs: x); |
553 | } |
554 | } |
555 | free (ptr: info); |
556 | } |
557 | } |
558 | |
559 | /* A COND_EXPR may create equivalences too. */ |
560 | if (gimple_code (g: stmt) == GIMPLE_COND) |
561 | { |
562 | edge true_edge; |
563 | edge false_edge; |
564 | |
565 | tree op0 = gimple_cond_lhs (gs: stmt); |
566 | tree op1 = gimple_cond_rhs (gs: stmt); |
567 | enum tree_code code = gimple_cond_code (gs: stmt); |
568 | |
569 | extract_true_false_edges_from_block (bb, &true_edge, &false_edge); |
570 | |
571 | /* Special case comparing booleans against a constant as we |
572 | know the value of OP0 on both arms of the branch. i.e., we |
573 | can record an equivalence for OP0 rather than COND. |
574 | |
575 | However, don't do this if the constant isn't zero or one. |
576 | Such conditionals will get optimized more thoroughly during |
577 | the domwalk. */ |
578 | if ((code == EQ_EXPR || code == NE_EXPR) |
579 | && TREE_CODE (op0) == SSA_NAME |
580 | && ssa_name_has_boolean_range (op0) |
581 | && is_gimple_min_invariant (op1) |
582 | && (integer_zerop (op1) || integer_onep (op1))) |
583 | { |
584 | tree true_val = constant_boolean_node (true, TREE_TYPE (op0)); |
585 | tree false_val = constant_boolean_node (false, TREE_TYPE (op0)); |
586 | |
587 | if (code == EQ_EXPR) |
588 | { |
589 | edge_info = new class edge_info (true_edge); |
590 | edge_info->record_simple_equiv (lhs: op0, |
591 | rhs: (integer_zerop (op1) |
592 | ? false_val : true_val)); |
593 | edge_info = new class edge_info (false_edge); |
594 | edge_info->record_simple_equiv (lhs: op0, |
595 | rhs: (integer_zerop (op1) |
596 | ? true_val : false_val)); |
597 | } |
598 | else |
599 | { |
600 | edge_info = new class edge_info (true_edge); |
601 | edge_info->record_simple_equiv (lhs: op0, |
602 | rhs: (integer_zerop (op1) |
603 | ? true_val : false_val)); |
604 | edge_info = new class edge_info (false_edge); |
605 | edge_info->record_simple_equiv (lhs: op0, |
606 | rhs: (integer_zerop (op1) |
607 | ? false_val : true_val)); |
608 | } |
609 | } |
610 | /* This can show up in the IL as a result of copy propagation |
611 | it will eventually be canonicalized, but we have to cope |
612 | with this case within the pass. */ |
613 | else if (is_gimple_min_invariant (op0) |
614 | && TREE_CODE (op1) == SSA_NAME) |
615 | { |
616 | tree cond = build2 (code, boolean_type_node, op0, op1); |
617 | tree inverted = invert_truthvalue_loc (loc, cond); |
618 | bool can_infer_simple_equiv |
619 | = !(HONOR_SIGNED_ZEROS (op0) && real_maybe_zerop (op0)) |
620 | && !DECIMAL_FLOAT_MODE_P (element_mode (TREE_TYPE (op0))); |
621 | class edge_info *edge_info; |
622 | |
623 | edge_info = new class edge_info (true_edge); |
624 | record_conditions (p: &edge_info->cond_equivalences, cond, inverted); |
625 | |
626 | if (can_infer_simple_equiv && code == EQ_EXPR) |
627 | edge_info->record_simple_equiv (lhs: op1, rhs: op0); |
628 | |
629 | edge_info = new class edge_info (false_edge); |
630 | record_conditions (p: &edge_info->cond_equivalences, inverted, cond); |
631 | |
632 | if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR) |
633 | edge_info->record_simple_equiv (lhs: op1, rhs: op0); |
634 | } |
635 | |
636 | else if (TREE_CODE (op0) == SSA_NAME |
637 | && (TREE_CODE (op1) == SSA_NAME |
638 | || is_gimple_min_invariant (op1))) |
639 | { |
640 | tree cond = build2 (code, boolean_type_node, op0, op1); |
641 | tree inverted = invert_truthvalue_loc (loc, cond); |
642 | bool can_infer_simple_equiv |
643 | = !(HONOR_SIGNED_ZEROS (op1) && real_maybe_zerop (op1)) |
644 | && !DECIMAL_FLOAT_MODE_P (element_mode (TREE_TYPE (op1))); |
645 | class edge_info *edge_info; |
646 | |
647 | edge_info = new class edge_info (true_edge); |
648 | record_conditions (p: &edge_info->cond_equivalences, cond, inverted); |
649 | |
650 | if (can_infer_simple_equiv && code == EQ_EXPR) |
651 | edge_info->record_simple_equiv (lhs: op0, rhs: op1); |
652 | |
653 | edge_info = new class edge_info (false_edge); |
654 | record_conditions (p: &edge_info->cond_equivalences, inverted, cond); |
655 | |
656 | if (can_infer_simple_equiv && TREE_CODE (inverted) == EQ_EXPR) |
657 | edge_info->record_simple_equiv (lhs: op0, rhs: op1); |
658 | } |
659 | |
660 | /* If this block is a single block loop, then we may be able to |
661 | record some equivalences on the loop's exit edge. */ |
662 | if (single_block_loop_p (bb)) |
663 | { |
664 | /* We know it's a single block loop. Now look at the loop |
665 | exit condition. What we're looking for is whether or not |
666 | the exit condition is loop invariant which we can detect |
667 | by checking if all the SSA_NAMEs referenced are defined |
668 | outside the loop. */ |
669 | if ((TREE_CODE (op0) != SSA_NAME |
670 | || gimple_bb (SSA_NAME_DEF_STMT (op0)) != bb) |
671 | && (TREE_CODE (op1) != SSA_NAME |
672 | || gimple_bb (SSA_NAME_DEF_STMT (op1)) != bb)) |
673 | { |
674 | /* At this point we know the exit condition is loop |
675 | invariant. The only way to get out of the loop is |
676 | if it never traverses the backedge to begin with. This |
677 | implies that any PHI nodes create equivalances that we |
678 | can attach to the loop exit edge. */ |
679 | bool alternative |
680 | = (EDGE_PRED (bb, 0)->flags & EDGE_DFS_BACK) ? 1 : 0; |
681 | |
682 | gphi_iterator gsi; |
683 | for (gsi = gsi_start_phis (bb); |
684 | !gsi_end_p (i: gsi); |
685 | gsi_next (i: &gsi)) |
686 | { |
687 | /* Now get the EDGE_INFO class so we can append |
688 | it to our list. We want the successor edge |
689 | where the destination is not the source of |
690 | an incoming edge. */ |
691 | gphi *phi = gsi.phi (); |
692 | tree src = PHI_ARG_DEF (phi, alternative); |
693 | tree dst = PHI_RESULT (phi); |
694 | |
695 | /* If the other alternative is the same as the result, |
696 | then this is a degenerate and can be ignored. */ |
697 | if (dst == PHI_ARG_DEF (phi, !alternative)) |
698 | continue; |
699 | |
700 | if (EDGE_SUCC (bb, 0)->dest |
701 | != EDGE_PRED (bb, !alternative)->src) |
702 | edge_info = (class edge_info *)EDGE_SUCC (bb, 0)->aux; |
703 | else |
704 | edge_info = (class edge_info *)EDGE_SUCC (bb, 1)->aux; |
705 | |
706 | /* Note that since this processing is done independently |
707 | of other edge equivalency processing, we may not |
708 | have an EDGE_INFO structure set up yet. */ |
709 | if (edge_info == NULL) |
710 | edge_info = new class edge_info (false_edge); |
711 | edge_info->record_simple_equiv (lhs: dst, rhs: src); |
712 | } |
713 | } |
714 | } |
715 | } |
716 | } |
717 | } |
718 | |
/* Jump-threading state for the DOM pass: layers unwindable const/copy
   and available-expression tables on top of the generic jt_state.  */
class dom_jt_state : public jt_state
{
public:
  dom_jt_state (const_and_copies *copies, avail_exprs_stack *avails)
    : m_copies (copies), m_avails (avails)
  {
    /* Tree view makes membership tests cheaper for the expected
       access pattern of m_blocks_on_stack.  */
    bitmap_tree_view (m_blocks_on_stack);
  }
  /* Push markers on both unwind stacks before recording edge state.  */
  void push (edge e) override
  {
    m_copies->push_marker ();
    m_avails->push_marker ();
    jt_state::push (e);
  }
  /* Unwind both stacks back to the markers pushed by push ().  */
  void pop () override
  {
    m_copies->pop_to_marker ();
    m_avails->pop_to_marker ();
    jt_state::pop ();
  }
  void register_equivs_edge (edge e) override
  {
    record_temporary_equivalences (e, m_copies, m_avails, m_blocks_on_stack);
  }
  void register_equiv (tree dest, tree src, bool update) override;
  bitmap get_blocks_on_stack () { return m_blocks_on_stack; }
private:
  const_and_copies *m_copies;
  avail_exprs_stack *m_avails;
  /* Set of blocks on the stack, to be used for medium-fast
     dominance queries in back_propagate_equivalences.  */
  auto_bitmap m_blocks_on_stack;
};
752 | |
/* Record the equivalence DEST = SRC in the unwindable const/copy
   table.  The UPDATE flag from the jt_state interface is unused
   here.  */

void
dom_jt_state::register_equiv (tree dest, tree src, bool)
{
  m_copies->record_const_or_copy (dest, src);
}
758 | |
/* Statement simplifier for jump threading in DOM: tries the DOM
   available-expression table first, then falls back to the hybrid
   ranger-based simplifier.  */
class dom_jt_simplifier : public hybrid_jt_simplifier
{
public:
  dom_jt_simplifier (avail_exprs_stack *avails, gimple_ranger *ranger,
		     path_range_query *query)
    : hybrid_jt_simplifier (ranger, query), m_avails (avails) { }

private:
  tree simplify (gimple *, gimple *, basic_block, jt_state *) override;
  /* DOM's expression hash table, consulted before the ranger.  */
  avail_exprs_stack *m_avails;
};
770 | |
771 | tree |
772 | dom_jt_simplifier::simplify (gimple *stmt, gimple *within_stmt, |
773 | basic_block bb, jt_state *state) |
774 | { |
775 | /* First see if the conditional is in the hash table. */ |
776 | tree cached_lhs = m_avails->lookup_avail_expr (stmt, false, true); |
777 | if (cached_lhs) |
778 | return cached_lhs; |
779 | |
780 | /* Otherwise call the ranger if possible. */ |
781 | if (state) |
782 | return hybrid_jt_simplifier::simplify (stmt, within_stmt, bb, state); |
783 | |
784 | return NULL; |
785 | } |
786 | |
787 | class dom_opt_dom_walker : public dom_walker |
788 | { |
789 | public: |
790 | dom_opt_dom_walker (cdi_direction direction, |
791 | jump_threader *threader, |
792 | dom_jt_state *state, |
793 | gimple_ranger *ranger, |
794 | const_and_copies *const_and_copies, |
795 | avail_exprs_stack *avail_exprs_stack) |
796 | : dom_walker (direction, REACHABLE_BLOCKS) |
797 | { |
798 | m_ranger = ranger; |
799 | m_state = state; |
800 | m_dummy_cond = gimple_build_cond (NE_EXPR, integer_zero_node, |
801 | integer_zero_node, NULL, NULL); |
802 | m_const_and_copies = const_and_copies; |
803 | m_avail_exprs_stack = avail_exprs_stack; |
804 | m_threader = threader; |
805 | } |
806 | |
807 | edge before_dom_children (basic_block) final override; |
808 | void after_dom_children (basic_block) final override; |
809 | |
810 | private: |
811 | |
812 | /* Unwindable equivalences, both const/copy and expression varieties. */ |
813 | class const_and_copies *m_const_and_copies; |
814 | class avail_exprs_stack *m_avail_exprs_stack; |
815 | |
816 | /* Dummy condition to avoid creating lots of throw away statements. */ |
817 | gcond *m_dummy_cond; |
818 | |
819 | /* Optimize a single statement within a basic block using the |
820 | various tables mantained by DOM. Returns the taken edge if |
821 | the statement is a conditional with a statically determined |
822 | value. */ |
823 | edge optimize_stmt (basic_block, gimple_stmt_iterator *, bool *); |
824 | |
825 | void set_global_ranges_from_unreachable_edges (basic_block); |
826 | |
827 | void test_for_singularity (gimple *, avail_exprs_stack *); |
828 | edge fold_cond (gcond *cond); |
829 | |
830 | jump_threader *m_threader; |
831 | gimple_ranger *m_ranger; |
832 | dom_jt_state *m_state; |
833 | }; |
834 | |
835 | /* Jump threading, redundancy elimination and const/copy propagation. |
836 | |
837 | This pass may expose new symbols that need to be renamed into SSA. For |
838 | every new symbol exposed, its corresponding bit will be set in |
839 | VARS_TO_RENAME. */ |
840 | |
841 | namespace { |
842 | |
843 | const pass_data pass_data_dominator = |
844 | { |
845 | .type: GIMPLE_PASS, /* type */ |
846 | .name: "dom" , /* name */ |
847 | .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */ |
848 | .tv_id: TV_TREE_SSA_DOMINATOR_OPTS, /* tv_id */ |
849 | .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */ |
850 | .properties_provided: 0, /* properties_provided */ |
851 | .properties_destroyed: 0, /* properties_destroyed */ |
852 | .todo_flags_start: 0, /* todo_flags_start */ |
853 | .todo_flags_finish: ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */ |
854 | }; |
855 | |
856 | class pass_dominator : public gimple_opt_pass |
857 | { |
858 | public: |
859 | pass_dominator (gcc::context *ctxt) |
860 | : gimple_opt_pass (pass_data_dominator, ctxt), |
861 | may_peel_loop_headers_p (false) |
862 | {} |
863 | |
864 | /* opt_pass methods: */ |
865 | opt_pass * clone () final override { return new pass_dominator (m_ctxt); } |
866 | void set_pass_param (unsigned int n, bool param) final override |
867 | { |
868 | gcc_assert (n == 0); |
869 | may_peel_loop_headers_p = param; |
870 | } |
871 | bool gate (function *) final override { return flag_tree_dom != 0; } |
872 | unsigned int execute (function *) final override; |
873 | |
874 | private: |
875 | /* This flag is used to prevent loops from being peeled repeatedly in jump |
876 | threading; it will be removed once we preserve loop structures throughout |
877 | the compilation -- we will be able to mark the affected loops directly in |
878 | jump threading, and avoid peeling them next time. */ |
879 | bool ; |
880 | }; // class pass_dominator |
881 | |
/* Main driver for the dominator optimization pass on function FUN.
   Walks the dominator tree performing redundancy elimination and
   const/copy propagation, then performs jump threading and cleans up
   any fallout (dead EH edges, calls that became noreturn).  Returns 0;
   follow-up work is requested via the pass's todo_flags_finish.  */

unsigned int
pass_dominator::execute (function *fun)
{
  memset (s: &opt_stats, c: 0, n: sizeof (opt_stats));

  /* Create our hash tables.  */
  hash_table<expr_elt_hasher> *avail_exprs
    = new hash_table<expr_elt_hasher> (1024);
  class avail_exprs_stack *avail_exprs_stack
    = new class avail_exprs_stack (avail_exprs);
  class const_and_copies *const_and_copies = new class const_and_copies ();
  need_eh_cleanup = BITMAP_ALLOC (NULL);
  need_noreturn_fixup.create (nelems: 0);

  calculate_dominance_info (CDI_DOMINATORS);
  cfg_altered = false;

  /* We need to know loop structures in order to avoid destroying them
     in jump threading.  Note that we still can e.g. thread through loop
     headers to an exit edge, or through loop header to the loop body, assuming
     that we update the loop info.

     TODO: We don't need to set LOOPS_HAVE_PREHEADERS generally, but due
     to several overly conservative bail-outs in jump threading, case
     gcc.dg/tree-ssa/pr21417.c can't be threaded if loop preheader is
     missing.  We should improve jump threading in future then
     LOOPS_HAVE_PREHEADERS won't be needed here.  */
  loop_optimizer_init (LOOPS_HAVE_PREHEADERS | LOOPS_HAVE_SIMPLE_LATCHES
		       | LOOPS_HAVE_MARKED_IRREDUCIBLE_REGIONS);

  /* We need accurate information regarding back edges in the CFG
     for jump threading; this may include back edges that are not part of
     a single loop.  */
  mark_dfs_back_edges ();

  /* We want to create the edge info structures before the dominator walk
     so that they'll be in place for the jump threader, particularly when
     threading through a join block.

     The conditions will be lazily updated with global equivalences as
     we reach them during the dominator walk.  */
  basic_block bb;
  FOR_EACH_BB_FN (bb, fun)
    record_edge_info (bb);

  /* Recursively walk the dominator tree optimizing statements.  */
  gimple_ranger *ranger = enable_ranger (m: fun);
  path_range_query path_query (*ranger);
  dom_jt_simplifier simplifier (avail_exprs_stack, ranger, &path_query);
  dom_jt_state state (const_and_copies, avail_exprs_stack);
  jump_threader threader (&simplifier, &state);
  dom_opt_dom_walker walker (CDI_DOMINATORS,
			     &threader,
			     &state,
			     ranger,
			     const_and_copies,
			     avail_exprs_stack);
  walker.walk (fun->cfg->x_entry_block_ptr);

  /* Ranges computed during the walk become global (persistent) range
     info before the ranger is torn down.  */
  ranger->export_global_ranges ();
  disable_ranger (fun);

  /* Look for blocks where we cleared EDGE_EXECUTABLE on an outgoing
     edge.  When found, remove jump threads which contain any outgoing
     edge from the affected block.  */
  if (cfg_altered)
    {
      FOR_EACH_BB_FN (bb, fun)
	{
	  edge_iterator ei;
	  edge e;

	  /* First see if there are any edges without EDGE_EXECUTABLE
	     set.  */
	  bool found = false;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    {
	      if ((e->flags & EDGE_EXECUTABLE) == 0)
		{
		  found = true;
		  break;
		}
	    }

	  /* If there were any such edges found, then remove jump threads
	     containing any edge leaving BB.  */
	  if (found)
	    FOR_EACH_EDGE (e, ei, bb->succs)
	      threader.remove_jump_threads_including (e);
	}
    }

  /* Flush operand caches on any statement the walk left modified.  */
  {
    gimple_stmt_iterator gsi;
    basic_block bb;
    FOR_EACH_BB_FN (bb, fun)
      {
	for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
	  update_stmt_if_modified (s: gsi_stmt (i: gsi));
      }
  }

  /* If we exposed any new variables, go ahead and put them into
     SSA form now, before we handle jump threading.  This simplifies
     interactions between rewriting of _DECL nodes into SSA form
     and rewriting SSA_NAME nodes into SSA form after block
     duplication and CFG manipulation.  */
  update_ssa (TODO_update_ssa);

  free_all_edge_infos ();

  /* Thread jumps, creating duplicate blocks as needed.  */
  cfg_altered |= threader.thread_through_all_blocks (may_peel_loop_headers: may_peel_loop_headers_p);

  if (cfg_altered)
    free_dominance_info (CDI_DOMINATORS);

  /* Removal of statements may make some EH edges dead.  Purge
     such edges from the CFG as needed.  */
  if (!bitmap_empty_p (map: need_eh_cleanup))
    {
      unsigned i;
      bitmap_iterator bi;

      /* Jump threading may have created forwarder blocks from blocks
	 needing EH cleanup; the new successor of these blocks, which
	 has inherited from the original block, needs the cleanup.
	 Don't clear bits in the bitmap, as that can break the bitmap
	 iterator.  */
      EXECUTE_IF_SET_IN_BITMAP (need_eh_cleanup, 0, i, bi)
	{
	  basic_block bb = BASIC_BLOCK_FOR_FN (fun, i);
	  if (bb == NULL)
	    continue;
	  while (single_succ_p (bb)
		 && (single_succ_edge (bb)->flags
		     & (EDGE_EH|EDGE_DFS_BACK)) == 0)
	    bb = single_succ (bb);
	  if (bb == EXIT_BLOCK_PTR_FOR_FN (fun))
	    continue;
	  if ((unsigned) bb->index != i)
	    bitmap_set_bit (need_eh_cleanup, bb->index);
	}

      gimple_purge_all_dead_eh_edges (need_eh_cleanup);
      bitmap_clear (need_eh_cleanup);
    }

  /* Fixup stmts that became noreturn calls.  This may require splitting
     blocks and thus isn't possible during the dominator walk or before
     jump threading finished.  Do this in reverse order so we don't
     inadvertedly remove a stmt we want to fixup by visiting a dominating
     now noreturn call first.  */
  while (!need_noreturn_fixup.is_empty ())
    {
      gimple *stmt = need_noreturn_fixup.pop ();
      if (dump_file && dump_flags & TDF_DETAILS)
	{
	  fprintf (stream: dump_file, format: "Fixing up noreturn call " );
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (stream: dump_file, format: "\n" );
	}
      fixup_noreturn_call (stmt);
    }

  statistics_counter_event (fun, "Redundant expressions eliminated" ,
			    opt_stats.num_re);
  statistics_counter_event (fun, "Constants propagated" ,
			    opt_stats.num_const_prop);
  statistics_counter_event (fun, "Copies propagated" ,
			    opt_stats.num_copy_prop);

  /* Debugging dumps.  */
  if (dump_file && (dump_flags & TDF_STATS))
    dump_dominator_optimization_stats (file: dump_file, avail_exprs);

  loop_optimizer_finalize ();

  /* Delete our main hashtable.  */
  delete avail_exprs;
  avail_exprs = NULL;

  /* Free asserted bitmaps and stacks.  */
  BITMAP_FREE (need_eh_cleanup);
  need_noreturn_fixup.release ();
  delete avail_exprs_stack;
  delete const_and_copies;

  return 0;
}
1072 | |
1073 | } // anon namespace |
1074 | |
/* Factory used by the pass manager to instantiate the dominator
   optimization pass in context CTXT.  */

gimple_opt_pass *
make_pass_dominator (gcc::context *ctxt)
{
  return new pass_dominator (ctxt);
}
1080 | |
1081 | /* Valueize hook for gimple_fold_stmt_to_constant_1. */ |
1082 | |
1083 | static tree |
1084 | dom_valueize (tree t) |
1085 | { |
1086 | if (TREE_CODE (t) == SSA_NAME) |
1087 | { |
1088 | tree tem = SSA_NAME_VALUE (t); |
1089 | if (tem) |
1090 | return tem; |
1091 | } |
1092 | return t; |
1093 | } |
1094 | |
/* We have just found an equivalence for LHS on an edge E.
   Look backwards to other uses of LHS and see if we can derive
   additional equivalences that are valid on edge E.

   Newly derived equivalences are recorded in CONST_AND_COPIES.
   DOMBY is the set of basic-block indices currently on the domwalk
   stack; it serves as a cheap dominance test when full dominance info
   is not in the DOM_OK state.  */
static void
back_propagate_equivalences (tree lhs, edge e,
			     class const_and_copies *const_and_copies,
			     bitmap domby)
{
  use_operand_p use_p;
  imm_use_iterator iter;
  basic_block dest = e->dest;
  bool domok = (dom_info_state (CDI_DOMINATORS) == DOM_OK);

  /* Iterate over the uses of LHS to see if any dominate E->dest.
     If so, they may create useful equivalences too.

     ??? If the code gets re-organized to a worklist to catch more
     indirect opportunities and it is made to handle PHIs then this
     should only consider use_stmts in basic-blocks we have already visited.  */
  FOR_EACH_IMM_USE_FAST (use_p, iter, lhs)
    {
      gimple *use_stmt = USE_STMT (use_p);

      /* Often the use is in DEST, which we trivially know we can't use.
	 This is cheaper than the dominator set tests below.  */
      if (dest == gimple_bb (g: use_stmt))
	continue;

      /* Filter out statements that can never produce a useful
	 equivalence.  */
      tree lhs2 = gimple_get_lhs (use_stmt);
      if (!lhs2 || TREE_CODE (lhs2) != SSA_NAME)
	continue;

      if (domok)
	{
	  if (!dominated_by_p (CDI_DOMINATORS, dest, gimple_bb (g: use_stmt)))
	    continue;
	}
      else
	{
	  /* We can use the set of BBs on the stack from a domwalk
	     for a medium fast way to query dominance.  Profiling
	     has shown non-fast query dominance tests here can be fairly
	     expensive.  */
	  /* This tests if USE_STMT does not dominate DEST.  */
	  if (!bitmap_bit_p (domby, gimple_bb (g: use_stmt)->index))
	    continue;
	}

      /* At this point USE_STMT dominates DEST and may result in a
	 useful equivalence.  Try to simplify its RHS to a constant
	 or SSA_NAME.  */
      tree res = gimple_fold_stmt_to_constant_1 (use_stmt, dom_valueize,
						 no_follow_ssa_edges);
      if (res && (TREE_CODE (res) == SSA_NAME || is_gimple_min_invariant (res)))
	record_equality (lhs2, res, const_and_copies);
    }
}
1154 | |
/* Record into CONST_AND_COPIES and AVAIL_EXPRS_STACK any equivalences implied
   by traversing edge E (which are cached in E->aux).

   BLOCKS_ON_STACK is the set of blocks currently on the domwalk stack,
   forwarded to back_propagate_equivalences for its fast dominance test.

   Callers are responsible for managing the unwinding markers.  */
static void
record_temporary_equivalences (edge e,
			       class const_and_copies *const_and_copies,
			       class avail_exprs_stack *avail_exprs_stack,
			       bitmap blocks_on_stack)
{
  int i;
  class edge_info *edge_info = (class edge_info *) e->aux;

  /* If we have info associated with this edge, record it into
     our equivalence tables.  */
  if (edge_info)
    {
      cond_equivalence *eq;
      /* If we have 0 = COND or 1 = COND equivalences, record them
	 into our expression hash tables.  */
      for (i = 0; edge_info->cond_equivalences.iterate (ix: i, ptr: &eq); ++i)
	avail_exprs_stack->record_cond (eq);

      edge_info::equiv_pair *seq;
      for (i = 0; edge_info->simple_equivalences.iterate (ix: i, ptr: &seq); ++i)
	{
	  tree lhs = seq->first;
	  if (!lhs || TREE_CODE (lhs) != SSA_NAME)
	    continue;

	  /* Record the simple NAME = VALUE equivalence.  */
	  tree rhs = seq->second;

	  /* If this is a SSA_NAME = SSA_NAME equivalence and one operand is
	     cheaper to compute than the other, then set up the equivalence
	     such that we replace the expensive one with the cheap one.

	     If they are the same cost to compute, then do not record
	     anything.  */
	  if (TREE_CODE (lhs) == SSA_NAME && TREE_CODE (rhs) == SSA_NAME)
	    {
	      /* Cost is approximated by the size estimate of each name's
		 defining statement.  */
	      gimple *rhs_def = SSA_NAME_DEF_STMT (rhs);
	      int rhs_cost = estimate_num_insns (rhs_def, &eni_size_weights);

	      gimple *lhs_def = SSA_NAME_DEF_STMT (lhs);
	      int lhs_cost = estimate_num_insns (lhs_def, &eni_size_weights);

	      if (rhs_cost > lhs_cost)
		record_equality (rhs, lhs, const_and_copies);
	      else if (rhs_cost < lhs_cost)
		record_equality (lhs, rhs, const_and_copies);
	    }
	  else
	    record_equality (lhs, rhs, const_and_copies);


	  /* Any equivalence found for LHS may result in additional
	     equivalences for other uses of LHS that we have already
	     processed.  */
	  back_propagate_equivalences (lhs, e, const_and_copies,
				       domby: blocks_on_stack);
	}
    }
}
1219 | |
/* PHI nodes can create equivalences too.

   Ignoring any alternatives which are the same as the result, if
   all the alternatives are equal, then the PHI node creates an
   equivalence.

   Walks all PHIs in BB; when an equivalence is found it is recorded
   via set_ssa_name_value, and redundant virtual PHIs are removed
   outright.  */

static void
record_equivalences_from_phis (basic_block bb)
{
  gphi_iterator gsi;

  for (gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi); )
    {
      gphi *phi = gsi.phi ();

      /* We might eliminate the PHI, so advance GSI now.  */
      gsi_next (i: &gsi);

      tree lhs = gimple_phi_result (gs: phi);
      tree rhs = NULL;
      size_t i;

      for (i = 0; i < gimple_phi_num_args (gs: phi); i++)
	{
	  tree t = gimple_phi_arg_def (gs: phi, index: i);

	  /* Ignore alternatives which are the same as our LHS.  Since
	     LHS is a PHI_RESULT, it is known to be a SSA_NAME, so we
	     can simply compare pointers.  */
	  if (lhs == t)
	    continue;

	  /* If the associated edge is not marked as executable, then it
	     can be ignored.  */
	  if ((gimple_phi_arg_edge (phi, i)->flags & EDGE_EXECUTABLE) == 0)
	    continue;

	  t = dom_valueize (t);

	  /* If T is an SSA_NAME and its associated edge is a backedge,
	     then quit as we cannot utilize this equivalence.  */
	  if (TREE_CODE (t) == SSA_NAME
	      && (gimple_phi_arg_edge (phi, i)->flags & EDGE_DFS_BACK))
	    break;

	  /* If we have not processed an alternative yet, then set
	     RHS to this alternative.  */
	  if (rhs == NULL)
	    rhs = t;
	  /* If we have processed an alternative (stored in RHS), then
	     see if it is equal to this one.  If it isn't, then stop
	     the search.  */
	  else if (! operand_equal_for_phi_arg_p (rhs, t))
	    break;
	}

      /* If we had no interesting alternatives, then all the RHS alternatives
	 must have been the same as LHS.  */
      if (!rhs)
	rhs = lhs;

      /* If we managed to iterate through each PHI alternative without
	 breaking out of the loop, then we have a PHI which may create
	 a useful equivalence.  We do not need to record unwind data for
	 this, since this is a true assignment and not an equivalence
	 inferred from a comparison.  All uses of this ssa name are dominated
	 by this assignment, so unwinding just costs time and space.  */
      if (i == gimple_phi_num_args (gs: phi))
	{
	  if (may_propagate_copy (lhs, rhs))
	    set_ssa_name_value (lhs, rhs);
	  else if (virtual_operand_p (op: lhs))
	    {
	      gimple *use_stmt;
	      imm_use_iterator iter;
	      use_operand_p use_p;
	      /* For virtual operands we have to propagate into all uses as
		 otherwise we will create overlapping life-ranges.  */
	      FOR_EACH_IMM_USE_STMT (use_stmt, iter, lhs)
		FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		  SET_USE (use_p, rhs);
	      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
		SSA_NAME_OCCURS_IN_ABNORMAL_PHI (rhs) = 1;
	      /* The PHI is now fully redundant with RHS; remove it.  */
	      gimple_stmt_iterator tmp_gsi = gsi_for_stmt (phi);
	      remove_phi_node (&tmp_gsi, true);
	    }
	}
    }
}
1309 | |
/* Return true if all uses of NAME are dominated by STMT or feed STMT
   via a chain of single immediate uses.  */

static bool
all_uses_feed_or_dominated_by_stmt (tree name, gimple *stmt)
{
  use_operand_p use_p, use2_p;
  imm_use_iterator iter;
  basic_block stmt_bb = gimple_bb (g: stmt);

  FOR_EACH_IMM_USE_FAST (use_p, iter, name)
    {
      gimple *use_stmt = USE_STMT (use_p), *use_stmt2;
      /* Uses that are STMT itself, debug statements, or located in a
	 block strictly dominated by STMT's block are acceptable.  */
      if (use_stmt == stmt
	  || is_gimple_debug (gs: use_stmt)
	  || (gimple_bb (g: use_stmt) != stmt_bb
	      && dominated_by_p (CDI_DOMINATORS,
				 gimple_bb (g: use_stmt), stmt_bb)))
	continue;
      /* Otherwise follow the chain of single immediate uses through
	 SSA assignments; it must terminate at STMT.  */
      while (use_stmt != stmt
	     && is_gimple_assign (gs: use_stmt)
	     && TREE_CODE (gimple_assign_lhs (use_stmt)) == SSA_NAME
	     && single_imm_use (var: gimple_assign_lhs (gs: use_stmt),
				use_p: &use2_p, stmt: &use_stmt2))
	use_stmt = use_stmt2;
      if (use_stmt != stmt)
	return false;
    }
  return true;
}
1340 | |
/* Handle
   _4 = x_3 & 31;
   if (_4 != 0)
     goto <bb 6>;
   else
     goto <bb 7>;
   <bb 6>:
   __builtin_unreachable ();
   <bb 7>:

   If x_3 has no other immediate uses (checked by caller), var is the
   x_3 var, we can clear low 5 bits from the non-zero bitmask.
   E is the edge on which the BIT_AND_EXPR result is known zero.  */

static void
maybe_set_nonzero_bits (edge e, tree var)
{
  basic_block cond_bb = e->src;
  gcond *cond = safe_dyn_cast <gcond *> (p: *gsi_last_bb (bb: cond_bb));
  tree cst;

  /* The source block must end in a condition comparing an SSA_NAME
     against zero; which of EQ/NE we need depends on whether E is the
     true or the false edge.  */
  if (cond == NULL
      || gimple_cond_code (gs: cond) != ((e->flags & EDGE_TRUE_VALUE)
				       ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (gs: cond)))
    return;

  /* The compared name must be defined by VAR & INTEGER_CST.  */
  gimple *stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (gs: stmt)
      || gimple_assign_rhs_code (gs: stmt) != BIT_AND_EXPR
      || TREE_CODE (gimple_assign_rhs2 (stmt)) != INTEGER_CST)
    return;
  if (gimple_assign_rhs1 (gs: stmt) != var)
    {
      gimple *stmt2;

      /* Also accept VAR feeding the BIT_AND_EXPR through a single
	 conversion that preserves VAR's precision.  */
      if (TREE_CODE (gimple_assign_rhs1 (stmt)) != SSA_NAME)
	return;
      stmt2 = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (!gimple_assign_cast_p (s: stmt2)
	  || gimple_assign_rhs1 (gs: stmt2) != var
	  || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt2))
	  || (TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt)))
	      != TYPE_PRECISION (TREE_TYPE (var))))
	return;
    }
  cst = gimple_assign_rhs2 (gs: stmt);
  if (POINTER_TYPE_P (TREE_TYPE (var)))
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (var);
      if (pi && pi->misalign)
	return;
      /* For pointers the known-zero low bits are expressed as
	 alignment info: count the trailing zero bits of ~CST.  */
      wide_int w = wi::bit_not (x: wi::to_wide (t: cst));
      unsigned int bits = wi::ctz (w);
      if (bits == 0 || bits >= HOST_BITS_PER_INT)
	return;
      unsigned int align = 1U << bits;
      if (pi == NULL || pi->align < align)
	set_ptr_info_alignment (get_ptr_info (var), align, 0);
    }
  else
    /* For integers, clear the masked bits from VAR's nonzero-bits
       bitmask.  */
    set_nonzero_bits (var, wi::bit_and_not (x: get_nonzero_bits (var),
					    y: wi::to_wide (t: cst)));
}
1405 | |
/* Set global ranges that can be determined from the C->M edge:

   <bb C>:
   ...
   if (something)
     goto <bb N>;
   else
     goto <bb M>;
   <bb N>:
   __builtin_unreachable ();
   <bb M>:
*/

void
dom_opt_dom_walker::set_global_ranges_from_unreachable_edges (basic_block bb)
{
  /* BB must be reached through a single (non-loop) predecessor
     edge...  */
  edge pred_e = single_pred_edge_ignoring_loop_edges (bb, false);
  if (!pred_e)
    return;

  /* ...whose source ends in a GIMPLE_COND and whose other successor
     falls through to __builtin_unreachable ().  */
  gimple *stmt = *gsi_last_bb (bb: pred_e->src);
  if (!stmt
      || gimple_code (g: stmt) != GIMPLE_COND
      || !assert_unreachable_fallthru_edge_p (pred_e))
    return;

  tree name;
  gori_compute &gori = m_ranger->gori ();
  /* For every SSA name whose range the condition can refine...  */
  FOR_EACH_GORI_EXPORT_NAME (gori, pred_e->src, name)
    if (all_uses_feed_or_dominated_by_stmt (name, stmt)
	// The condition must post-dominate the definition point.
	&& (SSA_NAME_IS_DEFAULT_DEF (name)
	    || (gimple_bb (SSA_NAME_DEF_STMT (name))
		== pred_e->src)))
      {
	Value_Range r (TREE_TYPE (name));

	if (m_ranger->range_on_edge (r, e: pred_e, name)
	    && !r.varying_p ()
	    && !r.undefined_p ())
	  {
	    /* Record the edge range as NAME's global range and derive
	       any alignment/nonzero-bits info it implies.  */
	    set_range_info (name, r);
	    maybe_set_nonzero_bits (e: pred_e, var: name);
	  }
      }
}
1452 | |
1453 | /* Record any equivalences created by the incoming edge to BB into |
1454 | CONST_AND_COPIES and AVAIL_EXPRS_STACK. If BB has more than one |
1455 | incoming edge, then no equivalence is created. */ |
1456 | |
1457 | static void |
1458 | record_equivalences_from_incoming_edge (basic_block bb, |
1459 | class const_and_copies *const_and_copies, |
1460 | class avail_exprs_stack *avail_exprs_stack, |
1461 | bitmap blocks_on_stack) |
1462 | { |
1463 | edge e; |
1464 | basic_block parent; |
1465 | |
1466 | /* If our parent block ended with a control statement, then we may be |
1467 | able to record some equivalences based on which outgoing edge from |
1468 | the parent was followed. */ |
1469 | parent = get_immediate_dominator (CDI_DOMINATORS, bb); |
1470 | |
1471 | e = single_pred_edge_ignoring_loop_edges (bb, true); |
1472 | |
1473 | /* If we had a single incoming edge from our parent block, then enter |
1474 | any data associated with the edge into our tables. */ |
1475 | if (e && e->src == parent) |
1476 | record_temporary_equivalences (e, const_and_copies, avail_exprs_stack, |
1477 | blocks_on_stack); |
1478 | } |
1479 | |
1480 | /* Dump statistics for the hash table HTAB. */ |
1481 | |
1482 | static void |
1483 | htab_statistics (FILE *file, const hash_table<expr_elt_hasher> &htab) |
1484 | { |
1485 | fprintf (stream: file, format: "size %ld, %ld elements, %f collision/search ratio\n" , |
1486 | (long) htab.size (), |
1487 | (long) htab.elements (), |
1488 | htab.collisions ()); |
1489 | } |
1490 | |
/* Dump SSA statistics on FILE.  AVAIL_EXPRS is the pass's main
   expression hash table; its occupancy stats are appended.  */

static void
dump_dominator_optimization_stats (FILE *file,
				   hash_table<expr_elt_hasher> *avail_exprs)
{
  fprintf (stream: file, format: "Total number of statements: %6ld\n\n" ,
	   opt_stats.num_stmts);
  fprintf (stream: file, format: "Exprs considered for dominator optimizations: %6ld\n" ,
	   opt_stats.num_exprs_considered);

  fprintf (stream: file, format: "\nHash table statistics:\n" );

  fprintf (stream: file, format: " avail_exprs: " );
  htab_statistics (file, htab: *avail_exprs);
}
1507 | |
1508 | |
/* Record the equivalence X = Y into CONST_AND_COPIES, where X and Y are
   the two operands of an EQ_EXPR.  This constrains the cases in which
   we may treat this as assignment.  */

static void
record_equality (tree x, tree y, class const_and_copies *const_and_copies)
{
  tree prev_x = NULL, prev_y = NULL;

  if (tree_swap_operands_p (x, y))
    std::swap (a&: x, b&: y);

  /* Most of the time tree_swap_operands_p does what we want.  But there
     are cases where we know one operand is better for copy propagation than
     the other.  Given no other code cares about ordering of equality
     comparison operators for that purpose, we just handle the special cases
     here.  */
  if (TREE_CODE (x) == SSA_NAME && TREE_CODE (y) == SSA_NAME)
    {
      /* If one operand is a single use operand, then make it
	 X.  This will preserve its single use properly and if this
	 conditional is eliminated, the computation of X can be
	 eliminated as well.  */
      if (has_single_use (var: y) && ! has_single_use (var: x))
	std::swap (a&: x, b&: y);
    }
  if (TREE_CODE (x) == SSA_NAME)
    prev_x = SSA_NAME_VALUE (x);
  if (TREE_CODE (y) == SSA_NAME)
    prev_y = SSA_NAME_VALUE (y);

  /* If one of the previous values is invariant, or invariant in more loops
     (by depth), then use that.
     Otherwise it doesn't matter which value we choose, just so
     long as we canonicalize on one value.  */
  if (is_gimple_min_invariant (y))
    ;
  else if (is_gimple_min_invariant (x))
    prev_x = x, x = y, y = prev_x, prev_x = prev_y;
  else if (prev_x && is_gimple_min_invariant (prev_x))
    x = y, y = prev_x, prev_x = prev_y;
  else if (prev_y)
    y = prev_y;

  /* After the swapping, we must have one SSA_NAME.  */
  if (TREE_CODE (x) != SSA_NAME)
    return;

  /* For IEEE, -0.0 == 0.0, so we don't necessarily know the sign of a
     variable compared against zero.  If we're honoring signed zeros,
     then we cannot record this value unless we know that the value is
     nonzero.  */
  if (HONOR_SIGNED_ZEROS (x)
      && (TREE_CODE (y) != REAL_CST
	  || real_equal (&dconst0, &TREE_REAL_CST (y))))
    return;

  const_and_copies->record_const_or_copy (x, y, prev_x);
}
1567 | |
1568 | /* Returns true when STMT is a simple iv increment. It detects the |
1569 | following situation: |
1570 | |
1571 | i_1 = phi (..., i_k) |
1572 | [...] |
1573 | i_j = i_{j-1} for each j : 2 <= j <= k-1 |
1574 | [...] |
1575 | i_k = i_{k-1} +/- ... */ |
1576 | |
1577 | bool |
1578 | simple_iv_increment_p (gimple *stmt) |
1579 | { |
1580 | enum tree_code code; |
1581 | tree lhs, preinc; |
1582 | gimple *phi; |
1583 | size_t i; |
1584 | |
1585 | if (gimple_code (g: stmt) != GIMPLE_ASSIGN) |
1586 | return false; |
1587 | |
1588 | lhs = gimple_assign_lhs (gs: stmt); |
1589 | if (TREE_CODE (lhs) != SSA_NAME) |
1590 | return false; |
1591 | |
1592 | code = gimple_assign_rhs_code (gs: stmt); |
1593 | if (code != PLUS_EXPR |
1594 | && code != MINUS_EXPR |
1595 | && code != POINTER_PLUS_EXPR) |
1596 | return false; |
1597 | |
1598 | preinc = gimple_assign_rhs1 (gs: stmt); |
1599 | if (TREE_CODE (preinc) != SSA_NAME) |
1600 | return false; |
1601 | |
1602 | phi = SSA_NAME_DEF_STMT (preinc); |
1603 | while (gimple_code (g: phi) != GIMPLE_PHI) |
1604 | { |
1605 | /* Follow trivial copies, but not the DEF used in a back edge, |
1606 | so that we don't prevent coalescing. */ |
1607 | if (!gimple_assign_ssa_name_copy_p (phi)) |
1608 | return false; |
1609 | preinc = gimple_assign_rhs1 (gs: phi); |
1610 | phi = SSA_NAME_DEF_STMT (preinc); |
1611 | } |
1612 | |
1613 | for (i = 0; i < gimple_phi_num_args (gs: phi); i++) |
1614 | if (gimple_phi_arg_def (gs: phi, index: i) == lhs) |
1615 | return true; |
1616 | |
1617 | return false; |
1618 | } |
1619 | |
/* Propagate known values from SSA_NAME_VALUE into the PHI nodes of the
   successors of BB.  */

static void
cprop_into_successor_phis (basic_block bb,
			   class const_and_copies *const_and_copies)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      int indx;
      gphi_iterator gsi;

      /* If this is an abnormal edge, then we do not want to copy propagate
	 into the PHI alternative associated with this edge.  */
      if (e->flags & EDGE_ABNORMAL)
	continue;

      gsi = gsi_start_phis (e->dest);
      if (gsi_end_p (i: gsi))
	continue;

      /* We may have an equivalence associated with this edge.  While
	 we cannot propagate it into non-dominated blocks, we can
	 propagate them into PHIs in non-dominated blocks.  */

      /* Push the unwind marker so we can reset the const and copies
	 table back to its original state after processing this edge.  */
      const_and_copies->push_marker ();

      /* Extract and record any simple NAME = VALUE equivalences.

	 Don't bother with [01] = COND equivalences, they're not useful
	 here.  */
      class edge_info *edge_info = (class edge_info *) e->aux;

      if (edge_info)
	{
	  edge_info::equiv_pair *seq;
	  for (int i = 0; edge_info->simple_equivalences.iterate (ix: i, ptr: &seq); ++i)
	    {
	      tree lhs = seq->first;
	      tree rhs = seq->second;

	      if (lhs && TREE_CODE (lhs) == SSA_NAME)
		const_and_copies->record_const_or_copy (lhs, rhs);
	    }

	}

      /* Now rewrite the PHI argument on this edge for every PHI in the
	 destination block.  */
      indx = e->dest_idx;
      for ( ; !gsi_end_p (i: gsi); gsi_next (i: &gsi))
	{
	  tree new_val;
	  use_operand_p orig_p;
	  tree orig_val;
	  gphi *phi = gsi.phi ();

	  /* The alternative may be associated with a constant, so verify
	     it is an SSA_NAME before doing anything with it.  */
	  orig_p = gimple_phi_arg_imm_use_ptr (gs: phi, i: indx);
	  orig_val = get_use_from_ptr (use: orig_p);
	  if (TREE_CODE (orig_val) != SSA_NAME)
	    continue;

	  /* If we have *ORIG_P in our constant/copy table, then replace
	     ORIG_P with its value in our constant/copy table.  */
	  new_val = SSA_NAME_VALUE (orig_val);
	  if (new_val
	      && new_val != orig_val
	      && may_propagate_copy (orig_val, new_val))
	    propagate_value (orig_p, new_val);
	}

      const_and_copies->pop_to_marker ();
    }
}
1699 | |
/* Domwalk callback invoked when we first encounter block BB.  Records
   equivalences available on entry to BB (incoming edge, unreachable
   sibling edges, PHIs), optimizes every statement in the block, then
   records edge info and propagates into successor PHIs.  Returns the
   taken edge computed by optimize_stmt (gated by the
   dom_unreachable_edges debug counter), or NULL.  */

edge
dom_opt_dom_walker::before_dom_children (basic_block bb)
{
  gimple_stmt_iterator gsi;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (stream: dump_file, format: "\n\nOptimizing block #%d\n\n" , bb->index);

  /* Push a marker on the stacks of local information so that we know how
     far to unwind when we finalize this block.  */
  m_avail_exprs_stack->push_marker ();
  m_const_and_copies->push_marker ();
  bitmap_set_bit (m_state->get_blocks_on_stack (), bb->index);

  record_equivalences_from_incoming_edge (bb, const_and_copies: m_const_and_copies,
					  avail_exprs_stack: m_avail_exprs_stack,
					  blocks_on_stack: m_state->get_blocks_on_stack ());
  set_global_ranges_from_unreachable_edges (bb);

  /* PHI nodes can create equivalences too.  */
  record_equivalences_from_phis (bb);

  /* Create equivalences from redundant PHIs.  PHIs are only truly
     redundant when they exist in the same block, so push another
     marker and unwind right afterwards.  */
  m_avail_exprs_stack->push_marker ();
  for (gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
    eliminate_redundant_computations (&gsi, m_const_and_copies,
				      m_avail_exprs_stack);
  m_avail_exprs_stack->pop_to_marker ();

  edge taken_edge = NULL;
  /* Initialize visited flag ahead of us, it has undefined state on
     pass entry.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
    gimple_set_visited (stmt: gsi_stmt (i: gsi), visited_p: false);
  for (gsi = gsi_start_bb (bb); !gsi_end_p (i: gsi);)
    {
      /* Do not optimize a stmt twice, substitution might end up with
	 _3 = _3 which is not valid.  */
      if (gimple_visited_p (stmt: gsi_stmt (i: gsi)))
	{
	  gsi_next (i: &gsi);
	  continue;
	}

      bool removed_p = false;
      taken_edge = this->optimize_stmt (bb, &gsi, &removed_p);
      if (!removed_p)
	gimple_set_visited (stmt: gsi_stmt (i: gsi), visited_p: true);

      /* Go back and visit stmts inserted by folding after substituting
	 into the stmt at gsi.  */
      if (gsi_end_p (i: gsi))
	{
	  /* The stmt was removed and GSI ran off the end; rewind to the
	     last not-yet-visited stmt in the block.  */
	  gcc_checking_assert (removed_p);
	  gsi = gsi_last_bb (bb);
	  while (!gsi_end_p (i: gsi) && !gimple_visited_p (stmt: gsi_stmt (i: gsi)))
	    gsi_prev (i: &gsi);
	}
      else
	{
	  do
	    {
	      gsi_prev (i: &gsi);
	    }
	  while (!gsi_end_p (i: gsi) && !gimple_visited_p (stmt: gsi_stmt (i: gsi)));
	}
      if (gsi_end_p (i: gsi))
	gsi = gsi_start_bb (bb);
      else
	gsi_next (i: &gsi);
    }

  /* Now prepare to process dominated blocks.  */
  record_edge_info (bb);
  cprop_into_successor_phis (bb, const_and_copies: m_const_and_copies);
  if (taken_edge && !dbg_cnt (index: dom_unreachable_edges))
    return NULL;

  return taken_edge;
}
1782 | |
/* We have finished processing the dominator children of BB, perform
   any finalization actions in preparation for leaving this node in
   the dominator tree. */

void
dom_opt_dom_walker::after_dom_children (basic_block bb)
{
  /* Thread jumps through BB's outgoing edges while the equivalences
     recorded for BB's dominated region are still live. */
  m_threader->thread_outgoing_edges (bb);
  /* BB is no longer on the dominator-walk stack. */
  bitmap_clear_bit (m_state->get_blocks_on_stack (), bb->index);
  /* Unwind to the markers pushed in before_dom_children. */
  m_avail_exprs_stack->pop_to_marker ();
  m_const_and_copies->pop_to_marker ();
}
1795 | |
/* Search for redundant computations in STMT.  If any are found, then
   replace them with the variable holding the result of the computation.

   If safe, record this expression into AVAIL_EXPRS_STACK and
   CONST_AND_COPIES.

   GSI points at STMT; STMT may be a GIMPLE_PHI, an assignment, a call
   with an LHS, a GIMPLE_COND or a GIMPLE_SWITCH.  */

static void
eliminate_redundant_computations (gimple_stmt_iterator* gsi,
				  class const_and_copies *const_and_copies,
				  class avail_exprs_stack *avail_exprs_stack)
{
  tree expr_type;
  tree cached_lhs;
  tree def;
  bool insert = true;
  bool assigns_var_p = false;

  gimple *stmt = gsi_stmt (i: *gsi);

  if (gimple_code (g: stmt) == GIMPLE_PHI)
    def = gimple_phi_result (gs: stmt);
  else
    def = gimple_get_lhs (stmt);

  /* Certain expressions on the RHS can be optimized away, but cannot
     themselves be entered into the hash tables. */
  if (! def
      || TREE_CODE (def) != SSA_NAME
      || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (def)
      || gimple_vdef (g: stmt)
      /* Do not record equivalences for increments of ivs.  This would create
	 overlapping live ranges for a very questionable gain. */
      || simple_iv_increment_p (stmt))
    insert = false;

  /* Check if the expression has been computed before.  When INSERT is
     true this also records STMT in the available-expression table. */
  cached_lhs = avail_exprs_stack->lookup_avail_expr (stmt, insert, true);

  opt_stats.num_exprs_considered++;

  /* Get the type of the expression we are trying to optimize. */
  if (is_gimple_assign (gs: stmt))
    {
      expr_type = TREE_TYPE (gimple_assign_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gimple_code (g: stmt) == GIMPLE_COND)
    expr_type = boolean_type_node;
  else if (is_gimple_call (gs: stmt))
    {
      gcc_assert (gimple_call_lhs (stmt));
      expr_type = TREE_TYPE (gimple_call_lhs (stmt));
      assigns_var_p = true;
    }
  else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (p: stmt))
    expr_type = TREE_TYPE (gimple_switch_index (swtch_stmt));
  else if (gimple_code (g: stmt) == GIMPLE_PHI)
    /* We can't propagate into a phi, so the logic below doesn't apply.
       Instead record an equivalence between the cached LHS and the
       PHI result of this statement, provided they are in the same block.
       This should be sufficient to kill the redundant phi. */
    {
      if (def && cached_lhs)
	const_and_copies->record_const_or_copy (def, cached_lhs);
      return;
    }
  else
    gcc_unreachable ();

  if (!cached_lhs)
    return;

  /* It is safe to ignore types here since we have already done
     type checking in the hashing and equality routines.  In fact
     type checking here merely gets in the way of constant
     propagation.  Also, make sure that it is safe to propagate
     CACHED_LHS into the expression in STMT. */
  if ((TREE_CODE (cached_lhs) != SSA_NAME
       && (assigns_var_p
	   || useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs))))
      || may_propagate_copy_into_stmt (stmt, cached_lhs))
    {
      gcc_checking_assert (TREE_CODE (cached_lhs) == SSA_NAME
			   || is_gimple_min_invariant (cached_lhs));

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (stream: dump_file, format: " Replaced redundant expr '" );
	  print_gimple_expr (dump_file, stmt, 0, dump_flags);
	  fprintf (stream: dump_file, format: "' with '" );
	  print_generic_expr (dump_file, cached_lhs, dump_flags);
	  fprintf (stream: dump_file, format: "'\n" );
	}

      opt_stats.num_re++;

      /* For an assignment, a type mismatch between the cached value and
	 the LHS needs an explicit conversion before substituting. */
      if (assigns_var_p
	  && !useless_type_conversion_p (expr_type, TREE_TYPE (cached_lhs)))
	cached_lhs = fold_convert (expr_type, cached_lhs);

      propagate_tree_value_into_stmt (gsi, cached_lhs);

      /* Since it is always necessary to mark the result as modified,
	 perhaps we should move this into propagate_tree_value_into_stmt
	 itself. */
      gimple_set_modified (s: gsi_stmt (i: *gsi), modifiedp: true);
    }
}
1904 | |
/* STMT, a GIMPLE_ASSIGN, may create certain equivalences, in either
   the available expressions table or the const_and_copies table.
   Detect and record those equivalences into AVAIL_EXPRS_STACK.

   MAY_OPTIMIZE_P is nonzero when it is valid to record the RHS value
   for the LHS in the const-and-copies machinery.

   We handle only very simple copy equivalences here.  The heavy
   lifing is done by eliminate_redundant_computations. */

static void
record_equivalences_from_stmt (gimple *stmt, int may_optimize_p,
			       class avail_exprs_stack *avail_exprs_stack)
{
  tree lhs;
  enum tree_code lhs_code;

  gcc_assert (is_gimple_assign (stmt));

  lhs = gimple_assign_lhs (gs: stmt);
  lhs_code = TREE_CODE (lhs);

  if (lhs_code == SSA_NAME
      && gimple_assign_single_p (gs: stmt))
    {
      tree rhs = gimple_assign_rhs1 (gs: stmt);

      /* If the RHS of the assignment is a constant or another variable that
	 may be propagated, register it in the CONST_AND_COPIES table.  We
	 do not need to record unwind data for this, since this is a true
	 assignment and not an equivalence inferred from a comparison.  All
	 uses of this ssa name are dominated by this assignment, so unwinding
	 just costs time and space. */
      if (may_optimize_p
	  && (TREE_CODE (rhs) == SSA_NAME
	      || is_gimple_min_invariant (rhs)))
	{
	  rhs = dom_valueize (t: rhs);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (stream: dump_file, format: "==== ASGN " );
	      print_generic_expr (dump_file, lhs);
	      fprintf (stream: dump_file, format: " = " );
	      print_generic_expr (dump_file, rhs);
	      fprintf (stream: dump_file, format: "\n" );
	    }

	  set_ssa_name_value (lhs, rhs);
	}
    }

  /* Make sure we can propagate &x + CST.  Rewrite the POINTER_PLUS_EXPR
     as an equivalent ADDR_EXPR of a MEM_REF so the address can be
     propagated like any other invariant. */
  if (lhs_code == SSA_NAME
      && gimple_assign_rhs_code (gs: stmt) == POINTER_PLUS_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == ADDR_EXPR
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == INTEGER_CST)
    {
      tree op0 = gimple_assign_rhs1 (gs: stmt);
      tree op1 = gimple_assign_rhs2 (gs: stmt);
      tree new_rhs
	= build1 (ADDR_EXPR, TREE_TYPE (op0),
		  fold_build2 (MEM_REF, TREE_TYPE (TREE_TYPE (op0)),
			       unshare_expr (op0), fold_convert (ptr_type_node,
								 op1)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (stream: dump_file, format: "==== ASGN " );
	  print_generic_expr (dump_file, lhs);
	  fprintf (stream: dump_file, format: " = " );
	  print_generic_expr (dump_file, new_rhs);
	  fprintf (stream: dump_file, format: "\n" );
	}

      set_ssa_name_value (lhs, new_rhs);
    }

  /* A memory store, even an aliased store, creates a useful
     equivalence.  By exchanging the LHS and RHS, creating suitable
     vops and recording the result in the available expression table,
     we may be able to expose more redundant loads. */
  if (!gimple_has_volatile_ops (stmt)
      && gimple_references_memory_p (stmt)
      && gimple_assign_single_p (gs: stmt)
      && (TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
	  || is_gimple_min_invariant (gimple_assign_rhs1 (gs: stmt)))
      && !is_gimple_reg (lhs))
    {
      tree rhs = gimple_assign_rhs1 (gs: stmt);
      gassign *new_stmt;

      /* Build a new statement with the RHS and LHS exchanged. */
      if (TREE_CODE (rhs) == SSA_NAME)
	{
	  /* NOTE tuples.  The call to gimple_build_assign below replaced
	     a call to build_gimple_modify_stmt, which did not set the
	     SSA_NAME_DEF_STMT on the LHS of the assignment.  Doing so
	     may cause an SSA validation failure, as the LHS may be a
	     default-initialized name and should have no definition.  I'm
	     a bit dubious of this, as the artificial statement that we
	     generate here may in fact be ill-formed, but it is simply
	     used as an internal device in this pass, and never becomes
	     part of the CFG. */
	  gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	  new_stmt = gimple_build_assign (rhs, lhs);
	  SSA_NAME_DEF_STMT (rhs) = defstmt;
	}
      else
	new_stmt = gimple_build_assign (rhs, lhs);

      /* The load must be carried out with the store's VDEF as its VUSE
	 so the lookup reflects memory state after the store. */
      gimple_set_vuse (g: new_stmt, vuse: gimple_vdef (g: stmt));

      /* Finally enter the statement into the available expression
	 table. */
      avail_exprs_stack->lookup_avail_expr (new_stmt, true, true);
    }
}
2019 | |
2020 | /* Replace *OP_P in STMT with any known equivalent value for *OP_P from |
2021 | CONST_AND_COPIES. */ |
2022 | |
2023 | static void |
2024 | cprop_operand (gimple *stmt, use_operand_p op_p, range_query *query) |
2025 | { |
2026 | tree val; |
2027 | tree op = USE_FROM_PTR (op_p); |
2028 | |
2029 | /* If the operand has a known constant value or it is known to be a |
2030 | copy of some other variable, use the value or copy stored in |
2031 | CONST_AND_COPIES. */ |
2032 | val = SSA_NAME_VALUE (op); |
2033 | if (!val) |
2034 | { |
2035 | Value_Range r (TREE_TYPE (op)); |
2036 | tree single; |
2037 | if (query->range_of_expr (r, expr: op, stmt) && r.singleton_p (result: &single)) |
2038 | val = single; |
2039 | } |
2040 | |
2041 | if (val && val != op) |
2042 | { |
2043 | /* Do not replace hard register operands in asm statements. */ |
2044 | if (gimple_code (g: stmt) == GIMPLE_ASM |
2045 | && !may_propagate_copy_into_asm (op)) |
2046 | return; |
2047 | |
2048 | /* Certain operands are not allowed to be copy propagated due |
2049 | to their interaction with exception handling and some GCC |
2050 | extensions. */ |
2051 | if (!may_propagate_copy (op, val)) |
2052 | return; |
2053 | |
2054 | /* Do not propagate copies into BIVs. |
2055 | See PR23821 and PR62217 for how this can disturb IV and |
2056 | number of iteration analysis. */ |
2057 | if (TREE_CODE (val) != INTEGER_CST) |
2058 | { |
2059 | gimple *def = SSA_NAME_DEF_STMT (op); |
2060 | if (gimple_code (g: def) == GIMPLE_PHI |
2061 | && gimple_bb (g: def)->loop_father->header == gimple_bb (g: def)) |
2062 | return; |
2063 | } |
2064 | |
2065 | /* Dump details. */ |
2066 | if (dump_file && (dump_flags & TDF_DETAILS)) |
2067 | { |
2068 | fprintf (stream: dump_file, format: " Replaced '" ); |
2069 | print_generic_expr (dump_file, op, dump_flags); |
2070 | fprintf (stream: dump_file, format: "' with %s '" , |
2071 | (TREE_CODE (val) != SSA_NAME ? "constant" : "variable" )); |
2072 | print_generic_expr (dump_file, val, dump_flags); |
2073 | fprintf (stream: dump_file, format: "'\n" ); |
2074 | } |
2075 | |
2076 | if (TREE_CODE (val) != SSA_NAME) |
2077 | opt_stats.num_const_prop++; |
2078 | else |
2079 | opt_stats.num_copy_prop++; |
2080 | |
2081 | propagate_value (op_p, val); |
2082 | |
2083 | /* And note that we modified this statement. This is now |
2084 | safe, even if we changed virtual operands since we will |
2085 | rescan the statement and rewrite its operands again. */ |
2086 | gimple_set_modified (s: stmt, modifiedp: true); |
2087 | } |
2088 | } |
2089 | |
2090 | /* CONST_AND_COPIES is a table which maps an SSA_NAME to the current |
2091 | known value for that SSA_NAME (or NULL if no value is known). |
2092 | |
2093 | Propagate values from CONST_AND_COPIES into the uses, vuses and |
2094 | vdef_ops of STMT. */ |
2095 | |
2096 | static void |
2097 | cprop_into_stmt (gimple *stmt, range_query *query) |
2098 | { |
2099 | use_operand_p op_p; |
2100 | ssa_op_iter iter; |
2101 | tree last_copy_propagated_op = NULL; |
2102 | |
2103 | FOR_EACH_SSA_USE_OPERAND (op_p, stmt, iter, SSA_OP_USE) |
2104 | { |
2105 | tree old_op = USE_FROM_PTR (op_p); |
2106 | |
2107 | /* If we have A = B and B = A in the copy propagation tables |
2108 | (due to an equality comparison), avoid substituting B for A |
2109 | then A for B in the trivially discovered cases. This allows |
2110 | optimization of statements were A and B appear as input |
2111 | operands. */ |
2112 | if (old_op != last_copy_propagated_op) |
2113 | { |
2114 | cprop_operand (stmt, op_p, query); |
2115 | |
2116 | tree new_op = USE_FROM_PTR (op_p); |
2117 | if (new_op != old_op && TREE_CODE (new_op) == SSA_NAME) |
2118 | last_copy_propagated_op = new_op; |
2119 | } |
2120 | } |
2121 | } |
2122 | |
2123 | /* If STMT contains a relational test, try to convert it into an |
2124 | equality test if there is only a single value which can ever |
2125 | make the test true. |
2126 | |
2127 | For example, if the expression hash table contains: |
2128 | |
2129 | TRUE = (i <= 1) |
2130 | |
2131 | And we have a test within statement of i >= 1, then we can safely |
2132 | rewrite the test as i == 1 since there only a single value where |
2133 | the test is true. |
2134 | |
2135 | This is similar to code in VRP. */ |
2136 | |
2137 | void |
2138 | dom_opt_dom_walker::test_for_singularity (gimple *stmt, |
2139 | avail_exprs_stack *avail_exprs_stack) |
2140 | { |
2141 | /* We want to support gimple conditionals as well as assignments |
2142 | where the RHS contains a conditional. */ |
2143 | if (is_gimple_assign (gs: stmt) || gimple_code (g: stmt) == GIMPLE_COND) |
2144 | { |
2145 | enum tree_code code = ERROR_MARK; |
2146 | tree lhs, rhs; |
2147 | |
2148 | /* Extract the condition of interest from both forms we support. */ |
2149 | if (is_gimple_assign (gs: stmt)) |
2150 | { |
2151 | code = gimple_assign_rhs_code (gs: stmt); |
2152 | lhs = gimple_assign_rhs1 (gs: stmt); |
2153 | rhs = gimple_assign_rhs2 (gs: stmt); |
2154 | } |
2155 | else if (gimple_code (g: stmt) == GIMPLE_COND) |
2156 | { |
2157 | code = gimple_cond_code (gs: as_a <gcond *> (p: stmt)); |
2158 | lhs = gimple_cond_lhs (gs: as_a <gcond *> (p: stmt)); |
2159 | rhs = gimple_cond_rhs (gs: as_a <gcond *> (p: stmt)); |
2160 | } |
2161 | |
2162 | /* We're looking for a relational test using LE/GE. Also note we can |
2163 | canonicalize LT/GT tests against constants into LE/GT tests. */ |
2164 | if (code == LE_EXPR || code == GE_EXPR |
2165 | || ((code == LT_EXPR || code == GT_EXPR) |
2166 | && TREE_CODE (rhs) == INTEGER_CST)) |
2167 | { |
2168 | /* For LT_EXPR and GT_EXPR, canonicalize to LE_EXPR and GE_EXPR. */ |
2169 | if (code == LT_EXPR) |
2170 | rhs = fold_build2 (MINUS_EXPR, TREE_TYPE (rhs), |
2171 | rhs, build_int_cst (TREE_TYPE (rhs), 1)); |
2172 | |
2173 | if (code == GT_EXPR) |
2174 | rhs = fold_build2 (PLUS_EXPR, TREE_TYPE (rhs), |
2175 | rhs, build_int_cst (TREE_TYPE (rhs), 1)); |
2176 | |
2177 | /* Determine the code we want to check for in the hash table. */ |
2178 | enum tree_code test_code; |
2179 | if (code == GE_EXPR || code == GT_EXPR) |
2180 | test_code = LE_EXPR; |
2181 | else |
2182 | test_code = GE_EXPR; |
2183 | |
2184 | /* Update the dummy statement so we can query the hash tables. */ |
2185 | gimple_cond_set_code (gs: m_dummy_cond, code: test_code); |
2186 | gimple_cond_set_lhs (gs: m_dummy_cond, lhs); |
2187 | gimple_cond_set_rhs (gs: m_dummy_cond, rhs); |
2188 | tree cached_lhs |
2189 | = avail_exprs_stack->lookup_avail_expr (m_dummy_cond, |
2190 | false, false); |
2191 | |
2192 | /* If the lookup returned 1 (true), then the expression we |
2193 | queried was in the hash table. As a result there is only |
2194 | one value that makes the original conditional true. Update |
2195 | STMT accordingly. */ |
2196 | if (cached_lhs && integer_onep (cached_lhs)) |
2197 | { |
2198 | if (is_gimple_assign (gs: stmt)) |
2199 | { |
2200 | gimple_assign_set_rhs_code (s: stmt, code: EQ_EXPR); |
2201 | gimple_assign_set_rhs2 (gs: stmt, rhs); |
2202 | gimple_set_modified (s: stmt, modifiedp: true); |
2203 | } |
2204 | else |
2205 | { |
2206 | gimple_set_modified (s: stmt, modifiedp: true); |
2207 | gimple_cond_set_code (gs: as_a <gcond *> (p: stmt), code: EQ_EXPR); |
2208 | gimple_cond_set_rhs (gs: as_a <gcond *> (p: stmt), rhs); |
2209 | gimple_set_modified (s: stmt, modifiedp: true); |
2210 | } |
2211 | } |
2212 | } |
2213 | } |
2214 | } |
2215 | |
2216 | /* If STMT is a comparison of two uniform vectors reduce it to a comparison |
2217 | of scalar objects, otherwise leave STMT unchanged. */ |
2218 | |
2219 | static void |
2220 | reduce_vector_comparison_to_scalar_comparison (gimple *stmt) |
2221 | { |
2222 | if (gimple_code (g: stmt) == GIMPLE_COND) |
2223 | { |
2224 | tree lhs = gimple_cond_lhs (gs: stmt); |
2225 | tree rhs = gimple_cond_rhs (gs: stmt); |
2226 | |
2227 | /* We may have a vector comparison where both arms are uniform |
2228 | vectors. If so, we can simplify the vector comparison down |
2229 | to a scalar comparison. */ |
2230 | if (VECTOR_TYPE_P (TREE_TYPE (lhs)) |
2231 | && VECTOR_TYPE_P (TREE_TYPE (rhs))) |
2232 | { |
2233 | /* If either operand is an SSA_NAME, then look back to its |
2234 | defining statement to try and get at a suitable source. */ |
2235 | if (TREE_CODE (rhs) == SSA_NAME) |
2236 | { |
2237 | gimple *def_stmt = SSA_NAME_DEF_STMT (rhs); |
2238 | if (gimple_assign_single_p (gs: def_stmt)) |
2239 | rhs = gimple_assign_rhs1 (gs: def_stmt); |
2240 | } |
2241 | |
2242 | if (TREE_CODE (lhs) == SSA_NAME) |
2243 | { |
2244 | gimple *def_stmt = SSA_NAME_DEF_STMT (lhs); |
2245 | if (gimple_assign_single_p (gs: def_stmt)) |
2246 | lhs = gimple_assign_rhs1 (gs: def_stmt); |
2247 | } |
2248 | |
2249 | /* Now see if they are both uniform vectors and if so replace |
2250 | the vector comparison with a scalar comparison. */ |
2251 | tree rhs_elem = rhs ? uniform_vector_p (rhs) : NULL_TREE; |
2252 | tree lhs_elem = lhs ? uniform_vector_p (lhs) : NULL_TREE; |
2253 | if (rhs_elem && lhs_elem) |
2254 | { |
2255 | if (dump_file && dump_flags & TDF_DETAILS) |
2256 | { |
2257 | fprintf (stream: dump_file, format: "Reducing vector comparison: " ); |
2258 | print_gimple_stmt (dump_file, stmt, 0); |
2259 | } |
2260 | |
2261 | gimple_cond_set_rhs (gs: as_a <gcond *>(p: stmt), rhs: rhs_elem); |
2262 | gimple_cond_set_lhs (gs: as_a <gcond *>(p: stmt), lhs: lhs_elem); |
2263 | gimple_set_modified (s: stmt, modifiedp: true); |
2264 | |
2265 | if (dump_file && dump_flags & TDF_DETAILS) |
2266 | { |
2267 | fprintf (stream: dump_file, format: "To scalar equivalent: " ); |
2268 | print_gimple_stmt (dump_file, stmt, 0); |
2269 | fprintf (stream: dump_file, format: "\n" ); |
2270 | } |
2271 | } |
2272 | } |
2273 | } |
2274 | } |
2275 | |
2276 | /* If possible, rewrite the conditional as TRUE or FALSE, and return |
2277 | the taken edge. Otherwise, return NULL. */ |
2278 | |
2279 | edge |
2280 | dom_opt_dom_walker::fold_cond (gcond *cond) |
2281 | { |
2282 | simplify_using_ranges simplify (m_ranger); |
2283 | if (simplify.fold_cond (cond)) |
2284 | { |
2285 | basic_block bb = gimple_bb (g: cond); |
2286 | if (gimple_cond_true_p (gs: cond)) |
2287 | return find_taken_edge (bb, boolean_true_node); |
2288 | if (gimple_cond_false_p (gs: cond)) |
2289 | return find_taken_edge (bb, boolean_false_node); |
2290 | } |
2291 | return NULL; |
2292 | } |
2293 | |
/* Optimize the statement in block BB pointed to by iterator SI.

   We try to perform some simplistic global redundancy elimination and
   constant propagation:

   1- To detect global redundancy, we keep track of expressions that have
      been computed in this block and its dominators.  If we find that the
      same expression is computed more than once, we eliminate repeated
      computations by using the target of the first one.

   2- Constant values and copy assignments.  This is used to do very
      simplistic constant and copy propagation.  When a constant or copy
      assignment is found, we map the value on the RHS of the assignment to
      the variable in the LHS in the CONST_AND_COPIES table.

   3- Very simple redundant store elimination is performed.

   4- We can simplify a condition to a constant or from a relational
      condition to an equality condition.

   *REMOVED_P is set to true when the statement at SI was deleted
   (redundant store elimination).  Returns the outgoing edge of BB known
   to be taken if the control statement folded to a constant, else NULL. */

edge
dom_opt_dom_walker::optimize_stmt (basic_block bb, gimple_stmt_iterator *si,
				   bool *removed_p)
{
  gimple *stmt, *old_stmt;
  bool may_optimize_p;
  bool modified_p = false;
  bool was_noreturn;
  edge retval = NULL;

  old_stmt = stmt = gsi_stmt (i: *si);
  was_noreturn = is_gimple_call (gs: stmt) && gimple_call_noreturn_p (s: stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (stream: dump_file, format: "Optimizing statement " );
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  /* STMT may be a comparison of uniform vectors that we can simplify
     down to a comparison of scalars.  Do that transformation first
     so that all the scalar optimizations from here onward apply. */
  reduce_vector_comparison_to_scalar_comparison (stmt);

  update_stmt_if_modified (s: stmt);
  opt_stats.num_stmts++;

  /* Const/copy propagate into USES, VUSES and the RHS of VDEFs. */
  cprop_into_stmt (stmt, query: m_ranger);

  /* If the statement has been modified with constant replacements,
     fold its RHS before checking for redundant computations. */
  if (gimple_modified_p (g: stmt))
    {
      tree rhs = NULL;

      /* Try to fold the statement making sure that STMT is kept
	 up to date. */
      if (fold_stmt (si))
	{
	  stmt = gsi_stmt (i: *si);
	  gimple_set_modified (s: stmt, modifiedp: true);

	  if (dump_file && (dump_flags & TDF_DETAILS))
	    {
	      fprintf (stream: dump_file, format: " Folded to: " );
	      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	    }
	}

      /* We only need to consider cases that can yield a gimple operand. */
      if (gimple_assign_single_p (gs: stmt))
	rhs = gimple_assign_rhs1 (gs: stmt);
      else if (gimple_code (g: stmt) == GIMPLE_GOTO)
	rhs = gimple_goto_dest (gs: stmt);
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (p: stmt))
	/* This should never be an ADDR_EXPR. */
	rhs = gimple_switch_index (gs: swtch_stmt);

      /* Substitution may have changed what the ADDR_EXPR points to;
	 recompute its TREE_INVARIANT/TREE_CONSTANT flags. */
      if (rhs && TREE_CODE (rhs) == ADDR_EXPR)
	recompute_tree_invariant_for_addr_expr (rhs);

      /* Indicate that maybe_clean_or_replace_eh_stmt needs to be called,
	 even if fold_stmt updated the stmt already and thus cleared
	 gimple_modified_p flag on it. */
      modified_p = true;
    }

  /* Check for redundant computations.  Do this optimization only
     for assignments that have no volatile ops and conditionals. */
  may_optimize_p = (!gimple_has_side_effects (stmt)
		    && (is_gimple_assign (gs: stmt)
			|| (is_gimple_call (gs: stmt)
			    && gimple_call_lhs (gs: stmt) != NULL_TREE)
			|| gimple_code (g: stmt) == GIMPLE_COND
			|| gimple_code (g: stmt) == GIMPLE_SWITCH));

  if (may_optimize_p)
    {
      if (gimple_code (g: stmt) == GIMPLE_CALL)
	{
	  /* Resolve __builtin_constant_p.  If it hasn't been
	     folded to integer_one_node by now, it's fairly
	     certain that the value simply isn't constant. */
	  tree callee = gimple_call_fndecl (gs: stmt);
	  if (callee
	      && fndecl_built_in_p (node: callee, name1: BUILT_IN_CONSTANT_P))
	    {
	      propagate_tree_value_into_stmt (si, integer_zero_node);
	      stmt = gsi_stmt (i: *si);
	    }
	}

      if (gimple_code (g: stmt) == GIMPLE_COND)
	{
	  tree lhs = gimple_cond_lhs (gs: stmt);
	  tree rhs = gimple_cond_rhs (gs: stmt);

	  /* If the LHS has a range [0..1] and the RHS has a range ~[0..1],
	     then this conditional is computable at compile time.  We can just
	     shove either 0 or 1 into the LHS, mark the statement as modified
	     and all the right things will just happen below.

	     Note this would apply to any case where LHS has a range
	     narrower than its type implies and RHS is outside that
	     narrower range.  Future work. */
	  if (TREE_CODE (lhs) == SSA_NAME
	      && ssa_name_has_boolean_range (lhs)
	      && TREE_CODE (rhs) == INTEGER_CST
	      && ! (integer_zerop (rhs) || integer_onep (rhs)))
	    {
	      gimple_cond_set_lhs (gs: as_a <gcond *> (p: stmt),
				   fold_convert (TREE_TYPE (lhs),
						 integer_zero_node));
	      gimple_set_modified (s: stmt, modifiedp: true);
	    }
	  else if (TREE_CODE (lhs) == SSA_NAME)
	    {
	      /* Exploiting EVRP data is not yet fully integrated into DOM
		 but we need to do something for this case to avoid regressing
		 udr4.f90 and new1.C which have unexecutable blocks with
		 undefined behavior that get diagnosed if they're left in the
		 IL because we've attached range information to new
		 SSA_NAMES. */
	      update_stmt_if_modified (s: stmt);
	      edge taken_edge = fold_cond (cond: as_a <gcond *> (p: stmt));
	      if (taken_edge)
		{
		  gimple_set_modified (s: stmt, modifiedp: true);
		  update_stmt (s: stmt);
		  cfg_altered = true;
		  return taken_edge;
		}
	    }
	}

      update_stmt_if_modified (s: stmt);
      eliminate_redundant_computations (gsi: si, const_and_copies: m_const_and_copies,
					avail_exprs_stack: m_avail_exprs_stack);
      stmt = gsi_stmt (i: *si);

      /* Perform simple redundant store elimination.  A store is redundant
	 if the memory location already holds the value being stored:
	 look up the equivalent load in the available-expression table. */
      if (gimple_assign_single_p (gs: stmt)
	  && TREE_CODE (gimple_assign_lhs (stmt)) != SSA_NAME)
	{
	  tree lhs = gimple_assign_lhs (gs: stmt);
	  tree rhs = gimple_assign_rhs1 (gs: stmt);
	  tree cached_lhs;
	  gassign *new_stmt;
	  rhs = dom_valueize (t: rhs);
	  /* Build a new statement with the RHS and LHS exchanged. */
	  if (TREE_CODE (rhs) == SSA_NAME)
	    {
	      /* Preserve the original SSA_NAME_DEF_STMT; building the
		 artificial assignment would otherwise clobber it. */
	      gimple *defstmt = SSA_NAME_DEF_STMT (rhs);
	      new_stmt = gimple_build_assign (rhs, lhs);
	      SSA_NAME_DEF_STMT (rhs) = defstmt;
	    }
	  else
	    new_stmt = gimple_build_assign (rhs, lhs);
	  gimple_set_vuse (g: new_stmt, vuse: gimple_vuse (g: stmt));
	  expr_hash_elt *elt = NULL;
	  cached_lhs = m_avail_exprs_stack->lookup_avail_expr (new_stmt, false,
							       false, &elt);
	  /* Only remove the store when the recorded value matches and
	     the TBAA views of the two references agree. */
	  if (cached_lhs
	      && operand_equal_p (rhs, cached_lhs, flags: 0)
	      && refs_same_for_tbaa_p (elt->expr ()->kind == EXPR_SINGLE
				       ? elt->expr ()->ops.single.rhs
				       : NULL_TREE, lhs))
	    {
	      basic_block bb = gimple_bb (g: stmt);
	      unlink_stmt_vdef (stmt);
	      if (gsi_remove (si, true))
		{
		  bitmap_set_bit (need_eh_cleanup, bb->index);
		  if (dump_file && (dump_flags & TDF_DETAILS))
		    fprintf (stream: dump_file, format: " Flagged to clear EH edges.\n" );
		}
	      release_defs (stmt);
	      *removed_p = true;
	      return retval;
	    }
	}

      /* If this statement was not redundant, we may still be able to simplify
	 it, which may in turn allow other part of DOM or other passes to do
	 a better job. */
      test_for_singularity (stmt, avail_exprs_stack: m_avail_exprs_stack);
    }

  /* Record any additional equivalences created by this statement. */
  if (is_gimple_assign (gs: stmt))
    record_equivalences_from_stmt (stmt, may_optimize_p, avail_exprs_stack: m_avail_exprs_stack);

  /* If STMT is a COND_EXPR or SWITCH_EXPR and it was modified, then we may
     know where it goes. */
  if (gimple_modified_p (g: stmt) || modified_p)
    {
      tree val = NULL;

      if (gimple_code (g: stmt) == GIMPLE_COND)
	val = fold_binary_loc (gimple_location (g: stmt),
			       gimple_cond_code (gs: stmt), boolean_type_node,
			       gimple_cond_lhs (gs: stmt),
			       gimple_cond_rhs (gs: stmt));
      else if (gswitch *swtch_stmt = dyn_cast <gswitch *> (p: stmt))
	val = gimple_switch_index (gs: swtch_stmt);

      if (val && TREE_CODE (val) == INTEGER_CST)
	{
	  retval = find_taken_edge (bb, val);
	  if (retval)
	    {
	      /* Fix the condition to be either true or false. */
	      if (gimple_code (g: stmt) == GIMPLE_COND)
		{
		  if (integer_zerop (val))
		    gimple_cond_make_false (gs: as_a <gcond *> (p: stmt));
		  else if (integer_onep (val))
		    gimple_cond_make_true (gs: as_a <gcond *> (p: stmt));
		  else
		    gcc_unreachable ();

		  gimple_set_modified (s: stmt, modifiedp: true);
		}

	      /* Further simplifications may be possible. */
	      cfg_altered = true;
	    }
	}

      update_stmt_if_modified (s: stmt);

      /* If we simplified a statement in such a way as to be shown that it
	 cannot trap, update the eh information and the cfg to match. */
      if (maybe_clean_or_replace_eh_stmt (old_stmt, stmt))
	{
	  bitmap_set_bit (need_eh_cleanup, bb->index);
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (stream: dump_file, format: " Flagged to clear EH edges.\n" );
	}

      /* A call that became noreturn needs its fallthrough edge removed
	 later; queue it for fixup. */
      if (!was_noreturn
	  && is_gimple_call (gs: stmt) && gimple_call_noreturn_p (s: stmt))
	need_noreturn_fixup.safe_push (obj: stmt);
    }
  return retval;
}
2561 | |