1 | /* Support routines for Value Range Propagation (VRP). |
2 | Copyright (C) 2005-2023 Free Software Foundation, Inc. |
3 | Contributed by Diego Novillo <dnovillo@redhat.com>. |
4 | |
5 | This file is part of GCC. |
6 | |
7 | GCC is free software; you can redistribute it and/or modify |
8 | it under the terms of the GNU General Public License as published by |
9 | the Free Software Foundation; either version 3, or (at your option) |
10 | any later version. |
11 | |
12 | GCC is distributed in the hope that it will be useful, |
13 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
14 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
15 | GNU General Public License for more details. |
16 | |
17 | You should have received a copy of the GNU General Public License |
18 | along with GCC; see the file COPYING3. If not see |
19 | <http://www.gnu.org/licenses/>. */ |
20 | |
21 | #include "config.h" |
22 | #include "system.h" |
23 | #include "coretypes.h" |
24 | #include "basic-block.h" |
25 | #include "bitmap.h" |
26 | #include "sbitmap.h" |
27 | #include "options.h" |
28 | #include "dominance.h" |
29 | #include "function.h" |
30 | #include "cfg.h" |
31 | #include "tree.h" |
32 | #include "gimple.h" |
33 | #include "tree-pass.h" |
34 | #include "ssa.h" |
35 | #include "gimple-pretty-print.h" |
36 | #include "fold-const.h" |
37 | #include "cfganal.h" |
38 | #include "gimple-iterator.h" |
39 | #include "tree-cfg.h" |
40 | #include "tree-ssa-loop-manip.h" |
41 | #include "tree-ssa-loop-niter.h" |
42 | #include "tree-into-ssa.h" |
43 | #include "cfgloop.h" |
44 | #include "tree-scalar-evolution.h" |
45 | #include "tree-ssa-propagate.h" |
46 | #include "domwalk.h" |
47 | #include "vr-values.h" |
48 | #include "gimple-array-bounds.h" |
49 | #include "gimple-range.h" |
50 | #include "gimple-range-path.h" |
51 | #include "value-pointer-equiv.h" |
52 | #include "gimple-fold.h" |
53 | #include "tree-dfa.h" |
54 | #include "tree-ssa-dce.h" |
55 | |
56 | // This class is utilized by VRP and ranger to remove __builtin_unreachable |
57 | // calls, and reflect any resulting global ranges. |
58 | // |
// maybe_register() is called on condition statements, and if one
// matches the pattern of one branch being a builtin_unreachable, either check
// for early removal or register the resulting executable edge in a list.
62 | // |
63 | // During early/non-final processing, we check to see if ALL exports from the |
64 | // block can be safely updated with a new global value. If they can, then |
65 | // we rewrite the condition and update those values immediately. Otherwise |
66 | // the unreachable condition is left in the IL until the final pass. |
67 | // |
68 | // During final processing, after all blocks have been registered, |
69 | // remove_and_update_globals() will |
70 | // - check all exports from registered blocks |
71 | // - ensure the cache entry of each export is set with the appropriate range |
72 | // - rewrite the conditions to take the executable edge |
73 | // - perform DCE on any feeding instructions to those rewritten conditions |
74 | // |
75 | // Then each of the immediate use chain of each export is walked, and a new |
76 | // global range created by unioning the ranges at all remaining use locations. |
77 | |
class remove_unreachable {
public:
  // R is the ranger used for range queries; ALL is true when this is the
  // final VRP pass, in which every registered unreachable is removed.
  remove_unreachable (gimple_ranger &r, bool all) : m_ranger (r), final_p (all)
    { m_list.create (nelems: 30); }
  ~remove_unreachable () { m_list.release (); }
  // Early (non-final) processing of condition stmt S whose executable edge
  // is E; may rewrite S immediately if all exports are fully replaceable.
  void handle_early (gimple *s, edge e);
  // Examine GIMPLE_COND S; if exactly one arm is unreachable, either
  // process it early or queue the executable edge for the final pass.
  void maybe_register (gimple *s);
  // Final processing: rewrite queued conditions, DCE feeders, and compute
  // new global ranges for the exported names.
  bool remove_and_update_globals ();
  // Queued executable edges, stored as (src bb index, dest bb index).
  vec<std::pair<int, int> > m_list;
  gimple_ranger &m_ranger;	// Ranger instance used for all range queries.
  bool final_p;			// True during the final VRP pass.
};
90 | |
91 | // Check if block BB has a __builtin_unreachable () call on one arm, and |
92 | // register the executable edge if so. |
93 | |
void
remove_unreachable::maybe_register (gimple *s)
{
  gcc_checking_assert (gimple_code (s) == GIMPLE_COND);
  basic_block bb = gimple_bb (g: s);

  // An arm counts as "unreachable" when its destination has no successors
  // and its statement sequence reduces to a __builtin_unreachable () call.
  edge e0 = EDGE_SUCC (bb, 0);
  basic_block bb0 = e0->dest;
  bool un0 = EDGE_COUNT (bb0->succs) == 0
	     && gimple_seq_unreachable_p (bb_seq (bb: bb0));
  edge e1 = EDGE_SUCC (bb, 1);
  basic_block bb1 = e1->dest;
  bool un1 = EDGE_COUNT (bb1->succs) == 0
	     && gimple_seq_unreachable_p (bb_seq (bb: bb1));

  // If the 2 blocks are not different, ignore.  Either neither arm is
  // unreachable, or both are and there is nothing useful to extract.
  if (un0 == un1)
    return;

  // Constant expressions are ignored.
  if (TREE_CODE (gimple_cond_lhs (s)) != SSA_NAME
      && TREE_CODE (gimple_cond_rhs (s)) != SSA_NAME)
    return;

  // The executable edge is the one NOT leading to the unreachable block.
  edge e = un0 ? e1 : e0;

  // Before the final pass, try to resolve the condition immediately;
  // otherwise queue the edge (by block indices, since pointers may not
  // survive) for remove_and_update_globals.
  if (!final_p)
    handle_early (s, e);
  else
    m_list.safe_push (obj: std::make_pair (x&: e->src->index, y&: e->dest->index));
}
125 | |
// Return true if all uses of NAME are dominated by block BB.  One use
// is allowed in block BB; it is the one we hope to remove.
128 | // ie |
129 | // _2 = _1 & 7; |
130 | // if (_2 != 0) |
131 | // goto <bb 3>; [0.00%] |
132 | // Any additional use of _1 or _2 in this block invalidates early replacement. |
133 | |
134 | static bool |
135 | fully_replaceable (tree name, basic_block bb) |
136 | { |
137 | use_operand_p use_p; |
138 | imm_use_iterator iter; |
139 | bool saw_in_bb = false; |
140 | |
141 | // If a name loads from memory, we may lose information used in |
142 | // commoning opportunities later. See tree-ssa/ssa-pre-34.c. |
143 | gimple *def_stmt = SSA_NAME_DEF_STMT (name); |
144 | if (gimple_vuse (g: def_stmt)) |
145 | return false; |
146 | |
147 | FOR_EACH_IMM_USE_FAST (use_p, iter, name) |
148 | { |
149 | gimple *use_stmt = USE_STMT (use_p); |
150 | // Ignore debug stmts and the branch. |
151 | if (is_gimple_debug (gs: use_stmt)) |
152 | continue; |
153 | basic_block use_bb = gimple_bb (g: use_stmt); |
154 | // Only one use in the block allowed to avoid complicated cases. |
155 | if (use_bb == bb) |
156 | { |
157 | if (saw_in_bb) |
158 | return false; |
159 | else |
160 | saw_in_bb = true; |
161 | } |
162 | else if (!dominated_by_p (CDI_DOMINATORS, use_bb, bb)) |
163 | return false; |
164 | } |
165 | return true; |
166 | } |
167 | |
168 | // This routine is called to check builtin_unreachable calls during any |
169 | // time before final removal. The only way we can be sure it does not |
170 | // provide any additional information is to expect that we can update the |
171 | // global values of all exports from a block. This means the branch |
172 | // to the unreachable call must dominate all uses of those ssa-names, with |
173 | // the exception that there can be a single use in the block containing |
174 | // the branch. IF the name used in the branch is defined in the block, it may |
175 | // contain the name of something else that will be an export. And likewise |
176 | // that may also use another name that is an export etc. |
177 | // |
// As long as there is only a single use, we can be sure that there are no
// other side effects (like being passed to a call, or stored to a global,
// etc.).  This means we will miss cases where there are 2 or more uses that
// have no intervening statements that may have side effects, but it catches
// most of the cases we care about, and prevents expensive in-depth analysis.
183 | // |
184 | // Ranger will still reflect the proper ranges at other places in these missed |
185 | // cases, we simply will not remove/set globals early. |
186 | |
void
remove_unreachable::handle_early (gimple *s, edge e)
{
  bool lhs_p = TREE_CODE (gimple_cond_lhs (s)) == SSA_NAME;
  bool rhs_p = TREE_CODE (gimple_cond_rhs (s)) == SSA_NAME;
  // Do not remove __builtin_unreachable if it confers a relation, or
  // that relation may be lost in subsequent passes.
  if (lhs_p && rhs_p)
    return;
  // Do not remove addresses early. ie if (x == &y)
  if (lhs_p && TREE_CODE (gimple_cond_rhs (s)) == ADDR_EXPR)
    return;

  gcc_checking_assert (gimple_outgoing_range_stmt_p (e->src) == s);
  gcc_checking_assert (!final_p);

  // Check if every export use is dominated by this branch.  If any is
  // not, we cannot safely fold the exported range into a global yet.
  tree name;
  FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
    {
      if (!fully_replaceable (name, bb: e->src))
	return;
    }

  // Set the global value for each.  The range on entry to the executable
  // edge's destination reflects the restriction the unreachable implies.
  FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
    {
      Value_Range r (TREE_TYPE (name));
      m_ranger.range_on_entry (r, bb: e->dest, name);
      // Nothing at this late stage we can do if the write fails.
      if (!set_range_info (name, r))
	continue;
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: "Global Exported (via early unreachable): " );
	  print_generic_expr (dump_file, name, TDF_SLIM);
	  fprintf (stream: dump_file, format: " = " );
	  gimple_range_global (v&: r, name);
	  r.dump (dump_file);
	  fputc (c: '\n', stream: dump_file);
	}
    }

  // Remember the SSA name feeding the condition before rewriting it.
  tree ssa = lhs_p ? gimple_cond_lhs (gs: s) : gimple_cond_rhs (gs: s);

  // Rewrite the condition so only the executable edge can be taken.
  if (e->flags & EDGE_TRUE_VALUE)
    gimple_cond_make_true (gs: as_a<gcond *> (p: s));
  else
    gimple_cond_make_false (gs: as_a<gcond *> (p: s));
  update_stmt (s);

  // If the name on S is defined in this block, see if there is DCE work to do.
  if (gimple_bb (SSA_NAME_DEF_STMT (ssa)) == e->src)
    {
      auto_bitmap dce;
      bitmap_set_bit (dce, SSA_NAME_VERSION (ssa));
      simple_dce_from_worklist (dce);
    }
}
247 | |
248 | |
// Process the edges in the list, changing the conditions and removing any
// dead code feeding those conditions.  Calculate the range of any
// names that may have been exported from those blocks, and determine if
// there are any updates to their global ranges.
// Return true if any builtin_unreachables/globals were eliminated/updated.
254 | |
bool
remove_unreachable::remove_and_update_globals ()
{
  if (m_list.length () == 0)
    return false;

  // Ensure the cache in SCEV has been cleared before processing
  // globals to be removed.
  scev_reset ();

  bool change = false;
  tree name;
  unsigned i;
  bitmap_iterator bi;
  auto_bitmap all_exports;
  for (i = 0; i < m_list.length (); i++)
    {
      auto eb = m_list[i];
      basic_block src = BASIC_BLOCK_FOR_FN (cfun, eb.first);
      basic_block dest = BASIC_BLOCK_FOR_FN (cfun, eb.second);
      // Blocks may have vanished since registration; skip stale pairs.
      if (!src || !dest)
	continue;
      edge e = find_edge (src, dest);
      gimple *s = gimple_outgoing_range_stmt_p (bb: e->src);
      gcc_checking_assert (gimple_code (s) == GIMPLE_COND);

      bool dominate_exit_p = true;
      FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
	{
	  // Ensure the cache is set for NAME in the succ block.
	  Value_Range r(TREE_TYPE (name));
	  Value_Range ex(TREE_TYPE (name));
	  m_ranger.range_on_entry (r, bb: e->dest, name);
	  m_ranger.range_on_entry (r&: ex, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
	  // If the range produced by this __builtin_unreachable expression
	  // is not fully reflected in the range at exit, then it does not
	  // dominate the exit of the function.
	  if (ex.intersect (r))
	    dominate_exit_p = false;
	}

      // If the exit is dominated, add to the export list. Otherwise if this
      // isn't the final VRP pass, leave the call in the IL.
      if (dominate_exit_p)
	bitmap_ior_into (all_exports, m_ranger.gori ().exports (bb: e->src));
      else if (!final_p)
	continue;

      change = true;
      // Rewrite the condition so only the executable edge can be taken.
      if (e->flags & EDGE_TRUE_VALUE)
	gimple_cond_make_true (gs: as_a<gcond *> (p: s));
      else
	gimple_cond_make_false (gs: as_a<gcond *> (p: s));
      update_stmt (s);
    }

  if (bitmap_empty_p (map: all_exports))
    return false;
  // Invoke DCE on all exported names to eliminate dead feeding defs.
  auto_bitmap dce;
  bitmap_copy (dce, all_exports);
  // Don't attempt to DCE parameters.
  EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
    if (!ssa_name (i) || SSA_NAME_IS_DEFAULT_DEF (ssa_name (i)))
      bitmap_clear_bit (dce, i);
  simple_dce_from_worklist (dce);

  // Loop over all uses of each name and find maximal range. This is the
  // new global range.
  use_operand_p use_p;
  imm_use_iterator iter;
  EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
    {
      name = ssa_name (i);
      // The DCE above may have released some of these names.
      if (!name || SSA_NAME_IN_FREE_LIST (name))
	continue;
      Value_Range r (TREE_TYPE (name));
      Value_Range exp_range (TREE_TYPE (name));
      // Start undefined and union in the range at every remaining use.
      r.set_undefined ();
      FOR_EACH_IMM_USE_FAST (use_p, iter, name)
	{
	  gimple *use_stmt = USE_STMT (use_p);
	  if (is_gimple_debug (gs: use_stmt))
	    continue;
	  if (!m_ranger.range_of_expr (r&: exp_range, name, use_stmt))
	    exp_range.set_varying (TREE_TYPE (name));
	  r.union_ (r: exp_range);
	  // Once varying, no further use can change the union.
	  if (r.varying_p ())
	    break;
	}
      // Include the on-exit range to ensure non-dominated unreachables
      // don't incorrectly impact the global range.
      m_ranger.range_on_entry (r&: exp_range, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
      r.union_ (r: exp_range);
      // Only record ranges that actually say something new.
      if (r.varying_p () || r.undefined_p ())
	continue;
      if (!set_range_info (name, r))
	continue;
      change = true;
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: "Global Exported (via unreachable): " );
	  print_generic_expr (dump_file, name, TDF_SLIM);
	  fprintf (stream: dump_file, format: " = " );
	  gimple_range_global (v&: r, name);
	  r.dump (dump_file);
	  fputc (c: '\n', stream: dump_file);
	}
    }
  return change;
}
367 | |
368 | /* VR_TYPE describes a range with minimum value *MIN and maximum |
369 | value *MAX. Restrict the range to the set of values that have |
370 | no bits set outside NONZERO_BITS. Update *MIN and *MAX and |
371 | return the new range type. |
372 | |
373 | SGN gives the sign of the values described by the range. */ |
374 | |
enum value_range_kind
intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
				   wide_int *min, wide_int *max,
				   const wide_int &nonzero_bits,
				   signop sgn)
{
  if (vr_type == VR_ANTI_RANGE)
    {
      /* The VR_ANTI_RANGE is equivalent to the union of the ranges
	 A: [-INF, *MIN) and B: (*MAX, +INF].  First use NONZERO_BITS
	 to create an inclusive upper bound for A and an inclusive lower
	 bound for B.  */
      wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
      wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);

      /* If the calculation of A_MAX wrapped, A is effectively empty
	 and A_MAX is the highest value that satisfies NONZERO_BITS.
	 Likewise if the calculation of B_MIN wrapped, B is effectively
	 empty and B_MIN is the lowest value that satisfies NONZERO_BITS.  */
      bool a_empty = wi::ge_p (x: a_max, y: *min, sgn);
      bool b_empty = wi::le_p (x: b_min, y: *max, sgn);

      /* If both A and B are empty, there are no valid values.  */
      if (a_empty && b_empty)
	return VR_UNDEFINED;

      /* If exactly one of A or B is empty, return a VR_RANGE for the
	 other one.  The wrapped bound of the empty side is the extreme
	 value satisfying NONZERO_BITS, so [B_MIN, A_MAX] describes the
	 surviving side in either case.  */
      if (a_empty || b_empty)
	{
	  *min = b_min;
	  *max = a_max;
	  gcc_checking_assert (wi::le_p (*min, *max, sgn));
	  return VR_RANGE;
	}

      /* Update the VR_ANTI_RANGE bounds.  The excluded interval shrinks
	 to (A_MAX, B_MIN), expressed inclusively.  */
      *min = a_max + 1;
      *max = b_min - 1;
      gcc_checking_assert (wi::le_p (*min, *max, sgn));

      /* Now check whether the excluded range includes any values that
	 satisfy NONZERO_BITS.  If not, switch to a full VR_RANGE.  */
      if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
	{
	  unsigned int precision = min->get_precision ();
	  *min = wi::min_value (precision, sgn);
	  *max = wi::max_value (precision, sgn);
	  vr_type = VR_RANGE;
	}
    }
  /* Note: an anti-range that degraded to VR_RANGE above is already a
     full range and is unaffected by the rounding below.  */
  if (vr_type == VR_RANGE || vr_type == VR_VARYING)
    {
      *max = wi::round_down_for_mask (*max, nonzero_bits);

      /* Check that the range contains at least one valid value.  */
      if (wi::gt_p (x: *min, y: *max, sgn))
	return VR_UNDEFINED;

      *min = wi::round_up_for_mask (*min, nonzero_bits);
      gcc_checking_assert (wi::le_p (*min, *max, sgn));
    }
  return vr_type;
}
439 | |
440 | /* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE |
441 | otherwise. We only handle additive operations and set NEG to true if the |
442 | symbol is negated and INV to the invariant part, if any. */ |
443 | |
444 | static tree |
445 | get_single_symbol (tree t, bool *neg, tree *inv) |
446 | { |
447 | bool neg_; |
448 | tree inv_; |
449 | |
450 | *inv = NULL_TREE; |
451 | *neg = false; |
452 | |
453 | if (TREE_CODE (t) == PLUS_EXPR |
454 | || TREE_CODE (t) == POINTER_PLUS_EXPR |
455 | || TREE_CODE (t) == MINUS_EXPR) |
456 | { |
457 | if (is_gimple_min_invariant (TREE_OPERAND (t, 0))) |
458 | { |
459 | neg_ = (TREE_CODE (t) == MINUS_EXPR); |
460 | inv_ = TREE_OPERAND (t, 0); |
461 | t = TREE_OPERAND (t, 1); |
462 | } |
463 | else if (is_gimple_min_invariant (TREE_OPERAND (t, 1))) |
464 | { |
465 | neg_ = false; |
466 | inv_ = TREE_OPERAND (t, 1); |
467 | t = TREE_OPERAND (t, 0); |
468 | } |
469 | else |
470 | return NULL_TREE; |
471 | } |
472 | else |
473 | { |
474 | neg_ = false; |
475 | inv_ = NULL_TREE; |
476 | } |
477 | |
478 | if (TREE_CODE (t) == NEGATE_EXPR) |
479 | { |
480 | t = TREE_OPERAND (t, 0); |
481 | neg_ = !neg_; |
482 | } |
483 | |
484 | if (TREE_CODE (t) != SSA_NAME) |
485 | return NULL_TREE; |
486 | |
487 | if (inv_ && TREE_OVERFLOW_P (inv_)) |
488 | inv_ = drop_tree_overflow (inv_); |
489 | |
490 | *neg = neg_; |
491 | *inv = inv_; |
492 | return t; |
493 | } |
494 | |
495 | /* Compare two values VAL1 and VAL2. Return |
496 | |
497 | -2 if VAL1 and VAL2 cannot be compared at compile-time, |
498 | -1 if VAL1 < VAL2, |
499 | 0 if VAL1 == VAL2, |
500 | +1 if VAL1 > VAL2, and |
501 | +2 if VAL1 != VAL2 |
502 | |
503 | This is similar to tree_int_cst_compare but supports pointer values |
504 | and values that cannot be compared at compile time. |
505 | |
506 | If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to |
507 | true if the return value is only valid if we assume that signed |
508 | overflow is undefined. */ |
509 | |
int
compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
{
  if (val1 == val2)
    return 0;

  /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
     both integers.  */
  gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
	      == POINTER_TYPE_P (TREE_TYPE (val2)));

  /* Convert the two values into the same type.  This is needed because
     sizetype causes sign extension even for unsigned types.  */
  if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
    val2 = fold_convert (TREE_TYPE (val1), val2);

  const bool overflow_undefined
    = INTEGRAL_TYPE_P (TREE_TYPE (val1))
      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
  tree inv1, inv2;
  bool neg1, neg2;
  tree sym1 = get_single_symbol (t: val1, neg: &neg1, inv: &inv1);
  tree sym2 = get_single_symbol (t: val2, neg: &neg2, inv: &inv2);

  /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
     accordingly.  If VAL1 and VAL2 don't use the same name, return -2.  */
  if (sym1 && sym2)
    {
      /* Both values must use the same name with the same sign.  */
      if (sym1 != sym2 || neg1 != neg2)
	return -2;

      /* [-]NAME + CST == [-]NAME + CST.  */
      if (inv1 == inv2)
	return 0;

      /* If overflow is defined we cannot simplify more.  */
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets the no-warning bit to declare
	     that overflow doesn't happen.  */
	  && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
	  && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
	*strict_overflow_p = true;

      /* A missing invariant part stands for an invariant of zero, so the
	 two constants can be compared directly.  */
      if (!inv1)
	inv1 = build_int_cst (TREE_TYPE (val1), 0);
      if (!inv2)
	inv2 = build_int_cst (TREE_TYPE (val2), 0);

      return wi::cmp (x: wi::to_wide (t: inv1), y: wi::to_wide (t: inv2),
		      TYPE_SIGN (TREE_TYPE (val1)));
    }

  const bool cst1 = is_gimple_min_invariant (val1);
  const bool cst2 = is_gimple_min_invariant (val2);

  /* If one is of the form '[-]NAME + CST' and the other is constant, then
     it might be possible to say something depending on the constants.  */
  if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
    {
      if (!overflow_undefined)
	return -2;

      if (strict_overflow_p != NULL
	  /* Symbolic range building sets the no-warning bit to declare
	     that overflow doesn't happen.  */
	  && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
	  && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
	*strict_overflow_p = true;

      const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
      tree cst = cst1 ? val1 : val2;
      tree inv = cst1 ? inv2 : inv1;

      /* Compute the difference between the constants.  If it overflows or
	 underflows, this means that we can trivially compare the NAME with
	 it and, consequently, the two values with each other.  */
      wide_int diff = wi::to_wide (t: cst) - wi::to_wide (t: inv);
      if (wi::cmp (x: 0, y: wi::to_wide (t: inv), sgn)
	  != wi::cmp (x: diff, y: wi::to_wide (t: cst), sgn))
	{
	  const int res = wi::cmp (x: wi::to_wide (t: cst), y: wi::to_wide (t: inv), sgn);
	  /* When VAL2 held the symbol, the comparison's sign flips.  */
	  return cst1 ? res : -res;
	}

      return -2;
    }

  /* We cannot say anything more for non-constants.  */
  if (!cst1 || !cst2)
    return -2;

  if (!POINTER_TYPE_P (TREE_TYPE (val1)))
    {
      /* We cannot compare overflowed values.  */
      if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	return -2;

      if (TREE_CODE (val1) == INTEGER_CST
	  && TREE_CODE (val2) == INTEGER_CST)
	return tree_int_cst_compare (t1: val1, t2: val2);

      /* Fall back to poly_int comparisons, which may be unordered.  */
      if (poly_int_tree_p (t: val1) && poly_int_tree_p (t: val2))
	{
	  if (known_eq (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 0;
	  if (known_lt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return -1;
	  if (known_gt (wi::to_poly_widest (val1),
			wi::to_poly_widest (val2)))
	    return 1;
	}

      return -2;
    }
  else
    {
      if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
	{
	  /* We cannot compare overflowed values.  */
	  if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
	    return -2;

	  return tree_int_cst_compare (t1: val1, t2: val2);
	}

      /* First see if VAL1 and VAL2 are not the same.  */
      if (operand_equal_p (val1, val2, flags: 0))
	return 0;

      /* The address foldings below may rely on undefined-overflow
	 assumptions; defer any warnings they would raise.  */
      fold_defer_overflow_warnings ();

      /* If VAL1 is a lower address than VAL2, return -1.  */
      tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
      if (t && integer_onep (t))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return -1;
	}

      /* If VAL1 is a higher address than VAL2, return +1.  */
      t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
      if (t && integer_onep (t))
	{
	  fold_undefer_and_ignore_overflow_warnings ();
	  return 1;
	}

      /* If VAL1 is different than VAL2, return +2.  */
      t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
      fold_undefer_and_ignore_overflow_warnings ();
      if (t && integer_onep (t))
	return 2;

      return -2;
    }
}
672 | |
673 | /* Compare values like compare_values_warnv. */ |
674 | |
675 | int |
676 | compare_values (tree val1, tree val2) |
677 | { |
678 | bool sop; |
679 | return compare_values_warnv (val1, val2, strict_overflow_p: &sop); |
680 | } |
681 | |
682 | /* Helper for overflow_comparison_p |
683 | |
684 | OP0 CODE OP1 is a comparison. Examine the comparison and potentially |
685 | OP1's defining statement to see if it ultimately has the form |
686 | OP0 CODE (OP0 PLUS INTEGER_CST) |
687 | |
688 | If so, return TRUE indicating this is an overflow test and store into |
689 | *NEW_CST an updated constant that can be used in a narrowed range test. |
690 | |
691 | REVERSED indicates if the comparison was originally: |
692 | |
693 | OP1 CODE' OP0. |
694 | |
695 | This affects how we build the updated constant. */ |
696 | |
697 | static bool |
698 | overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1, |
699 | bool reversed, tree *new_cst) |
700 | { |
701 | /* See if this is a relational operation between two SSA_NAMES with |
702 | unsigned, overflow wrapping values. If so, check it more deeply. */ |
703 | if ((code == LT_EXPR || code == LE_EXPR |
704 | || code == GE_EXPR || code == GT_EXPR) |
705 | && TREE_CODE (op0) == SSA_NAME |
706 | && TREE_CODE (op1) == SSA_NAME |
707 | && INTEGRAL_TYPE_P (TREE_TYPE (op0)) |
708 | && TYPE_UNSIGNED (TREE_TYPE (op0)) |
709 | && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0))) |
710 | { |
711 | gimple *op1_def = SSA_NAME_DEF_STMT (op1); |
712 | |
713 | /* Now look at the defining statement of OP1 to see if it adds |
714 | or subtracts a nonzero constant from another operand. */ |
715 | if (op1_def |
716 | && is_gimple_assign (gs: op1_def) |
717 | && gimple_assign_rhs_code (gs: op1_def) == PLUS_EXPR |
718 | && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST |
719 | && !integer_zerop (gimple_assign_rhs2 (gs: op1_def))) |
720 | { |
721 | tree target = gimple_assign_rhs1 (gs: op1_def); |
722 | |
723 | /* If we did not find our target SSA_NAME, then this is not |
724 | an overflow test. */ |
725 | if (op0 != target) |
726 | return false; |
727 | |
728 | tree type = TREE_TYPE (op0); |
729 | wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED); |
730 | tree inc = gimple_assign_rhs2 (gs: op1_def); |
731 | if (reversed) |
732 | *new_cst = wide_int_to_tree (type, cst: max + wi::to_wide (t: inc)); |
733 | else |
734 | *new_cst = wide_int_to_tree (type, cst: max - wi::to_wide (t: inc)); |
735 | return true; |
736 | } |
737 | } |
738 | return false; |
739 | } |
740 | |
741 | /* OP0 CODE OP1 is a comparison. Examine the comparison and potentially |
742 | OP1's defining statement to see if it ultimately has the form |
743 | OP0 CODE (OP0 PLUS INTEGER_CST) |
744 | |
745 | If so, return TRUE indicating this is an overflow test and store into |
746 | *NEW_CST an updated constant that can be used in a narrowed range test. |
747 | |
748 | These statements are left as-is in the IL to facilitate discovery of |
749 | {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But |
750 | the alternate range representation is often useful within VRP. */ |
751 | |
752 | bool |
753 | overflow_comparison_p (tree_code code, tree name, tree val, tree *new_cst) |
754 | { |
755 | if (overflow_comparison_p_1 (code, op0: name, op1: val, reversed: false, new_cst)) |
756 | return true; |
757 | return overflow_comparison_p_1 (code: swap_tree_comparison (code), op0: val, op1: name, |
758 | reversed: true, new_cst); |
759 | } |
760 | |
761 | /* Searches the case label vector VEC for the index *IDX of the CASE_LABEL |
762 | that includes the value VAL. The search is restricted to the range |
763 | [START_IDX, n - 1] where n is the size of VEC. |
764 | |
765 | If there is a CASE_LABEL for VAL, its index is placed in IDX and true is |
766 | returned. |
767 | |
768 | If there is no CASE_LABEL for VAL and there is one that is larger than VAL, |
769 | it is placed in IDX and false is returned. |
770 | |
771 | If VAL is larger than any CASE_LABEL, n is placed on IDX and false is |
772 | returned. */ |
773 | |
774 | bool |
775 | find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx) |
776 | { |
777 | size_t n = gimple_switch_num_labels (gs: stmt); |
778 | size_t low, high; |
779 | |
780 | /* Find case label for minimum of the value range or the next one. |
781 | At each iteration we are searching in [low, high - 1]. */ |
782 | |
783 | for (low = start_idx, high = n; high != low; ) |
784 | { |
785 | tree t; |
786 | int cmp; |
787 | /* Note that i != high, so we never ask for n. */ |
788 | size_t i = (high + low) / 2; |
789 | t = gimple_switch_label (gs: stmt, index: i); |
790 | |
791 | /* Cache the result of comparing CASE_LOW and val. */ |
792 | cmp = tree_int_cst_compare (CASE_LOW (t), t2: val); |
793 | |
794 | if (cmp == 0) |
795 | { |
796 | /* Ranges cannot be empty. */ |
797 | *idx = i; |
798 | return true; |
799 | } |
800 | else if (cmp > 0) |
801 | high = i; |
802 | else |
803 | { |
804 | low = i + 1; |
805 | if (CASE_HIGH (t) != NULL |
806 | && tree_int_cst_compare (CASE_HIGH (t), t2: val) >= 0) |
807 | { |
808 | *idx = i; |
809 | return true; |
810 | } |
811 | } |
812 | } |
813 | |
814 | *idx = high; |
815 | return false; |
816 | } |
817 | |
818 | /* Searches the case label vector VEC for the range of CASE_LABELs that is used |
819 | for values between MIN and MAX. The first index is placed in MIN_IDX. The |
820 | last index is placed in MAX_IDX. If the range of CASE_LABELs is empty |
821 | then MAX_IDX < MIN_IDX. |
822 | Returns true if the default label is not needed. */ |
823 | |
bool
find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
		       size_t *max_idx)
{
  size_t i, j;
  // I is the label containing MIN (or the first above it); J is the label
  // containing MAX (or the first above it).  The second search can start
  // at I since MAX >= MIN.
  bool min_take_default = !find_case_label_index (stmt, start_idx: 1, val: min, idx: &i);
  bool max_take_default = !find_case_label_index (stmt, start_idx: i, val: max, idx: &j);

  if (i == j
      && min_take_default
      && max_take_default)
    {
      /* Only the default case label reached.
	 Return an empty range.  */
      *min_idx = 1;
      *max_idx = 0;
      return false;
    }
  else
    {
      bool take_default = min_take_default || max_take_default;
      tree low, high;
      size_t k;

      // When MAX itself has no label, J points one past the last covered
      // label; back up to the last one actually in range.
      if (max_take_default)
	j--;

      /* If the case label range is continuous, we do not need
	 the default case label.  Verify that.  */
      high = CASE_LOW (gimple_switch_label (stmt, i));
      if (CASE_HIGH (gimple_switch_label (stmt, i)))
	high = CASE_HIGH (gimple_switch_label (stmt, i));
      for (k = i + 1; k <= j; ++k)
	{
	  low = CASE_LOW (gimple_switch_label (stmt, k));
	  // A gap exists unless this label's low bound is exactly one
	  // past the previous label's high bound.
	  if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
	    {
	      take_default = true;
	      break;
	    }
	  high = low;
	  if (CASE_HIGH (gimple_switch_label (stmt, k)))
	    high = CASE_HIGH (gimple_switch_label (stmt, k));
	}

      *min_idx = i;
      *max_idx = j;
      return !take_default;
    }
}
874 | |
875 | /* Given a SWITCH_STMT, return the case label that encompasses the |
876 | known possible values for the switch operand. RANGE_OF_OP is a |
877 | range for the known values of the switch operand. */ |
878 | |
tree
find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
{
  /* Nothing useful can be determined for an undefined or fully
     varying operand range.  */
  if (range_of_op->undefined_p ()
      || range_of_op->varying_p ())
    return NULL_TREE;

  size_t i, j;
  tree op = gimple_switch_index (gs: switch_stmt);
  tree type = TREE_TYPE (op);
  unsigned prec = TYPE_PRECISION (type);
  signop sign = TYPE_SIGN (type);
  /* Compute the span of case labels [i, j] that can contain values in
     the operand's range.  */
  tree tmin = wide_int_to_tree (type, cst: range_of_op->lower_bound ());
  tree tmax = wide_int_to_tree (type, cst: range_of_op->upper_bound ());
  find_case_label_range (stmt: switch_stmt, min: tmin, max: tmax, min_idx: &i, max_idx: &j);
  if (i == j)
    {
      /* Look for exactly one label that encompasses the range of
	 the operand.  */
      tree label = gimple_switch_label (gs: switch_stmt, index: i);
      tree case_high
	= CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
      wide_int wlow = wi::to_wide (CASE_LOW (label));
      wide_int whigh = wi::to_wide (t: case_high);
      int_range_max label_range (type,
				 wide_int::from (x: wlow, precision: prec, sgn: sign),
				 wide_int::from (x: whigh, precision: prec, sgn: sign));
      if (!types_compatible_p (type1: label_range.type (), type2: range_of_op->type ()))
	range_cast (r&: label_range, type: range_of_op->type ());
      /* The label qualifies only if it covers every value the operand
	 can take, i.e. the intersection leaves the range unchanged.  */
      label_range.intersect (*range_of_op);
      if (label_range == *range_of_op)
	return label;
    }
  else if (i > j)
    {
      /* If there are no labels at all, take the default.  */
      return gimple_switch_label (gs: switch_stmt, index: 0);
    }
  else
    {
      /* Otherwise, there are various labels that can encompass
	 the range of operand.  In which case, see if the range of
	 the operand is entirely *outside* the bounds of all the
	 (non-default) case labels.  If so, take the default.  */
      unsigned n = gimple_switch_num_labels (gs: switch_stmt);
      tree min_label = gimple_switch_label (gs: switch_stmt, index: 1);
      tree max_label = gimple_switch_label (gs: switch_stmt, index: n - 1);
      tree case_high = CASE_HIGH (max_label);
      if (!case_high)
	case_high = CASE_LOW (max_label);
      int_range_max label_range (TREE_TYPE (CASE_LOW (min_label)),
				 wi::to_wide (CASE_LOW (min_label)),
				 wi::to_wide (t: case_high));
      if (!types_compatible_p (type1: label_range.type (), type2: range_of_op->type ()))
	range_cast (r&: label_range, type: range_of_op->type ());
      label_range.intersect (*range_of_op);
      /* An empty intersection means no case label can be reached.  */
      if (label_range.undefined_p ())
	return gimple_switch_label (gs: switch_stmt, index: 0);
    }
  /* Multiple labels may apply; no single label can be returned.  */
  return NULL_TREE;
}
940 | |
/* Pairing of a case label expression with a basic block.
   NOTE(review): the consumers of this struct are outside this chunk;
   field meanings are inferred from their names — presumably the case
   label expression and the label's destination block.  Confirm against
   the code that fills it in.  */
struct case_info
{
  tree expr;		// Case label expression.
  basic_block bb;	// Associated basic block.
};
946 | |
947 | // This is a ranger based folder which continues to use the dominator |
948 | // walk to access the substitute and fold machinery. Ranges are calculated |
949 | // on demand. |
950 | |
class rvrp_folder : public substitute_and_fold_engine
{
public:

  // R is the ranger supplying values on demand; ALL is forwarded to the
  // __builtin_unreachable remover.  A pointer-equivalence tracker is
  // owned by this folder and freed in the destructor.
  rvrp_folder (gimple_ranger *r, bool all)
    : substitute_and_fold_engine (),
      m_unreachable (*r, all),
      m_simplifier (r, r->non_executable_edge_flag)
  {
    m_ranger = r;
    m_pta = new pointer_equiv_analyzer (m_ranger);
    m_last_bb_stmt = NULL;
  }

  ~rvrp_folder ()
  {
    delete m_pta;
  }

  // Return a substitution value for NAME at statement S, if any.
  // Falls back to the pointer-equivalence tracker for supported
  // pointer names when ranger has no value.
  tree value_of_expr (tree name, gimple *s = NULL) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    tree ret = m_ranger->value_of_expr (expr: name, s);
    if (!ret && supported_pointer_equiv_p (expr: name))
      ret = m_pta->get_equiv (ssa: name);
    return ret;
  }

  // Return a substitution value for NAME as seen on edge E, if any.
  tree value_on_edge (edge e, tree name) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    tree ret = m_ranger->value_on_edge (e, expr: name);
    if (!ret && supported_pointer_equiv_p (expr: name))
      ret = m_pta->get_equiv (ssa: name);
    return ret;
  }

  // Return a substitution value for NAME as defined by statement S.
  tree value_of_stmt (gimple *s, tree name = NULL) override
  {
    // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
    if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
      return NULL;
    return m_ranger->value_of_stmt (s, name);
  }

  // Called when the dominator walk enters BB: open a pointer-equiv
  // scope, register inferred ranges from the PHIs, and remember the
  // last statement so pre_fold_stmt can detect the end of the block.
  void pre_fold_bb (basic_block bb) override
  {
    m_pta->enter (bb);
    for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi);
	 gsi_next (i: &gsi))
      m_ranger->register_inferred_ranges (s: gsi.phi ());
    m_last_bb_stmt = last_nondebug_stmt (bb);
  }

  // Called when the walk leaves BB: close the pointer-equiv scope.
  void post_fold_bb (basic_block bb) override
  {
    m_pta->leave (bb);
  }

  void pre_fold_stmt (gimple *stmt) override
  {
    m_pta->visit_stmt (stmt);
    // If this is the last stmt and there are inferred ranges, reparse the
    // block for transitive inferred ranges that occur earlier in the block.
    if (stmt == m_last_bb_stmt)
      {
	m_ranger->register_transitive_inferred_ranges (bb: gimple_bb (g: stmt));
	// Also check for builtin_unreachable calls.
	if (cfun->after_inlining && gimple_code (g: stmt) == GIMPLE_COND)
	  m_unreachable.maybe_register (s: stmt);
      }
  }

  // Try range-based simplification first; otherwise let ranger fold the
  // statement.  Either way, pick up any ranges the (possibly modified)
  // statement implies.
  bool fold_stmt (gimple_stmt_iterator *gsi) override
  {
    bool ret = m_simplifier.simplify (gsi);
    if (!ret)
      ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
    m_ranger->register_inferred_ranges (s: gsi_stmt (i: *gsi));
    return ret;
  }

  // Public so the pass can invoke remove_and_update_globals () after
  // substitute_and_fold completes.
  remove_unreachable m_unreachable;
private:
  DISABLE_COPY_AND_ASSIGN (rvrp_folder);
  gimple_ranger *m_ranger;		// On-demand range oracle (not owned).
  simplify_using_ranges m_simplifier;	// Range-based stmt simplifier.
  pointer_equiv_analyzer *m_pta;	// Owned; deleted in the dtor.
  gimple *m_last_bb_stmt;		// Last nondebug stmt of current BB.
};
1045 | |
1046 | /* Main entry point for a VRP pass using just ranger. This can be called |
1047 | from anywhere to perform a VRP pass, including from EVRP. */ |
1048 | |
unsigned int
execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p,
		    bool final_p)
{
  /* Loop info, loop-closed SSA, SCEV and dominators are set up before
     folding and torn down at the end of the pass.  */
  loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
  rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
  scev_initialize ();
  calculate_dominance_info (CDI_DOMINATORS);

  set_all_edges_as_executable (fun);
  gimple_ranger *ranger = enable_ranger (m: fun, use_imm_uses: false);
  /* FINAL_P is forwarded to the folder's unreachable-call remover.  */
  rvrp_folder folder (ranger, final_p);
  phi_analysis_initialize (ranger->const_query ());
  folder.substitute_and_fold ();
  // Remove tagged builtin-unreachable and maybe update globals.
  folder.m_unreachable.remove_and_update_globals ();
  if (dump_file && (dump_flags & TDF_DETAILS))
    ranger->dump (f: dump_file);

  if ((warn_array_bounds || warn_strict_flex_arrays) && warn_array_bounds_p)
    {
      // Set all edges as executable, except those ranger says aren't.
      int non_exec_flag = ranger->non_executable_edge_flag;
      basic_block bb;
      FOR_ALL_BB_FN (bb, fun)
	{
	  edge_iterator ei;
	  edge e;
	  FOR_EACH_EDGE (e, ei, bb->succs)
	    if (e->flags & non_exec_flag)
	      e->flags &= ~EDGE_EXECUTABLE;
	    else
	      e->flags |= EDGE_EXECUTABLE;
	}
      /* Reset SCEV before running the array bounds checker since the
	 IL has just been modified by the folder.  */
      scev_reset ();
      array_bounds_checker array_checker (fun, ranger);
      array_checker.check ();
    }

  phi_analysis_finalize ();
  disable_ranger (fun);
  scev_finalize ();
  loop_optimizer_finalize ();
  return 0;
}
1094 | |
1095 | // Implement a Fast VRP folder. Not quite as effective but faster. |
1096 | |
1097 | class fvrp_folder : public substitute_and_fold_engine |
1098 | { |
1099 | public: |
1100 | fvrp_folder (dom_ranger *dr) : substitute_and_fold_engine (), |
1101 | m_simplifier (dr) |
1102 | { m_dom_ranger = dr; } |
1103 | |
1104 | ~fvrp_folder () { } |
1105 | |
1106 | tree value_of_expr (tree name, gimple *s = NULL) override |
1107 | { |
1108 | // Shortcircuit subst_and_fold callbacks for abnormal ssa_names. |
1109 | if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) |
1110 | return NULL; |
1111 | return m_dom_ranger->value_of_expr (expr: name, s); |
1112 | } |
1113 | |
1114 | tree value_on_edge (edge e, tree name) override |
1115 | { |
1116 | // Shortcircuit subst_and_fold callbacks for abnormal ssa_names. |
1117 | if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) |
1118 | return NULL; |
1119 | return m_dom_ranger->value_on_edge (e, expr: name); |
1120 | } |
1121 | |
1122 | tree value_of_stmt (gimple *s, tree name = NULL) override |
1123 | { |
1124 | // Shortcircuit subst_and_fold callbacks for abnormal ssa_names. |
1125 | if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name)) |
1126 | return NULL; |
1127 | return m_dom_ranger->value_of_stmt (s, name); |
1128 | } |
1129 | |
1130 | void pre_fold_bb (basic_block bb) override |
1131 | { |
1132 | m_dom_ranger->pre_bb (bb); |
1133 | // Now process the PHIs in advance. |
1134 | gphi_iterator psi = gsi_start_phis (bb); |
1135 | for ( ; !gsi_end_p (i: psi); gsi_next (i: &psi)) |
1136 | { |
1137 | tree name = gimple_range_ssa_p (PHI_RESULT (psi.phi ())); |
1138 | if (name) |
1139 | { |
1140 | Value_Range vr(TREE_TYPE (name)); |
1141 | m_dom_ranger->range_of_stmt (r&: vr, s: psi.phi (), name); |
1142 | } |
1143 | } |
1144 | } |
1145 | |
1146 | void post_fold_bb (basic_block bb) override |
1147 | { |
1148 | m_dom_ranger->post_bb (bb); |
1149 | } |
1150 | |
1151 | void pre_fold_stmt (gimple *s) override |
1152 | { |
1153 | // Ensure range_of_stmt has been called. |
1154 | tree type = gimple_range_type (s); |
1155 | if (type) |
1156 | { |
1157 | Value_Range vr(type); |
1158 | m_dom_ranger->range_of_stmt (r&: vr, s); |
1159 | } |
1160 | } |
1161 | |
1162 | bool fold_stmt (gimple_stmt_iterator *gsi) override |
1163 | { |
1164 | bool ret = m_simplifier.simplify (gsi); |
1165 | if (!ret) |
1166 | ret = ::fold_stmt (gsi, follow_single_use_edges); |
1167 | return ret; |
1168 | } |
1169 | |
1170 | private: |
1171 | DISABLE_COPY_AND_ASSIGN (fvrp_folder); |
1172 | simplify_using_ranges m_simplifier; |
1173 | dom_ranger *m_dom_ranger; |
1174 | }; |
1175 | |
1176 | |
1177 | // Main entry point for a FAST VRP pass using a dom ranger. |
1178 | |
1179 | unsigned int |
1180 | execute_fast_vrp (struct function *fun) |
1181 | { |
1182 | calculate_dominance_info (CDI_DOMINATORS); |
1183 | dom_ranger dr; |
1184 | fvrp_folder folder (&dr); |
1185 | |
1186 | gcc_checking_assert (!fun->x_range_query); |
1187 | fun->x_range_query = &dr; |
1188 | |
1189 | folder.substitute_and_fold (); |
1190 | |
1191 | fun->x_range_query = NULL; |
1192 | return 0; |
1193 | } |
1194 | |
1195 | namespace { |
1196 | |
/* Metadata for the ranger-based VRP pass.  */

const pass_data pass_data_vrp =
{
  .type: GIMPLE_PASS, /* type */
  .name: "vrp" , /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  .todo_flags_finish: ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};

/* Metadata for the early VRP pass.  */

const pass_data pass_data_early_vrp =
{
  .type: GIMPLE_PASS, /* type */
  .name: "evrp" , /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_TREE_EARLY_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  .todo_flags_finish: ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};

/* Metadata for the fast VRP pass; pass_vrp::execute dispatches to
   execute_fast_vrp when constructed with this instance.  */

const pass_data pass_data_fast_vrp =
{
  .type: GIMPLE_PASS, /* type */
  .name: "fvrp" , /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_TREE_FAST_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  .todo_flags_finish: ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};
1235 | |
1236 | |
1237 | class pass_vrp : public gimple_opt_pass |
1238 | { |
1239 | public: |
1240 | pass_vrp (gcc::context *ctxt, const pass_data &data_, bool warn_p) |
1241 | : gimple_opt_pass (data_, ctxt), data (data_), |
1242 | warn_array_bounds_p (warn_p), final_p (false) |
1243 | { } |
1244 | |
1245 | /* opt_pass methods: */ |
1246 | opt_pass * clone () final override |
1247 | { return new pass_vrp (m_ctxt, data, false); } |
1248 | void set_pass_param (unsigned int n, bool param) final override |
1249 | { |
1250 | gcc_assert (n == 0); |
1251 | final_p = param; |
1252 | } |
1253 | bool gate (function *) final override { return flag_tree_vrp != 0; } |
1254 | unsigned int execute (function *fun) final override |
1255 | { |
1256 | // Check for fast vrp. |
1257 | if (&data == &pass_data_fast_vrp) |
1258 | return execute_fast_vrp (fun); |
1259 | |
1260 | return execute_ranger_vrp (fun, warn_array_bounds_p, final_p); |
1261 | } |
1262 | |
1263 | private: |
1264 | const pass_data &data; |
1265 | bool warn_array_bounds_p; |
1266 | bool final_p; |
1267 | }; // class pass_vrp |
1268 | |
/* Metadata for the assumptions pass; provides PROP_assumptions_done.  */

const pass_data pass_data_assumptions =
{
  .type: GIMPLE_PASS, /* type */
  .name: "assumptions" , /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_TREE_ASSUMPTIONS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_assumptions_done, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  .todo_flags_finish: 0, /* todo_flags_finish */
};
1281 | |
class pass_assumptions : public gimple_opt_pass
{
public:
  pass_assumptions (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_assumptions, ctxt)
  {}

  /* opt_pass methods: */
  // Only run on assume functions.
  bool gate (function *fun) final override { return fun->assume_function; }
  // For each parameter's default def, record any range computed by the
  // assume_query as the name's global range, then discard the function.
  unsigned int execute (function *) final override
  {
    assume_query query;
    if (dump_file)
      fprintf (stream: dump_file, format: "Assumptions :\n--------------\n" );

    for (tree arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
      {
	tree name = ssa_default_def (cfun, arg);
	// Skip parameters without a usable SSA default def.
	if (!name || !gimple_range_ssa_p (exp: name))
	  continue;
	tree type = TREE_TYPE (name);
	// Only types Value_Range can represent are processed.
	if (!Value_Range::supports_type_p (type))
	  continue;
	Value_Range assume_range (type);
	if (query.assume_range_p (r&: assume_range, name))
	  {
	    // Set the global range of NAME to anything calculated.
	    set_range_info (name, assume_range);
	    if (dump_file)
	      {
		print_generic_expr (dump_file, name, TDF_SLIM);
		fprintf (stream: dump_file, format: " -> " );
		assume_range.dump (dump_file);
		fputc (c: '\n', stream: dump_file);
	      }
	  }
      }
    if (dump_file)
      {
	fputc (c: '\n', stream: dump_file);
	gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
	if (dump_flags & TDF_DETAILS)
	  query.dump (f: dump_file);
      }
    // The body is no longer needed once the ranges are recorded.
    return TODO_discard_function;
  }

}; // class pass_assumptions
1330 | |
1331 | } // anon namespace |
1332 | |
/* Factory for the full VRP pass; this instance may warn about array
   bounds (warn_p is true).  */

gimple_opt_pass *
make_pass_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_vrp, true);
}

/* Factory for the early VRP pass (no array-bounds warnings).  */

gimple_opt_pass *
make_pass_early_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_early_vrp, false);
}

/* Factory for the fast VRP pass (no array-bounds warnings).  */

gimple_opt_pass *
make_pass_fast_vrp (gcc::context *ctxt)
{
  return new pass_vrp (ctxt, pass_data_fast_vrp, false);
}

/* Factory for the assumptions pass.  */

gimple_opt_pass *
make_pass_assumptions (gcc::context *ctx)
{
  return new pass_assumptions (ctx);
}
1356 | |