1 | /* Support routines for Value Range Propagation (VRP). |
2 | Copyright (C) 2005-2023 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify |
7 | it under the terms of the GNU General Public License as published by |
8 | the Free Software Foundation; either version 3, or (at your option) |
9 | any later version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, |
12 | but WITHOUT ANY WARRANTY; without even the implied warranty of |
13 | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
14 | GNU General Public License for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | #include "config.h" |
21 | #include "system.h" |
22 | #include "coretypes.h" |
23 | #include "backend.h" |
24 | #include "insn-codes.h" |
25 | #include "tree.h" |
26 | #include "gimple.h" |
27 | #include "ssa.h" |
28 | #include "optabs-tree.h" |
29 | #include "gimple-pretty-print.h" |
30 | #include "diagnostic-core.h" |
31 | #include "flags.h" |
32 | #include "fold-const.h" |
33 | #include "calls.h" |
34 | #include "cfganal.h" |
35 | #include "gimple-iterator.h" |
36 | #include "gimple-fold.h" |
37 | #include "tree-cfg.h" |
38 | #include "tree-ssa-loop-niter.h" |
39 | #include "tree-ssa-loop.h" |
40 | #include "intl.h" |
41 | #include "cfgloop.h" |
42 | #include "tree-scalar-evolution.h" |
43 | #include "tree-ssa-propagate.h" |
44 | #include "tree-chrec.h" |
45 | #include "omp-general.h" |
46 | #include "case-cfn-macros.h" |
47 | #include "alloc-pool.h" |
48 | #include "attribs.h" |
49 | #include "range.h" |
50 | #include "vr-values.h" |
51 | #include "cfghooks.h" |
52 | #include "range-op.h" |
53 | #include "gimple-range.h" |
54 | |
55 | /* Return true if op is in a boolean [0, 1] value-range. */ |
56 | |
57 | bool |
58 | simplify_using_ranges::op_with_boolean_value_range_p (tree op, gimple *s) |
59 | { |
60 | if (TYPE_PRECISION (TREE_TYPE (op)) == 1) |
61 | return true; |
62 | |
63 | if (integer_zerop (op) |
64 | || integer_onep (op)) |
65 | return true; |
66 | |
67 | if (TREE_CODE (op) != SSA_NAME) |
68 | return false; |
69 | |
70 | /* ?? Errr, this should probably check for [0,0] and [1,1] as well |
71 | as [0,1]. */ |
72 | value_range vr; |
73 | return (query->range_of_expr (r&: vr, expr: op, s) |
74 | && vr == range_true_and_false (TREE_TYPE (op))); |
75 | } |
76 | |
/* Helper function for simplify_internal_call_using_ranges and
   extract_range_basic.  Return true if OP0 SUBCODE OP1 for
   SUBCODE {PLUS,MINUS,MULT}_EXPR is known to never overflow or
   always overflow.  Set *OVF to true if it is known to always
   overflow.  */

static bool
check_for_binary_op_overflow (range_query *query,
			      enum tree_code subcode, tree type,
			      tree op0, tree op1, bool *ovf, gimple *s = NULL)
{
  /* Query ranges for both operands; fall back to VARYING when no
     usable range information is available.  */
  value_range vr0, vr1;
  if (!query->range_of_expr (r&: vr0, expr: op0, s) || vr0.undefined_p ())
    vr0.set_varying (TREE_TYPE (op0));
  if (!query->range_of_expr (r&: vr1, expr: op1, s) || vr1.undefined_p ())
    vr1.set_varying (TREE_TYPE (op1));

  /* Materialize the range bounds as trees for arith_overflowed_p.  */
  tree vr0min = wide_int_to_tree (TREE_TYPE (op0), cst: vr0.lower_bound ());
  tree vr0max = wide_int_to_tree (TREE_TYPE (op0), cst: vr0.upper_bound ());
  tree vr1min = wide_int_to_tree (TREE_TYPE (op1), cst: vr1.lower_bound ());
  tree vr1max = wide_int_to_tree (TREE_TYPE (op1), cst: vr1.upper_bound ());

  /* Check whether the extreme corners of the two ranges agree on
     overflowing or not.  For MINUS_EXPR the result's minimum pairs
     OP0's minimum with OP1's maximum (and vice versa), hence the
     operand swap below.  */
  *ovf = arith_overflowed_p (subcode, type, vr0min,
			     subcode == MINUS_EXPR ? vr1max : vr1min);
  if (arith_overflowed_p (subcode, type, vr0max,
			  subcode == MINUS_EXPR ? vr1min : vr1max) != *ovf)
    return false;
  /* Multiplication is not monotonic in either operand, so the two
     remaining corners must agree as well.  */
  if (subcode == MULT_EXPR)
    {
      if (arith_overflowed_p (subcode, type, vr0min, vr1max) != *ovf
	  || arith_overflowed_p (subcode, type, vr0max, vr1min) != *ovf)
	return false;
    }
  if (*ovf)
    {
      /* So far we found that there is an overflow on the boundaries.
	 That doesn't prove that there is an overflow even for all values
	 in between the boundaries.  For that compute widest2_int range
	 of the result and see if it doesn't overlap the range of
	 type.  */
      widest2_int wmin, wmax;
      widest2_int w[4];
      int i;
      signop sign0 = TYPE_SIGN (TREE_TYPE (op0));
      signop sign1 = TYPE_SIGN (TREE_TYPE (op1));
      /* w[0]/w[1] are OP0's bounds, w[2]/w[3] are OP1's bounds, all
	 extended to double width so the arithmetic below cannot wrap.  */
      w[0] = widest2_int::from (x: vr0.lower_bound (), sgn: sign0);
      w[1] = widest2_int::from (x: vr0.upper_bound (), sgn: sign0);
      w[2] = widest2_int::from (x: vr1.lower_bound (), sgn: sign1);
      w[3] = widest2_int::from (x: vr1.upper_bound (), sgn: sign1);
      /* Evaluate the operation at all four corner combinations and
	 accumulate the overall minimum and maximum of the results.  */
      for (i = 0; i < 4; i++)
	{
	  widest2_int wt;
	  switch (subcode)
	    {
	    case PLUS_EXPR:
	      wt = wi::add (x: w[i & 1], y: w[2 + (i & 2) / 2]);
	      break;
	    case MINUS_EXPR:
	      wt = wi::sub (x: w[i & 1], y: w[2 + (i & 2) / 2]);
	      break;
	    case MULT_EXPR:
	      wt = wi::mul (x: w[i & 1], y: w[2 + (i & 2) / 2]);
	      break;
	    default:
	      gcc_unreachable ();
	    }
	  if (i == 0)
	    {
	      wmin = wt;
	      wmax = wt;
	    }
	  else
	    {
	      wmin = wi::smin (x: wmin, y: wt);
	      wmax = wi::smax (x: wmax, y: wt);
	    }
	}
      /* The result of op0 CODE op1 is known to be in range
	 [wmin, wmax].  */
      widest2_int wtmin
	= widest2_int::from (x: irange_val_min (type), TYPE_SIGN (type));
      widest2_int wtmax
	= widest2_int::from (x: irange_val_max (type), TYPE_SIGN (type));
      /* If all values in [wmin, wmax] are smaller than
	 [wtmin, wtmax] or all are larger than [wtmin, wtmax],
	 the arithmetic operation will always overflow.  */
      if (wmax < wtmin || wmin > wtmax)
	return true;
      return false;
    }
  /* No corner overflowed, so the operation never overflows.  */
  return true;
}
169 | |
/* Set INIT, STEP, and DIRECTION to the corresponding values of NAME
   within LOOP, and return TRUE.  Otherwise return FALSE, and set R to
   the conservative range of NAME within the loop.  */

static bool
get_scev_info (vrange &r, tree name, gimple *stmt, class loop *l,
	       tree &init, tree &step, enum ev_direction &dir)
{
  /* Ask SCEV for the evolution of NAME, instantiating symbolic
     parameters so we get a concrete chrec where possible.  */
  tree ev = analyze_scalar_evolution (l, name);
  tree chrec = instantiate_parameters (loop: l, chrec: ev);
  tree type = TREE_TYPE (name);
  if (TREE_CODE (chrec) != POLYNOMIAL_CHREC)
    {
      r.set_varying (type);
      return false;
    }
  if (is_gimple_min_invariant (chrec))
    {
      /* NOTE(review): CHREC is a POLYNOMIAL_CHREC here because of the
	 early return above, so this branch looks unreachable --
	 confirm against upstream history.  */
      if (is_gimple_constant (t: chrec))
	r.set (chrec, chrec);
      else
	r.set_varying (type);
      return false;
    }

  init = initial_condition_in_loop_num (chrec, l->num);
  step = evolution_part_in_loop_num (chrec, l->num);
  /* Without both an initial value and a step we cannot reason.  */
  if (!init || !step)
    {
      r.set_varying (type);
      return false;
    }
  dir = scev_direction (chrec);
  /* Give up when the IV direction is unknown or the IV may wrap, as
     callers assume a non-wrapping evolution.  */
  if (dir == EV_DIR_UNKNOWN
      || scev_probably_wraps_p (NULL, init, step, stmt,
				get_chrec_loop (chrec), true))
    {
      r.set_varying (type);
      return false;
    }
  return true;
}
212 | |
213 | /* Return TRUE if STEP * NIT may overflow when calculated in TYPE. */ |
214 | |
215 | static bool |
216 | induction_variable_may_overflow_p (tree type, |
217 | const wide_int &step, const widest_int &nit) |
218 | { |
219 | wi::overflow_type ovf; |
220 | signop sign = TYPE_SIGN (type); |
221 | widest_int max_step = wi::mul (x: widest_int::from (x: step, sgn: sign), |
222 | y: nit, sgn: sign, overflow: &ovf); |
223 | |
224 | if (ovf || !wi::fits_to_tree_p (x: max_step, type)) |
225 | return true; |
226 | |
227 | /* For a signed type we have to check whether the result has the |
228 | expected signedness which is that of the step as number of |
229 | iterations is unsigned. */ |
230 | return (sign == SIGNED |
231 | && wi::gts_p (x: max_step, y: 0) != wi::gts_p (x: step, y: 0)); |
232 | } |
233 | |
234 | /* Set R to the range from BEGIN to END, assuming the direction of the |
235 | loop is DIR. */ |
236 | |
237 | static void |
238 | range_from_loop_direction (irange &r, tree type, |
239 | const irange &begin, const irange &end, |
240 | ev_direction dir) |
241 | { |
242 | signop sign = TYPE_SIGN (type); |
243 | |
244 | if (begin.undefined_p () || end.undefined_p ()) |
245 | r.set_varying (type); |
246 | else if (dir == EV_DIR_GROWS) |
247 | { |
248 | if (wi::gt_p (x: begin.lower_bound (), y: end.upper_bound (), sgn: sign)) |
249 | r.set_varying (type); |
250 | else |
251 | r = int_range<1> (type, begin.lower_bound (), end.upper_bound ()); |
252 | } |
253 | else |
254 | { |
255 | if (wi::gt_p (x: end.lower_bound (), y: begin.upper_bound (), sgn: sign)) |
256 | r.set_varying (type); |
257 | else |
258 | r = int_range<1> (type, end.lower_bound (), begin.upper_bound ()); |
259 | } |
260 | } |
261 | |
/* Set V to the range of NAME in STMT within LOOP.  Return TRUE if a
   range was found.  */

bool
range_of_var_in_loop (vrange &v, tree name, class loop *l, gimple *stmt,
		      range_query *query)
{
  tree init, step;
  enum ev_direction dir;
  /* If SCEV has no usable evolution, get_scev_info has already stored
     a conservative range in V and we are done.  */
  if (!get_scev_info (r&: v, name, stmt, l, init, step, dir))
    return true;

  // Calculate ranges for the values from SCEV.
  irange &r = as_a <irange> (v);
  tree type = TREE_TYPE (init);
  /* MAX_INIT keeps its freshly-constructed state (presumably VARYING
     -- confirm the int_range(type) constructor) unless narrowed in
     the singleton case below.  */
  int_range<2> rinit (type), rstep (type), max_init (type);
  if (!query->range_of_expr (r&: rinit, expr: init, stmt)
      || !query->range_of_expr (r&: rstep, expr: step, stmt))
    return false;

  // Calculate the final range of NAME if possible.
  if (rinit.singleton_p () && rstep.singleton_p ())
    {
      widest_int nit;
      if (!max_loop_iterations (l, &nit))
	return false;

      /* Only compute a final value when STEP * NIT cannot overflow.  */
      if (!induction_variable_may_overflow_p (type, step: rstep.lower_bound (), nit))
	{
	  // Calculate the max bounds for init (init + niter * step).
	  wide_int w = wide_int::from (x: nit, TYPE_PRECISION (type), TYPE_SIGN (type));
	  int_range<1> niter (type, w, w);
	  int_range_max max_step;
	  range_op_handler mult_handler (MULT_EXPR);
	  range_op_handler plus_handler (PLUS_EXPR);
	  if (!mult_handler.fold_range (r&: max_step, type, lh: niter, rh: rstep)
	      || !plus_handler.fold_range (r&: max_init, type, lh: rinit, rh: max_step))
	    return false;
	}
    }
  range_from_loop_direction (r, type, begin: rinit, end: max_init, dir);
  return true;
}
305 | |
306 | /* Helper function for vrp_evaluate_conditional_warnv & other |
307 | optimizers. */ |
308 | |
309 | tree |
310 | simplify_using_ranges::fold_cond_with_ops (enum tree_code code, |
311 | tree op0, tree op1, gimple *s) |
312 | { |
313 | int_range_max r0, r1; |
314 | if (!query->range_of_expr (r&: r0, expr: op0, s) |
315 | || !query->range_of_expr (r&: r1, expr: op1, s)) |
316 | return NULL_TREE; |
317 | |
318 | tree type = TREE_TYPE (op0); |
319 | int_range<1> res; |
320 | range_op_handler handler (code); |
321 | if (handler && handler.fold_range (r&: res, type, lh: r0, rh: r1)) |
322 | { |
323 | if (res == range_true (type)) |
324 | return boolean_true_node; |
325 | if (res == range_false (type)) |
326 | return boolean_false_node; |
327 | } |
328 | return NULL; |
329 | } |
330 | |
/* Helper function for legacy_fold_cond.  Try to fold the condition of
   STMT to a constant using range information.  Return
   boolean_true_node or boolean_false_node on success, NULL_TREE when
   the condition cannot be decided.  */

tree
simplify_using_ranges::legacy_fold_cond_overflow (gimple *stmt)
{
  tree ret;
  tree_code code = gimple_cond_code (gs: stmt);
  tree op0 = gimple_cond_lhs (gs: stmt);
  tree op1 = gimple_cond_rhs (gs: stmt);

  /* We only deal with integral and pointer types.  */
  if (!INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && !POINTER_TYPE_P (TREE_TYPE (op0)))
    return NULL_TREE;

  /* If OP0 CODE OP1 is an overflow comparison, if it can be expressed
     as a simple equality test, then prefer that over its current form
     for evaluation.

     An overflow test which collapses to an equality test can always be
     expressed as a comparison of one argument against zero.  Overflow
     occurs when the chosen argument is zero and does not occur if the
     chosen argument is not zero.  */
  tree x;
  if (overflow_comparison_p (code, op0, op1, &x))
    {
      wide_int max = wi::max_value (TYPE_PRECISION (TREE_TYPE (op0)), UNSIGNED);
      /* B = A - 1; if (A < B) -> B = A - 1; if (A == 0)
         B = A - 1; if (A > B) -> B = A - 1; if (A != 0)
         B = A + 1; if (B < A) -> B = A + 1; if (B == 0)
         B = A + 1; if (B > A) -> B = A + 1; if (B != 0) */
      if (integer_zerop (x))
	{
	  op1 = x;
	  code = (code == LT_EXPR || code == LE_EXPR) ? EQ_EXPR : NE_EXPR;
	}
      /* B = A + 1; if (A > B) -> B = A + 1; if (B == 0)
         B = A + 1; if (A < B) -> B = A + 1; if (B != 0)
         B = A - 1; if (B > A) -> B = A - 1; if (A == 0)
         B = A - 1; if (B < A) -> B = A - 1; if (A != 0) */
      else if (wi::to_wide (t: x) == max - 1)
	{
	  op0 = op1;
	  op1 = wide_int_to_tree (TREE_TYPE (op0), cst: 0);
	  code = (code == GT_EXPR || code == GE_EXPR) ? EQ_EXPR : NE_EXPR;
	}
      else
	{
	  /* VRO is the range OP0 must lie in for the overflow test to
	     pass; VRI is its complement.  */
	  value_range vro, vri;
	  tree type = TREE_TYPE (op0);
	  if (code == GT_EXPR || code == GE_EXPR)
	    {
	      vro.set (type,
		       wi::to_wide (TYPE_MIN_VALUE (type)),
		       wi::to_wide (t: x), VR_ANTI_RANGE);
	      vri.set (type,
		       wi::to_wide (TYPE_MIN_VALUE (type)),
		       wi::to_wide (t: x));
	    }
	  else if (code == LT_EXPR || code == LE_EXPR)
	    {
	      vro.set (type,
		       wi::to_wide (TYPE_MIN_VALUE (type)),
		       wi::to_wide (t: x));
	      vri.set (type,
		       wi::to_wide (TYPE_MIN_VALUE (type)),
		       wi::to_wide (t: x),
		       VR_ANTI_RANGE);
	    }
	  else
	    gcc_unreachable ();
	  value_range vr0;
	  if (!query->range_of_expr (r&: vr0, expr: op0, stmt))
	    vr0.set_varying (TREE_TYPE (op0));
	  /* If vro, the range for OP0 to pass the overflow test, has
	     no intersection with *vr0, OP0's known range, then the
	     overflow test can't pass, so return the node for false.
	     If it is the inverted range, vri, that has no
	     intersection, then the overflow test must pass, so return
	     the node for true.  In other cases, we could proceed with
	     a simplified condition comparing OP0 and X, with LE_EXPR
	     for previously LE_ or LT_EXPR and GT_EXPR otherwise, but
	     the comments next to the enclosing if suggest it's not
	     generally profitable to do so.  */
	  vro.intersect (vr0);
	  if (vro.undefined_p ())
	    return boolean_false_node;
	  vri.intersect (vr0);
	  if (vri.undefined_p ())
	    return boolean_true_node;
	}
    }

  /* Finally, fold the (possibly rewritten) comparison using the
     operand ranges.  */
  if ((ret = fold_cond_with_ops (code, op0, op1, s: stmt)))
    return ret;
  return NULL_TREE;
}
428 | |
429 | /* Visit conditional statement STMT. If we can determine which edge |
430 | will be taken out of STMT's basic block, record it in |
431 | *TAKEN_EDGE_P. Otherwise, set *TAKEN_EDGE_P to NULL. */ |
432 | |
433 | void |
434 | simplify_using_ranges::legacy_fold_cond (gcond *stmt, edge *taken_edge_p) |
435 | { |
436 | tree val; |
437 | |
438 | *taken_edge_p = NULL; |
439 | |
440 | if (dump_file && (dump_flags & TDF_DETAILS)) |
441 | { |
442 | tree use; |
443 | ssa_op_iter i; |
444 | |
445 | fprintf (stream: dump_file, format: "\nVisiting conditional with predicate: " ); |
446 | print_gimple_stmt (dump_file, stmt, 0); |
447 | fprintf (stream: dump_file, format: "\nWith known ranges\n" ); |
448 | |
449 | FOR_EACH_SSA_TREE_OPERAND (use, stmt, i, SSA_OP_USE) |
450 | { |
451 | fprintf (stream: dump_file, format: "\t" ); |
452 | print_generic_expr (dump_file, use); |
453 | fprintf (stream: dump_file, format: ": " ); |
454 | Value_Range r (TREE_TYPE (use)); |
455 | query->range_of_expr (r, expr: use, stmt); |
456 | r.dump (dump_file); |
457 | } |
458 | |
459 | fprintf (stream: dump_file, format: "\n" ); |
460 | } |
461 | |
462 | val = legacy_fold_cond_overflow (stmt); |
463 | if (val) |
464 | *taken_edge_p = find_taken_edge (gimple_bb (g: stmt), val); |
465 | |
466 | if (dump_file && (dump_flags & TDF_DETAILS)) |
467 | { |
468 | fprintf (stream: dump_file, format: "\nPredicate evaluates to: " ); |
469 | if (val == NULL_TREE) |
470 | fprintf (stream: dump_file, format: "DON'T KNOW\n" ); |
471 | else |
472 | print_generic_stmt (dump_file, val); |
473 | } |
474 | } |
475 | |
/* Searches the case label vector VEC for the ranges of CASE_LABELs that are
   used in range VR.  The indices are placed in MIN_IDX1, MAX_IDX1, MIN_IDX2
   and MAX_IDX2.  If the ranges of CASE_LABELs are empty then MAX_IDX1 <
   MIN_IDX1.  Returns true if the default label is not needed.  */

static bool
find_case_label_ranges (gswitch *stmt, const value_range *vr,
			size_t *min_idx1, size_t *max_idx1,
			size_t *min_idx2, size_t *max_idx2)
{
  size_t i, j, k, l;
  unsigned int n = gimple_switch_num_labels (gs: stmt);
  bool take_default;
  tree case_low, case_high;
  tree min, max;
  value_range_kind kind = get_legacy_range (*vr, min, max);

  gcc_checking_assert (!vr->varying_p () && !vr->undefined_p ());

  /* Find the case labels [i, j] covering [MIN, MAX]; if they don't
     cover the whole range the default label is still reachable.  */
  take_default = !find_case_label_range (stmt, min, max, &i, &j);

  /* Set second range to empty.  */
  *min_idx2 = 1;
  *max_idx2 = 0;

  if (kind == VR_RANGE)
    {
      /* A plain range maps to the single label span [i, j].  */
      *min_idx1 = i;
      *max_idx1 = j;
      return !take_default;
    }

  /* From here on KIND is an anti-range.  Pessimistically cover all
     case labels with the first span.  */
  *min_idx1 = 1;
  *max_idx1 = n - 1;

  /* [i, j] empty: the anti-range excludes no label, keep them all.  */
  if (i > j)
    return false;

  /* Make sure all the values of case labels [i , j] are contained in
     range [MIN, MAX].  */
  case_low = CASE_LOW (gimple_switch_label (stmt, i));
  case_high = CASE_HIGH (gimple_switch_label (stmt, j));
  if (tree_int_cst_compare (t1: case_low, t2: min) < 0)
    i += 1;
  if (case_high != NULL_TREE
      && tree_int_cst_compare (t1: max, t2: case_high) < 0)
    j -= 1;

  if (i > j)
    return false;

  /* If the range spans case labels [i, j], the corresponding anti-range spans
     the labels [1, i - 1] and [j + 1, n - 1].  */
  k = j + 1;
  l = n - 1;
  if (k > l)
    {
      /* No labels after J: mark the second span empty.  */
      k = 1;
      l = 0;
    }

  j = i - 1;
  i = 1;
  if (i > j)
    {
      /* No labels before the excluded span: promote the second span
	 to first and mark the second span empty.  */
      i = k;
      j = l;
      k = 1;
      l = 0;
    }

  *min_idx1 = i;
  *max_idx1 = j;
  *min_idx2 = k;
  *max_idx2 = l;
  return false;
}
554 | |
/* Simplify boolean operations if the source is known
   to be already a boolean.  Return true if STMT was changed.  */
bool
simplify_using_ranges::simplify_truth_ops_using_ranges
					(gimple_stmt_iterator *gsi,
					 gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (gs: stmt);
  tree lhs, op0, op1;
  bool need_conversion;

  /* We handle only !=/== case here.  */
  gcc_assert (rhs_code == EQ_EXPR || rhs_code == NE_EXPR);

  /* Both operands must be known to be boolean ([0, 1]) values.  */
  op0 = gimple_assign_rhs1 (gs: stmt);
  if (!op_with_boolean_value_range_p (op: op0, s: stmt))
    return false;

  op1 = gimple_assign_rhs2 (gs: stmt);
  if (!op_with_boolean_value_range_p (op: op1, s: stmt))
    return false;

  /* Reduce number of cases to handle to NE_EXPR.  As there is no
     BIT_XNOR_EXPR we cannot replace A == B with a single statement.  */
  if (rhs_code == EQ_EXPR)
    {
      /* A == C becomes A != (C ^ 1) for boolean A and constant C.  */
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = int_const_binop (BIT_XOR_EXPR, op1,
			       build_int_cst (TREE_TYPE (op1), 1));
      else
	return false;
    }

  lhs = gimple_assign_lhs (gs: stmt);
  need_conversion
    = !useless_type_conversion_p (TREE_TYPE (lhs), TREE_TYPE (op0));

  /* Make sure to not sign-extend a 1-bit 1 when converting the result.  */
  if (need_conversion
      && !TYPE_UNSIGNED (TREE_TYPE (op0))
      && TYPE_PRECISION (TREE_TYPE (op0)) == 1
      && TYPE_PRECISION (TREE_TYPE (lhs)) > 1)
    return false;

  /* For A != 0 we can substitute A itself.  */
  if (integer_zerop (op1))
    gimple_assign_set_rhs_with_ops (gsi,
				    code: need_conversion
				    ? NOP_EXPR : TREE_CODE (op0), op1: op0);
  /* For A != B we substitute A ^ B.  Either with conversion.  */
  else if (need_conversion)
    {
      tree tem = make_ssa_name (TREE_TYPE (op0));
      gassign *newop
	= gimple_build_assign (tem, BIT_XOR_EXPR, op0, op1);
      gsi_insert_before (gsi, newop, GSI_SAME_STMT);
      /* The XOR of two booleans is again boolean; record the [0, 1]
	 range on the new SSA name.  */
      if (INTEGRAL_TYPE_P (TREE_TYPE (tem))
	  && TYPE_PRECISION (TREE_TYPE (tem)) > 1)
	{
	  value_range vr (TREE_TYPE (tem),
			  wi::zero (TYPE_PRECISION (TREE_TYPE (tem))),
			  wi::one (TYPE_PRECISION (TREE_TYPE (tem))));
	  set_range_info (tem, vr);
	}
      gimple_assign_set_rhs_with_ops (gsi, code: NOP_EXPR, op1: tem);
    }
  /* Or without.  */
  else
    gimple_assign_set_rhs_with_ops (gsi, code: BIT_XOR_EXPR, op1: op0, op2: op1);
  update_stmt (s: gsi_stmt (i: *gsi));
  fold_stmt (gsi, follow_single_use_edges);

  return true;
}
629 | |
/* Simplify a division or modulo operator to a right shift or bitwise and
   if the first operand is unsigned or is greater than zero and the second
   operand is an exact power of two.  For TRUNC_MOD_EXPR op0 % op1 with
   constant op1 (op1min = op1) or with op1 in [op1min, op1max] range,
   optimize it into just op0 if op0's range is known to be a subset of
   [-op1min + 1, op1min - 1] for signed and [0, op1min - 1] for unsigned
   modulo.  Return true if STMT was changed.  */

bool
simplify_using_ranges::simplify_div_or_mod_using_ranges
					(gimple_stmt_iterator *gsi,
					 gimple *stmt)
{
  enum tree_code rhs_code = gimple_assign_rhs_code (gs: stmt);
  tree val = NULL;
  tree op0 = gimple_assign_rhs1 (gs: stmt);
  tree op1 = gimple_assign_rhs2 (gs: stmt);
  tree op0min = NULL_TREE, op0max = NULL_TREE;
  tree op1min = op1;
  value_range vr;

  /* Determine constant bounds for OP0 when possible.  */
  if (TREE_CODE (op0) == INTEGER_CST)
    {
      op0min = op0;
      op0max = op0;
    }
  else
    {
      if (!query->range_of_expr (r&: vr, expr: op0, stmt))
	vr.set_varying (TREE_TYPE (op0));
      if (!vr.varying_p () && !vr.undefined_p ())
	{
	  tree type = vr.type ();
	  op0min = wide_int_to_tree (type, cst: vr.lower_bound ());
	  op0max = wide_int_to_tree (type, cst: vr.upper_bound ());
	}
    }

  /* For modulo by an SSA name, use the lower bound of its range as a
     conservative stand-in for OP1MIN.  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1) == SSA_NAME)
    {
      value_range vr1;
      if (!query->range_of_expr (r&: vr1, expr: op1, stmt))
	vr1.set_varying (TREE_TYPE (op1));
      if (!vr1.varying_p () && !vr1.undefined_p ())
	op1min = wide_int_to_tree (type: vr1.type (), cst: vr1.lower_bound ());
    }
  /* OP0 % OP1 is just OP0 when OP0 is known to lie inside the result
     range of the modulo (see the function comment).  */
  if (rhs_code == TRUNC_MOD_EXPR
      && TREE_CODE (op1min) == INTEGER_CST
      && tree_int_cst_sgn (op1min) == 1
      && op0max
      && tree_int_cst_lt (t1: op0max, t2: op1min))
    {
      if (TYPE_UNSIGNED (TREE_TYPE (op0))
	  || tree_int_cst_sgn (op0min) >= 0
	  || tree_int_cst_lt (fold_unary (NEGATE_EXPR, TREE_TYPE (op1min), op1min),
			      t2: op0min))
	{
	  /* If op0 already has the range op0 % op1 has,
	     then TRUNC_MOD_EXPR won't change anything.  */
	  gimple_assign_set_rhs_from_tree (gsi, op0);
	  return true;
	}
    }

  if (TREE_CODE (op0) != SSA_NAME)
    return false;

  if (!integer_pow2p (op1))
    {
      /* X % -Y can be only optimized into X % Y either if
	 X is not INT_MIN, or Y is not -1.  Fold it now, as after
	 remove_range_assertions the range info might be not available
	 anymore.  */
      if (rhs_code == TRUNC_MOD_EXPR
	  && fold_stmt (gsi, follow_single_use_edges))
	return true;
      return false;
    }

  /* The shift/mask rewrites below need OP0 to be known nonnegative.  */
  if (TYPE_UNSIGNED (TREE_TYPE (op0)))
    val = integer_one_node;
  else
    {
      tree zero = build_zero_cst (TREE_TYPE (op0));
      val = fold_cond_with_ops (code: GE_EXPR, op0, op1: zero, s: stmt);
    }

  if (val && integer_onep (val))
    {
      tree t;

      if (rhs_code == TRUNC_DIV_EXPR)
	{
	  /* X / 2**N -> X >> N.  */
	  t = build_int_cst (integer_type_node, tree_log2 (op1));
	  gimple_assign_set_rhs_code (s: stmt, code: RSHIFT_EXPR);
	  gimple_assign_set_rhs1 (gs: stmt, rhs: op0);
	  gimple_assign_set_rhs2 (gs: stmt, rhs: t);
	}
      else
	{
	  /* X % 2**N -> X & (2**N - 1).  */
	  t = build_int_cst (TREE_TYPE (op1), 1);
	  t = int_const_binop (MINUS_EXPR, op1, t);
	  t = fold_convert (TREE_TYPE (op0), t);

	  gimple_assign_set_rhs_code (s: stmt, code: BIT_AND_EXPR);
	  gimple_assign_set_rhs1 (gs: stmt, rhs: op0);
	  gimple_assign_set_rhs2 (gs: stmt, rhs: t);
	}

      update_stmt (s: stmt);
      fold_stmt (gsi, follow_single_use_edges);
      return true;
    }

  return false;
}
747 | |
748 | /* Simplify a min or max if the ranges of the two operands are |
749 | disjoint. Return true if we do simplify. */ |
750 | |
751 | bool |
752 | simplify_using_ranges::simplify_min_or_max_using_ranges |
753 | (gimple_stmt_iterator *gsi, |
754 | gimple *stmt) |
755 | { |
756 | tree op0 = gimple_assign_rhs1 (gs: stmt); |
757 | tree op1 = gimple_assign_rhs2 (gs: stmt); |
758 | tree val; |
759 | |
760 | val = fold_cond_with_ops (code: LE_EXPR, op0, op1, s: stmt); |
761 | if (!val) |
762 | val = fold_cond_with_ops (code: LT_EXPR, op0, op1, s: stmt); |
763 | |
764 | if (val) |
765 | { |
766 | /* VAL == TRUE -> OP0 < or <= op1 |
767 | VAL == FALSE -> OP0 > or >= op1. */ |
768 | tree res = ((gimple_assign_rhs_code (gs: stmt) == MAX_EXPR) |
769 | == integer_zerop (val)) ? op0 : op1; |
770 | gimple_assign_set_rhs_from_tree (gsi, res); |
771 | return true; |
772 | } |
773 | |
774 | return false; |
775 | } |
776 | |
777 | /* If the operand to an ABS_EXPR is >= 0, then eliminate the |
778 | ABS_EXPR. If the operand is <= 0, then simplify the |
779 | ABS_EXPR into a NEGATE_EXPR. */ |
780 | |
781 | bool |
782 | simplify_using_ranges::simplify_abs_using_ranges (gimple_stmt_iterator *gsi, |
783 | gimple *stmt) |
784 | { |
785 | tree op = gimple_assign_rhs1 (gs: stmt); |
786 | tree zero = build_zero_cst (TREE_TYPE (op)); |
787 | tree val = fold_cond_with_ops (code: LE_EXPR, op0: op, op1: zero, s: stmt); |
788 | |
789 | if (!val) |
790 | { |
791 | /* The range is neither <= 0 nor > 0. Now see if it is |
792 | either < 0 or >= 0. */ |
793 | val = fold_cond_with_ops (code: LT_EXPR, op0: op, op1: zero, s: stmt); |
794 | } |
795 | if (val) |
796 | { |
797 | gimple_assign_set_rhs1 (gs: stmt, rhs: op); |
798 | if (integer_zerop (val)) |
799 | gimple_assign_set_rhs_code (s: stmt, code: SSA_NAME); |
800 | else |
801 | gimple_assign_set_rhs_code (s: stmt, code: NEGATE_EXPR); |
802 | update_stmt (s: stmt); |
803 | fold_stmt (gsi, follow_single_use_edges); |
804 | return true; |
805 | } |
806 | return false; |
807 | } |
808 | |
809 | /* value_range wrapper for wi_set_zero_nonzero_bits. |
810 | |
811 | Return TRUE if VR was a constant range and we were able to compute |
812 | the bit masks. */ |
813 | |
814 | static bool |
815 | vr_set_zero_nonzero_bits (const tree expr_type, |
816 | const irange *vr, |
817 | wide_int *may_be_nonzero, |
818 | wide_int *must_be_nonzero) |
819 | { |
820 | if (vr->varying_p () || vr->undefined_p ()) |
821 | { |
822 | *may_be_nonzero = wi::minus_one (TYPE_PRECISION (expr_type)); |
823 | *must_be_nonzero = wi::zero (TYPE_PRECISION (expr_type)); |
824 | return false; |
825 | } |
826 | wi_set_zero_nonzero_bits (type: expr_type, vr->lower_bound (), vr->upper_bound (), |
827 | maybe_nonzero&: *may_be_nonzero, mustbe_nonzero&: *must_be_nonzero); |
828 | return true; |
829 | } |
830 | |
831 | /* Optimize away redundant BIT_AND_EXPR and BIT_IOR_EXPR. |
832 | If all the bits that are being cleared by & are already |
833 | known to be zero from VR, or all the bits that are being |
834 | set by | are already known to be one from VR, the bit |
835 | operation is redundant. */ |
836 | |
837 | bool |
838 | simplify_using_ranges::simplify_bit_ops_using_ranges |
839 | (gimple_stmt_iterator *gsi, |
840 | gimple *stmt) |
841 | { |
842 | tree op0 = gimple_assign_rhs1 (gs: stmt); |
843 | tree op1 = gimple_assign_rhs2 (gs: stmt); |
844 | tree op = NULL_TREE; |
845 | value_range vr0, vr1; |
846 | wide_int may_be_nonzero0, may_be_nonzero1; |
847 | wide_int must_be_nonzero0, must_be_nonzero1; |
848 | wide_int mask; |
849 | |
850 | if (!query->range_of_expr (r&: vr0, expr: op0, stmt) |
851 | || vr0.undefined_p ()) |
852 | return false; |
853 | if (!query->range_of_expr (r&: vr1, expr: op1, stmt) |
854 | || vr1.undefined_p ()) |
855 | return false; |
856 | |
857 | if (!vr_set_zero_nonzero_bits (TREE_TYPE (op0), vr: &vr0, may_be_nonzero: &may_be_nonzero0, |
858 | must_be_nonzero: &must_be_nonzero0)) |
859 | return false; |
860 | if (!vr_set_zero_nonzero_bits (TREE_TYPE (op1), vr: &vr1, may_be_nonzero: &may_be_nonzero1, |
861 | must_be_nonzero: &must_be_nonzero1)) |
862 | return false; |
863 | |
864 | switch (gimple_assign_rhs_code (gs: stmt)) |
865 | { |
866 | case BIT_AND_EXPR: |
867 | mask = wi::bit_and_not (x: may_be_nonzero0, y: must_be_nonzero1); |
868 | if (mask == 0) |
869 | { |
870 | op = op0; |
871 | break; |
872 | } |
873 | mask = wi::bit_and_not (x: may_be_nonzero1, y: must_be_nonzero0); |
874 | if (mask == 0) |
875 | { |
876 | op = op1; |
877 | break; |
878 | } |
879 | break; |
880 | case BIT_IOR_EXPR: |
881 | mask = wi::bit_and_not (x: may_be_nonzero0, y: must_be_nonzero1); |
882 | if (mask == 0) |
883 | { |
884 | op = op1; |
885 | break; |
886 | } |
887 | mask = wi::bit_and_not (x: may_be_nonzero1, y: must_be_nonzero0); |
888 | if (mask == 0) |
889 | { |
890 | op = op0; |
891 | break; |
892 | } |
893 | break; |
894 | default: |
895 | gcc_unreachable (); |
896 | } |
897 | |
898 | if (op == NULL_TREE) |
899 | return false; |
900 | |
901 | gimple_assign_set_rhs_with_ops (gsi, TREE_CODE (op), op1: op); |
902 | update_stmt (s: gsi_stmt (i: *gsi)); |
903 | return true; |
904 | } |
905 | |
/* We are comparing trees OP0 and OP1 using COND_CODE.  OP0 has
   a known value range VR.

   If there is one and only one value which will satisfy the
   conditional, then return that value.  Else return NULL.  */

static tree
test_for_singularity (enum tree_code cond_code, tree op0,
		      tree op1, const value_range *vr)
{
  tree min = NULL;
  tree max = NULL;

  /* Extract minimum/maximum values which satisfy the conditional as it was
     written.  */
  if (cond_code == LE_EXPR || cond_code == LT_EXPR)
    {
      min = TYPE_MIN_VALUE (TREE_TYPE (op0));

      max = op1;
      if (cond_code == LT_EXPR)
	{
	  /* OP0 < OP1 is the same as OP0 <= OP1 - 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  max = fold_build2 (MINUS_EXPR, TREE_TYPE (op0), max, one);
	  /* Signal to compare_values_warnv this expr doesn't overflow.  */
	  if (EXPR_P (max))
	    suppress_warning (max, OPT_Woverflow);
	}
    }
  else if (cond_code == GE_EXPR || cond_code == GT_EXPR)
    {
      max = TYPE_MAX_VALUE (TREE_TYPE (op0));

      min = op1;
      if (cond_code == GT_EXPR)
	{
	  /* OP0 > OP1 is the same as OP0 >= OP1 + 1.  */
	  tree one = build_int_cst (TREE_TYPE (op0), 1);
	  min = fold_build2 (PLUS_EXPR, TREE_TYPE (op0), min, one);
	  /* Signal to compare_values_warnv this expr doesn't overflow.  */
	  if (EXPR_P (min))
	    suppress_warning (min, OPT_Woverflow);
	}
    }

  /* Now refine the minimum and maximum values using any
     value range information we have for op0.  */
  if (min && max)
    {
      tree type = TREE_TYPE (op0);
      tree tmin = wide_int_to_tree (type, cst: vr->lower_bound ());
      tree tmax = wide_int_to_tree (type, cst: vr->upper_bound ());
      /* Tighten [MIN, MAX] wherever VR's bounds are stricter.  */
      if (compare_values (tmin, min) == 1)
	min = tmin;
      if (compare_values (tmax, max) == -1)
	max = tmax;

      /* If the new min/max values have converged to a single value,
	 then there is only one value which can satisfy the condition,
	 return that value.  */
      if (operand_equal_p (min, max, flags: 0) && is_gimple_min_invariant (min))
	return min;
    }
  return NULL;
}
973 | |
974 | /* Return whether the value range *VR fits in an integer type specified |
975 | by PRECISION and UNSIGNED_P. */ |
976 | |
977 | bool |
978 | range_fits_type_p (const irange *vr, |
979 | unsigned dest_precision, signop dest_sgn) |
980 | { |
981 | tree src_type; |
982 | unsigned src_precision; |
983 | widest_int tem; |
984 | signop src_sgn; |
985 | |
986 | /* We can only handle integral and pointer types. */ |
987 | src_type = vr->type (); |
988 | if (!INTEGRAL_TYPE_P (src_type) |
989 | && !POINTER_TYPE_P (src_type)) |
990 | return false; |
991 | |
992 | /* An extension is fine unless VR is SIGNED and dest_sgn is UNSIGNED, |
993 | and so is an identity transform. */ |
994 | src_precision = TYPE_PRECISION (vr->type ()); |
995 | src_sgn = TYPE_SIGN (src_type); |
996 | if ((src_precision < dest_precision |
997 | && !(dest_sgn == UNSIGNED && src_sgn == SIGNED)) |
998 | || (src_precision == dest_precision && src_sgn == dest_sgn)) |
999 | return true; |
1000 | |
1001 | /* Now we can only handle ranges with constant bounds. */ |
1002 | if (vr->undefined_p () || vr->varying_p ()) |
1003 | return false; |
1004 | |
1005 | wide_int vrmin = vr->lower_bound (); |
1006 | wide_int vrmax = vr->upper_bound (); |
1007 | |
1008 | /* For sign changes, the MSB of the wide_int has to be clear. |
1009 | An unsigned value with its MSB set cannot be represented by |
1010 | a signed wide_int, while a negative value cannot be represented |
1011 | by an unsigned wide_int. */ |
1012 | if (src_sgn != dest_sgn |
1013 | && (wi::lts_p (x: vrmin, y: 0) || wi::lts_p (x: vrmax, y: 0))) |
1014 | return false; |
1015 | |
1016 | /* Then we can perform the conversion on both ends and compare |
1017 | the result for equality. */ |
1018 | signop sign = TYPE_SIGN (vr->type ()); |
1019 | tem = wi::ext (x: widest_int::from (x: vrmin, sgn: sign), offset: dest_precision, sgn: dest_sgn); |
1020 | if (tem != widest_int::from (x: vrmin, sgn: sign)) |
1021 | return false; |
1022 | tem = wi::ext (x: widest_int::from (x: vrmax, sgn: sign), offset: dest_precision, sgn: dest_sgn); |
1023 | if (tem != widest_int::from (x: vrmax, sgn: sign)) |
1024 | return false; |
1025 | |
1026 | return true; |
1027 | } |
1028 | |
// Clear edge E of EDGE_EXECUTABLE (it is unexecutable).  If it wasn't
// previously clear, propagate to successor blocks if appropriate.

void
simplify_using_ranges::set_and_propagate_unexecutable (edge e)
{
  // If not_executable is already set, we're done.
  // This works in the absence of a flag as well.
  if ((e->flags & m_not_executable_flag) == m_not_executable_flag)
    return;

  e->flags |= m_not_executable_flag;
  // Remember the edge so cleanup_edges_and_switches can clear the flag.
  m_flag_set_edges.safe_push (obj: e);

  // Check if the destination block needs to propagate the property.
  basic_block bb = e->dest;

  // If any incoming edge is executable, we are done.
  edge_iterator ei;
  FOR_EACH_EDGE (e, ei, bb->preds)
    if ((e->flags & m_not_executable_flag) == 0)
      return;

  // Every predecessor edge is unexecutable, so this block is also
  // unexecutable; propagate recursively to all exit edges as well.
  FOR_EACH_EDGE (e, ei, bb->succs)
    set_and_propagate_unexecutable (e);
}
1056 | |
/* If COND can be folded entirely as TRUE or FALSE, rewrite the
   conditional as such, and return TRUE.  Edges that become dead as a
   result are marked via set_and_propagate_unexecutable.  */

bool
simplify_using_ranges::fold_cond (gcond *cond)
{
  int_range_max r;
  /* Ask the range query whether the condition computes a single value
     (i.e. it is always true or always false).  */
  if (query->range_of_stmt (r, cond) && r.singleton_p ())
    {
      // COND has already been folded if arguments are constant.
      if (TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
	  && TREE_CODE (gimple_cond_rhs (cond)) != SSA_NAME)
	return false;
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: "Folding predicate ");
	  print_gimple_expr (dump_file, cond, 0);
	  fprintf (stream: dump_file, format: " to ");
	}
      edge e0 = EDGE_SUCC (gimple_bb (cond), 0);
      edge e1 = EDGE_SUCC (gimple_bb (cond), 1);
      if (r.zero_p ())
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: "0\n");
	  /* Always false: the successor carrying EDGE_TRUE_VALUE
	     (whichever of the two it is) becomes unexecutable.  */
	  gimple_cond_make_false (gs: cond);
	  if (e0->flags & EDGE_TRUE_VALUE)
	    set_and_propagate_unexecutable (e0);
	  else
	    set_and_propagate_unexecutable (e1);
	}
      else
	{
	  if (dump_file)
	    fprintf (stream: dump_file, format: "1\n");
	  /* Always true: the false edge becomes unexecutable.  */
	  gimple_cond_make_true (gs: cond);
	  if (e0->flags & EDGE_FALSE_VALUE)
	    set_and_propagate_unexecutable (e0);
	  else
	    set_and_propagate_unexecutable (e1);
	}
      update_stmt (s: cond);
      return true;
    }

  // FIXME: Audit the code below and make sure it never finds anything.
  edge taken_edge;
  legacy_fold_cond (stmt: cond, taken_edge_p: &taken_edge);

  if (taken_edge)
    {
      /* The legacy folder determined a unique taken edge; rewrite the
	 condition to the corresponding constant.  */
      if (taken_edge->flags & EDGE_TRUE_VALUE)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (stream: dump_file, format: "\nVRP Predicate evaluates to: 1\n");
	  gimple_cond_make_true (gs: cond);
	}
      else if (taken_edge->flags & EDGE_FALSE_VALUE)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (stream: dump_file, format: "\nVRP Predicate evaluates to: 0\n");
	  gimple_cond_make_false (gs: cond);
	}
      else
	gcc_unreachable ();
      update_stmt (s: cond);
      return true;
    }
  return false;
}
1127 | |
1128 | /* Simplify a conditional using a relational operator to an equality |
1129 | test if the range information indicates only one value can satisfy |
1130 | the original conditional. */ |
1131 | |
1132 | bool |
1133 | simplify_using_ranges::simplify_cond_using_ranges_1 (gcond *stmt) |
1134 | { |
1135 | tree op0 = gimple_cond_lhs (gs: stmt); |
1136 | tree op1 = gimple_cond_rhs (gs: stmt); |
1137 | enum tree_code cond_code = gimple_cond_code (gs: stmt); |
1138 | |
1139 | if (fold_cond (cond: stmt)) |
1140 | return true; |
1141 | |
1142 | if (simplify_compare_using_ranges_1 (cond_code, op0, op1, stmt)) |
1143 | { |
1144 | if (dump_file) |
1145 | { |
1146 | fprintf (stream: dump_file, format: "Simplified relational " ); |
1147 | print_gimple_stmt (dump_file, stmt, 0); |
1148 | fprintf (stream: dump_file, format: " into " ); |
1149 | } |
1150 | |
1151 | gimple_cond_set_code (gs: stmt, code: cond_code); |
1152 | gimple_cond_set_lhs (gs: stmt, lhs: op0); |
1153 | gimple_cond_set_rhs (gs: stmt, rhs: op1); |
1154 | |
1155 | update_stmt (s: stmt); |
1156 | |
1157 | if (dump_file) |
1158 | { |
1159 | print_gimple_stmt (dump_file, stmt, 0); |
1160 | fprintf (stream: dump_file, format: "\n" ); |
1161 | } |
1162 | return true; |
1163 | } |
1164 | return false; |
1165 | } |
1166 | |
/* Like simplify_cond_using_ranges_1 but for assignments rather
   than GIMPLE_COND.  Returns true if STMT was changed.  */

bool
simplify_using_ranges::simplify_compare_assign_using_ranges_1
				    (gimple_stmt_iterator *gsi,
				     gimple *stmt)
{
  enum tree_code code = gimple_assign_rhs_code (gs: stmt);
  tree op0 = gimple_assign_rhs1 (gs: stmt);
  tree op1 = gimple_assign_rhs2 (gs: stmt);
  gcc_assert (TREE_CODE_CLASS (code) == tcc_comparison);
  bool happened = false;

  /* First try to reduce the relational test itself; on success CODE,
     OP0 and OP1 were updated in place and are written back into the
     assignment below.  */
  if (simplify_compare_using_ranges_1 (code, op0, op1, stmt))
    {
      if (dump_file)
	{
	  fprintf (stream: dump_file, format: "Simplified relational ");
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (stream: dump_file, format: " into ");
	}

      gimple_assign_set_rhs_code (s: stmt, code);
      gimple_assign_set_rhs1 (gs: stmt, rhs: op0);
      gimple_assign_set_rhs2 (gs: stmt, rhs: op1);

      update_stmt (s: stmt);

      if (dump_file)
	{
	  print_gimple_stmt (dump_file, stmt, 0);
	  fprintf (stream: dump_file, format: "\n");
	}
      happened = true;
    }

  /* Transform EQ_EXPR, NE_EXPR into BIT_XOR_EXPR or identity
     if the RHS is zero or one, and the LHS are known to be boolean
     values.  */
  if ((code == EQ_EXPR || code == NE_EXPR)
      && INTEGRAL_TYPE_P (TREE_TYPE (op0))
      && simplify_truth_ops_using_ranges (gsi, stmt))
    happened = true;

  return happened;
}
1214 | |
1215 | /* Try to simplify OP0 COND_CODE OP1 using a relational operator to an |
1216 | equality test if the range information indicates only one value can |
1217 | satisfy the original conditional. */ |
1218 | |
1219 | bool |
1220 | simplify_using_ranges::simplify_compare_using_ranges_1 (tree_code &cond_code, tree &op0, tree &op1, gimple *stmt) |
1221 | { |
1222 | bool happened = false; |
1223 | if (cond_code != NE_EXPR |
1224 | && cond_code != EQ_EXPR |
1225 | && TREE_CODE (op0) == SSA_NAME |
1226 | && INTEGRAL_TYPE_P (TREE_TYPE (op0)) |
1227 | && is_gimple_min_invariant (op1)) |
1228 | { |
1229 | value_range vr; |
1230 | |
1231 | if (!query->range_of_expr (r&: vr, expr: op0, stmt)) |
1232 | vr.set_undefined (); |
1233 | |
1234 | /* If we have range information for OP0, then we might be |
1235 | able to simplify this conditional. */ |
1236 | if (!vr.undefined_p () && !vr.varying_p ()) |
1237 | { |
1238 | tree new_tree = test_for_singularity (cond_code, op0, op1, vr: &vr); |
1239 | if (new_tree) |
1240 | { |
1241 | cond_code = EQ_EXPR; |
1242 | op1 = new_tree; |
1243 | happened = true; |
1244 | } |
1245 | |
1246 | /* Try again after inverting the condition. We only deal |
1247 | with integral types here, so no need to worry about |
1248 | issues with inverting FP comparisons. */ |
1249 | new_tree = test_for_singularity |
1250 | (cond_code: invert_tree_comparison (cond_code, false), |
1251 | op0, op1, vr: &vr); |
1252 | if (new_tree) |
1253 | { |
1254 | cond_code = NE_EXPR; |
1255 | op1 = new_tree; |
1256 | happened = true; |
1257 | } |
1258 | } |
1259 | } |
1260 | // Try to simplify casted conditions. |
1261 | if (simplify_casted_compare (cond_code, op0, op1)) |
1262 | happened = true; |
1263 | return happened; |
1264 | } |
1265 | |
/* Simplify OP0 code OP1 when OP1 is a constant and OP0 was a SSA_NAME
   defined by a type conversion.  Replacing OP0 with RHS of the type conversion.
   Doing so makes the conversion dead which helps subsequent passes.
   Returns true (and updates OP0/OP1 in place) on success.  */

bool
simplify_using_ranges::simplify_casted_compare (tree_code &, tree &op0, tree &op1)
{

  /* If we have a comparison of an SSA_NAME (OP0) against a constant,
     see if OP0 was set by a type conversion where the source of
     the conversion is another SSA_NAME with a range that fits
     into the range of OP0's type.

     If so, the conversion is redundant as the earlier SSA_NAME can be
     used for the comparison directly if we just massage the constant in the
     comparison.  */
  if (TREE_CODE (op0) == SSA_NAME
      && TREE_CODE (op1) == INTEGER_CST)
    {
      gimple *def_stmt = SSA_NAME_DEF_STMT (op0);
      tree innerop;

      if (!is_gimple_assign (gs: def_stmt))
	return false;

      /* Only plain conversions and integral VIEW_CONVERT_EXPRs are
	 handled.  */
      switch (gimple_assign_rhs_code (gs: def_stmt))
	{
	CASE_CONVERT:
	  innerop = gimple_assign_rhs1 (gs: def_stmt);
	  break;
	case VIEW_CONVERT_EXPR:
	  innerop = TREE_OPERAND (gimple_assign_rhs1 (def_stmt), 0);
	  if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop)))
	    return false;
	  break;
	default:
	  return false;
	}

      if (TREE_CODE (innerop) == SSA_NAME
	  && !POINTER_TYPE_P (TREE_TYPE (innerop))
	  && !SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)
	  && desired_pro_or_demotion_p (TREE_TYPE (innerop), TREE_TYPE (op0)))
	{
	  value_range vr;

	  /* The rewrite is valid only when INNEROP's range fits OP0's
	     type and the constant is representable in INNEROP's type.  */
	  if (query->range_of_expr (r&: vr, expr: innerop)
	      && !vr.varying_p ()
	      && !vr.undefined_p ()
	      && range_fits_type_p (vr: &vr,
				    TYPE_PRECISION (TREE_TYPE (op0)),
				    TYPE_SIGN (TREE_TYPE (op0)))
	      && int_fits_type_p (op1, TREE_TYPE (innerop)))
	    {
	      tree newconst = fold_convert (TREE_TYPE (innerop), op1);
	      op0 = innerop;
	      op1 = newconst;
	      return true;
	    }
	}
    }
  return false;
}
1329 | |
/* Simplify a switch statement using the value range of the switch
   argument.  Case labels partially overlapping the range are
   truncated, labels entirely outside it are queued for removal, and
   the rewritten label vector is queued in to_update_switch_stmts
   (applied later by cleanup_edges_and_switches).  Returns true if any
   simplification was queued.  */

bool
simplify_using_ranges::simplify_switch_using_ranges (gswitch *stmt)
{
  tree op = gimple_switch_index (gs: stmt);
  value_range vr;
  bool take_default;
  edge e;
  edge_iterator ei;
  /* [i, j] and [k, l] are the (up to two) index ranges of case labels
     that remain reachable.  */
  size_t i = 0, j = 0, n, n2;
  tree vec2;
  switch_update su;
  size_t k = 1, l = 0;

  if (TREE_CODE (op) == SSA_NAME)
    {
      if (!query->range_of_expr (r&: vr, expr: op, stmt)
	  || vr.varying_p () || vr.undefined_p ())
	return false;

      /* Find case label for min/max of the value range.  */
      take_default = !find_case_label_ranges (stmt, vr: &vr, min_idx1: &i, max_idx1: &j, min_idx2: &k, max_idx2: &l);
    }
  else if (TREE_CODE (op) == INTEGER_CST)
    {
      take_default = !find_case_label_index (stmt, 1, op, &i);
      if (take_default)
	{
	  /* Empty label range: only the default is reachable.  */
	  i = 1;
	  j = 0;
	}
      else
	{
	  /* Exactly one label matches the constant.  */
	  j = i;
	}
    }
  else
    return false;

  n = gimple_switch_num_labels (gs: stmt);

  /* We can truncate the case label ranges that partially overlap with OP's
     value range.  */
  size_t min_idx = 1, max_idx = 0;
  tree min, max;
  value_range_kind kind = get_legacy_range (vr, min, max);
  if (!vr.undefined_p ())
    find_case_label_range (stmt, min, max, &min_idx, &max_idx);
  if (min_idx <= max_idx)
    {
      tree min_label = gimple_switch_label (gs: stmt, index: min_idx);
      tree max_label = gimple_switch_label (gs: stmt, index: max_idx);

      /* Avoid changing the type of the case labels when truncating.  */
      tree case_label_type = TREE_TYPE (CASE_LOW (min_label));
      tree vr_min = fold_convert (case_label_type, min);
      tree vr_max = fold_convert (case_label_type, max);

      if (kind == VR_RANGE)
	{
	  /* If OP's value range is [2,8] and the low label range is
	     0 ... 3, truncate the label's range to 2 .. 3.  */
	  if (tree_int_cst_compare (CASE_LOW (min_label), t2: vr_min) < 0
	      && CASE_HIGH (min_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (min_label), t2: vr_min) >= 0)
	    CASE_LOW (min_label) = vr_min;

	  /* If OP's value range is [2,8] and the high label range is
	     7 ... 10, truncate the label's range to 7 .. 8.  */
	  if (tree_int_cst_compare (CASE_LOW (max_label), t2: vr_max) <= 0
	      && CASE_HIGH (max_label) != NULL_TREE
	      && tree_int_cst_compare (CASE_HIGH (max_label), t2: vr_max) > 0)
	    CASE_HIGH (max_label) = vr_max;
	}
      else if (kind == VR_ANTI_RANGE)
	{
	  tree one_cst = build_one_cst (case_label_type);

	  if (min_label == max_label)
	    {
	      /* If OP's value range is ~[7,8] and the label's range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), t2: vr_min) == 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), t2: vr_max) > 0)
		CASE_LOW (min_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);

	      /* If OP's value range is ~[7,8] and the label's range is
		 5 ... 8, truncate the label's range to 5 ... 6.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), t2: vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), t2: vr_max) == 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);
	    }
	  else
	    {
	      /* If OP's value range is ~[2,8] and the low label range is
		 0 ... 3, truncate the label's range to 0 ... 1.  */
	      if (tree_int_cst_compare (CASE_LOW (min_label), t2: vr_min) < 0
		  && CASE_HIGH (min_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (min_label), t2: vr_min) >= 0)
		CASE_HIGH (min_label)
		  = int_const_binop (MINUS_EXPR, vr_min, one_cst);

	      /* If OP's value range is ~[2,8] and the high label range is
		 7 ... 10, truncate the label's range to 9 ... 10.  */
	      if (tree_int_cst_compare (CASE_LOW (max_label), t2: vr_max) <= 0
		  && CASE_HIGH (max_label) != NULL_TREE
		  && tree_int_cst_compare (CASE_HIGH (max_label), t2: vr_max) > 0)
		CASE_LOW (max_label)
		  = int_const_binop (PLUS_EXPR, vr_max, one_cst);
	    }
	}

      /* Canonicalize singleton case ranges.  */
      if (tree_int_cst_equal (CASE_LOW (min_label), CASE_HIGH (min_label)))
	CASE_HIGH (min_label) = NULL_TREE;
      if (tree_int_cst_equal (CASE_LOW (max_label), CASE_HIGH (max_label)))
	CASE_HIGH (max_label) = NULL_TREE;
    }

  /* We can also eliminate case labels that lie completely outside OP's value
     range.  */

  /* Bail out if this is just all edges taken.  */
  if (i == 1
      && j == n - 1
      && take_default)
    return false;

  /* Build a new vector of taken case labels.  */
  vec2 = make_tree_vec (j - i + 1 + l - k + 1 + (int)take_default);
  n2 = 0;

  /* Add the default edge, if necessary.  */
  if (take_default)
    TREE_VEC_ELT (vec2, n2++) = gimple_switch_default_label (gs: stmt);

  for (; i <= j; ++i, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (gs: stmt, index: i);

  for (; k <= l; ++k, ++n2)
    TREE_VEC_ELT (vec2, n2) = gimple_switch_label (gs: stmt, index: k);

  /* Mark needed edges.  */
  for (i = 0; i < n2; ++i)
    {
      e = find_edge (gimple_bb (g: stmt),
		     label_to_block (cfun,
				     CASE_LABEL (TREE_VEC_ELT (vec2, i))));
      e->aux = (void *)-1;
    }

  /* Queue not needed edges for later removal.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    {
      if (e->aux == (void *)-1)
	{
	  /* This edge was marked needed above; reset the marker.  */
	  e->aux = NULL;
	  continue;
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (stream: dump_file, format: "removing unreachable case label\n");
	}
      to_remove_edges.safe_push (obj: e);
      set_and_propagate_unexecutable (e);
      e->flags &= ~EDGE_EXECUTABLE;
      e->flags |= EDGE_IGNORE;
    }

  /* And queue an update for the stmt.  */
  su.stmt = stmt;
  su.vec = vec2;
  to_update_switch_stmts.safe_push (obj: su);
  return true;
}
1512 | |
/* Tear down state accumulated by the simplifications above: clear the
   not-executable flag from edges we set it on, remove edges queued by
   simplify_switch_using_ranges, and rewrite the case label vectors of
   the queued switch statements.  */

void
simplify_using_ranges::cleanup_edges_and_switches (void)
{
  int i;
  edge e;
  switch_update *su;

  /* Clear any edges marked as not executable.  */
  if (m_not_executable_flag)
    {
      FOR_EACH_VEC_ELT (m_flag_set_edges, i, e)
	e->flags &= ~m_not_executable_flag;
    }
  /* Remove dead edges from SWITCH_EXPR optimization.  This leaves the
     CFG in a broken state and requires a cfg_cleanup run.  */
  FOR_EACH_VEC_ELT (to_remove_edges, i, e)
    remove_edge (e);

  /* Update SWITCH_EXPR case label vector.  */
  FOR_EACH_VEC_ELT (to_update_switch_stmts, i, su)
    {
      size_t j;
      size_t n = TREE_VEC_LENGTH (su->vec);
      tree label;
      gimple_switch_set_num_labels (g: su->stmt, nlabels: n);
      for (j = 0; j < n; j++)
	gimple_switch_set_label (gs: su->stmt, index: j, TREE_VEC_ELT (su->vec, j));
      /* As we may have replaced the default label with a regular one
	 make sure to make it a real default label again.  This ensures
	 optimal expansion.  */
      label = gimple_switch_label (gs: su->stmt, index: 0);
      CASE_LOW (label) = NULL_TREE;
      CASE_HIGH (label) = NULL_TREE;
    }

  /* Edge removal invalidates dominators and may leave stale loop
     structures behind.  */
  if (!to_remove_edges.is_empty ())
    {
      free_dominance_info (CDI_DOMINATORS);
      loops_state_set (flags: LOOPS_NEED_FIXUP);
    }

  to_remove_edges.release ();
  to_update_switch_stmts.release ();
}
1557 | |
1558 | /* Simplify an integral conversion from an SSA name in STMT. */ |
1559 | |
1560 | static bool |
1561 | simplify_conversion_using_ranges (gimple_stmt_iterator *gsi, gimple *stmt) |
1562 | { |
1563 | tree innerop, middleop, finaltype; |
1564 | gimple *def_stmt; |
1565 | signop inner_sgn, middle_sgn, final_sgn; |
1566 | unsigned inner_prec, middle_prec, final_prec; |
1567 | widest_int innermin, innermed, innermax, middlemin, middlemed, middlemax; |
1568 | |
1569 | finaltype = TREE_TYPE (gimple_assign_lhs (stmt)); |
1570 | if (!INTEGRAL_TYPE_P (finaltype)) |
1571 | return false; |
1572 | middleop = gimple_assign_rhs1 (gs: stmt); |
1573 | def_stmt = SSA_NAME_DEF_STMT (middleop); |
1574 | if (!is_gimple_assign (gs: def_stmt) |
1575 | || !CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt))) |
1576 | return false; |
1577 | innerop = gimple_assign_rhs1 (gs: def_stmt); |
1578 | if (TREE_CODE (innerop) != SSA_NAME |
1579 | || SSA_NAME_OCCURS_IN_ABNORMAL_PHI (innerop)) |
1580 | return false; |
1581 | |
1582 | /* Get the value-range of the inner operand. Use global ranges in |
1583 | case innerop was created during substitute-and-fold. */ |
1584 | wide_int imin, imax; |
1585 | value_range vr; |
1586 | if (!INTEGRAL_TYPE_P (TREE_TYPE (innerop))) |
1587 | return false; |
1588 | get_range_query (cfun)->range_of_expr (r&: vr, expr: innerop, stmt); |
1589 | if (vr.undefined_p () || vr.varying_p ()) |
1590 | return false; |
1591 | innermin = widest_int::from (x: vr.lower_bound (), TYPE_SIGN (TREE_TYPE (innerop))); |
1592 | innermax = widest_int::from (x: vr.upper_bound (), TYPE_SIGN (TREE_TYPE (innerop))); |
1593 | |
1594 | /* Simulate the conversion chain to check if the result is equal if |
1595 | the middle conversion is removed. */ |
1596 | inner_prec = TYPE_PRECISION (TREE_TYPE (innerop)); |
1597 | middle_prec = TYPE_PRECISION (TREE_TYPE (middleop)); |
1598 | final_prec = TYPE_PRECISION (finaltype); |
1599 | |
1600 | /* If the first conversion is not injective, the second must not |
1601 | be widening. */ |
1602 | if (wi::gtu_p (x: innermax - innermin, |
1603 | y: wi::mask <widest_int> (width: middle_prec, negate_p: false)) |
1604 | && middle_prec < final_prec) |
1605 | return false; |
1606 | /* We also want a medium value so that we can track the effect that |
1607 | narrowing conversions with sign change have. */ |
1608 | inner_sgn = TYPE_SIGN (TREE_TYPE (innerop)); |
1609 | if (inner_sgn == UNSIGNED) |
1610 | innermed = wi::shifted_mask <widest_int> (start: 1, width: inner_prec - 1, negate_p: false); |
1611 | else |
1612 | innermed = 0; |
1613 | if (wi::cmp (x: innermin, y: innermed, sgn: inner_sgn) >= 0 |
1614 | || wi::cmp (x: innermed, y: innermax, sgn: inner_sgn) >= 0) |
1615 | innermed = innermin; |
1616 | |
1617 | middle_sgn = TYPE_SIGN (TREE_TYPE (middleop)); |
1618 | middlemin = wi::ext (x: innermin, offset: middle_prec, sgn: middle_sgn); |
1619 | middlemed = wi::ext (x: innermed, offset: middle_prec, sgn: middle_sgn); |
1620 | middlemax = wi::ext (x: innermax, offset: middle_prec, sgn: middle_sgn); |
1621 | |
1622 | /* Require that the final conversion applied to both the original |
1623 | and the intermediate range produces the same result. */ |
1624 | final_sgn = TYPE_SIGN (finaltype); |
1625 | if (wi::ext (x: middlemin, offset: final_prec, sgn: final_sgn) |
1626 | != wi::ext (x: innermin, offset: final_prec, sgn: final_sgn) |
1627 | || wi::ext (x: middlemed, offset: final_prec, sgn: final_sgn) |
1628 | != wi::ext (x: innermed, offset: final_prec, sgn: final_sgn) |
1629 | || wi::ext (x: middlemax, offset: final_prec, sgn: final_sgn) |
1630 | != wi::ext (x: innermax, offset: final_prec, sgn: final_sgn)) |
1631 | return false; |
1632 | |
1633 | gimple_assign_set_rhs1 (gs: stmt, rhs: innerop); |
1634 | fold_stmt (gsi, follow_single_use_edges); |
1635 | return true; |
1636 | } |
1637 | |
/* Simplify a conversion from integral SSA name to float in STMT.
   If the source range fits a narrower (or signed) integer mode for
   which the target has a direct int->float instruction, insert a
   truncation/sign-change and retarget the conversion.  Returns true
   if STMT was changed.  */

bool
simplify_using_ranges::simplify_float_conversion_using_ranges
					(gimple_stmt_iterator *gsi,
					 gimple *stmt)
{
  tree rhs1 = gimple_assign_rhs1 (gs: stmt);
  value_range vr;
  scalar_float_mode fltmode
    = SCALAR_FLOAT_TYPE_MODE (TREE_TYPE (gimple_assign_lhs (stmt)));
  scalar_int_mode mode;
  tree tem;
  gassign *conv;

  /* We can only handle constant ranges.  */
  if (!query->range_of_expr (r&: vr, expr: rhs1, stmt)
      || vr.varying_p ()
      || vr.undefined_p ())
    return false;

  /* First check if we can use a signed type in place of an unsigned.  */
  scalar_int_mode rhs_mode = SCALAR_INT_TYPE_MODE (TREE_TYPE (rhs1));
  if (TYPE_UNSIGNED (TREE_TYPE (rhs1))
      && can_float_p (fltmode, rhs_mode, 0) != CODE_FOR_nothing
      && range_fits_type_p (vr: &vr, TYPE_PRECISION (TREE_TYPE (rhs1)), dest_sgn: SIGNED))
    mode = rhs_mode;
  /* If we can do the conversion in the current input mode do nothing.  */
  else if (can_float_p (fltmode, rhs_mode,
			TYPE_UNSIGNED (TREE_TYPE (rhs1))) != CODE_FOR_nothing)
    return false;
  /* Otherwise search for a mode we can use, starting from the narrowest
     integer mode available.  */
  else
    {
      mode = NARROWEST_INT_MODE;
      for (;;)
	{
	  /* If we cannot do a signed conversion to float from mode
	     or if the value-range does not fit in the signed type
	     try with a wider mode.  */
	  if (can_float_p (fltmode, mode, 0) != CODE_FOR_nothing
	      && range_fits_type_p (vr: &vr, dest_precision: GET_MODE_PRECISION (mode), dest_sgn: SIGNED))
	    break;

	  /* But do not widen the input.  Instead leave that to the
	     optabs expansion code.  */
	  if (!GET_MODE_WIDER_MODE (m: mode).exists (mode: &mode)
	      || GET_MODE_PRECISION (mode) > TYPE_PRECISION (TREE_TYPE (rhs1)))
	    return false;
	}
    }

  /* It works, insert a truncation or sign-change before the
     float conversion.  */
  tem = make_ssa_name (var: build_nonstandard_integer_type
			 (GET_MODE_PRECISION (mode), 0));
  conv = gimple_build_assign (tem, NOP_EXPR, rhs1);
  gsi_insert_before (gsi, conv, GSI_SAME_STMT);
  gimple_assign_set_rhs1 (gs: stmt, rhs: tem);
  fold_stmt (gsi, follow_single_use_edges);

  return true;
}
1702 | |
/* Simplify an internal fn call using ranges if possible.  Handles the
   UBSAN_CHECK_{ADD,SUB,MUL} and {ADD,SUB,MUL}_OVERFLOW internal
   functions: when check_for_binary_op_overflow can decide the overflow
   outcome, the call is replaced by plain arithmetic (plus a
   COMPLEX_EXPR result for the non-ubsan variants).  Returns true if
   STMT was replaced.  */

bool
simplify_using_ranges::simplify_internal_call_using_ranges
					(gimple_stmt_iterator *gsi,
					 gimple *stmt)
{
  enum tree_code subcode;
  bool is_ubsan = false;
  bool ovf = false;
  /* Map the internal function to the underlying arithmetic code.  */
  switch (gimple_call_internal_fn (gs: stmt))
    {
    case IFN_UBSAN_CHECK_ADD:
      subcode = PLUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_SUB:
      subcode = MINUS_EXPR;
      is_ubsan = true;
      break;
    case IFN_UBSAN_CHECK_MUL:
      subcode = MULT_EXPR;
      is_ubsan = true;
      break;
    case IFN_ADD_OVERFLOW:
      subcode = PLUS_EXPR;
      break;
    case IFN_SUB_OVERFLOW:
      subcode = MINUS_EXPR;
      break;
    case IFN_MUL_OVERFLOW:
      subcode = MULT_EXPR;
      break;
    default:
      return false;
    }

  tree op0 = gimple_call_arg (gs: stmt, index: 0);
  tree op1 = gimple_call_arg (gs: stmt, index: 1);
  tree type;
  if (is_ubsan)
    {
      type = TREE_TYPE (op0);
      if (VECTOR_TYPE_P (type))
	return false;
    }
  else if (gimple_call_lhs (gs: stmt) == NULL_TREE)
    return false;
  else
    /* For the *_OVERFLOW variants the lhs is a complex value; the
       element type is the arithmetic type.  */
    type = TREE_TYPE (TREE_TYPE (gimple_call_lhs (stmt)));
  /* Bail unless the overflow outcome is known; for ubsan checks a
     known overflow must be kept so the runtime check still fires.  */
  if (!check_for_binary_op_overflow (query, subcode, type, op0, op1, ovf: &ovf, s: stmt)
      || (is_ubsan && ovf))
    return false;

  gimple *g;
  location_t loc = gimple_location (g: stmt);
  if (is_ubsan)
    g = gimple_build_assign (gimple_call_lhs (gs: stmt), subcode, op0, op1);
  else
    {
      /* Do the arithmetic in an unsigned type when it could overflow
	 or when the operand types do not match, to avoid introducing
	 undefined signed overflow.  */
      tree utype = type;
      if (ovf
	  || !useless_type_conversion_p (type, TREE_TYPE (op0))
	  || !useless_type_conversion_p (type, TREE_TYPE (op1)))
	utype = unsigned_type_for (type);
      if (TREE_CODE (op0) == INTEGER_CST)
	op0 = fold_convert (utype, op0);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op0)))
	{
	  g = gimple_build_assign (make_ssa_name (var: utype), NOP_EXPR, op0);
	  gimple_set_location (g, location: loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op0 = gimple_assign_lhs (gs: g);
	}
      if (TREE_CODE (op1) == INTEGER_CST)
	op1 = fold_convert (utype, op1);
      else if (!useless_type_conversion_p (utype, TREE_TYPE (op1)))
	{
	  g = gimple_build_assign (make_ssa_name (var: utype), NOP_EXPR, op1);
	  gimple_set_location (g, location: loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	  op1 = gimple_assign_lhs (gs: g);
	}
      g = gimple_build_assign (make_ssa_name (var: utype), subcode, op0, op1);
      gimple_set_location (g, location: loc);
      gsi_insert_before (gsi, g, GSI_SAME_STMT);
      if (utype != type)
	{
	  /* Convert the result back to the lhs element type.  */
	  g = gimple_build_assign (make_ssa_name (var: type), NOP_EXPR,
				   gimple_assign_lhs (gs: g));
	  gimple_set_location (g, location: loc);
	  gsi_insert_before (gsi, g, GSI_SAME_STMT);
	}
      /* Build the (result, overflow-flag) complex value.  */
      g = gimple_build_assign (gimple_call_lhs (gs: stmt), COMPLEX_EXPR,
			       gimple_assign_lhs (gs: g),
			       build_int_cst (type, ovf));
    }
  gimple_set_location (g, location: loc);
  gsi_replace (gsi, g, false);
  return true;
}
1804 | |
1805 | /* Return true if VAR is a two-valued variable. Set a and b with the |
1806 | two-values when it is true. Return false otherwise. */ |
1807 | |
1808 | bool |
1809 | simplify_using_ranges::two_valued_val_range_p (tree var, tree *a, tree *b, |
1810 | gimple *s) |
1811 | { |
1812 | value_range vr; |
1813 | if (!query->range_of_expr (r&: vr, expr: var, s)) |
1814 | return false; |
1815 | if (vr.varying_p () || vr.undefined_p ()) |
1816 | return false; |
1817 | |
1818 | if ((vr.num_pairs () == 1 && vr.upper_bound () - vr.lower_bound () == 1) |
1819 | || (vr.num_pairs () == 2 |
1820 | && vr.lower_bound (pair: 0) == vr.upper_bound (pair: 0) |
1821 | && vr.lower_bound (pair: 1) == vr.upper_bound (pair: 1))) |
1822 | { |
1823 | *a = wide_int_to_tree (TREE_TYPE (var), cst: vr.lower_bound ()); |
1824 | *b = wide_int_to_tree (TREE_TYPE (var), cst: vr.upper_bound ()); |
1825 | return true; |
1826 | } |
1827 | return false; |
1828 | } |
1829 | |
1830 | simplify_using_ranges::simplify_using_ranges (range_query *query, |
1831 | int not_executable_flag) |
1832 | : query (query) |
1833 | { |
1834 | to_remove_edges = vNULL; |
1835 | to_update_switch_stmts = vNULL; |
1836 | m_not_executable_flag = not_executable_flag; |
1837 | m_flag_set_edges = vNULL; |
1838 | } |
1839 | |
/* Destructor: flush any queued edge removals and switch updates, then
   release the list of edges whose not-executable flag was set.  */

simplify_using_ranges::~simplify_using_ranges ()
{
  cleanup_edges_and_switches ();
  m_flag_set_edges.release ();
}
1845 | |
/* Simplify the statement at *GSI using value ranges if possible.
   The statement is rewritten in place; return true if a
   simplification was performed, false otherwise.  */

bool
simplify_using_ranges::simplify (gimple_stmt_iterator *gsi)
{
  gcc_checking_assert (query);

  gimple *stmt = gsi_stmt (i: *gsi);
  if (is_gimple_assign (gs: stmt))
    {
      enum tree_code rhs_code = gimple_assign_rhs_code (gs: stmt);
      tree rhs1 = gimple_assign_rhs1 (gs: stmt);
      tree rhs2 = gimple_assign_rhs2 (gs: stmt);
      tree lhs = gimple_assign_lhs (gs: stmt);
      tree val1 = NULL_TREE, val2 = NULL_TREE;
      use_operand_p use_p;
      gimple *use_stmt;

      /* Convert:
	 LHS = CST BINOP VAR
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 To:
	 LHS = VAR == VAL1 ? (CST BINOP VAL1) : (CST BINOP VAL2)

	 Also handles:
	 LHS = VAR BINOP CST
	 Where VAR is two-valued and LHS is used in GIMPLE_COND only
	 To:
	 LHS = VAR == VAL1 ? (VAL1 BINOP CST) : (VAL2 BINOP CST) */

      /* The transform only pays off when LHS feeds a single
	 GIMPLE_COND, so that the resulting COND_EXPR can later be
	 folded into the branch.  */
      if (TREE_CODE_CLASS (rhs_code) == tcc_binary
	  && INTEGRAL_TYPE_P (TREE_TYPE (rhs1))
	  && ((TREE_CODE (rhs1) == INTEGER_CST
	       && TREE_CODE (rhs2) == SSA_NAME)
	      || (TREE_CODE (rhs2) == INTEGER_CST
		  && TREE_CODE (rhs1) == SSA_NAME))
	  && single_imm_use (var: lhs, use_p: &use_p, stmt: &use_stmt)
	  && gimple_code (g: use_stmt) == GIMPLE_COND)

	{
	  tree new_rhs1 = NULL_TREE;
	  tree new_rhs2 = NULL_TREE;
	  tree cmp_var = NULL_TREE;

	  if (TREE_CODE (rhs2) == SSA_NAME
	      && two_valued_val_range_p (var: rhs2, a: &val1, b: &val2, s: stmt))
	    {
	      /* Optimize RHS1 OP [VAL1, VAL2].  */
	      new_rhs1 = int_const_binop (rhs_code, rhs1, val1);
	      new_rhs2 = int_const_binop (rhs_code, rhs1, val2);
	      cmp_var = rhs2;
	    }
	  else if (TREE_CODE (rhs1) == SSA_NAME
		   && two_valued_val_range_p (var: rhs1, a: &val1, b: &val2, s: stmt))
	    {
	      /* Optimize [VAL1, VAL2] OP RHS2.  */
	      new_rhs1 = int_const_binop (rhs_code, val1, rhs2);
	      new_rhs2 = int_const_binop (rhs_code, val2, rhs2);
	      cmp_var = rhs1;
	    }

	  /* If we could not find two-vals or the optimization is invalid as
	     in divide by zero, new_rhs1 / new_rhs2 will be NULL_TREE.  */
	  if (new_rhs1 && new_rhs2)
	    {
	      /* Build CMP_VAR == VAL1 and rewrite the assignment into
		 a COND_EXPR selecting between the two precomputed
		 constants.  */
	      tree cond = gimple_build (gsi, true, GSI_SAME_STMT,
					UNKNOWN_LOCATION,
					EQ_EXPR, boolean_type_node,
					cmp_var, val1);
	      gimple_assign_set_rhs_with_ops (gsi,
					      COND_EXPR, cond,
					      new_rhs1,
					      new_rhs2);
	      update_stmt (s: gsi_stmt (i: *gsi));
	      fold_stmt (gsi, follow_single_use_edges);
	      return true;
	    }
	}

      /* Comparisons computed into an SSA name have their own
	 simplifier.  */
      if (TREE_CODE_CLASS (rhs_code) == tcc_comparison)
	return simplify_compare_assign_using_ranges_1 (gsi, stmt);

      /* Dispatch on the RHS code; each helper rewrites the statement
	 in place and returns true on success.  */
      switch (rhs_code)
	{

	/* Transform TRUNC_DIV_EXPR and TRUNC_MOD_EXPR into RSHIFT_EXPR
	   and BIT_AND_EXPR respectively if the first operand is greater
	   than zero and the second operand is an exact power of two.
	   Also optimize TRUNC_MOD_EXPR away if the second operand is
	   constant and the first operand already has the right value
	   range.  */
	case TRUNC_DIV_EXPR:
	case TRUNC_MOD_EXPR:
	  if ((TREE_CODE (rhs1) == SSA_NAME
	       || TREE_CODE (rhs1) == INTEGER_CST)
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_div_or_mod_using_ranges (gsi, stmt);
	  break;

	/* Transform ABS (X) into X or -X as appropriate.  */
	case ABS_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_abs_using_ranges (gsi, stmt);
	  break;

	case BIT_AND_EXPR:
	case BIT_IOR_EXPR:
	  /* Optimize away BIT_AND_EXPR and BIT_IOR_EXPR
	     if all the bits being cleared are already cleared or
	     all the bits being set are already set.  */
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_bit_ops_using_ranges (gsi, stmt);
	  break;

	CASE_CONVERT:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_conversion_using_ranges (gsi, stmt);
	  break;

	case FLOAT_EXPR:
	  if (TREE_CODE (rhs1) == SSA_NAME
	      && INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_float_conversion_using_ranges (gsi, stmt);
	  break;

	case MIN_EXPR:
	case MAX_EXPR:
	  if (INTEGRAL_TYPE_P (TREE_TYPE (rhs1)))
	    return simplify_min_or_max_using_ranges (gsi, stmt);
	  break;

	case RSHIFT_EXPR:
	  {
	    tree op0 = gimple_assign_rhs1 (gs: stmt);
	    tree type = TREE_TYPE (op0);
	    int_range_max range;
	    /* An arithmetic right shift of a signed value in [-1, 0]
	       yields the value itself for any shift count, so the
	       shift can be removed when the operand's range is
	       contained in [-1, 0].  */
	    if (TYPE_SIGN (type) == SIGNED
		&& query->range_of_expr (r&: range, expr: op0, stmt))
	      {
		unsigned prec = TYPE_PRECISION (TREE_TYPE (op0));
		/* Anti-range ~[-1, 0], i.e. everything outside [-1, 0].  */
		int_range<2> nzm1 (type, wi::minus_one (precision: prec), wi::zero (precision: prec),
				   VR_ANTI_RANGE);
		range.intersect (nzm1);
		// If there are no ranges other than [-1, 0] remove the shift.
		if (range.undefined_p ())
		  {
		    gimple_assign_set_rhs_from_tree (gsi, op0);
		    return true;
		  }
		return false;
	      }
	    break;
	  }
	default:
	  break;
	}
    }
  else if (gimple_code (g: stmt) == GIMPLE_COND)
    return simplify_cond_using_ranges_1 (stmt: as_a <gcond *> (p: stmt));
  else if (gimple_code (g: stmt) == GIMPLE_SWITCH)
    return simplify_switch_using_ranges (stmt: as_a <gswitch *> (p: stmt));
  else if (is_gimple_call (gs: stmt)
	   && gimple_call_internal_p (gs: stmt))
    return simplify_internal_call_using_ranges (gsi, stmt);

  return false;
}
2015 | |