1/* Combining of if-expressions on trees.
2 Copyright (C) 2007-2023 Free Software Foundation, Inc.
3 Contributed by Richard Guenther <rguenther@suse.de>
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 3, or (at your option)
10any later version.
11
12GCC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "backend.h"
25#include "rtl.h"
26#include "tree.h"
27#include "gimple.h"
28#include "cfghooks.h"
29#include "tree-pass.h"
30#include "memmodel.h"
31#include "tm_p.h"
32#include "ssa.h"
33#include "tree-pretty-print.h"
34/* rtl is needed only because arm back-end requires it for
35 BRANCH_COST. */
36#include "fold-const.h"
37#include "cfganal.h"
38#include "gimple-iterator.h"
39#include "gimple-fold.h"
40#include "gimplify-me.h"
41#include "tree-cfg.h"
42#include "tree-ssa.h"
43#include "attribs.h"
44#include "asan.h"
45
/* Cost heuristic: combining two conditional branches into straight-line
   logic is profitable when taking a branch costs at least as much as
   evaluating both operands (BRANCH_COST >= 2).  Targets may override
   this macro.  */
#ifndef LOGICAL_OP_NON_SHORT_CIRCUIT
#define LOGICAL_OP_NON_SHORT_CIRCUIT \
  (BRANCH_COST (optimize_function_for_speed_p (cfun), \
		false) >= 2)
#endif
51
52/* This pass combines COND_EXPRs to simplify control flow. It
53 currently recognizes bit tests and comparisons in chains that
54 represent logical and or logical or of two COND_EXPRs.
55
 It does so by walking basic blocks in an approximate reverse
57 post-dominator order and trying to match CFG patterns that
58 represent logical and or logical or of two COND_EXPRs.
59 Transformations are done if the COND_EXPR conditions match
60 either
61
62 1. two single bit tests X & (1 << Yn) (for logical and)
63
64 2. two bit tests X & Yn (for logical or)
65
66 3. two comparisons X OPn Y (for logical or)
67
68 To simplify this pass, removing basic blocks and dead code
69 is left to CFG cleanup and DCE. */
70
71
72/* Recognize a if-then-else CFG pattern starting to match with the
73 COND_BB basic-block containing the COND_EXPR. The recognized
 then and else blocks are stored to *THEN_BB and *ELSE_BB. If
75 *THEN_BB and/or *ELSE_BB are already set, they are required to
76 match the then and else basic-blocks to make the pattern match.
77 Returns true if the pattern matched, false otherwise. */
78
79static bool
80recognize_if_then_else (basic_block cond_bb,
81 basic_block *then_bb, basic_block *else_bb)
82{
83 edge t, e;
84
85 if (EDGE_COUNT (cond_bb->succs) != 2)
86 return false;
87
88 /* Find the then/else edges. */
89 t = EDGE_SUCC (cond_bb, 0);
90 e = EDGE_SUCC (cond_bb, 1);
91 if (!(t->flags & EDGE_TRUE_VALUE))
92 std::swap (a&: t, b&: e);
93 if (!(t->flags & EDGE_TRUE_VALUE)
94 || !(e->flags & EDGE_FALSE_VALUE))
95 return false;
96
97 /* Check if the edge destinations point to the required block. */
98 if (*then_bb
99 && t->dest != *then_bb)
100 return false;
101 if (*else_bb
102 && e->dest != *else_bb)
103 return false;
104
105 if (!*then_bb)
106 *then_bb = t->dest;
107 if (!*else_bb)
108 *else_bb = e->dest;
109
110 return true;
111}
112
113/* Verify if the basic block BB does not have side-effects. Return
114 true in this case, else false. */
115
static bool
bb_no_side_effects_p (basic_block bb)
{
  gimple_stmt_iterator gsi;

  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      gimple *stmt = gsi_stmt (gsi);

      /* Debug stmts are not executed code and never have side-effects.  */
      if (is_gimple_debug (stmt))
	continue;

      gassign *ass;
      enum tree_code rhs_code;
      if (gimple_has_side_effects (stmt)
	  || gimple_could_trap_p (stmt)
	  /* A memory read makes the block depend on memory state.  */
	  || gimple_vuse (stmt)
	  /* We need to rewrite stmts with undefined overflow to use
	     unsigned arithmetic but cannot do so for signed division.  */
	  || ((ass = dyn_cast <gassign *> (stmt))
	      && INTEGRAL_TYPE_P (TREE_TYPE (gimple_assign_lhs (ass)))
	      && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (gimple_assign_lhs (ass)))
	      /* Comma trick: capture the code for the checks below while
		 staying inside the short-circuit chain.  */
	      && ((rhs_code = gimple_assign_rhs_code (ass)), true)
	      && (rhs_code == TRUNC_DIV_EXPR
		  || rhs_code == CEIL_DIV_EXPR
		  || rhs_code == FLOOR_DIV_EXPR
		  || rhs_code == ROUND_DIV_EXPR)
	      /* We cannot use expr_not_equal_to since we'd have to restrict
		 flow-sensitive info to what's known at the outer if.
		 Division by constant -1 is the only signed division whose
		 overflow (INT_MIN / -1) matters here.  */
	      && (TREE_CODE (gimple_assign_rhs2 (ass)) != INTEGER_CST
		  || !integer_minus_onep (gimple_assign_rhs2 (ass))))
	  /* const calls don't match any of the above, yet they could
	     still have some side-effects - they could contain
	     gimple_could_trap_p statements, like floating point
	     exceptions or integer division by zero.  See PR70586.
	     FIXME: perhaps gimple_has_side_effects or gimple_could_trap_p
	     should handle this.  */
	  || is_gimple_call (stmt))
	return false;

      /* Reject uses of SSA names that may be undefined: speculating the
	 block would make the combined condition depend on garbage.  */
      ssa_op_iter it;
      tree use;
      FOR_EACH_SSA_TREE_OPERAND (use, stmt, it, SSA_OP_USE)
	if (ssa_name_maybe_undef_p (use))
	  return false;
    }

  return true;
}
165
166/* Return true if BB is an empty forwarder block to TO_BB. */
167
168static bool
169forwarder_block_to (basic_block bb, basic_block to_bb)
170{
171 return empty_block_p (bb)
172 && single_succ_p (bb)
173 && single_succ (bb) == to_bb;
174}
175
176/* Verify if all PHI node arguments in DEST for edges from BB1 or
177 BB2 to DEST are the same. This makes the CFG merge point
178 free from side-effects. Return true in this case, else false. */
179
180static bool
181same_phi_args_p (basic_block bb1, basic_block bb2, basic_block dest)
182{
183 edge e1 = find_edge (bb1, dest);
184 edge e2 = find_edge (bb2, dest);
185 gphi_iterator gsi;
186 gphi *phi;
187
188 for (gsi = gsi_start_phis (dest); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
189 {
190 phi = gsi.phi ();
191 if (!operand_equal_p (PHI_ARG_DEF_FROM_EDGE (phi, e1),
192 PHI_ARG_DEF_FROM_EDGE (phi, e2), flags: 0))
193 return false;
194 }
195
196 return true;
197}
198
199/* Return the best representative SSA name for CANDIDATE which is used
200 in a bit test. */
201
202static tree
203get_name_for_bit_test (tree candidate)
204{
205 /* Skip single-use names in favor of using the name from a
206 non-widening conversion definition. */
207 if (TREE_CODE (candidate) == SSA_NAME
208 && has_single_use (var: candidate))
209 {
210 gimple *def_stmt = SSA_NAME_DEF_STMT (candidate);
211 if (is_gimple_assign (gs: def_stmt)
212 && CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (def_stmt)))
213 {
214 if (TYPE_PRECISION (TREE_TYPE (candidate))
215 <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (def_stmt))))
216 return gimple_assign_rhs1 (gs: def_stmt);
217 }
218 }
219
220 return candidate;
221}
222
223/* Recognize a single bit test pattern in GIMPLE_COND and its defining
224 statements. Store the name being tested in *NAME and the bit
225 in *BIT. The GIMPLE_COND computes *NAME & (1 << *BIT).
226 Returns true if the pattern matched, false otherwise. */
227
static bool
recognize_single_bit_test (gcond *cond, tree *name, tree *bit, bool inv)
{
  gimple *stmt;

  /* Get at the definition of the result of the bit test.  The
     condition must have the shape lhs != 0 (or lhs == 0 when INV).  */
  if (gimple_cond_code (cond) != (inv ? EQ_EXPR : NE_EXPR)
      || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
      || !integer_zerop (gimple_cond_rhs (cond)))
    return false;
  stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
  if (!is_gimple_assign (stmt))
    return false;

  /* Look at which bit is tested.  One form to recognize is
     D.1985_5 = state_3(D) >> control1_4(D);
     D.1986_6 = (int) D.1985_5;
     D.1987_7 = op0 & 1;
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && integer_onep (gimple_assign_rhs2 (stmt))
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
    {
      tree orig_name = gimple_assign_rhs1 (stmt);

      /* Look through copies and conversions to eventually
	 find the stmt that computes the shift.  */
      stmt = SSA_NAME_DEF_STMT (orig_name);

      /* Only non-widening conversions are transparent here: a widening
	 one could introduce bits the shift did not produce.  */
      while (is_gimple_assign (stmt)
	     && ((CONVERT_EXPR_CODE_P (gimple_assign_rhs_code (stmt))
		  && (TYPE_PRECISION (TREE_TYPE (gimple_assign_lhs (stmt)))
		      <= TYPE_PRECISION (TREE_TYPE (gimple_assign_rhs1 (stmt))))
		  && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME)
		 || gimple_assign_ssa_name_copy_p (stmt)))
	stmt = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));

      /* If we found such, decompose it.  */
      if (is_gimple_assign (stmt)
	  && gimple_assign_rhs_code (stmt) == RSHIFT_EXPR)
	{
	  /* op0 & (1 << op1) */
	  *bit = gimple_assign_rhs2 (stmt);
	  *name = gimple_assign_rhs1 (stmt);
	}
      else
	{
	  /* t & 1 -- the tested bit is bit zero.  */
	  *bit = integer_zero_node;
	  *name = get_name_for_bit_test (orig_name);
	}

      return true;
    }

  /* Another form is
     D.1987_7 = op0 & (1 << CST)
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && integer_pow2p (gimple_assign_rhs2 (stmt)))
    {
      *name = gimple_assign_rhs1 (stmt);
      /* tree_log2 recovers the bit number from the power-of-two mask.  */
      *bit = build_int_cst (integer_type_node,
			    tree_log2 (gimple_assign_rhs2 (stmt)));
      return true;
    }

  /* Another form is
     D.1986_6 = 1 << control1_4(D)
     D.1987_7 = op0 & D.1986_6
     if (D.1987_7 != 0)  */
  if (gimple_assign_rhs_code (stmt) == BIT_AND_EXPR
      && TREE_CODE (gimple_assign_rhs1 (stmt)) == SSA_NAME
      && TREE_CODE (gimple_assign_rhs2 (stmt)) == SSA_NAME)
    {
      gimple *tmp;

      /* Both arguments of the BIT_AND_EXPR can be the single-bit
	 specifying expression.  */
      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs1 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs2 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}

      tmp = SSA_NAME_DEF_STMT (gimple_assign_rhs2 (stmt));
      if (is_gimple_assign (tmp)
	  && gimple_assign_rhs_code (tmp) == LSHIFT_EXPR
	  && integer_onep (gimple_assign_rhs1 (tmp)))
	{
	  *name = gimple_assign_rhs1 (stmt);
	  *bit = gimple_assign_rhs2 (tmp);
	  return true;
	}
    }

  return false;
}
331
332/* Recognize a bit test pattern in a GIMPLE_COND and its defining
333 statements. Store the name being tested in *NAME and the bits
334 in *BITS. The COND_EXPR computes *NAME & *BITS.
335 Returns true if the pattern matched, false otherwise. */
336
337static bool
338recognize_bits_test (gcond *cond, tree *name, tree *bits, bool inv)
339{
340 gimple *stmt;
341
342 /* Get at the definition of the result of the bit test. */
343 if (gimple_cond_code (gs: cond) != (inv ? EQ_EXPR : NE_EXPR)
344 || TREE_CODE (gimple_cond_lhs (cond)) != SSA_NAME
345 || !integer_zerop (gimple_cond_rhs (gs: cond)))
346 return false;
347 stmt = SSA_NAME_DEF_STMT (gimple_cond_lhs (cond));
348 if (!is_gimple_assign (gs: stmt)
349 || gimple_assign_rhs_code (gs: stmt) != BIT_AND_EXPR)
350 return false;
351
352 *name = get_name_for_bit_test (candidate: gimple_assign_rhs1 (gs: stmt));
353 *bits = gimple_assign_rhs2 (gs: stmt);
354
355 return true;
356}
357
358
359/* Update profile after code in outer_cond_bb was adjusted so
360 outer_cond_bb has no condition. */
361
static void
update_profile_after_ifcombine (basic_block inner_cond_bb,
				basic_block outer_cond_bb)
{
  edge outer_to_inner = find_edge (outer_cond_bb, inner_cond_bb);
  /* outer2 is the other successor of outer_cond_bb, i.e. the edge that
     used to bypass the inner condition.  */
  edge outer2 = (EDGE_SUCC (outer_cond_bb, 0) == outer_to_inner
		 ? EDGE_SUCC (outer_cond_bb, 1)
		 : EDGE_SUCC (outer_cond_bb, 0));
  edge inner_taken = EDGE_SUCC (inner_cond_bb, 0);
  edge inner_not_taken = EDGE_SUCC (inner_cond_bb, 1);

  /* Normalize so inner_taken is the inner edge leading to the same
     destination the outer bypass edge went to.  */
  if (inner_taken->dest != outer2->dest)
    std::swap (inner_taken, inner_not_taken);
  gcc_assert (inner_taken->dest == outer2->dest);

  /* In the following we assume that inner_cond_bb has single predecessor.  */
  gcc_assert (single_pred_p (inner_cond_bb));

  /* Path outer_cond_bb->(outer2) needs to be merged into path
     outer_cond_bb->(outer_to_inner)->inner_cond_bb->(inner_taken)
     and probability of inner_not_taken updated.  */

  inner_cond_bb->count = outer_cond_bb->count;

  /* Handle special case where inner_taken probability is always. In this case
     we know that the overall outcome will be always as well, but combining
     probabilities will be conservative because it does not know that
     outer2->probability is inverse of outer_to_inner->probability.  */
  if (inner_taken->probability == profile_probability::always ())
    ;
  else
    inner_taken->probability = outer2->probability + outer_to_inner->probability
			       * inner_taken->probability;
  inner_not_taken->probability = profile_probability::always ()
				 - inner_taken->probability;

  /* The outer condition is gone: all of its count now flows into the
     inner block.  */
  outer_to_inner->probability = profile_probability::always ();
  outer2->probability = profile_probability::never ();
}
401
402/* If-convert on a and pattern with a common else block. The inner
403 if is specified by its INNER_COND_BB, the outer by OUTER_COND_BB.
404 inner_inv, outer_inv and result_inv indicate whether the conditions
405 are inverted.
406 Returns true if the edges to the common else basic-block were merged. */
407
static bool
ifcombine_ifandif (basic_block inner_cond_bb, bool inner_inv,
		   basic_block outer_cond_bb, bool outer_inv, bool result_inv)
{
  gimple_stmt_iterator gsi;
  tree name1, name2, bit1, bit2, bits1, bits2;

  /* Both blocks must end in a GIMPLE_COND.  */
  gcond *inner_cond = safe_dyn_cast <gcond *> (*gsi_last_bb (inner_cond_bb));
  if (!inner_cond)
    return false;

  gcond *outer_cond = safe_dyn_cast <gcond *> (*gsi_last_bb (outer_cond_bb));
  if (!outer_cond)
    return false;

  /* See if we test a single bit of the same name in both tests.  In
     that case remove the outer test, merging both else edges,
     and change the inner one to test for
     name & (bit1 | bit2) == (bit1 | bit2).  */
  if (recognize_single_bit_test (inner_cond, &name1, &bit1, inner_inv)
      && recognize_single_bit_test (outer_cond, &name2, &bit2, outer_inv)
      && name1 == name2)
    {
      tree t, t2;

      /* Cannot insert new uses of a name occurring in an abnormal PHI.  */
      if (TREE_CODE (name1) == SSA_NAME
	  && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name1))
	return false;

      /* Do it.  Build the mask (1 << bit1) | (1 << bit2) and test
	 name & mask == mask (inverted per RESULT_INV).  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
		       build_int_cst (TREE_TYPE (name1), 1), bit1);
      t2 = fold_build2 (LSHIFT_EXPR, TREE_TYPE (name1),
			build_int_cst (TREE_TYPE (name1), 1), bit2);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), t, t2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t2 = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t2 = force_gimple_operand_gsi (&gsi, t2, true, NULL_TREE,
				     true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR,
		       boolean_type_node, t2, t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      if (!is_gimple_condexpr_for_cond (t))
	{
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
					  NULL, true, GSI_SAME_STMT);
	}
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);

      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing double bit test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T == T\nwith temporary T = (1 << ");
	  print_generic_expr (dump_file, bit1);
	  fprintf (dump_file, ") | (1 << ");
	  print_generic_expr (dump_file, bit2);
	  fprintf (dump_file, ")\n");
	}

      return true;
    }

  /* See if we have two bit tests of the same name in both tests.
     In that case remove the outer test and change the inner one to
     test for name & (bits1 | bits2) != 0.  */
  else if (recognize_bits_test (inner_cond, &name1, &bits1, !inner_inv)
	   && recognize_bits_test (outer_cond, &name2, &bits2, !outer_inv))
    {
      gimple_stmt_iterator gsi;
      tree t;

      /* Cannot insert new uses of names occurring in abnormal PHIs.  */
      if ((TREE_CODE (name1) == SSA_NAME
	   && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name1))
	  || (TREE_CODE (name2) == SSA_NAME
	      && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name2)))
	return false;

      /* Find the common name which is bit-tested.  The swaps normalize
	 things so name1/name2 hold the common name afterwards.  */
      if (name1 == name2)
	;
      else if (bits1 == bits2)
	{
	  std::swap (name2, bits2);
	  std::swap (name1, bits1);
	}
      else if (name1 == bits2)
	std::swap (name2, bits2);
      else if (bits1 == name2)
	std::swap (name1, bits1);
      else
	return false;

      /* As we strip non-widening conversions in finding a common
	 name that is tested make sure to end up with an integral
	 type for building the bit operations.  */
      if (TYPE_PRECISION (TREE_TYPE (bits1))
	  >= TYPE_PRECISION (TREE_TYPE (bits2)))
	{
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  name1 = fold_convert (TREE_TYPE (bits1), name1);
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  bits2 = fold_convert (TREE_TYPE (bits1), bits2);
	}
      else
	{
	  bits2 = fold_convert (unsigned_type_for (TREE_TYPE (bits2)), bits2);
	  name1 = fold_convert (TREE_TYPE (bits2), name1);
	  bits1 = fold_convert (unsigned_type_for (TREE_TYPE (bits1)), bits1);
	  bits1 = fold_convert (TREE_TYPE (bits2), bits1);
	}

      /* Do it.  Build name & (bits1 | bits2) and compare against zero.  */
      gsi = gsi_for_stmt (inner_cond);
      t = fold_build2 (BIT_IOR_EXPR, TREE_TYPE (name1), bits1, bits2);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (BIT_AND_EXPR, TREE_TYPE (name1), name1, t);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      t = fold_build2 (result_inv ? NE_EXPR : EQ_EXPR, boolean_type_node, t,
		       build_int_cst (TREE_TYPE (t), 0));
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      if (!is_gimple_condexpr_for_cond (t))
	{
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
					  NULL, true, GSI_SAME_STMT);
	}
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing bits or bits test to ");
	  print_generic_expr (dump_file, name1);
	  fprintf (dump_file, " & T != 0\nwith temporary T = ");
	  print_generic_expr (dump_file, bits1);
	  fprintf (dump_file, " | ");
	  print_generic_expr (dump_file, bits2);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  /* See if we have two comparisons that we can merge into one.  */
  else if (TREE_CODE_CLASS (gimple_cond_code (inner_cond)) == tcc_comparison
	   && TREE_CODE_CLASS (gimple_cond_code (outer_cond)) == tcc_comparison)
    {
      tree t;
      enum tree_code inner_cond_code = gimple_cond_code (inner_cond);
      enum tree_code outer_cond_code = gimple_cond_code (outer_cond);

      /* Invert comparisons if necessary (and possible).  Inverting may
	 fail (ERROR_MARK) e.g. for floating-point compares with NaNs.  */
      if (inner_inv)
	inner_cond_code = invert_tree_comparison (inner_cond_code,
	  HONOR_NANS (gimple_cond_lhs (inner_cond)));
      if (inner_cond_code == ERROR_MARK)
	return false;
      if (outer_inv)
	outer_cond_code = invert_tree_comparison (outer_cond_code,
	  HONOR_NANS (gimple_cond_lhs (outer_cond)));
      if (outer_cond_code == ERROR_MARK)
	return false;
      /* Don't return false so fast, try maybe_fold_or_comparisons?  */

      if (!(t = maybe_fold_and_comparisons (boolean_type_node, inner_cond_code,
					    gimple_cond_lhs (inner_cond),
					    gimple_cond_rhs (inner_cond),
					    outer_cond_code,
					    gimple_cond_lhs (outer_cond),
					    gimple_cond_rhs (outer_cond),
					    gimple_bb (outer_cond))))
	{
	  /* Folding failed; fall back to an explicit A && B, but only
	     when the target prefers branchless logical ops.  */
	  tree t1, t2;
	  gimple_stmt_iterator gsi;
	  bool logical_op_non_short_circuit = LOGICAL_OP_NON_SHORT_CIRCUIT;
	  if (param_logical_op_non_short_circuit != -1)
	    logical_op_non_short_circuit
	      = param_logical_op_non_short_circuit;
	  if (!logical_op_non_short_circuit || sanitize_coverage_p ())
	    return false;
	  /* Only do this optimization if the inner bb contains only the conditional.  */
	  if (!gsi_one_before_end_p (gsi_start_nondebug_after_labels_bb (inner_cond_bb)))
	    return false;
	  t1 = fold_build2_loc (gimple_location (inner_cond),
				inner_cond_code,
				boolean_type_node,
				gimple_cond_lhs (inner_cond),
				gimple_cond_rhs (inner_cond));
	  t2 = fold_build2_loc (gimple_location (outer_cond),
				outer_cond_code,
				boolean_type_node,
				gimple_cond_lhs (outer_cond),
				gimple_cond_rhs (outer_cond));
	  t = fold_build2_loc (gimple_location (inner_cond),
			       TRUTH_AND_EXPR, boolean_type_node, t1, t2);
	  if (result_inv)
	    {
	      t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
	      result_inv = false;
	    }
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
					  NULL, true, GSI_SAME_STMT);
	}
      if (result_inv)
	t = fold_build1 (TRUTH_NOT_EXPR, TREE_TYPE (t), t);
      t = canonicalize_cond_expr_cond (t);
      if (!t)
	return false;
      if (!is_gimple_condexpr_for_cond (t))
	{
	  gsi = gsi_for_stmt (inner_cond);
	  t = force_gimple_operand_gsi_1 (&gsi, t, is_gimple_condexpr_for_cond,
					  NULL, true, GSI_SAME_STMT);
	}
      gimple_cond_set_condition_from_tree (inner_cond, t);
      update_stmt (inner_cond);

      /* Leave CFG optimization to cfg_cleanup.  */
      gimple_cond_set_condition_from_tree (outer_cond,
	outer_inv ? boolean_false_node : boolean_true_node);
      update_stmt (outer_cond);
      update_profile_after_ifcombine (inner_cond_bb, outer_cond_bb);

      if (dump_file)
	{
	  fprintf (dump_file, "optimizing two comparisons to ");
	  print_generic_expr (dump_file, t);
	  fprintf (dump_file, "\n");
	}

      return true;
    }

  return false;
}
668
669/* Helper function for tree_ssa_ifcombine_bb. Recognize a CFG pattern and
670 dispatch to the appropriate if-conversion helper for a particular
671 set of INNER_COND_BB, OUTER_COND_BB, THEN_BB and ELSE_BB.
672 PHI_PRED_BB should be one of INNER_COND_BB, THEN_BB or ELSE_BB. */
673
static bool
tree_ssa_ifcombine_bb_1 (basic_block inner_cond_bb, basic_block outer_cond_bb,
			 basic_block then_bb, basic_block else_bb,
			 basic_block phi_pred_bb)
{
  /* The && form is characterized by a common else_bb with
     the two edges leading to it mergable.  The latter is
     guaranteed by matching PHI arguments in the else_bb and
     the inner cond_bb having no side-effects.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &else_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	 <outer_cond_bb>
	   if (q) goto inner_cond_bb; else goto else_bb;
	 <inner_cond_bb>
	   if (p) goto ...; else goto else_bb;
	   ...
	 <else_bb>
	   ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, false,
				false);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != else_bb
      && recognize_if_then_else (outer_cond_bb, &else_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, else_bb))
    {
      /* We have
	 <outer_cond_bb>
	   if (q) goto else_bb; else goto inner_cond_bb;
	 <inner_cond_bb>
	   if (p) goto ...; else goto else_bb;
	   ...
	 <else_bb>
	   ...
       */
      return ifcombine_ifandif (inner_cond_bb, false, outer_cond_bb, true,
				false);
    }

  /* The || form is characterized by a common then_bb with the
     two edges leading to it mergable.  The latter is guaranteed
     by matching PHI arguments in the then_bb and the inner cond_bb
     having no side-effects.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &then_bb, &inner_cond_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	 <outer_cond_bb>
	   if (q) goto then_bb; else goto inner_cond_bb;
	 <inner_cond_bb>
	   if (q) goto then_bb; else goto ...;
	 <then_bb>
	   ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, true,
				true);
    }

  /* And a version where the outer condition is negated.  */
  if (phi_pred_bb != then_bb
      && recognize_if_then_else (outer_cond_bb, &inner_cond_bb, &then_bb)
      && same_phi_args_p (outer_cond_bb, phi_pred_bb, then_bb))
    {
      /* We have
	 <outer_cond_bb>
	   if (q) goto inner_cond_bb; else goto then_bb;
	 <inner_cond_bb>
	   if (q) goto then_bb; else goto ...;
	 <then_bb>
	   ...
       */
      return ifcombine_ifandif (inner_cond_bb, true, outer_cond_bb, false,
				true);
    }

  return false;
}
757
758/* Recognize a CFG pattern and dispatch to the appropriate
759 if-conversion helper. We start with BB as the innermost
760 worker basic-block. Returns true if a transformation was done. */
761
762static bool
763tree_ssa_ifcombine_bb (basic_block inner_cond_bb)
764{
765 basic_block then_bb = NULL, else_bb = NULL;
766
767 if (!recognize_if_then_else (cond_bb: inner_cond_bb, then_bb: &then_bb, else_bb: &else_bb))
768 return false;
769
770 /* Recognize && and || of two conditions with a common
771 then/else block which entry edges we can merge. That is:
772 if (a || b)
773 ;
774 and
775 if (a && b)
776 ;
777 This requires a single predecessor of the inner cond_bb. */
778 if (single_pred_p (bb: inner_cond_bb)
779 && bb_no_side_effects_p (bb: inner_cond_bb))
780 {
781 basic_block outer_cond_bb = single_pred (bb: inner_cond_bb);
782
783 if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb,
784 then_bb, else_bb, phi_pred_bb: inner_cond_bb))
785 return true;
786
787 if (forwarder_block_to (bb: else_bb, to_bb: then_bb))
788 {
789 /* Other possibilities for the && form, if else_bb is
790 empty forwarder block to then_bb. Compared to the above simpler
791 forms this can be treated as if then_bb and else_bb were swapped,
792 and the corresponding inner_cond_bb not inverted because of that.
793 For same_phi_args_p we look at equality of arguments between
794 edge from outer_cond_bb and the forwarder block. */
795 if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, then_bb: else_bb,
796 else_bb: then_bb, phi_pred_bb: else_bb))
797 return true;
798 }
799 else if (forwarder_block_to (bb: then_bb, to_bb: else_bb))
800 {
801 /* Other possibilities for the || form, if then_bb is
802 empty forwarder block to else_bb. Compared to the above simpler
803 forms this can be treated as if then_bb and else_bb were swapped,
804 and the corresponding inner_cond_bb not inverted because of that.
805 For same_phi_args_p we look at equality of arguments between
806 edge from outer_cond_bb and the forwarder block. */
807 if (tree_ssa_ifcombine_bb_1 (inner_cond_bb, outer_cond_bb, then_bb: else_bb,
808 else_bb: then_bb, phi_pred_bb: then_bb))
809 return true;
810 }
811 }
812
813 return false;
814}
815
816/* Main entry for the tree if-conversion pass. */
817
818namespace {
819
/* Pass descriptor consumed by the pass manager.  */
const pass_data pass_data_tree_ifcombine =
{
  GIMPLE_PASS, /* type */
  "ifcombine", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_IFCOMBINE, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
832
/* The if-combine pass object; all state lives in the cfun being
   processed, so no clone/gate hooks are needed.  */
class pass_tree_ifcombine : public gimple_opt_pass
{
public:
  pass_tree_ifcombine (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_tree_ifcombine, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute (function *) final override;

}; // class pass_tree_ifcombine
844
unsigned int
pass_tree_ifcombine::execute (function *fun)
{
  basic_block *bbs;
  bool cfg_changed = false;
  int i;

  bbs = single_pred_before_succ_order ();
  calculate_dominance_info (CDI_DOMINATORS);
  /* Flag SSA names that may be used uninitialized so
     bb_no_side_effects_p can refuse to speculate them.  */
  mark_ssa_maybe_undefs ();

  /* Search every basic block for COND_EXPR we may be able to optimize.

     We walk the blocks in order that guarantees that a block with
     a single predecessor is processed after the predecessor.
     This ensures that we collapse outer ifs before visiting the
     inner ones, and also that we do not try to visit a removed
     block.  This is opposite of PHI-OPT, because we cascade the
     combining rather than cascading PHIs.  */
  for (i = n_basic_blocks_for_fn (fun) - NUM_FIXED_BLOCKS - 1; i >= 0; i--)
    {
      basic_block bb = bbs[i];

      /* Only blocks ending in a GIMPLE_COND are candidates.  */
      if (safe_is_a <gcond *> (*gsi_last_bb (bb)))
	if (tree_ssa_ifcombine_bb (bb))
	  {
	    /* Clear range info from all stmts in BB which is now executed
	       conditional on a always true/false condition.  */
	    reset_flow_sensitive_info_in_bb (bb);
	    /* Stmts in BB are now executed speculatively, so arithmetic
	       with undefined signed overflow must be made well-defined.  */
	    for (gimple_stmt_iterator gsi = gsi_start_bb (bb); !gsi_end_p (gsi);
		 gsi_next (&gsi))
	      {
		gassign *ass = dyn_cast <gassign *> (gsi_stmt (gsi));
		if (!ass)
		  continue;
		tree lhs = gimple_assign_lhs (ass);
		if ((INTEGRAL_TYPE_P (TREE_TYPE (lhs))
		     || POINTER_TYPE_P (TREE_TYPE (lhs)))
		    && arith_code_with_undefined_signed_overflow
			 (gimple_assign_rhs_code (ass)))
		  rewrite_to_defined_overflow (&gsi);
	      }
	    cfg_changed |= true;
	  }
    }

  free (bbs);

  return cfg_changed ? TODO_cleanup_cfg : 0;
}
895
896} // anon namespace
897
898gimple_opt_pass *
899make_pass_tree_ifcombine (gcc::context *ctxt)
900{
901 return new pass_tree_ifcombine (ctxt);
902}
903

/* Source: gcc/tree-ssa-ifcombine.cc.  */