1/* Lower GIMPLE_SWITCH expressions to something more efficient than
2 a jump table.
3 Copyright (C) 2006-2023 Free Software Foundation, Inc.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it
8under the terms of the GNU General Public License as published by the
9Free Software Foundation; either version 3, or (at your option) any
10later version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT
13ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not, write to the Free
19Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
2002110-1301, USA. */
21
22/* This file handles the lowering of GIMPLE_SWITCH to an indexed
23 load, or a series of bit-test-and-branch expressions. */
24
25#include "config.h"
26#include "system.h"
27#include "coretypes.h"
28#include "backend.h"
29#include "insn-codes.h"
30#include "rtl.h"
31#include "tree.h"
32#include "gimple.h"
33#include "cfghooks.h"
34#include "tree-pass.h"
35#include "ssa.h"
36#include "optabs-tree.h"
37#include "cgraph.h"
38#include "gimple-pretty-print.h"
39#include "fold-const.h"
40#include "varasm.h"
41#include "stor-layout.h"
42#include "cfganal.h"
43#include "gimplify.h"
44#include "gimple-iterator.h"
45#include "gimplify-me.h"
46#include "gimple-fold.h"
47#include "tree-cfg.h"
48#include "cfgloop.h"
49#include "alloc-pool.h"
50#include "target.h"
51#include "tree-into-ssa.h"
52#include "omp-general.h"
53#include "gimple-range.h"
54#include "tree-cfgcleanup.h"
55
56/* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
57 type in the GIMPLE type system that is language-independent? */
58#include "langhooks.h"
59
60#include "tree-switch-conversion.h"
61
62using namespace tree_switch_conversion;
63
64/* Constructor. */
65
66switch_conversion::switch_conversion (): m_final_bb (NULL),
67 m_constructors (NULL), m_default_values (NULL),
68 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
69 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
70{
71}
72
/* Collect information about the SWTCH statement.  */
74
void
switch_conversion::collect (gswitch *swtch)
{
  unsigned int branch_num = gimple_switch_num_labels (gs: swtch);
  tree min_case, max_case;
  unsigned int i;
  edge e, e_default, e_first;
  edge_iterator ei;

  m_switch = swtch;

  /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
     is a default label which is the first in the vector.
     Collect the bits we can deduce from the CFG.  */
  m_index_expr = gimple_switch_index (gs: swtch);
  m_switch_bb = gimple_bb (g: swtch);
  e_default = gimple_switch_default_edge (cfun, swtch);
  m_default_bb = e_default->dest;
  m_default_prob = e_default->probability;

  /* Get upper and lower bounds of case values, and the covered range.  */
  min_case = gimple_switch_label (gs: swtch, index: 1);
  max_case = gimple_switch_label (gs: swtch, index: branch_num - 1);

  m_range_min = CASE_LOW (min_case);
  if (CASE_HIGH (max_case) != NULL_TREE)
    m_range_max = CASE_HIGH (max_case);
  else
    m_range_max = CASE_LOW (max_case);

  /* Determine whether the sorted case values form one gap-free range:
     each label must start exactly one past the end of its predecessor.  */
  m_contiguous_range = true;
  tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
  for (i = 2; i < branch_num; i++)
    {
      tree elt = gimple_switch_label (gs: swtch, index: i);
      if (wi::to_wide (t: last) + 1 != wi::to_wide (CASE_LOW (elt)))
	{
	  m_contiguous_range = false;
	  break;
	}
      last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
    }

  /* Choose the block whose successor chain seeds the FINAL_BB guess:
     the first real case when contiguous, the default case otherwise.  */
  if (m_contiguous_range)
    e_first = gimple_switch_edge (cfun, swtch, 1);
  else
    e_first = e_default;

  /* See if there is one common successor block for all branch
     targets.  If it exists, record it in FINAL_BB.
     Start with the destination of the first non-default case
     if the range is contiguous and default case otherwise as
     guess or its destination in case it is a forwarder block.  */
  if (! single_pred_p (bb: e_first->dest))
    m_final_bb = e_first->dest;
  else if (single_succ_p (bb: e_first->dest)
	   && ! single_pred_p (bb: single_succ (bb: e_first->dest)))
    m_final_bb = single_succ (bb: e_first->dest);
  /* Require that all switch destinations are either that common
     FINAL_BB or a forwarder to it, except for the default
     case if contiguous range.  */
  auto_vec<edge, 10> fw_edges;
  m_uniq = 0;
  if (m_final_bb)
    FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
      {
	/* PHI_E is the edge on which this case's value arrives in
	   FINAL_BB — either E itself or the forwarder's out-edge.  */
	edge phi_e = nullptr;
	if (e->dest == m_final_bb)
	  phi_e = e;
	else if (single_pred_p (bb: e->dest)
		 && single_succ_p (bb: e->dest)
		 && single_succ (bb: e->dest) == m_final_bb)
	  phi_e = single_succ_edge (bb: e->dest);
	if (phi_e)
	  {
	    if (e == e_default)
	      ;
	    else if (phi_e == e || empty_block_p (e->dest))
	      {
		/* For empty blocks consider forwarders with equal
		   PHI arguments in m_final_bb as unique.  */
		unsigned i;
		for (i = 0; i < fw_edges.length (); ++i)
		  if (phi_alternatives_equal (m_final_bb, fw_edges[i], phi_e))
		    break;
		if (i == fw_edges.length ())
		  {
		    /* But limit the above possibly quadratic search.  */
		    if (fw_edges.length () < 10)
		      fw_edges.quick_push (obj: phi_e);
		    m_uniq++;
		  }
	      }
	    else
	      m_uniq++;
	    continue;
	  }

	if (e == e_default && m_contiguous_range)
	  {
	    m_default_case_nonstandard = true;
	    continue;
	  }

	/* This destination neither is FINAL_BB nor forwards to it:
	   there is no common successor after all.  */
	m_final_bb = NULL;
	break;
      }

  /* When there's not a single common successor block conservatively
     approximate the number of unique non-default targets.  */
  if (!m_final_bb)
    m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;

  m_range_size
    = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);

  /* Get a count of the number of case labels.  Single-valued case labels
     simply count as one, but a case range counts double, since it may
     require two compares if it gets lowered as a branching tree.  */
  m_count = 0;
  for (i = 1; i < branch_num; i++)
    {
      tree elt = gimple_switch_label (gs: swtch, index: i);
      m_count++;
      if (CASE_HIGH (elt)
	  && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
	m_count++;
    }
}
204
/* Checks whether the range given by the individual case statements of the
   switch statement isn't too big and whether the number of branches actually
   satisfies the size of the new array.  */
208
209bool
210switch_conversion::check_range ()
211{
212 gcc_assert (m_range_size);
213 if (!tree_fits_uhwi_p (m_range_size))
214 {
215 m_reason = "index range way too large or otherwise unusable";
216 return false;
217 }
218
219 if (tree_to_uhwi (m_range_size)
220 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
221 {
222 m_reason = "the maximum range-branch ratio exceeded";
223 return false;
224 }
225
226 return true;
227}
228
229/* Checks whether all but the final BB basic blocks are empty. */
230
231bool
232switch_conversion::check_all_empty_except_final ()
233{
234 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
235 edge_iterator ei;
236
237 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
238 {
239 if (e->dest == m_final_bb)
240 continue;
241
242 if (!empty_block_p (e->dest))
243 {
244 if (m_contiguous_range && e == e_default)
245 {
246 m_default_case_nonstandard = true;
247 continue;
248 }
249
250 m_reason = "bad case - a non-final BB not empty";
251 return false;
252 }
253 }
254
255 return true;
256}
257
258/* This function checks whether all required values in phi nodes in final_bb
259 are constants. Required values are those that correspond to a basic block
260 which is a part of the examined switch statement. It returns true if the
261 phi nodes are OK, otherwise false. */
262
bool
switch_conversion::check_final_bb ()
{
  gphi_iterator gsi;

  m_phi_count = 0;
  for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
    {
      gphi *phi = gsi.phi ();
      unsigned int i;

      /* Virtual PHIs carry memory state, not values; they do not become
	 array elements.  */
      if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
	continue;

      m_phi_count++;

      for (i = 0; i < gimple_phi_num_args (gs: phi); i++)
	{
	  basic_block bb = gimple_phi_arg_edge (phi, i)->src;

	  /* Only arguments coming from the switch block itself or from
	     one of its (forwarder) destinations matter here.  */
	  if (bb == m_switch_bb
	      || (single_pred_p (bb)
		  && single_pred (bb) == m_switch_bb
		  && (!m_default_case_nonstandard
		      || empty_block_p (bb))))
	    {
	      tree reloc, val;
	      const char *reason = NULL;

	      val = gimple_phi_arg_def (gs: phi, index: i);
	      if (!is_gimple_ip_invariant (val))
		reason = "non-invariant value from a case";
	      else
		{
		  /* A value needing load-time relocation cannot go into a
		     read-only array under PIC, and a value that is not a
		     valid static initializer never can.  */
		  reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
		  if ((flag_pic && reloc != null_pointer_node)
		      || (!flag_pic && reloc == NULL_TREE))
		    {
		      if (reloc)
			reason
			  = "value from a case would need runtime relocations";
		      else
			reason
			  = "value from a case is not a valid initializer";
		    }
		}
	      if (reason)
		{
		  /* For contiguous range, we can allow non-constant
		     or one that needs relocation, as long as it is
		     only reachable from the default case.  */
		  if (bb == m_switch_bb)
		    bb = m_final_bb;
		  if (!m_contiguous_range || bb != m_default_bb)
		    {
		      m_reason = reason;
		      return false;
		    }

		  /* The offending block must not also be the target of any
		     case label, otherwise the default cannot be split off
		     as a nonstandard out-of-line path.  */
		  unsigned int branch_num = gimple_switch_num_labels (gs: m_switch);
		  for (unsigned int i = 1; i < branch_num; i++)
		    {
		      if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
			{
			  m_reason = reason;
			  return false;
			}
		    }
		  m_default_case_nonstandard = true;
		}
	    }
	}
    }

  return true;
}
339
340/* The following function allocates default_values, target_{in,out}_names and
341 constructors arrays. The last one is also populated with pointers to
342 vectors that will become constructors of new arrays. */
343
344void
345switch_conversion::create_temp_arrays ()
346{
347 int i;
348
349 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
350 /* ??? Macros do not support multi argument templates in their
351 argument list. We create a typedef to work around that problem. */
352 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
353 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
354 m_target_inbound_names = m_default_values + m_phi_count;
355 m_target_outbound_names = m_target_inbound_names + m_phi_count;
356 for (i = 0; i < m_phi_count; i++)
357 vec_alloc (v&: m_constructors[i], nelems: tree_to_uhwi (m_range_size) + 1);
358}
359
360/* Populate the array of default values in the order of phi nodes.
361 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
362 if the range is non-contiguous or the default case has standard
363 structure, otherwise it is the first non-default case instead. */
364
365void
366switch_conversion::gather_default_values (tree default_case)
367{
368 gphi_iterator gsi;
369 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
370 edge e;
371 int i = 0;
372
373 gcc_assert (CASE_LOW (default_case) == NULL_TREE
374 || m_default_case_nonstandard);
375
376 if (bb == m_final_bb)
377 e = find_edge (m_switch_bb, bb);
378 else
379 e = single_succ_edge (bb);
380
381 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
382 {
383 gphi *phi = gsi.phi ();
384 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
385 continue;
386 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
387 gcc_assert (val);
388 m_default_values[i++] = val;
389 }
390}
391
392/* The following function populates the vectors in the constructors array with
393 future contents of the static arrays. The vectors are populated in the
394 order of phi nodes. */
395
void
switch_conversion::build_constructors ()
{
  unsigned i, branch_num = gimple_switch_num_labels (gs: m_switch);
  /* POS walks the case-value range; POS_ONE is the constant 1 used to
     advance it.  */
  tree pos = m_range_min;
  tree pos_one = build_int_cst (TREE_TYPE (pos), 1);

  for (i = 1; i < branch_num; i++)
    {
      tree cs = gimple_switch_label (gs: m_switch, index: i);
      basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
      edge e;
      tree high;
      gphi_iterator gsi;
      int j;

      /* E is the edge through which this case's value reaches FINAL_BB,
	 possibly via a forwarder block.  */
      if (bb == m_final_bb)
	e = find_edge (m_switch_bb, bb);
      else
	e = single_succ_edge (bb);
      gcc_assert (e);

      /* Fill any gap before this case's CASE_LOW with default values,
	 one array slot per skipped index.  */
      while (tree_int_cst_lt (t1: pos, CASE_LOW (cs)))
	{
	  int k;
	  for (k = 0; k < m_phi_count; k++)
	    {
	      constructor_elt elt;

	      elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
	      elt.value
		= unshare_expr_without_location (m_default_values[k]);
	      m_constructors[k]->quick_push (obj: elt);
	    }

	  pos = int_const_binop (PLUS_EXPR, pos, pos_one);
	}
      gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));

      j = 0;
      if (CASE_HIGH (cs))
	high = CASE_HIGH (cs);
      else
	high = CASE_LOW (cs);
      /* For each non-virtual PHI push this case's value once for every
	 index in [CASE_LOW, HIGH]; the low < pos test guards against
	 overflow when HIGH is the maximum of the type.  */
      for (gsi = gsi_start_phis (m_final_bb);
	   !gsi_end_p (i: gsi); gsi_next (i: &gsi))
	{
	  gphi *phi = gsi.phi ();
	  if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
	    continue;
	  tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
	  tree low = CASE_LOW (cs);
	  pos = CASE_LOW (cs);

	  do
	    {
	      constructor_elt elt;

	      elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
	      elt.value = unshare_expr_without_location (val);
	      m_constructors[j]->quick_push (obj: elt);

	      pos = int_const_binop (PLUS_EXPR, pos, pos_one);
	    } while (!tree_int_cst_lt (t1: high, t2: pos)
		     && tree_int_cst_lt (t1: low, t2: pos));
	  j++;
	}
    }
}
465
/* If all values in the constructor vector are products of a linear function
   a * x + b, then return true.  When true, COEFF_A and COEFF_B are the
   coefficients of the linear function.  Note that equal values are a special
   case of a linear function with a and b equal to zero.  */
470
bool
switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
					       wide_int *coeff_a,
					       wide_int *coeff_b)
{
  unsigned int i;
  constructor_elt *elt;

  gcc_assert (vec->length () >= 2);

  /* Let's try to find any linear function a * x + y that can apply to
     given values. 'a' can be calculated as follows:

     a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
     a = y2 - y1

     and

     b = y2 - a * x2

  */

  tree elt0 = (*vec)[0].value;
  tree elt1 = (*vec)[1].value;

  if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
    return false;

  /* Convert the range minimum to the precision of the element values so
     the wide_int arithmetic below wraps the same way the generated
     MULT/PLUS statements will.  */
  wide_int range_min
    = wide_int::from (x: wi::to_wide (t: m_range_min),
		      TYPE_PRECISION (TREE_TYPE (elt0)),
		      TYPE_SIGN (TREE_TYPE (m_range_min)));
  wide_int y1 = wi::to_wide (t: elt0);
  wide_int y2 = wi::to_wide (t: elt1);
  wide_int a = y2 - y1;
  wide_int b = y2 - a * (range_min + 1);

  /* Verify that all values fulfill the linear function.  */
  FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
    {
      if (TREE_CODE (elt->value) != INTEGER_CST)
	return false;

      /* RANGE_MIN tracks the case index x of the current element.  */
      wide_int value = wi::to_wide (t: elt->value);
      if (a * range_min + b != value)
	return false;

      ++range_min;
    }

  *coeff_a = a;
  *coeff_b = b;

  return true;
}
526
527/* Return type which should be used for array elements, either TYPE's
528 main variant or, for integral types, some smaller integral type
529 that can still hold all the constants. */
530
tree
switch_conversion::array_value_type (tree type, int num)
{
  unsigned int i, len = vec_safe_length (v: m_constructors[num]);
  constructor_elt *elt;
  /* SIGN tracks which representation still fits every constant seen so
     far: 0 = both signed and unsigned, 1 = unsigned only, -1 = signed
     only.  */
  int sign = 0;
  tree smaller_type;

  /* Types with alignments greater than their size can reach here, e.g. out of
     SRA.  We couldn't use these as an array component type so get back to the
     main variant first, which, for our purposes, is fine for other types as
     well.  */

  type = TYPE_MAIN_VARIANT (type);

  if (!INTEGRAL_TYPE_P (type))
    return type;

  scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
  scalar_int_mode mode = get_narrowest_mode (mode: type_mode);
  if (GET_MODE_SIZE (mode: type_mode) <= GET_MODE_SIZE (mode))
    return type;

  /* Narrowing only pays off for reasonably large arrays; when optimizing
     the block for size, even two elements are worth it.  */
  if (len < (optimize_bb_for_size_p (gimple_bb (g: m_switch)) ? 2 : 32))
    return type;

  FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
    {
      wide_int cst;

      if (TREE_CODE (elt->value) != INTEGER_CST)
	return type;

      cst = wi::to_wide (t: elt->value);
      /* Find the narrowest mode in which CST fits, widening MODE as
	 needed and keeping SIGN consistent with all earlier values.  */
      while (1)
	{
	  unsigned int prec = GET_MODE_BITSIZE (mode);
	  if (prec > HOST_BITS_PER_WIDE_INT)
	    return type;

	  if (sign >= 0 && cst == wi::zext (x: cst, offset: prec))
	    {
	      /* Fits zero-extended; if it also fits sign-extended we can
		 stay undecided, else commit to unsigned.  */
	      if (sign == 0 && cst == wi::sext (x: cst, offset: prec))
		break;
	      sign = 1;
	      break;
	    }
	  if (sign <= 0 && cst == wi::sext (x: cst, offset: prec))
	    {
	      sign = -1;
	      break;
	    }

	  if (sign == 1)
	    sign = 0;

	  /* CST does not fit this mode; try the next wider one, giving up
	     once we would reach the original width anyway.  */
	  if (!GET_MODE_WIDER_MODE (m: mode).exists (mode: &mode)
	      || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (mode: type_mode))
	    return type;
	}
    }

  if (sign == 0)
    sign = TYPE_UNSIGNED (type) ? 1 : -1;
  smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
  /* Only use the narrower type if it is actually smaller.  */
  if (GET_MODE_SIZE (mode: type_mode)
      <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
    return type;

  return smaller_type;
}
602
603/* Create an appropriate array type and declaration and assemble a static
604 array variable. Also create a load statement that initializes
605 the variable in question with a value from the static array. SWTCH is
606 the switch statement being converted, NUM is the index to
607 arrays of constructors, default values and target SSA names
608 for this particular array. ARR_INDEX_TYPE is the type of the index
609 of the new array, PHI is the phi node of the final BB that corresponds
610 to the value that will be loaded from the created array. TIDX
611 is an ssa name of a temporary variable holding the index for loads from the
612 new array. */
613
void
switch_conversion::build_one_array (int num, tree arr_index_type,
				    gphi *phi, tree tidx)
{
  tree name;
  gimple *load;
  gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
  location_t loc = gimple_location (g: m_switch);

  gcc_assert (m_default_values[num]);

  /* NAME is the SSA name that will carry the looked-up value on the
     in-range path.  */
  name = copy_ssa_name (PHI_RESULT (phi));
  m_target_inbound_names[num] = name;

  vec<constructor_elt, va_gc> *constructor = m_constructors[num];
  wide_int coeff_a, coeff_b;
  bool linear_p = contains_linear_function_p (vec: constructor, coeff_a: &coeff_a, coeff_b: &coeff_b);
  tree type;
  if (linear_p
      && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
    {
      /* All array elements are A * index + B: emit that arithmetic
	 directly instead of materializing a static array.  */
      if (dump_file && coeff_a.to_uhwi () > 0)
	fprintf (stream: dump_file, format: "Linear transformation with A = %" PRId64
		 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
		 coeff_b.to_shwi ());

      /* We must use type of constructor values.  */
      gimple_seq seq = NULL;
      tree tmp = gimple_convert (seq: &seq, type, op: m_index_expr);
      tree tmp2 = gimple_build (seq: &seq, code: MULT_EXPR, type,
				ops: wide_int_to_tree (type, cst: coeff_a), ops: tmp);
      tree tmp3 = gimple_build (seq: &seq, code: PLUS_EXPR, type, ops: tmp2,
				ops: wide_int_to_tree (type, cst: coeff_b));
      tree tmp4 = gimple_convert (seq: &seq, TREE_TYPE (name), op: tmp3);
      gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
      load = gimple_build_assign (name, tmp4);
    }
  else
    {
      tree array_type, ctor, decl, value_type, fetch, default_type;

      /* Possibly narrow the element type before building the array.  */
      default_type = TREE_TYPE (m_default_values[num]);
      value_type = array_value_type (type: default_type, num);
      array_type = build_array_type (value_type, arr_index_type);
      if (default_type != value_type)
	{
	  unsigned int i;
	  constructor_elt *elt;

	  FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
	    elt->value = fold_convert (value_type, elt->value);
	}
      ctor = build_constructor (array_type, constructor);
      TREE_CONSTANT (ctor) = true;
      TREE_STATIC (ctor) = true;

      /* Create the static read-only CSWTCH array holding the values.  */
      decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
      TREE_STATIC (decl) = 1;
      DECL_INITIAL (decl) = ctor;

      DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
      DECL_ARTIFICIAL (decl) = 1;
      DECL_IGNORED_P (decl) = 1;
      TREE_CONSTANT (decl) = 1;
      TREE_READONLY (decl) = 1;
      DECL_IGNORED_P (decl) = 1;
      /* Make the table available in offloaded (OpenMP/OpenACC target)
	 code as well.  */
      if (offloading_function_p (cfun->decl))
	DECL_ATTRIBUTES (decl)
	  = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
		       NULL_TREE);
      varpool_node::finalize_decl (decl);

      fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
		      NULL_TREE);
      if (default_type != value_type)
	{
	  /* Widen the loaded element back to the PHI's type.  */
	  fetch = fold_convert (default_type, fetch);
	  fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	}
      load = gimple_build_assign (name, fetch);
    }

  gsi_insert_before (&gsi, load, GSI_SAME_STMT);
  update_stmt (s: load);
  m_arr_ref_last = load;
}
701
702/* Builds and initializes static arrays initialized with values gathered from
703 the switch statement. Also creates statements that load values from
704 them. */
705
void
switch_conversion::build_arrays ()
{
  tree arr_index_type;
  tree tidx, sub, utype;
  gimple *stmt;
  gimple_stmt_iterator gsi;
  gphi_iterator gpi;
  int i;
  location_t loc = gimple_location (g: m_switch);

  gsi = gsi_for_stmt (m_switch);

  /* Make sure we do not generate arithmetics in a subrange.  */
  utype = TREE_TYPE (m_index_expr);
  /* NOTE(review): a non-NULL TREE_TYPE here presumably marks a
     subrange-like type with a recorded base type; in that case the
     unsigned type is taken from the base's mode — confirm against
     tree.h.  */
  if (TREE_TYPE (utype))
    utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
  else
    utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);

  /* TIDX = (utype) index - (utype) range_min gives the zero-based
     position used to index all generated arrays.  */
  arr_index_type = build_index_type (m_range_size);
  tidx = make_ssa_name (var: utype);
  sub = fold_build2_loc (loc, MINUS_EXPR, utype,
			 fold_convert_loc (loc, utype, m_index_expr),
			 fold_convert_loc (loc, utype, m_range_min));
  sub = force_gimple_operand_gsi (&gsi, sub,
				  false, NULL, true, GSI_SAME_STMT);
  stmt = gimple_build_assign (tidx, sub);

  gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
  update_stmt (s: stmt);
  m_arr_ref_first = stmt;

  /* Build one array (or linear transform) per non-virtual PHI; for the
     virtual PHI only record the virtual operand reaching FINAL_BB.  */
  for (gpi = gsi_start_phis (m_final_bb), i = 0;
       !gsi_end_p (i: gpi); gsi_next (i: &gpi))
    {
      gphi *phi = gpi.phi ();
      if (!virtual_operand_p (op: gimple_phi_result (gs: phi)))
	build_one_array (num: i++, arr_index_type, phi, tidx);
      else
	{
	  edge e;
	  edge_iterator ei;
	  /* Locate some edge into FINAL_BB, stepping through a forwarder
	     block when the successor is not FINAL_BB directly.  */
	  FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
	    {
	      if (e->dest == m_final_bb)
		break;
	      if (!m_default_case_nonstandard
		  || e->dest != m_default_bb)
		{
		  e = single_succ_edge (bb: e->dest);
		  break;
		}
	    }
	  gcc_assert (e && e->dest == m_final_bb);
	  m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
	}
    }
}
765
766/* Generates and appropriately inserts loads of default values at the position
767 given by GSI. Returns the last inserted statement. */
768
769gassign *
770switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
771{
772 int i;
773 gassign *assign = NULL;
774
775 for (i = 0; i < m_phi_count; i++)
776 {
777 tree name = copy_ssa_name (var: m_target_inbound_names[i]);
778 m_target_outbound_names[i] = name;
779 assign = gimple_build_assign (name, m_default_values[i]);
780 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
781 update_stmt (s: assign);
782 }
783 return assign;
784}
785
786/* Deletes the unused bbs and edges that now contain the switch statement and
787 its empty branch bbs. BBD is the now dead BB containing
788 the original switch statement, FINAL is the last BB of the converted
789 switch statement (in terms of succession). */
790
791void
792switch_conversion::prune_bbs (basic_block bbd, basic_block final,
793 basic_block default_bb)
794{
795 edge_iterator ei;
796 edge e;
797
798 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (i: ei)); )
799 {
800 basic_block bb;
801 bb = e->dest;
802 remove_edge (e);
803 if (bb != final && bb != default_bb)
804 delete_basic_block (bb);
805 }
806 delete_basic_block (bbd);
807}
808
809/* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
810 from the basic block loading values from an array and E2F from the basic
811 block loading default values. BBF is the last switch basic block (see the
812 bbf description in the comment below). */
813
814void
815switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
816{
817 gphi_iterator gsi;
818 int i;
819
820 for (gsi = gsi_start_phis (bbf), i = 0;
821 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
822 {
823 gphi *phi = gsi.phi ();
824 tree inbound, outbound;
825 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
826 inbound = outbound = m_target_vop;
827 else
828 {
829 inbound = m_target_inbound_names[i];
830 outbound = m_target_outbound_names[i++];
831 }
832 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
833 if (!m_default_case_nonstandard)
834 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
835 }
836}
837
838/* Creates a check whether the switch expression value actually falls into the
839 range given by all the cases. If it does not, the temporaries are loaded
840 with default values instead. */
841
void
switch_conversion::gen_inbound_check ()
{
  tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
  tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
  tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
  glabel *label1, *label2, *label3;
  tree utype, tidx;
  tree bound;

  gcond *cond_stmt;

  gassign *last_assign = NULL;
  gimple_stmt_iterator gsi;
  basic_block bb0, bb1, bb2, bbf, bbd;
  edge e01 = NULL, e02, e21, e1d, e1f, e2f;
  location_t loc = gimple_location (g: m_switch);

  gcc_assert (m_default_values);

  bb0 = gimple_bb (g: m_switch);

  tidx = gimple_assign_lhs (gs: m_arr_ref_first);
  utype = TREE_TYPE (tidx);

  /* (end of) block 0 */
  gsi = gsi_for_stmt (m_arr_ref_first);
  gsi_next (i: &gsi);

  /* Emit "if (tidx <= range_size)".  TIDX is already biased by
     range_min, so this single unsigned comparison covers both bounds.  */
  bound = fold_convert_loc (loc, utype, m_range_size);
  cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
  gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
  update_stmt (s: cond_stmt);

  /* block 2 */
  if (!m_default_case_nonstandard)
    {
      label2 = gimple_build_label (label: label_decl2);
      gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
      last_assign = gen_def_assigns (gsi: &gsi);
    }

  /* block 1 */
  label1 = gimple_build_label (label: label_decl1);
  gsi_insert_before (&gsi, label1, GSI_SAME_STMT);

  /* block F */
  gsi = gsi_start_bb (bb: m_final_bb);
  label3 = gimple_build_label (label: label_decl3);
  gsi_insert_before (&gsi, label3, GSI_SAME_STMT);

  /* cfg fix */
  e02 = split_block (bb0, cond_stmt);
  bb2 = e02->dest;

  if (m_default_case_nonstandard)
    {
      /* Out-of-range values jump to the original default block; copy its
	 PHI arguments from the old default edge onto the new edge.  */
      bb1 = bb2;
      bb2 = m_default_bb;
      e01 = e02;
      e01->flags = EDGE_TRUE_VALUE;
      e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
      edge e_default = find_edge (bb1, bb2);
      for (gphi_iterator gsi = gsi_start_phis (bb2);
	   !gsi_end_p (i: gsi); gsi_next (i: &gsi))
	{
	  gphi *phi = gsi.phi ();
	  tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
	  add_phi_arg (phi, arg, e02,
		       gimple_phi_arg_location_from_edge (phi, e: e_default));
	}
      /* Partially fix the dominator tree, if it is available.  */
      if (dom_info_available_p (CDI_DOMINATORS))
	redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
    }
  else
    {
      e21 = split_block (bb2, last_assign);
      bb1 = e21->dest;
      remove_edge (e21);
    }

  /* Split off the remainder of bb1 after the last array load; that
     remainder (bbd) holds the now-dead original switch.  */
  e1d = split_block (bb1, m_arr_ref_last);
  bbd = e1d->dest;
  remove_edge (e1d);

  /* Flags and profiles of the edge for in-range values.  */
  if (!m_default_case_nonstandard)
    e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
  e01->probability = m_default_prob.invert ();

  /* Flags and profiles of the edge taking care of out-of-range values.  */
  e02->flags &= ~EDGE_FALLTHRU;
  e02->flags |= EDGE_FALSE_VALUE;
  e02->probability = m_default_prob;

  bbf = m_final_bb;

  e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
  e1f->probability = profile_probability::always ();

  if (m_default_case_nonstandard)
    e2f = NULL;
  else
    {
      e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
      e2f->probability = profile_probability::always ();
    }

  /* frequencies of the new BBs */
  bb1->count = e01->count ();
  bb2->count = e02->count ();
  if (!m_default_case_nonstandard)
    bbf->count = e1f->count () + e2f->count ();

  /* Tidy blocks that have become unreachable.  */
  prune_bbs (bbd, final: m_final_bb,
	     default_bb: m_default_case_nonstandard ? m_default_bb : NULL);

  /* Fixup the PHI nodes in bbF.  */
  fix_phi_nodes (e1f, e2f, bbf);

  /* Fix the dominator tree, if it is available.  */
  if (dom_info_available_p (CDI_DOMINATORS))
    {
      vec<basic_block> bbs_to_fix_dom;

      set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
      if (!m_default_case_nonstandard)
	set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
      if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
	/* If bbD was the immediate dominator ... */
	set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);

      bbs_to_fix_dom.create (nelems: 3 + (bb2 != bbf));
      bbs_to_fix_dom.quick_push (obj: bb0);
      bbs_to_fix_dom.quick_push (obj: bb1);
      if (bb2 != bbf)
	bbs_to_fix_dom.quick_push (obj: bb2);
      bbs_to_fix_dom.quick_push (obj: bbf);

      iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
      bbs_to_fix_dom.release ();
    }
}
987
988/* The following function is invoked on every switch statement (the current
989 one is given in SWTCH) and runs the individual phases of switch
990 conversion on it one after another until one fails or the conversion
991 is completed. On success, NULL is in m_reason, otherwise points
992 to a string with the reason why the conversion failed. */
993
void
switch_conversion::expand (gswitch *swtch)
{
  /* Group case labels so that we get the right results from the heuristics
     that decide on the code generation approach for this switch.  */
  m_cfg_altered |= group_case_labels_stmt (swtch);

  /* If this switch is now a degenerate case with only a default label,
     there is nothing left for us to do.  */
  if (gimple_switch_num_labels (gs: swtch) < 2)
    {
      m_reason = "switch is a degenerate case";
      return;
    }

  collect (swtch);

  /* No error markers should reach here (they should be filtered out
     during gimplification).  */
  gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);

  /* Prefer bit test if possible.  */
  if (tree_fits_uhwi_p (m_range_size)
      && bit_test_cluster::can_be_handled (range: tree_to_uhwi (m_range_size), uniq: m_uniq)
      && bit_test_cluster::is_beneficial (count: m_count, uniq: m_uniq))
    {
      m_reason = "expanding as bit test is preferable";
      return;
    }

  if (m_uniq <= 2)
    {
      /* This will be expanded as a decision tree.  */
      m_reason = "expanding as jumps is preferable";
      return;
    }

  /* If there is no common successor, we cannot do the transformation.  */
  if (!m_final_bb)
    {
      m_reason = "no common successor to all case label target blocks found";
      return;
    }

  /* Check the case label values are within reasonable range:  */
  if (!check_range ())
    {
      gcc_assert (m_reason);
      return;
    }

  /* For all the cases, see whether they are empty, the assignments they
     represent constant and so on...  */
  if (!check_all_empty_except_final ())
    {
      gcc_assert (m_reason);
      return;
    }
  if (!check_final_bb ())
    {
      gcc_assert (m_reason);
      return;
    }

  /* At this point all checks have passed and we can proceed with the
     transformation.  */

  create_temp_arrays ();
  /* With a nonstandard default the defaults come from the first real
     case label instead of the default label.  */
  gather_default_values (default_case: m_default_case_nonstandard
			 ? gimple_switch_label (gs: swtch, index: 1)
			 : gimple_switch_default_label (gs: swtch));
  build_constructors ();

  build_arrays (); /* Build the static arrays and assignments.  */
  gen_inbound_check ();	/* Build the bounds check.  */

  m_cfg_altered = true;
}
1072
1073/* Destructor. */
1074
1075switch_conversion::~switch_conversion ()
1076{
1077 XDELETEVEC (m_constructors);
1078 XDELETEVEC (m_default_values);
1079}
1080
1081/* Constructor. */
1082
1083group_cluster::group_cluster (vec<cluster *> &clusters,
1084 unsigned start, unsigned end)
1085{
1086 gcc_checking_assert (end - start + 1 >= 1);
1087 m_prob = profile_probability::never ();
1088 m_cases.create (nelems: end - start + 1);
1089 for (unsigned i = start; i <= end; i++)
1090 {
1091 m_cases.quick_push (obj: static_cast<simple_cluster *> (clusters[i]));
1092 m_prob += clusters[i]->m_prob;
1093 }
1094 m_subtree_prob = m_prob;
1095}
1096
1097/* Destructor. */
1098
1099group_cluster::~group_cluster ()
1100{
1101 for (unsigned i = 0; i < m_cases.length (); i++)
1102 delete m_cases[i];
1103
1104 m_cases.release ();
1105}
1106
1107/* Dump content of a cluster. */
1108
1109void
1110group_cluster::dump (FILE *f, bool details)
1111{
1112 unsigned total_values = 0;
1113 for (unsigned i = 0; i < m_cases.length (); i++)
1114 total_values += m_cases[i]->get_range (low: m_cases[i]->get_low (),
1115 high: m_cases[i]->get_high ());
1116
1117 unsigned comparison_count = 0;
1118 for (unsigned i = 0; i < m_cases.length (); i++)
1119 {
1120 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1121 comparison_count += sc->get_comparison_count ();
1122 }
1123
1124 unsigned HOST_WIDE_INT range = get_range (low: get_low (), high: get_high ());
1125 fprintf (stream: f, format: "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1126
1127 if (details)
1128 fprintf (stream: f, format: "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1129 " density: %.2f%%)", total_values, comparison_count, range,
1130 100.0f * comparison_count / range);
1131
1132 fprintf (stream: f, format: ":");
1133 PRINT_CASE (f, get_low ());
1134 fprintf (stream: f, format: "-");
1135 PRINT_CASE (f, get_high ());
1136 fprintf (stream: f, format: " ");
1137}
1138
1139/* Emit GIMPLE code to handle the cluster. */
1140
void
jump_table_cluster::emit (tree index_expr, tree,
			  tree default_label_expr, basic_block default_bb,
			  location_t loc)
{
  tree low = get_low ();
  unsigned HOST_WIDE_INT range = get_range (low, high: get_high ());
  /* Running total of values covered by non-default cases; used at the
     end to derive the default edge's probability.  */
  unsigned HOST_WIDE_INT nondefault_range = 0;
  bool bitint = false;
  gimple_stmt_iterator gsi = gsi_start_bb (bb: m_case_bb);

  /* For large/huge _BitInt, subtract low from index_expr, cast to unsigned
     DImode type (get_range doesn't support ranges larger than 64-bits)
     and subtract low from all case values as well.  */
  if (TREE_CODE (TREE_TYPE (index_expr)) == BITINT_TYPE
      && TYPE_PRECISION (TREE_TYPE (index_expr)) > GET_MODE_PRECISION (DImode))
    {
      bitint = true;
      tree this_low = low, type;
      gimple *g;
      gimple_seq seq = NULL;
      if (!TYPE_OVERFLOW_WRAPS (TREE_TYPE (index_expr)))
	{
	  /* Work in the unsigned variant so the subtraction below cannot
	     trigger signed overflow.  */
	  type = unsigned_type_for (TREE_TYPE (index_expr));
	  index_expr = gimple_convert (seq: &seq, type, op: index_expr);
	  this_low = fold_convert (type, this_low);
	}
      /* index_expr = index_expr - low, emitted as index_expr + (-low).  */
      this_low = const_unop (NEGATE_EXPR, TREE_TYPE (this_low), this_low);
      index_expr = gimple_build (seq: &seq, code: PLUS_EXPR, TREE_TYPE (index_expr),
				 ops: index_expr, ops: this_low);
      /* Anything above the maximum of an unsigned DImode value can only
	 hit the default label; branch there first.  */
      type = build_nonstandard_integer_type (GET_MODE_PRECISION (DImode), 1);
      g = gimple_build_cond (GT_EXPR, index_expr,
			     fold_convert (TREE_TYPE (index_expr),
					   TYPE_MAX_VALUE (type)),
			     NULL_TREE, NULL_TREE);
      gimple_seq_add_stmt (&seq, g);
      gimple_seq_set_location (seq, loc);
      gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
      edge e1 = split_block (m_case_bb, g);
      e1->flags = EDGE_FALSE_VALUE;
      e1->probability = profile_probability::likely ();
      edge e2 = make_edge (e1->src, default_bb, EDGE_TRUE_VALUE);
      e2->probability = e1->probability.invert ();
      /* Continue emitting into the in-range block, with the index
	 narrowed to the unsigned DImode type.  */
      gsi = gsi_start_bb (bb: e1->dest);
      seq = NULL;
      index_expr = gimple_convert (seq: &seq, type, op: index_expr);
      gimple_seq_set_location (seq, loc);
      gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
    }

  /* For jump table we just emit a new gswitch statement that will
     be latter lowered to jump table.  */
  auto_vec <tree> labels;
  labels.create (nelems: m_cases.length ());

  basic_block case_bb = gsi_bb (i: gsi);
  make_edge (case_bb, default_bb, 0);
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      tree lab = unshare_expr (m_cases[i]->m_case_label_expr);
      if (bitint)
	{
	  /* Rebase each case value by LOW to match the rebased index
	     computed above.  */
	  CASE_LOW (lab)
	    = fold_convert (TREE_TYPE (index_expr),
			    const_binop (MINUS_EXPR,
					 TREE_TYPE (CASE_LOW (lab)),
					 CASE_LOW (lab), low));
	  if (CASE_HIGH (lab))
	    CASE_HIGH (lab)
	      = fold_convert (TREE_TYPE (index_expr),
			      const_binop (MINUS_EXPR,
					   TREE_TYPE (CASE_HIGH (lab)),
					   CASE_HIGH (lab), low));
	}
      labels.quick_push (obj: lab);
      make_edge (case_bb, m_cases[i]->m_case_bb, 0);
    }

  gswitch *s = gimple_build_switch (index_expr,
				    unshare_expr (default_label_expr), labels);
  gimple_set_location (g: s, location: loc);
  gsi_insert_after (&gsi, s, GSI_NEW_STMT);

  /* Set up even probabilities for all cases.  */
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
      edge case_edge = find_edge (case_bb, sc->m_case_bb);
      unsigned HOST_WIDE_INT case_range
	= sc->get_range (low: sc->get_low (), high: sc->get_high ());
      nondefault_range += case_range;

      /* case_edge->aux is number of values in a jump-table that are covered
	 by the case_edge.  */
      case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
    }

  edge default_edge = gimple_switch_default_edge (cfun, s);
  default_edge->probability = profile_probability::never ();

  /* Weight each case edge by the number of table slots it covers.  */
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
      edge case_edge = find_edge (case_bb, sc->m_case_bb);
      case_edge->probability
	= profile_probability::always ().apply_scale (num: (intptr_t)case_edge->aux,
						      den: range);
    }

  /* Number of non-default values is probability of default edge.  */
  default_edge->probability
    += profile_probability::always ().apply_scale (num: nondefault_range,
						   den: range).invert ();

  switch_decision_tree::reset_out_edges_aux (swtch: s);
}
1257
1258/* Find jump tables of given CLUSTERS, where all members of the vector
1259 are of type simple_cluster. New clusters are returned. */
1260
vec<cluster *>
jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
{
  if (!is_enabled ())
    return clusters.copy ();

  /* Dynamic programming: min[i] records, for the prefix of the first i
     clusters, the smallest number of output clusters achievable, the
     start index of the last group in that solution, and the number of
     cases that end up outside jump tables.  */
  unsigned l = clusters.length ();
  auto_vec<min_cluster_item> min;
  min.reserve (nelems: l + 1);

  min.quick_push (obj: min_cluster_item (0, 0, 0));

  unsigned HOST_WIDE_INT max_ratio
    = (optimize_insn_for_size_p ()
       ? param_jump_table_max_growth_ratio_for_size
       : param_jump_table_max_growth_ratio_for_speed);

  for (unsigned i = 1; i <= l; i++)
    {
      /* Set minimal # of clusters with i-th item to infinite.  */
      min.quick_push (obj: min_cluster_item (INT_MAX, INT_MAX, INT_MAX));

      /* Pre-calculate number of comparisons for the clusters.  */
      HOST_WIDE_INT comparison_count = 0;
      for (unsigned k = 0; k <= i - 1; k++)
	{
	  simple_cluster *sc = static_cast<simple_cluster *> (clusters[k]);
	  comparison_count += sc->get_comparison_count ();
	}

      /* Try every split point j: clusters [j, i-1] become one candidate
	 jump table; comparison_count is decremented incrementally so it
	 always covers exactly that candidate.  */
      for (unsigned j = 0; j < i; j++)
	{
	  unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
	  if (i - j < case_values_threshold ())
	    s += i - j;

	  /* Prefer clusters with smaller number of numbers covered.  */
	  if ((min[j].m_count + 1 < min[i].m_count
	       || (min[j].m_count + 1 == min[i].m_count
		   && s < min[i].m_non_jt_cases))
	      && can_be_handled (clusters, start: j, end: i - 1, max_ratio,
				 comparison_count))
	    min[i] = min_cluster_item (min[j].m_count + 1, j, s);

	  simple_cluster *sc = static_cast<simple_cluster *> (clusters[j]);
	  comparison_count -= sc->get_comparison_count ();
	}

      gcc_checking_assert (comparison_count == 0);
      gcc_checking_assert (min[i].m_count != INT_MAX);
    }

  /* No result.  */
  if (min[l].m_count == l)
    return clusters.copy ();

  vec<cluster *> output;
  output.create (nelems: 4);

  /* Find and build the clusters.  Walk the DP chain backwards from the
     full prefix, so the groups are produced last-first and reversed at
     the end.  */
  for (unsigned int end = l;;)
    {
      int start = min[end].m_start;

      /* Do not allow clusters with small number of cases.  */
      if (is_beneficial (clusters, start, end: end - 1))
	output.safe_push (obj: new jump_table_cluster (clusters, start, end - 1));
      else
	for (int i = end - 1; i >= start; i--)
	  output.safe_push (obj: clusters[i]);

      end = start;

      if (start <= 0)
	break;
    }

  output.reverse ();
  return output;
}
1341
1342/* Return true when cluster starting at START and ending at END (inclusive)
1343 can build a jump-table. */
1344
1345bool
1346jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1347 unsigned start, unsigned end,
1348 unsigned HOST_WIDE_INT max_ratio,
1349 unsigned HOST_WIDE_INT comparison_count)
1350{
1351 /* If the switch is relatively small such that the cost of one
1352 indirect jump on the target are higher than the cost of a
1353 decision tree, go with the decision tree.
1354
1355 If range of values is much bigger than number of values,
1356 or if it is too large to represent in a HOST_WIDE_INT,
1357 make a sequence of conditional branches instead of a dispatch.
1358
1359 The definition of "much bigger" depends on whether we are
1360 optimizing for size or for speed.
1361
1362 For algorithm correctness, jump table for a single case must return
1363 true. We bail out in is_beneficial if it's called just for
1364 a single case. */
1365 if (start == end)
1366 return true;
1367
1368 unsigned HOST_WIDE_INT range = get_range (low: clusters[start]->get_low (),
1369 high: clusters[end]->get_high ());
1370 /* Check overflow. */
1371 if (range == 0)
1372 return false;
1373
1374 if (range > HOST_WIDE_INT_M1U / 100)
1375 return false;
1376
1377 unsigned HOST_WIDE_INT lhs = 100 * range;
1378 if (lhs < range)
1379 return false;
1380
1381 return lhs <= max_ratio * comparison_count;
1382}
1383
1384/* Return true if cluster starting at START and ending at END (inclusive)
1385 is profitable transformation. */
1386
1387bool
1388jump_table_cluster::is_beneficial (const vec<cluster *> &,
1389 unsigned start, unsigned end)
1390{
1391 /* Single case bail out. */
1392 if (start == end)
1393 return false;
1394
1395 return end - start + 1 >= case_values_threshold ();
1396}
1397
1398/* Find bit tests of given CLUSTERS, where all members of the vector
1399 are of type simple_cluster. New clusters are returned. */
1400
vec<cluster *>
bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
{
  if (!is_enabled ())
    return clusters.copy ();

  /* Dynamic programming analogous to find_jump_tables: min[i] holds the
     smallest achievable number of output clusters for the first i input
     clusters, together with the start of the last group.  */
  unsigned l = clusters.length ();
  auto_vec<min_cluster_item> min;
  min.reserve (nelems: l + 1);

  min.quick_push (obj: min_cluster_item (0, 0, 0));

  for (unsigned i = 1; i <= l; i++)
    {
      /* Set minimal # of clusters with i-th item to infinite.  */
      min.quick_push (obj: min_cluster_item (INT_MAX, INT_MAX, INT_MAX));

      /* Try every split point j where [j, i-1] becomes one bit test.  */
      for (unsigned j = 0; j < i; j++)
	{
	  if (min[j].m_count + 1 < min[i].m_count
	      && can_be_handled (clusters, start: j, end: i - 1))
	    min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
	}

      gcc_checking_assert (min[i].m_count != INT_MAX);
    }

  /* No result.  */
  if (min[l].m_count == l)
    return clusters.copy ();

  vec<cluster *> output;
  output.create (nelems: 4);

  /* Find and build the clusters by walking the DP chain backwards;
     the output is reversed into original order afterwards.  */
  for (unsigned end = l;;)
    {
      int start = min[end].m_start;

      if (is_beneficial (clusters, start, end: end - 1))
	{
	  /* Record whether this one bit test covers the whole switch,
	     which allows merging the entry range check into the test.  */
	  bool entire = start == 0 && end == clusters.length ();
	  output.safe_push (obj: new bit_test_cluster (clusters, start, end - 1,
						 entire));
	}
      else
	for (int i = end - 1; i >= start; i--)
	  output.safe_push (obj: clusters[i]);

      end = start;

      if (start <= 0)
	break;
    }

  output.reverse ();
  return output;
}
1459
1460/* Return true when RANGE of case values with UNIQ labels
1461 can build a bit test. */
1462
1463bool
1464bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1465 unsigned int uniq)
1466{
1467 /* Check overflow. */
1468 if (range == 0)
1469 return false;
1470
1471 if (range >= GET_MODE_BITSIZE (mode: word_mode))
1472 return false;
1473
1474 return uniq <= m_max_case_bit_tests;
1475}
1476
1477/* Return true when cluster starting at START and ending at END (inclusive)
1478 can build a bit test. */
1479
1480bool
1481bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1482 unsigned start, unsigned end)
1483{
1484 auto_vec<int, m_max_case_bit_tests> dest_bbs;
1485 /* For algorithm correctness, bit test for a single case must return
1486 true. We bail out in is_beneficial if it's called just for
1487 a single case. */
1488 if (start == end)
1489 return true;
1490
1491 unsigned HOST_WIDE_INT range = get_range (low: clusters[start]->get_low (),
1492 high: clusters[end]->get_high ());
1493
1494 /* Make a guess first. */
1495 if (!can_be_handled (range, uniq: m_max_case_bit_tests))
1496 return false;
1497
1498 for (unsigned i = start; i <= end; i++)
1499 {
1500 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1501 /* m_max_case_bit_tests is very small integer, thus the operation
1502 is constant. */
1503 if (!dest_bbs.contains (search: sc->m_case_bb->index))
1504 {
1505 if (dest_bbs.length () >= m_max_case_bit_tests)
1506 return false;
1507 dest_bbs.quick_push (obj: sc->m_case_bb->index);
1508 }
1509 }
1510
1511 return true;
1512}
1513
1514/* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1515 transformation. */
1516
1517bool
1518bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1519{
1520 return (((uniq == 1 && count >= 3)
1521 || (uniq == 2 && count >= 5)
1522 || (uniq == 3 && count >= 6)));
1523}
1524
1525/* Return true if cluster starting at START and ending at END (inclusive)
1526 is profitable transformation. */
1527
1528bool
1529bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1530 unsigned start, unsigned end)
1531{
1532 /* Single case bail out. */
1533 if (start == end)
1534 return false;
1535
1536 auto_bitmap dest_bbs;
1537
1538 for (unsigned i = start; i <= end; i++)
1539 {
1540 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1541 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1542 }
1543
1544 unsigned uniq = bitmap_count_bits (dest_bbs);
1545 unsigned count = end - start + 1;
1546 return is_beneficial (count, uniq);
1547}
1548
1549/* Comparison function for qsort to order bit tests by decreasing
1550 probability of execution. */
1551
1552int
1553case_bit_test::cmp (const void *p1, const void *p2)
1554{
1555 const case_bit_test *const d1 = (const case_bit_test *) p1;
1556 const case_bit_test *const d2 = (const case_bit_test *) p2;
1557
1558 if (d2->bits != d1->bits)
1559 return d2->bits - d1->bits;
1560
1561 /* Stabilize the sort. */
1562 return (LABEL_DECL_UID (CASE_LABEL (d2->label))
1563 - LABEL_DECL_UID (CASE_LABEL (d1->label)));
1564}
1565
1566/* Expand a switch statement by a short sequence of bit-wise
1567 comparisons. "switch(x)" is effectively converted into
1568 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1569 integer constants.
1570
1571 INDEX_EXPR is the value being switched on.
1572
1573 MINVAL is the lowest case value of in the case nodes,
1574 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1575 are not guaranteed to be of the same type as INDEX_EXPR
1576 (the gimplifier doesn't change the type of case label values,
1577 and MINVAL and RANGE are derived from those values).
1578 MAXVAL is MINVAL + RANGE.
1579
1580 There *MUST* be max_case_bit_tests or less unique case
1581 node targets. */
1582
void
bit_test_cluster::emit (tree index_expr, tree index_type,
			tree, basic_block default_bb, location_t loc)
{
  /* One entry per distinct target block; zero-initialized.  */
  case_bit_test test[m_max_case_bit_tests] = { {} };
  unsigned int i, j, k;
  unsigned int count;

  tree unsigned_index_type = range_check_type (index_type);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (precision: prec);

  tree minval = get_low ();
  tree maxval = get_high ();

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.  */
  count = 0;
  for (i = 0; i < m_cases.length (); i++)
    {
      unsigned int lo, hi;
      simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
      /* Reuse an existing entry when this cluster's target block was
	 already seen; otherwise open a new entry.  */
      for (k = 0; k < count; k++)
	if (n->m_case_bb == test[k].target_bb)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < m_max_case_bit_tests);
	  test[k].mask = wi::zero (precision: prec);
	  test[k].target_bb = n->m_case_bb;
	  test[k].label = n->m_case_label_expr;
	  test[k].bits = 0;
	  test[k].prob = profile_probability::never ();
	  count++;
	}

      test[k].bits += n->get_range (low: n->get_low (), high: n->get_high ());
      test[k].prob += n->m_prob;

      /* Set the mask bits for every value in [low - minval, high - minval]
	 covered by this cluster.  */
      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
      if (n->get_high () == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
					    minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (x: wone, y: j);
    }

  /* Most likely (widest) tests first.  */
  qsort (test, count, sizeof (*test), case_bit_test::cmp);

  /* If every possible relative value of the index expression is a valid shift
     amount, then we can merge the entry test in the bit test.  */
  bool entry_test_needed;
  value_range r;
  if (TREE_CODE (index_expr) == SSA_NAME
      && get_range_query (cfun)->range_of_expr (r, expr: index_expr)
      && !r.undefined_p ()
      && !r.varying_p ()
      && wi::leu_p (x: r.upper_bound () - r.lower_bound (), y: prec - 1))
    {
      wide_int min = r.lower_bound ();
      wide_int max = r.upper_bound ();
      tree index_type = TREE_TYPE (index_expr);
      minval = fold_convert (index_type, minval);
      wide_int iminval = wi::to_wide (t: minval);
      /* Rebase the masks so that the computed range's minimum maps to
	 shift amount zero.  */
      if (wi::lt_p (x: min, y: iminval, TYPE_SIGN (index_type)))
	{
	  minval = wide_int_to_tree (type: index_type, cst: min);
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (x: test[i].mask, y: iminval - min);
	}
      else if (wi::gt_p (x: min, y: iminval, TYPE_SIGN (index_type)))
	{
	  minval = wide_int_to_tree (type: index_type, cst: min);
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lrshift (x: test[i].mask, y: min - iminval);
	}
      maxval = wide_int_to_tree (type: index_type, cst: max);
      entry_test_needed = false;
    }
  else
    entry_test_needed = true;

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0 && compare_tree_int (maxval, prec) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      /* A scratch pseudo just for RTX costing purposes.  */
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_insn_for_speed_p ();
      /* Cost we save: the reg - minval subtraction.  */
      cost_diff = set_src_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)),
				mode: word_mode, speed_p);
      for (i = 0; i < count; i++)
	{
	  /* Cost delta per test: shifted mask constant vs. original.  */
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     mode: word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (x: test[i].mask, y: m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     mode: word_mode, speed_p);
	}
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (x: test[i].mask, y: m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	}
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (bb: m_case_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert_loc (loc, unsigned_index_type, index_expr);
  idx = fold_build2_loc (loc, MINUS_EXPR, unsigned_index_type, idx,
			 fold_convert_loc (loc, unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  profile_probability subtree_prob = m_subtree_prob;
  profile_probability default_prob = m_default_prob;
  if (!default_prob.initialized_p ())
    default_prob = m_subtree_prob.invert ();

  if (m_handles_entire_switch && entry_test_needed)
    {
      tree range = int_const_binop (MINUS_EXPR, maxval, minval);
      /* if (idx > range) goto default */
      range
	= force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
      tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
      default_prob = default_prob / 2;
      basic_block new_bb
	= hoist_edge_and_branch_if_true (gsip: &gsi, cond: tmp, case_bb: default_bb,
					 prob: default_prob, loc);
      gsi = gsi_last_bb (bb: new_bb);
    }

  tmp = fold_build2_loc (loc, LSHIFT_EXPR, word_type_node, word_mode_one,
			 fold_convert_loc (loc, word_type_node, idx));

  /* csui = (1 << (word_mode) idx) */
  if (count > 1)
    {
      /* More than one test will read the shifted value, so materialize
	 it once in an SSA name.  */
      csui = make_ssa_name (var: word_type_node);
      tmp = force_gimple_operand_gsi (&gsi, tmp,
				      /*simple=*/false, NULL_TREE,
				      /*before=*/true, GSI_SAME_STMT);
      shift_stmt = gimple_build_assign (csui, tmp);
      gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
      update_stmt (s: shift_stmt);
    }
  else
    csui = tmp;

  /* for each unique set of cases:
       if (const & csui) goto target  */
  for (k = 0; k < count; k++)
    {
      /* Probability of this test relative to what remains untested.  */
      profile_probability prob = test[k].prob / (subtree_prob + default_prob);
      subtree_prob -= test[k].prob;
      tmp = wide_int_to_tree (type: word_type_node, cst: test[k].mask);
      tmp = fold_build2_loc (loc, BIT_AND_EXPR, word_type_node, csui, tmp);
      tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
			     tmp, word_mode_zero);
      tmp = force_gimple_operand_gsi (&gsi, tmp,
				      /*simple=*/true, NULL_TREE,
				      /*before=*/true, GSI_SAME_STMT);
      basic_block new_bb
	= hoist_edge_and_branch_if_true (gsip: &gsi, cond: tmp, case_bb: test[k].target_bb,
					 prob, loc);
      gsi = gsi_last_bb (bb: new_bb);
    }

  /* We should have removed all edges now.  */
  gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);

  /* If nothing matched, go to the default label.  */
  edge e = make_edge (gsi_bb (i: gsi), default_bb, EDGE_FALLTHRU);
  e->probability = profile_probability::always ();
}
1783
1784/* Split the basic block at the statement pointed to by GSIP, and insert
1785 a branch to the target basic block of E_TRUE conditional on tree
1786 expression COND.
1787
1788 It is assumed that there is already an edge from the to-be-split
1789 basic block to E_TRUE->dest block. This edge is removed, and the
1790 profile information on the edge is re-used for the new conditional
1791 jump.
1792
1793 The CFG is updated. The dominator tree will not be valid after
1794 this transformation, but the immediate dominators are updated if
1795 UPDATE_DOMINATORS is true.
1796
1797 Returns the newly created basic block. */
1798
basic_block
bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
						 tree cond, basic_block case_bb,
						 profile_probability prob,
						 location_t loc)
{
  tree tmp;
  gcond *cond_stmt;
  edge e_false;
  basic_block new_bb, split_bb = gsi_bb (i: *gsip);

  /* The taken branch of the new conditional jumps to CASE_BB.  */
  edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
  e_true->probability = prob;
  gcc_assert (e_true->src == split_bb);

  /* Gimplify COND and emit the GIMPLE_COND at the iterator position.  */
  tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
				  /*before=*/true, GSI_SAME_STMT);
  cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
  gimple_set_location (g: cond_stmt, location: loc);
  gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);

  /* Split after the condition; the remainder of the block becomes the
     fall-through (false) successor.  split_block may have moved E_TRUE's
     source, so re-anchor it at SPLIT_BB.  */
  e_false = split_block (split_bb, cond_stmt);
  new_bb = e_false->dest;
  redirect_edge_pred (e_true, split_bb);

  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;
  e_false->probability = e_true->probability.invert ();
  new_bb->count = e_false->count ();

  return new_bb;
}
1831
1832/* Compute the number of case labels that correspond to each outgoing edge of
1833 switch statement. Record this information in the aux field of the edge. */
1834
1835void
1836switch_decision_tree::compute_cases_per_edge ()
1837{
1838 reset_out_edges_aux (swtch: m_switch);
1839 int ncases = gimple_switch_num_labels (gs: m_switch);
1840 for (int i = ncases - 1; i >= 1; --i)
1841 {
1842 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1843 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1844 }
1845}
1846
1847/* Analyze switch statement and return true when the statement is expanded
1848 as decision tree. */
1849
bool
switch_decision_tree::analyze_switch_statement ()
{
  unsigned l = gimple_switch_num_labels (gs: m_switch);
  basic_block bb = gimple_bb (g: m_switch);
  auto_vec<cluster *> clusters;
  clusters.create (nelems: l - 1);

  basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
  m_case_bbs.reserve (nelems: l);
  m_case_bbs.quick_push (obj: default_bb);

  /* Count, per outgoing edge, how many case labels it serves (stored in
     edge->aux) so per-case probabilities can be split evenly below.  */
  compute_cases_per_edge ();

  /* Build one simple_cluster per non-default case label.  */
  for (unsigned i = 1; i < l; i++)
    {
      tree elt = gimple_switch_label (gs: m_switch, index: i);
      tree lab = CASE_LABEL (elt);
      basic_block case_bb = label_to_block (cfun, lab);
      edge case_edge = find_edge (bb, case_bb);
      tree low = CASE_LOW (elt);
      tree high = CASE_HIGH (elt);

      /* Divide the edge probability evenly among the labels it serves.  */
      profile_probability p
	= case_edge->probability / ((intptr_t) (case_edge->aux));
      clusters.quick_push (obj: new simple_cluster (low, high, elt, case_edge->dest,
						p));
      m_case_bbs.quick_push (obj: case_edge->dest);
    }

  reset_out_edges_aux (swtch: m_switch);

  /* Find bit-test clusters.  */
  vec<cluster *> output = bit_test_cluster::find_bit_tests (clusters);

  /* Find jump table clusters.  Only maximal runs of SIMPLE_CASE clusters
     (collected in TMP) are offered to the jump-table finder; bit-test
     clusters pass through unchanged.  */
  vec<cluster *> output2;
  auto_vec<cluster *> tmp;
  output2.create (nelems: 1);
  tmp.create (nelems: 1);

  for (unsigned i = 0; i < output.length (); i++)
    {
      cluster *c = output[i];
      if (c->get_type () != SIMPLE_CASE)
	{
	  if (!tmp.is_empty ())
	    {
	      vec<cluster *> n = jump_table_cluster::find_jump_tables (clusters&: tmp);
	      output2.safe_splice (src: n);
	      n.release ();
	      tmp.truncate (size: 0);
	    }
	  output2.safe_push (obj: c);
	}
      else
	tmp.safe_push (obj: c);
    }

  /* We still can have a temporary vector to test.  */
  if (!tmp.is_empty ())
    {
      vec<cluster *> n = jump_table_cluster::find_jump_tables (clusters&: tmp);
      output2.safe_splice (src: n);
      n.release ();
    }

  if (dump_file)
    {
      fprintf (stream: dump_file, format: ";; GIMPLE switch case clusters: ");
      for (unsigned i = 0; i < output2.length (); i++)
	output2[i]->dump (f: dump_file, details: dump_flags & TDF_DETAILS);
      fprintf (stream: dump_file, format: "\n");
    }

  output.release ();

  bool expanded = try_switch_expansion (clusters&: output2);
  release_clusters (clusters&: output2);
  return expanded;
}
1931
1932/* Attempt to expand CLUSTERS as a decision tree. Return true when
1933 expanded. */
1934
bool
switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
{
  tree index_expr = gimple_switch_index (gs: m_switch);
  tree index_type = TREE_TYPE (index_expr);
  basic_block bb = gimple_bb (g: m_switch);

  /* Nothing to do for a default-only switch, or when no suitable
     unsigned type exists for range checks on the index type.  */
  if (gimple_switch_num_labels (gs: m_switch) == 1
      || range_check_type (index_type) == NULL_TREE)
    return false;

  /* Find the default case target label.  */
  edge default_edge = gimple_switch_default_edge (cfun, m_switch);
  m_default_bb = default_edge->dest;

  /* Do the insertion of a case label into m_case_list.  The labels are
     fed to us in descending order from the sorted vector of case labels used
     in the tree part of the middle end.  So the list we construct is
     sorted in ascending order.  */

  for (int i = clusters.length () - 1; i >= 0; i--)
    {
      case_tree_node *r = m_case_list;
      m_case_list = m_case_node_pool.allocate ();
      m_case_list->m_right = r;
      m_case_list->m_c = clusters[i];
    }

  /* Remember which PHI values flow in from the switch block, so newly
     created edges can be given matching PHI arguments afterwards.  */
  record_phi_operand_mapping ();

  /* Split basic block that contains the gswitch statement.  */
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  edge e;
  if (gsi_end_p (i: gsi))
    e = split_block_after_labels (bb);
  else
    {
      gsi_prev (i: &gsi);
      e = split_block (bb, gsi_stmt (i: gsi));
    }
  bb = split_edge (e);

  /* Create new basic blocks for non-case clusters where specific expansion
     needs to happen.  */
  for (unsigned i = 0; i < clusters.length (); i++)
    if (clusters[i]->get_type () != SIMPLE_CASE)
      {
	clusters[i]->m_case_bb = create_empty_bb (bb);
	clusters[i]->m_case_bb->count = bb->count;
	clusters[i]->m_case_bb->loop_father = bb->loop_father;
      }

  /* Do not do an extra work for a single cluster.  */
  if (clusters.length () == 1
      && clusters[0]->get_type () != SIMPLE_CASE)
    {
      cluster *c = clusters[0];
      c->emit (index_expr, index_type,
	       gimple_switch_default_label (gs: m_switch), m_default_bb,
	       gimple_location (g: m_switch));
      redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
    }
  else
    {
      /* Emit the decision tree dispatching among the clusters.  */
      emit (bb, index_expr, default_prob: default_edge->probability, index_type);

      /* Emit cluster-specific switch handling.  */
      for (unsigned i = 0; i < clusters.length (); i++)
	if (clusters[i]->get_type () != SIMPLE_CASE)
	  {
	    edge e = single_pred_edge (bb: clusters[i]->m_case_bb);
	    e->dest->count = e->src->count.apply_probability (prob: e->probability);
	    clusters[i]->emit (index_expr, index_type,
			       gimple_switch_default_label (gs: m_switch),
			       m_default_bb, gimple_location (g: m_switch));
	  }
    }

  /* Fill in PHI arguments on the edges created by the expansion.  */
  fix_phi_operands_for_edges ();

  return true;
}
2017
2018/* Before switch transformation, record all SSA_NAMEs defined in switch BB
2019 and used in a label basic block. */
2020
2021void
2022switch_decision_tree::record_phi_operand_mapping ()
2023{
2024 basic_block switch_bb = gimple_bb (g: m_switch);
2025 /* Record all PHI nodes that have to be fixed after conversion. */
2026 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2027 {
2028 gphi_iterator gsi;
2029 basic_block bb = m_case_bbs[i];
2030 for (gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
2031 {
2032 gphi *phi = gsi.phi ();
2033
2034 for (unsigned i = 0; i < gimple_phi_num_args (gs: phi); i++)
2035 {
2036 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
2037 if (phi_src_bb == switch_bb)
2038 {
2039 tree def = gimple_phi_arg_def (gs: phi, index: i);
2040 tree result = gimple_phi_result (gs: phi);
2041 m_phi_mapping.put (k: result, v: def);
2042 break;
2043 }
2044 }
2045 }
2046 }
2047}
2048
2049/* Append new operands to PHI statements that were introduced due to
2050 addition of new edges to case labels. */
2051
2052void
2053switch_decision_tree::fix_phi_operands_for_edges ()
2054{
2055 gphi_iterator gsi;
2056
2057 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2058 {
2059 basic_block bb = m_case_bbs[i];
2060 for (gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
2061 {
2062 gphi *phi = gsi.phi ();
2063 for (unsigned j = 0; j < gimple_phi_num_args (gs: phi); j++)
2064 {
2065 tree def = gimple_phi_arg_def (gs: phi, index: j);
2066 if (def == NULL_TREE)
2067 {
2068 edge e = gimple_phi_arg_edge (phi, i: j);
2069 tree *definition
2070 = m_phi_mapping.get (k: gimple_phi_result (gs: phi));
2071 gcc_assert (definition);
2072 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
2073 }
2074 }
2075 }
2076 }
2077}
2078
/* Generate a decision tree, switching on INDEX_EXPR and jumping to
   one of the labels in CASE_LIST or to the DEFAULT_LABEL.

   We generate a binary decision tree to select the appropriate target
   code.  BB is the block from which the comparisons start;
   DEFAULT_PROB is the probability of reaching the default label and
   INDEX_TYPE the type of the switch index.  */

void
switch_decision_tree::emit (basic_block bb, tree index_expr,
			    profile_probability default_prob, tree index_type)
{
  /* Turn the linear, sorted m_case_list into a balanced binary tree.  */
  balance_case_nodes (head: &m_case_list, NULL);

  if (dump_file)
    dump_function_to_file (current_function_decl, dump_file, dump_flags);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
      fprintf (stream: dump_file, format: ";; Expanding GIMPLE switch as decision tree:\n");
      gcc_assert (m_case_list != NULL);
      dump_case_nodes (f: dump_file, root: m_case_list, indent_step, indent_level: 0);
    }

  /* Recursively emit comparisons; returns the fall-through block (or
     NULL when every path has been redirected).  */
  bb = emit_case_nodes (bb, index: index_expr, node: m_case_list, default_prob, index_type,
			gimple_location (g: m_switch));

  /* Whatever falls off the end of the tree goes to the default label.  */
  if (bb)
    emit_jump (bb, case_bb: m_default_bb);

  /* Remove all edges and do just an edge that will reach default_bb.  */
  bb = gimple_bb (g: m_switch);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  /* The gswitch statement itself is now dead; remove it and its block.  */
  gsi_remove (&gsi, true);

  delete_basic_block (bb);
}
2114
/* Take an ordered list of case nodes
   and transform them into a near optimal binary tree,
   on the assumption that any target code selection value is as
   likely as any other.

   The transformation is performed by splitting the ordered
   list into two equal sections plus a pivot.  The parts are
   then attached to the pivot as left and right branches.  Each
   branch is then transformed recursively.  HEAD points at the list
   head (linked through m_right); PARENT is the node the resulting
   subtree hangs off, or NULL for the root.  */

void
switch_decision_tree::balance_case_nodes (case_tree_node **head,
					  case_tree_node *parent)
{
  case_tree_node *np;

  np = *head;
  if (np)
    {
      int i = 0;
      case_tree_node **npp;
      case_tree_node *left;
      profile_probability prob = profile_probability::never ();

      /* Count the number of entries on branch.  */

      while (np)
	{
	  i++;
	  prob += np->m_c->m_prob;
	  np = np->m_right;
	}

      if (i > 2)
	{
	  /* Split this list if it is long enough for that to help.  */
	  npp = head;
	  left = *npp;
	  /* Aim to put half of the total probability on each side.  */
	  profile_probability pivot_prob = prob / 2;

	  /* Find the place in the list that bisects the list's total cost
	     by probability.  */
	  while (1)
	    {
	      /* Skip nodes while their probability does not reach
		 that amount.  */
	      prob -= (*npp)->m_c->m_prob;
	      if ((prob.initialized_p () && prob < pivot_prob)
		  || ! (*npp)->m_right)
		break;
	      npp = &(*npp)->m_right;
	    }

	  /* NPP now points at the pivot: detach it from the list, make it
	     the subtree root and attach the two halves as children.  */
	  np = *npp;
	  *npp = 0;
	  *head = np;
	  np->m_parent = parent;
	  np->m_left = left == np ? NULL : left;

	  /* Optimize each of the two split parts.  */
	  balance_case_nodes (head: &np->m_left, parent: np);
	  balance_case_nodes (head: &np->m_right, parent: np);
	  /* The subtree probability is this node's own probability plus
	     the (already computed) probabilities of both children.  */
	  np->m_c->m_subtree_prob = np->m_c->m_prob;
	  if (np->m_left)
	    np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
	  if (np->m_right)
	    np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
	}
      else
	{
	  /* Else leave this branch as one level,
	     but fill in `parent' fields.  */
	  np = *head;
	  np->m_parent = parent;
	  /* Accumulate the whole chain's probability on the head node.  */
	  np->m_c->m_subtree_prob = np->m_c->m_prob;
	  for (; np->m_right; np = np->m_right)
	    {
	      np->m_right->m_parent = np;
	      (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
	    }
	}
    }
}
2198
2199/* Dump ROOT, a list or tree of case nodes, to file. */
2200
2201void
2202switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2203 int indent_step, int indent_level)
2204{
2205 if (root == 0)
2206 return;
2207 indent_level++;
2208
2209 dump_case_nodes (f, root: root->m_left, indent_step, indent_level);
2210
2211 fputs (s: ";; ", stream: f);
2212 fprintf (stream: f, format: "%*s", indent_step * indent_level, "");
2213 root->m_c->dump (f);
2214 root->m_c->m_prob.dump (f);
2215 fputs (s: " subtree: ", stream: f);
2216 root->m_c->m_subtree_prob.dump (f);
2217 fputs (s: ")\n", stream: f);
2218
2219 dump_case_nodes (f, root: root->m_right, indent_step, indent_level);
2220}
2221
2222
2223/* Add an unconditional jump to CASE_BB that happens in basic block BB. */
2224
2225void
2226switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2227{
2228 edge e = single_succ_edge (bb);
2229 redirect_edge_succ (e, case_bb);
2230}
2231
/* Generate code to compare OP0 with OP1 so that the condition codes are
   set and to jump to LABEL_BB if the condition is true.
   COMPARISON is the GIMPLE comparison (EQ, NE, GT, etc.).
   PROB is the probability of jumping to LABEL_BB.
   Returns the new block reached when the condition is false.  */

basic_block
switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
					       tree op1, tree_code comparison,
					       basic_block label_bb,
					       profile_probability prob,
					       location_t loc)
{
  // TODO: it's once called with lhs != index.
  /* Bring OP1 to OP0's type so the comparison is valid GIMPLE.  */
  op1 = fold_convert (TREE_TYPE (op0), op1);

  gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
  gimple_set_location (g: cond, location: loc);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gsi_insert_after (&gsi, cond, GSI_NEW_STMT);

  /* BB must still be a plain fall-through block; the condition we just
     added becomes its terminator.  */
  gcc_assert (single_succ_p (bb));

  /* Make a new basic block where false branch will take place.  */
  edge false_edge = split_block (bb, cond);
  false_edge->flags = EDGE_FALSE_VALUE;
  false_edge->probability = prob.invert ();
  /* Scale the fall-through count by the probability of not jumping.  */
  false_edge->dest->count = bb->count.apply_probability (prob: prob.invert ());

  edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
  true_edge->probability = prob;

  return false_edge->dest;
}
2265
/* Generate code to jump to LABEL if OP0 and OP1 are equal.
   PROB is the probability of jumping to LABEL_BB.
   BB is a basic block where the new condition will be placed.
   Returns the new block reached when the values are not equal.  */

basic_block
switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
					basic_block label_bb,
					profile_probability prob,
					location_t loc)
{
  /* Bring OP1 to OP0's type so the comparison is valid GIMPLE.  */
  op1 = fold_convert (TREE_TYPE (op0), op1);

  gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
  gimple_set_location (g: cond, location: loc);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gsi_insert_before (&gsi, cond, GSI_SAME_STMT);

  /* BB must still be a plain fall-through block; the condition we just
     added becomes its terminator.  */
  gcc_assert (single_succ_p (bb));

  /* Make a new basic block where false branch will take place.  */
  edge false_edge = split_block (bb, cond);
  false_edge->flags = EDGE_FALSE_VALUE;
  false_edge->probability = prob.invert ();
  /* Scale the fall-through count by the probability of not jumping.  */
  false_edge->dest->count = bb->count.apply_probability (prob: prob.invert ());

  edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
  true_edge->probability = prob;

  return false_edge->dest;
}
2296
/* Emit step-by-step code to select a case for the value of INDEX.
   The thus generated decision tree follows the form of the
   case-node binary tree NODE, whose nodes represent test conditions.
   DEFAULT_PROB is probability of cases leading to default BB.
   INDEX_TYPE is the type of the index of the switch.  Returns the block
   that falls through (still headed to the default label), or NULL when
   every path has been resolved.  */

basic_block
switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
				       case_tree_node *node,
				       profile_probability default_prob,
				       tree index_type, location_t loc)
{
  profile_probability p;

  /* If node is null, we are done.  */
  if (node == NULL)
    return bb;

  /* Single value case.  */
  if (node->m_c->is_single_value_p ())
    {
      /* Node is single valued.  First see if the index expression matches
	 this node and then check our children, if any.  */
      p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
      bb = do_jump_if_equal (bb, op0: index, op1: node->m_c->get_low (),
			     label_bb: node->m_c->m_case_bb, prob: p, loc);
      /* Since this case is taken at this point, reduce its weight from
	 subtree_weight.  */
      node->m_c->m_subtree_prob -= node->m_c->m_prob;

      if (node->m_left != NULL && node->m_right != NULL)
	{
	  /* 1) the node has both children

	     If both children are single-valued cases with no
	     children, finish up all the work.  This way, we can save
	     one ordered comparison.  */

	  if (!node->m_left->has_child ()
	      && node->m_left->m_c->is_single_value_p ()
	      && !node->m_right->has_child ()
	      && node->m_right->m_c->is_single_value_p ())
	    {
	      /* Emit a plain equality test for each child; probabilities
		 are renormalized as earlier cases are peeled off.  */
	      p = (node->m_right->m_c->m_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_right->m_c->get_low (),
				     label_bb: node->m_right->m_c->m_case_bb, prob: p, loc);
	      node->m_c->m_subtree_prob -= node->m_right->m_c->m_prob;

	      p = (node->m_left->m_c->m_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_left->m_c->get_low (),
				     label_bb: node->m_left->m_c->m_case_bb, prob: p, loc);
	    }
	  else
	    {
	      /* Branch to a label where we will handle it later.  */
	      basic_block test_bb = split_edge (single_succ_edge (bb));
	      redirect_edge_succ (single_pred_edge (bb: test_bb),
				  single_succ_edge (bb)->dest);

	      /* Probability of taking the right-hand subtree; half of the
		 default probability is accounted to each side.  */
	      p = ((node->m_right->m_c->m_subtree_prob + default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      test_bb->count = bb->count.apply_probability (prob: p);
	      bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_high (),
					    comparison: GT_EXPR, label_bb: test_bb, prob: p, loc);
	      default_prob /= 2;

	      /* Handle the left-hand subtree.  */
	      bb = emit_case_nodes (bb, index, node: node->m_left,
				    default_prob, index_type, loc);

	      /* If the left-hand subtree fell through,
		 don't let it fall into the right-hand subtree.  */
	      if (bb && m_default_bb)
		emit_jump (bb, case_bb: m_default_bb);

	      bb = emit_case_nodes (bb: test_bb, index, node: node->m_right,
				    default_prob, index_type, loc);
	    }
	}
      else if (node->m_left == NULL && node->m_right != NULL)
	{
	  /* 2) the node has only right child.  */

	  /* Here we have a right child but no left so we issue a conditional
	     branch to default and process the right child.

	     Omit the conditional branch to default if the right child
	     does not have any children and is single valued; it would
	     cost too much space to save so little time.  */

	  if (node->m_right->has_child ()
	      || !node->m_right->m_c->is_single_value_p ())
	    {
	      p = ((default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_low (),
					    comparison: LT_EXPR, label_bb: m_default_bb, prob: p, loc);
	      default_prob /= 2;

	      bb = emit_case_nodes (bb, index, node: node->m_right, default_prob,
				    index_type, loc);
	    }
	  else
	    {
	      /* We cannot process node->right normally
		 since we haven't ruled out the numbers less than
		 this node's value.  So handle node->right explicitly.  */
	      p = (node->m_right->m_c->m_subtree_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_right->m_c->get_low (),
				     label_bb: node->m_right->m_c->m_case_bb, prob: p, loc);
	    }
	}
      else if (node->m_left != NULL && node->m_right == NULL)
	{
	  /* 3) just one subtree, on the left.  Similar case as previous.  */

	  if (node->m_left->has_child ()
	      || !node->m_left->m_c->is_single_value_p ())
	    {
	      p = ((default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_high (),
					    comparison: GT_EXPR, label_bb: m_default_bb, prob: p, loc);
	      default_prob /= 2;

	      bb = emit_case_nodes (bb, index, node: node->m_left, default_prob,
				    index_type, loc);
	    }
	  else
	    {
	      /* We cannot process node->left normally
		 since we haven't ruled out the numbers less than
		 this node's value.  So handle node->left explicitly.  */
	      p = (node->m_left->m_c->m_subtree_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_left->m_c->get_low (),
				     label_bb: node->m_left->m_c->m_case_bb, prob: p, loc);
	    }
	}
    }
  else
    {
      /* Node is a range.  These cases are very similar to those for a single
	 value, except that we do not start by testing whether this node
	 is the one to branch to.  */
      if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
	{
	  /* Bit-test clusters keep a share of the default probability for
	     their own internal default path, hence three parts.  */
	  bool is_bt = node->m_c->get_type () == BIT_TEST;
	  int parts = is_bt ? 3 : 2;

	  /* Branch to a label where we will handle it later.  */
	  basic_block test_bb = split_edge (single_succ_edge (bb));
	  redirect_edge_succ (single_pred_edge (bb: test_bb),
			      single_succ_edge (bb)->dest);

	  profile_probability right_prob = profile_probability::never ();
	  if (node->m_right)
	    right_prob = node->m_right->m_c->m_subtree_prob;
	  p = ((right_prob + default_prob / parts)
	       / (node->m_c->m_subtree_prob + default_prob));
	  test_bb->count = bb->count.apply_probability (prob: p);

	  bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_high (),
					comparison: GT_EXPR, label_bb: test_bb, prob: p, loc);

	  default_prob /= parts;
	  node->m_c->m_subtree_prob -= right_prob;
	  if (is_bt)
	    node->m_c->m_default_prob = default_prob;

	  /* Value belongs to this node or to the left-hand subtree.  */
	  p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
	  bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_low (),
					comparison: GE_EXPR, label_bb: node->m_c->m_case_bb, prob: p, loc);

	  /* Handle the left-hand subtree.  */
	  bb = emit_case_nodes (bb, index, node: node->m_left, default_prob,
				index_type, loc);

	  /* If the left-hand subtree fell through,
	     don't let it fall into the right-hand subtree.  */
	  if (bb && m_default_bb)
	    emit_jump (bb, case_bb: m_default_bb);

	  bb = emit_case_nodes (bb: test_bb, index, node: node->m_right, default_prob,
				index_type, loc);
	}
      else
	{
	  /* Node has no children so we check low and high bounds to remove
	     redundant tests.  Only one of the bounds can exist,
	     since otherwise this node is bounded--a case tested already.  */
	  tree lhs, rhs;
	  generate_range_test (bb, index, low: node->m_c->get_low (),
			       high: node->m_c->get_high (), lhs: &lhs, rhs: &rhs);
	  p = default_prob / (node->m_c->m_subtree_prob + default_prob);

	  bb = emit_cmp_and_jump_insns (bb, op0: lhs, op1: rhs, comparison: GT_EXPR,
					label_bb: m_default_bb, prob: p, loc);

	  /* The range test fully resolves this node: no fall-through.  */
	  emit_jump (bb, case_bb: node->m_c->m_case_bb);
	  return NULL;
	}
    }

  return bb;
}
2507
2508/* The main function of the pass scans statements for switches and invokes
2509 process_switch on them. */
2510
2511namespace {
2512
/* Pass descriptor for the GIMPLE switch-conversion pass.  */
const pass_data pass_data_convert_switch =
{
  .type: GIMPLE_PASS, /* type */
  .name: "switchconv", /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_TREE_SWITCH_CONVERSION, /* tv_id */
  .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
2525
/* Pass that converts suitable GIMPLE_SWITCH statements into lookup-table
   loads (see switch_conversion::expand).  */
class pass_convert_switch : public gimple_opt_pass
{
public:
  pass_convert_switch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_convert_switch, ctxt)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override
  {
    /* Enabled by -ftree-switch-conversion.  */
    return flag_tree_switch_conversion != 0;
  }
  unsigned int execute (function *) final override;

}; // class pass_convert_switch
2541
2542unsigned int
2543pass_convert_switch::execute (function *fun)
2544{
2545 basic_block bb;
2546 bool cfg_altered = false;
2547
2548 FOR_EACH_BB_FN (bb, fun)
2549 {
2550 if (gswitch *stmt = safe_dyn_cast <gswitch *> (p: *gsi_last_bb (bb)))
2551 {
2552 if (dump_file)
2553 {
2554 expanded_location loc = expand_location (gimple_location (g: stmt));
2555
2556 fprintf (stream: dump_file, format: "beginning to process the following "
2557 "SWITCH statement (%s:%d) : ------- \n",
2558 loc.file, loc.line);
2559 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2560 putc (c: '\n', stream: dump_file);
2561 }
2562
2563 switch_conversion sconv;
2564 sconv.expand (swtch: stmt);
2565 cfg_altered |= sconv.m_cfg_altered;
2566 if (!sconv.m_reason)
2567 {
2568 if (dump_file)
2569 {
2570 fputs (s: "Switch converted\n", stream: dump_file);
2571 fputs (s: "--------------------------------\n", stream: dump_file);
2572 }
2573
2574 /* Make no effort to update the post-dominator tree.
2575 It is actually not that hard for the transformations
2576 we have performed, but it is not supported
2577 by iterate_fix_dominators. */
2578 free_dominance_info (CDI_POST_DOMINATORS);
2579 }
2580 else
2581 {
2582 if (dump_file)
2583 {
2584 fputs (s: "Bailing out - ", stream: dump_file);
2585 fputs (s: sconv.m_reason, stream: dump_file);
2586 fputs (s: "\n--------------------------------\n", stream: dump_file);
2587 }
2588 }
2589 }
2590 }
2591
2592 return cfg_altered ? TODO_cleanup_cfg : 0;;
2593}
2594
2595} // anon namespace
2596
/* Factory used by the pass manager to instantiate the switchconv pass.  */
gimple_opt_pass *
make_pass_convert_switch (gcc::context *ctxt)
{
  return new pass_convert_switch (ctxt);
}
2602
2603/* The main function of the pass scans statements for switches and invokes
2604 process_switch on them. */
2605
2606namespace {
2607
/* Pass that lowers the GIMPLE_SWITCH statements that remain after
   switchconv.  Instantiated twice: O0 = true for the variant that runs
   when not optimizing, O0 = false for the optimizing variant.  */
template <bool O0> class pass_lower_switch: public gimple_opt_pass
{
public:
  pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}

  /* Pass metadata; each instantiation carries a distinct pass name.  */
  static const pass_data data;
  opt_pass *
  clone () final override
  {
    return new pass_lower_switch<O0> (m_ctxt);
  }

  bool
  gate (function *) final override
  {
    /* The O0 instantiation runs only when not optimizing; the other
       instantiation always runs.  */
    return !O0 || !optimize;
  }

  unsigned int execute (function *fun) final override;
}; // class pass_lower_switch
2628
2629template <bool O0>
2630const pass_data pass_lower_switch<O0>::data = {
2631 .type: .type: .type: GIMPLE_PASS, /* type */
2632 .name: .name: .name: O0 ? "switchlower_O0" : "switchlower", /* name */
2633 .optinfo_flags: .optinfo_flags: .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
2634 .tv_id: .tv_id: .tv_id: TV_TREE_SWITCH_LOWERING, /* tv_id */
2635 .properties_required: .properties_required: .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
2636 .properties_provided: .properties_provided: .properties_provided: 0, /* properties_provided */
2637 .properties_destroyed: .properties_destroyed: .properties_destroyed: 0, /* properties_destroyed */
2638 .todo_flags_start: .todo_flags_start: .todo_flags_start: 0, /* todo_flags_start */
2639 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2640};
2641
/* Lower every GIMPLE_SWITCH in FUN to a decision tree (see
   switch_decision_tree::analyze_switch_statement).  Always returns 0;
   cleanup TODOs are requested through the pass_data flags.  */
template <bool O0>
unsigned int
pass_lower_switch<O0>::execute (function *fun)
{
  basic_block bb;
  bool expanded = false;

  /* Collect all switches first: lowering rewrites the CFG, so we must
     not expand while still iterating over the function's blocks.  */
  auto_vec<gimple *> switch_statements;
  switch_statements.create (nelems: 1);

  FOR_EACH_BB_FN (bb, fun)
    {
      if (gswitch *swtch = safe_dyn_cast <gswitch *> (p: *gsi_last_bb (bb)))
	{
	  /* When optimizing, merge adjacent case labels first.  */
	  if (!O0)
	    group_case_labels_stmt (swtch);
	  switch_statements.safe_push (obj: swtch);
	}
    }

  for (unsigned i = 0; i < switch_statements.length (); i++)
    {
      gimple *stmt = switch_statements[i];
      if (dump_file)
	{
	  expanded_location loc = expand_location (gimple_location (g: stmt));

	  fprintf (stream: dump_file, format: "beginning to process the following "
		   "SWITCH statement (%s:%d) : ------- \n",
		   loc.file, loc.line);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	  putc (c: '\n', stream: dump_file);
	}

      gswitch *swtch = dyn_cast<gswitch *> (p: stmt);
      if (swtch)
	{
	  switch_decision_tree dt (swtch);
	  expanded |= dt.analyze_switch_statement ();
	}
    }

  if (expanded)
    {
      /* Lowering changed the CFG; dominance info and virtual operands
	 are stale and must be recomputed.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      mark_virtual_operands_for_renaming (cfun);
    }

  return 0;
}
2693
2694} // anon namespace
2695
/* Factory for the -O0 switch-lowering pass instantiation.  */
gimple_opt_pass *
make_pass_lower_switch_O0 (gcc::context *ctxt)
{
  return new pass_lower_switch<true> (ctxt);
}
/* Factory for the optimizing switch-lowering pass instantiation.  */
gimple_opt_pass *
make_pass_lower_switch (gcc::context *ctxt)
{
  return new pass_lower_switch<false> (ctxt);
}
2706

/* Source: gcc/tree-switch-conversion.cc.  */