/* Lower GIMPLE_SWITCH expressions to something more efficient than
   a jump table.
   Copyright (C) 2006-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it
under the terms of the GNU General Public License as published by the
Free Software Foundation; either version 3, or (at your option) any
later version.

GCC is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not, write to the Free
Software Foundation, 51 Franklin Street, Fifth Floor, Boston, MA
02110-1301, USA.  */

/* This file handles the lowering of GIMPLE_SWITCH to an indexed
   load, or a series of bit-test-and-branch expressions.  */
25#include "config.h"
26#include "system.h"
27#include "coretypes.h"
28#include "backend.h"
29#include "insn-codes.h"
30#include "rtl.h"
31#include "tree.h"
32#include "gimple.h"
33#include "cfghooks.h"
34#include "tree-pass.h"
35#include "ssa.h"
36#include "optabs-tree.h"
37#include "cgraph.h"
38#include "gimple-pretty-print.h"
39#include "fold-const.h"
40#include "varasm.h"
41#include "stor-layout.h"
42#include "cfganal.h"
43#include "gimplify.h"
44#include "gimple-iterator.h"
45#include "gimplify-me.h"
46#include "gimple-fold.h"
47#include "tree-cfg.h"
48#include "cfgloop.h"
49#include "alloc-pool.h"
50#include "target.h"
51#include "tree-into-ssa.h"
52#include "omp-general.h"
53#include "gimple-range.h"
54#include "tree-cfgcleanup.h"
55
/* ??? For lang_hooks.types.type_for_mode, but is there a word_mode
   type in the GIMPLE type system that is language-independent?  */
58#include "langhooks.h"
59
60#include "tree-switch-conversion.h"
61
62using namespace tree_switch_conversion;
63
64/* Constructor. */
65
66switch_conversion::switch_conversion (): m_final_bb (NULL),
67 m_constructors (NULL), m_default_values (NULL),
68 m_arr_ref_first (NULL), m_arr_ref_last (NULL),
69 m_reason (NULL), m_default_case_nonstandard (false), m_cfg_altered (false)
70{
71}
72
73/* Collection information about SWTCH statement. */
74
75void
76switch_conversion::collect (gswitch *swtch)
77{
78 unsigned int branch_num = gimple_switch_num_labels (gs: swtch);
79 tree min_case, max_case;
80 unsigned int i;
81 edge e, e_default, e_first;
82 edge_iterator ei;
83
84 m_switch = swtch;
85
86 /* The gimplifier has already sorted the cases by CASE_LOW and ensured there
87 is a default label which is the first in the vector.
88 Collect the bits we can deduce from the CFG. */
89 m_index_expr = gimple_switch_index (gs: swtch);
90 m_switch_bb = gimple_bb (g: swtch);
91 e_default = gimple_switch_default_edge (cfun, swtch);
92 m_default_bb = e_default->dest;
93 m_default_prob = e_default->probability;
94
95 /* Get upper and lower bounds of case values, and the covered range. */
96 min_case = gimple_switch_label (gs: swtch, index: 1);
97 max_case = gimple_switch_label (gs: swtch, index: branch_num - 1);
98
99 m_range_min = CASE_LOW (min_case);
100 if (CASE_HIGH (max_case) != NULL_TREE)
101 m_range_max = CASE_HIGH (max_case);
102 else
103 m_range_max = CASE_LOW (max_case);
104
105 m_contiguous_range = true;
106 tree last = CASE_HIGH (min_case) ? CASE_HIGH (min_case) : m_range_min;
107 for (i = 2; i < branch_num; i++)
108 {
109 tree elt = gimple_switch_label (gs: swtch, index: i);
110 if (wi::to_wide (t: last) + 1 != wi::to_wide (CASE_LOW (elt)))
111 {
112 m_contiguous_range = false;
113 break;
114 }
115 last = CASE_HIGH (elt) ? CASE_HIGH (elt) : CASE_LOW (elt);
116 }
117
118 if (m_contiguous_range)
119 e_first = gimple_switch_edge (cfun, swtch, 1);
120 else
121 e_first = e_default;
122
123 /* See if there is one common successor block for all branch
124 targets. If it exists, record it in FINAL_BB.
125 Start with the destination of the first non-default case
126 if the range is contiguous and default case otherwise as
127 guess or its destination in case it is a forwarder block. */
128 if (! single_pred_p (bb: e_first->dest))
129 m_final_bb = e_first->dest;
130 else if (single_succ_p (bb: e_first->dest)
131 && ! single_pred_p (bb: single_succ (bb: e_first->dest)))
132 m_final_bb = single_succ (bb: e_first->dest);
133 /* Require that all switch destinations are either that common
134 FINAL_BB or a forwarder to it, except for the default
135 case if contiguous range. */
136 auto_vec<edge, 10> fw_edges;
137 m_uniq = 0;
138 if (m_final_bb)
139 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
140 {
141 edge phi_e = nullptr;
142 if (e->dest == m_final_bb)
143 phi_e = e;
144 else if (single_pred_p (bb: e->dest)
145 && single_succ_p (bb: e->dest)
146 && single_succ (bb: e->dest) == m_final_bb)
147 phi_e = single_succ_edge (bb: e->dest);
148 if (phi_e)
149 {
150 if (e == e_default)
151 ;
152 else if (phi_e == e || empty_block_p (e->dest))
153 {
154 /* For empty blocks consider forwarders with equal
155 PHI arguments in m_final_bb as unique. */
156 unsigned i;
157 for (i = 0; i < fw_edges.length (); ++i)
158 if (phi_alternatives_equal (m_final_bb, fw_edges[i], phi_e))
159 break;
160 if (i == fw_edges.length ())
161 {
162 /* But limit the above possibly quadratic search. */
163 if (fw_edges.length () < 10)
164 fw_edges.quick_push (obj: phi_e);
165 m_uniq++;
166 }
167 }
168 else
169 m_uniq++;
170 continue;
171 }
172
173 if (e == e_default && m_contiguous_range)
174 {
175 m_default_case_nonstandard = true;
176 continue;
177 }
178
179 m_final_bb = NULL;
180 break;
181 }
182
183 /* When there's not a single common successor block conservatively
184 approximate the number of unique non-default targets. */
185 if (!m_final_bb)
186 m_uniq = EDGE_COUNT (gimple_bb (swtch)->succs) - 1;
187
188 m_range_size
189 = int_const_binop (MINUS_EXPR, m_range_max, m_range_min);
190
191 /* Get a count of the number of case labels. Single-valued case labels
192 simply count as one, but a case range counts double, since it may
193 require two compares if it gets lowered as a branching tree. */
194 m_count = 0;
195 for (i = 1; i < branch_num; i++)
196 {
197 tree elt = gimple_switch_label (gs: swtch, index: i);
198 m_count++;
199 if (CASE_HIGH (elt)
200 && ! tree_int_cst_equal (CASE_LOW (elt), CASE_HIGH (elt)))
201 m_count++;
202 }
203}
204
205/* Checks whether the range given by individual case statements of the switch
206 switch statement isn't too big and whether the number of branches actually
207 satisfies the size of the new array. */
208
209bool
210switch_conversion::check_range ()
211{
212 gcc_assert (m_range_size);
213 if (!tree_fits_uhwi_p (m_range_size))
214 {
215 m_reason = "index range way too large or otherwise unusable";
216 return false;
217 }
218
219 if (tree_to_uhwi (m_range_size)
220 > ((unsigned) m_count * param_switch_conversion_branch_ratio))
221 {
222 m_reason = "the maximum range-branch ratio exceeded";
223 return false;
224 }
225
226 return true;
227}
228
229/* Checks whether all but the final BB basic blocks are empty. */
230
231bool
232switch_conversion::check_all_empty_except_final ()
233{
234 edge e, e_default = find_edge (m_switch_bb, m_default_bb);
235 edge_iterator ei;
236
237 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
238 {
239 if (e->dest == m_final_bb)
240 continue;
241
242 if (!empty_block_p (e->dest))
243 {
244 if (m_contiguous_range && e == e_default)
245 {
246 m_default_case_nonstandard = true;
247 continue;
248 }
249
250 m_reason = "bad case - a non-final BB not empty";
251 return false;
252 }
253 }
254
255 return true;
256}
257
258/* This function checks whether all required values in phi nodes in final_bb
259 are constants. Required values are those that correspond to a basic block
260 which is a part of the examined switch statement. It returns true if the
261 phi nodes are OK, otherwise false. */
262
263bool
264switch_conversion::check_final_bb ()
265{
266 gphi_iterator gsi;
267
268 m_phi_count = 0;
269 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
270 {
271 gphi *phi = gsi.phi ();
272 unsigned int i;
273
274 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
275 continue;
276
277 m_phi_count++;
278
279 for (i = 0; i < gimple_phi_num_args (gs: phi); i++)
280 {
281 basic_block bb = gimple_phi_arg_edge (phi, i)->src;
282
283 if (bb == m_switch_bb
284 || (single_pred_p (bb)
285 && single_pred (bb) == m_switch_bb
286 && (!m_default_case_nonstandard
287 || empty_block_p (bb))))
288 {
289 tree reloc, val;
290 const char *reason = NULL;
291
292 val = gimple_phi_arg_def (gs: phi, index: i);
293 if (!is_gimple_ip_invariant (val))
294 reason = "non-invariant value from a case";
295 else
296 {
297 reloc = initializer_constant_valid_p (val, TREE_TYPE (val));
298 if ((flag_pic && reloc != null_pointer_node)
299 || (!flag_pic && reloc == NULL_TREE))
300 {
301 if (reloc)
302 reason
303 = "value from a case would need runtime relocations";
304 else
305 reason
306 = "value from a case is not a valid initializer";
307 }
308 }
309 if (reason)
310 {
311 /* For contiguous range, we can allow non-constant
312 or one that needs relocation, as long as it is
313 only reachable from the default case. */
314 if (bb == m_switch_bb)
315 bb = m_final_bb;
316 if (!m_contiguous_range || bb != m_default_bb)
317 {
318 m_reason = reason;
319 return false;
320 }
321
322 unsigned int branch_num = gimple_switch_num_labels (gs: m_switch);
323 for (unsigned int i = 1; i < branch_num; i++)
324 {
325 if (gimple_switch_label_bb (cfun, m_switch, i) == bb)
326 {
327 m_reason = reason;
328 return false;
329 }
330 }
331 m_default_case_nonstandard = true;
332 }
333 }
334 }
335 }
336
337 return true;
338}
339
340/* The following function allocates default_values, target_{in,out}_names and
341 constructors arrays. The last one is also populated with pointers to
342 vectors that will become constructors of new arrays. */
343
344void
345switch_conversion::create_temp_arrays ()
346{
347 int i;
348
349 m_default_values = XCNEWVEC (tree, m_phi_count * 3);
350 /* ??? Macros do not support multi argument templates in their
351 argument list. We create a typedef to work around that problem. */
352 typedef vec<constructor_elt, va_gc> *vec_constructor_elt_gc;
353 m_constructors = XCNEWVEC (vec_constructor_elt_gc, m_phi_count);
354 m_target_inbound_names = m_default_values + m_phi_count;
355 m_target_outbound_names = m_target_inbound_names + m_phi_count;
356 for (i = 0; i < m_phi_count; i++)
357 vec_alloc (v&: m_constructors[i], nelems: tree_to_uhwi (m_range_size) + 1);
358}
359
360/* Populate the array of default values in the order of phi nodes.
361 DEFAULT_CASE is the CASE_LABEL_EXPR for the default switch branch
362 if the range is non-contiguous or the default case has standard
363 structure, otherwise it is the first non-default case instead. */
364
365void
366switch_conversion::gather_default_values (tree default_case)
367{
368 gphi_iterator gsi;
369 basic_block bb = label_to_block (cfun, CASE_LABEL (default_case));
370 edge e;
371 int i = 0;
372
373 gcc_assert (CASE_LOW (default_case) == NULL_TREE
374 || m_default_case_nonstandard);
375
376 if (bb == m_final_bb)
377 e = find_edge (m_switch_bb, bb);
378 else
379 e = single_succ_edge (bb);
380
381 for (gsi = gsi_start_phis (m_final_bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
382 {
383 gphi *phi = gsi.phi ();
384 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
385 continue;
386 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
387 gcc_assert (val);
388 m_default_values[i++] = val;
389 }
390}
391
392/* The following function populates the vectors in the constructors array with
393 future contents of the static arrays. The vectors are populated in the
394 order of phi nodes. */
395
396void
397switch_conversion::build_constructors ()
398{
399 unsigned i, branch_num = gimple_switch_num_labels (gs: m_switch);
400 tree pos = m_range_min;
401 tree pos_one = build_int_cst (TREE_TYPE (pos), 1);
402
403 for (i = 1; i < branch_num; i++)
404 {
405 tree cs = gimple_switch_label (gs: m_switch, index: i);
406 basic_block bb = label_to_block (cfun, CASE_LABEL (cs));
407 edge e;
408 tree high;
409 gphi_iterator gsi;
410 int j;
411
412 if (bb == m_final_bb)
413 e = find_edge (m_switch_bb, bb);
414 else
415 e = single_succ_edge (bb);
416 gcc_assert (e);
417
418 while (tree_int_cst_lt (t1: pos, CASE_LOW (cs)))
419 {
420 int k;
421 for (k = 0; k < m_phi_count; k++)
422 {
423 constructor_elt elt;
424
425 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
426 if (TYPE_PRECISION (TREE_TYPE (elt.index))
427 > TYPE_PRECISION (sizetype))
428 elt.index = fold_convert (sizetype, elt.index);
429 elt.value
430 = unshare_expr_without_location (m_default_values[k]);
431 m_constructors[k]->quick_push (obj: elt);
432 }
433
434 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
435 }
436 gcc_assert (tree_int_cst_equal (pos, CASE_LOW (cs)));
437
438 j = 0;
439 if (CASE_HIGH (cs))
440 high = CASE_HIGH (cs);
441 else
442 high = CASE_LOW (cs);
443 for (gsi = gsi_start_phis (m_final_bb);
444 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
445 {
446 gphi *phi = gsi.phi ();
447 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
448 continue;
449 tree val = PHI_ARG_DEF_FROM_EDGE (phi, e);
450 tree low = CASE_LOW (cs);
451 pos = CASE_LOW (cs);
452
453 do
454 {
455 constructor_elt elt;
456
457 elt.index = int_const_binop (MINUS_EXPR, pos, m_range_min);
458 if (TYPE_PRECISION (TREE_TYPE (elt.index))
459 > TYPE_PRECISION (sizetype))
460 elt.index = fold_convert (sizetype, elt.index);
461 elt.value = unshare_expr_without_location (val);
462 m_constructors[j]->quick_push (obj: elt);
463
464 pos = int_const_binop (PLUS_EXPR, pos, pos_one);
465 } while (!tree_int_cst_lt (t1: high, t2: pos)
466 && tree_int_cst_lt (t1: low, t2: pos));
467 j++;
468 }
469 }
470}
471
472/* If all values in the constructor vector are products of a linear function
473 a * x + b, then return true. When true, COEFF_A and COEFF_B and
474 coefficients of the linear function. Note that equal values are special
475 case of a linear function with a and b equal to zero. */
476
477bool
478switch_conversion::contains_linear_function_p (vec<constructor_elt, va_gc> *vec,
479 wide_int *coeff_a,
480 wide_int *coeff_b)
481{
482 unsigned int i;
483 constructor_elt *elt;
484
485 gcc_assert (vec->length () >= 2);
486
487 /* Let's try to find any linear function a * x + y that can apply to
488 given values. 'a' can be calculated as follows:
489
490 a = (y2 - y1) / (x2 - x1) where x2 - x1 = 1 (consecutive case indices)
491 a = y2 - y1
492
493 and
494
495 b = y2 - a * x2
496
497 */
498
499 tree elt0 = (*vec)[0].value;
500 tree elt1 = (*vec)[1].value;
501
502 if (TREE_CODE (elt0) != INTEGER_CST || TREE_CODE (elt1) != INTEGER_CST)
503 return false;
504
505 wide_int range_min
506 = wide_int::from (x: wi::to_wide (t: m_range_min),
507 TYPE_PRECISION (TREE_TYPE (elt0)),
508 TYPE_SIGN (TREE_TYPE (m_range_min)));
509 wide_int y1 = wi::to_wide (t: elt0);
510 wide_int y2 = wi::to_wide (t: elt1);
511 wide_int a = y2 - y1;
512 wide_int b = y2 - a * (range_min + 1);
513
514 /* Verify that all values fulfill the linear function. */
515 FOR_EACH_VEC_SAFE_ELT (vec, i, elt)
516 {
517 if (TREE_CODE (elt->value) != INTEGER_CST)
518 return false;
519
520 wide_int value = wi::to_wide (t: elt->value);
521 if (a * range_min + b != value)
522 return false;
523
524 ++range_min;
525 }
526
527 *coeff_a = a;
528 *coeff_b = b;
529
530 return true;
531}
532
533/* Return type which should be used for array elements, either TYPE's
534 main variant or, for integral types, some smaller integral type
535 that can still hold all the constants. */
536
537tree
538switch_conversion::array_value_type (tree type, int num)
539{
540 unsigned int i, len = vec_safe_length (v: m_constructors[num]);
541 constructor_elt *elt;
542 int sign = 0;
543 tree smaller_type;
544
545 /* Types with alignments greater than their size can reach here, e.g. out of
546 SRA. We couldn't use these as an array component type so get back to the
547 main variant first, which, for our purposes, is fine for other types as
548 well. */
549
550 type = TYPE_MAIN_VARIANT (type);
551
552 if (!INTEGRAL_TYPE_P (type)
553 || (TREE_CODE (type) == BITINT_TYPE
554 && (TYPE_PRECISION (type) > MAX_FIXED_MODE_SIZE
555 || TYPE_MODE (type) == BLKmode)))
556 return type;
557
558 scalar_int_mode type_mode = SCALAR_INT_TYPE_MODE (type);
559 scalar_int_mode mode = get_narrowest_mode (mode: type_mode);
560 if (GET_MODE_SIZE (mode: type_mode) <= GET_MODE_SIZE (mode))
561 return type;
562
563 if (len < (optimize_bb_for_size_p (gimple_bb (g: m_switch)) ? 2 : 32))
564 return type;
565
566 FOR_EACH_VEC_SAFE_ELT (m_constructors[num], i, elt)
567 {
568 wide_int cst;
569
570 if (TREE_CODE (elt->value) != INTEGER_CST)
571 return type;
572
573 cst = wi::to_wide (t: elt->value);
574 while (1)
575 {
576 unsigned int prec = GET_MODE_BITSIZE (mode);
577 if (prec > HOST_BITS_PER_WIDE_INT)
578 return type;
579
580 if (sign >= 0 && cst == wi::zext (x: cst, offset: prec))
581 {
582 if (sign == 0 && cst == wi::sext (x: cst, offset: prec))
583 break;
584 sign = 1;
585 break;
586 }
587 if (sign <= 0 && cst == wi::sext (x: cst, offset: prec))
588 {
589 sign = -1;
590 break;
591 }
592
593 if (sign == 1)
594 sign = 0;
595
596 if (!GET_MODE_WIDER_MODE (m: mode).exists (mode: &mode)
597 || GET_MODE_SIZE (mode) >= GET_MODE_SIZE (mode: type_mode))
598 return type;
599 }
600 }
601
602 if (sign == 0)
603 sign = TYPE_UNSIGNED (type) ? 1 : -1;
604 smaller_type = lang_hooks.types.type_for_mode (mode, sign >= 0);
605 if (GET_MODE_SIZE (mode: type_mode)
606 <= GET_MODE_SIZE (SCALAR_INT_TYPE_MODE (smaller_type)))
607 return type;
608
609 return smaller_type;
610}
611
612/* Create an appropriate array type and declaration and assemble a static
613 array variable. Also create a load statement that initializes
614 the variable in question with a value from the static array. SWTCH is
615 the switch statement being converted, NUM is the index to
616 arrays of constructors, default values and target SSA names
617 for this particular array. ARR_INDEX_TYPE is the type of the index
618 of the new array, PHI is the phi node of the final BB that corresponds
619 to the value that will be loaded from the created array. TIDX
620 is an ssa name of a temporary variable holding the index for loads from the
621 new array. */
622
623void
624switch_conversion::build_one_array (int num, tree arr_index_type,
625 gphi *phi, tree tidx)
626{
627 tree name;
628 gimple *load;
629 gimple_stmt_iterator gsi = gsi_for_stmt (m_switch);
630 location_t loc = gimple_location (g: m_switch);
631
632 gcc_assert (m_default_values[num]);
633
634 name = copy_ssa_name (PHI_RESULT (phi));
635 m_target_inbound_names[num] = name;
636
637 vec<constructor_elt, va_gc> *constructor = m_constructors[num];
638 wide_int coeff_a, coeff_b;
639 bool linear_p = contains_linear_function_p (vec: constructor, coeff_a: &coeff_a, coeff_b: &coeff_b);
640 tree type;
641 if (linear_p
642 && (type = range_check_type (TREE_TYPE ((*constructor)[0].value))))
643 {
644 if (dump_file && coeff_a.to_uhwi () > 0)
645 fprintf (stream: dump_file, format: "Linear transformation with A = %" PRId64
646 " and B = %" PRId64 "\n", coeff_a.to_shwi (),
647 coeff_b.to_shwi ());
648
649 /* We must use type of constructor values. */
650 gimple_seq seq = NULL;
651 tree tmp = gimple_convert (seq: &seq, type, op: m_index_expr);
652 tree tmp2 = gimple_build (seq: &seq, code: MULT_EXPR, type,
653 ops: wide_int_to_tree (type, cst: coeff_a), ops: tmp);
654 tree tmp3 = gimple_build (seq: &seq, code: PLUS_EXPR, type, ops: tmp2,
655 ops: wide_int_to_tree (type, cst: coeff_b));
656 tree tmp4 = gimple_convert (seq: &seq, TREE_TYPE (name), op: tmp3);
657 gsi_insert_seq_before (&gsi, seq, GSI_SAME_STMT);
658 load = gimple_build_assign (name, tmp4);
659 }
660 else
661 {
662 tree array_type, ctor, decl, value_type, fetch, default_type;
663
664 default_type = TREE_TYPE (m_default_values[num]);
665 value_type = array_value_type (type: default_type, num);
666 array_type = build_array_type (value_type, arr_index_type);
667 if (default_type != value_type)
668 {
669 unsigned int i;
670 constructor_elt *elt;
671
672 FOR_EACH_VEC_SAFE_ELT (constructor, i, elt)
673 elt->value = fold_convert (value_type, elt->value);
674 }
675 ctor = build_constructor (array_type, constructor);
676 TREE_CONSTANT (ctor) = true;
677 TREE_STATIC (ctor) = true;
678
679 decl = build_decl (loc, VAR_DECL, NULL_TREE, array_type);
680 TREE_STATIC (decl) = 1;
681 DECL_INITIAL (decl) = ctor;
682
683 DECL_NAME (decl) = create_tmp_var_name ("CSWTCH");
684 DECL_ARTIFICIAL (decl) = 1;
685 DECL_IGNORED_P (decl) = 1;
686 TREE_CONSTANT (decl) = 1;
687 TREE_READONLY (decl) = 1;
688 DECL_IGNORED_P (decl) = 1;
689 if (offloading_function_p (cfun->decl))
690 DECL_ATTRIBUTES (decl)
691 = tree_cons (get_identifier ("omp declare target"), NULL_TREE,
692 NULL_TREE);
693 varpool_node::finalize_decl (decl);
694
695 fetch = build4 (ARRAY_REF, value_type, decl, tidx, NULL_TREE,
696 NULL_TREE);
697 if (default_type != value_type)
698 {
699 fetch = fold_convert (default_type, fetch);
700 fetch = force_gimple_operand_gsi (&gsi, fetch, true, NULL_TREE,
701 true, GSI_SAME_STMT);
702 }
703 load = gimple_build_assign (name, fetch);
704 }
705
706 gsi_insert_before (&gsi, load, GSI_SAME_STMT);
707 update_stmt (s: load);
708 m_arr_ref_last = load;
709}
710
711/* Builds and initializes static arrays initialized with values gathered from
712 the switch statement. Also creates statements that load values from
713 them. */
714
715void
716switch_conversion::build_arrays ()
717{
718 tree arr_index_type;
719 tree tidx, sub, utype, tidxtype;
720 gimple *stmt;
721 gimple_stmt_iterator gsi;
722 gphi_iterator gpi;
723 int i;
724 location_t loc = gimple_location (g: m_switch);
725
726 gsi = gsi_for_stmt (m_switch);
727
728 /* Make sure we do not generate arithmetics in a subrange. */
729 utype = TREE_TYPE (m_index_expr);
730 if (TREE_TYPE (utype))
731 utype = lang_hooks.types.type_for_mode (TYPE_MODE (TREE_TYPE (utype)), 1);
732 else if (TREE_CODE (utype) == BITINT_TYPE
733 && (TYPE_PRECISION (utype) > MAX_FIXED_MODE_SIZE
734 || TYPE_MODE (utype) == BLKmode))
735 utype = unsigned_type_for (utype);
736 else
737 utype = lang_hooks.types.type_for_mode (TYPE_MODE (utype), 1);
738 if (TYPE_PRECISION (utype) > TYPE_PRECISION (sizetype))
739 tidxtype = sizetype;
740 else
741 tidxtype = utype;
742
743 arr_index_type = build_index_type (m_range_size);
744 tidx = make_ssa_name (var: tidxtype);
745 sub = fold_build2_loc (loc, MINUS_EXPR, utype,
746 fold_convert_loc (loc, utype, m_index_expr),
747 fold_convert_loc (loc, utype, m_range_min));
748 sub = fold_convert (tidxtype, sub);
749 sub = force_gimple_operand_gsi (&gsi, sub,
750 false, NULL, true, GSI_SAME_STMT);
751 stmt = gimple_build_assign (tidx, sub);
752
753 gsi_insert_before (&gsi, stmt, GSI_SAME_STMT);
754 update_stmt (s: stmt);
755 m_arr_ref_first = stmt;
756
757 for (gpi = gsi_start_phis (m_final_bb), i = 0;
758 !gsi_end_p (i: gpi); gsi_next (i: &gpi))
759 {
760 gphi *phi = gpi.phi ();
761 if (!virtual_operand_p (op: gimple_phi_result (gs: phi)))
762 build_one_array (num: i++, arr_index_type, phi, tidx);
763 else
764 {
765 edge e;
766 edge_iterator ei;
767 FOR_EACH_EDGE (e, ei, m_switch_bb->succs)
768 {
769 if (e->dest == m_final_bb)
770 break;
771 if (!m_default_case_nonstandard
772 || e->dest != m_default_bb)
773 {
774 e = single_succ_edge (bb: e->dest);
775 break;
776 }
777 }
778 gcc_assert (e && e->dest == m_final_bb);
779 m_target_vop = PHI_ARG_DEF_FROM_EDGE (phi, e);
780 }
781 }
782}
783
784/* Generates and appropriately inserts loads of default values at the position
785 given by GSI. Returns the last inserted statement. */
786
787gassign *
788switch_conversion::gen_def_assigns (gimple_stmt_iterator *gsi)
789{
790 int i;
791 gassign *assign = NULL;
792
793 for (i = 0; i < m_phi_count; i++)
794 {
795 tree name = copy_ssa_name (var: m_target_inbound_names[i]);
796 m_target_outbound_names[i] = name;
797 assign = gimple_build_assign (name, m_default_values[i]);
798 gsi_insert_before (gsi, assign, GSI_SAME_STMT);
799 update_stmt (s: assign);
800 }
801 return assign;
802}
803
804/* Deletes the unused bbs and edges that now contain the switch statement and
805 its empty branch bbs. BBD is the now dead BB containing
806 the original switch statement, FINAL is the last BB of the converted
807 switch statement (in terms of succession). */
808
809void
810switch_conversion::prune_bbs (basic_block bbd, basic_block final,
811 basic_block default_bb)
812{
813 edge_iterator ei;
814 edge e;
815
816 for (ei = ei_start (bbd->succs); (e = ei_safe_edge (i: ei)); )
817 {
818 basic_block bb;
819 bb = e->dest;
820 remove_edge (e);
821 if (bb != final && bb != default_bb)
822 delete_basic_block (bb);
823 }
824 delete_basic_block (bbd);
825}
826
827/* Add values to phi nodes in final_bb for the two new edges. E1F is the edge
828 from the basic block loading values from an array and E2F from the basic
829 block loading default values. BBF is the last switch basic block (see the
830 bbf description in the comment below). */
831
832void
833switch_conversion::fix_phi_nodes (edge e1f, edge e2f, basic_block bbf)
834{
835 gphi_iterator gsi;
836 int i;
837
838 for (gsi = gsi_start_phis (bbf), i = 0;
839 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
840 {
841 gphi *phi = gsi.phi ();
842 tree inbound, outbound;
843 if (virtual_operand_p (op: gimple_phi_result (gs: phi)))
844 inbound = outbound = m_target_vop;
845 else
846 {
847 inbound = m_target_inbound_names[i];
848 outbound = m_target_outbound_names[i++];
849 }
850 add_phi_arg (phi, inbound, e1f, UNKNOWN_LOCATION);
851 if (!m_default_case_nonstandard)
852 add_phi_arg (phi, outbound, e2f, UNKNOWN_LOCATION);
853 }
854}
855
856/* Creates a check whether the switch expression value actually falls into the
857 range given by all the cases. If it does not, the temporaries are loaded
858 with default values instead. */
859
860void
861switch_conversion::gen_inbound_check ()
862{
863 tree label_decl1 = create_artificial_label (UNKNOWN_LOCATION);
864 tree label_decl2 = create_artificial_label (UNKNOWN_LOCATION);
865 tree label_decl3 = create_artificial_label (UNKNOWN_LOCATION);
866 glabel *label1, *label2, *label3;
867 tree utype, tidx;
868 tree bound;
869
870 gcond *cond_stmt;
871
872 gassign *last_assign = NULL;
873 gimple_stmt_iterator gsi;
874 basic_block bb0, bb1, bb2, bbf, bbd;
875 edge e01 = NULL, e02, e21, e1d, e1f, e2f;
876 location_t loc = gimple_location (g: m_switch);
877
878 gcc_assert (m_default_values);
879
880 bb0 = gimple_bb (g: m_switch);
881
882 tidx = gimple_assign_lhs (gs: m_arr_ref_first);
883 utype = TREE_TYPE (tidx);
884
885 /* (end of) block 0 */
886 gsi = gsi_for_stmt (m_arr_ref_first);
887 gsi_next (i: &gsi);
888
889 bound = fold_convert_loc (loc, utype, m_range_size);
890 cond_stmt = gimple_build_cond (LE_EXPR, tidx, bound, NULL_TREE, NULL_TREE);
891 gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
892 update_stmt (s: cond_stmt);
893
894 /* block 2 */
895 if (!m_default_case_nonstandard)
896 {
897 label2 = gimple_build_label (label: label_decl2);
898 gsi_insert_before (&gsi, label2, GSI_SAME_STMT);
899 last_assign = gen_def_assigns (gsi: &gsi);
900 }
901
902 /* block 1 */
903 label1 = gimple_build_label (label: label_decl1);
904 gsi_insert_before (&gsi, label1, GSI_SAME_STMT);
905
906 /* block F */
907 gsi = gsi_start_bb (bb: m_final_bb);
908 label3 = gimple_build_label (label: label_decl3);
909 gsi_insert_before (&gsi, label3, GSI_SAME_STMT);
910
911 /* cfg fix */
912 e02 = split_block (bb0, cond_stmt);
913 bb2 = e02->dest;
914
915 if (m_default_case_nonstandard)
916 {
917 bb1 = bb2;
918 bb2 = m_default_bb;
919 e01 = e02;
920 e01->flags = EDGE_TRUE_VALUE;
921 e02 = make_edge (bb0, bb2, EDGE_FALSE_VALUE);
922 edge e_default = find_edge (bb1, bb2);
923 for (gphi_iterator gsi = gsi_start_phis (bb2);
924 !gsi_end_p (i: gsi); gsi_next (i: &gsi))
925 {
926 gphi *phi = gsi.phi ();
927 tree arg = PHI_ARG_DEF_FROM_EDGE (phi, e_default);
928 add_phi_arg (phi, arg, e02,
929 gimple_phi_arg_location_from_edge (phi, e: e_default));
930 }
931 /* Partially fix the dominator tree, if it is available. */
932 if (dom_info_available_p (CDI_DOMINATORS))
933 redirect_immediate_dominators (CDI_DOMINATORS, bb1, bb0);
934 }
935 else
936 {
937 e21 = split_block (bb2, last_assign);
938 bb1 = e21->dest;
939 remove_edge (e21);
940 }
941
942 e1d = split_block (bb1, m_arr_ref_last);
943 bbd = e1d->dest;
944 remove_edge (e1d);
945
946 /* Flags and profiles of the edge for in-range values. */
947 if (!m_default_case_nonstandard)
948 e01 = make_edge (bb0, bb1, EDGE_TRUE_VALUE);
949 e01->probability = m_default_prob.invert ();
950
951 /* Flags and profiles of the edge taking care of out-of-range values. */
952 e02->flags &= ~EDGE_FALLTHRU;
953 e02->flags |= EDGE_FALSE_VALUE;
954 e02->probability = m_default_prob;
955
956 bbf = m_final_bb;
957
958 e1f = make_edge (bb1, bbf, EDGE_FALLTHRU);
959 e1f->probability = profile_probability::always ();
960
961 if (m_default_case_nonstandard)
962 e2f = NULL;
963 else
964 {
965 e2f = make_edge (bb2, bbf, EDGE_FALLTHRU);
966 e2f->probability = profile_probability::always ();
967 }
968
969 /* frequencies of the new BBs */
970 bb1->count = e01->count ();
971 bb2->count = e02->count ();
972 if (!m_default_case_nonstandard)
973 bbf->count = e1f->count () + e2f->count ();
974
975 /* Tidy blocks that have become unreachable. */
976 prune_bbs (bbd, final: m_final_bb,
977 default_bb: m_default_case_nonstandard ? m_default_bb : NULL);
978
979 /* Fixup the PHI nodes in bbF. */
980 fix_phi_nodes (e1f, e2f, bbf);
981
982 /* Fix the dominator tree, if it is available. */
983 if (dom_info_available_p (CDI_DOMINATORS))
984 {
985 vec<basic_block> bbs_to_fix_dom;
986
987 set_immediate_dominator (CDI_DOMINATORS, bb1, bb0);
988 if (!m_default_case_nonstandard)
989 set_immediate_dominator (CDI_DOMINATORS, bb2, bb0);
990 if (! get_immediate_dominator (CDI_DOMINATORS, bbf))
991 /* If bbD was the immediate dominator ... */
992 set_immediate_dominator (CDI_DOMINATORS, bbf, bb0);
993
994 bbs_to_fix_dom.create (nelems: 3 + (bb2 != bbf));
995 bbs_to_fix_dom.quick_push (obj: bb0);
996 bbs_to_fix_dom.quick_push (obj: bb1);
997 if (bb2 != bbf)
998 bbs_to_fix_dom.quick_push (obj: bb2);
999 bbs_to_fix_dom.quick_push (obj: bbf);
1000
1001 iterate_fix_dominators (CDI_DOMINATORS, bbs_to_fix_dom, true);
1002 bbs_to_fix_dom.release ();
1003 }
1004}
1005
1006/* The following function is invoked on every switch statement (the current
1007 one is given in SWTCH) and runs the individual phases of switch
1008 conversion on it one after another until one fails or the conversion
1009 is completed. On success, NULL is in m_reason, otherwise points
1010 to a string with the reason why the conversion failed. */
1011
void
switch_conversion::expand (gswitch *swtch)
{
  /* Group case labels so that we get the right results from the heuristics
     that decide on the code generation approach for this switch.  */
  m_cfg_altered |= group_case_labels_stmt (swtch);

  /* If this switch is now a degenerate case with only a default label,
     there is nothing left for us to do.  */
  if (gimple_switch_num_labels (gs: swtch) < 2)
    {
      m_reason = "switch is a degenerate case";
      return;
    }

  /* Gather the switch's statistics (range size, number of cases, number
     of unique targets, final basic block, ...) into the members used by
     the checks below.  */
  collect (swtch);

  /* No error markers should reach here (they should be filtered out
     during gimplification).  */
  gcc_checking_assert (TREE_TYPE (m_index_expr) != error_mark_node);

  /* Prefer bit test if possible: bail out so the decision-tree path can
     emit bit tests instead of converting to an array lookup.  */
  if (tree_fits_uhwi_p (m_range_size)
      && bit_test_cluster::can_be_handled (range: tree_to_uhwi (m_range_size), uniq: m_uniq)
      && bit_test_cluster::is_beneficial (count: m_count, uniq: m_uniq))
    {
      m_reason = "expanding as bit test is preferable";
      return;
    }

  if (m_uniq <= 2)
    {
      /* This will be expanded as a decision tree.  */
      m_reason = "expanding as jumps is preferable";
      return;
    }

  /* If there is no common successor, we cannot do the transformation.  */
  if (!m_final_bb)
    {
      m_reason = "no common successor to all case label target blocks found";
      return;
    }

  /* Check the case label values are within reasonable range.  The callee
     sets m_reason on failure.  */
  if (!check_range ())
    {
      gcc_assert (m_reason);
      return;
    }

  /* For all the cases, see whether they are empty, the assignments they
     represent constant and so on...  */
  if (!check_all_empty_except_final ())
    {
      gcc_assert (m_reason);
      return;
    }
  if (!check_final_bb ())
    {
      gcc_assert (m_reason);
      return;
    }

  /* At this point all checks have passed and we can proceed with the
     transformation.  */

  create_temp_arrays ();
  /* With a nonstandard default case the first case label stands in for
     the default when collecting the fallback values.  */
  gather_default_values (default_case: m_default_case_nonstandard
			 ? gimple_switch_label (gs: swtch, index: 1)
			 : gimple_switch_default_label (gs: swtch));
  build_constructors ();

  build_arrays (); /* Build the static arrays and assignments.  */
  gen_inbound_check ();	/* Build the bounds check.  */

  /* The CFG has been changed by the array/bounds-check emission above.  */
  m_cfg_altered = true;
}
1090
1091/* Destructor. */
1092
1093switch_conversion::~switch_conversion ()
1094{
1095 XDELETEVEC (m_constructors);
1096 XDELETEVEC (m_default_values);
1097}
1098
1099/* Constructor. */
1100
1101group_cluster::group_cluster (vec<cluster *> &clusters,
1102 unsigned start, unsigned end)
1103{
1104 gcc_checking_assert (end - start + 1 >= 1);
1105 m_prob = profile_probability::never ();
1106 m_cases.create (nelems: end - start + 1);
1107 for (unsigned i = start; i <= end; i++)
1108 {
1109 m_cases.quick_push (obj: static_cast<simple_cluster *> (clusters[i]));
1110 m_prob += clusters[i]->m_prob;
1111 }
1112 m_subtree_prob = m_prob;
1113}
1114
1115/* Destructor. */
1116
1117group_cluster::~group_cluster ()
1118{
1119 for (unsigned i = 0; i < m_cases.length (); i++)
1120 delete m_cases[i];
1121
1122 m_cases.release ();
1123}
1124
1125/* Dump content of a cluster. */
1126
1127void
1128group_cluster::dump (FILE *f, bool details)
1129{
1130 unsigned total_values = 0;
1131 for (unsigned i = 0; i < m_cases.length (); i++)
1132 total_values += m_cases[i]->get_range (low: m_cases[i]->get_low (),
1133 high: m_cases[i]->get_high ());
1134
1135 unsigned comparison_count = 0;
1136 for (unsigned i = 0; i < m_cases.length (); i++)
1137 {
1138 simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
1139 comparison_count += sc->get_comparison_count ();
1140 }
1141
1142 unsigned HOST_WIDE_INT range = get_range (low: get_low (), high: get_high ());
1143 fprintf (stream: f, format: "%s", get_type () == JUMP_TABLE ? "JT" : "BT");
1144
1145 if (details)
1146 fprintf (stream: f, format: "(values:%d comparisons:%d range:" HOST_WIDE_INT_PRINT_DEC
1147 " density: %.2f%%)", total_values, comparison_count, range,
1148 100.0f * comparison_count / range);
1149
1150 fprintf (stream: f, format: ":");
1151 PRINT_CASE (f, get_low ());
1152 fprintf (stream: f, format: "-");
1153 PRINT_CASE (f, get_high ());
1154 fprintf (stream: f, format: " ");
1155}
1156
1157/* Emit GIMPLE code to handle the cluster. */
1158
/* Emit GIMPLE code to handle the cluster: build a fresh gswitch in
   M_CASE_BB that will later be lowered to a jump table, wire up the
   outgoing edges and distribute edge probabilities proportionally to
   the number of case values each edge covers.  */

void
jump_table_cluster::emit (tree index_expr, tree,
			  tree default_label_expr, basic_block default_bb,
			  location_t loc)
{
  tree low = get_low ();
  unsigned HOST_WIDE_INT range = get_range (low, high: get_high ());
  unsigned HOST_WIDE_INT nondefault_range = 0;
  bool bitint = false;
  gimple_stmt_iterator gsi = gsi_start_bb (bb: m_case_bb);

  /* For large/huge _BitInt, subtract low from index_expr, cast to unsigned
     DImode type (get_range doesn't support ranges larger than 64-bits)
     and subtract low from all case values as well.  */
  if (TREE_CODE (TREE_TYPE (index_expr)) == BITINT_TYPE
      && TYPE_PRECISION (TREE_TYPE (index_expr)) > GET_MODE_PRECISION (DImode))
    {
      bitint = true;
      tree this_low = low, type;
      gimple *g;
      gimple_seq seq = NULL;
      /* Do the subtraction in the corresponding unsigned type when the
	 index type would otherwise have undefined overflow.  */
      if (!TYPE_OVERFLOW_WRAPS (TREE_TYPE (index_expr)))
	{
	  type = unsigned_type_for (TREE_TYPE (index_expr));
	  index_expr = gimple_convert (seq: &seq, type, op: index_expr);
	  this_low = fold_convert (type, this_low);
	}
      /* index_expr = index_expr + (-low), i.e. index_expr - low.  */
      this_low = const_unop (NEGATE_EXPR, TREE_TYPE (this_low), this_low);
      index_expr = gimple_build (seq: &seq, code: PLUS_EXPR, TREE_TYPE (index_expr),
				 ops: index_expr, ops: this_low);
      /* Guard: anything above the unsigned DImode maximum cannot match a
	 case value (get_range is limited to 64 bits) and goes straight to
	 the default block.  */
      type = build_nonstandard_integer_type (GET_MODE_PRECISION (DImode), 1);
      g = gimple_build_cond (GT_EXPR, index_expr,
			     fold_convert (TREE_TYPE (index_expr),
					   TYPE_MAX_VALUE (type)),
			     NULL_TREE, NULL_TREE);
      gimple_seq_add_stmt (&seq, g);
      gimple_seq_set_location (seq, loc);
      gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
      edge e1 = split_block (m_case_bb, g);
      e1->flags = EDGE_FALSE_VALUE;
      e1->probability = profile_probability::likely ();
      edge e2 = make_edge (e1->src, default_bb, EDGE_TRUE_VALUE);
      e2->probability = e1->probability.invert ();
      /* Continue emitting the narrowing conversion in the in-range
	 block.  */
      gsi = gsi_start_bb (bb: e1->dest);
      seq = NULL;
      index_expr = gimple_convert (seq: &seq, type, op: index_expr);
      gimple_seq_set_location (seq, loc);
      gsi_insert_seq_after (&gsi, seq, GSI_NEW_STMT);
    }

  /* For jump table we just emit a new gswitch statement that will
     be later lowered to jump table.  */
  auto_vec <tree> labels;
  labels.create (nelems: m_cases.length ());

  basic_block case_bb = gsi_bb (i: gsi);
  make_edge (case_bb, default_bb, 0);
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      tree lab = unshare_expr (m_cases[i]->m_case_label_expr);
      /* In the bitint case the index was rebased to start at zero, so the
	 case bounds must be rebased (and retyped) the same way.  */
      if (bitint)
	{
	  CASE_LOW (lab)
	    = fold_convert (TREE_TYPE (index_expr),
			    const_binop (MINUS_EXPR,
					 TREE_TYPE (CASE_LOW (lab)),
					 CASE_LOW (lab), low));
	  if (CASE_HIGH (lab))
	    CASE_HIGH (lab)
	      = fold_convert (TREE_TYPE (index_expr),
			      const_binop (MINUS_EXPR,
					   TREE_TYPE (CASE_HIGH (lab)),
					   CASE_HIGH (lab), low));
	}
      labels.quick_push (obj: lab);
      make_edge (case_bb, m_cases[i]->m_case_bb, 0);
    }

  gswitch *s = gimple_build_switch (index_expr,
				    unshare_expr (default_label_expr), labels);
  gimple_set_location (g: s, location: loc);
  gsi_insert_after (&gsi, s, GSI_NEW_STMT);

  /* Set up even probabilities for all cases.  */
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
      edge case_edge = find_edge (case_bb, sc->m_case_bb);
      unsigned HOST_WIDE_INT case_range
	= sc->get_range (low: sc->get_low (), high: sc->get_high ());
      nondefault_range += case_range;

      /* case_edge->aux is number of values in a jump-table that are covered
	 by the case_edge.  */
      case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + case_range);
    }

  edge default_edge = gimple_switch_default_edge (cfun, s);
  default_edge->probability = profile_probability::never ();

  /* Each case edge gets a probability proportional to the share of the
     table's slots it covers.  */
  for (unsigned i = 0; i < m_cases.length (); i++)
    {
      simple_cluster *sc = static_cast<simple_cluster *> (m_cases[i]);
      edge case_edge = find_edge (case_bb, sc->m_case_bb);
      case_edge->probability
	= profile_probability::always ().apply_scale (num: (intptr_t)case_edge->aux,
						      den: range);
    }

  /* Number of non-default values is probability of default edge.  */
  default_edge->probability
    += profile_probability::always ().apply_scale (num: nondefault_range,
						   den: range).invert ();

  switch_decision_tree::reset_out_edges_aux (swtch: s);
}
1275
1276/* Find jump tables of given CLUSTERS, where all members of the vector
1277 are of type simple_cluster. New clusters are returned. */
1278
/* Find jump tables of given CLUSTERS, where all members of the vector
   are of type simple_cluster.  New clusters are returned.

   This is a dynamic-programming partition: min[i] records the cheapest
   way to cover the first i clusters, where min[i].m_start is the start
   of the last partition in that solution.  The result is rebuilt by
   walking the m_start links backwards from min[l].  */

vec<cluster *>
jump_table_cluster::find_jump_tables (vec<cluster *> &clusters)
{
  if (!is_enabled ())
    return clusters.copy ();

  unsigned l = clusters.length ();
  auto_vec<min_cluster_item> min;
  min.reserve (nelems: l + 1);

  /* Base case: zero clusters need zero partitions.  */
  min.quick_push (obj: min_cluster_item (0, 0, 0));

  unsigned HOST_WIDE_INT max_ratio
    = (optimize_insn_for_size_p ()
       ? param_jump_table_max_growth_ratio_for_size
       : param_jump_table_max_growth_ratio_for_speed);

  for (unsigned i = 1; i <= l; i++)
    {
      /* Set minimal # of clusters with i-th item to infinite.  */
      min.quick_push (obj: min_cluster_item (INT_MAX, INT_MAX, INT_MAX));

      /* Pre-calculate number of comparisons for the clusters.  */
      HOST_WIDE_INT comparison_count = 0;
      for (unsigned k = 0; k <= i - 1; k++)
	{
	  simple_cluster *sc = static_cast<simple_cluster *> (clusters[k]);
	  comparison_count += sc->get_comparison_count ();
	}

      /* Try every split point j: clusters [j, i-1] would form one jump
	 table; comparison_count is maintained incrementally so it always
	 equals the comparisons for exactly that candidate range.  */
      for (unsigned j = 0; j < i; j++)
	{
	  unsigned HOST_WIDE_INT s = min[j].m_non_jt_cases;
	  /* Ranges below the target's threshold stay as plain cases.  */
	  if (i - j < case_values_threshold ())
	    s += i - j;

	  /* Prefer clusters with smaller number of numbers covered.  */
	  if ((min[j].m_count + 1 < min[i].m_count
	       || (min[j].m_count + 1 == min[i].m_count
		   && s < min[i].m_non_jt_cases))
	      && can_be_handled (clusters, start: j, end: i - 1, max_ratio,
				 comparison_count))
	    min[i] = min_cluster_item (min[j].m_count + 1, j, s);

	  simple_cluster *sc = static_cast<simple_cluster *> (clusters[j]);
	  comparison_count -= sc->get_comparison_count ();
	}

      gcc_checking_assert (comparison_count == 0);
      gcc_checking_assert (min[i].m_count != INT_MAX);
    }

  /* No result: every cluster ended up in its own partition.  */
  if (min[l].m_count == l)
    return clusters.copy ();

  vec<cluster *> output;
  output.create (nelems: 4);

  /* Find and build the clusters by following the m_start back-links;
     the partitions are therefore visited last-to-first.  */
  for (unsigned int end = l;;)
    {
      int start = min[end].m_start;

      /* Do not allow clusters with small number of cases.  */
      if (is_beneficial (clusters, start, end: end - 1))
	output.safe_push (obj: new jump_table_cluster (clusters, start, end - 1));
      else
	for (int i = end - 1; i >= start; i--)
	  output.safe_push (obj: clusters[i]);

      end = start;

      if (start <= 0)
	break;
    }

  /* The walk above produced the partitions in reverse order.  */
  output.reverse ();
  return output;
}
1359
1360/* Return true when cluster starting at START and ending at END (inclusive)
1361 can build a jump-table. */
1362
1363bool
1364jump_table_cluster::can_be_handled (const vec<cluster *> &clusters,
1365 unsigned start, unsigned end,
1366 unsigned HOST_WIDE_INT max_ratio,
1367 unsigned HOST_WIDE_INT comparison_count)
1368{
1369 /* If the switch is relatively small such that the cost of one
1370 indirect jump on the target are higher than the cost of a
1371 decision tree, go with the decision tree.
1372
1373 If range of values is much bigger than number of values,
1374 or if it is too large to represent in a HOST_WIDE_INT,
1375 make a sequence of conditional branches instead of a dispatch.
1376
1377 The definition of "much bigger" depends on whether we are
1378 optimizing for size or for speed.
1379
1380 For algorithm correctness, jump table for a single case must return
1381 true. We bail out in is_beneficial if it's called just for
1382 a single case. */
1383 if (start == end)
1384 return true;
1385
1386 unsigned HOST_WIDE_INT range = get_range (low: clusters[start]->get_low (),
1387 high: clusters[end]->get_high ());
1388 /* Check overflow. */
1389 if (range == 0)
1390 return false;
1391
1392 if (range > HOST_WIDE_INT_M1U / 100)
1393 return false;
1394
1395 unsigned HOST_WIDE_INT lhs = 100 * range;
1396 if (lhs < range)
1397 return false;
1398
1399 return lhs <= max_ratio * comparison_count;
1400}
1401
1402/* Return true if cluster starting at START and ending at END (inclusive)
1403 is profitable transformation. */
1404
1405bool
1406jump_table_cluster::is_beneficial (const vec<cluster *> &,
1407 unsigned start, unsigned end)
1408{
1409 /* Single case bail out. */
1410 if (start == end)
1411 return false;
1412
1413 return end - start + 1 >= case_values_threshold ();
1414}
1415
1416/* Find bit tests of given CLUSTERS, where all members of the vector
1417 are of type simple_cluster. New clusters are returned. */
1418
/* Find bit tests of given CLUSTERS, where all members of the vector
   are of type simple_cluster.  New clusters are returned.

   Uses the same dynamic-programming scheme as
   jump_table_cluster::find_jump_tables: min[i] holds the cheapest
   partitioning of the first i clusters, and the chosen partitions are
   recovered by walking the m_start back-links.  */

vec<cluster *>
bit_test_cluster::find_bit_tests (vec<cluster *> &clusters)
{
  if (!is_enabled ())
    return clusters.copy ();

  unsigned l = clusters.length ();
  auto_vec<min_cluster_item> min;
  min.reserve (nelems: l + 1);

  /* Base case: zero clusters need zero partitions.  */
  min.quick_push (obj: min_cluster_item (0, 0, 0));

  for (unsigned i = 1; i <= l; i++)
    {
      /* Set minimal # of clusters with i-th item to infinite.  */
      min.quick_push (obj: min_cluster_item (INT_MAX, INT_MAX, INT_MAX));

      /* Try every split point j where [j, i-1] forms one bit test.  */
      for (unsigned j = 0; j < i; j++)
	{
	  if (min[j].m_count + 1 < min[i].m_count
	      && can_be_handled (clusters, start: j, end: i - 1))
	    min[i] = min_cluster_item (min[j].m_count + 1, j, INT_MAX);
	}

      gcc_checking_assert (min[i].m_count != INT_MAX);
    }

  /* No result: every cluster ended up in its own partition.  */
  if (min[l].m_count == l)
    return clusters.copy ();

  vec<cluster *> output;
  output.create (nelems: 4);

  /* Find and build the clusters, visiting the partitions in reverse.  */
  for (unsigned end = l;;)
    {
      int start = min[end].m_start;

      if (is_beneficial (clusters, start, end: end - 1))
	{
	  /* Remember whether this single bit test covers the whole
	     switch; emit can then fold the entry range check into the
	     shift when the index range allows it.  */
	  bool entire = start == 0 && end == clusters.length ();
	  output.safe_push (obj: new bit_test_cluster (clusters, start, end - 1,
						entire));
	}
      else
	for (int i = end - 1; i >= start; i--)
	  output.safe_push (obj: clusters[i]);

      end = start;

      if (start <= 0)
	break;
    }

  /* The walk above produced the partitions in reverse order.  */
  output.reverse ();
  return output;
}
1477
1478/* Return true when RANGE of case values with UNIQ labels
1479 can build a bit test. */
1480
1481bool
1482bit_test_cluster::can_be_handled (unsigned HOST_WIDE_INT range,
1483 unsigned int uniq)
1484{
1485 /* Check overflow. */
1486 if (range == 0)
1487 return false;
1488
1489 if (range >= GET_MODE_BITSIZE (mode: word_mode))
1490 return false;
1491
1492 return uniq <= m_max_case_bit_tests;
1493}
1494
1495/* Return true when cluster starting at START and ending at END (inclusive)
1496 can build a bit test. */
1497
1498bool
1499bit_test_cluster::can_be_handled (const vec<cluster *> &clusters,
1500 unsigned start, unsigned end)
1501{
1502 auto_vec<int, m_max_case_bit_tests> dest_bbs;
1503 /* For algorithm correctness, bit test for a single case must return
1504 true. We bail out in is_beneficial if it's called just for
1505 a single case. */
1506 if (start == end)
1507 return true;
1508
1509 unsigned HOST_WIDE_INT range = get_range (low: clusters[start]->get_low (),
1510 high: clusters[end]->get_high ());
1511
1512 /* Make a guess first. */
1513 if (!can_be_handled (range, uniq: m_max_case_bit_tests))
1514 return false;
1515
1516 for (unsigned i = start; i <= end; i++)
1517 {
1518 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1519 /* m_max_case_bit_tests is very small integer, thus the operation
1520 is constant. */
1521 if (!dest_bbs.contains (search: sc->m_case_bb->index))
1522 {
1523 if (dest_bbs.length () >= m_max_case_bit_tests)
1524 return false;
1525 dest_bbs.quick_push (obj: sc->m_case_bb->index);
1526 }
1527 }
1528
1529 return true;
1530}
1531
1532/* Return true when COUNT of cases of UNIQ labels is beneficial for bit test
1533 transformation. */
1534
1535bool
1536bit_test_cluster::is_beneficial (unsigned count, unsigned uniq)
1537{
1538 return (((uniq == 1 && count >= 3)
1539 || (uniq == 2 && count >= 5)
1540 || (uniq == 3 && count >= 6)));
1541}
1542
1543/* Return true if cluster starting at START and ending at END (inclusive)
1544 is profitable transformation. */
1545
1546bool
1547bit_test_cluster::is_beneficial (const vec<cluster *> &clusters,
1548 unsigned start, unsigned end)
1549{
1550 /* Single case bail out. */
1551 if (start == end)
1552 return false;
1553
1554 auto_bitmap dest_bbs;
1555
1556 for (unsigned i = start; i <= end; i++)
1557 {
1558 simple_cluster *sc = static_cast<simple_cluster *> (clusters[i]);
1559 bitmap_set_bit (dest_bbs, sc->m_case_bb->index);
1560 }
1561
1562 unsigned uniq = bitmap_count_bits (dest_bbs);
1563 unsigned count = end - start + 1;
1564 return is_beneficial (count, uniq);
1565}
1566
1567/* Comparison function for qsort to order bit tests by decreasing
1568 probability of execution. */
1569
/* Comparison function for qsort to order bit tests by decreasing
   probability of execution, approximated here by the number of case
   values (bits) each test covers.  */

int
case_bit_test::cmp (const void *p1, const void *p2)
{
  const case_bit_test *const d1 = (const case_bit_test *) p1;
  const case_bit_test *const d2 = (const case_bit_test *) p2;

  /* More covered values sorts earlier.  */
  if (d2->bits != d1->bits)
    return d2->bits - d1->bits;

  /* Stabilize the sort: qsort is not stable, so fall back to the
     deterministic label UIDs to keep the output reproducible.  */
  return (LABEL_DECL_UID (CASE_LABEL (d2->label))
	  - LABEL_DECL_UID (CASE_LABEL (d1->label)));
}
1583
1584/* Expand a switch statement by a short sequence of bit-wise
1585 comparisons. "switch(x)" is effectively converted into
1586 "if ((1 << (x-MINVAL)) & CST)" where CST and MINVAL are
1587 integer constants.
1588
1589 INDEX_EXPR is the value being switched on.
1590
   MINVAL is the lowest case value in the case nodes,
1592 and RANGE is highest value minus MINVAL. MINVAL and RANGE
1593 are not guaranteed to be of the same type as INDEX_EXPR
1594 (the gimplifier doesn't change the type of case label values,
1595 and MINVAL and RANGE are derived from those values).
1596 MAXVAL is MINVAL + RANGE.
1597
1598 There *MUST* be max_case_bit_tests or less unique case
1599 node targets. */
1600
void
bit_test_cluster::emit (tree index_expr, tree index_type,
			tree, basic_block default_bb, location_t loc)
{
  /* One entry per distinct target block; bounded by m_max_case_bit_tests,
     which can_be_handled already enforced.  */
  case_bit_test test[m_max_case_bit_tests] = { {} };
  unsigned int i, j, k;
  unsigned int count;

  tree unsigned_index_type = range_check_type (index_type);

  gimple_stmt_iterator gsi;
  gassign *shift_stmt;

  tree idx, tmp, csui;
  tree word_type_node = lang_hooks.types.type_for_mode (word_mode, 1);
  tree word_mode_zero = fold_convert (word_type_node, integer_zero_node);
  tree word_mode_one = fold_convert (word_type_node, integer_one_node);
  int prec = TYPE_PRECISION (word_type_node);
  wide_int wone = wi::one (precision: prec);

  tree minval = get_low ();
  tree maxval = get_high ();

  /* Go through all case labels, and collect the case labels, profile
     counts, and other information we need to build the branch tests.
     Clusters sharing a target block are merged into one test whose mask
     has a bit set for every covered value (relative to MINVAL).  */
  count = 0;
  for (i = 0; i < m_cases.length (); i++)
    {
      unsigned int lo, hi;
      simple_cluster *n = static_cast<simple_cluster *> (m_cases[i]);
      /* Find an existing test for this target block, if any.  */
      for (k = 0; k < count; k++)
	if (n->m_case_bb == test[k].target_bb)
	  break;

      if (k == count)
	{
	  gcc_checking_assert (count < m_max_case_bit_tests);
	  test[k].mask = wi::zero (precision: prec);
	  test[k].target_bb = n->m_case_bb;
	  test[k].label = n->m_case_label_expr;
	  test[k].bits = 0;
	  test[k].prob = profile_probability::never ();
	  count++;
	}

      test[k].bits += n->get_range (low: n->get_low (), high: n->get_high ());
      test[k].prob += n->m_prob;

      /* Set the mask bits for the value range [lo, hi] of this cluster,
	 expressed as offsets from MINVAL.  */
      lo = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_low (), minval));
      if (n->get_high () == NULL_TREE)
	hi = lo;
      else
	hi = tree_to_uhwi (int_const_binop (MINUS_EXPR, n->get_high (),
					    minval));

      for (j = lo; j <= hi; j++)
	test[k].mask |= wi::lshift (x: wone, y: j);
    }

  /* Test the most likely (widest) masks first.  */
  qsort (test, count, sizeof (*test), case_bit_test::cmp);

  /* If every possible relative value of the index expression is a valid shift
     amount, then we can merge the entry test in the bit test.  */
  bool entry_test_needed;
  value_range r;
  if (TREE_CODE (index_expr) == SSA_NAME
      && get_range_query (cfun)->range_of_expr (r, expr: index_expr)
      && !r.undefined_p ()
      && !r.varying_p ()
      && wi::leu_p (x: r.upper_bound () - r.lower_bound (), y: prec - 1))
    {
      wide_int min = r.lower_bound ();
      wide_int max = r.upper_bound ();
      tree index_type = TREE_TYPE (index_expr);
      minval = fold_convert (index_type, minval);
      wide_int iminval = wi::to_wide (t: minval);
      /* Rebase MINVAL to the range's lower bound, shifting each mask so
	 the bit positions still line up with INDEX_EXPR - MINVAL.  */
      if (wi::lt_p (x: min, y: iminval, TYPE_SIGN (index_type)))
	{
	  minval = wide_int_to_tree (type: index_type, cst: min);
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (x: test[i].mask, y: iminval - min);
	}
      else if (wi::gt_p (x: min, y: iminval, TYPE_SIGN (index_type)))
	{
	  minval = wide_int_to_tree (type: index_type, cst: min);
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lrshift (x: test[i].mask, y: min - iminval);
	}
      maxval = wide_int_to_tree (type: index_type, cst: max);
      entry_test_needed = false;
    }
  else
    entry_test_needed = true;

  /* If all values are in the 0 .. BITS_PER_WORD-1 range, we can get rid of
     the minval subtractions, but it might make the mask constants more
     expensive.  So, compare the costs.  */
  if (compare_tree_int (minval, 0) > 0 && compare_tree_int (maxval, prec) < 0)
    {
      int cost_diff;
      HOST_WIDE_INT m = tree_to_uhwi (minval);
      /* A scratch pseudo-register for RTX costing only; never emitted.  */
      rtx reg = gen_raw_REG (word_mode, 10000);
      bool speed_p = optimize_insn_for_speed_p ();
      cost_diff = set_src_cost (gen_rtx_PLUS (word_mode, reg,
					      GEN_INT (-m)),
				mode: word_mode, speed_p);
      for (i = 0; i < count; i++)
	{
	  rtx r = immed_wide_int_const (test[i].mask, word_mode);
	  cost_diff += set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     mode: word_mode, speed_p);
	  r = immed_wide_int_const (wi::lshift (x: test[i].mask, y: m), word_mode);
	  cost_diff -= set_src_cost (gen_rtx_AND (word_mode, reg, r),
				     mode: word_mode, speed_p);
	}
      /* Positive diff: the subtraction costs more than the bigger masks,
	 so shift every mask by M and drop the subtraction.  */
      if (cost_diff > 0)
	{
	  for (i = 0; i < count; i++)
	    test[i].mask = wi::lshift (x: test[i].mask, y: m);
	  minval = build_zero_cst (TREE_TYPE (minval));
	}
    }

  /* Now build the test-and-branch code.  */

  gsi = gsi_last_bb (bb: m_case_bb);

  /* idx = (unsigned)x - minval.  */
  idx = fold_convert_loc (loc, unsigned_index_type, index_expr);
  idx = fold_build2_loc (loc, MINUS_EXPR, unsigned_index_type, idx,
			 fold_convert_loc (loc, unsigned_index_type, minval));
  idx = force_gimple_operand_gsi (&gsi, idx,
				  /*simple=*/true, NULL_TREE,
				  /*before=*/true, GSI_SAME_STMT);

  profile_probability subtree_prob = m_subtree_prob;
  profile_probability default_prob = m_default_prob;
  if (!default_prob.initialized_p ())
    default_prob = m_subtree_prob.invert ();

  if (m_handles_entire_switch && entry_test_needed)
    {
      tree range = int_const_binop (MINUS_EXPR, maxval, minval);
      /* if (idx > range) goto default */
      range
	= force_gimple_operand_gsi (&gsi,
				    fold_convert (unsigned_index_type, range),
				    /*simple=*/true, NULL_TREE,
				    /*before=*/true, GSI_SAME_STMT);
      tmp = fold_build2 (GT_EXPR, boolean_type_node, idx, range);
      /* Half of the default probability is consumed by this guard; the
	 rest falls through to the final fallthrough edge below.  */
      default_prob = default_prob / 2;
      basic_block new_bb
	= hoist_edge_and_branch_if_true (gsip: &gsi, cond: tmp, case_bb: default_bb,
					 prob: default_prob, loc);
      gsi = gsi_last_bb (bb: new_bb);
    }

  tmp = fold_build2_loc (loc, LSHIFT_EXPR, word_type_node, word_mode_one,
			 fold_convert_loc (loc, word_type_node, idx));

  /* csui = (1 << (word_mode) idx) */
  if (count > 1)
    {
      /* Materialize the shifted value in an SSA name so it can feed all
	 of the mask tests; with a single test it is used in place.  */
      csui = make_ssa_name (var: word_type_node);
      tmp = force_gimple_operand_gsi (&gsi, tmp,
				      /*simple=*/false, NULL_TREE,
				      /*before=*/true, GSI_SAME_STMT);
      shift_stmt = gimple_build_assign (csui, tmp);
      gsi_insert_before (&gsi, shift_stmt, GSI_SAME_STMT);
      update_stmt (s: shift_stmt);
    }
  else
    csui = tmp;

  /* for each unique set of cases:
       if (const & csui) goto target  */
  for (k = 0; k < count; k++)
    {
      /* Condition each branch's probability on not having taken the
	 earlier ones.  */
      profile_probability prob = test[k].prob / (subtree_prob + default_prob);
      subtree_prob -= test[k].prob;
      tmp = wide_int_to_tree (type: word_type_node, cst: test[k].mask);
      tmp = fold_build2_loc (loc, BIT_AND_EXPR, word_type_node, csui, tmp);
      tmp = fold_build2_loc (loc, NE_EXPR, boolean_type_node,
			     tmp, word_mode_zero);
      tmp = force_gimple_operand_gsi (&gsi, tmp,
				      /*simple=*/true, NULL_TREE,
				      /*before=*/true, GSI_SAME_STMT);
      basic_block new_bb
	= hoist_edge_and_branch_if_true (gsip: &gsi, cond: tmp, case_bb: test[k].target_bb,
					 prob, loc);
      gsi = gsi_last_bb (bb: new_bb);
    }

  /* We should have removed all edges now.  */
  gcc_assert (EDGE_COUNT (gsi_bb (gsi)->succs) == 0);

  /* If nothing matched, go to the default label.  */
  edge e = make_edge (gsi_bb (i: gsi), default_bb, EDGE_FALLTHRU);
  e->probability = profile_probability::always ();
}
1801
1802/* Split the basic block at the statement pointed to by GSIP, and insert
1803 a branch to the target basic block of E_TRUE conditional on tree
1804 expression COND.
1805
1806 It is assumed that there is already an edge from the to-be-split
1807 basic block to E_TRUE->dest block. This edge is removed, and the
1808 profile information on the edge is re-used for the new conditional
1809 jump.
1810
1811 The CFG is updated. The dominator tree will not be valid after
1812 this transformation, but the immediate dominators are updated if
1813 UPDATE_DOMINATORS is true.
1814
1815 Returns the newly created basic block. */
1816
basic_block
bit_test_cluster::hoist_edge_and_branch_if_true (gimple_stmt_iterator *gsip,
						 tree cond, basic_block case_bb,
						 profile_probability prob,
						 location_t loc)
{
  tree tmp;
  gcond *cond_stmt;
  edge e_false;
  basic_block new_bb, split_bb = gsi_bb (i: *gsip);

  /* The true edge to CASE_BB is created up front with probability PROB.  */
  edge e_true = make_edge (split_bb, case_bb, EDGE_TRUE_VALUE);
  e_true->probability = prob;
  gcc_assert (e_true->src == split_bb);

  /* Gimplify COND and end SPLIT_BB with "if (cond)".  */
  tmp = force_gimple_operand_gsi (gsip, cond, /*simple=*/true, NULL,
				  /*before=*/true, GSI_SAME_STMT);
  cond_stmt = gimple_build_cond_from_tree (tmp, NULL_TREE, NULL_TREE);
  gimple_set_location (g: cond_stmt, location: loc);
  gsi_insert_before (gsip, cond_stmt, GSI_SAME_STMT);

  /* Split after the condition; the statements following GSIP end up in
     NEW_BB, reached when the condition is false.  */
  e_false = split_block (split_bb, cond_stmt);
  new_bb = e_false->dest;
  /* split_block may have moved e_true's source; re-anchor it at the block
     that now ends with the condition.  */
  redirect_edge_pred (e_true, split_bb);

  e_false->flags &= ~EDGE_FALLTHRU;
  e_false->flags |= EDGE_FALSE_VALUE;
  e_false->probability = e_true->probability.invert ();
  new_bb->count = e_false->count ();

  return new_bb;
}
1849
1850/* Compute the number of case labels that correspond to each outgoing edge of
1851 switch statement. Record this information in the aux field of the edge. */
1852
1853void
1854switch_decision_tree::compute_cases_per_edge ()
1855{
1856 reset_out_edges_aux (swtch: m_switch);
1857 int ncases = gimple_switch_num_labels (gs: m_switch);
1858 for (int i = ncases - 1; i >= 1; --i)
1859 {
1860 edge case_edge = gimple_switch_edge (cfun, m_switch, i);
1861 case_edge->aux = (void *) ((intptr_t) (case_edge->aux) + 1);
1862 }
1863}
1864
1865/* Analyze switch statement and return true when the statement is expanded
1866 as decision tree. */
1867
/* Analyze switch statement and return true when the statement is expanded
   as decision tree.  The pipeline is: build one simple_cluster per case
   label, merge runs into bit-test clusters, then merge the remaining
   simple runs into jump-table clusters, and finally expand.  */

bool
switch_decision_tree::analyze_switch_statement ()
{
  unsigned l = gimple_switch_num_labels (gs: m_switch);
  basic_block bb = gimple_bb (g: m_switch);
  auto_vec<cluster *> clusters;
  clusters.create (nelems: l - 1);

  basic_block default_bb = gimple_switch_default_bb (cfun, m_switch);
  m_case_bbs.reserve (nelems: l);
  m_case_bbs.quick_push (obj: default_bb);

  /* Needed below to split each edge's probability evenly among the case
     labels that share it.  */
  compute_cases_per_edge ();

  /* Label 0 is the default label; create a simple_cluster for each of
     the remaining case labels.  */
  for (unsigned i = 1; i < l; i++)
    {
      tree elt = gimple_switch_label (gs: m_switch, index: i);
      tree lab = CASE_LABEL (elt);
      basic_block case_bb = label_to_block (cfun, lab);
      edge case_edge = find_edge (bb, case_bb);
      tree low = CASE_LOW (elt);
      tree high = CASE_HIGH (elt);

      /* Divide the edge's probability by the number of labels sharing
	 the edge (stored in aux by compute_cases_per_edge).  */
      profile_probability p
	= case_edge->probability / ((intptr_t) (case_edge->aux));
      clusters.quick_push (obj: new simple_cluster (low, high, elt, case_edge->dest,
					    p));
      m_case_bbs.quick_push (obj: case_edge->dest);
    }

  reset_out_edges_aux (swtch: m_switch);

  /* Find bit-test clusters.  */
  vec<cluster *> output = bit_test_cluster::find_bit_tests (clusters);

  /* Find jump table clusters.  Only maximal runs of SIMPLE_CASE clusters
     (collected in TMP) are candidates; bit-test clusters act as
     separators and are passed through unchanged.  */
  vec<cluster *> output2;
  auto_vec<cluster *> tmp;
  output2.create (nelems: 1);
  tmp.create (nelems: 1);

  for (unsigned i = 0; i < output.length (); i++)
    {
      cluster *c = output[i];
      if (c->get_type () != SIMPLE_CASE)
	{
	  if (!tmp.is_empty ())
	    {
	      vec<cluster *> n = jump_table_cluster::find_jump_tables (clusters&: tmp);
	      output2.safe_splice (src: n);
	      n.release ();
	      tmp.truncate (size: 0);
	    }
	  output2.safe_push (obj: c);
	}
      else
	tmp.safe_push (obj: c);
    }

  /* We still can have a temporary vector to test.  */
  if (!tmp.is_empty ())
    {
      vec<cluster *> n = jump_table_cluster::find_jump_tables (clusters&: tmp);
      output2.safe_splice (src: n);
      n.release ();
    }

  if (dump_file)
    {
      fprintf (stream: dump_file, format: ";; GIMPLE switch case clusters: ");
      for (unsigned i = 0; i < output2.length (); i++)
	output2[i]->dump (f: dump_file, details: dump_flags & TDF_DETAILS);
      fprintf (stream: dump_file, format: "\n");
    }

  /* OUTPUT's elements either live on in OUTPUT2 or were consumed by
     find_jump_tables; only the vector itself needs freeing.  */
  output.release ();

  bool expanded = try_switch_expansion (clusters&: output2);
  release_clusters (clusters&: output2);
  return expanded;
}
1949
1950/* Attempt to expand CLUSTERS as a decision tree. Return true when
1951 expanded. */
1952
/* Attempt to expand CLUSTERS as a decision tree.  Return true when
   expanded.  */

bool
switch_decision_tree::try_switch_expansion (vec<cluster *> &clusters)
{
  tree index_expr = gimple_switch_index (gs: m_switch);
  tree index_type = TREE_TYPE (index_expr);
  basic_block bb = gimple_bb (g: m_switch);

  /* Nothing to do for a default-only switch, or when no suitable
     unsigned type exists for the range checks.  */
  if (gimple_switch_num_labels (gs: m_switch) == 1
      || range_check_type (index_type) == NULL_TREE)
    return false;

  /* Find the default case target label.  */
  edge default_edge = gimple_switch_default_edge (cfun, m_switch);
  m_default_bb = default_edge->dest;

  /* Do the insertion of a case label into m_case_list.  The labels are
     fed to us in descending order from the sorted vector of case labels used
     in the tree part of the middle end.  So the list we construct is
     sorted in ascending order.  */

  for (int i = clusters.length () - 1; i >= 0; i--)
    {
      case_tree_node *r = m_case_list;
      m_case_list = m_case_node_pool.allocate ();
      m_case_list->m_right = r;
      m_case_list->m_c = clusters[i];
    }

  record_phi_operand_mapping ();

  /* Split basic block that contains the gswitch statement.  The gswitch
     itself is left in the second half and will be removed; BB ends up
     being the empty block from which the decision tree grows.  */
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  edge e;
  if (gsi_end_p (i: gsi))
    e = split_block_after_labels (bb);
  else
    {
      gsi_prev (i: &gsi);
      e = split_block (bb, gsi_stmt (i: gsi));
    }
  bb = split_edge (e);

  /* Create new basic blocks for non-case clusters where specific expansion
     needs to happen.  */
  for (unsigned i = 0; i < clusters.length (); i++)
    if (clusters[i]->get_type () != SIMPLE_CASE)
      {
	clusters[i]->m_case_bb = create_empty_bb (bb);
	clusters[i]->m_case_bb->count = bb->count;
	clusters[i]->m_case_bb->loop_father = bb->loop_father;
      }

  /* Do not do an extra work for a single cluster.  */
  if (clusters.length () == 1
      && clusters[0]->get_type () != SIMPLE_CASE)
    {
      cluster *c = clusters[0];
      /* The whole switch is one jump table / bit test: emit it directly
	 and route BB straight to its block.  */
      c->emit (index_expr, index_type,
	       gimple_switch_default_label (gs: m_switch), m_default_bb,
	       gimple_location (g: m_switch));
      redirect_edge_succ (single_succ_edge (bb), c->m_case_bb);
    }
  else
    {
      /* Build the comparison decision tree first...  */
      emit (bb, index_expr, default_prob: default_edge->probability, index_type);

      /* ... then emit cluster-specific switch handling in the blocks the
	 tree branches into.  */
      for (unsigned i = 0; i < clusters.length (); i++)
	if (clusters[i]->get_type () != SIMPLE_CASE)
	  {
	    edge e = single_pred_edge (bb: clusters[i]->m_case_bb);
	    e->dest->count = e->src->count.apply_probability (prob: e->probability);
	    clusters[i]->emit (index_expr, index_type,
			       gimple_switch_default_label (gs: m_switch),
			       m_default_bb, gimple_location (g: m_switch));
	  }
    }

  fix_phi_operands_for_edges ();

  return true;
}
2035
2036/* Before switch transformation, record all SSA_NAMEs defined in switch BB
2037 and used in a label basic block. */
2038
2039void
2040switch_decision_tree::record_phi_operand_mapping ()
2041{
2042 basic_block switch_bb = gimple_bb (g: m_switch);
2043 /* Record all PHI nodes that have to be fixed after conversion. */
2044 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2045 {
2046 gphi_iterator gsi;
2047 basic_block bb = m_case_bbs[i];
2048 for (gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
2049 {
2050 gphi *phi = gsi.phi ();
2051
2052 for (unsigned i = 0; i < gimple_phi_num_args (gs: phi); i++)
2053 {
2054 basic_block phi_src_bb = gimple_phi_arg_edge (phi, i)->src;
2055 if (phi_src_bb == switch_bb)
2056 {
2057 tree def = gimple_phi_arg_def (gs: phi, index: i);
2058 tree result = gimple_phi_result (gs: phi);
2059 m_phi_mapping.put (k: result, v: def);
2060 break;
2061 }
2062 }
2063 }
2064 }
2065}
2066
2067/* Append new operands to PHI statements that were introduced due to
2068 addition of new edges to case labels. */
2069
2070void
2071switch_decision_tree::fix_phi_operands_for_edges ()
2072{
2073 gphi_iterator gsi;
2074
2075 for (unsigned i = 0; i < m_case_bbs.length (); i++)
2076 {
2077 basic_block bb = m_case_bbs[i];
2078 for (gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi); gsi_next (i: &gsi))
2079 {
2080 gphi *phi = gsi.phi ();
2081 for (unsigned j = 0; j < gimple_phi_num_args (gs: phi); j++)
2082 {
2083 tree def = gimple_phi_arg_def (gs: phi, index: j);
2084 if (def == NULL_TREE)
2085 {
2086 edge e = gimple_phi_arg_edge (phi, i: j);
2087 tree *definition
2088 = m_phi_mapping.get (k: gimple_phi_result (gs: phi));
2089 gcc_assert (definition);
2090 add_phi_arg (phi, *definition, e, UNKNOWN_LOCATION);
2091 }
2092 }
2093 }
2094 }
2095}
2096
/* Generate a decision tree, switching on INDEX_EXPR and jumping to
   one of the labels in CASE_LIST or to the DEFAULT_LABEL.

   We generate a binary decision tree to select the appropriate target
   code.  */

void
switch_decision_tree::emit (basic_block bb, tree index_expr,
			    profile_probability default_prob, tree index_type)
{
  /* Rearrange the linked m_case_list into a near-optimal binary tree
     before emitting comparisons.  */
  balance_case_nodes (head: &m_case_list, NULL);

  if (dump_file)
    dump_function_to_file (current_function_decl, dump_file, dump_flags);
  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      int indent_step = ceil_log2 (TYPE_PRECISION (index_type)) + 2;
      fprintf (stream: dump_file, format: ";; Expanding GIMPLE switch as decision tree:\n");
      gcc_assert (m_case_list != NULL);
      dump_case_nodes (f: dump_file, root: m_case_list, indent_step, indent_level: 0);
    }

  /* Emit the comparison chain; a non-NULL return is the block that falls
     through after all tests failed and must branch to the default.  */
  bb = emit_case_nodes (bb, index: index_expr, node: m_case_list, default_prob, index_type,
			gimple_location (g: m_switch));

  if (bb)
    emit_jump (bb, case_bb: m_default_bb);

  /* Remove all edges and do just an edge that will reach default_bb.  */
  bb = gimple_bb (g: m_switch);
  gimple_stmt_iterator gsi = gsi_last_bb (bb);
  gsi_remove (&gsi, true);

  /* The original gswitch block is now dead; the decision tree lives in
     the blocks created during expansion.  */
  delete_basic_block (bb);
}
2132
/* Take an ordered list of case nodes
   and transform them into a near optimal binary tree,
   on the assumption that any target code selection value is as
   likely as any other.

   The transformation is performed by splitting the ordered
   list into two equal sections plus a pivot.  The parts are
   then attached to the pivot as left and right branches.  Each
   branch is then transformed recursively.  */

void
switch_decision_tree::balance_case_nodes (case_tree_node **head,
					  case_tree_node *parent)
{
  case_tree_node *np;

  np = *head;
  if (np)
    {
      int i = 0;
      case_tree_node **npp;
      case_tree_node *left;
      profile_probability prob = profile_probability::never ();

      /* Count the number of entries on branch.  */

      while (np)
	{
	  i++;
	  prob += np->m_c->m_prob;
	  np = np->m_right;
	}

      if (i > 2)
	{
	  /* Split this list if it is long enough for that to help.  */
	  npp = head;
	  left = *npp;
	  profile_probability pivot_prob = prob / 2;

	  /* Find the place in the list that bisects the list's total cost
	     by probability.  */
	  while (1)
	    {
	      /* Skip nodes while their probability does not reach
		 that amount.  */
	      prob -= (*npp)->m_c->m_prob;
	      if ((prob.initialized_p () && prob < pivot_prob)
		  || ! (*npp)->m_right)
		break;
	      npp = &(*npp)->m_right;
	    }

	  /* *NPP is the pivot: detach it from the list, make it the root
	     of this subtree, and hang everything that preceded it on its
	     left link (NULL when the pivot was the first node).  The
	     pivot keeps its original tail as the right branch.  */
	  np = *npp;
	  *npp = 0;
	  *head = np;
	  np->m_parent = parent;
	  np->m_left = left == np ? NULL : left;

	  /* Optimize each of the two split parts.  */
	  balance_case_nodes (head: &np->m_left, parent: np);
	  balance_case_nodes (head: &np->m_right, parent: np);
	  /* Propagate the total probability reachable through this node:
	     its own weight plus both balanced children's subtree weights.  */
	  np->m_c->m_subtree_prob = np->m_c->m_prob;
	  if (np->m_left)
	    np->m_c->m_subtree_prob += np->m_left->m_c->m_subtree_prob;
	  if (np->m_right)
	    np->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
	}
      else
	{
	  /* Else leave this branch as one level,
	     but fill in `parent' fields.  */
	  np = *head;
	  np->m_parent = parent;
	  np->m_c->m_subtree_prob = np->m_c->m_prob;
	  for (; np->m_right; np = np->m_right)
	    {
	      np->m_right->m_parent = np;
	      (*head)->m_c->m_subtree_prob += np->m_right->m_c->m_subtree_prob;
	    }
	}
    }
}
2216
2217/* Dump ROOT, a list or tree of case nodes, to file. */
2218
2219void
2220switch_decision_tree::dump_case_nodes (FILE *f, case_tree_node *root,
2221 int indent_step, int indent_level)
2222{
2223 if (root == 0)
2224 return;
2225 indent_level++;
2226
2227 dump_case_nodes (f, root: root->m_left, indent_step, indent_level);
2228
2229 fputs (s: ";; ", stream: f);
2230 fprintf (stream: f, format: "%*s", indent_step * indent_level, "");
2231 root->m_c->dump (f);
2232 root->m_c->m_prob.dump (f);
2233 fputs (s: " subtree: ", stream: f);
2234 root->m_c->m_subtree_prob.dump (f);
2235 fputs (s: ")\n", stream: f);
2236
2237 dump_case_nodes (f, root: root->m_right, indent_step, indent_level);
2238}
2239
2240
2241/* Add an unconditional jump to CASE_BB that happens in basic block BB. */
2242
2243void
2244switch_decision_tree::emit_jump (basic_block bb, basic_block case_bb)
2245{
2246 edge e = single_succ_edge (bb);
2247 redirect_edge_succ (e, case_bb);
2248}
2249
2250/* Generate code to compare OP0 with OP1 so that the condition codes are
2251 set and to jump to LABEL_BB if the condition is true.
2252 COMPARISON is the GIMPLE comparison (EQ, NE, GT, etc.).
2253 PROB is the probability of jumping to LABEL_BB. */
2254
2255basic_block
2256switch_decision_tree::emit_cmp_and_jump_insns (basic_block bb, tree op0,
2257 tree op1, tree_code comparison,
2258 basic_block label_bb,
2259 profile_probability prob,
2260 location_t loc)
2261{
2262 // TODO: it's once called with lhs != index.
2263 op1 = fold_convert (TREE_TYPE (op0), op1);
2264
2265 gcond *cond = gimple_build_cond (comparison, op0, op1, NULL_TREE, NULL_TREE);
2266 gimple_set_location (g: cond, location: loc);
2267 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2268 gsi_insert_after (&gsi, cond, GSI_NEW_STMT);
2269
2270 gcc_assert (single_succ_p (bb));
2271
2272 /* Make a new basic block where false branch will take place. */
2273 edge false_edge = split_block (bb, cond);
2274 false_edge->flags = EDGE_FALSE_VALUE;
2275 false_edge->probability = prob.invert ();
2276 false_edge->dest->count = bb->count.apply_probability (prob: prob.invert ());
2277
2278 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2279 true_edge->probability = prob;
2280
2281 return false_edge->dest;
2282}
2283
2284/* Generate code to jump to LABEL if OP0 and OP1 are equal.
2285 PROB is the probability of jumping to LABEL_BB.
2286 BB is a basic block where the new condition will be placed. */
2287
2288basic_block
2289switch_decision_tree::do_jump_if_equal (basic_block bb, tree op0, tree op1,
2290 basic_block label_bb,
2291 profile_probability prob,
2292 location_t loc)
2293{
2294 op1 = fold_convert (TREE_TYPE (op0), op1);
2295
2296 gcond *cond = gimple_build_cond (EQ_EXPR, op0, op1, NULL_TREE, NULL_TREE);
2297 gimple_set_location (g: cond, location: loc);
2298 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2299 gsi_insert_before (&gsi, cond, GSI_SAME_STMT);
2300
2301 gcc_assert (single_succ_p (bb));
2302
2303 /* Make a new basic block where false branch will take place. */
2304 edge false_edge = split_block (bb, cond);
2305 false_edge->flags = EDGE_FALSE_VALUE;
2306 false_edge->probability = prob.invert ();
2307 false_edge->dest->count = bb->count.apply_probability (prob: prob.invert ());
2308
2309 edge true_edge = make_edge (bb, label_bb, EDGE_TRUE_VALUE);
2310 true_edge->probability = prob;
2311
2312 return false_edge->dest;
2313}
2314
/* Emit step-by-step code to select a case for the value of INDEX.
   The thus generated decision tree follows the form of the
   case-node binary tree NODE, whose nodes represent test conditions.
   DEFAULT_PROB is probability of cases leading to default BB.
   INDEX_TYPE is the type of the index of the switch.  */

basic_block
switch_decision_tree::emit_case_nodes (basic_block bb, tree index,
				       case_tree_node *node,
				       profile_probability default_prob,
				       tree index_type, location_t loc)
{
  profile_probability p;

  /* If node is null, we are done.  */
  if (node == NULL)
    return bb;

  /* Single value case.  */
  if (node->m_c->is_single_value_p ())
    {
      /* Node is single valued.  First see if the index expression matches
	 this node and then check our children, if any.  */
      /* Branch probabilities are renormalized against everything still
	 reachable here: the node's subtree weight plus the default.  */
      p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
      bb = do_jump_if_equal (bb, op0: index, op1: node->m_c->get_low (),
			     label_bb: node->m_c->m_case_bb, prob: p, loc);
      /* Since this case is taken at this point, reduce its weight from
	 subtree_weight.  */
      node->m_c->m_subtree_prob -= node->m_c->m_prob;

      if (node->m_left != NULL && node->m_right != NULL)
	{
	  /* 1) the node has both children

	     If both children are single-valued cases with no
	     children, finish up all the work.  This way, we can save
	     one ordered comparison.  */

	  if (!node->m_left->has_child ()
	      && node->m_left->m_c->is_single_value_p ()
	      && !node->m_right->has_child ()
	      && node->m_right->m_c->is_single_value_p ())
	    {
	      p = (node->m_right->m_c->m_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_right->m_c->get_low (),
				     label_bb: node->m_right->m_c->m_case_bb, prob: p, loc);
	      node->m_c->m_subtree_prob -= node->m_right->m_c->m_prob;

	      p = (node->m_left->m_c->m_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_left->m_c->get_low (),
				     label_bb: node->m_left->m_c->m_case_bb, prob: p, loc);
	    }
	  else
	    {
	      /* Branch to a label where we will handle it later.  */
	      basic_block test_bb = split_edge (single_succ_edge (bb));
	      redirect_edge_succ (single_pred_edge (bb: test_bb),
				  single_succ_edge (bb)->dest);

	      /* Half the default probability is assumed to flow to each
		 side of the comparison, hence the division by 2.  */
	      p = ((node->m_right->m_c->m_subtree_prob + default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      test_bb->count = bb->count.apply_probability (prob: p);
	      bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_high (),
					    comparison: GT_EXPR, label_bb: test_bb, prob: p, loc);
	      default_prob /= 2;

	      /* Handle the left-hand subtree.  */
	      bb = emit_case_nodes (bb, index, node: node->m_left,
				    default_prob, index_type, loc);

	      /* If the left-hand subtree fell through,
		 don't let it fall into the right-hand subtree.  */
	      if (bb && m_default_bb)
		emit_jump (bb, case_bb: m_default_bb);

	      bb = emit_case_nodes (bb: test_bb, index, node: node->m_right,
				    default_prob, index_type, loc);
	    }
	}
      else if (node->m_left == NULL && node->m_right != NULL)
	{
	  /* 2) the node has only right child.  */

	  /* Here we have a right child but no left so we issue a conditional
	     branch to default and process the right child.

	     Omit the conditional branch to default if the right child
	     does not have any children and is single valued; it would
	     cost too much space to save so little time.  */

	  if (node->m_right->has_child ()
	      || !node->m_right->m_c->is_single_value_p ())
	    {
	      p = ((default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_low (),
					    comparison: LT_EXPR, label_bb: m_default_bb, prob: p, loc);
	      default_prob /= 2;

	      bb = emit_case_nodes (bb, index, node: node->m_right, default_prob,
				    index_type, loc);
	    }
	  else
	    {
	      /* We cannot process node->right normally
		 since we haven't ruled out the numbers less than
		 this node's value.  So handle node->right explicitly.  */
	      p = (node->m_right->m_c->m_subtree_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_right->m_c->get_low (),
				     label_bb: node->m_right->m_c->m_case_bb, prob: p, loc);
	    }
	}
      else if (node->m_left != NULL && node->m_right == NULL)
	{
	  /* 3) just one subtree, on the left.  Similar case as previous.  */

	  if (node->m_left->has_child ()
	      || !node->m_left->m_c->is_single_value_p ())
	    {
	      p = ((default_prob / 2)
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_high (),
					    comparison: GT_EXPR, label_bb: m_default_bb, prob: p, loc);
	      default_prob /= 2;

	      bb = emit_case_nodes (bb, index, node: node->m_left, default_prob,
				    index_type, loc);
	    }
	  else
	    {
	      /* We cannot process node->left normally
		 since we haven't ruled out the numbers less than
		 this node's value.  So handle node->left explicitly.  */
	      p = (node->m_left->m_c->m_subtree_prob
		   / (node->m_c->m_subtree_prob + default_prob));
	      bb = do_jump_if_equal (bb, op0: index, op1: node->m_left->m_c->get_low (),
				     label_bb: node->m_left->m_c->m_case_bb, prob: p, loc);
	    }
	}
    }
  else
    {
      /* Node is a range.  These cases are very similar to those for a single
	 value, except that we do not start by testing whether this node
	 is the one to branch to.  */
      if (node->has_child () || node->m_c->get_type () != SIMPLE_CASE)
	{
	  /* A bit-test cluster keeps a share of the default probability
	     for itself, so the default weight is split three ways instead
	     of two.  */
	  bool is_bt = node->m_c->get_type () == BIT_TEST;
	  int parts = is_bt ? 3 : 2;

	  /* Branch to a label where we will handle it later.  */
	  basic_block test_bb = split_edge (single_succ_edge (bb));
	  redirect_edge_succ (single_pred_edge (bb: test_bb),
			      single_succ_edge (bb)->dest);

	  profile_probability right_prob = profile_probability::never ();
	  if (node->m_right)
	    right_prob = node->m_right->m_c->m_subtree_prob;
	  p = ((right_prob + default_prob / parts)
	       / (node->m_c->m_subtree_prob + default_prob));
	  test_bb->count = bb->count.apply_probability (prob: p);

	  bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_high (),
					comparison: GT_EXPR, label_bb: test_bb, prob: p, loc);

	  default_prob /= parts;
	  node->m_c->m_subtree_prob -= right_prob;
	  if (is_bt)
	    node->m_c->m_default_prob = default_prob;

	   /* Value belongs to this node or to the left-hand subtree.  */
	   p = node->m_c->m_prob / (node->m_c->m_subtree_prob + default_prob);
	   bb = emit_cmp_and_jump_insns (bb, op0: index, op1: node->m_c->get_low (),
					 comparison: GE_EXPR, label_bb: node->m_c->m_case_bb, prob: p, loc);

	  /* Handle the left-hand subtree.  */
	  bb = emit_case_nodes (bb, index, node: node->m_left, default_prob,
				index_type, loc);

	  /* If the left-hand subtree fell through,
	     don't let it fall into the right-hand subtree.  */
	  if (bb && m_default_bb)
	    emit_jump (bb, case_bb: m_default_bb);

	  bb = emit_case_nodes (bb: test_bb, index, node: node->m_right, default_prob,
				index_type, loc);
	}
      else
	{
	  /* Node has no children so we check low and high bounds to remove
	     redundant tests.  Only one of the bounds can exist,
	     since otherwise this node is bounded--a case tested already.  */
	  tree lhs, rhs;
	  generate_range_test (bb, index, low: node->m_c->get_low (),
			       high: node->m_c->get_high (), lhs: &lhs, rhs: &rhs);
	  p = default_prob / (node->m_c->m_subtree_prob + default_prob);

	  bb = emit_cmp_and_jump_insns (bb, op0: lhs, op1: rhs, comparison: GT_EXPR,
					label_bb: m_default_bb, prob: p, loc);

	  /* In-range values go straight to the case block; no fall-through
	     block remains, hence the NULL return.  */
	  emit_jump (bb, case_bb: node->m_c->m_case_bb);
	  return NULL;
	}
    }

  return bb;
}
2525
/* The main function of the pass scans statements for switches and invokes
   switch_conversion::expand on them.  */
2528
2529namespace {
2530
/* Metadata for the switch-conversion pass; requires CFG and SSA form
   (see pass_convert_switch::gate for the enabling flag).  */
const pass_data pass_data_convert_switch =
{
  .type: GIMPLE_PASS, /* type */
  .name: "switchconv", /* name */
  .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
  .tv_id: TV_TREE_SWITCH_CONVERSION, /* tv_id */
  .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
  .properties_provided: 0, /* properties_provided */
  .properties_destroyed: 0, /* properties_destroyed */
  .todo_flags_start: 0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};
2543
/* Pass wrapper: converts suitable GIMPLE_SWITCH statements via
   switch_conversion (see execute below).  */
class pass_convert_switch : public gimple_opt_pass
{
public:
  pass_convert_switch (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_convert_switch, ctxt)
  {}

  /* opt_pass methods: */
  /* Run only when -ftree-switch-conversion is enabled.  */
  bool gate (function *) final override
  {
    return flag_tree_switch_conversion != 0;
  }
  unsigned int execute (function *) final override;

}; // class pass_convert_switch
2559
2560unsigned int
2561pass_convert_switch::execute (function *fun)
2562{
2563 basic_block bb;
2564 bool cfg_altered = false;
2565
2566 FOR_EACH_BB_FN (bb, fun)
2567 {
2568 if (gswitch *stmt = safe_dyn_cast <gswitch *> (p: *gsi_last_bb (bb)))
2569 {
2570 if (dump_file)
2571 {
2572 expanded_location loc = expand_location (gimple_location (g: stmt));
2573
2574 fprintf (stream: dump_file, format: "beginning to process the following "
2575 "SWITCH statement (%s:%d) : ------- \n",
2576 loc.file, loc.line);
2577 print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
2578 putc (c: '\n', stream: dump_file);
2579 }
2580
2581 switch_conversion sconv;
2582 sconv.expand (swtch: stmt);
2583 cfg_altered |= sconv.m_cfg_altered;
2584 if (!sconv.m_reason)
2585 {
2586 if (dump_file)
2587 {
2588 fputs (s: "Switch converted\n", stream: dump_file);
2589 fputs (s: "--------------------------------\n", stream: dump_file);
2590 }
2591
2592 /* Make no effort to update the post-dominator tree.
2593 It is actually not that hard for the transformations
2594 we have performed, but it is not supported
2595 by iterate_fix_dominators. */
2596 free_dominance_info (CDI_POST_DOMINATORS);
2597 }
2598 else
2599 {
2600 if (dump_file)
2601 {
2602 fputs (s: "Bailing out - ", stream: dump_file);
2603 fputs (s: sconv.m_reason, stream: dump_file);
2604 fputs (s: "\n--------------------------------\n", stream: dump_file);
2605 }
2606 }
2607 }
2608 }
2609
2610 return cfg_altered ? TODO_cleanup_cfg : 0;;
2611}
2612
2613} // anon namespace
2614
/* Create an instance of the switch-conversion pass.  */
gimple_opt_pass *
make_pass_convert_switch (gcc::context *ctxt)
{
  return new pass_convert_switch (ctxt);
}
2620
/* The switch lowering passes scan statements for switches and expand
   them into decision trees via switch_decision_tree.  */
2623
2624namespace {
2625
/* Switch lowering pass, parameterized by O0: the <true> instantiation
   runs only when not optimizing, the <false> one otherwise.  */
template <bool O0> class pass_lower_switch: public gimple_opt_pass
{
public:
  pass_lower_switch (gcc::context *ctxt) : gimple_opt_pass (data, ctxt) {}

  /* Per-instantiation pass metadata (defined out of line).  */
  static const pass_data data;
  opt_pass *
  clone () final override
  {
    return new pass_lower_switch<O0> (m_ctxt);
  }

  /* The O0 variant is gated on !optimize; the other variant always runs.  */
  bool
  gate (function *) final override
  {
    return !O0 || !optimize;
  }

  unsigned int execute (function *fun) final override;
}; // class pass_lower_switch
2646
2647template <bool O0>
2648const pass_data pass_lower_switch<O0>::data = {
2649 .type: .type: .type: GIMPLE_PASS, /* type */
2650 .name: .name: .name: O0 ? "switchlower_O0" : "switchlower", /* name */
2651 .optinfo_flags: .optinfo_flags: .optinfo_flags: OPTGROUP_NONE, /* optinfo_flags */
2652 .tv_id: .tv_id: .tv_id: TV_TREE_SWITCH_LOWERING, /* tv_id */
2653 .properties_required: .properties_required: .properties_required: ( PROP_cfg | PROP_ssa ), /* properties_required */
2654 .properties_provided: .properties_provided: .properties_provided: 0, /* properties_provided */
2655 .properties_destroyed: .properties_destroyed: .properties_destroyed: 0, /* properties_destroyed */
2656 .todo_flags_start: .todo_flags_start: .todo_flags_start: 0, /* todo_flags_start */
2657 TODO_update_ssa | TODO_cleanup_cfg, /* todo_flags_finish */
2658};
2659
/* Lower every GIMPLE_SWITCH in FUN to a decision tree.  Returns 0; any
   required cleanups come from the pass_data TODO flags.  */
template <bool O0>
unsigned int
pass_lower_switch<O0>::execute (function *fun)
{
  basic_block bb;
  bool expanded = false;

  auto_vec<gimple *> switch_statements;
  switch_statements.create (nelems: 1);

  /* Collect all gswitch statements first: expanding in place would
     invalidate the basic-block walk.  */
  FOR_EACH_BB_FN (bb, fun)
    {
      if (gswitch *swtch = safe_dyn_cast <gswitch *> (p: *gsi_last_bb (bb)))
	{
	  /* When optimizing, merge adjacent case labels first.  */
	  if (!O0)
	    group_case_labels_stmt (swtch);
	  switch_statements.safe_push (obj: swtch);
	}
    }

  for (unsigned i = 0; i < switch_statements.length (); i++)
    {
      gimple *stmt = switch_statements[i];
      if (dump_file)
	{
	  expanded_location loc = expand_location (gimple_location (g: stmt));

	  fprintf (stream: dump_file, format: "beginning to process the following "
		   "SWITCH statement (%s:%d) : ------- \n",
		   loc.file, loc.line);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	  putc (c: '\n', stream: dump_file);
	}

      gswitch *swtch = dyn_cast<gswitch *> (p: stmt);
      if (swtch)
	{
	  switch_decision_tree dt (swtch);
	  expanded |= dt.analyze_switch_statement ();
	}
    }

  if (expanded)
    {
      /* Expansion rewrote parts of the CFG: discard stale dominance info
	 and rename virtual operands touched by the new blocks.  */
      free_dominance_info (CDI_DOMINATORS);
      free_dominance_info (CDI_POST_DOMINATORS);
      mark_virtual_operands_for_renaming (cfun);
    }

  return 0;
}
2711
2712} // anon namespace
2713
/* Create an instance of the -O0 switch lowering pass.  */
gimple_opt_pass *
make_pass_lower_switch_O0 (gcc::context *ctxt)
{
  return new pass_lower_switch<true> (ctxt);
}
/* Create an instance of the optimizing switch lowering pass.  */
gimple_opt_pass *
make_pass_lower_switch (gcc::context *ctxt)
{
  return new pass_lower_switch<false> (ctxt);
}
2724

source code of gcc/tree-switch-conversion.cc