1/* Support routines for Value Range Propagation (VRP).
2 Copyright (C) 2005-2024 Free Software Foundation, Inc.
3 Contributed by Diego Novillo <dnovillo@redhat.com>.
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify
8it under the terms of the GNU General Public License as published by
9the Free Software Foundation; either version 3, or (at your option)
10any later version.
11
12GCC is distributed in the hope that it will be useful,
13but WITHOUT ANY WARRANTY; without even the implied warranty of
14MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
15GNU General Public License for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21#include "config.h"
22#include "system.h"
23#include "coretypes.h"
24#include "basic-block.h"
25#include "bitmap.h"
26#include "sbitmap.h"
27#include "options.h"
28#include "dominance.h"
29#include "function.h"
30#include "cfg.h"
31#include "tree.h"
32#include "gimple.h"
33#include "tree-pass.h"
34#include "ssa.h"
35#include "gimple-pretty-print.h"
36#include "fold-const.h"
37#include "cfganal.h"
38#include "gimple-iterator.h"
39#include "tree-cfg.h"
40#include "tree-ssa-loop-manip.h"
41#include "tree-ssa-loop-niter.h"
42#include "tree-into-ssa.h"
43#include "cfgloop.h"
44#include "tree-scalar-evolution.h"
45#include "tree-ssa-propagate.h"
46#include "domwalk.h"
47#include "vr-values.h"
48#include "gimple-array-bounds.h"
49#include "gimple-range.h"
50#include "gimple-range-path.h"
51#include "value-pointer-equiv.h"
52#include "gimple-fold.h"
53#include "tree-dfa.h"
54#include "tree-ssa-dce.h"
55#include "alloc-pool.h"
56#include "cgraph.h"
57#include "symbol-summary.h"
58#include "ipa-utils.h"
59#include "sreal.h"
60#include "ipa-cp.h"
61#include "ipa-prop.h"
62#include "attribs.h"
63
64// This class is utilized by VRP and ranger to remove __builtin_unreachable
65// calls, and reflect any resulting global ranges.
66//
// maybe_register () is called on condition statements, and if the statement
// matches the pattern of one branch leading to a __builtin_unreachable call,
// we either check for early removal or register the resulting executable
// edge in a list.
70//
71// During early/non-final processing, we check to see if ALL exports from the
72// block can be safely updated with a new global value. If they can, then
73// we rewrite the condition and update those values immediately. Otherwise
74// the unreachable condition is left in the IL until the final pass.
75//
76// During final processing, after all blocks have been registered,
77// remove_and_update_globals() will
78// - check all exports from registered blocks
79// - ensure the cache entry of each export is set with the appropriate range
80// - rewrite the conditions to take the executable edge
81// - perform DCE on any feeding instructions to those rewritten conditions
82//
// Then the immediate use chain of each export is walked, and a new global
// range is created by unioning the ranges at all remaining use locations.
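//
// For example (an illustrative GIMPLE fragment; the names are hypothetical):
//
//   if (x_1(D) > 100)
//     goto <bb 3>;   // <bb 3> contains only a __builtin_unreachable () call
//   else
//     goto <bb 4>;
//
// Once registered, the condition is rewritten to take the executable edge
// to <bb 4>, and, subject to the dominance checks described above, the
// global range of x_1 can be narrowed to reflect x_1 <= 100.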
85
86class remove_unreachable {
87public:
88 remove_unreachable (gimple_ranger &r, bool all) : m_ranger (r), final_p (all)
89 { m_list.create (nelems: 30); }
90 ~remove_unreachable () { m_list.release (); }
91 void handle_early (gimple *s, edge e);
92 void maybe_register (gimple *s);
93 bool remove_and_update_globals ();
94 vec<std::pair<int, int> > m_list;
95 gimple_ranger &m_ranger;
96 bool final_p;
97};
98
99// Check if block BB has a __builtin_unreachable () call on one arm, and
100// register the executable edge if so.
101
102void
103remove_unreachable::maybe_register (gimple *s)
104{
105 gcc_checking_assert (gimple_code (s) == GIMPLE_COND);
106 basic_block bb = gimple_bb (g: s);
107
108 edge e0 = EDGE_SUCC (bb, 0);
109 basic_block bb0 = e0->dest;
110 bool un0 = EDGE_COUNT (bb0->succs) == 0
111 && gimple_seq_unreachable_p (bb_seq (bb: bb0));
112 edge e1 = EDGE_SUCC (bb, 1);
113 basic_block bb1 = e1->dest;
114 bool un1 = EDGE_COUNT (bb1->succs) == 0
115 && gimple_seq_unreachable_p (bb_seq (bb: bb1));
116
  // If neither or both arms are unreachable, there is nothing to register.
118 if (un0 == un1)
119 return;
120
121 // Constant expressions are ignored.
122 if (TREE_CODE (gimple_cond_lhs (s)) != SSA_NAME
123 && TREE_CODE (gimple_cond_rhs (s)) != SSA_NAME)
124 return;
125
126 edge e = un0 ? e1 : e0;
127
128 if (!final_p)
129 handle_early (s, e);
130 else
131 m_list.safe_push (obj: std::make_pair (x&: e->src->index, y&: e->dest->index));
132}
133
// Return true if all uses of NAME are dominated by block BB.  One use is
// allowed in block BB; this is the one we hope to remove.
136// ie
137// _2 = _1 & 7;
138// if (_2 != 0)
139// goto <bb 3>; [0.00%]
140// Any additional use of _1 or _2 in this block invalidates early replacement.
141
142static bool
143fully_replaceable (tree name, basic_block bb)
144{
145 use_operand_p use_p;
146 imm_use_iterator iter;
147 bool saw_in_bb = false;
148
149 // If a name loads from memory, we may lose information used in
150 // commoning opportunities later. See tree-ssa/ssa-pre-34.c.
151 gimple *def_stmt = SSA_NAME_DEF_STMT (name);
152 if (gimple_vuse (g: def_stmt))
153 return false;
154
155 FOR_EACH_IMM_USE_FAST (use_p, iter, name)
156 {
157 gimple *use_stmt = USE_STMT (use_p);
158 // Ignore debug stmts and the branch.
159 if (is_gimple_debug (gs: use_stmt))
160 continue;
161 basic_block use_bb = gimple_bb (g: use_stmt);
162 // Only one use in the block allowed to avoid complicated cases.
163 if (use_bb == bb)
164 {
165 if (saw_in_bb)
166 return false;
167 else
168 saw_in_bb = true;
169 }
170 else if (!dominated_by_p (CDI_DOMINATORS, use_bb, bb))
171 return false;
172 }
173 return true;
174}
175
// This routine is called to check __builtin_unreachable calls at any
// time before final removal.  The only way we can be sure the call does not
// provide any additional information is to verify that we can update the
// global values of all exports from the block.  This means the branch
// to the unreachable call must dominate all uses of those ssa-names, with
// the exception that there can be a single use in the block containing
// the branch.  If the name used in the branch is defined in the block, its
// definition may use another name that is also an export, and likewise that
// name may use yet another export, and so on.
185//
// As long as there is only a single use, we can be sure that there are no
// other side effects (like being passed to a call, or stored to a global,
// etc.).  This means we will miss cases where there are 2 or more uses with
// no intervening statements that may have side effects, but it catches most
// of the cases we care about, and avoids expensive in-depth analysis.
//
// Ranger will still reflect the proper ranges at other places in these
// missed cases; we simply will not remove the call or set globals early.
194
195void
196remove_unreachable::handle_early (gimple *s, edge e)
197{
198 bool lhs_p = TREE_CODE (gimple_cond_lhs (s)) == SSA_NAME;
199 bool rhs_p = TREE_CODE (gimple_cond_rhs (s)) == SSA_NAME;
  // Do not remove __builtin_unreachable if it confers a relation, since
  // that relation may be lost in subsequent passes.
202 if (lhs_p && rhs_p)
203 return;
204 // Do not remove addresses early. ie if (x == &y)
205 if (lhs_p && TREE_CODE (gimple_cond_rhs (s)) == ADDR_EXPR)
206 return;
207
208 gcc_checking_assert (gimple_outgoing_range_stmt_p (e->src) == s);
209 gcc_checking_assert (!final_p);
210
211 // Check if every export use is dominated by this branch.
212 tree name;
213 FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
214 {
215 if (!fully_replaceable (name, bb: e->src))
216 return;
217 }
218
219 // Set the global value for each.
220 FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
221 {
222 Value_Range r (TREE_TYPE (name));
223 m_ranger.range_on_entry (r, bb: e->dest, name);
      // There is nothing we can do at this late stage if the write fails.
225 if (!set_range_info (name, r))
226 continue;
227 if (dump_file)
228 {
229 fprintf (stream: dump_file, format: "Global Exported (via early unreachable): ");
230 print_generic_expr (dump_file, name, TDF_SLIM);
231 fprintf (stream: dump_file, format: " = ");
232 gimple_range_global (v&: r, name);
233 r.dump (dump_file);
234 fputc (c: '\n', stream: dump_file);
235 }
236 }
237
238 tree ssa = lhs_p ? gimple_cond_lhs (gs: s) : gimple_cond_rhs (gs: s);
239
240 // Rewrite the condition.
241 if (e->flags & EDGE_TRUE_VALUE)
242 gimple_cond_make_true (gs: as_a<gcond *> (p: s));
243 else
244 gimple_cond_make_false (gs: as_a<gcond *> (p: s));
245 update_stmt (s);
246
247 // If the name on S is defined in this block, see if there is DCE work to do.
248 if (gimple_bb (SSA_NAME_DEF_STMT (ssa)) == e->src)
249 {
250 auto_bitmap dce;
251 bitmap_set_bit (dce, SSA_NAME_VERSION (ssa));
252 simple_dce_from_worklist (dce);
253 }
254}
255
256
// Process the edges in the list: change the conditions and remove any
// dead code feeding those conditions.  Calculate the range of any
// names that may have been exported from those blocks, and determine
// whether there are any updates to their global ranges.
// Return true if any builtin_unreachable calls or globals were
// eliminated or updated.
262
263bool
264remove_unreachable::remove_and_update_globals ()
265{
266 if (m_list.length () == 0)
267 return false;
268
269 // Ensure the cache in SCEV has been cleared before processing
270 // globals to be removed.
271 scev_reset ();
272
273 bool change = false;
274 tree name;
275 unsigned i;
276 bitmap_iterator bi;
277 auto_bitmap all_exports;
278 for (i = 0; i < m_list.length (); i++)
279 {
280 auto eb = m_list[i];
281 basic_block src = BASIC_BLOCK_FOR_FN (cfun, eb.first);
282 basic_block dest = BASIC_BLOCK_FOR_FN (cfun, eb.second);
283 if (!src || !dest)
284 continue;
285 edge e = find_edge (src, dest);
286 gimple *s = gimple_outgoing_range_stmt_p (bb: e->src);
287 gcc_checking_assert (gimple_code (s) == GIMPLE_COND);
288
289 bool dominate_exit_p = true;
290 FOR_EACH_GORI_EXPORT_NAME (m_ranger.gori (), e->src, name)
291 {
292 // Ensure the cache is set for NAME in the succ block.
293 Value_Range r(TREE_TYPE (name));
294 Value_Range ex(TREE_TYPE (name));
295 m_ranger.range_on_entry (r, bb: e->dest, name);
296 m_ranger.range_on_entry (r&: ex, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
      // If the range produced by this __builtin_unreachable expression
298 // is not fully reflected in the range at exit, then it does not
299 // dominate the exit of the function.
300 if (ex.intersect (r))
301 dominate_exit_p = false;
302 }
303
304 // If the exit is dominated, add to the export list. Otherwise if this
305 // isn't the final VRP pass, leave the call in the IL.
306 if (dominate_exit_p)
307 bitmap_ior_into (all_exports, m_ranger.gori ().exports (bb: e->src));
308 else if (!final_p)
309 continue;
310
311 change = true;
312 // Rewrite the condition.
313 if (e->flags & EDGE_TRUE_VALUE)
314 gimple_cond_make_true (gs: as_a<gcond *> (p: s));
315 else
316 gimple_cond_make_false (gs: as_a<gcond *> (p: s));
317 update_stmt (s);
318 }
319
320 if (bitmap_empty_p (map: all_exports))
321 return false;
322 // Invoke DCE on all exported names to eliminate dead feeding defs.
323 auto_bitmap dce;
324 bitmap_copy (dce, all_exports);
325 // Don't attempt to DCE parameters.
326 EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
327 if (!ssa_name (i) || SSA_NAME_IS_DEFAULT_DEF (ssa_name (i)))
328 bitmap_clear_bit (dce, i);
329 simple_dce_from_worklist (dce);
330
331 // Loop over all uses of each name and find maximal range. This is the
332 // new global range.
333 use_operand_p use_p;
334 imm_use_iterator iter;
335 EXECUTE_IF_SET_IN_BITMAP (all_exports, 0, i, bi)
336 {
337 name = ssa_name (i);
338 if (!name || SSA_NAME_IN_FREE_LIST (name))
339 continue;
340 Value_Range r (TREE_TYPE (name));
341 Value_Range exp_range (TREE_TYPE (name));
342 r.set_undefined ();
343 FOR_EACH_IMM_USE_FAST (use_p, iter, name)
344 {
345 gimple *use_stmt = USE_STMT (use_p);
346 if (is_gimple_debug (gs: use_stmt))
347 continue;
348 if (!m_ranger.range_of_expr (r&: exp_range, name, use_stmt))
349 exp_range.set_varying (TREE_TYPE (name));
350 r.union_ (r: exp_range);
351 if (r.varying_p ())
352 break;
353 }
354 // Include the on-exit range to ensure non-dominated unreachables
355 // don't incorrectly impact the global range.
356 m_ranger.range_on_entry (r&: exp_range, EXIT_BLOCK_PTR_FOR_FN (cfun), name);
357 r.union_ (r: exp_range);
358 if (r.varying_p () || r.undefined_p ())
359 continue;
360 if (!set_range_info (name, r))
361 continue;
362 change = true;
363 if (dump_file)
364 {
365 fprintf (stream: dump_file, format: "Global Exported (via unreachable): ");
366 print_generic_expr (dump_file, name, TDF_SLIM);
367 fprintf (stream: dump_file, format: " = ");
368 gimple_range_global (v&: r, name);
369 r.dump (dump_file);
370 fputc (c: '\n', stream: dump_file);
371 }
372 }
373 return change;
374}
375
376/* VR_TYPE describes a range with minimum value *MIN and maximum
377 value *MAX. Restrict the range to the set of values that have
378 no bits set outside NONZERO_BITS. Update *MIN and *MAX and
379 return the new range type.
380
381 SGN gives the sign of the values described by the range. */
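
/* An illustrative example (hypothetical values): for VR_RANGE with
   *MIN == 1, *MAX == 14 and NONZERO_BITS == 0x6, the only values in the
   range with no bits outside the mask are 2, 4 and 6, so *MIN is rounded
   up to 2, *MAX is rounded down to 6, and VR_RANGE is returned.  */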
382
383enum value_range_kind
384intersect_range_with_nonzero_bits (enum value_range_kind vr_type,
385 wide_int *min, wide_int *max,
386 const wide_int &nonzero_bits,
387 signop sgn)
388{
389 if (vr_type == VR_ANTI_RANGE)
390 {
391 /* The VR_ANTI_RANGE is equivalent to the union of the ranges
392 A: [-INF, *MIN) and B: (*MAX, +INF]. First use NONZERO_BITS
393 to create an inclusive upper bound for A and an inclusive lower
394 bound for B. */
395 wide_int a_max = wi::round_down_for_mask (*min - 1, nonzero_bits);
396 wide_int b_min = wi::round_up_for_mask (*max + 1, nonzero_bits);
397
398 /* If the calculation of A_MAX wrapped, A is effectively empty
399 and A_MAX is the highest value that satisfies NONZERO_BITS.
400 Likewise if the calculation of B_MIN wrapped, B is effectively
401 empty and B_MIN is the lowest value that satisfies NONZERO_BITS. */
402 bool a_empty = wi::ge_p (x: a_max, y: *min, sgn);
403 bool b_empty = wi::le_p (x: b_min, y: *max, sgn);
404
405 /* If both A and B are empty, there are no valid values. */
406 if (a_empty && b_empty)
407 return VR_UNDEFINED;
408
409 /* If exactly one of A or B is empty, return a VR_RANGE for the
410 other one. */
411 if (a_empty || b_empty)
412 {
413 *min = b_min;
414 *max = a_max;
415 gcc_checking_assert (wi::le_p (*min, *max, sgn));
416 return VR_RANGE;
417 }
418
419 /* Update the VR_ANTI_RANGE bounds. */
420 *min = a_max + 1;
421 *max = b_min - 1;
422 gcc_checking_assert (wi::le_p (*min, *max, sgn));
423
424 /* Now check whether the excluded range includes any values that
425 satisfy NONZERO_BITS. If not, switch to a full VR_RANGE. */
426 if (wi::round_up_for_mask (*min, nonzero_bits) == b_min)
427 {
428 unsigned int precision = min->get_precision ();
429 *min = wi::min_value (precision, sgn);
430 *max = wi::max_value (precision, sgn);
431 vr_type = VR_RANGE;
432 }
433 }
434 if (vr_type == VR_RANGE || vr_type == VR_VARYING)
435 {
436 *max = wi::round_down_for_mask (*max, nonzero_bits);
437
438 /* Check that the range contains at least one valid value. */
439 if (wi::gt_p (x: *min, y: *max, sgn))
440 return VR_UNDEFINED;
441
442 *min = wi::round_up_for_mask (*min, nonzero_bits);
443 gcc_checking_assert (wi::le_p (*min, *max, sgn));
444 }
445 return vr_type;
446}
447
448/* Return the single symbol (an SSA_NAME) contained in T if any, or NULL_TREE
449 otherwise. We only handle additive operations and set NEG to true if the
450 symbol is negated and INV to the invariant part, if any. */
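
/* For instance (purely illustrative): for T = x_1 + 16 this returns x_1
   with *NEG = false and *INV = 16, while for T = 5 - x_1 it returns x_1
   with *NEG = true and *INV = 5.  */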
451
452static tree
453get_single_symbol (tree t, bool *neg, tree *inv)
454{
455 bool neg_;
456 tree inv_;
457
458 *inv = NULL_TREE;
459 *neg = false;
460
461 if (TREE_CODE (t) == PLUS_EXPR
462 || TREE_CODE (t) == POINTER_PLUS_EXPR
463 || TREE_CODE (t) == MINUS_EXPR)
464 {
465 if (is_gimple_min_invariant (TREE_OPERAND (t, 0)))
466 {
467 neg_ = (TREE_CODE (t) == MINUS_EXPR);
468 inv_ = TREE_OPERAND (t, 0);
469 t = TREE_OPERAND (t, 1);
470 }
471 else if (is_gimple_min_invariant (TREE_OPERAND (t, 1)))
472 {
473 neg_ = false;
474 inv_ = TREE_OPERAND (t, 1);
475 t = TREE_OPERAND (t, 0);
476 }
477 else
478 return NULL_TREE;
479 }
480 else
481 {
482 neg_ = false;
483 inv_ = NULL_TREE;
484 }
485
486 if (TREE_CODE (t) == NEGATE_EXPR)
487 {
488 t = TREE_OPERAND (t, 0);
489 neg_ = !neg_;
490 }
491
492 if (TREE_CODE (t) != SSA_NAME)
493 return NULL_TREE;
494
495 if (inv_ && TREE_OVERFLOW_P (inv_))
496 inv_ = drop_tree_overflow (inv_);
497
498 *neg = neg_;
499 *inv = inv_;
500 return t;
501}
502
503/* Compare two values VAL1 and VAL2. Return
504
505 -2 if VAL1 and VAL2 cannot be compared at compile-time,
506 -1 if VAL1 < VAL2,
507 0 if VAL1 == VAL2,
508 +1 if VAL1 > VAL2, and
509 +2 if VAL1 != VAL2
510
511 This is similar to tree_int_cst_compare but supports pointer values
512 and values that cannot be compared at compile time.
513
514 If STRICT_OVERFLOW_P is not NULL, then set *STRICT_OVERFLOW_P to
515 true if the return value is only valid if we assume that signed
516 overflow is undefined. */
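
/* For example (hypothetical operands): comparing x_1 + 1 with x_1 + 3 in a
   type with undefined overflow yields -1, whereas comparing two unrelated
   SSA_NAMEs yields -2 because nothing is known about their values.  */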
517
518int
519compare_values_warnv (tree val1, tree val2, bool *strict_overflow_p)
520{
521 if (val1 == val2)
522 return 0;
523
524 /* Below we rely on the fact that VAL1 and VAL2 are both pointers or
525 both integers. */
526 gcc_assert (POINTER_TYPE_P (TREE_TYPE (val1))
527 == POINTER_TYPE_P (TREE_TYPE (val2)));
528
529 /* Convert the two values into the same type. This is needed because
530 sizetype causes sign extension even for unsigned types. */
531 if (!useless_type_conversion_p (TREE_TYPE (val1), TREE_TYPE (val2)))
532 val2 = fold_convert (TREE_TYPE (val1), val2);
533
534 const bool overflow_undefined
535 = INTEGRAL_TYPE_P (TREE_TYPE (val1))
536 && TYPE_OVERFLOW_UNDEFINED (TREE_TYPE (val1));
537 tree inv1, inv2;
538 bool neg1, neg2;
539 tree sym1 = get_single_symbol (t: val1, neg: &neg1, inv: &inv1);
540 tree sym2 = get_single_symbol (t: val2, neg: &neg2, inv: &inv2);
541
542 /* If VAL1 and VAL2 are of the form '[-]NAME [+ CST]', return -1 or +1
543 accordingly. If VAL1 and VAL2 don't use the same name, return -2. */
544 if (sym1 && sym2)
545 {
546 /* Both values must use the same name with the same sign. */
547 if (sym1 != sym2 || neg1 != neg2)
548 return -2;
549
550 /* [-]NAME + CST == [-]NAME + CST. */
551 if (inv1 == inv2)
552 return 0;
553
554 /* If overflow is defined we cannot simplify more. */
555 if (!overflow_undefined)
556 return -2;
557
558 if (strict_overflow_p != NULL
559 /* Symbolic range building sets the no-warning bit to declare
560 that overflow doesn't happen. */
561 && (!inv1 || !warning_suppressed_p (val1, OPT_Woverflow))
562 && (!inv2 || !warning_suppressed_p (val2, OPT_Woverflow)))
563 *strict_overflow_p = true;
564
565 if (!inv1)
566 inv1 = build_int_cst (TREE_TYPE (val1), 0);
567 if (!inv2)
568 inv2 = build_int_cst (TREE_TYPE (val2), 0);
569
570 return wi::cmp (x: wi::to_wide (t: inv1), y: wi::to_wide (t: inv2),
571 TYPE_SIGN (TREE_TYPE (val1)));
572 }
573
574 const bool cst1 = is_gimple_min_invariant (val1);
575 const bool cst2 = is_gimple_min_invariant (val2);
576
577 /* If one is of the form '[-]NAME + CST' and the other is constant, then
578 it might be possible to say something depending on the constants. */
579 if ((sym1 && inv1 && cst2) || (sym2 && inv2 && cst1))
580 {
581 if (!overflow_undefined)
582 return -2;
583
584 if (strict_overflow_p != NULL
585 /* Symbolic range building sets the no-warning bit to declare
586 that overflow doesn't happen. */
587 && (!sym1 || !warning_suppressed_p (val1, OPT_Woverflow))
588 && (!sym2 || !warning_suppressed_p (val2, OPT_Woverflow)))
589 *strict_overflow_p = true;
590
591 const signop sgn = TYPE_SIGN (TREE_TYPE (val1));
592 tree cst = cst1 ? val1 : val2;
593 tree inv = cst1 ? inv2 : inv1;
594
595 /* Compute the difference between the constants. If it overflows or
596 underflows, this means that we can trivially compare the NAME with
597 it and, consequently, the two values with each other. */
598 wide_int diff = wi::to_wide (t: cst) - wi::to_wide (t: inv);
599 if (wi::cmp (x: 0, y: wi::to_wide (t: inv), sgn)
600 != wi::cmp (x: diff, y: wi::to_wide (t: cst), sgn))
601 {
602 const int res = wi::cmp (x: wi::to_wide (t: cst), y: wi::to_wide (t: inv), sgn);
603 return cst1 ? res : -res;
604 }
605
606 return -2;
607 }
608
609 /* We cannot say anything more for non-constants. */
610 if (!cst1 || !cst2)
611 return -2;
612
613 if (!POINTER_TYPE_P (TREE_TYPE (val1)))
614 {
615 /* We cannot compare overflowed values. */
616 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
617 return -2;
618
619 if (TREE_CODE (val1) == INTEGER_CST
620 && TREE_CODE (val2) == INTEGER_CST)
621 return tree_int_cst_compare (t1: val1, t2: val2);
622
623 if (poly_int_tree_p (t: val1) && poly_int_tree_p (t: val2))
624 {
625 if (known_eq (wi::to_poly_widest (val1),
626 wi::to_poly_widest (val2)))
627 return 0;
628 if (known_lt (wi::to_poly_widest (val1),
629 wi::to_poly_widest (val2)))
630 return -1;
631 if (known_gt (wi::to_poly_widest (val1),
632 wi::to_poly_widest (val2)))
633 return 1;
634 }
635
636 return -2;
637 }
638 else
639 {
640 if (TREE_CODE (val1) == INTEGER_CST && TREE_CODE (val2) == INTEGER_CST)
641 {
642 /* We cannot compare overflowed values. */
643 if (TREE_OVERFLOW (val1) || TREE_OVERFLOW (val2))
644 return -2;
645
646 return tree_int_cst_compare (t1: val1, t2: val2);
647 }
648
649 /* First see if VAL1 and VAL2 are not the same. */
650 if (operand_equal_p (val1, val2, flags: 0))
651 return 0;
652
653 fold_defer_overflow_warnings ();
654
655 /* If VAL1 is a lower address than VAL2, return -1. */
656 tree t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val1, val2);
657 if (t && integer_onep (t))
658 {
659 fold_undefer_and_ignore_overflow_warnings ();
660 return -1;
661 }
662
663 /* If VAL1 is a higher address than VAL2, return +1. */
664 t = fold_binary_to_constant (LT_EXPR, boolean_type_node, val2, val1);
665 if (t && integer_onep (t))
666 {
667 fold_undefer_and_ignore_overflow_warnings ();
668 return 1;
669 }
670
671 /* If VAL1 is different than VAL2, return +2. */
672 t = fold_binary_to_constant (NE_EXPR, boolean_type_node, val1, val2);
673 fold_undefer_and_ignore_overflow_warnings ();
674 if (t && integer_onep (t))
675 return 2;
676
677 return -2;
678 }
679}
680
681/* Compare values like compare_values_warnv. */
682
683int
684compare_values (tree val1, tree val2)
685{
686 bool sop;
687 return compare_values_warnv (val1, val2, strict_overflow_p: &sop);
688}
689
690/* Helper for overflow_comparison_p
691
692 OP0 CODE OP1 is a comparison. Examine the comparison and potentially
693 OP1's defining statement to see if it ultimately has the form
694 OP0 CODE (OP0 PLUS INTEGER_CST)
695
696 If so, return TRUE indicating this is an overflow test and store into
697 *NEW_CST an updated constant that can be used in a narrowed range test.
698
699 REVERSED indicates if the comparison was originally:
700
701 OP1 CODE' OP0.
702
703 This affects how we build the updated constant. */
704
705static bool
706overflow_comparison_p_1 (enum tree_code code, tree op0, tree op1,
707 bool reversed, tree *new_cst)
708{
709 /* See if this is a relational operation between two SSA_NAMES with
710 unsigned, overflow wrapping values. If so, check it more deeply. */
711 if ((code == LT_EXPR || code == LE_EXPR
712 || code == GE_EXPR || code == GT_EXPR)
713 && TREE_CODE (op0) == SSA_NAME
714 && TREE_CODE (op1) == SSA_NAME
715 && INTEGRAL_TYPE_P (TREE_TYPE (op0))
716 && TYPE_UNSIGNED (TREE_TYPE (op0))
717 && TYPE_OVERFLOW_WRAPS (TREE_TYPE (op0)))
718 {
719 gimple *op1_def = SSA_NAME_DEF_STMT (op1);
720
721 /* Now look at the defining statement of OP1 to see if it adds
722 or subtracts a nonzero constant from another operand. */
723 if (op1_def
724 && is_gimple_assign (gs: op1_def)
725 && gimple_assign_rhs_code (gs: op1_def) == PLUS_EXPR
726 && TREE_CODE (gimple_assign_rhs2 (op1_def)) == INTEGER_CST
727 && !integer_zerop (gimple_assign_rhs2 (gs: op1_def)))
728 {
729 tree target = gimple_assign_rhs1 (gs: op1_def);
730
731 /* If we did not find our target SSA_NAME, then this is not
732 an overflow test. */
733 if (op0 != target)
734 return false;
735
736 tree type = TREE_TYPE (op0);
737 wide_int max = wi::max_value (TYPE_PRECISION (type), UNSIGNED);
738 tree inc = gimple_assign_rhs2 (gs: op1_def);
739 if (reversed)
740 *new_cst = wide_int_to_tree (type, cst: max + wi::to_wide (t: inc));
741 else
742 *new_cst = wide_int_to_tree (type, cst: max - wi::to_wide (t: inc));
743 return true;
744 }
745 }
746 return false;
747}
748
749/* OP0 CODE OP1 is a comparison. Examine the comparison and potentially
750 OP1's defining statement to see if it ultimately has the form
751 OP0 CODE (OP0 PLUS INTEGER_CST)
752
753 If so, return TRUE indicating this is an overflow test and store into
754 *NEW_CST an updated constant that can be used in a narrowed range test.
755
756 These statements are left as-is in the IL to facilitate discovery of
757 {ADD,SUB}_OVERFLOW sequences later in the optimizer pipeline. But
758 the alternate range representation is often useful within VRP. */
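
/* As a hypothetical example, for unsigned x_1 with
     x_2 = x_1 + 1;
     if (x_1 > x_2)
   the test only succeeds when x_1 + 1 wraps, so *NEW_CST is set to
   UINT_MAX - 1 and the comparison can be treated as x_1 > UINT_MAX - 1.  */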
759
760bool
761overflow_comparison_p (tree_code code, tree name, tree val, tree *new_cst)
762{
763 if (overflow_comparison_p_1 (code, op0: name, op1: val, reversed: false, new_cst))
764 return true;
765 return overflow_comparison_p_1 (code: swap_tree_comparison (code), op0: val, op1: name,
766 reversed: true, new_cst);
767}
768
769/* Searches the case label vector VEC for the index *IDX of the CASE_LABEL
770 that includes the value VAL. The search is restricted to the range
771 [START_IDX, n - 1] where n is the size of VEC.
772
773 If there is a CASE_LABEL for VAL, its index is placed in IDX and true is
774 returned.
775
776 If there is no CASE_LABEL for VAL and there is one that is larger than VAL,
777 it is placed in IDX and false is returned.
778
   If VAL is larger than any CASE_LABEL, N is placed in IDX and false is
   returned.  */
781
782bool
783find_case_label_index (gswitch *stmt, size_t start_idx, tree val, size_t *idx)
784{
785 size_t n = gimple_switch_num_labels (gs: stmt);
786 size_t low, high;
787
788 /* Find case label for minimum of the value range or the next one.
789 At each iteration we are searching in [low, high - 1]. */
790
791 for (low = start_idx, high = n; high != low; )
792 {
793 tree t;
794 int cmp;
795 /* Note that i != high, so we never ask for n. */
796 size_t i = (high + low) / 2;
797 t = gimple_switch_label (gs: stmt, index: i);
798
799 /* Cache the result of comparing CASE_LOW and val. */
800 cmp = tree_int_cst_compare (CASE_LOW (t), t2: val);
801
802 if (cmp == 0)
803 {
804 /* Ranges cannot be empty. */
805 *idx = i;
806 return true;
807 }
808 else if (cmp > 0)
809 high = i;
810 else
811 {
812 low = i + 1;
813 if (CASE_HIGH (t) != NULL
814 && tree_int_cst_compare (CASE_HIGH (t), t2: val) >= 0)
815 {
816 *idx = i;
817 return true;
818 }
819 }
820 }
821
822 *idx = high;
823 return false;
824}
825
826/* Searches the case label vector VEC for the range of CASE_LABELs that is used
827 for values between MIN and MAX. The first index is placed in MIN_IDX. The
828 last index is placed in MAX_IDX. If the range of CASE_LABELs is empty
829 then MAX_IDX < MIN_IDX.
830 Returns true if the default label is not needed. */
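
/* For instance (an illustrative switch): given case labels 1, 2 ... 5 and 8,
   querying MIN = 2, MAX = 5 selects only the 2 ... 5 label, with *MIN_IDX
   == *MAX_IDX, and returns true since the default label cannot be reached
   for values in that range.  */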
831
832bool
833find_case_label_range (gswitch *stmt, tree min, tree max, size_t *min_idx,
834 size_t *max_idx)
835{
836 size_t i, j;
837 bool min_take_default = !find_case_label_index (stmt, start_idx: 1, val: min, idx: &i);
838 bool max_take_default = !find_case_label_index (stmt, start_idx: i, val: max, idx: &j);
839
840 if (i == j
841 && min_take_default
842 && max_take_default)
843 {
844 /* Only the default case label reached.
845 Return an empty range. */
846 *min_idx = 1;
847 *max_idx = 0;
848 return false;
849 }
850 else
851 {
852 bool take_default = min_take_default || max_take_default;
853 tree low, high;
854 size_t k;
855
856 if (max_take_default)
857 j--;
858
859 /* If the case label range is continuous, we do not need
860 the default case label. Verify that. */
861 high = CASE_LOW (gimple_switch_label (stmt, i));
862 if (CASE_HIGH (gimple_switch_label (stmt, i)))
863 high = CASE_HIGH (gimple_switch_label (stmt, i));
864 for (k = i + 1; k <= j; ++k)
865 {
866 low = CASE_LOW (gimple_switch_label (stmt, k));
867 if (!integer_onep (int_const_binop (MINUS_EXPR, low, high)))
868 {
869 take_default = true;
870 break;
871 }
872 high = low;
873 if (CASE_HIGH (gimple_switch_label (stmt, k)))
874 high = CASE_HIGH (gimple_switch_label (stmt, k));
875 }
876
877 *min_idx = i;
878 *max_idx = j;
879 return !take_default;
880 }
881}
882
883/* Given a SWITCH_STMT, return the case label that encompasses the
884 known possible values for the switch operand. RANGE_OF_OP is a
885 range for the known values of the switch operand. */
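
/* For example (hypothetical ranges): if the operand is known to be in
   [3, 4] and there is a single case 0 ... 10 label covering that range,
   that label is returned; if the operand range lies entirely outside all
   case labels, the default label is returned instead.  */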
886
887tree
888find_case_label_range (gswitch *switch_stmt, const irange *range_of_op)
889{
890 if (range_of_op->undefined_p ()
891 || range_of_op->varying_p ())
892 return NULL_TREE;
893
894 size_t i, j;
895 tree op = gimple_switch_index (gs: switch_stmt);
896 tree type = TREE_TYPE (op);
897 tree tmin = wide_int_to_tree (type, cst: range_of_op->lower_bound ());
898 tree tmax = wide_int_to_tree (type, cst: range_of_op->upper_bound ());
899 find_case_label_range (stmt: switch_stmt, min: tmin, max: tmax, min_idx: &i, max_idx: &j);
900 if (i == j)
901 {
902 /* Look for exactly one label that encompasses the range of
903 the operand. */
904 tree label = gimple_switch_label (gs: switch_stmt, index: i);
905 tree case_high
906 = CASE_HIGH (label) ? CASE_HIGH (label) : CASE_LOW (label);
907 wide_int wlow = wi::to_wide (CASE_LOW (label));
908 wide_int whigh = wi::to_wide (t: case_high);
909 int_range_max label_range (TREE_TYPE (case_high), wlow, whigh);
910 if (!types_compatible_p (type1: label_range.type (), type2: range_of_op->type ()))
911 range_cast (r&: label_range, type: range_of_op->type ());
912 label_range.intersect (*range_of_op);
913 if (label_range == *range_of_op)
914 return label;
915 }
916 else if (i > j)
917 {
918 /* If there are no labels at all, take the default. */
919 return gimple_switch_label (gs: switch_stmt, index: 0);
920 }
921 else
922 {
923 /* Otherwise, there are various labels that can encompass
924 the range of operand. In which case, see if the range of
925 the operand is entirely *outside* the bounds of all the
926 (non-default) case labels. If so, take the default. */
927 unsigned n = gimple_switch_num_labels (gs: switch_stmt);
928 tree min_label = gimple_switch_label (gs: switch_stmt, index: 1);
929 tree max_label = gimple_switch_label (gs: switch_stmt, index: n - 1);
930 tree case_high = CASE_HIGH (max_label);
931 if (!case_high)
932 case_high = CASE_LOW (max_label);
933 int_range_max label_range (TREE_TYPE (CASE_LOW (min_label)),
934 wi::to_wide (CASE_LOW (min_label)),
935 wi::to_wide (t: case_high));
936 if (!types_compatible_p (type1: label_range.type (), type2: range_of_op->type ()))
937 range_cast (r&: label_range, type: range_of_op->type ());
938 label_range.intersect (*range_of_op);
939 if (label_range.undefined_p ())
940 return gimple_switch_label (gs: switch_stmt, index: 0);
941 }
942 return NULL_TREE;
943}
944
945struct case_info
946{
947 tree expr;
948 basic_block bb;
949};
950
951// This is a ranger based folder which continues to use the dominator
952// walk to access the substitute and fold machinery. Ranges are calculated
953// on demand.
954
955class rvrp_folder : public substitute_and_fold_engine
956{
957public:
958
959 rvrp_folder (gimple_ranger *r, bool all)
960 : substitute_and_fold_engine (),
961 m_unreachable (*r, all),
962 m_simplifier (r, r->non_executable_edge_flag)
963 {
964 m_ranger = r;
965 m_pta = new pointer_equiv_analyzer (m_ranger);
966 m_last_bb_stmt = NULL;
967 }
968
969 ~rvrp_folder ()
970 {
971 delete m_pta;
972 }
973
974 tree value_of_expr (tree name, gimple *s = NULL) override
975 {
976 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
977 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
978 return NULL;
979 tree ret = m_ranger->value_of_expr (expr: name, s);
980 if (!ret && supported_pointer_equiv_p (expr: name))
981 ret = m_pta->get_equiv (ssa: name);
982 return ret;
983 }
984
985 tree value_on_edge (edge e, tree name) override
986 {
987 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
988 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
989 return NULL;
990 tree ret = m_ranger->value_on_edge (e, expr: name);
991 if (!ret && supported_pointer_equiv_p (expr: name))
992 ret = m_pta->get_equiv (ssa: name);
993 return ret;
994 }
995
996 tree value_of_stmt (gimple *s, tree name = NULL) override
997 {
998 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
999 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
1000 return NULL;
1001 return m_ranger->value_of_stmt (s, name);
1002 }
1003
1004 void pre_fold_bb (basic_block bb) override
1005 {
1006 m_pta->enter (bb);
1007 for (gphi_iterator gsi = gsi_start_phis (bb); !gsi_end_p (i: gsi);
1008 gsi_next (i: &gsi))
1009 m_ranger->register_inferred_ranges (s: gsi.phi ());
1010 m_last_bb_stmt = last_nondebug_stmt (bb);
1011 }
1012
1013 void post_fold_bb (basic_block bb) override
1014 {
1015 m_pta->leave (bb);
1016 }
1017
1018 void pre_fold_stmt (gimple *stmt) override
1019 {
1020 m_pta->visit_stmt (stmt);
1021 // If this is the last stmt and there are inferred ranges, reparse the
1022 // block for transitive inferred ranges that occur earlier in the block.
1023 if (stmt == m_last_bb_stmt)
1024 {
1025 m_ranger->register_transitive_inferred_ranges (bb: gimple_bb (g: stmt));
1026 // Also check for builtin_unreachable calls.
1027 if (cfun->after_inlining && gimple_code (g: stmt) == GIMPLE_COND)
1028 m_unreachable.maybe_register (s: stmt);
1029 }
1030 }
1031
1032 bool fold_stmt (gimple_stmt_iterator *gsi) override
1033 {
1034 bool ret = m_simplifier.simplify (gsi);
1035 if (!ret)
1036 ret = m_ranger->fold_stmt (gsi, follow_single_use_edges);
1037 m_ranger->register_inferred_ranges (s: gsi_stmt (i: *gsi));
1038 return ret;
1039 }
1040
1041 remove_unreachable m_unreachable;
1042private:
1043 DISABLE_COPY_AND_ASSIGN (rvrp_folder);
1044 gimple_ranger *m_ranger;
1045 simplify_using_ranges m_simplifier;
1046 pointer_equiv_analyzer *m_pta;
1047 gimple *m_last_bb_stmt;
1048};
1049
1050/* Main entry point for a VRP pass using just ranger. This can be called
1051 from anywhere to perform a VRP pass, including from EVRP. */
1052
1053unsigned int
1054execute_ranger_vrp (struct function *fun, bool warn_array_bounds_p,
1055 bool final_p)
1056{
1057 loop_optimizer_init (LOOPS_NORMAL | LOOPS_HAVE_RECORDED_EXITS);
1058 rewrite_into_loop_closed_ssa (NULL, TODO_update_ssa);
1059 scev_initialize ();
1060 calculate_dominance_info (CDI_DOMINATORS);
1061
1062 set_all_edges_as_executable (fun);
1063 gimple_ranger *ranger = enable_ranger (m: fun, use_imm_uses: false);
1064 rvrp_folder folder (ranger, final_p);
1065 phi_analysis_initialize (ranger->const_query ());
1066 folder.substitute_and_fold ();
1067 // Remove tagged builtin-unreachable and maybe update globals.
1068 folder.m_unreachable.remove_and_update_globals ();
1069 if (dump_file && (dump_flags & TDF_DETAILS))
1070 ranger->dump (f: dump_file);
1071
1072 if ((warn_array_bounds || warn_strict_flex_arrays) && warn_array_bounds_p)
1073 {
1074 // Set all edges as executable, except those ranger says aren't.
1075 int non_exec_flag = ranger->non_executable_edge_flag;
1076 basic_block bb;
1077 FOR_ALL_BB_FN (bb, fun)
1078 {
1079 edge_iterator ei;
1080 edge e;
1081 FOR_EACH_EDGE (e, ei, bb->succs)
1082 if (e->flags & non_exec_flag)
1083 e->flags &= ~EDGE_EXECUTABLE;
1084 else
1085 e->flags |= EDGE_EXECUTABLE;
1086 }
1087 scev_reset ();
1088 array_bounds_checker array_checker (fun, ranger);
1089 array_checker.check ();
1090 }
1091
1092
1093 if (Value_Range::supports_type_p (TREE_TYPE
1094 (TREE_TYPE (current_function_decl)))
1095 && flag_ipa_vrp
1096 && !lookup_attribute (attr_name: "noipa", DECL_ATTRIBUTES (current_function_decl)))
1097 {
1098 edge e;
1099 edge_iterator ei;
1100 bool found = false;
1101 Value_Range return_range (TREE_TYPE (TREE_TYPE (current_function_decl)));
1102 FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
1103 if (greturn *ret = dyn_cast <greturn *> (p: *gsi_last_bb (bb: e->src)))
1104 {
1105 tree retval = gimple_return_retval (gs: ret);
1106 if (!retval)
1107 {
1108 return_range.set_varying (TREE_TYPE (TREE_TYPE (current_function_decl)));
1109 found = true;
1110 continue;
1111 }
1112 Value_Range r (TREE_TYPE (retval));
1113 if (ranger->range_of_expr (r, name: retval, ret)
1114 && !r.undefined_p ()
1115 && !r.varying_p ())
1116 {
1117 if (!found)
1118 return_range = r;
1119 else
1120 return_range.union_ (r);
1121 }
1122 else
1123 return_range.set_varying (TREE_TYPE (retval));
1124 found = true;
1125 }
1126 if (found && !return_range.varying_p ())
1127 {
1128 ipa_record_return_value_range (val: return_range);
1129 if (POINTER_TYPE_P (TREE_TYPE (TREE_TYPE (current_function_decl)))
1130 && return_range.nonzero_p ()
1131 && cgraph_node::get (decl: current_function_decl)
1132 ->add_detected_attribute (attr: "returns_nonnull"))
1133 warn_function_returns_nonnull (current_function_decl);
1134 }
1135 }
1136
1137 phi_analysis_finalize ();
1138 disable_ranger (fun);
1139 scev_finalize ();
1140 loop_optimizer_finalize ();
1141 return 0;
1142}
1143
1144// Implement a Fast VRP folder. Not quite as effective but faster.
1145
1146class fvrp_folder : public substitute_and_fold_engine
1147{
1148public:
1149 fvrp_folder (dom_ranger *dr) : substitute_and_fold_engine (),
1150 m_simplifier (dr)
1151 { m_dom_ranger = dr; }
1152
1153 ~fvrp_folder () { }
1154
1155 tree value_of_expr (tree name, gimple *s = NULL) override
1156 {
1157 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1158 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
1159 return NULL;
1160 return m_dom_ranger->value_of_expr (expr: name, s);
1161 }
1162
1163 tree value_on_edge (edge e, tree name) override
1164 {
1165 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1166 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
1167 return NULL;
1168 return m_dom_ranger->value_on_edge (e, expr: name);
1169 }
1170
1171 tree value_of_stmt (gimple *s, tree name = NULL) override
1172 {
1173 // Shortcircuit subst_and_fold callbacks for abnormal ssa_names.
1174 if (TREE_CODE (name) == SSA_NAME && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (name))
1175 return NULL;
1176 return m_dom_ranger->value_of_stmt (s, name);
1177 }
1178
1179 void pre_fold_bb (basic_block bb) override
1180 {
1181 m_dom_ranger->pre_bb (bb);
1182 // Now process the PHIs in advance.
1183 gphi_iterator psi = gsi_start_phis (bb);
1184 for ( ; !gsi_end_p (i: psi); gsi_next (i: &psi))
1185 {
1186 tree name = gimple_range_ssa_p (PHI_RESULT (psi.phi ()));
1187 if (name)
1188 {
1189 Value_Range vr(TREE_TYPE (name));
1190 m_dom_ranger->range_of_stmt (r&: vr, s: psi.phi (), name);
1191 }
1192 }
1193 }
1194
1195 void post_fold_bb (basic_block bb) override
1196 {
1197 m_dom_ranger->post_bb (bb);
1198 }
1199
1200 void pre_fold_stmt (gimple *s) override
1201 {
1202 // Ensure range_of_stmt has been called.
1203 tree type = gimple_range_type (s);
1204 if (type)
1205 {
1206 Value_Range vr(type);
1207 m_dom_ranger->range_of_stmt (r&: vr, s);
1208 }
1209 }
1210
1211 bool fold_stmt (gimple_stmt_iterator *gsi) override
1212 {
1213 bool ret = m_simplifier.simplify (gsi);
1214 if (!ret)
1215 ret = ::fold_stmt (gsi, follow_single_use_edges);
1216 return ret;
1217 }
1218
1219private:
1220 DISABLE_COPY_AND_ASSIGN (fvrp_folder);
1221 simplify_using_ranges m_simplifier;
1222 dom_ranger *m_dom_ranger;
1223};
1224
1225
1226// Main entry point for a FAST VRP pass using a dom ranger.
1227
1228unsigned int
1229execute_fast_vrp (struct function *fun)
1230{
1231 calculate_dominance_info (CDI_DOMINATORS);
1232 dom_ranger dr;
1233 fvrp_folder folder (&dr);
1234
1235 gcc_checking_assert (!fun->x_range_query);
1236 fun->x_range_query = &dr;
1237
1238 folder.substitute_and_fold ();
1239
1240 fun->x_range_query = NULL;
1241 return 0;
1242}
1243
1244namespace {
1245
const pass_data pass_data_vrp =
{
  GIMPLE_PASS, /* type */
  "vrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa ), /* todo_flags_finish */
};
1258
const pass_data pass_data_early_vrp =
{
  GIMPLE_PASS, /* type */
  "evrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_EARLY_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};
1271
const pass_data pass_data_fast_vrp =
{
  GIMPLE_PASS, /* type */
  "fvrp", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_FAST_VRP, /* tv_id */
  PROP_ssa, /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  ( TODO_cleanup_cfg | TODO_update_ssa | TODO_verify_all ),
};
1284
1285
1286class pass_vrp : public gimple_opt_pass
1287{
1288public:
1289 pass_vrp (gcc::context *ctxt, const pass_data &data_, bool warn_p)
1290 : gimple_opt_pass (data_, ctxt), data (data_),
1291 warn_array_bounds_p (warn_p), final_p (false)
1292 { }
1293
1294 /* opt_pass methods: */
1295 opt_pass * clone () final override
1296 { return new pass_vrp (m_ctxt, data, false); }
1297 void set_pass_param (unsigned int n, bool param) final override
1298 {
1299 gcc_assert (n == 0);
1300 final_p = param;
1301 }
1302 bool gate (function *) final override { return flag_tree_vrp != 0; }
1303 unsigned int execute (function *fun) final override
1304 {
1305 // Check for fast vrp.
1306 if (&data == &pass_data_fast_vrp)
1307 return execute_fast_vrp (fun);
1308
1309 return execute_ranger_vrp (fun, warn_array_bounds_p, final_p);
1310 }
1311
1312 private:
1313 const pass_data &data;
1314 bool warn_array_bounds_p;
1315 bool final_p;
1316}; // class pass_vrp
1317
const pass_data pass_data_assumptions =
{
  GIMPLE_PASS, /* type */
  "assumptions", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_ASSUMPTIONS, /* tv_id */
  PROP_ssa, /* properties_required */
  PROP_assumptions_done, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
1330
1331class pass_assumptions : public gimple_opt_pass
1332{
1333public:
1334 pass_assumptions (gcc::context *ctxt)
1335 : gimple_opt_pass (pass_data_assumptions, ctxt)
1336 {}
1337
1338 /* opt_pass methods: */
1339 bool gate (function *fun) final override { return fun->assume_function; }
1340 unsigned int execute (function *) final override
1341 {
1342 assume_query query;
1343 if (dump_file)
1344 fprintf (stream: dump_file, format: "Assumptions :\n--------------\n");
1345
1346 for (tree arg = DECL_ARGUMENTS (cfun->decl); arg; arg = DECL_CHAIN (arg))
1347 {
1348 tree name = ssa_default_def (cfun, arg);
1349 if (!name || !gimple_range_ssa_p (exp: name))
1350 continue;
1351 tree type = TREE_TYPE (name);
1352 if (!Value_Range::supports_type_p (type))
1353 continue;
1354 Value_Range assume_range (type);
1355 if (query.assume_range_p (r&: assume_range, name))
1356 {
1357 // Set the global range of NAME to anything calculated.
1358 set_range_info (name, assume_range);
1359 if (dump_file)
1360 {
1361 print_generic_expr (dump_file, name, TDF_SLIM);
1362 fprintf (stream: dump_file, format: " -> ");
1363 assume_range.dump (dump_file);
1364 fputc (c: '\n', stream: dump_file);
1365 }
1366 }
1367 }
1368 if (dump_file)
1369 {
1370 fputc (c: '\n', stream: dump_file);
1371 gimple_dump_cfg (dump_file, dump_flags & ~TDF_DETAILS);
1372 if (dump_flags & TDF_DETAILS)
1373 query.dump (f: dump_file);
1374 }
1375 return TODO_discard_function;
1376 }
1377
1378}; // class pass_assumptions
1379
1380} // anon namespace
1381
1382gimple_opt_pass *
1383make_pass_vrp (gcc::context *ctxt)
1384{
1385 return new pass_vrp (ctxt, pass_data_vrp, true);
1386}
1387
1388gimple_opt_pass *
1389make_pass_early_vrp (gcc::context *ctxt)
1390{
1391 return new pass_vrp (ctxt, pass_data_early_vrp, false);
1392}
1393
1394gimple_opt_pass *
1395make_pass_fast_vrp (gcc::context *ctxt)
1396{
1397 return new pass_vrp (ctxt, pass_data_fast_vrp, false);
1398}
1399
1400gimple_opt_pass *
1401make_pass_assumptions (gcc::context *ctx)
1402{
1403 return new pass_assumptions (ctx);
1404}
1405
