/* Control flow graph analysis code for GNU compiler.
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains various simple utilities to analyze the CFG.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "cfghooks.h"
#include "timevar.h"
#include "cfganal.h"
#include "cfgloop.h"
#include "diagnostic.h"

namespace {
/* Store the data structures necessary for depth-first search.  */
class depth_first_search
{
public:
  depth_first_search ();

  basic_block execute (basic_block);
  void add_bb (basic_block);

private:
  /* stack for backtracking during the algorithm */
  auto_vec<basic_block, 20> m_stack;

  /* record of basic blocks already seen by depth-first search */
  auto_sbitmap m_visited_blocks;
};
}

/* Mark the back edges in DFS traversal.
   Return nonzero if a loop (natural or otherwise) is present.
   Inspired by Depth_First_Search_PP described in:

     Advanced Compiler Design and Implementation
     Steven Muchnick
     Morgan Kaufmann, 1997

   and heavily borrowed from pre_and_rev_post_order_compute.  */

bool
mark_dfs_back_edges (struct function *fun)
{
  int *pre;
  int *post;
  int prenum = 1;
  int postnum = 1;
  bool found = false;

  /* Allocate the preorder and postorder number arrays.  */
  pre = XCNEWVEC (int, last_basic_block_for_fn (fun));
  post = XCNEWVEC (int, last_basic_block_for_fn (fun));

  /* Allocate stack for back-tracking up CFG.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (fun) + 1);

  /* Allocate bitmap to track nodes that have been visited.  */
  auto_sbitmap visited (last_basic_block_for_fn (fun));

  /* None of the nodes in the CFG have been visited yet.  */
  bitmap_clear (visited);

  /* Push the first edge on to the stack.  */
  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (fun)->succs));

  while (!stack.is_empty ())
    {
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;
      ei_edge (ei)->flags &= ~EDGE_DFS_BACK;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR_FOR_FN (fun)
          && ! bitmap_bit_p (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          bitmap_set_bit (visited, dest->index);

          pre[dest->index] = prenum++;
          if (EDGE_COUNT (dest->succs) > 0)
            {
              /* Since the DEST node has been visited for the first
                 time, check its successors.  */
              stack.quick_push (ei_start (dest->succs));
            }
          else
            post[dest->index] = postnum++;
        }
      else
        {
          if (dest != EXIT_BLOCK_PTR_FOR_FN (fun)
              && src != ENTRY_BLOCK_PTR_FOR_FN (fun)
              && pre[src->index] >= pre[dest->index]
              && post[dest->index] == 0)
            ei_edge (ei)->flags |= EDGE_DFS_BACK, found = true;

          if (ei_one_before_end_p (ei)
              && src != ENTRY_BLOCK_PTR_FOR_FN (fun))
            post[src->index] = postnum++;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  free (pre);
  free (post);

  return found;
}

bool
mark_dfs_back_edges (void)
{
  return mark_dfs_back_edges (cfun);
}

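/* A minimal usage sketch (illustrative only, not part of GCC): after
   calling mark_dfs_back_edges, every back edge found by the DFS walk
   carries EDGE_DFS_BACK in its flags, so a pass can count or inspect
   them as below.  count_dfs_back_edges is a hypothetical helper, not
   something cfganal.h declares.  */
#if 0
static unsigned
count_dfs_back_edges (struct function *fun)
{
  unsigned n = 0;
  basic_block bb;
  edge e;
  edge_iterator ei;

  mark_dfs_back_edges (fun);
  FOR_EACH_BB_FN (bb, fun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      if (e->flags & EDGE_DFS_BACK)
        n++;
  return n;
}
#endif
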
/* Verify that EDGE_DFS_BACK is up to date for FUN: save the current
   back-edge marks, recompute them, and report an internal error on
   any mismatch.  */

void
verify_marked_backedges (struct function *fun)
{
  auto_edge_flag saved_dfs_back (fun);
  basic_block bb;
  edge e;
  edge_iterator ei;

  // Save all the back edges...
  FOR_EACH_BB_FN (bb, fun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      {
        if (e->flags & EDGE_DFS_BACK)
          {
            e->flags |= saved_dfs_back;
            e->flags &= ~EDGE_DFS_BACK;
          }
      }

  // ...and verify that recalculating them agrees with the saved ones.
  mark_dfs_back_edges ();
  FOR_EACH_BB_FN (bb, fun)
    FOR_EACH_EDGE (e, ei, bb->succs)
      {
        if (((e->flags & EDGE_DFS_BACK) != 0)
            != ((e->flags & saved_dfs_back) != 0))
          internal_error ("%<verify_marked_backedges%> failed");

        e->flags &= ~saved_dfs_back;
      }
}

/* Find unreachable blocks.  An unreachable block will have 0 in
   the reachable bit in block->flags.  A nonzero value indicates the
   block is reachable.  */

void
find_unreachable_blocks (void)
{
  edge e;
  edge_iterator ei;
  basic_block *tos, *worklist, bb;

  tos = worklist = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));

  /* Clear all the reachability flags.  */

  FOR_EACH_BB_FN (bb, cfun)
    bb->flags &= ~BB_REACHABLE;

  /* Add our starting points to the worklist.  Almost always there will
     be only one.  It isn't inconceivable that we might one day directly
     support Fortran alternate entry points.  */

  FOR_EACH_EDGE (e, ei, ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs)
    {
      *tos++ = e->dest;

      /* Mark the block reachable.  */
      e->dest->flags |= BB_REACHABLE;
    }

  /* Iterate: find everything reachable from what we've already seen.  */

  while (tos != worklist)
    {
      basic_block b = *--tos;

      FOR_EACH_EDGE (e, ei, b->succs)
        {
          basic_block dest = e->dest;

          if (!(dest->flags & BB_REACHABLE))
            {
              *tos++ = dest;
              dest->flags |= BB_REACHABLE;
            }
        }
    }

  free (worklist);
}

/* Verify that there are no unreachable blocks in the current function.  */

void
verify_no_unreachable_blocks (void)
{
  find_unreachable_blocks ();

  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    gcc_assert ((bb->flags & BB_REACHABLE) != 0);
}

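/* Usage sketch (illustrative only): a pass that wants to skip dead
   regions can recompute reachability and then test BB_REACHABLE.
   process_block is a hypothetical callback, not a GCC function.  */
#if 0
static void
walk_reachable_blocks (void (*process_block) (basic_block))
{
  basic_block bb;

  find_unreachable_blocks ();
  FOR_EACH_BB_FN (bb, cfun)
    if (bb->flags & BB_REACHABLE)
      process_block (bb);
}
#endif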

/* Functions to access an edge list with a vector representation.
   Enough data is kept such that given an index number, the
   pred and succ that edge represents can be determined, or
   given a pred and a succ, its index number can be returned.
   This allows algorithms which consume a lot of memory to
   represent the normally full matrix of edge (pred,succ) with a
   single indexed vector, edge (EDGE_INDEX (pred, succ)), with no
   wasted space in the client code due to sparse flow graphs.  */

/* This function initializes the edge list.  Basically the entire
   flowgraph is processed, and all edges are assigned a number,
   and the data structure is filled in.  */

struct edge_list *
create_edge_list (void)
{
  struct edge_list *elist;
  edge e;
  int num_edges;
  basic_block bb;
  edge_iterator ei;

  /* Determine the number of edges in the flow graph by counting successor
     edges on each basic block.  */
  num_edges = 0;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      num_edges += EDGE_COUNT (bb->succs);
    }

  elist = XNEW (struct edge_list);
  elist->num_edges = num_edges;
  elist->index_to_edge = XNEWVEC (edge, num_edges);

  num_edges = 0;

  /* Follow successors of blocks, and register these edges.  */
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      elist->index_to_edge[num_edges++] = e;

  return elist;
}

/* This function frees the memory associated with an edge list.  */

void
free_edge_list (struct edge_list *elist)
{
  if (elist)
    {
      free (elist->index_to_edge);
      free (elist);
    }
}

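/* Usage sketch (illustrative only): build the edge list, map each edge
   to its index and back, then release it.  edge_list_roundtrip is a
   hypothetical helper.  */
#if 0
static void
edge_list_roundtrip (FILE *dump)
{
  struct edge_list *elist = create_edge_list ();

  for (int i = 0; i < NUM_EDGES (elist); i++)
    {
      edge e = INDEX_EDGE (elist, i);
      gcc_assert (EDGE_INDEX (elist, e->src, e->dest) == i);
    }
  if (dump)
    print_edge_list (dump, elist);
  free_edge_list (elist);
}
#endif
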
/* This function provides debug output showing an edge list.  */

DEBUG_FUNCTION void
print_edge_list (FILE *f, struct edge_list *elist)
{
  int x;

  fprintf (f, "Compressed edge list, %d BBs + entry & exit, and %d edges\n",
           n_basic_blocks_for_fn (cfun), elist->num_edges);

  for (x = 0; x < elist->num_edges; x++)
    {
      fprintf (f, " %-4d - edge(", x);
      if (INDEX_EDGE_PRED_BB (elist, x) == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        fprintf (f, "entry,");
      else
        fprintf (f, "%d,", INDEX_EDGE_PRED_BB (elist, x)->index);

      if (INDEX_EDGE_SUCC_BB (elist, x) == EXIT_BLOCK_PTR_FOR_FN (cfun))
        fprintf (f, "exit)\n");
      else
        fprintf (f, "%d)\n", INDEX_EDGE_SUCC_BB (elist, x)->index);
    }
}

/* This function provides an internal consistency check of an edge list,
   verifying that all edges are present, and that there are no
   extra edges.  */

DEBUG_FUNCTION void
verify_edge_list (FILE *f, struct edge_list *elist)
{
  int pred, succ, index;
  edge e;
  basic_block bb, p, s;
  edge_iterator ei;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      FOR_EACH_EDGE (e, ei, bb->succs)
        {
          pred = e->src->index;
          succ = e->dest->index;
          index = EDGE_INDEX (elist, e->src, e->dest);
          if (index == EDGE_INDEX_NO_EDGE)
            {
              fprintf (f, "*p* No index for edge from %d to %d\n", pred, succ);
              continue;
            }

          if (INDEX_EDGE_PRED_BB (elist, index)->index != pred)
            fprintf (f, "*p* Pred for index %d should be %d not %d\n",
                     index, pred, INDEX_EDGE_PRED_BB (elist, index)->index);
          if (INDEX_EDGE_SUCC_BB (elist, index)->index != succ)
            fprintf (f, "*p* Succ for index %d should be %d not %d\n",
                     index, succ, INDEX_EDGE_SUCC_BB (elist, index)->index);
        }
    }

  /* We've verified that all the edges are in the list, now let's make sure
     there are no spurious edges in the list.  This is an expensive check!  */

  FOR_BB_BETWEEN (p, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_BB_BETWEEN (s, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
      {
        int found_edge = 0;

        FOR_EACH_EDGE (e, ei, p->succs)
          if (e->dest == s)
            {
              found_edge = 1;
              break;
            }

        FOR_EACH_EDGE (e, ei, s->preds)
          if (e->src == p)
            {
              found_edge = 1;
              break;
            }

        if (EDGE_INDEX (elist, p, s)
            == EDGE_INDEX_NO_EDGE && found_edge != 0)
          fprintf (f, "*** Edge (%d, %d) appears to not have an index\n",
                   p->index, s->index);
        if (EDGE_INDEX (elist, p, s)
            != EDGE_INDEX_NO_EDGE && found_edge == 0)
          fprintf (f, "*** Edge (%d, %d) has index %d, but there is no edge\n",
                   p->index, s->index, EDGE_INDEX (elist, p, s));
      }
}

/* Functions to compute control dependences.  */

/* Indicate block BB is control dependent on an edge with index EDGE_INDEX.  */
void
control_dependences::set_control_dependence_map_bit (basic_block bb,
                                                     int edge_index)
{
  if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun))
    return;
  gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun));
  bitmap_set_bit (&control_dependence_map[bb->index], edge_index);
}

/* Clear all control dependences for block BB.  */
void
control_dependences::clear_control_dependence_bitmap (basic_block bb)
{
  bitmap_clear (&control_dependence_map[bb->index]);
}

/* Determine all blocks' control dependences on the edge with index
   EDGE_INDEX in the edge list, ala Morgan, Section 3.6.  */

void
control_dependences::find_control_dependence (int edge_index)
{
  basic_block current_block;
  basic_block ending_block;

  gcc_assert (get_edge_src (edge_index) != EXIT_BLOCK_PTR_FOR_FN (cfun));

  ending_block = get_immediate_dominator (CDI_POST_DOMINATORS,
                                          get_edge_src (edge_index));

  for (current_block = get_edge_dest (edge_index);
       current_block != ending_block
       && current_block != EXIT_BLOCK_PTR_FOR_FN (cfun);
       current_block = get_immediate_dominator (CDI_POST_DOMINATORS,
                                                current_block))
    set_control_dependence_map_bit (current_block, edge_index);
}

/* Record all blocks' control dependences on all edges in the edge
   list built here, ala Morgan, Section 3.6.  */

control_dependences::control_dependences ()
{
  timevar_push (TV_CONTROL_DEPENDENCES);

  /* Initialize the edge list.  */
  int num_edges = 0;
  basic_block bb;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    num_edges += EDGE_COUNT (bb->succs);
  m_el.create (num_edges);
  edge e;
  edge_iterator ei;
  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                  EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    FOR_EACH_EDGE (e, ei, bb->succs)
      {
        /* For abnormal edges, we don't make current_block control
           dependent because instructions that throw are always necessary
           anyway.  */
        if (e->flags & EDGE_ABNORMAL)
          {
            num_edges--;
            continue;
          }
        m_el.quick_push (std::make_pair (e->src->index, e->dest->index));
      }

  bitmap_obstack_initialize (&m_bitmaps);
  control_dependence_map.create (last_basic_block_for_fn (cfun));
  control_dependence_map.quick_grow_cleared (last_basic_block_for_fn (cfun));
  for (int i = 0; i < last_basic_block_for_fn (cfun); ++i)
    bitmap_initialize (&control_dependence_map[i], &m_bitmaps);
  for (int i = 0; i < num_edges; ++i)
    find_control_dependence (i);

  timevar_pop (TV_CONTROL_DEPENDENCES);
}

/* Free control dependences and the associated edge list.  */

control_dependences::~control_dependences ()
{
  control_dependence_map.release ();
  m_el.release ();
  bitmap_obstack_release (&m_bitmaps);
}

/* Returns the bitmap of edges the basic-block I is dependent on.  */

bitmap
control_dependences::get_edges_dependent_on (int i)
{
  return &control_dependence_map[i];
}

/* Returns the edge source with index I from the edge list.  */

basic_block
control_dependences::get_edge_src (int i)
{
  return BASIC_BLOCK_FOR_FN (cfun, m_el[i].first);
}

/* Returns the edge destination with index I from the edge list.  */

basic_block
control_dependences::get_edge_dest (int i)
{
  return BASIC_BLOCK_FOR_FN (cfun, m_el[i].second);
}

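/* Usage sketch (illustrative only): control_dependences relies on
   post-dominator information, so a typical caller computes it first.
   The dump loop shows how get_edges_dependent_on pairs with
   get_edge_src/get_edge_dest.  dump_control_dependences is a
   hypothetical helper.  */
#if 0
static void
dump_control_dependences (FILE *f)
{
  calculate_dominance_info (CDI_POST_DOMINATORS);
  control_dependences *cd = new control_dependences ();

  basic_block bb;
  FOR_EACH_BB_FN (bb, cfun)
    {
      unsigned i;
      bitmap_iterator bi;
      EXECUTE_IF_SET_IN_BITMAP (cd->get_edges_dependent_on (bb->index),
                                0, i, bi)
        fprintf (f, "bb %d depends on edge %d -> %d\n", bb->index,
                 cd->get_edge_src (i)->index, cd->get_edge_dest (i)->index);
    }

  delete cd;
  free_dominance_info (CDI_POST_DOMINATORS);
}
#endif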

/* Given PRED and SUCC blocks, return the edge which connects the blocks.
   If no such edge exists, return NULL.  */

edge
find_edge (basic_block pred, basic_block succ)
{
  edge e;
  edge_iterator ei;

  if (EDGE_COUNT (pred->succs) <= EDGE_COUNT (succ->preds))
    {
      FOR_EACH_EDGE (e, ei, pred->succs)
        if (e->dest == succ)
          return e;
    }
  else
    {
      FOR_EACH_EDGE (e, ei, succ->preds)
        if (e->src == pred)
          return e;
    }

  return NULL;
}

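/* Usage sketch (illustrative only): find_edge walks the shorter of the
   two edge vectors, so it is the cheap way to test adjacency or to query
   flags on the connecting edge.  falls_through_p is a hypothetical
   helper.  */
#if 0
static bool
falls_through_p (basic_block a, basic_block b)
{
  edge e = find_edge (a, b);
  return e && (e->flags & EDGE_FALLTHRU);
}
#endif
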
/* This routine will determine what, if any, edge there is between
   a specified predecessor and successor.  */

int
find_edge_index (struct edge_list *edge_list, basic_block pred, basic_block succ)
{
  int x;

  for (x = 0; x < NUM_EDGES (edge_list); x++)
    if (INDEX_EDGE_PRED_BB (edge_list, x) == pred
        && INDEX_EDGE_SUCC_BB (edge_list, x) == succ)
      return x;

  return (EDGE_INDEX_NO_EDGE);
}

/* This routine will remove any fake predecessor edges for a basic block.
   When the edge is removed, it is also removed from whatever successor
   list it is in.  */

static void
remove_fake_predecessors (basic_block bb)
{
  edge e;
  edge_iterator ei;

  for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei)); )
    {
      if ((e->flags & EDGE_FAKE) == EDGE_FAKE)
        remove_edge (e);
      else
        ei_next (&ei);
    }
}

/* This routine will remove all fake edges from the flow graph.  If
   we remove all fake successors, it will automatically remove all
   fake predecessors.  */

void
remove_fake_edges (void)
{
  basic_block bb;

  FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb, NULL, next_bb)
    remove_fake_predecessors (bb);
}

/* This routine will remove all fake edges to the EXIT_BLOCK.  */

void
remove_fake_exit_edges (void)
{
  remove_fake_predecessors (EXIT_BLOCK_PTR_FOR_FN (cfun));
}


/* This function will add a fake edge between any block which has no
   successors, and the exit block.  Some data flow equations require these
   edges to exist.  */

void
add_noreturn_fake_exit_edges (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    if (EDGE_COUNT (bb->succs) == 0)
      make_single_succ_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
}

/* This function adds a fake edge from any noreturn block and any
   infinite loop to the exit block.  Some optimizations require a path
   from each node to the exit node.

   See also Morgan, Figure 3.10, pp. 82-83.

   The current implementation is ugly, not attempting to minimize the
   number of inserted fake edges.  To reduce the number of fake edges
   to insert, add fake edges from _innermost_ loops containing only
   nodes not reachable from the exit block.  */

void
connect_infinite_loops_to_exit (void)
{
  /* First add fake exits to noreturn blocks, this is required to
     discover only truly infinite loops below.  */
  add_noreturn_fake_exit_edges ();

  /* Perform depth-first search in the reverse graph to find nodes
     reachable from the exit block.  */
  depth_first_search dfs;
  dfs.add_bb (EXIT_BLOCK_PTR_FOR_FN (cfun));

  /* Repeatedly add fake edges, updating the unreachable nodes.  */
  basic_block unvisited_block = EXIT_BLOCK_PTR_FOR_FN (cfun);
  while (1)
    {
      unvisited_block = dfs.execute (unvisited_block);
      if (!unvisited_block)
        break;

      basic_block deadend_block = dfs_find_deadend (unvisited_block);
      edge e = make_edge (deadend_block, EXIT_BLOCK_PTR_FOR_FN (cfun),
                          EDGE_FAKE);
      e->probability = profile_probability::never ();
      dfs.add_bb (deadend_block);
    }
}

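/* Usage sketch (illustrative only): passes that need every block to
   reach EXIT typically bracket their analysis with these calls.
   analyze_with_connected_exit is a hypothetical placeholder.  */
#if 0
static void
analyze_with_connected_exit (void)
{
  connect_infinite_loops_to_exit ();
  /* ... run the analysis, relying on a path from each block
     to EXIT ... */
  remove_fake_exit_edges ();
}
#endif
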
/* Compute reverse top sort order.  This is computing a post order
   numbering of the graph.  If INCLUDE_ENTRY_EXIT is true, then
   ENTRY_BLOCK and EXIT_BLOCK are included.  If DELETE_UNREACHABLE is
   true, unreachable blocks are deleted.  */

int
post_order_compute (int *post_order, bool include_entry_exit,
                    bool delete_unreachable)
{
  int post_order_num = 0;
  int count;

  if (include_entry_exit)
    post_order[post_order_num++] = EXIT_BLOCK;

  /* Allocate stack for back-tracking up CFG.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);

  /* Allocate bitmap to track nodes that have been visited.  */
  auto_sbitmap visited (last_basic_block_for_fn (cfun));

  /* None of the nodes in the CFG have been visited yet.  */
  bitmap_clear (visited);

  /* Push the first edge on to the stack.  */
  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs));

  while (!stack.is_empty ())
    {
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
          && ! bitmap_bit_p (visited, dest->index))
        {
          /* Mark that we have visited the destination.  */
          bitmap_set_bit (visited, dest->index);

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack.quick_push (ei_start (dest->succs));
          else
            post_order[post_order_num++] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei)
              && src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
            post_order[post_order_num++] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  if (include_entry_exit)
    {
      post_order[post_order_num++] = ENTRY_BLOCK;
      count = post_order_num;
    }
  else
    count = post_order_num + 2;

  /* Delete the unreachable blocks if some were found and we are
     supposed to do it.  */
  if (delete_unreachable && (count != n_basic_blocks_for_fn (cfun)))
    {
      basic_block b;
      basic_block next_bb;
      for (b = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; b
           != EXIT_BLOCK_PTR_FOR_FN (cfun); b = next_bb)
        {
          next_bb = b->next_bb;

          if (!(bitmap_bit_p (visited, b->index)))
            delete_basic_block (b);
        }

      tidy_fallthru_edges ();
    }

  return post_order_num;
}

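/* Usage sketch (illustrative only): a backward dataflow problem visits
   blocks in post order so that most successors are processed before
   their predecessors.  visit_in_post_order is a hypothetical helper.  */
#if 0
static void
visit_in_post_order (void (*visit) (basic_block))
{
  int *post_order = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = post_order_compute (post_order, false, false);

  for (int i = 0; i < n; i++)
    visit (BASIC_BLOCK_FOR_FN (cfun, post_order[i]));

  free (post_order);
}
#endif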

/* Helper routine for inverted_rev_post_order_compute,
   flow_dfs_compute_reverse_execute, and the reverse-CFG
   depth-first search in dominance.cc.
   BB has to belong to a region of the CFG that is
   unreachable by inverted traversal from the exit,
   i.e. there's no control flow path from ENTRY to EXIT
   that contains this BB.
   This can happen in two cases - if there's an infinite loop
   or if there's a block that has no successor
   (call to a function with no return).
   Some RTL passes deal with this condition by
   calling connect_infinite_loops_to_exit () and/or
   add_noreturn_fake_exit_edges ().
   However, those methods involve modifying the CFG itself
   which may not be desirable.
   Hence, we deal with the infinite loop/no return cases
   by identifying a unique basic block that can reach all blocks
   in such a region by inverted traversal.
   This function returns a basic block that guarantees
   that all blocks in the region are reachable
   by starting an inverted traversal from the returned block.  */

basic_block
dfs_find_deadend (basic_block bb)
{
  auto_bitmap visited;
  basic_block next = bb;

  for (;;)
    {
      if (EDGE_COUNT (next->succs) == 0)
        return next;

      if (! bitmap_set_bit (visited, next->index))
        return bb;

      bb = next;
      /* If we are in an analyzed cycle make sure to try exiting it.
         Note this is a heuristic only and expected to work when loop
         fixup is needed as well.  */
      if (! bb->loop_father
          || ! loop_outer (bb->loop_father))
        next = EDGE_SUCC (bb, 0)->dest;
      else
        {
          edge_iterator ei;
          edge e;
          FOR_EACH_EDGE (e, ei, bb->succs)
            if (loop_exit_edge_p (bb->loop_father, e))
              break;
          next = e ? e->dest : EDGE_SUCC (bb, 0)->dest;
        }
    }
}

/* Compute the reverse top sort order of the inverted CFG,
   i.e. starting from the exit block and following the edges backward
   (from successors to predecessors).
   This ordering can be used for forward dataflow problems among others.

   Optionally if START_POINTS is specified, start from the exit block and
   all basic blocks in START_POINTS.  This is used by CD-DCE.

   This function assumes that all blocks in the CFG are reachable
   from the ENTRY (but not necessarily from EXIT).

   If there's an infinite loop,
   a simple inverted traversal starting from the blocks
   with no successors can't visit all blocks.
   To solve this problem, we first do an inverted traversal
   starting from the blocks with no successors.
   Then if there's any block left that's not visited by the regular
   inverted traversal from EXIT,
   those blocks are in such a problematic region.
   Among those, we find one block that has
   any visited predecessor (which is an entry into such a region),
   start looking for a "dead end" from that block,
   and do another inverted traversal from that dead end.  */

int
inverted_rev_post_order_compute (struct function *fn,
                                 int *rev_post_order,
                                 sbitmap *start_points)
{
  basic_block bb;

  int rev_post_order_num = n_basic_blocks_for_fn (fn) - 1;

  if (flag_checking)
    verify_no_unreachable_blocks ();

  /* Allocate stack for back-tracking up CFG.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (cfun) + 1);

  /* Allocate bitmap to track nodes that have been visited.  */
  auto_sbitmap visited (last_basic_block_for_fn (cfun));

  /* None of the nodes in the CFG have been visited yet.  */
  bitmap_clear (visited);

  if (start_points)
    {
      FOR_ALL_BB_FN (bb, cfun)
        if (bitmap_bit_p (*start_points, bb->index)
            && EDGE_COUNT (bb->preds) > 0)
          {
            stack.quick_push (ei_start (bb->preds));
            bitmap_set_bit (visited, bb->index);
          }
      if (EDGE_COUNT (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds))
        {
          stack.quick_push (ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds));
          bitmap_set_bit (visited, EXIT_BLOCK_PTR_FOR_FN (cfun)->index);
        }
    }
  else
    /* Put all blocks that have no successor into the initial work list.  */
    FOR_ALL_BB_FN (bb, cfun)
      if (EDGE_COUNT (bb->succs) == 0)
        {
          /* Push the initial edge on to the stack.  */
          if (EDGE_COUNT (bb->preds) > 0)
            {
              stack.quick_push (ei_start (bb->preds));
              bitmap_set_bit (visited, bb->index);
            }
        }

  do
    {
      bool has_unvisited_bb = false;

      /* The inverted traversal loop.  */
      while (!stack.is_empty ())
        {
          edge_iterator ei;
          basic_block pred;

          /* Look at the edge on the top of the stack.  */
          ei = stack.last ();
          bb = ei_edge (ei)->dest;
          pred = ei_edge (ei)->src;

          /* Check if the predecessor has been visited yet.  */
          if (! bitmap_bit_p (visited, pred->index))
            {
              /* Mark that we have visited the destination.  */
              bitmap_set_bit (visited, pred->index);

              if (EDGE_COUNT (pred->preds) > 0)
                /* Since the predecessor node has been visited for the first
                   time, check its predecessors.  */
                stack.quick_push (ei_start (pred->preds));
              else
                rev_post_order[rev_post_order_num--] = pred->index;
            }
          else
            {
              if (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)
                  && ei_one_before_end_p (ei))
                rev_post_order[rev_post_order_num--] = bb->index;

              if (!ei_one_before_end_p (ei))
                ei_next (&stack.last ());
              else
                stack.pop ();
            }
        }

      /* Detect any infinite loop and activate the kludge.
         Note that this doesn't check EXIT_BLOCK itself
         since EXIT_BLOCK is always added after the outer do-while loop.  */
      FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
                      EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
        if (!bitmap_bit_p (visited, bb->index))
          {
            has_unvisited_bb = true;

            if (EDGE_COUNT (bb->preds) > 0)
              {
                edge_iterator ei;
                edge e;
                basic_block visited_pred = NULL;

                /* Find an already visited predecessor.  */
                FOR_EACH_EDGE (e, ei, bb->preds)
                  {
                    if (bitmap_bit_p (visited, e->src->index))
                      visited_pred = e->src;
                  }

                if (visited_pred)
                  {
                    basic_block be = dfs_find_deadend (bb);
                    gcc_assert (be != NULL);
                    bitmap_set_bit (visited, be->index);
                    stack.quick_push (ei_start (be->preds));
                    break;
                  }
              }
          }

      if (has_unvisited_bb && stack.is_empty ())
        {
          /* No blocks are reachable from EXIT at all.
             Find a dead-end from the ENTRY, and restart the iteration.  */
          basic_block be = dfs_find_deadend (ENTRY_BLOCK_PTR_FOR_FN (cfun));
          gcc_assert (be != NULL);
          bitmap_set_bit (visited, be->index);
          stack.quick_push (ei_start (be->preds));
        }

      /* The only case the below while fires is
         when there's an infinite loop.  */
    }
  while (!stack.is_empty ());

  /* EXIT_BLOCK is always included.  */
  rev_post_order[rev_post_order_num--] = EXIT_BLOCK;
  return n_basic_blocks_for_fn (fn);
}

/* Compute the depth first search order of FN and store in the array
   PRE_ORDER if nonzero.  If REV_POST_ORDER is nonzero, return the
   reverse completion number for each node.  Returns the number of nodes
   visited.  A depth first search tries to get as far away from the starting
   point as quickly as possible.

   In case the function has unreachable blocks the number of nodes
   visited does not include them.

   pre_order is really a preorder numbering of the graph.
   rev_post_order is really a reverse postorder numbering of the graph.  */

int
pre_and_rev_post_order_compute_fn (struct function *fn,
                                   int *pre_order, int *rev_post_order,
                                   bool include_entry_exit)
{
  int pre_order_num = 0;
  int rev_post_order_num = n_basic_blocks_for_fn (fn) - 1;

  /* Allocate stack for back-tracking up CFG.  */
  auto_vec<edge_iterator, 20> stack (n_basic_blocks_for_fn (fn) + 1);

  if (include_entry_exit)
    {
      if (pre_order)
        pre_order[pre_order_num] = ENTRY_BLOCK;
      pre_order_num++;
      if (rev_post_order)
        rev_post_order[rev_post_order_num--] = EXIT_BLOCK;
    }
  else
    rev_post_order_num -= NUM_FIXED_BLOCKS;

  /* BB flag to track nodes that have been visited.  */
  auto_bb_flag visited (fn);

  /* Push the first edge on to the stack.  */
  stack.quick_push (ei_start (ENTRY_BLOCK_PTR_FOR_FN (fn)->succs));

  while (!stack.is_empty ())
    {
      basic_block src;
      basic_block dest;

      /* Look at the edge on the top of the stack.  */
      edge_iterator ei = stack.last ();
      src = ei_edge (ei)->src;
      dest = ei_edge (ei)->dest;

      /* Check if the edge destination has been visited yet.  */
      if (dest != EXIT_BLOCK_PTR_FOR_FN (fn)
          && ! (dest->flags & visited))
        {
          /* Mark that we have visited the destination.  */
          dest->flags |= visited;

          if (pre_order)
            pre_order[pre_order_num] = dest->index;

          pre_order_num++;

          if (EDGE_COUNT (dest->succs) > 0)
            /* Since the DEST node has been visited for the first
               time, check its successors.  */
            stack.quick_push (ei_start (dest->succs));
          else if (rev_post_order)
            /* There are no successors for the DEST node so assign
               its reverse completion number.  */
            rev_post_order[rev_post_order_num--] = dest->index;
        }
      else
        {
          if (ei_one_before_end_p (ei)
              && src != ENTRY_BLOCK_PTR_FOR_FN (fn)
              && rev_post_order)
            /* There are no more successors for the SRC node
               so assign its reverse completion number.  */
            rev_post_order[rev_post_order_num--] = src->index;

          if (!ei_one_before_end_p (ei))
            ei_next (&stack.last ());
          else
            stack.pop ();
        }
    }

  if (include_entry_exit)
    {
      if (pre_order)
        pre_order[pre_order_num] = EXIT_BLOCK;
      pre_order_num++;
      if (rev_post_order)
        rev_post_order[rev_post_order_num--] = ENTRY_BLOCK;
    }

  /* Clear the temporarily allocated flag.  */
  if (!rev_post_order)
    rev_post_order = pre_order;
  for (int i = 0; i < pre_order_num; ++i)
    BASIC_BLOCK_FOR_FN (fn, rev_post_order[i])->flags &= ~visited;

  return pre_order_num;
}

/* Like pre_and_rev_post_order_compute_fn but operating on the
   current function and asserting that all nodes were visited.  */

int
pre_and_rev_post_order_compute (int *pre_order, int *rev_post_order,
                                bool include_entry_exit)
{
  int pre_order_num
    = pre_and_rev_post_order_compute_fn (cfun, pre_order, rev_post_order,
                                         include_entry_exit);
  if (include_entry_exit)
    /* The number of nodes visited should be the number of blocks.  */
    gcc_assert (pre_order_num == n_basic_blocks_for_fn (cfun));
  else
    /* The number of nodes visited should be the number of blocks minus
       the entry and exit blocks which are not visited here.  */
    gcc_assert (pre_order_num
                == (n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS));

  return pre_order_num;
}

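/* Usage sketch (illustrative only): forward dataflow problems iterate
   blocks in reverse postorder so that all non-back-edge predecessors of
   a block are visited before the block itself.  visit_in_rpo is a
   hypothetical helper; passing NULL for pre_order is supported.  */
#if 0
static void
visit_in_rpo (void (*visit) (basic_block))
{
  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = pre_and_rev_post_order_compute (NULL, rpo, false);

  for (int i = 0; i < n; i++)
    visit (BASIC_BLOCK_FOR_FN (cfun, rpo[i]));

  free (rpo);
}
#endif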

/* Per basic-block data for rev_post_order_and_mark_dfs_back_seme,
   element of a sparsely populated array indexed by basic-block number.  */
typedef auto_vec<int, 2> scc_exit_vec_t;
struct rpoamdbs_bb_data {
  int depth;
  int bb_to_pre;
  /* The basic-block index of the SCC entry of the block visited first
     (the SCC leader).  */
  int scc;
  /* The index into the RPO array where the block's SCC entries end
     (only valid for the SCC leader).  */
  int scc_end;
  /* The indexes of the exit destinations of this SCC (only valid
     for the SCC leader).  Initialized upon discovery of SCC leaders.  */
  scc_exit_vec_t scc_exits;
};

/* Tag H as a header of B, weaving H and its loop header list into the
   current loop header list of B.  */

static void
tag_header (int b, int h, rpoamdbs_bb_data *bb_data)
{
  if (h == -1 || b == h)
    return;
  int cur1 = b;
  int cur2 = h;
  while (bb_data[cur1].scc != -1)
    {
      int ih = bb_data[cur1].scc;
      if (ih == cur2)
        return;
      if (bb_data[ih].depth < bb_data[cur2].depth)
        {
          bb_data[cur1].scc = cur2;
          cur1 = cur2;
          cur2 = ih;
        }
      else
        cur1 = ih;
    }
  bb_data[cur1].scc = cur2;
}

/* Comparator sorting two edge destinations E1 and E2 by their index
   in the PRE array as specified by BB_TO_PRE.  */

static int
cmp_edge_dest_pre (const void *e1_, const void *e2_, void *data_)
{
  const int *e1 = (const int *)e1_;
  const int *e2 = (const int *)e2_;
  rpoamdbs_bb_data *bb_data = (rpoamdbs_bb_data *)data_;
  return (bb_data[*e1].bb_to_pre - bb_data[*e2].bb_to_pre);
}

/* Compute the reverse completion number of a depth first search
   on the SEME region denoted by the ENTRY edge and the EXIT_BBS set of
   exit block indexes and store it in the array REV_POST_ORDER.
   Also sets the EDGE_DFS_BACK edge flags according to this visitation
   order.
   Returns the number of nodes visited.

   In case the function has unreachable blocks the number of nodes
   visited does not include them.

   If FOR_ITERATION is true then compute an RPO where SCCs form a
   contiguous region in the RPO array.
   *TOPLEVEL_SCC_EXTENTS if not NULL is filled with pairs of
   *REV_POST_ORDER indexes denoting extents of the toplevel SCCs in
   this region.  */

int
rev_post_order_and_mark_dfs_back_seme (struct function *fn, edge entry,
                                       bitmap exit_bbs, bool for_iteration,
                                       int *rev_post_order,
                                       vec<std::pair<int, int> >
                                         *toplevel_scc_extents)
{
  int rev_post_order_num = 0;

  /* BB flag to track nodes that have been visited.  */
  auto_bb_flag visited (fn);

  /* Lazily initialized per-BB data for the two DFS walks below.  */
  rpoamdbs_bb_data *bb_data
    = XNEWVEC (rpoamdbs_bb_data, last_basic_block_for_fn (fn));

  /* First DFS walk, loop discovery according to
     A New Algorithm for Identifying Loops in Decompilation
     by Tao Wei, Jian Mao, Wei Zou and You Chen of the Institute of
     Computer Science and Technology of the Peking University.  */
  auto_vec<edge_iterator, 20> ei_stack (n_basic_blocks_for_fn (fn) + 1);
  auto_bb_flag is_header (fn);
  int depth = 1;
  unsigned n_sccs = 0;

  basic_block dest = entry->dest;
  edge_iterator ei;
  int pre_num = 0;

  /* DFS process DEST.  */
find_loops:
  bb_data[dest->index].bb_to_pre = pre_num++;
  bb_data[dest->index].depth = depth;
  bb_data[dest->index].scc = -1;
  depth++;
  gcc_assert ((dest->flags & (is_header|visited)) == 0);
  dest->flags |= visited;
  ei = ei_start (dest->succs);
  while (!ei_end_p (ei))
    {
      ei_edge (ei)->flags &= ~EDGE_DFS_BACK;
      if (bitmap_bit_p (exit_bbs, ei_edge (ei)->dest->index))
        ;
      else if (!(ei_edge (ei)->dest->flags & visited))
        {
          ei_stack.quick_push (ei);
          dest = ei_edge (ei)->dest;
          /* DFS recurse on DEST.  */
          goto find_loops;

ret_from_find_loops:
          /* Return point of DFS recursion.  */
          ei = ei_stack.pop ();
          dest = ei_edge (ei)->src;
          int header = bb_data[ei_edge (ei)->dest->index].scc;
          tag_header (dest->index, header, bb_data);
          depth = bb_data[dest->index].depth + 1;
        }
      else
        {
          if (bb_data[ei_edge (ei)->dest->index].depth > 0) /* on the stack */
            {
              ei_edge (ei)->flags |= EDGE_DFS_BACK;
              n_sccs++;
              ei_edge (ei)->dest->flags |= is_header;
              ::new (&bb_data[ei_edge (ei)->dest->index].scc_exits)
                auto_vec<int, 2> ();
              tag_header (dest->index, ei_edge (ei)->dest->index, bb_data);
            }
          else if (bb_data[ei_edge (ei)->dest->index].scc == -1)
            ;
          else
            {
              int header = bb_data[ei_edge (ei)->dest->index].scc;
              if (bb_data[header].depth > 0)
                tag_header (dest->index, header, bb_data);
              else
                {
                  /* A re-entry into an existing loop.  */
                  /* ??? Need to mark is_header?  */
                  while (bb_data[header].scc != -1)
                    {
                      header = bb_data[header].scc;
                      if (bb_data[header].depth > 0)
                        {
                          tag_header (dest->index, header, bb_data);
                          break;
                        }
                    }
                }
            }
        }
      ei_next (&ei);
    }
  rev_post_order[rev_post_order_num++] = dest->index;
  /* not on the stack anymore */
  bb_data[dest->index].depth = -bb_data[dest->index].depth;
  if (!ei_stack.is_empty ())
    /* Return from DFS recursion.  */
    goto ret_from_find_loops;

  /* Optimize for no SCCs found or !for_iteration.  */
  if (n_sccs == 0 || !for_iteration)
    {
      /* Clear the temporarily allocated flags.  */
      for (int i = 0; i < rev_post_order_num; ++i)
        BASIC_BLOCK_FOR_FN (fn, rev_post_order[i])->flags
          &= ~(is_header|visited);
      /* And swap elements.  */
      for (int i = 0; i < rev_post_order_num/2; ++i)
        std::swap (rev_post_order[i], rev_post_order[rev_post_order_num-i-1]);
      XDELETEVEC (bb_data);

      return rev_post_order_num;
    }

  /* Next find SCC exits, clear the visited flag and compute an upper bound
     for the edge stack below.  */
  unsigned edge_count = 0;
  for (int i = 0; i < rev_post_order_num; ++i)
    {
      int bb = rev_post_order[i];
      BASIC_BLOCK_FOR_FN (fn, bb)->flags &= ~visited;
      edge e;
      FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (fn, bb)->succs)
        {
          if (bitmap_bit_p (exit_bbs, e->dest->index))
            continue;
          edge_count++;
          /* if e is an exit from e->src, record it for
             bb_data[e->src].scc.  */
          int src_scc = e->src->index;
          if (!(e->src->flags & is_header))
            src_scc = bb_data[src_scc].scc;
          if (src_scc == -1)
            continue;
          int dest_scc = e->dest->index;
          if (!(e->dest->flags & is_header))
            dest_scc = bb_data[dest_scc].scc;
          if (src_scc == dest_scc)
            continue;
          /* When dest_scc is nested inside src_scc it's not an
             exit.  */
          int tem_dest_scc = dest_scc;
          unsigned dest_scc_depth = 0;
          while (tem_dest_scc != -1)
            {
              dest_scc_depth++;
              if ((tem_dest_scc = bb_data[tem_dest_scc].scc) == src_scc)
                break;
            }
          if (tem_dest_scc != -1)
            continue;
          /* When src_scc is nested inside dest_scc record an
             exit from the outermost SCC this edge exits.  */
          int tem_src_scc = src_scc;
          unsigned src_scc_depth = 0;
          while (tem_src_scc != -1)
            {
              if (bb_data[tem_src_scc].scc == dest_scc)
                {
                  edge_count++;
                  bb_data[tem_src_scc].scc_exits.safe_push (e->dest->index);
                  break;
                }
              tem_src_scc = bb_data[tem_src_scc].scc;
              src_scc_depth++;
            }
          /* Else find the outermost SCC this edge exits (exits
             from the inner SCCs are not important for the DFS
             walk adjustment).  Do so by computing the common
             ancestor SCC whose immediate child on the path to the
             source SCC is the exited SCC.  */
          if (tem_src_scc == -1)
            {
              edge_count++;
              while (src_scc_depth > dest_scc_depth)
                {
                  src_scc = bb_data[src_scc].scc;
                  src_scc_depth--;
                }
              while (dest_scc_depth > src_scc_depth)
                {
                  dest_scc = bb_data[dest_scc].scc;
                  dest_scc_depth--;
                }
              while (bb_data[src_scc].scc != bb_data[dest_scc].scc)
                {
                  src_scc = bb_data[src_scc].scc;
                  dest_scc = bb_data[dest_scc].scc;
                }
              bb_data[src_scc].scc_exits.safe_push (e->dest->index);
            }
        }
    }

  /* Now the second DFS walk to compute a RPO where the extent of SCCs
     is minimized thus SCC members are adjacent in the RPO array.
     This is done by performing a DFS walk computing RPO with first visiting
     extra direct edges from SCC entry to its exits.
     That simulates a DFS walk over the graph with SCCs collapsed and
     walking the SCCs themselves only when all outgoing edges from the
     SCCs have been visited.
     SCC_END[scc-header-index] is the position in the RPO array of the
     last member of the SCC.  */
  auto_vec<std::pair<basic_block, basic_block>, 20> estack (edge_count + 1);
  int idx = rev_post_order_num;
  basic_block edest;
  dest = entry->dest;

  /* DFS process DEST.  */
dfs_rpo:
  gcc_checking_assert ((dest->flags & visited) == 0);
  /* Verify we enter SCCs through the same header and SCC nesting appears
     the same.  */
  gcc_assert (bb_data[dest->index].scc == -1
              || (BASIC_BLOCK_FOR_FN (fn, bb_data[dest->index].scc)->flags
                  & visited));
  dest->flags |= visited;
  bb_data[dest->index].scc_end = -1;
  if ((dest->flags & is_header)
      && !bb_data[dest->index].scc_exits.is_empty ())
    {
      /* Push all the SCC exits as outgoing edges from its header to
         be visited first.
         To process exits in the same relative order as in the first
         DFS walk sort them after their destination PRE order index.  */
      gcc_sort_r (&bb_data[dest->index].scc_exits[0],
                  bb_data[dest->index].scc_exits.length (),
                  sizeof (int), cmp_edge_dest_pre, bb_data);
      /* Process edges in reverse to match previous DFS walk order.  */
      for (int i = bb_data[dest->index].scc_exits.length () - 1; i >= 0; --i)
        estack.quick_push (std::make_pair
          (dest, BASIC_BLOCK_FOR_FN (fn, bb_data[dest->index].scc_exits[i])));
    }
  else
    {
      if (dest->flags & is_header)
        bb_data[dest->index].scc_end = idx - 1;
      /* Push the edge vector in reverse to match the iteration order
         from the DFS walk above.  */
      for (int i = EDGE_COUNT (dest->succs) - 1; i >= 0; --i)
        if (!bitmap_bit_p (exit_bbs, EDGE_SUCC (dest, i)->dest->index))
          estack.quick_push (std::make_pair (dest,
                                             EDGE_SUCC (dest, i)->dest));
    }
  while (!estack.is_empty ()
         && estack.last ().first == dest)
    {
      edest = estack.last ().second;
      if (!(edest->flags & visited))
        {
          dest = edest;
          /* DFS recurse on DEST.  */
          goto dfs_rpo;

ret_from_dfs_rpo:
          /* Return point of DFS recursion.  */
          dest = estack.last ().first;
        }
      estack.pop ();
      /* If we processed all SCC exits from DEST mark the SCC end
         since all RPO entries up to DEST itself will now belong
         to its SCC.  The special-case of no SCC exits is already
         dealt with above.  */
      if (dest->flags & is_header
          /* When the last exit edge was processed mark the SCC end
             and push the regular edges.  */
          && bb_data[dest->index].scc_end == -1
          && (estack.is_empty ()
              || estack.last ().first != dest))
        {
          bb_data[dest->index].scc_end = idx - 1;
          /* Push the edge vector in reverse to match the iteration order
             from the DFS walk above.  */
          for (int i = EDGE_COUNT (dest->succs) - 1; i >= 0; --i)
            if (!bitmap_bit_p (exit_bbs, EDGE_SUCC (dest, i)->dest->index))
              estack.quick_push (std::make_pair (dest,
                                                 EDGE_SUCC (dest, i)->dest));
        }
    }
  rev_post_order[--idx] = dest->index;
  if (!estack.is_empty ())
    /* Return from DFS recursion.  */
    goto ret_from_dfs_rpo;

  /* Each SCC's extent runs from the position of its header inside
     the RPO array up to RPO array index scc_end[header-index].  */
  if (toplevel_scc_extents)
    for (int i = 0; i < rev_post_order_num; i++)
      {
        basic_block bb = BASIC_BLOCK_FOR_FN (fn, rev_post_order[i]);
        if (bb->flags & is_header)
          {
            toplevel_scc_extents->safe_push
              (std::make_pair (i, bb_data[bb->index].scc_end));
            i = bb_data[bb->index].scc_end;
          }
      }

  /* Clear the temporarily allocated flags and free memory.  */
  for (int i = 0; i < rev_post_order_num; ++i)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (fn, rev_post_order[i]);
      if (bb->flags & is_header)
        bb_data[bb->index].scc_exits.~scc_exit_vec_t ();
      bb->flags &= ~(visited|is_header);
    }

  XDELETEVEC (bb_data);

  return rev_post_order_num;
}

/* Perform a depth-first search on the _reverse_ graph, marking the
   nodes visited as it goes.

   The computation is split into pieces:

   The constructor depth_first_search () creates the necessary data
   structures.

   depth_first_search::add_bb () adds a basic block to the data
   structures.  The block will start the search.

   depth_first_search::execute () continues (or starts) the
   search using the block on the top of the stack, stopping when the
   stack is empty.

   The destructor destroys the data structures.

   Thus, the user will probably construct the object, call add_bb () to
   add a beginning basic block to the stack, call execute (),
   possibly add another bb to the stack and again call execute (),
   ..., and finally let the destructor clean up.  */

/* Initialize the data structures used for depth-first search on the
   reverse graph.  */

depth_first_search::depth_first_search () :
  m_stack (n_basic_blocks_for_fn (cfun)),
  m_visited_blocks (last_basic_block_for_fn (cfun))
{
  bitmap_clear (m_visited_blocks);
}

/* Add the specified basic block to the top of the dfs data
   structures.  When the search continues, it will start at the
   block.  */

void
depth_first_search::add_bb (basic_block bb)
{
  m_stack.quick_push (bb);
  bitmap_set_bit (m_visited_blocks, bb->index);
}

/* Continue the depth-first search through the reverse graph starting with the
   block at the stack's top and ending when the stack is empty.  Visited nodes
   are marked.  Returns an unvisited basic block, or NULL if there is none
   available.  */

basic_block
depth_first_search::execute (basic_block last_unvisited)
{
  basic_block bb;
  edge e;
  edge_iterator ei;

  while (!m_stack.is_empty ())
    {
      bb = m_stack.pop ();

      /* Perform depth-first search on adjacent vertices.  */
      FOR_EACH_EDGE (e, ei, bb->preds)
        if (!bitmap_bit_p (m_visited_blocks, e->src->index))
          add_bb (e->src);
    }

  /* Determine if there are unvisited basic blocks.  */
  FOR_BB_BETWEEN (bb, last_unvisited, NULL, prev_bb)
    if (!bitmap_bit_p (m_visited_blocks, bb->index))
      return bb;

  return NULL;
}

/* Performs dfs search from BB over vertices satisfying PREDICATE;
   if REVERSE, go against direction of edges.  Returns number of blocks
   found and their list in RSLT.  RSLT can contain at most RSLT_MAX items.  */
int
dfs_enumerate_from (basic_block bb, int reverse,
                    bool (*predicate) (const_basic_block, const void *),
                    basic_block *rslt, int rslt_max, const void *data)
{
  basic_block *st, lbb;
  int sp = 0, tv = 0;

  auto_bb_flag visited (cfun);

#define MARK_VISITED(BB) ((BB)->flags |= visited)
#define UNMARK_VISITED(BB) ((BB)->flags &= ~visited)
#define VISITED_P(BB) (((BB)->flags & visited) != 0)

  st = XNEWVEC (basic_block, rslt_max);
  rslt[tv++] = st[sp++] = bb;
  MARK_VISITED (bb);
  while (sp)
    {
      edge e;
      edge_iterator ei;
      lbb = st[--sp];
      if (reverse)
        {
          FOR_EACH_EDGE (e, ei, lbb->preds)
            if (!VISITED_P (e->src) && predicate (e->src, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->src;
                MARK_VISITED (e->src);
              }
        }
      else
        {
          FOR_EACH_EDGE (e, ei, lbb->succs)
            if (!VISITED_P (e->dest) && predicate (e->dest, data))
              {
                gcc_assert (tv != rslt_max);
                rslt[tv++] = st[sp++] = e->dest;
                MARK_VISITED (e->dest);
              }
        }
    }
  free (st);
  for (sp = 0; sp < tv; sp++)
    UNMARK_VISITED (rslt[sp]);
  return tv;
#undef MARK_VISITED
#undef UNMARK_VISITED
#undef VISITED_P
}

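/* Usage sketch (illustrative only): collect blocks reachable from a
   header through a forward walk, keeping only blocks dominated by the
   header.  It assumes CDI_DOMINATORS info is available, and BBS must
   have room for MAX_BBS entries or the gcc_assert in dfs_enumerate_from
   triggers.  Both helpers are hypothetical.  */
#if 0
static bool
dominated_by_header_p (const_basic_block bb, const void *header)
{
  return dominated_by_p (CDI_DOMINATORS, bb, (const_basic_block) header);
}

static int
collect_dominated_succs (basic_block header, basic_block *bbs, int max_bbs)
{
  return dfs_enumerate_from (header, 0, dominated_by_header_p,
                             bbs, max_bbs, header);
}
#endif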

/* Compute dominance frontiers, ala Harvey, Ferrante, et al.

   This algorithm can be found in Timothy Harvey's PhD thesis, at
   http://www.cs.rice.edu/~harv/dissertation.pdf in the section on iterative
   dominance algorithms.

   First, we identify each join point, j (any node with more than one
   incoming edge is a join point).

   We then examine each predecessor, p, of j and walk up the dominator tree
   starting at p.

   We stop the walk when we reach j's immediate dominator - j is in the
   dominance frontier of each of the nodes in the walk, except for j's
   immediate dominator.  Intuitively, all of the rest of j's dominators are
   shared by j's predecessors as well.
   Since they dominate j, they will not have j in their dominance frontiers.

   The number of nodes touched by this algorithm is equal to the size
   of the dominance frontiers, no more, no less.  */

void
compute_dominance_frontiers (bitmap_head *frontiers)
{
  timevar_push (TV_DOM_FRONTIERS);

  edge p;
  edge_iterator ei;
  basic_block b;
  FOR_EACH_BB_FN (b, cfun)
    {
      if (EDGE_COUNT (b->preds) >= 2)
        {
          basic_block domsb = get_immediate_dominator (CDI_DOMINATORS, b);
          FOR_EACH_EDGE (p, ei, b->preds)
            {
              basic_block runner = p->src;
              if (runner == ENTRY_BLOCK_PTR_FOR_FN (cfun))
                continue;

              while (runner != domsb)
                {
                  if (!bitmap_set_bit (&frontiers[runner->index], b->index))
                    break;
                  runner = get_immediate_dominator (CDI_DOMINATORS, runner);
                }
            }
        }
    }

  timevar_pop (TV_DOM_FRONTIERS);
}

/* Given a set of blocks with variable definitions (DEF_BLOCKS),
   return a bitmap with all the blocks in the iterated dominance
   frontier of the blocks in DEF_BLOCKS.  DFS contains dominance
   frontier information as returned by compute_dominance_frontiers.

   The resulting set of blocks are the potential sites where PHI nodes
   are needed.  The caller is responsible for freeing the memory
   allocated for the return value.  */

bitmap
compute_idf (bitmap def_blocks, bitmap_head *dfs)
{
  bitmap_iterator bi;
  unsigned bb_index, i;
  bitmap phi_insertion_points;

  phi_insertion_points = BITMAP_ALLOC (NULL);

  /* Seed the work set with all the blocks in DEF_BLOCKS.  */
  auto_bitmap work_set;
  bitmap_copy (work_set, def_blocks);
  bitmap_tree_view (work_set);

  /* Pop a block off the workset, add every block that appears in
     the original block's DF that we have not already processed to
     the workset.  Iterate until the workset is empty.  Blocks
     which are added to the workset are potential sites for
     PHI nodes.  */
  while (!bitmap_empty_p (work_set))
    {
      /* The dominance frontier of a block consists of blocks after it,
         so iterating over earlier blocks first is better.
         ??? Basic blocks are by no means guaranteed to be ordered in
         optimal order for this iteration.  */
      bb_index = bitmap_clear_first_set_bit (work_set);

      /* Since the registration of NEW -> OLD name mappings is done
         separately from the call to update_ssa, when updating the SSA
         form, the basic blocks where new and/or old names are defined
         may have disappeared by CFG cleanup calls.  In this case,
         we may pull a non-existing block from the work stack.  */
      gcc_checking_assert (bb_index
                           < (unsigned) last_basic_block_for_fn (cfun));

      /* The population counts of the dominance frontiers are low
         compared to that of phi_insertion_points which approaches
         the IDF, and of work_set which is at most that of the IDF
         as well.  That makes iterating over the DFS bitmaps preferable
         to whole-bitmap operations involving also phi_insertion_points.  */
      EXECUTE_IF_SET_IN_BITMAP (&dfs[bb_index], 0, i, bi)
        if (bitmap_set_bit (phi_insertion_points, i))
          bitmap_set_bit (work_set, i);
    }

  return phi_insertion_points;
}

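/* Usage sketch (illustrative only): the classic SSA PHI-placement
   pipeline pairs compute_dominance_frontiers with compute_idf.
   phi_sites_for is a hypothetical helper; DEF_BLOCKS is assumed to hold
   the indices of blocks defining the variable of interest.  */
#if 0
static bitmap
phi_sites_for (bitmap def_blocks)
{
  bitmap_head *dfs = XNEWVEC (bitmap_head, last_basic_block_for_fn (cfun));
  for (int i = 0; i < last_basic_block_for_fn (cfun); i++)
    bitmap_initialize (&dfs[i], &bitmap_default_obstack);

  calculate_dominance_info (CDI_DOMINATORS);
  compute_dominance_frontiers (dfs);
  bitmap idf = compute_idf (def_blocks, dfs);

  for (int i = 0; i < last_basic_block_for_fn (cfun); i++)
    bitmap_clear (&dfs[i]);
  XDELETEVEC (dfs);
  return idf;
}
#endif
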
/* Intersection and union of preds/succs for sbitmap based data flow
   solvers.  All four functions defined below take the same arguments:
   B is the basic block to perform the operation for.  DST is the
   target sbitmap, i.e. the result.  SRC is an sbitmap vector of size
   last_basic_block so that it can be indexed with basic block indices.
   DST may be (but does not have to be) SRC[B->index].  */

/* Set the bitmap DST to the intersection of SRC of successors of
   basic block B.  */

void
bitmap_intersection_of_succs (sbitmap dst, sbitmap *src, basic_block b)
{
  unsigned int set_size = dst->size;
  edge e;
  unsigned ix;

  for (e = NULL, ix = 0; ix < EDGE_COUNT (b->succs); ix++)
    {
      e = EDGE_SUCC (b, ix);
      if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
        continue;

      bitmap_copy (dst, src[e->dest->index]);
      break;
    }

  if (e == 0)
    bitmap_ones (dst);
  else
    for (++ix; ix < EDGE_COUNT (b->succs); ix++)
      {
        unsigned int i;
        SBITMAP_ELT_TYPE *p, *r;

        e = EDGE_SUCC (b, ix);
        if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          continue;

        p = src[e->dest->index]->elms;
        r = dst->elms;
        for (i = 0; i < set_size; i++)
          *r++ &= *p++;
      }
}

/* Set the bitmap DST to the intersection of SRC of predecessors of
   basic block B.  */

void
bitmap_intersection_of_preds (sbitmap dst, sbitmap *src, basic_block b)
{
  unsigned int set_size = dst->size;
  edge e;
  unsigned ix;

  for (e = NULL, ix = 0; ix < EDGE_COUNT (b->preds); ix++)
    {
      e = EDGE_PRED (b, ix);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        continue;

      bitmap_copy (dst, src[e->src->index]);
      break;
    }

  if (e == 0)
    bitmap_ones (dst);
  else
    for (++ix; ix < EDGE_COUNT (b->preds); ix++)
      {
        unsigned int i;
        SBITMAP_ELT_TYPE *p, *r;

        e = EDGE_PRED (b, ix);
        if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
          continue;

        p = src[e->src->index]->elms;
        r = dst->elms;
        for (i = 0; i < set_size; i++)
          *r++ &= *p++;
      }
}

/* Set the bitmap DST to the union of SRC of successors of
   basic block B.  */

void
bitmap_union_of_succs (sbitmap dst, sbitmap *src, basic_block b)
{
  unsigned int set_size = dst->size;
  edge e;
  unsigned ix;

  for (ix = 0; ix < EDGE_COUNT (b->succs); ix++)
    {
      e = EDGE_SUCC (b, ix);
      if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
        continue;

      bitmap_copy (dst, src[e->dest->index]);
      break;
    }

  if (ix == EDGE_COUNT (b->succs))
    bitmap_clear (dst);
  else
    for (ix++; ix < EDGE_COUNT (b->succs); ix++)
      {
        unsigned int i;
        SBITMAP_ELT_TYPE *p, *r;

        e = EDGE_SUCC (b, ix);
        if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
          continue;

        p = src[e->dest->index]->elms;
        r = dst->elms;
        for (i = 0; i < set_size; i++)
          *r++ |= *p++;
      }
}

/* Set the bitmap DST to the union of SRC of predecessors of
   basic block B.  */

void
bitmap_union_of_preds (sbitmap dst, sbitmap *src, basic_block b)
{
  unsigned int set_size = dst->size;
  edge e;
  unsigned ix;

  for (ix = 0; ix < EDGE_COUNT (b->preds); ix++)
    {
      e = EDGE_PRED (b, ix);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        continue;

      bitmap_copy (dst, src[e->src->index]);
      break;
    }

  if (ix == EDGE_COUNT (b->preds))
    bitmap_clear (dst);
  else
    for (ix++; ix < EDGE_COUNT (b->preds); ix++)
      {
        unsigned int i;
        SBITMAP_ELT_TYPE *p, *r;

        e = EDGE_PRED (b, ix);
        if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
          continue;

        p = src[e->src->index]->elms;
        r = dst->elms;
        for (i = 0; i < set_size; i++)
          *r++ |= *p++;
      }
}

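/* Usage sketch (illustrative only): one confluence step of a typical
   forward sbitmap dataflow solver, computing IN[b] as the union of the
   OUT sets of b's predecessors.  IN and OUT are assumed to be sbitmap
   vectors indexed by block number, as described above; confluence_step
   is a hypothetical helper.  */
#if 0
static void
confluence_step (sbitmap *in, sbitmap *out)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    bitmap_union_of_preds (in[bb->index], out, bb);
}
#endif
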
/* Returns the list of basic blocks in the function in an order that guarantees
   that if a block X has just a single predecessor Y, then Y is after X in the
   ordering.  */

basic_block *
single_pred_before_succ_order (void)
{
  basic_block x, y;
  basic_block *order = XNEWVEC (basic_block, n_basic_blocks_for_fn (cfun));
  unsigned n = n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS;
  unsigned np, i;
  auto_sbitmap visited (last_basic_block_for_fn (cfun));

#define MARK_VISITED(BB) (bitmap_set_bit (visited, (BB)->index))
#define VISITED_P(BB) (bitmap_bit_p (visited, (BB)->index))

  bitmap_clear (visited);

  MARK_VISITED (ENTRY_BLOCK_PTR_FOR_FN (cfun));
  FOR_EACH_BB_FN (x, cfun)
    {
      if (VISITED_P (x))
        continue;

      /* Walk the predecessors of x as long as they have precisely one
         predecessor and add them to the list, so that they get stored
         after x.  */
      for (y = x, np = 1;
           single_pred_p (y) && !VISITED_P (single_pred (y));
           y = single_pred (y))
        np++;
      for (y = x, i = n - np;
           single_pred_p (y) && !VISITED_P (single_pred (y));
           y = single_pred (y), i++)
        {
          order[i] = y;
          MARK_VISITED (y);
        }
      order[i] = y;
      MARK_VISITED (y);

      gcc_assert (i == n - 1);
      n -= np;
    }

  gcc_assert (n == 0);
  return order;

#undef MARK_VISITED
#undef VISITED_P
}

/* Ignoring loop backedges, if BB has precisely one incoming edge then
   return that edge.  Otherwise return NULL.

   When IGNORE_NOT_EXECUTABLE is true, also ignore edges that are not marked
   as executable.  */

edge
single_pred_edge_ignoring_loop_edges (basic_block bb,
                                      bool ignore_not_executable)
{
  edge retval = NULL;
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      /* A loop back edge can be identified by the destination of
         the edge dominating the source of the edge.  */
      if (dominated_by_p (CDI_DOMINATORS, e->src, e->dest))
        continue;

      /* We can safely ignore edges that are not executable.  */
      if (ignore_not_executable
          && (e->flags & EDGE_EXECUTABLE) == 0)
        continue;

      /* If we have already seen a non-loop edge, then we must have
         multiple incoming non-loop edges and thus we return NULL.  */
      if (retval)
        return NULL;

      /* This is the first non-loop incoming edge we have found.  Record
         it.  */
      retval = e;
    }

  return retval;
}

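/* Usage sketch (illustrative only): threading-style code uses this to
   find the unique non-loop entry into a block.  It relies on
   CDI_DOMINATORS info being available, since dominated_by_p is used to
   identify back edges.  unique_nonloop_pred is a hypothetical helper.  */
#if 0
static basic_block
unique_nonloop_pred (basic_block bb)
{
  edge e = single_pred_edge_ignoring_loop_edges (bb, false);
  return e ? e->src : NULL;
}
#endif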
