/* Control flow graph manipulation code for GNU compiler.
   Copyright (C) 1987-2024 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* This file contains low level functions to manipulate the CFG and analyze it
   that are aware of the RTL intermediate language.

   Available functionality:
     - Basic CFG/RTL manipulation API documented in cfghooks.h
	 delete_insn, delete_insn_chain
     - CFG-aware instruction chain manipulation
	 delete_insn, delete_insn_chain
     - Edge splitting and committing to edges
	 insert_insn_on_edge, prepend_insn_to_edge, commit_edge_insertions
     - CFG updating after insn simplification
	 purge_dead_edges, purge_all_dead_edges
     - CFG fixing after coarse manipulation
	 fixup_abnormal_edges

   Functions not intended for generic use:
     - Infrastructure to determine quickly the basic block for an insn
	 compute_bb_for_insn, update_bb_for_insn, set_block_for_insn
     - Edge redirection with updating and optimizing of insn chain
	 block_label, tidy_fallthru_edge, force_nonfallthru  */
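
/* Illustrative sketch, not part of this file: a typical use of the edge
   insertion API listed above.  A sequence of new insns is built, queued
   on an edge, and only materialized (splitting the edge if it is
   critical) when commit_edge_insertions is called.  The context here
   (registers REG1/REG2 and edge E) is assumed for the example:

     start_sequence ();
     emit_move_insn (reg1, reg2);	// build the insns to insert
     rtx_insn *seq = get_insns ();
     end_sequence ();
     insert_insn_on_edge (seq, e);	// queue SEQ on edge E
     ...
     commit_edge_insertions ();	// emit everything queued on edges
*/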

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "cfghooks.h"
#include "df.h"
#include "insn-config.h"
#include "memmodel.h"
#include "emit-rtl.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "bb-reorder.h"
#include "rtl-error.h"
#include "insn-attr.h"
#include "dojump.h"
#include "expr.h"
#include "cfgloop.h"
#include "tree-pass.h"
#include "print-rtl.h"
#include "rtl-iter.h"
#include "gimplify.h"
#include "profile.h"
#include "sreal.h"

/* Disable warnings about missing quoting in GCC diagnostics.  */
#if __GNUC__ >= 10
# pragma GCC diagnostic push
# pragma GCC diagnostic ignored "-Wformat-diag"
#endif

/* Holds the interesting leading and trailing notes for the function.
   Only applicable if the CFG is in cfglayout mode.  */
static GTY(()) rtx_insn *cfg_layout_function_footer;
static GTY(()) rtx_insn *cfg_layout_function_header;

static rtx_insn *skip_insns_after_block (basic_block);
static void record_effective_endpoints (void);
static void fixup_reorder_chain (void);

void verify_insn_chain (void);
static void fixup_fallthru_exit_predecessor (void);
static bool can_delete_note_p (const rtx_note *);
static bool can_delete_label_p (const rtx_code_label *);
static basic_block rtl_split_edge (edge);
static bool rtl_move_block_after (basic_block, basic_block);
static bool rtl_verify_flow_info (void);
static basic_block cfg_layout_split_block (basic_block, void *);
static edge cfg_layout_redirect_edge_and_branch (edge, basic_block);
static basic_block cfg_layout_redirect_edge_and_branch_force (edge, basic_block);
static void cfg_layout_delete_block (basic_block);
static void rtl_delete_block (basic_block);
static basic_block rtl_redirect_edge_and_branch_force (edge, basic_block);
static edge rtl_redirect_edge_and_branch (edge, basic_block);
static basic_block rtl_split_block (basic_block, void *);
static void rtl_dump_bb (FILE *, basic_block, int, dump_flags_t);
static bool rtl_verify_flow_info_1 (void);
static void rtl_make_forwarder_block (edge);
static bool rtl_bb_info_initialized_p (basic_block bb);

/* Return true if NOTE is not one of the ones that must be kept paired,
   so that we may simply delete it.  */

static bool
can_delete_note_p (const rtx_note *note)
{
  switch (NOTE_KIND (note))
    {
    case NOTE_INSN_DELETED:
    case NOTE_INSN_BASIC_BLOCK:
    case NOTE_INSN_EPILOGUE_BEG:
      return true;

    default:
      return false;
    }
}

/* True if a given label can be deleted.  */

static bool
can_delete_label_p (const rtx_code_label *label)
{
  return (!LABEL_PRESERVE_P (label)
	  /* User declared labels must be preserved.  */
	  && LABEL_NAME (label) == 0
	  && !vec_safe_contains<rtx_insn *> (forced_labels,
					     const_cast<rtx_code_label *> (label)));
}

/* Delete INSN by patching it out.  */

void
delete_insn (rtx_insn *insn)
{
  rtx note;
  bool really_delete = true;

  if (LABEL_P (insn))
    {
      /* Some labels can't be directly removed from the INSN chain, as they
	 might be referenced from variables, the constant pool, etc.
	 Convert them to the special NOTE_INSN_DELETED_LABEL note.  */
      if (! can_delete_label_p (as_a <rtx_code_label *> (insn)))
	{
	  const char *name = LABEL_NAME (insn);
	  basic_block bb = BLOCK_FOR_INSN (insn);
	  rtx_insn *bb_note = NEXT_INSN (insn);

	  really_delete = false;
	  PUT_CODE (insn, NOTE);
	  NOTE_KIND (insn) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (insn) = name;

	  /* If the note following the label starts a basic block, and the
	     label is a member of the same basic block, interchange the two.  */
	  if (bb_note != NULL_RTX
	      && NOTE_INSN_BASIC_BLOCK_P (bb_note)
	      && bb != NULL
	      && bb == BLOCK_FOR_INSN (bb_note))
	    {
	      reorder_insns_nobb (insn, insn, bb_note);
	      BB_HEAD (bb) = bb_note;
	      if (BB_END (bb) == bb_note)
		BB_END (bb) = insn;
	    }
	}

      remove_node_from_insn_list (insn, &nonlocal_goto_handler_labels);
    }

  if (really_delete)
    {
      /* If this insn has already been deleted, something is very wrong.  */
      gcc_assert (!insn->deleted ());
      if (INSN_P (insn))
	df_insn_delete (insn);
      remove_insn (insn);
      insn->set_deleted ();
    }

  /* If deleting a jump, decrement the use count of the label.  Deleting
     the label itself should happen in the normal course of block merging.  */
  if (JUMP_P (insn))
    {
      if (JUMP_LABEL (insn)
	  && LABEL_P (JUMP_LABEL (insn)))
	LABEL_NUSES (JUMP_LABEL (insn))--;

      /* If there are more targets, remove them too.  */
      while ((note
	      = find_reg_note (insn, REG_LABEL_TARGET, NULL_RTX)) != NULL_RTX
	     && LABEL_P (XEXP (note, 0)))
	{
	  LABEL_NUSES (XEXP (note, 0))--;
	  remove_note (insn, note);
	}
    }

  /* Also if deleting any insn that references a label as an operand.  */
  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, NULL_RTX)) != NULL_RTX
	 && LABEL_P (XEXP (note, 0)))
    {
      LABEL_NUSES (XEXP (note, 0))--;
      remove_note (insn, note);
    }

  if (rtx_jump_table_data *table = dyn_cast <rtx_jump_table_data *> (insn))
    {
      rtvec vec = table->get_labels ();
      int len = GET_NUM_ELEM (vec);
      int i;

      for (i = 0; i < len; i++)
	{
	  rtx label = XEXP (RTVEC_ELT (vec, i), 0);

	  /* When deleting code in bulk (e.g. removing many unreachable
	     blocks) we can delete a label that's a target of the vector
	     before deleting the vector itself.  */
	  if (!NOTE_P (label))
	    LABEL_NUSES (label)--;
	}
    }
}
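
/* Illustrative sketch, not part of GCC: the LABEL_NUSES updates in
   delete_insn above are plain reference counting.  A self-contained
   model (all types here are hypothetical):

     #include <cassert>

     struct toy_label { int nuses = 0; };
     struct toy_jump { toy_label *target; };

     static void toy_delete_jump (toy_jump &j)
     {
       // Deleting a jump drops one reference to its target label; the
       // label itself only becomes deletable once nuses reaches zero.
       assert (j.target->nuses > 0);
       j.target->nuses--;
       j.target = nullptr;
     }
*/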

/* Like delete_insn but also purge dead edges from BB.
   Return true if any edges are eliminated.  */

bool
delete_insn_and_edges (rtx_insn *insn)
{
  bool purge = false;

  if (NONDEBUG_INSN_P (insn) && BLOCK_FOR_INSN (insn))
    {
      basic_block bb = BLOCK_FOR_INSN (insn);
      if (BB_END (bb) == insn)
	purge = true;
      else if (DEBUG_INSN_P (BB_END (bb)))
	for (rtx_insn *dinsn = NEXT_INSN (insn);
	     DEBUG_INSN_P (dinsn); dinsn = NEXT_INSN (dinsn))
	  if (BB_END (bb) == dinsn)
	    {
	      purge = true;
	      break;
	    }
    }
  delete_insn (insn);
  if (purge)
    return purge_dead_edges (BLOCK_FOR_INSN (insn));
  return false;
}

/* Unlink a chain of insns between START and FINISH, leaving notes
   that must be paired.  If CLEAR_BB is true, set the BB field of
   insns that cannot be removed to NULL.  */

void
delete_insn_chain (rtx start, rtx_insn *finish, bool clear_bb)
{
  /* Unchain the insns one by one.  It would be quicker to delete all of these
     with a single unchaining, rather than one at a time, but we need to keep
     the NOTEs.  */
  rtx_insn *current = finish;
  while (1)
    {
      rtx_insn *prev = PREV_INSN (current);
      if (NOTE_P (current) && !can_delete_note_p (as_a <rtx_note *> (current)))
	;
      else
	delete_insn (current);

      if (clear_bb && !current->deleted ())
	set_block_for_insn (current, NULL);

      if (current == start)
	break;
      current = prev;
    }
}
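
/* Illustrative sketch, not part of GCC: delete_insn_chain above records
   PREV before deleting CURRENT so that the backward walk survives the
   unlinking.  The same pattern on a self-contained doubly linked list
   (all types here are hypothetical):

     struct toy_node { toy_node *prev, *next; bool keep; };

     static void toy_delete_range (toy_node *start, toy_node *finish)
     {
       toy_node *current = finish;
       while (true)
	 {
	   toy_node *prev = current->prev;   // save before unlinking
	   bool was_start = current == start;
	   if (!current->keep)
	     {
	       if (current->prev) current->prev->next = current->next;
	       if (current->next) current->next->prev = current->prev;
	       delete current;
	     }
	   if (was_start)
	     break;
	   current = prev;
	 }
     }
*/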

/* Create a new basic block consisting of the instructions between HEAD and
   END inclusive.  This function is designed to allow fast BB construction -
   it reuses the note and basic block struct in BB_NOTE, if any, does not
   grow the BASIC_BLOCK chain, and should be used directly only by CFG
   construction code.  END can be NULL to create a new empty basic block
   before HEAD.  Both END and HEAD can be NULL to create a basic block at
   the end of the INSN chain.  AFTER is the basic block the new block
   should be put after.  */

basic_block
create_basic_block_structure (rtx_insn *head, rtx_insn *end, rtx_note *bb_note,
			      basic_block after)
{
  basic_block bb;

  if (bb_note
      && (bb = NOTE_BASIC_BLOCK (bb_note)) != NULL
      && bb->aux == NULL)
    {
      /* If we found an existing note, thread it back onto the chain.  */

      rtx_insn *after;

      if (LABEL_P (head))
	after = head;
      else
	{
	  after = PREV_INSN (head);
	  head = bb_note;
	}

      if (after != bb_note && NEXT_INSN (after) != bb_note)
	reorder_insns_nobb (bb_note, bb_note, after);
    }
  else
    {
      /* Otherwise we must create a note and a basic block structure.  */

      bb = alloc_block ();

      init_rtl_bb_info (bb);
      if (!head && !end)
	head = end = bb_note
	  = emit_note_after (NOTE_INSN_BASIC_BLOCK, get_last_insn ());
      else if (LABEL_P (head) && end)
	{
	  bb_note = emit_note_after (NOTE_INSN_BASIC_BLOCK, head);
	  if (head == end)
	    end = bb_note;
	}
      else
	{
	  bb_note = emit_note_before (NOTE_INSN_BASIC_BLOCK, head);
	  head = bb_note;
	  if (!end)
	    end = head;
	}

      NOTE_BASIC_BLOCK (bb_note) = bb;
    }

  /* Always include the bb note in the block.  */
  if (NEXT_INSN (end) == bb_note)
    end = bb_note;

  BB_HEAD (bb) = head;
  BB_END (bb) = end;
  bb->index = last_basic_block_for_fn (cfun)++;
  bb->flags = BB_NEW | BB_RTL;
  link_block (bb, after);
  SET_BASIC_BLOCK_FOR_FN (cfun, bb->index, bb);
  df_bb_refs_record (bb->index, false);
  update_bb_for_insn (bb);
  BB_SET_PARTITION (bb, BB_UNPARTITIONED);

  /* Tag the block so that we know it has been used when considering
     other basic block notes.  */
  bb->aux = bb;

  return bb;
}

/* Create a new basic block consisting of the instructions between HEAD
   and END and place it into the BB chain after block AFTER.  END can be
   NULL to create a new empty basic block before HEAD.  Both END and HEAD
   can be NULL to create a basic block at the end of the INSN chain.  */

static basic_block
rtl_create_basic_block (void *headp, void *endp, basic_block after)
{
  rtx_insn *head = (rtx_insn *) headp;
  rtx_insn *end = (rtx_insn *) endp;
  basic_block bb;

  /* Grow the basic block array if needed.  */
  if ((size_t) last_basic_block_for_fn (cfun)
      >= basic_block_info_for_fn (cfun)->length ())
    vec_safe_grow_cleared (basic_block_info_for_fn (cfun),
			   last_basic_block_for_fn (cfun) + 1);

  n_basic_blocks_for_fn (cfun)++;

  bb = create_basic_block_structure (head, end, NULL, after);
  bb->aux = NULL;
  return bb;
}

static basic_block
cfg_layout_create_basic_block (void *head, void *end, basic_block after)
{
  basic_block newbb = rtl_create_basic_block (head, end, after);

  return newbb;
}

/* Delete the insns in a (non-live) block.  We physically delete every
   non-deleted-note insn, and update the flow graph appropriately.  */

/* ??? Preserving all such notes strikes me as wrong.  It would be nice
   to post-process the stream to remove empty blocks, loops, ranges, etc.  */

static void
rtl_delete_block (basic_block b)
{
  rtx_insn *insn, *end;

  /* If the head of this block is a CODE_LABEL, then it might be the
     label for an exception handler which can't be reached.  We need
     to remove the label from the exception_handler_label list.  */
  insn = BB_HEAD (b);

  end = get_last_bb_insn (b);

  /* Selectively delete the entire chain.  */
  BB_HEAD (b) = NULL;
  delete_insn_chain (insn, end, true);

  if (dump_file)
    fprintf (dump_file, "deleting block %d\n", b->index);
  df_bb_delete (b->index);
}

/* Records the basic block struct in BLOCK_FOR_INSN for every insn.  */

void
compute_bb_for_insn (void)
{
  basic_block bb;

  FOR_EACH_BB_FN (bb, cfun)
    {
      rtx_insn *end = BB_END (bb);
      rtx_insn *insn;

      for (insn = BB_HEAD (bb); ; insn = NEXT_INSN (insn))
	{
	  BLOCK_FOR_INSN (insn) = bb;
	  if (insn == end)
	    break;
	}
    }
}

/* Release the basic_block_for_insn array.  */

void
free_bb_for_insn (void)
{
  rtx_insn *insn;
  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      BLOCK_FOR_INSN (insn) = NULL;
}

namespace {

const pass_data pass_data_free_cfg =
{
  RTL_PASS, /* type */
  "*free_cfg", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_NONE, /* tv_id */
  0, /* properties_required */
  0, /* properties_provided */
  PROP_cfg, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};

class pass_free_cfg : public rtl_opt_pass
{
public:
  pass_free_cfg (gcc::context *ctxt)
    : rtl_opt_pass (pass_data_free_cfg, ctxt)
  {}

  /* opt_pass methods: */
  unsigned int execute (function *) final override;

}; // class pass_free_cfg

unsigned int
pass_free_cfg::execute (function *)
{
  /* The resource.cc machinery uses DF but the CFG isn't guaranteed to be
     valid at that point so it would be too late to call df_analyze.  */
  if (DELAY_SLOTS && optimize > 0 && flag_delayed_branch)
    {
      df_note_add_problem ();
      df_analyze ();
    }

  if (crtl->has_bb_partition)
    insert_section_boundary_note ();

  free_bb_for_insn ();
  return 0;
}

} // anon namespace

rtl_opt_pass *
make_pass_free_cfg (gcc::context *ctxt)
{
  return new pass_free_cfg (ctxt);
}

/* Return the insn to emit after when we want to emit code at the entry
   of the function.  */

rtx_insn *
entry_of_function (void)
{
  return (n_basic_blocks_for_fn (cfun) > NUM_FIXED_BLOCKS ?
	  BB_HEAD (ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb) : get_insns ());
}

/* Emit INSN at the entry point of the function, ensuring that it is only
   executed once per function.  */

void
emit_insn_at_entry (rtx insn)
{
  edge_iterator ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
  edge e = ei_safe_edge (ei);
  gcc_assert (e->flags & EDGE_FALLTHRU);

  insert_insn_on_edge (insn, e);
  commit_edge_insertions ();
}

/* Update BLOCK_FOR_INSN of insns between BEGIN and END
   (or BARRIER if found) and notify df of the bb change.
   The insn chain range is inclusive
   (i.e. both BEGIN and END will be updated).  */

static void
update_bb_for_insn_chain (rtx_insn *begin, rtx_insn *end, basic_block bb)
{
  rtx_insn *insn;

  end = NEXT_INSN (end);
  for (insn = begin; insn != end; insn = NEXT_INSN (insn))
    if (!BARRIER_P (insn))
      df_insn_change_bb (insn, bb);
}

/* Update BLOCK_FOR_INSN of insns in BB to BB,
   and notify df of the change.  */

void
update_bb_for_insn (basic_block bb)
{
  update_bb_for_insn_chain (BB_HEAD (bb), BB_END (bb), bb);
}

/* Like active_insn_p, except keep the return value use or clobber around
   even after reload.  */

static bool
flow_active_insn_p (const rtx_insn *insn)
{
  if (active_insn_p (insn))
    return true;

  /* A clobber of the function return value exists for buggy
     programs that fail to return a value.  Its effect is to
     keep the return value from being live across the entire
     function.  If we allow it to be skipped, we introduce the
     possibility for register lifetime confusion.
     Similarly, keep a USE of the function return value, otherwise
     the USE is dropped and we could fail to thread jump if USE
     appears on some paths and not on others, see PR90257.  */
  if ((GET_CODE (PATTERN (insn)) == CLOBBER
       || GET_CODE (PATTERN (insn)) == USE)
      && REG_P (XEXP (PATTERN (insn), 0))
      && REG_FUNCTION_VALUE_P (XEXP (PATTERN (insn), 0)))
    return true;

  return false;
}

/* Return true if the block has no effect and only forwards control flow to
   its single destination.  */

bool
contains_no_active_insn_p (const_basic_block bb)
{
  rtx_insn *insn;

  if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
      || bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || !single_succ_p (bb)
      || (single_succ_edge (bb)->flags & EDGE_FAKE) != 0)
    return false;

  for (insn = BB_HEAD (bb); insn != BB_END (bb); insn = NEXT_INSN (insn))
    if (INSN_P (insn) && flow_active_insn_p (insn))
      return false;

  return (!INSN_P (insn)
	  || (JUMP_P (insn) && simplejump_p (insn))
	  || !flow_active_insn_p (insn));
}

/* Likewise, but protect loop latches, headers and preheaders.  */
/* FIXME: Make this a cfg hook.  */

bool
forwarder_block_p (const_basic_block bb)
{
  if (!contains_no_active_insn_p (bb))
    return false;

  /* Protect loop latches, headers and preheaders.  */
  if (current_loops)
    {
      basic_block dest;
      if (bb->loop_father->header == bb)
	return false;
      dest = EDGE_SUCC (bb, 0)->dest;
      if (dest->loop_father->header == dest)
	return false;
    }

  return true;
}
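
/* Illustrative sketch, not part of GCC: on a toy CFG, a forwarder block
   is a block with a single successor and no insns that have any effect,
   so control merely passes through it (all types here are hypothetical):

     #include <vector>

     struct toy_block
     {
       std::vector<toy_block *> succs;
       int n_active_insns;	// number of insns with a real effect
     };

     static bool toy_forwarder_p (const toy_block &bb)
     {
       return bb.succs.size () == 1 && bb.n_active_insns == 0;
     }
*/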

/* Return true if we can reach TARGET from SRC by falling through.  */
/* FIXME: Make this a cfg hook, the result is only valid in cfgrtl mode.  */

bool
can_fallthru (basic_block src, basic_block target)
{
  rtx_insn *insn = BB_END (src);
  rtx_insn *insn2;
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  if (src->next_bb != target)
    return false;

  /* ??? Later we may add code to move jump tables offline.  */
  if (tablejump_p (insn, NULL, NULL))
    return false;

  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;

  insn2 = BB_HEAD (target);
  if (!active_insn_p (insn2))
    insn2 = next_active_insn (insn2);

  return next_active_insn (insn) == insn2;
}
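
/* Illustrative sketch, not part of GCC: falling through requires TARGET
   to be the next block in the instruction stream, with no unconditional
   control transfer at the end of SRC.  A minimal model (types
   hypothetical):

     struct toy_bb
     {
       toy_bb *next;		// layout successor in the insn stream
       bool ends_in_jump;	// unconditional jump at the end
     };

     static bool toy_can_fallthru (const toy_bb &src, const toy_bb *target)
     {
       return src.next == target && !src.ends_in_jump;
     }
*/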

/* Return true if we could reach TARGET from SRC by falling through,
   if the target were made adjacent.  If we already have a fall-through
   edge to the exit block, we can't do that.  */
static bool
could_fall_through (basic_block src, basic_block target)
{
  edge e;
  edge_iterator ei;

  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;
  FOR_EACH_EDGE (e, ei, src->succs)
    if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
	&& e->flags & EDGE_FALLTHRU)
      return false;
  return true;
}

/* Return the NOTE_INSN_BASIC_BLOCK of BB.  */
rtx_note *
bb_note (basic_block bb)
{
  rtx_insn *note;

  note = BB_HEAD (bb);
  if (LABEL_P (note))
    note = NEXT_INSN (note);

  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (note));
  return as_a <rtx_note *> (note);
}

/* Return the INSN immediately following the NOTE_INSN_BASIC_BLOCK
   note associated with the BLOCK.  */

static rtx_insn *
first_insn_after_basic_block_note (basic_block block)
{
  rtx_insn *insn;

  /* Get the first instruction in the block.  */
  insn = BB_HEAD (block);

  if (insn == NULL_RTX)
    return NULL;
  if (LABEL_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));

  return NEXT_INSN (insn);
}

/* Creates a new basic block just after basic block BB by splitting
   everything after the specified instruction INSNP.  */

static basic_block
rtl_split_block (basic_block bb, void *insnp)
{
  basic_block new_bb;
  rtx_insn *insn = (rtx_insn *) insnp;
  edge e;
  edge_iterator ei;

  if (!insn)
    {
      insn = first_insn_after_basic_block_note (bb);

      if (insn)
	{
	  rtx_insn *next = insn;

	  insn = PREV_INSN (insn);

	  /* If the block contains only debug insns, insn would have
	     been NULL in a non-debug compilation, and then we'd end
	     up emitting a DELETED note.  For -fcompare-debug
	     stability, emit the note too.  */
	  if (insn != BB_END (bb)
	      && DEBUG_INSN_P (next)
	      && DEBUG_INSN_P (BB_END (bb)))
	    {
	      while (next != BB_END (bb) && DEBUG_INSN_P (next))
		next = NEXT_INSN (next);

	      if (next == BB_END (bb))
		emit_note_after (NOTE_INSN_DELETED, next);
	    }
	}
      else
	insn = get_last_insn ();
    }

  /* We probably should check the type of the insn so that we do not create
     an inconsistent cfg.  It is checked in verify_flow_info anyway, so do
     not bother.  */
  if (insn == BB_END (bb))
    emit_note_after (NOTE_INSN_DELETED, insn);

  /* Create the new basic block.  */
  new_bb = create_basic_block (NEXT_INSN (insn), BB_END (bb), bb);
  BB_COPY_PARTITION (new_bb, bb);
  BB_END (bb) = insn;

  /* Redirect the outgoing edges.  */
  new_bb->succs = bb->succs;
  bb->succs = NULL;
  FOR_EACH_EDGE (e, ei, new_bb->succs)
    e->src = new_bb;

  /* The new block starts off being dirty.  */
  df_set_bb_dirty (bb);
  return new_bb;
}

/* Return true if LOC1 and LOC2 are equivalent for
   unique_locus_on_edge_between_p purposes.  */

static bool
loc_equal (location_t loc1, location_t loc2)
{
  if (loc1 == loc2)
    return true;

  expanded_location loce1 = expand_location (loc1);
  expanded_location loce2 = expand_location (loc2);

  if (loce1.line != loce2.line
      || loce1.column != loce2.column
      || loce1.data != loce2.data)
    return false;
  if (loce1.file == loce2.file)
    return true;
  return (loce1.file != NULL
	  && loce2.file != NULL
	  && filename_cmp (loce1.file, loce2.file) == 0);
}

/* Return true if the single edge between blocks A and B is the only place
   in RTL which holds some unique locus.  */

static bool
unique_locus_on_edge_between_p (basic_block a, basic_block b)
{
  const location_t goto_locus = EDGE_SUCC (a, 0)->goto_locus;
  rtx_insn *insn, *end;

  if (LOCATION_LOCUS (goto_locus) == UNKNOWN_LOCATION)
    return false;

  /* First scan block A backward.  */
  insn = BB_END (a);
  end = PREV_INSN (BB_HEAD (a));
  while (insn != end && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
    insn = PREV_INSN (insn);

  if (insn != end && loc_equal (INSN_LOCATION (insn), goto_locus))
    return false;

  /* Then scan block B forward.  */
  insn = BB_HEAD (b);
  if (insn)
    {
      end = NEXT_INSN (BB_END (b));
      while (insn != end && !NONDEBUG_INSN_P (insn))
	insn = NEXT_INSN (insn);

      if (insn != end && INSN_HAS_LOCATION (insn)
	  && loc_equal (INSN_LOCATION (insn), goto_locus))
	return false;
    }

  return true;
}

/* If the single edge between blocks A and B is the only place in RTL which
   holds some unique locus, emit a nop with that locus between the blocks.  */

static void
emit_nop_for_unique_locus_between (basic_block a, basic_block b)
{
  if (!unique_locus_on_edge_between_p (a, b))
    return;

  BB_END (a) = emit_insn_after_noloc (gen_nop (), BB_END (a), a);
  INSN_LOCATION (BB_END (a)) = EDGE_SUCC (a, 0)->goto_locus;
}

/* Blocks A and B are to be merged into a single block A.  The insns
   are already contiguous.  */

static void
rtl_merge_blocks (basic_block a, basic_block b)
{
  /* If B is a forwarder block whose outgoing edge has no location, we'll
     propagate the locus of the edge between A and B onto it.  */
  const bool forward_edge_locus
    = (b->flags & BB_FORWARDER_BLOCK) != 0
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
  rtx_insn *b_head = BB_HEAD (b), *b_end = BB_END (b), *a_end = BB_END (a);
  rtx_insn *del_first = NULL, *del_last = NULL;
  rtx_insn *b_debug_start = b_end, *b_debug_end = b_end;
  bool b_empty = false;

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
	     a->index);

  while (DEBUG_INSN_P (b_end))
    b_end = PREV_INSN (b_debug_start = b_end);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (b_head))
    {
      /* Detect basic blocks with nothing but a label.  This can happen
	 in particular at the end of a function.  */
      if (b_head == b_end)
	b_empty = true;

      del_first = del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* Delete the basic block note and handle blocks containing just that
     note.  */
  if (NOTE_INSN_BASIC_BLOCK_P (b_head))
    {
      if (b_head == b_end)
	b_empty = true;
      if (! del_last)
	del_first = b_head;

      del_last = b_head;
      b_head = NEXT_INSN (b_head);
    }

  /* If there was a jump out of A, delete it.  */
  if (JUMP_P (a_end))
    {
      rtx_insn *prev;

      for (prev = PREV_INSN (a_end); ; prev = PREV_INSN (prev))
	if (!NOTE_P (prev)
	    || NOTE_INSN_BASIC_BLOCK_P (prev)
	    || prev == BB_HEAD (a))
	  break;

      del_first = a_end;

      a_end = PREV_INSN (del_first);
    }
  else if (BARRIER_P (NEXT_INSN (a_end)))
    del_first = NEXT_INSN (a_end);

  /* Delete everything marked above as well as crap that might be
     hanging out between the two blocks.  */
  BB_END (a) = a_end;
  BB_HEAD (b) = b_empty ? NULL : b_head;
  delete_insn_chain (del_first, del_last, true);

  /* If not optimizing, preserve the locus of the single edge between
     blocks A and B if necessary by emitting a nop.  */
  if (!optimize
      && !forward_edge_locus
      && !DECL_IGNORED_P (current_function_decl))
    {
      emit_nop_for_unique_locus_between (a, b);
      a_end = BB_END (a);
    }

  /* Reassociate the insns of B with A.  */
  if (!b_empty)
    {
      update_bb_for_insn_chain (a_end, b_debug_end, a);

      BB_END (a) = b_debug_end;
      BB_HEAD (b) = NULL;
    }
  else if (b_end != b_debug_end)
    {
      /* Move any deleted labels and other notes between the end of A
	 and the debug insns that make up B after the debug insns,
	 bringing the debug insns into A while keeping the notes after
	 the end of A.  */
      if (NEXT_INSN (a_end) != b_debug_start)
	reorder_insns_nobb (NEXT_INSN (a_end), PREV_INSN (b_debug_start),
			    b_debug_end);
      update_bb_for_insn_chain (b_debug_start, b_debug_end, a);
      BB_END (a) = b_debug_end;
    }

  df_bb_delete (b->index);

  if (forward_edge_locus)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}
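
/* Illustrative sketch, not part of GCC: stripped of the RTL bookkeeping
   in rtl_merge_blocks above, merging B into A splices B's instructions
   after A's and lets A take over B's outgoing edges (all types here are
   hypothetical):

     #include <list>
     #include <vector>

     struct toy_insn { int uid; };

     struct toy_merge_bb
     {
       std::list<toy_insn> insns;
       std::vector<toy_merge_bb *> succs;
     };

     static void toy_merge (toy_merge_bb &a, toy_merge_bb &b)
     {
       // A's trailing jump and B's leading label would be deleted here.
       a.insns.splice (a.insns.end (), b.insns);
       a.succs = b.succs;	// A inherits B's successor edges
       b.succs.clear ();
     }
*/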

/* Return true when blocks A and B can be merged.  */

static bool
rtl_can_merge_blocks (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
	  && single_succ (a) == b
	  && single_pred_p (b)
	  && a != b
	  /* Must be simple edge.  */
	  && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
	  && a->next_bb == b
	  && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  /* If the jump insn has side effects,
	     we can't kill the edge.  */
	  && (!JUMP_P (BB_END (a))
	      || (reload_completed
		  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}

/* Return the label in the head of basic block BLOCK.  Create one if it
   doesn't exist.  */

rtx_code_label *
block_label (basic_block block)
{
  if (block == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  if (!LABEL_P (BB_HEAD (block)))
    BB_HEAD (block) = emit_label_before (gen_label_rtx (), BB_HEAD (block));

  return as_a <rtx_code_label *> (BB_HEAD (block));
}

/* Remove all barriers from BB_FOOTER of a BB.  */

static void
remove_barriers_from_footer (basic_block bb)
{
  rtx_insn *insn = BB_FOOTER (bb);

  /* Remove barriers but keep jumptables.  */
  while (insn)
    {
      if (BARRIER_P (insn))
	{
	  if (PREV_INSN (insn))
	    SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	  else
	    BB_FOOTER (bb) = NEXT_INSN (insn);
	  if (NEXT_INSN (insn))
	    SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	}
      if (LABEL_P (insn))
	return;
      insn = NEXT_INSN (insn);
    }
}

/* Attempt to perform edge redirection by replacing a possibly complex jump
   instruction with an unconditional jump, or by removing the jump
   completely.  This can apply only if all edges now point to the same
   block.  The parameters and return values are equivalent to
   redirect_edge_and_branch.  */

edge
try_redirect_by_replacing_jump (edge e, basic_block target, bool in_cfglayout)
{
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);
  rtx set;
  bool fallthru = false;

  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return NULL;

  /* We can replace or remove a complex jump only when we have exactly
     two edges.  Also, if we have exactly one outgoing edge, we can
     redirect that.  */
  if (EDGE_COUNT (src->succs) >= 3
      /* Verify that all targets will be TARGET.  Specifically, the
	 edge that is not E must also go to TARGET.  */
      || (EDGE_COUNT (src->succs) == 2
	  && EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest != target))
    return NULL;

  if (!onlyjump_p (insn))
    return NULL;
  if ((!optimize || reload_completed) && tablejump_p (insn, NULL, NULL))
    return NULL;

  /* Avoid removing branch with side effects.  */
  set = single_set (insn);
  if (!set || side_effects_p (set))
    return NULL;

  /* See if we can create the fallthru edge.  */
  if (in_cfglayout || can_fallthru (src, target))
    {
      if (dump_file)
	fprintf (dump_file, "Removing jump %i.\n", INSN_UID (insn));
      fallthru = true;

      /* Selectively unlink whole insn chain.  */
      if (in_cfglayout)
	{
	  delete_insn_chain (insn, BB_END (src), false);
	  remove_barriers_from_footer (src);
	}
      else
	delete_insn_chain (insn, PREV_INSN (BB_HEAD (target)), false);
    }

  /* If this already is simplejump, redirect it.  */
  else if (simplejump_p (insn))
    {
      if (e->dest == target)
	return NULL;
      if (dump_file)
	fprintf (dump_file, "Redirecting jump %i from %i to %i.\n",
		 INSN_UID (insn), e->dest->index, target->index);
      if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			  block_label (target), 0))
	{
	  gcc_assert (target == EXIT_BLOCK_PTR_FOR_FN (cfun));
	  return NULL;
	}
    }

  /* Cannot do anything for target exit block.  */
  else if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return NULL;

  /* Or replace possibly complicated jump insn by simple jump insn.  */
  else
    {
      rtx_code_label *target_label = block_label (target);
      rtx_insn *barrier;
      rtx_insn *label;
      rtx_jump_table_data *table;

      emit_jump_insn_after_noloc (targetm.gen_jump (target_label), insn);
      JUMP_LABEL (BB_END (src)) = target_label;
      LABEL_NUSES (target_label)++;
      if (dump_file)
	fprintf (dump_file, "Replacing insn %i by jump %i\n",
		 INSN_UID (insn), INSN_UID (BB_END (src)));

      delete_insn_chain (insn, insn, false);

      /* Recognize a tablejump that we are converting to a
	 simple jump and remove its associated CODE_LABEL
	 and ADDR_VEC or ADDR_DIFF_VEC.  */
      if (tablejump_p (insn, &label, &table))
	delete_insn_chain (label, table, false);

      barrier = next_nonnote_nondebug_insn (BB_END (src));
      if (!barrier || !BARRIER_P (barrier))
	emit_barrier_after (BB_END (src));
      else
	{
	  if (barrier != NEXT_INSN (BB_END (src)))
	    {
	      /* Move the jump before barrier so that the notes
		 which originally were or were created before jump table are
		 inside the basic block.  */
	      rtx_insn *new_insn = BB_END (src);

	      update_bb_for_insn_chain (NEXT_INSN (BB_END (src)),
					PREV_INSN (barrier), src);

	      SET_NEXT_INSN (PREV_INSN (new_insn)) = NEXT_INSN (new_insn);
	      SET_PREV_INSN (NEXT_INSN (new_insn)) = PREV_INSN (new_insn);

	      SET_NEXT_INSN (new_insn) = barrier;
	      SET_NEXT_INSN (PREV_INSN (barrier)) = new_insn;

	      SET_PREV_INSN (new_insn) = PREV_INSN (barrier);
	      SET_PREV_INSN (barrier) = new_insn;
	    }
	}
    }

  /* Keep only one edge out and set proper flags.  */
  if (!single_succ_p (src))
    remove_edge (e);
  gcc_assert (single_succ_p (src));

  e = single_succ_edge (src);
  if (fallthru)
    e->flags = EDGE_FALLTHRU;
  else
    e->flags = 0;

  e->probability = profile_probability::always ();

  if (e->dest != target)
    redirect_edge_succ (e, target);
  return e;
}

/* Subroutine of redirect_branch_edge that tries to patch the jump
   instruction INSN so that it reaches block NEW_BB.  Do this only when
   it originally jumped to OLD_LABEL.  Return true if this worked or if
   the original target wasn't OLD_LABEL; return false if redirection
   doesn't work.  */

static bool
patch_jump_insn (rtx_insn *insn, rtx_insn *old_label, basic_block new_bb)
{
  rtx_jump_table_data *table;
  rtx tmp;
  /* Recognize a tablejump and adjust all matching cases.  */
  if (tablejump_p (insn, NULL, &table))
    {
      rtvec vec;
      int j;
      rtx_code_label *new_label = block_label (new_bb);

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      vec = table->get_labels ();

      for (j = GET_NUM_ELEM (vec) - 1; j >= 0; --j)
	if (XEXP (RTVEC_ELT (vec, j), 0) == old_label)
	  {
	    RTVEC_ELT (vec, j) = gen_rtx_LABEL_REF (Pmode, new_label);
	    --LABEL_NUSES (old_label);
	    ++LABEL_NUSES (new_label);
	  }

      /* Handle casesi dispatch insns.  */
      if ((tmp = tablejump_casesi_pattern (insn)) != NULL_RTX
	  && label_ref_label (XEXP (SET_SRC (tmp), 2)) == old_label)
	{
	  XEXP (SET_SRC (tmp), 2) = gen_rtx_LABEL_REF (Pmode,
						       new_label);
	  --LABEL_NUSES (old_label);
	  ++LABEL_NUSES (new_label);
	}
    }
  else if ((tmp = extract_asm_operands (PATTERN (insn))) != NULL)
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (tmp);
      rtx note;

      if (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
      rtx_code_label *new_label = block_label (new_bb);

      for (i = 0; i < n; ++i)
	{
	  rtx old_ref = ASM_OPERANDS_LABEL (tmp, i);
	  gcc_assert (GET_CODE (old_ref) == LABEL_REF);
	  if (XEXP (old_ref, 0) == old_label)
	    {
	      ASM_OPERANDS_LABEL (tmp, i)
		= gen_rtx_LABEL_REF (Pmode, new_label);
	      --LABEL_NUSES (old_label);
	      ++LABEL_NUSES (new_label);
	    }
	}

      if (JUMP_LABEL (insn) == old_label)
	{
	  JUMP_LABEL (insn) = new_label;
	  note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	  if (note)
	    remove_note (insn, note);
	}
      else
	{
	  note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	  if (note)
	    remove_note (insn, note);
	  if (JUMP_LABEL (insn) != new_label
	      && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
	    add_reg_note (insn, REG_LABEL_TARGET, new_label);
	}
      while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
	     != NULL_RTX)
	XEXP (note, 0) = new_label;
    }
  else
    {
      /* ??? We could play games with moving the named labels from
	 one basic block to the other in case only one computed_jump is
	 available.  */
      if (computed_jump_p (insn)
	  /* A return instruction can't be redirected.  */
	  || returnjump_p (insn))
	return false;

      if (!currently_expanding_to_rtl || JUMP_LABEL (insn) == old_label)
	{
	  /* If the insn doesn't go where we think, we're confused.  */
	  gcc_assert (JUMP_LABEL (insn) == old_label);

	  /* If the substitution doesn't succeed, die.  This can happen
	     if the back end emitted unrecognizable instructions or if
	     target is exit block on some arches.  Or for crossing
	     jumps.  */
	  if (!redirect_jump (as_a <rtx_jump_insn *> (insn),
			      block_label (new_bb), 0))
	    {
	      gcc_assert (new_bb == EXIT_BLOCK_PTR_FOR_FN (cfun)
			  || CROSSING_JUMP_P (insn));
	      return false;
	    }
	}
    }
  return true;
}

/* Redirect an edge representing a branch of an (un)conditional jump or
   tablejump.  Return NULL on failure.  */
static edge
redirect_branch_edge (edge e, basic_block target)
{
  rtx_insn *old_label = BB_HEAD (e->dest);
  basic_block src = e->src;
  rtx_insn *insn = BB_END (src);

  /* We can only redirect non-fallthru edges of jump insn.  */
  if (e->flags & EDGE_FALLTHRU)
    return NULL;
  else if (!JUMP_P (insn) && !currently_expanding_to_rtl)
    return NULL;

  if (!currently_expanding_to_rtl)
    {
      if (!patch_jump_insn (as_a <rtx_jump_insn *> (insn), old_label, target))
	return NULL;
    }
  else
    /* When expanding this BB might actually contain multiple
       jumps (i.e. not yet split by find_many_sub_basic_blocks).
       Redirect all of those that match our label.  */
    FOR_BB_INSNS (src, insn)
      if (JUMP_P (insn) && !patch_jump_insn (as_a <rtx_jump_insn *> (insn),
					     old_label, target))
	return NULL;

  if (dump_file)
    fprintf (dump_file, "Edge %i->%i redirected to %i\n",
	     e->src->index, e->dest->index, target->index);

  if (e->dest != target)
    e = redirect_edge_succ_nodup (e, target);

  return e;
}

/* Called when edge E has been redirected to a new destination,
   in order to update the region crossing flag on the edge and
   jump.  */

static void
fixup_partition_crossing (edge e)
{
  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return;
  /* If we redirected an existing edge, it may already be marked
     crossing, even though the new src is missing a reg crossing note.
     But make sure reg crossing note doesn't already exist before
     inserting.  */
  if (BB_PARTITION (e->src) != BB_PARTITION (e->dest))
    {
      e->flags |= EDGE_CROSSING;
      if (JUMP_P (BB_END (e->src)))
	CROSSING_JUMP_P (BB_END (e->src)) = 1;
    }
  else if (BB_PARTITION (e->src) == BB_PARTITION (e->dest))
    {
      e->flags &= ~EDGE_CROSSING;
      /* Remove the section crossing note from jump at end of
	 src if it exists, and if no other successors are
	 still crossing.  */
      if (JUMP_P (BB_END (e->src)) && CROSSING_JUMP_P (BB_END (e->src)))
	{
	  bool has_crossing_succ = false;
	  edge e2;
	  edge_iterator ei;
	  FOR_EACH_EDGE (e2, ei, e->src->succs)
	    {
	      has_crossing_succ |= (e2->flags & EDGE_CROSSING);
	      if (has_crossing_succ)
		break;
	    }
	  if (!has_crossing_succ)
	    CROSSING_JUMP_P (BB_END (e->src)) = 0;
	}
    }
}
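
/* Illustrative sketch, not part of GCC: the invariant maintained by
   fixup_partition_crossing above is that an edge is marked crossing
   exactly when its endpoints live in different hot/cold partitions
   (types hypothetical):

     enum toy_partition { TOY_HOT, TOY_COLD };

     struct toy_edge
     {
       toy_partition src_part, dest_part;
       bool crossing;
     };

     static void toy_fixup_crossing (toy_edge &e)
     {
       e.crossing = e.src_part != e.dest_part;
     }
*/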

/* Called when block BB has been reassigned to the cold partition,
   because it is now dominated by another cold block,
   to ensure that the region crossing attributes are updated.  */

static void
fixup_new_cold_bb (basic_block bb)
{
  edge e;
  edge_iterator ei;

  /* This is called when a hot bb is found to now be dominated
     by a cold bb and therefore needs to become cold.  Therefore,
     its preds will no longer be region crossing.  Any non-dominating
     preds that were previously hot would also have become cold
     in the caller for the same region.  Any preds that were previously
     region-crossing will be adjusted in fixup_partition_crossing.  */
  FOR_EACH_EDGE (e, ei, bb->preds)
    {
      fixup_partition_crossing (e);
    }

  /* Possibly need to make bb's successor edges region crossing,
     or remove stale region crossing.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    {
      /* We can't have fall-through edges across partition boundaries.
	 Note that force_nonfallthru will do any necessary partition
	 boundary fixup by calling fixup_partition_crossing itself.  */
      if ((e->flags & EDGE_FALLTHRU)
	  && BB_PARTITION (bb) != BB_PARTITION (e->dest)
	  && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
	force_nonfallthru (e);
      else
	fixup_partition_crossing (e);
    }
}

/* Attempt to change code to redirect edge E to TARGET.  Don't do that at
   the expense of adding new instructions or reordering basic blocks.

   The function can also be called with the edge destination equal to
   TARGET.  Then it should try the simplifications and do nothing if
   none is possible.

   Return the edge representing the branch if the transformation succeeded.
   Return NULL on failure.
   We still return NULL in case E already pointed to TARGET and we didn't
   manage to simplify the instruction stream.  */

static edge
rtl_redirect_edge_and_branch (edge e, basic_block target)
{
  edge ret;
  basic_block src = e->src;
  basic_block dest = e->dest;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (dest == target)
    return e;

  if ((ret = try_redirect_by_replacing_jump (e, target, false)) != NULL)
    {
      df_set_bb_dirty (src);
      fixup_partition_crossing (ret);
      return ret;
    }

  ret = redirect_branch_edge (e, target);
  if (!ret)
    return NULL;

  df_set_bb_dirty (src);
  fixup_partition_crossing (ret);
  return ret;
}

/* Emit a barrier after BB, into the footer if we are in CFGLAYOUT mode.  */

void
emit_barrier_after_bb (basic_block bb)
{
  rtx_barrier *barrier = emit_barrier_after (BB_END (bb));
  gcc_assert (current_ir_type () == IR_RTL_CFGRTL
	      || current_ir_type () == IR_RTL_CFGLAYOUT);
  if (current_ir_type () == IR_RTL_CFGLAYOUT)
    {
      rtx_insn *insn = unlink_insn_chain (barrier, barrier);

      if (BB_FOOTER (bb))
	{
	  rtx_insn *footer_tail = BB_FOOTER (bb);

	  while (NEXT_INSN (footer_tail))
	    footer_tail = NEXT_INSN (footer_tail);
	  if (!BARRIER_P (footer_tail))
	    {
	      SET_NEXT_INSN (footer_tail) = insn;
	      SET_PREV_INSN (insn) = footer_tail;
	    }
	}
      else
	BB_FOOTER (bb) = insn;
    }
}

/* Like force_nonfallthru below, but additionally performs redirection.
   Used by redirect_edge_and_branch_force.  JUMP_LABEL is used only
   when redirecting to the EXIT_BLOCK, it is either ret_rtx or
   simple_return_rtx, indicating which kind of returnjump to create.
   It should be NULL otherwise.  */

basic_block
force_nonfallthru_and_redirect (edge e, basic_block target, rtx jump_label)
{
  basic_block jump_block, new_bb = NULL, src = e->src;
  rtx note;
  edge new_edge;
  int abnormal_edge_flags = 0;
  bool asm_goto_edge = false;
  int loc;

  /* In the case the last instruction is conditional jump to the next
     instruction, first redirect the jump itself and then continue
     by creating a basic block afterwards to redirect fallthru edge.  */
  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && any_condjump_p (BB_END (e->src))
      && JUMP_LABEL (BB_END (e->src)) == BB_HEAD (e->dest))
    {
      rtx note;
      edge b = unchecked_make_edge (e->src, target, 0);
      bool redirected;

      redirected = redirect_jump (as_a <rtx_jump_insn *> (BB_END (e->src)),
				  block_label (target), 0);
      gcc_assert (redirected);

      note = find_reg_note (BB_END (e->src), REG_BR_PROB, NULL_RTX);
      if (note)
	{
	  int prob = XINT (note, 0);

	  b->probability = profile_probability::from_reg_br_prob_note (prob);
	  e->probability -= e->probability;
	}
    }

  if (e->flags & EDGE_ABNORMAL)
    {
      /* Irritating special case - fallthru edge to the same block as abnormal
	 edge.
	 We can't redirect abnormal edge, but we still can split the fallthru
	 one and create separate abnormal edge to original destination.
	 This allows bb-reorder to make such edge non-fallthru.  */
      gcc_assert (e->dest == target);
      abnormal_edge_flags = e->flags & ~EDGE_FALLTHRU;
      e->flags &= EDGE_FALLTHRU;
    }
  else
    {
      gcc_assert (e->flags & EDGE_FALLTHRU);
      if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
	{
	  /* We can't redirect the entry block.  Create an empty block
	     at the start of the function which we use to add the new
	     jump.  */
	  edge tmp;
	  edge_iterator ei;
	  bool found = false;

	  basic_block bb = create_basic_block (BB_HEAD (e->dest), NULL,
					       ENTRY_BLOCK_PTR_FOR_FN (cfun));
	  bb->count = ENTRY_BLOCK_PTR_FOR_FN (cfun)->count;

	  /* Make sure new block ends up in correct hot/cold section.  */
	  BB_COPY_PARTITION (bb, e->dest);

	  /* Change the existing edge's source to be the new block, and add
	     a new edge from the entry block to the new block.  */
	  e->src = bb;
	  for (ei = ei_start (ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs);
	       (tmp = ei_safe_edge (ei)); )
	    {
	      if (tmp == e)
		{
		  ENTRY_BLOCK_PTR_FOR_FN (cfun)->succs->unordered_remove (ei.index);
		  found = true;
		  break;
		}
	      else
		ei_next (&ei);
	    }

	  gcc_assert (found);

	  vec_safe_push (bb->succs, e);
	  make_single_succ_edge (ENTRY_BLOCK_PTR_FOR_FN (cfun), bb,
				 EDGE_FALLTHRU);
	}
    }

  /* If e->src ends with asm goto, see if any of the ASM_OPERANDS_LABELs
     don't point to the target or fallthru label.  */
  if (JUMP_P (BB_END (e->src))
      && target != EXIT_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU)
      && (note = extract_asm_operands (PATTERN (BB_END (e->src)))))
    {
      int i, n = ASM_OPERANDS_LABEL_LENGTH (note);
      bool adjust_jump_target = false;

      for (i = 0; i < n; ++i)
	{
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (e->dest))
	    {
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))--;
	      XEXP (ASM_OPERANDS_LABEL (note, i), 0) = block_label (target);
	      LABEL_NUSES (XEXP (ASM_OPERANDS_LABEL (note, i), 0))++;
	      adjust_jump_target = true;
	    }
	  if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (target))
	    asm_goto_edge = true;
	}
      if (adjust_jump_target)
	{
	  rtx_insn *insn = BB_END (e->src);
	  rtx note;
	  rtx_insn *old_label = BB_HEAD (e->dest);
	  rtx_insn *new_label = BB_HEAD (target);

	  if (JUMP_LABEL (insn) == old_label)
	    {
	      JUMP_LABEL (insn) = new_label;
	      note = find_reg_note (insn, REG_LABEL_TARGET, new_label);
	      if (note)
		remove_note (insn, note);
	    }
	  else
	    {
	      note = find_reg_note (insn, REG_LABEL_TARGET, old_label);
	      if (note)
		remove_note (insn, note);
	      if (JUMP_LABEL (insn) != new_label
		  && !find_reg_note (insn, REG_LABEL_TARGET, new_label))
		add_reg_note (insn, REG_LABEL_TARGET, new_label);
	    }
	  while ((note = find_reg_note (insn, REG_LABEL_OPERAND, old_label))
		 != NULL_RTX)
	    XEXP (note, 0) = new_label;
	}
    }

  if (EDGE_COUNT (e->src->succs) >= 2 || abnormal_edge_flags || asm_goto_edge)
    {
      rtx_insn *new_head;
      profile_count count = e->count ();
      profile_probability probability = e->probability;
      /* Create the new structures.  */

      /* If the old block ended with a tablejump, skip its table
	 by searching forward from there.  Otherwise start searching
	 forward from the last instruction of the old block.  */
      rtx_jump_table_data *table;
      if (tablejump_p (BB_END (e->src), NULL, &table))
	new_head = table;
      else
	new_head = BB_END (e->src);
      new_head = NEXT_INSN (new_head);

      jump_block = create_basic_block (new_head, NULL, e->src);
      jump_block->count = count;

      /* Make sure new block ends up in correct hot/cold section.  */

      BB_COPY_PARTITION (jump_block, e->src);

      /* Wire edge in.  */
      new_edge = make_edge (e->src, jump_block, EDGE_FALLTHRU);
      new_edge->probability = probability;

      /* Redirect old edge.  */
      redirect_edge_pred (e, jump_block);
      e->probability = profile_probability::always ();

      /* If e->src was previously region crossing, it no longer is
	 and the reg crossing note should be removed.  */
      fixup_partition_crossing (new_edge);

      /* If asm goto has any label refs to target's label,
	 add also edge from asm goto bb to target.  */
      if (asm_goto_edge)
	{
	  new_edge->probability /= 2;
	  jump_block->count /= 2;
	  edge new_edge2 = make_edge (new_edge->src, target,
				      e->flags & ~EDGE_FALLTHRU);
	  new_edge2->probability = probability - new_edge->probability;
	}

      new_bb = jump_block;
    }
  else
    jump_block = e->src;

  loc = e->goto_locus;
  e->flags &= ~EDGE_FALLTHRU;
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    {
      if (jump_label == ret_rtx)
	emit_jump_insn_after_setloc (targetm.gen_return (),
				     BB_END (jump_block), loc);
      else
	{
	  gcc_assert (jump_label == simple_return_rtx);
	  emit_jump_insn_after_setloc (targetm.gen_simple_return (),
				       BB_END (jump_block), loc);
	}
      set_return_jump_label (BB_END (jump_block));
    }
  else
    {
      rtx_code_label *label = block_label (target);
      emit_jump_insn_after_setloc (targetm.gen_jump (label),
				   BB_END (jump_block), loc);
      JUMP_LABEL (BB_END (jump_block)) = label;
      LABEL_NUSES (label)++;
    }

  /* We might be in cfg layout mode, and if so, the following routine will
     insert the barrier correctly.  */
  emit_barrier_after_bb (jump_block);
  redirect_edge_succ_nodup (e, target);

  if (abnormal_edge_flags)
    make_edge (src, target, abnormal_edge_flags);

  df_mark_solutions_dirty ();
  fixup_partition_crossing (e);
  return new_bb;
}

/* Edge E is assumed to be a fallthru edge.  Emit the needed jump
   instruction (and possibly create a new basic block) to make the edge
   non-fallthru.  Return the newly created BB or NULL if none.  */

static basic_block
rtl_force_nonfallthru (edge e)
{
  return force_nonfallthru_and_redirect (e, e->dest, NULL_RTX);
}

/* Redirect edge even at the expense of creating a new jump insn or
   basic block.  Return the new basic block if created, NULL otherwise.
   The conversion must be possible.  */

static basic_block
rtl_redirect_edge_and_branch_force (edge e, basic_block target)
{
  if (redirect_edge_and_branch (e, target)
      || e->dest == target)
    return NULL;

  /* In case the edge redirection failed, try to force it to be non-fallthru
     and redirect newly created simplejump.  */
  df_set_bb_dirty (e->src);
  return force_nonfallthru_and_redirect (e, target, NULL_RTX);
}

/* The given edge should potentially be a fallthru edge.  If that is in
   fact true, delete the jump and barriers that are in the way.  */

static void
rtl_tidy_fallthru_edge (edge e)
{
  rtx_insn *q;
  basic_block b = e->src, c = b->next_bb;

  /* ??? In a late-running flow pass, other folks may have deleted basic
     blocks by nopping out blocks, leaving multiple BARRIERs between here
     and the target label.  They ought to be chastised and fixed.

     We can also wind up with a sequence of undeletable labels between
     one block and the next.

     So search through a sequence of barriers, labels, and notes for
     the head of block C and assert that we really do fall through.  */

  for (q = NEXT_INSN (BB_END (b)); q != BB_HEAD (c); q = NEXT_INSN (q))
    if (NONDEBUG_INSN_P (q))
      return;

  /* Remove what will soon cease being the jump insn from the source block.
     If block B consisted only of this single jump, turn it into a deleted
     note.  */
  q = BB_END (b);
  if (JUMP_P (q)
      && onlyjump_p (q)
      && (any_uncondjump_p (q)
	  || single_succ_p (b)))
    {
      rtx_insn *label;
      rtx_jump_table_data *table;

      if (tablejump_p (q, &label, &table))
	{
	  /* The label is likely mentioned in some instruction before
	     the tablejump and might not be DCEd, so turn it into
	     a note instead and move before the tablejump that is going to
	     be deleted.  */
	  const char *name = LABEL_NAME (label);
	  PUT_CODE (label, NOTE);
	  NOTE_KIND (label) = NOTE_INSN_DELETED_LABEL;
	  NOTE_DELETED_LABEL_NAME (label) = name;
	  reorder_insns (label, label, PREV_INSN (q));
	  delete_insn (table);
	}

      q = PREV_INSN (q);
    }
  /* Unconditional jumps with side-effects (i.e. which we can't just delete
     together with the barrier) should never have a fallthru edge.  */
  else if (JUMP_P (q) && any_uncondjump_p (q))
    return;

  /* Selectively unlink the sequence.  */
  if (q != PREV_INSN (BB_HEAD (c)))
    delete_insn_chain (NEXT_INSN (q), PREV_INSN (BB_HEAD (c)), false);

  e->flags |= EDGE_FALLTHRU;
}
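
/* Illustrative sketch, not part of GCC: tidying a fallthru edge boils
   down to deleting a jump whose target is the textually next block and
   marking the edge fallthru (types hypothetical):

     struct toy_layout_bb
     {
       toy_layout_bb *next;		// layout successor
       toy_layout_bb *jump_target;	// null if no final jump
       bool fallthru_edge;
     };

     static void toy_tidy_fallthru (toy_layout_bb &b)
     {
       if (b.jump_target == b.next)
	 {
	   b.jump_target = nullptr;	// the jump is redundant; delete it
	   b.fallthru_edge = true;
	 }
     }
*/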

/* Should move basic block BB after basic block AFTER.  Not implemented
   yet.  */

static bool
rtl_move_block_after (basic_block bb ATTRIBUTE_UNUSED,
		      basic_block after ATTRIBUTE_UNUSED)
{
  return false;
}

/* Locate the last bb in the same partition as START_BB.  */

static basic_block
last_bb_in_partition (basic_block start_bb)
{
  basic_block bb;
  FOR_BB_BETWEEN (bb, start_bb, EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
    {
      if (BB_PARTITION (start_bb) != BB_PARTITION (bb->next_bb))
	return bb;
    }
  /* Return bb before the exit block.  */
  return bb->prev_bb;
}
1850
1851/* Split a (typically critical) edge. Return the new block.
1852 The edge must not be abnormal.
1853
1854 ??? The code generally expects to be called on critical edges.
1855 The case of a block ending in an unconditional jump to a
1856 block with multiple predecessors is not handled optimally. */
1857
1858static basic_block
1859rtl_split_edge (edge edge_in)
1860{
1861 basic_block bb, new_bb;
1862 rtx_insn *before;
1863
1864 /* Abnormal edges cannot be split. */
1865 gcc_assert (!(edge_in->flags & EDGE_ABNORMAL));
1866
1867 /* We are going to place the new block in front of edge destination.
1868 Avoid existence of fallthru predecessors. */
1869 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1870 {
1871 edge e = find_fallthru_edge (edge_in->dest->preds);
1872
1873 if (e)
1874 force_nonfallthru (e);
1875 }
1876
1877 /* Create the basic block note. */
1878 if (edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1879 before = BB_HEAD (edge_in->dest);
1880 else
1881 before = NULL;
1882
1883 /* If this is a fall through edge to the exit block, the blocks might
1884 not be adjacent, and the right place is after the source. */
1885 if ((edge_in->flags & EDGE_FALLTHRU)
1886 && edge_in->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
1887 {
1888 before = NEXT_INSN (BB_END (edge_in->src));
1889 bb = create_basic_block (before, NULL, edge_in->src);
1890 BB_COPY_PARTITION (bb, edge_in->src);
1891 }
1892 else
1893 {
1894 if (edge_in->src == ENTRY_BLOCK_PTR_FOR_FN (cfun))
1895 {
1896 bb = create_basic_block (before, NULL, edge_in->dest->prev_bb);
1897 BB_COPY_PARTITION (bb, edge_in->dest);
1898 }
1899 else
1900 {
1901 basic_block after = edge_in->dest->prev_bb;
1902 /* If this is post-bb reordering, and the edge crosses a partition
1903 boundary, the new block needs to be inserted in the bb chain
1904 at the end of the src partition (since we put the new bb into
1905 that partition, see below). Otherwise we may end up creating
1906 an extra partition crossing in the chain, which is illegal.
1907 It can't go after the src, because src may have a fall-through
1908 to a different block. */
1909 if (crtl->bb_reorder_complete
1910 && (edge_in->flags & EDGE_CROSSING))
1911 {
1912 after = last_bb_in_partition (edge_in->src);
1913 before = get_last_bb_insn (after);
1914 /* The instruction following the last bb in partition should
1915 be a barrier, since it cannot end in a fall-through. */
1916 gcc_checking_assert (BARRIER_P (before));
1917 before = NEXT_INSN (before);
1918 }
1919 bb = create_basic_block (before, NULL, after);
1920 /* Put the split bb into the src partition, to avoid creating
1921 a situation where a cold bb dominates a hot bb, in the case
1922 where src is cold and dest is hot. The src will dominate
1923 the new bb (whereas it might not have dominated dest). */
1924 BB_COPY_PARTITION (bb, edge_in->src);
1925 }
1926 }
1927
1928 make_single_succ_edge (bb, edge_in->dest, EDGE_FALLTHRU);
1929
1930 /* Can't allow a region crossing edge to be fallthrough. */
1931 if (BB_PARTITION (bb) != BB_PARTITION (edge_in->dest)
1932 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
1933 {
1934 new_bb = force_nonfallthru (single_succ_edge (bb));
1935 gcc_assert (!new_bb);
1936 }
1937
1938 /* For non-fallthru edges, we must adjust the predecessor's
1939 jump instruction to target our new block. */
1940 if ((edge_in->flags & EDGE_FALLTHRU) == 0)
1941 {
1942 edge redirected = redirect_edge_and_branch (edge_in, bb);
1943 gcc_assert (redirected);
1944 }
1945 else
1946 {
1947 if (edge_in->src != ENTRY_BLOCK_PTR_FOR_FN (cfun))
1948 {
1949 /* For asm goto even splitting of fallthru edge might
1950 need insn patching, as other labels might point to the
1951 old label. */
1952 rtx_insn *last = BB_END (edge_in->src);
1953 if (last
1954 && JUMP_P (last)
1955 && edge_in->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
1956 && (extract_asm_operands (PATTERN (last))
1957 || JUMP_LABEL (last) == before)
1958 && patch_jump_insn (last, before, bb))
1959 df_set_bb_dirty (edge_in->src);
1960 }
1961 redirect_edge_succ (edge_in, bb);
1962 }
1963
1964 return bb;
1965}
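
/* A minimal usage sketch (illustrative only):

     basic_block new_bb = split_edge (e);

   goes through the split_edge cfghook, which for RTL dispatches to
   rtl_split_edge above; NEW_BB then contains just a basic block note and
   has a single fallthru successor to the old destination of E.  */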
1966
1967/* Queue instructions for insertion on an edge between two basic blocks.
1968 The new instructions and basic blocks (if any) will not appear in the
1969 CFG until commit_edge_insertions is called. If there are already
1970 queued instructions on the edge, PATTERN is appended to them. */
1971
1972void
1973insert_insn_on_edge (rtx pattern, edge e)
1974{
1975 /* We cannot insert instructions on an abnormal critical edge.
1976 It will be easier to find the culprit if we die now. */
1977 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1978
1979 if (e->insns.r == NULL_RTX)
1980 start_sequence ();
1981 else
1982 push_to_sequence (e->insns.r);
1983
1984 emit_insn (pattern);
1985
1986 e->insns.r = get_insns ();
1987 end_sequence ();
1988}
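
/* A minimal usage sketch (illustrative; REG and VAL are hypothetical
   operands):

     start_sequence ();
     emit_move_insn (reg, val);
     rtx_insn *seq = get_insns ();
     end_sequence ();
     insert_insn_on_edge (seq, e);

   followed eventually by one call to commit_edge_insertions () to
   materialize the queued sequence, possibly splitting E.  */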
1989
1990/* Like insert_insn_on_edge, but if there are already queued instructions
1991 on the edge, PATTERN is prepended to them. */
1992
1993void
1994prepend_insn_to_edge (rtx pattern, edge e)
1995{
1996 /* We cannot insert instructions on an abnormal critical edge.
1997 It will be easier to find the culprit if we die now. */
1998 gcc_assert (!((e->flags & EDGE_ABNORMAL) && EDGE_CRITICAL_P (e)));
1999
2000 start_sequence ();
2001
2002 emit_insn (pattern);
2003 emit_insn (e->insns.r);
2004
2005 e->insns.r = get_insns ();
2006 end_sequence ();
2007}
2008
2009/* Update the CFG for the instructions queued on edge E. */
2010
2011void
2012commit_one_edge_insertion (edge e)
2013{
2014 rtx_insn *before = NULL, *after = NULL, *insns, *tmp, *last;
2015 basic_block bb;
2016
2017 /* Pull the insns off the edge now since the edge might go away. */
2018 insns = e->insns.r;
2019 e->insns.r = NULL;
2020
2021 /* Allow the sequence to contain internal jumps, such as a memcpy loop
2022 or an allocation loop. If such a sequence is emitted during RTL
2023 expansion, we'll create the appropriate basic blocks later,
2024 at the end of the pass. But if such a sequence is emitted after
2025 initial expansion, we'll need to find the subblocks ourselves. */
2026 bool contains_jump = false;
2027 if (!currently_expanding_to_rtl)
2028 for (rtx_insn *insn = insns; insn; insn = NEXT_INSN (insn))
2029 if (JUMP_P (insn))
2030 {
2031 rebuild_jump_labels_chain (insns);
2032 contains_jump = true;
2033 break;
2034 }
2035
2036 /* Figure out where to put these insns. If the destination has
2037 one predecessor, insert there. Except for the exit block. */
2038 if (single_pred_p (e->dest) && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
2039 {
2040 bb = e->dest;
2041
2042 /* Get the location correct wrt a code label, and "nice" wrt
2043 a basic block note, and before everything else. */
2044 tmp = BB_HEAD (bb);
2045 if (LABEL_P (tmp))
2046 tmp = NEXT_INSN (tmp);
2047 if (NOTE_INSN_BASIC_BLOCK_P (tmp))
2048 tmp = NEXT_INSN (tmp);
2049 if (tmp == BB_HEAD (bb))
2050 before = tmp;
2051 else if (tmp)
2052 after = PREV_INSN (tmp);
2053 else
2054 after = get_last_insn ();
2055 }
2056
2057 /* If the source has one successor and the edge is not abnormal,
2058 insert there. Except for the entry block.
2059 Don't do this if the predecessor ends in a jump other than
2060 unconditional simple jump. E.g. for asm goto that points all
2061 its labels at the fallthru basic block, we can't insert instructions
2062 before the asm goto, as the asm goto can have various side effects,
2063 and can't emit instructions after the asm goto, as it must end
2064 the basic block. */
2065 else if ((e->flags & EDGE_ABNORMAL) == 0
2066 && single_succ_p (e->src)
2067 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2068 && (!JUMP_P (BB_END (e->src))
2069 || simplejump_p (BB_END (e->src))))
2070 {
2071 bb = e->src;
2072
2073 /* It is possible to have a non-simple jump here. Consider a target
2074 where some forms of unconditional jumps clobber a register. This
2075 happens on the fr30 for example.
2076
2077 We know this block has a single successor, so we can just emit
2078 the queued insns before the jump. */
2079 if (JUMP_P (BB_END (bb)))
2080 before = BB_END (bb);
2081 else
2082 {
2083 /* We'd better be fallthru, or we've lost track of what's what. */
2084 gcc_assert (e->flags & EDGE_FALLTHRU);
2085
2086 after = BB_END (bb);
2087 }
2088 }
2089
2090 /* Otherwise we must split the edge. */
2091 else
2092 {
2093 bb = split_edge (e);
2094
2095 /* If E crossed a partition boundary, we needed to make bb end in
2096 a region-crossing jump, even though it was originally fallthru. */
2097 if (JUMP_P (BB_END (bb)))
2098 before = BB_END (bb);
2099 else
2100 after = BB_END (bb);
2101 }
2102
2103 /* Now that we've found the spot, do the insertion. */
2104 if (before)
2105 {
2106 emit_insn_before_noloc (insns, before, bb);
2107 last = prev_nonnote_insn (before);
2108 }
2109 else
2110 last = emit_insn_after_noloc (insns, after, bb);
2111
2112 if (returnjump_p (last))
2113 {
2114 /* ??? Remove all outgoing edges from BB and add one for EXIT.
2115 This is not currently a problem because this only happens
2116 for the (single) epilogue, which already has a fallthru edge
2117 to EXIT. */
2118
2119 e = single_succ_edge (bb);
2120 gcc_assert (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
2121 && single_succ_p (bb) && (e->flags & EDGE_FALLTHRU));
2122
2123 e->flags &= ~EDGE_FALLTHRU;
2124 emit_barrier_after (last);
2125
2126 if (before)
2127 delete_insn (before);
2128 }
2129 else
2130 /* Sequences inserted after RTL expansion are expected to be SESE,
2131 with only internal branches allowed. If the sequence jumps outside
2132 itself then we do not know how to add the associated edges here. */
2133 gcc_assert (!JUMP_P (last) || currently_expanding_to_rtl);
2134
2135 if (contains_jump)
2136 find_sub_basic_blocks (bb);
2137}
2138
2139/* Update the CFG for all queued instructions. */
2140
2141void
2142commit_edge_insertions (void)
2143{
2144 basic_block bb;
2145
2146 /* Optimization passes that invoke this routine can cause hot blocks
2147 previously reached by both hot and cold blocks to become dominated only
2148 by cold blocks. This will cause the verification below to fail,
2149 and lead to now-cold code in the hot section. In some cases this
2150 may only be visible after newly unreachable blocks are deleted,
2151 which will be done by fixup_partitions. */
2152 fixup_partitions ();
2153
2154 if (!currently_expanding_to_rtl)
2155 checking_verify_flow_info ();
2156
2157 FOR_BB_BETWEEN (bb, ENTRY_BLOCK_PTR_FOR_FN (cfun),
2158 EXIT_BLOCK_PTR_FOR_FN (cfun), next_bb)
2159 {
2160 edge e;
2161 edge_iterator ei;
2162
2163 FOR_EACH_EDGE (e, ei, bb->succs)
2164 if (e->insns.r)
2165 {
2166 if (currently_expanding_to_rtl)
2167 rebuild_jump_labels_chain (e->insns.r);
2168 commit_one_edge_insertion (e);
2169 }
2170 }
2171}
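
/* Typical pass-level pattern (a sketch, not a recipe): queue compensation
   code on as many edges as needed, then commit once per function:

     basic_block bb;
     edge e;
     edge_iterator ei;
     FOR_EACH_BB_FN (bb, cfun)
       FOR_EACH_EDGE (e, ei, bb->succs)
         if (needs_compensation_p (e))              // hypothetical predicate
           insert_insn_on_edge (make_fixup_seq (e), e); // hypothetical helper
     commit_edge_insertions ();

   Queueing is safe inside the edge walk because the CFG is not modified
   until the final commit.  */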
2172
2173
2174/* Print out RTL-specific basic block information (live information
2175 at start and end with TDF_DETAILS). FLAGS are the TDF_* masks
2176 documented in dumpfile.h. */
2177
2178static void
2179rtl_dump_bb (FILE *outf, basic_block bb, int indent, dump_flags_t flags)
2180{
2181 char *s_indent;
2182
2183 s_indent = (char *) alloca ((size_t) indent + 1);
2184 memset (s_indent, ' ', (size_t) indent);
2185 s_indent[indent] = '\0';
2186
2187 if (df && (flags & TDF_DETAILS))
2188 {
2189 df_dump_top (bb, outf);
2190 putc ('\n', outf);
2191 }
2192
2193 if (bb->index != ENTRY_BLOCK && bb->index != EXIT_BLOCK
2194 && rtl_bb_info_initialized_p (bb))
2195 {
2196 rtx_insn *last = BB_END (bb);
2197 if (last)
2198 last = NEXT_INSN (last);
2199 for (rtx_insn *insn = BB_HEAD (bb); insn != last; insn = NEXT_INSN (insn))
2200 {
2201 if (flags & TDF_DETAILS)
2202 df_dump_insn_top (insn, outf);
2203 if (! (flags & TDF_SLIM))
2204 print_rtl_single (outf, insn);
2205 else
2206 dump_insn_slim (outf, insn);
2207 if (flags & TDF_DETAILS)
2208 df_dump_insn_bottom (insn, outf);
2209 }
2210 }
2211
2212 if (df && (flags & TDF_DETAILS))
2213 {
2214 df_dump_bottom (bb, outf);
2215 putc ('\n', outf);
2216 }
2217
2218}
2219
2220/* Like dump_function_to_file, but for RTL. Print out dataflow information
2221 for the start of each basic block. FLAGS are the TDF_* masks documented
2222 in dumpfile.h. */
2223
2224void
2225print_rtl_with_bb (FILE *outf, const rtx_insn *rtx_first, dump_flags_t flags)
2226{
2227 const rtx_insn *tmp_rtx;
2228 if (rtx_first == 0)
2229 fprintf (outf, "(nil)\n");
2230 else
2231 {
2232 enum bb_state { NOT_IN_BB, IN_ONE_BB, IN_MULTIPLE_BB };
2233 int max_uid = get_max_uid ();
2234 basic_block *start = XCNEWVEC (basic_block, max_uid);
2235 basic_block *end = XCNEWVEC (basic_block, max_uid);
2236 enum bb_state *in_bb_p = XCNEWVEC (enum bb_state, max_uid);
2237 basic_block bb;
2238
2239 /* After freeing the CFG, we still have BLOCK_FOR_INSN set on most
2240 insns, but the CFG is not maintained so the basic block info
2241 is not reliable. Therefore it's omitted from the dumps. */
2242 if (! (cfun->curr_properties & PROP_cfg))
2243 flags &= ~TDF_BLOCKS;
2244
2245 if (df)
2246 df_dump_start (outf);
2247
2248 if (cfun->curr_properties & PROP_cfg)
2249 {
2250 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2251 {
2252 rtx_insn *x;
2253
2254 start[INSN_UID (BB_HEAD (bb))] = bb;
2255 end[INSN_UID (BB_END (bb))] = bb;
2256 if (flags & TDF_BLOCKS)
2257 {
2258 for (x = BB_HEAD (bb); x != NULL_RTX; x = NEXT_INSN (x))
2259 {
2260 enum bb_state state = IN_MULTIPLE_BB;
2261
2262 if (in_bb_p[INSN_UID (x)] == NOT_IN_BB)
2263 state = IN_ONE_BB;
2264 in_bb_p[INSN_UID (x)] = state;
2265
2266 if (x == BB_END (bb))
2267 break;
2268 }
2269 }
2270 }
2271 }
2272
2273 for (tmp_rtx = rtx_first; tmp_rtx != NULL; tmp_rtx = NEXT_INSN (tmp_rtx))
2274 {
2275 if (flags & TDF_BLOCKS)
2276 {
2277 bb = start[INSN_UID (tmp_rtx)];
2278 if (bb != NULL)
2279 {
2280 dump_bb_info (outf, bb, 0, dump_flags, true, false);
2281 if (df && (flags & TDF_DETAILS))
2282 df_dump_top (bb, outf);
2283 }
2284
2285 if (in_bb_p[INSN_UID (tmp_rtx)] == NOT_IN_BB
2286 && !NOTE_P (tmp_rtx)
2287 && !BARRIER_P (tmp_rtx))
2288 fprintf (outf, ";; Insn is not within a basic block\n");
2289 else if (in_bb_p[INSN_UID (tmp_rtx)] == IN_MULTIPLE_BB)
2290 fprintf (outf, ";; Insn is in multiple basic blocks\n");
2291 }
2292
2293 if (flags & TDF_DETAILS)
2294 df_dump_insn_top (tmp_rtx, outf);
2295 if (! (flags & TDF_SLIM))
2296 print_rtl_single (outf, tmp_rtx);
2297 else
2298 dump_insn_slim (outf, tmp_rtx);
2299 if (flags & TDF_DETAILS)
2300 df_dump_insn_bottom (tmp_rtx, outf);
2301
2302 bb = end[INSN_UID (tmp_rtx)];
2303 if (bb != NULL)
2304 {
2305 if (flags & TDF_BLOCKS)
2306 {
2307 dump_bb_info (outf, bb, 0, dump_flags, false, true);
2308 if (df && (flags & TDF_DETAILS))
2309 df_dump_bottom (bb, outf);
2310 putc ('\n', outf);
2311 }
2312 /* Emit a hint if the fallthrough target of the current basic block
2313 isn't the block placed right after it. */
2314 else if (EDGE_COUNT (bb->succs) > 0)
2315 {
2316 gcc_assert (BB_END (bb) == tmp_rtx);
2317 const rtx_insn *ninsn = NEXT_INSN (tmp_rtx);
2318 /* Bypass intervening deleted-insn notes and debug insns. */
2319 while (ninsn
2320 && !NONDEBUG_INSN_P (ninsn)
2321 && !start[INSN_UID (ninsn)])
2322 ninsn = NEXT_INSN (ninsn);
2323 edge e = find_fallthru_edge (bb->succs);
2324 if (e && ninsn)
2325 {
2326 basic_block dest = e->dest;
2327 if (start[INSN_UID (ninsn)] != dest)
2328 fprintf (outf, "%s ; pc falls through to BB %d\n",
2329 print_rtx_head, dest->index);
2330 }
2331 }
2332 }
2333 }
2334
2335 free (start);
2336 free (end);
2337 free (in_bb_p);
2338 }
2339}
2340
2341/* Update the branch probability of BB if a REG_BR_PROB is present. */
2342
2343void
2344update_br_prob_note (basic_block bb)
2345{
2346 rtx note;
2347 note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX);
2348 if (!JUMP_P (BB_END (bb)) || !BRANCH_EDGE (bb)->probability.initialized_p ())
2349 {
2350 if (note)
2351 {
2352 rtx *note_link, this_rtx;
2353
2354 note_link = &REG_NOTES (BB_END (bb));
2355 for (this_rtx = *note_link; this_rtx; this_rtx = XEXP (this_rtx, 1))
2356 if (this_rtx == note)
2357 {
2358 *note_link = XEXP (this_rtx, 1);
2359 break;
2360 }
2361 }
2362 return;
2363 }
2364 if (!note
2365 || XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ())
2366 return;
2367 XINT (note, 0) = BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ();
2368}
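
/* For instance (a sketch): after a pass swaps the branch and fallthru
   edges of a conditional jump, the note must be refreshed so that

     XINT (note, 0) == BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()

   holds again, which is exactly what the function above re-establishes.  */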
2369
2370/* Get the last insn associated with block BB (that includes barriers and
2371 tablejumps after BB). */
2372rtx_insn *
2373get_last_bb_insn (basic_block bb)
2374{
2375 rtx_jump_table_data *table;
2376 rtx_insn *tmp;
2377 rtx_insn *end = BB_END (bb);
2378
2379 /* Include any jump table following the basic block. */
2380 if (tablejump_p (end, NULL, &table))
2381 end = table;
2382
2383 /* Include any barriers that may follow the basic block. */
2384 tmp = next_nonnote_nondebug_insn_bb (end);
2385 while (tmp && BARRIER_P (tmp))
2386 {
2387 end = tmp;
2388 tmp = next_nonnote_nondebug_insn_bb (end);
2389 }
2390
2391 return end;
2392}
2393
2394/* Add all BBs reachable from entry via hot paths into the SET. */
2395
2396void
2397find_bbs_reachable_by_hot_paths (hash_set<basic_block> *set)
2398{
2399 auto_vec<basic_block, 64> worklist;
2400
2401 set->add (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2402 worklist.safe_push (ENTRY_BLOCK_PTR_FOR_FN (cfun));
2403
2404 while (worklist.length () > 0)
2405 {
2406 basic_block bb = worklist.pop ();
2407 edge_iterator ei;
2408 edge e;
2409
2410 FOR_EACH_EDGE (e, ei, bb->succs)
2411 if (BB_PARTITION (e->dest) != BB_COLD_PARTITION
2412 && !set->add (e->dest))
2413 worklist.safe_push (e->dest);
2414 }
2415}
2416
2417/* Sanity check partition hotness to ensure that basic blocks in
2418 the cold partition don't dominate basic blocks in the hot partition.
2419 If FLAG_ONLY is true, report violations as errors. Otherwise
2420 re-mark the dominated blocks as cold, since this is run after
2421 cfg optimizations that may make hot blocks previously reached
2422 by both hot and cold blocks now only reachable along cold paths. */
2423
2424static auto_vec<basic_block>
2425find_partition_fixes (bool flag_only)
2426{
2427 basic_block bb;
2428 auto_vec<basic_block> bbs_to_fix;
2429 hash_set<basic_block> set;
2430
2431 /* Callers check this. */
2432 gcc_checking_assert (crtl->has_bb_partition);
2433
2434 find_bbs_reachable_by_hot_paths (&set);
2435
2436 FOR_EACH_BB_FN (bb, cfun)
2437 if (!set.contains (bb)
2438 && BB_PARTITION (bb) != BB_COLD_PARTITION)
2439 {
2440 if (flag_only)
2441 error ("non-cold basic block %d reachable only "
2442 "by paths crossing the cold partition", bb->index);
2443 else
2444 BB_SET_PARTITION (bb, BB_COLD_PARTITION);
2445 bbs_to_fix.safe_push (bb);
2446 }
2447
2448 return bbs_to_fix;
2449}
2450
2451/* Perform cleanup on the hot/cold bb partitioning after optimization
2452 passes that modify the cfg. */
2453
2454void
2455fixup_partitions (void)
2456{
2457 if (!crtl->has_bb_partition)
2458 return;
2459
2460 /* Delete any blocks that became unreachable and weren't
2461 already cleaned up, for example during edge forwarding
2462 and convert_jumps_to_returns. This will expose more
2463 opportunities for fixing the partition boundaries here.
2464 Also, the calculation of the dominance graph during verification
2465 will assert if there are unreachable nodes. */
2466 delete_unreachable_blocks ();
2467
2468 /* If there are partitions, do a sanity check on them: A basic block in
2469 a cold partition cannot dominate a basic block in a hot partition.
2470 Fixup any that now violate this requirement, as a result of edge
2471 forwarding and unreachable block deletion. */
2472 auto_vec<basic_block> bbs_to_fix = find_partition_fixes (false);
2473
2474 /* Do the partition fixup after all necessary blocks have been converted to
2475 cold, so that we only update the region crossings in the minimum number of
2476 places, which can require forcing edges to be non fallthru. */
2477 if (! bbs_to_fix.is_empty ())
2478 {
2479 do
2480 {
2481 basic_block bb = bbs_to_fix.pop ();
2482 fixup_new_cold_bb (bb);
2483 }
2484 while (! bbs_to_fix.is_empty ());
2485
2486 /* Fix up hot cold block grouping if needed. */
2487 if (crtl->bb_reorder_complete && current_ir_type () == IR_RTL_CFGRTL)
2488 {
2489 basic_block bb, first = NULL, second = NULL;
2490 int current_partition = BB_UNPARTITIONED;
2491
2492 FOR_EACH_BB_FN (bb, cfun)
2493 {
2494 if (current_partition != BB_UNPARTITIONED
2495 && BB_PARTITION (bb) != current_partition)
2496 {
2497 if (first == NULL)
2498 first = bb;
2499 else if (second == NULL)
2500 second = bb;
2501 else
2502 {
2503 /* If we switch partitions for the 3rd, 5th etc. time,
2504 move bbs first (inclusive) .. second (exclusive) right
2505 before bb. */
2506 basic_block prev_first = first->prev_bb;
2507 basic_block prev_second = second->prev_bb;
2508 basic_block prev_bb = bb->prev_bb;
2509 prev_first->next_bb = second;
2510 second->prev_bb = prev_first;
2511 prev_second->next_bb = bb;
2512 bb->prev_bb = prev_second;
2513 prev_bb->next_bb = first;
2514 first->prev_bb = prev_bb;
2515 rtx_insn *prev_first_insn = PREV_INSN (BB_HEAD (first));
2516 rtx_insn *prev_second_insn
2517 = PREV_INSN (BB_HEAD (second));
2518 rtx_insn *prev_bb_insn = PREV_INSN (BB_HEAD (bb));
2519 SET_NEXT_INSN (prev_first_insn) = BB_HEAD (second);
2520 SET_PREV_INSN (BB_HEAD (second)) = prev_first_insn;
2521 SET_NEXT_INSN (prev_second_insn) = BB_HEAD (bb);
2522 SET_PREV_INSN (BB_HEAD (bb)) = prev_second_insn;
2523 SET_NEXT_INSN (prev_bb_insn) = BB_HEAD (first);
2524 SET_PREV_INSN (BB_HEAD (first)) = prev_bb_insn;
2525 second = NULL;
2526 }
2527 }
2528 current_partition = BB_PARTITION (bb);
2529 }
2530 gcc_assert (!second);
2531 }
2532 }
2533}
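
/* Illustration of the regrouping above when the chain switches partition
   a third time (blocks shown left to right in chain order):

     ...hot... | first ...cold... | second ...hot... | bb ...
   becomes
     ...hot... | second ...hot... | first ...cold... | bb ...

   i.e. the range [first, second) is moved right before BB, restoring a
   single hot/cold transition.  */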
2534
2535/* Verify, in the basic block chain, that there is at most one switch
2536 between hot/cold partitions. This condition will not be true until
2537 after reorder_basic_blocks is called. */
2538
2539static bool
2540verify_hot_cold_block_grouping (void)
2541{
2542 basic_block bb;
2543 bool err = false;
2544 bool switched_sections = false;
2545 int current_partition = BB_UNPARTITIONED;
2546
2547 /* Even after bb reordering is complete, we go into cfglayout mode
2548 again (in compgoto). Ensure we don't call this before going back
2549 into linearized RTL when any layout fixes would have been committed. */
2550 if (!crtl->bb_reorder_complete
2551 || current_ir_type () != IR_RTL_CFGRTL)
2552 return err;
2553
2554 FOR_EACH_BB_FN (bb, cfun)
2555 {
2556 if (current_partition != BB_UNPARTITIONED
2557 && BB_PARTITION (bb) != current_partition)
2558 {
2559 if (switched_sections)
2560 {
2561 error ("multiple hot/cold transitions found (bb %i)",
2562 bb->index);
2563 err = true;
2564 }
2565 else
2566 switched_sections = true;
2567
2568 if (!crtl->has_bb_partition)
2569 error ("partition found but function partition flag not set");
2570 }
2571 current_partition = BB_PARTITION (bb);
2572 }
2573
2574 return err;
2575}
2576
2577
2578/* Perform several checks on the edges out of each block, such as
2579 the consistency of the branch probabilities, the correctness
2580 of hot/cold partition crossing edges, and the number of expected
2581 successor edges. Also verify that the dominance relationship
2582 between hot/cold blocks is sane. */
2583
2584static bool
2585rtl_verify_edges (void)
2586{
2587 bool err = false;
2588 basic_block bb;
2589
2590 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2591 {
2592 int n_fallthru = 0, n_branch = 0, n_abnormal_call = 0, n_sibcall = 0;
2593 int n_eh = 0, n_abnormal = 0;
2594 edge e, fallthru = NULL;
2595 edge_iterator ei;
2596 rtx note;
2597 bool has_crossing_edge = false;
2598
2599 if (JUMP_P (BB_END (bb))
2600 && (note = find_reg_note (BB_END (bb), REG_BR_PROB, NULL_RTX))
2601 && EDGE_COUNT (bb->succs) >= 2
2602 && any_condjump_p (BB_END (bb)))
2603 {
2604 if (!BRANCH_EDGE (bb)->probability.initialized_p ())
2605 {
2606 if (profile_status_for_fn (cfun) != PROFILE_ABSENT)
2607 {
2608 error ("verify_flow_info: "
2609 "REG_BR_PROB is set but cfg probability is not");
2610 err = true;
2611 }
2612 }
2613 else if (XINT (note, 0)
2614 != BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ()
2615 && profile_status_for_fn (cfun) != PROFILE_ABSENT)
2616 {
2617 error ("verify_flow_info: REG_BR_PROB does not match cfg %i %i",
2618 XINT (note, 0),
2619 BRANCH_EDGE (bb)->probability.to_reg_br_prob_note ());
2620 err = true;
2621 }
2622 }
2623
2624 FOR_EACH_EDGE (e, ei, bb->succs)
2625 {
2626 bool is_crossing;
2627
2628 if (e->flags & EDGE_FALLTHRU)
2629 n_fallthru++, fallthru = e;
2630
2631 is_crossing = (BB_PARTITION (e->src) != BB_PARTITION (e->dest)
2632 && e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
2633 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun));
2634 has_crossing_edge |= is_crossing;
2635 if (e->flags & EDGE_CROSSING)
2636 {
2637 if (!is_crossing)
2638 {
2639 error ("EDGE_CROSSING incorrectly set across same section");
2640 err = true;
2641 }
2642 if (e->flags & EDGE_FALLTHRU)
2643 {
2644 error ("fallthru edge crosses section boundary in bb %i",
2645 e->src->index);
2646 err = true;
2647 }
2648 if (e->flags & EDGE_EH)
2649 {
2650 error ("EH edge crosses section boundary in bb %i",
2651 e->src->index);
2652 err = true;
2653 }
2654 if (JUMP_P (BB_END (bb)) && !CROSSING_JUMP_P (BB_END (bb)))
2655 {
2656 error ("No region crossing jump at section boundary in bb %i",
2657 bb->index);
2658 err = true;
2659 }
2660 }
2661 else if (is_crossing)
2662 {
2663 error ("EDGE_CROSSING missing across section boundary");
2664 err = true;
2665 }
2666
2667 if ((e->flags & ~(EDGE_DFS_BACK
2668 | EDGE_CAN_FALLTHRU
2669 | EDGE_IRREDUCIBLE_LOOP
2670 | EDGE_LOOP_EXIT
2671 | EDGE_CROSSING
2672 | EDGE_PRESERVE)) == 0)
2673 n_branch++;
2674
2675 if (e->flags & EDGE_ABNORMAL_CALL)
2676 n_abnormal_call++;
2677
2678 if (e->flags & EDGE_SIBCALL)
2679 n_sibcall++;
2680
2681 if (e->flags & EDGE_EH)
2682 n_eh++;
2683
2684 if (e->flags & EDGE_ABNORMAL)
2685 n_abnormal++;
2686 }
2687
2688 if (!has_crossing_edge
2689 && JUMP_P (BB_END (bb))
2690 && CROSSING_JUMP_P (BB_END (bb)))
2691 {
2692 print_rtl_with_bb (stderr, get_insns (), TDF_BLOCKS | TDF_DETAILS);
2693 error ("Region crossing jump across same section in bb %i",
2694 bb->index);
2695 err = true;
2696 }
2697
2698 if (n_eh && !find_reg_note (BB_END (bb), REG_EH_REGION, NULL_RTX))
2699 {
2700 error ("missing REG_EH_REGION note at the end of bb %i", bb->index);
2701 err = true;
2702 }
2703 if (n_eh > 1)
2704 {
2705 error ("too many exception handling edges in bb %i", bb->index);
2706 err = true;
2707 }
2708 if (n_branch
2709 && (!JUMP_P (BB_END (bb))
2710 || (n_branch > 1 && (any_uncondjump_p (BB_END (bb))
2711 || any_condjump_p (BB_END (bb))))))
2712 {
2713 error ("too many outgoing branch edges from bb %i", bb->index);
2714 err = true;
2715 }
2716 if (n_fallthru && any_uncondjump_p (BB_END (bb)))
2717 {
2718 error ("fallthru edge after unconditional jump in bb %i", bb->index);
2719 err = true;
2720 }
2721 if (n_branch != 1 && any_uncondjump_p (BB_END (bb)))
2722 {
2723 error ("wrong number of branch edges after unconditional jump"
2724 " in bb %i", bb->index);
2725 err = true;
2726 }
2727 if (n_branch != 1 && any_condjump_p (BB_END (bb))
2728 && JUMP_LABEL (BB_END (bb)) != BB_HEAD (fallthru->dest))
2729 {
2730 error ("wrong number of branch edges after conditional jump"
2731 " in bb %i", bb->index);
2732 err = true;
2733 }
2734 if (n_abnormal_call && !CALL_P (BB_END (bb)))
2735 {
2736 error ("abnormal call edges for non-call insn in bb %i", bb->index);
2737 err = true;
2738 }
2739 if (n_sibcall && !CALL_P (BB_END (bb)))
2740 {
2741 error ("sibcall edges for non-call insn in bb %i", bb->index);
2742 err = true;
2743 }
2744 if (n_abnormal > n_eh
2745 && !(CALL_P (BB_END (bb))
2746 && n_abnormal == n_abnormal_call + n_sibcall)
2747 && (!JUMP_P (BB_END (bb))
2748 || any_condjump_p (BB_END (bb))
2749 || any_uncondjump_p (BB_END (bb))))
2750 {
2751 error ("abnormal edges for no purpose in bb %i", bb->index);
2752 err = true;
2753 }
2754
2755 int has_eh = -1;
2756 FOR_EACH_EDGE (e, ei, bb->preds)
2757 {
2758 if (has_eh == -1)
2759 has_eh = (e->flags & EDGE_EH);
2760 if ((e->flags & EDGE_EH) == has_eh)
2761 continue;
2762 error ("EH incoming edge mixed with non-EH incoming edges "
2763 "in bb %i", bb->index);
2764 err = true;
2765 break;
2766 }
2767 }
2768
2769 /* If there are partitions, do a sanity check on them: A basic block in
2770 a cold partition cannot dominate a basic block in a hot partition. */
2771 if (crtl->has_bb_partition && !err
2772 && current_ir_type () == IR_RTL_CFGLAYOUT)
2773 {
2774 auto_vec<basic_block> bbs_to_fix = find_partition_fixes (true);
2775 err = !bbs_to_fix.is_empty ();
2776 }
2777
2778 /* Clean up. */
2779 return err;
2780}
2781
2782/* Checks on the instructions within blocks. Currently checks that each
2783 block starts with a basic block note, and that basic block notes and
2784 control flow jumps are not found in the middle of the block. */
2785
2786static bool
2787rtl_verify_bb_insns (void)
2788{
2789 rtx_insn *x;
2790 bool err = false;
2791 basic_block bb;
2792
2793 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2794 {
2795 /* Now check the header of the basic
2796 block. It ought to contain an optional CODE_LABEL followed
2797 by NOTE_BASIC_BLOCK. */
2798 x = BB_HEAD (bb);
2799 if (LABEL_P (x))
2800 {
2801 if (BB_END (bb) == x)
2802 {
2803 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2804 bb->index);
2805 err = true;
2806 }
2807
2808 x = NEXT_INSN (x);
2809 }
2810
2811 if (!NOTE_INSN_BASIC_BLOCK_P (x) || NOTE_BASIC_BLOCK (x) != bb)
2812 {
2813 error ("NOTE_INSN_BASIC_BLOCK is missing for block %d",
2814 bb->index);
2815 err = true;
2816 }
2817
2818 if (BB_END (bb) == x)
2819 /* Do checks for empty blocks here. */
2820 ;
2821 else
2822 for (x = NEXT_INSN (x); x; x = NEXT_INSN (x))
2823 {
2824 if (NOTE_INSN_BASIC_BLOCK_P (x))
2825 {
2826 error ("NOTE_INSN_BASIC_BLOCK %d in middle of basic block %d",
2827 INSN_UID (x), bb->index);
2828 err = true;
2829 }
2830
2831 if (x == BB_END (bb))
2832 break;
2833
2834 if (control_flow_insn_p (x))
2835 {
2836 error ("in basic block %d:", bb->index);
2837 fatal_insn ("flow control insn inside a basic block", x);
2838 }
2839 }
2840 }
2841
2842 /* Clean up. */
2843 return err;
2844}
2845
2846/* Verify that block pointers for instructions in basic blocks, headers and
2847 footers are set appropriately. */
2848
2849static bool
2850rtl_verify_bb_pointers (void)
2851{
2852 bool err = false;
2853 basic_block bb;
2854
2855 /* Check the general integrity of the basic blocks. */
2856 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2857 {
2858 rtx_insn *insn;
2859
2860 if (!(bb->flags & BB_RTL))
2861 {
2862 error ("BB_RTL flag not set for block %d", bb->index);
2863 err = true;
2864 }
2865
2866 FOR_BB_INSNS (bb, insn)
2867 if (BLOCK_FOR_INSN (insn) != bb)
2868 {
2869 error ("insn %d basic block pointer is %d, should be %d",
2870 INSN_UID (insn),
2871 BLOCK_FOR_INSN (insn) ? BLOCK_FOR_INSN (insn)->index : 0,
2872 bb->index);
2873 err = true;
2874 }
2875
2876 for (insn = BB_HEADER (bb); insn; insn = NEXT_INSN (insn))
2877 if (!BARRIER_P (insn)
2878 && BLOCK_FOR_INSN (insn) != NULL)
2879 {
2880 error ("insn %d in header of bb %d has non-NULL basic block",
2881 INSN_UID (insn), bb->index);
2882 err = true;
2883 }
2884 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
2885 if (!BARRIER_P (insn)
2886 && BLOCK_FOR_INSN (insn) != NULL)
2887 {
2888 error ("insn %d in footer of bb %d has non-NULL basic block",
2889 INSN_UID (insn), bb->index);
2890 err = true;
2891 }
2892 }
2893
2894 /* Clean up. */
2895 return err;
2896}
2897
2898/* Verify the CFG and RTL consistency common for both underlying RTL and
2899 cfglayout RTL.
2900
2901 Currently it performs the following checks:
2902
2903 - overlapping of basic blocks
2904 - insns with wrong BLOCK_FOR_INSN pointers
2905 - headers of basic blocks (the NOTE_INSN_BASIC_BLOCK note)
2906 - tails of basic blocks (ensure that boundary is necessary)
2907 - scans body of the basic block for JUMP_INSN, CODE_LABEL
2908 and NOTE_INSN_BASIC_BLOCK
2909 - verify that no fall_thru edge crosses hot/cold partition boundaries
2910 - verify that there are no pending RTL branch predictions
2911 - verify that hot blocks are not dominated by cold blocks
2912
2913 In the future it can be extended to check a lot of other things as well
2914 (reachability of basic blocks, life information, etc.). */
2915
2916static bool
2917rtl_verify_flow_info_1 (void)
2918{
2919 bool err = false;
2920
2921 if (rtl_verify_bb_pointers ())
2922 err = true;
2923
2924 if (rtl_verify_bb_insns ())
2925 err = true;
2926
2927 if (rtl_verify_edges ())
2928 err = true;
2929
2930 return err;
2931}
2932
2933/* Walk the instruction chain and verify that bb head/end pointers
2934 are correct, and that instructions are in exactly one bb and have
2935 correct block pointers. */
2936
2937static bool
2938rtl_verify_bb_insn_chain (void)
2939{
2940 basic_block bb;
2941 bool err = false;
2942 rtx_insn *x;
2943 rtx_insn *last_head = get_last_insn ();
2944 basic_block *bb_info;
2945 const int max_uid = get_max_uid ();
2946
2947 bb_info = XCNEWVEC (basic_block, max_uid);
2948
2949 FOR_EACH_BB_REVERSE_FN (bb, cfun)
2950 {
2951 rtx_insn *head = BB_HEAD (bb);
2952 rtx_insn *end = BB_END (bb);
2953
2954 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
2955 {
2956 /* Verify the end of the basic block is in the INSN chain. */
2957 if (x == end)
2958 break;
2959
2960 /* And that the code outside of basic blocks has NULL bb field. */
2961 if (!BARRIER_P (x)
2962 && BLOCK_FOR_INSN (x) != NULL)
2963 {
2964 error ("insn %d outside of basic blocks has non-NULL bb field",
2965 INSN_UID (x));
2966 err = true;
2967 }
2968 }
2969
2970 if (!x)
2971 {
2972 error ("end insn %d for block %d not found in the insn stream",
2973 INSN_UID (end), bb->index);
2974 err = true;
2975 }
2976
2977 /* Work backwards from the end to the head of the basic block
2978 to verify the head is in the RTL chain. */
2979 for (; x != NULL_RTX; x = PREV_INSN (x))
2980 {
2981 /* While walking over the insn chain, verify insns appear
2982 in only one basic block. */
2983 if (bb_info[INSN_UID (x)] != NULL)
2984 {
2985 error ("insn %d is in multiple basic blocks (%d and %d)",
2986 INSN_UID (x), bb->index, bb_info[INSN_UID (x)]->index);
2987 err = true;
2988 }
2989
2990 bb_info[INSN_UID (x)] = bb;
2991
2992 if (x == head)
2993 break;
2994 }
2995 if (!x)
2996 {
2997 error ("head insn %d for block %d not found in the insn stream",
2998 INSN_UID (head), bb->index);
2999 err = true;
3000 }
3001
3002 last_head = PREV_INSN (x);
3003 }
3004
3005 for (x = last_head; x != NULL_RTX; x = PREV_INSN (x))
3006 {
3007 /* Check that the code before the first basic block has NULL
3008 bb field. */
3009 if (!BARRIER_P (x)
3010 && BLOCK_FOR_INSN (x) != NULL)
3011 {
3012 error ("insn %d outside of basic blocks has non-NULL bb field",
3013 INSN_UID (x));
3014 err = true;
3015 }
3016 }
3017 free (bb_info);
3018
3019 return err;
3020}
3021
3022/* Verify that fallthru edges point to adjacent blocks in layout order and
3023 that barriers exist after non-fallthru blocks. */
3024
3025static bool
3026rtl_verify_fallthru (void)
3027{
3028 basic_block bb;
3029 bool err = false;
3030
3031 FOR_EACH_BB_REVERSE_FN (bb, cfun)
3032 {
3033 edge e;
3034
3035 e = find_fallthru_edge (bb->succs);
3036 if (!e)
3037 {
3038 rtx_insn *insn;
3039
3040 /* Ensure existence of barrier in BB with no fallthru edges. */
3041 for (insn = NEXT_INSN (BB_END (bb)); ; insn = NEXT_INSN (insn))
3042 {
3043 if (!insn || NOTE_INSN_BASIC_BLOCK_P (insn))
3044 {
3045 error ("missing barrier after block %i", bb->index);
3046 err = true;
3047 break;
3048 }
3049 if (BARRIER_P (insn))
3050 break;
3051 }
3052 }
3053 else if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
3054 && e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun))
3055 {
3056 rtx_insn *insn;
3057
3058 if (e->src->next_bb != e->dest)
3059 {
3060 error
3061 ("verify_flow_info: Incorrect blocks for fallthru %i->%i",
3062 e->src->index, e->dest->index);
3063 err = true;
3064 }
3065 else
3066 for (insn = NEXT_INSN (BB_END (e->src)); insn != BB_HEAD (e->dest);
3067 insn = NEXT_INSN (insn))
3068 if (BARRIER_P (insn) || NONDEBUG_INSN_P (insn))
3069 {
3070 error ("verify_flow_info: Incorrect fallthru %i->%i",
3071 e->src->index, e->dest->index);
3072 error ("wrong insn in the fallthru edge");
3073 debug_rtx (insn);
3074 err = true;
3075 }
3076 }
3077 }
3078
3079 return err;
3080}
3081
3082/* Verify that blocks are laid out in consecutive order. While walking the
3083 instructions, verify that all expected instructions are inside the basic
3084 blocks, and that all returns are followed by barriers. */
3085
3086static bool
3087rtl_verify_bb_layout (void)
3088{
3089 basic_block bb;
3090 bool err = false;
3091 rtx_insn *x, *y;
3092 int num_bb_notes;
3093 rtx_insn * const rtx_first = get_insns ();
3094 basic_block last_bb_seen = ENTRY_BLOCK_PTR_FOR_FN (cfun), curr_bb = NULL;
3095
3096 num_bb_notes = 0;
3097
3098 for (x = rtx_first; x; x = NEXT_INSN (x))
3099 {
3100 if (NOTE_INSN_BASIC_BLOCK_P (x))
3101 {
3102 bb = NOTE_BASIC_BLOCK (x);
3103
3104 num_bb_notes++;
3105 if (bb != last_bb_seen->next_bb)
3106 internal_error ("basic blocks not laid down consecutively");
3107
3108 curr_bb = last_bb_seen = bb;
3109 }
3110
3111 if (!curr_bb)
3112 {
3113 switch (GET_CODE (x))
3114 {
3115 case BARRIER:
3116 case NOTE:
3117 break;
3118
3119 case CODE_LABEL:
3120 /* An ADDR_VEC is placed outside any basic block. */
3121 if (NEXT_INSN (x)
3122 && JUMP_TABLE_DATA_P (NEXT_INSN (x)))
3123 x = NEXT_INSN (x);
3124
3125 /* But in any case, non-deletable labels can appear anywhere. */
3126 break;
3127
3128 default:
3129 fatal_insn ("insn outside basic block", x);
3130 }
3131 }
3132
3133 if (JUMP_P (x)
3134 && returnjump_p (x) && ! condjump_p (x)
3135 && ! ((y = next_nonnote_nondebug_insn (x))
3136 && BARRIER_P (y)))
3137 fatal_insn ("return not followed by barrier", x);
3138
3139 if (curr_bb && x == BB_END (curr_bb))
3140 curr_bb = NULL;
3141 }
3142
3143 if (num_bb_notes != n_basic_blocks_for_fn (cfun) - NUM_FIXED_BLOCKS)
3144 internal_error
3145 ("number of bb notes in insn chain (%d) != n_basic_blocks (%d)",
3146 num_bb_notes, n_basic_blocks_for_fn (cfun));
3147
3148 return err;
3149}
3150
3151/* Verify the CFG and RTL consistency common for both underlying RTL and
3152 cfglayout RTL, plus consistency checks specific to linearized RTL mode.
3153
3154 Currently it performs the following checks:
3155 - all checks of rtl_verify_flow_info_1
3156 - test head/end pointers
3157 - check that blocks are laid out in consecutive order
3158 - check that all insns are in the basic blocks
3159 (except the switch handling code, barriers and notes)
3160 - check that all returns are followed by barriers
3161 - check that all fallthru edges point to the adjacent blocks
3162 - verify that there is a single hot/cold partition boundary after bbro */
3163
3164static bool
3165rtl_verify_flow_info (void)
3166{
3167 bool err = false;
3168
3169 if (rtl_verify_flow_info_1 ())
3170 err = true;
3171
3172 if (rtl_verify_bb_insn_chain ())
3173 err = true;
3174
3175 if (rtl_verify_fallthru ())
3176 err = true;
3177
3178 if (rtl_verify_bb_layout ())
3179 err = true;
3180
3181 if (verify_hot_cold_block_grouping ())
3182 err = true;
3183
3184 return err;
3185}
3186
3187 /* Assume that the preceding pass has possibly eliminated jump instructions
3188 or converted the unconditional jumps. Eliminate the edges from the CFG.
3189 Return true if any edges are eliminated. */
3190
3191bool
3192purge_dead_edges (basic_block bb)
3193{
3194 edge e;
3195 rtx_insn *insn = BB_END (bb);
3196 rtx note;
3197 bool purged = false;
3198 bool found;
3199 edge_iterator ei;
3200
3201 if ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb))
3202 do
3203 insn = PREV_INSN (insn);
3204 while ((DEBUG_INSN_P (insn) || NOTE_P (insn)) && insn != BB_HEAD (bb));
3205
3206 /* If this instruction cannot trap, remove REG_EH_REGION notes. */
3207 if (NONJUMP_INSN_P (insn)
3208 && (note = find_reg_note (insn, REG_EH_REGION, NULL)))
3209 {
3210 rtx eqnote;
3211
3212 if (! may_trap_p (PATTERN (insn))
3213 || ((eqnote = find_reg_equal_equiv_note (insn))
3214 && ! may_trap_p (XEXP (eqnote, 0))))
3215 remove_note (insn, note);
3216 }
3217
3218 /* Cleanup abnormal edges caused by exceptions or non-local gotos. */
3219 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3220 {
3221 bool remove = false;
3222
3223 /* There are three types of edges we need to handle correctly here: EH
3224 edges, abnormal call EH edges, and abnormal call non-EH edges. The
3225 latter can appear when nonlocal gotos are used. */
3226 if (e->flags & EDGE_ABNORMAL_CALL)
3227 {
3228 if (!CALL_P (insn))
3229 remove = true;
3230 else if (can_nonlocal_goto (insn))
3231 ;
3232 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3233 ;
3234 else if (flag_tm && find_reg_note (insn, REG_TM, NULL))
3235 ;
3236 else
3237 remove = true;
3238 }
3239 else if (e->flags & EDGE_EH)
3240 remove = !can_throw_internal (insn);
3241
3242 if (remove)
3243 {
3244 remove_edge (e);
3245 df_set_bb_dirty (bb);
3246 purged = true;
3247 }
3248 else
3249 ei_next (&ei);
3250 }
3251
3252 if (JUMP_P (insn))
3253 {
3254 rtx note;
3255 edge b,f;
3256 edge_iterator ei;
3257
3258 /* We only care about conditional jumps and simplejumps. */
3259 if (!any_condjump_p (insn)
3260 && !returnjump_p (insn)
3261 && !simplejump_p (insn))
3262 return purged;
3263
3264 /* Branch probability/prediction notes are defined only for
3265 condjumps. We've possibly turned condjump into simplejump. */
3266 if (simplejump_p (insn))
3267 {
3268 note = find_reg_note (insn, REG_BR_PROB, NULL);
3269 if (note)
3270 remove_note (insn, note);
3271 while ((note = find_reg_note (insn, REG_BR_PRED, NULL)))
3272 remove_note (insn, note);
3273 }
3274
3275 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3276 {
3277 /* Avoid letting abnormal flags leak from computed jumps turned
3278 into simplejumps. */
3279
3280 e->flags &= ~EDGE_ABNORMAL;
3281
3282 /* See if this edge is one we should keep. */
3283 if ((e->flags & EDGE_FALLTHRU) && any_condjump_p (insn))
3284 /* A conditional jump can fall through into the next
3285 block, so we should keep the edge. */
3286 {
3287 ei_next (&ei);
3288 continue;
3289 }
3290 else if (e->dest != EXIT_BLOCK_PTR_FOR_FN (cfun)
3291 && BB_HEAD (e->dest) == JUMP_LABEL (insn))
3292 /* If the destination block is the target of the jump,
3293 keep the edge. */
3294 {
3295 ei_next (&ei);
3296 continue;
3297 }
3298 else if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun)
3299 && returnjump_p (insn))
3300 /* If the destination block is the exit block, and this
3301 instruction is a return, then keep the edge. */
3302 {
3303 ei_next (&ei);
3304 continue;
3305 }
3306 else if ((e->flags & EDGE_EH) && can_throw_internal (insn))
3307 /* Keep the edges that correspond to exceptions thrown by
3308 this instruction and rematerialize the EDGE_ABNORMAL
3309 flag we just cleared above. */
3310 {
3311 e->flags |= EDGE_ABNORMAL;
3312 ei_next (&ei);
3313 continue;
3314 }
3315
3316 /* We do not need this edge. */
3317 df_set_bb_dirty (bb);
3318 purged = true;
3319 remove_edge (e);
3320 }
3321
3322 if (EDGE_COUNT (bb->succs) == 0 || !purged)
3323 return purged;
3324
3325 if (dump_file)
3326 fprintf (dump_file, "Purged edges from bb %i\n", bb->index);
3327
3328 if (!optimize)
3329 return purged;
3330
3331 /* Redistribute probabilities. */
3332 if (single_succ_p (bb))
3333 {
3334 single_succ_edge (bb)->probability = profile_probability::always ();
3335 }
3336 else
3337 {
3338 note = find_reg_note (insn, REG_BR_PROB, NULL);
3339 if (!note)
3340 return purged;
3341
3342 b = BRANCH_EDGE (bb);
3343 f = FALLTHRU_EDGE (bb);
3344 b->probability = profile_probability::from_reg_br_prob_note
3345 (XINT (note, 0));
3346 f->probability = b->probability.invert ();
3347 }
3348
3349 return purged;
3350 }
3351 else if (CALL_P (insn) && SIBLING_CALL_P (insn))
3352 {
3353 /* First, there should not be any EH or ABCALL edges resulting
3354 from non-local gotos and the like. If there were, we shouldn't
3355 have created the sibcall in the first place. Second, there
3356 should of course never have been a fallthru edge. */
3357 gcc_assert (single_succ_p (bb));
3358 gcc_assert (single_succ_edge (bb)->flags
3359 == (EDGE_SIBCALL | EDGE_ABNORMAL));
3360
3361 return false;
3362 }
3363
3364 /* If we don't see a jump insn, we don't know exactly why the block would
3365 have been broken at this point. Look for a simple, non-fallthru edge,
3366 as these are only created by conditional branches. If we find such an
3367 edge we know that there used to be a jump here and can then safely
3368 remove all non-fallthru edges. */
3369 found = false;
3370 FOR_EACH_EDGE (e, ei, bb->succs)
3371 if (! (e->flags & (EDGE_COMPLEX | EDGE_FALLTHRU)))
3372 {
3373 found = true;
3374 break;
3375 }
3376
3377 if (!found)
3378 return purged;
3379
3380 /* Remove all but the fake and fallthru edges. The fake edge may be
3381 the only successor for this block in the case of noreturn
3382 calls. */
3383 for (ei = ei_start (bb->succs); (e = ei_safe_edge (ei)); )
3384 {
3385 if (!(e->flags & (EDGE_FALLTHRU | EDGE_FAKE)))
3386 {
3387 df_set_bb_dirty (bb);
3388 remove_edge (e);
3389 purged = true;
3390 }
3391 else
3392 ei_next (&ei);
3393 }
3394
3395 gcc_assert (single_succ_p (bb));
3396
3397 single_succ_edge (bb)->probability = profile_probability::always ();
3398
3399 if (dump_file)
3400 fprintf (dump_file, "Purged non-fallthru edges from bb %i\n",
3401 bb->index);
3402 return purged;
3403}
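
/* A minimal usage sketch (illustrative): a pass that rewrites the jump at
   the end of BB, e.g. folding a conditional branch into an unconditional
   one, keeps the CFG consistent with

     if (purge_dead_edges (bb))
       ... edges were removed; recompute anything cached about BB ...

   purge_all_dead_edges below does the same over every block.  */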
3404
3405/* Search all basic blocks for potentially dead edges and purge them. Return
3406 true if some edge has been eliminated. */
3407
3408bool
3409purge_all_dead_edges (void)
3410{
3411 bool purged = false;
3412 basic_block bb;
3413
3414 FOR_EACH_BB_FN (bb, cfun)
3415 if (purge_dead_edges (bb))
3416 purged = true;
3417
3418 return purged;
3419}
3420
3421/* This is used by a few passes that emit some instructions after abnormal
3422 calls, moving the basic block's end, while they in fact do want to emit
3423 them on the fallthru edge. Look for abnormal call edges, search backward
3424 for the call in the block, and insert the instructions on the edge instead.
3425
3426 Similarly, handle instructions throwing exceptions internally.
3427
3428 Return true when instructions have been found and inserted on edges. */
3429
3430bool
3431fixup_abnormal_edges (void)
3432{
3433 bool inserted = false;
3434 basic_block bb;
3435
3436 FOR_EACH_BB_FN (bb, cfun)
3437 {
3438 edge e;
3439 edge_iterator ei;
3440
3441 /* Look for cases we are interested in - calls or instructions causing
3442 exceptions. */
3443 FOR_EACH_EDGE (e, ei, bb->succs)
3444 if ((e->flags & EDGE_ABNORMAL_CALL)
3445 || ((e->flags & (EDGE_ABNORMAL | EDGE_EH))
3446 == (EDGE_ABNORMAL | EDGE_EH)))
3447 break;
3448
3449 if (e && !CALL_P (BB_END (bb)) && !can_throw_internal (BB_END (bb)))
3450 {
3451 rtx_insn *insn;
3452
3453 /* Get past the new insns generated. Allow notes, as the insns
3454 may already be deleted. */
3455 insn = BB_END (bb);
3456 while ((NONJUMP_INSN_P (insn) || NOTE_P (insn))
3457 && !can_throw_internal (insn)
3458 && insn != BB_HEAD (bb))
3459 insn = PREV_INSN (insn);
3460
3461 if (CALL_P (insn) || can_throw_internal (insn))
3462 {
3463 rtx_insn *stop, *next;
3464
3465 e = find_fallthru_edge (bb->succs);
3466
3467 stop = NEXT_INSN (BB_END (bb));
3468 BB_END (bb) = insn;
3469
3470 for (insn = NEXT_INSN (insn); insn != stop; insn = next)
3471 {
3472 next = NEXT_INSN (insn);
3473 if (INSN_P (insn))
3474 {
3475 delete_insn (insn);
3476
3477 /* Sometimes there's still the return value USE.
3478 If it's placed after a trapping call (i.e. that
3479 call is the last insn anyway), we have no fallthru
3480 edge. Simply delete this use and don't try to insert
3481 on the non-existent edge.
3482 Similarly, sometimes a call that can throw is
3483 followed in the source with __builtin_unreachable (),
3484 meaning that there is UB if the call returns rather
3485 than throws. If there weren't any instructions
3486 following such calls before, supposedly even the ones
3487 we've deleted aren't significant and can be
3488 removed. */
3489 if (e)
3490 {
3491 /* We're not deleting it, we're moving it. */
3492 insn->set_undeleted ();
3493 SET_PREV_INSN (insn) = NULL_RTX;
3494 SET_NEXT_INSN (insn) = NULL_RTX;
3495
3496 insert_insn_on_edge (insn, e);
3497 inserted = true;
3498 }
3499 }
3500 else if (!BARRIER_P (insn))
3501 set_block_for_insn (insn, NULL);
3502 }
3503 }
3504
3505 /* It may be that we don't find any trapping insn. In this
3506 case we discovered quite late that the insn that had been
3507 marked as can_throw_internal in fact couldn't trap at all.
3508 So we should in fact delete the EH edges out of the block. */
3509 else
3510 purge_dead_edges (bb);
3511 }
3512 }
3513
3514 return inserted;
3515}
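
/* Illustration (a sketch): if a pass emitted INSN2 after a call that ends
   BB abnormally,

     BB:  call_insn   <- abnormal/EH edges leave here
          insn2       <- really meant for the fallthru path

   the function above truncates BB at the call and re-queues INSN2 on the
   fallthru edge via insert_insn_on_edge.  */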
3516
3517/* Delete the unconditional jump INSN and adjust the CFG correspondingly.
3518 Note that the INSN should be deleted *after* removing dead edges, so
3519 that the kept edge is the fallthrough edge for a (set (pc) (pc))
3520 but not for a (set (pc) (label_ref FOO)). */
3521
3522void
3523update_cfg_for_uncondjump (rtx_insn *insn)
3524{
3525 basic_block bb = BLOCK_FOR_INSN (insn);
3526 gcc_assert (BB_END (bb) == insn);
3527
3528 purge_dead_edges (bb);
3529
3530 if (current_ir_type () != IR_RTL_CFGLAYOUT)
3531 {
3532 if (!find_fallthru_edge (bb->succs))
3533 {
3534 auto barrier = next_nonnote_nondebug_insn (insn);
3535 if (!barrier || !BARRIER_P (barrier))
3536 emit_barrier_after (insn);
3537 }
3538 return;
3539 }
3540
3541 delete_insn (insn);
3542 if (EDGE_COUNT (bb->succs) == 1)
3543 {
3544 rtx_insn *insn;
3545
3546 single_succ_edge (bb)->flags |= EDGE_FALLTHRU;
3547
3548 /* Remove barriers from the footer if there are any. */
3549 for (insn = BB_FOOTER (bb); insn; insn = NEXT_INSN (insn))
3550 if (BARRIER_P (insn))
3551 {
3552 if (PREV_INSN (insn))
3553 SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
3554 else
3555 BB_FOOTER (bb) = NEXT_INSN (insn);
3556 if (NEXT_INSN (insn))
3557 SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
3558 }
3559 else if (LABEL_P (insn))
3560 break;
3561 }
3562}
3563
3564 /* Cut the insns from FIRST to LAST out of the insn stream. */
3565
3566rtx_insn *
3567unlink_insn_chain (rtx_insn *first, rtx_insn *last)
3568{
3569 rtx_insn *prevfirst = PREV_INSN (first);
3570 rtx_insn *nextlast = NEXT_INSN (last);
3571
3572 SET_PREV_INSN (first) = NULL;
3573 SET_NEXT_INSN (last) = NULL;
3574 if (prevfirst)
3575 SET_NEXT_INSN (prevfirst) = nextlast;
3576 if (nextlast)
3577 SET_PREV_INSN (nextlast) = prevfirst;
3578 else
3579 set_last_insn (prevfirst);
3580 if (!prevfirst)
3581 set_first_insn (nextlast);
3582 return first;
3583}
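
/* E.g. (illustrative) cutting a whole block's insns out of the stream:

     rtx_insn *seq = unlink_insn_chain (BB_HEAD (bb), BB_END (bb));

   leaves SEQ as a detached chain (NULL PREV/NEXT at its ends) that can be
   re-emitted elsewhere.  */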
3584
3585/* Skip over inter-block insns occurring after BB which are typically
3586 associated with BB (e.g., barriers). If there are any such insns,
3587 we return the last one. Otherwise, we return the end of BB. */
3588
3589static rtx_insn *
3590skip_insns_after_block (basic_block bb)
3591{
3592 rtx_insn *insn, *last_insn, *next_head, *prev;
3593
3594 next_head = NULL;
3595 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
3596 next_head = BB_HEAD (bb->next_bb);
3597
3598 for (last_insn = insn = BB_END (bb); (insn = NEXT_INSN (insn)) != 0; )
3599 {
3600 if (insn == next_head)
3601 break;
3602
3603 switch (GET_CODE (insn))
3604 {
3605 case BARRIER:
3606 last_insn = insn;
3607 continue;
3608
3609 case NOTE:
3610 gcc_assert (NOTE_KIND (insn) != NOTE_INSN_BLOCK_END);
3611 continue;
3612
3613 case CODE_LABEL:
3614 if (NEXT_INSN (insn)
3615 && JUMP_TABLE_DATA_P (NEXT_INSN (insn)))
3616 {
3617 insn = NEXT_INSN (insn);
3618 last_insn = insn;
3619 continue;
3620 }
3621 break;
3622
3623 default:
3624 break;
3625 }
3626
3627 break;
3628 }
3629
3630 /* It is possible to hit a contradictory sequence. For instance:
3631
3632 jump_insn
3633 NOTE_INSN_BLOCK_BEG
3634 barrier
3635
3636 where the barrier belongs to the jump_insn, but the note does not. This
3637 can be created by removing the basic block originally following
3638 NOTE_INSN_BLOCK_BEG. In such a case, reorder the notes. */
3639
3640 for (insn = last_insn; insn != BB_END (bb); insn = prev)
3641 {
3642 prev = PREV_INSN (insn);
3643 if (NOTE_P (insn))
3644 switch (NOTE_KIND (insn))
3645 {
3646 case NOTE_INSN_BLOCK_END:
3647 gcc_unreachable ();
3648 break;
3649 case NOTE_INSN_DELETED:
3650 case NOTE_INSN_DELETED_LABEL:
3651 case NOTE_INSN_DELETED_DEBUG_LABEL:
3652 continue;
3653 default:
3654 reorder_insns (insn, insn, last_insn);
3655 }
3656 }
3657
3658 return last_insn;
3659}
3660
3661/* Locate or create a label for a given basic block. */
3662
3663static rtx_insn *
3664label_for_bb (basic_block bb)
3665{
3666 rtx_insn *label = BB_HEAD (bb);
3667
3668 if (!LABEL_P (label))
3669 {
3670 if (dump_file)
3671 fprintf (dump_file, "Emitting label for block %d\n", bb->index);
3672
3673 label = block_label (bb);
3674 }
3675
3676 return label;
3677}
3678
3679/* Locate the effective beginning and end of the insn chain for each
3680 block, as defined by skip_insns_after_block above. */
3681
3682static void
3683record_effective_endpoints (void)
3684{
3685 rtx_insn *next_insn;
3686 basic_block bb;
3687 rtx_insn *insn;
3688
3689 for (insn = get_insns ();
3690 insn
3691 && NOTE_P (insn)
3692 && NOTE_KIND (insn) != NOTE_INSN_BASIC_BLOCK;
3693 insn = NEXT_INSN (insn))
3694 continue;
3695 /* No basic blocks at all? */
3696 gcc_assert (insn);
3697
3698 if (PREV_INSN (insn))
3699 cfg_layout_function_header =
3700 unlink_insn_chain (get_insns (), PREV_INSN (insn));
3701 else
3702 cfg_layout_function_header = NULL;
3703
3704 next_insn = get_insns ();
3705 FOR_EACH_BB_FN (bb, cfun)
3706 {
3707 rtx_insn *end;
3708
3709 if (PREV_INSN (BB_HEAD (bb)) && next_insn != BB_HEAD (bb))
3710 BB_HEADER (bb) = unlink_insn_chain (next_insn,
3711 PREV_INSN (BB_HEAD (bb)));
3712 end = skip_insns_after_block (bb);
3713 if (NEXT_INSN (BB_END (bb)) && BB_END (bb) != end)
3714 BB_FOOTER (bb) = unlink_insn_chain (NEXT_INSN (BB_END (bb)), end);
3715 next_insn = NEXT_INSN (BB_END (bb));
3716 }
3717
3718 cfg_layout_function_footer = next_insn;
3719 if (cfg_layout_function_footer)
3720 cfg_layout_function_footer = unlink_insn_chain (cfg_layout_function_footer, get_last_insn ());
3721}
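
/* After this runs, the insn chain in cfglayout mode is conceptually
   partitioned as

     cfg_layout_function_header
     [ BB_HEADER (bb) | BB_HEAD (bb) .. BB_END (bb) | BB_FOOTER (bb) ] ...
     cfg_layout_function_footer

   where the headers/footers hold the surrounding notes, labels, barriers
   and jump tables recorded above.  */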
3722
3723namespace {
3724
3725const pass_data pass_data_into_cfg_layout_mode =
3726{
3727 RTL_PASS, /* type */
3728 "into_cfglayout", /* name */
3729 OPTGROUP_NONE, /* optinfo_flags */
3730 TV_CFG, /* tv_id */
3731 0, /* properties_required */
3732 PROP_cfglayout, /* properties_provided */
3733 0, /* properties_destroyed */
3734 0, /* todo_flags_start */
3735 0, /* todo_flags_finish */
3736};
3737
3738class pass_into_cfg_layout_mode : public rtl_opt_pass
3739{
3740public:
3741 pass_into_cfg_layout_mode (gcc::context *ctxt)
3742 : rtl_opt_pass (pass_data_into_cfg_layout_mode, ctxt)
3743 {}
3744
3745 /* opt_pass methods: */
3746 unsigned int execute (function *) final override
3747 {
3748 cfg_layout_initialize (0);
3749 return 0;
3750 }
3751
3752}; // class pass_into_cfg_layout_mode
3753
3754} // anon namespace
3755
3756rtl_opt_pass *
3757make_pass_into_cfg_layout_mode (gcc::context *ctxt)
3758{
3759 return new pass_into_cfg_layout_mode (ctxt);
3760}
3761
3762namespace {
3763
3764const pass_data pass_data_outof_cfg_layout_mode =
3765{
3766 RTL_PASS, /* type */
3767 "outof_cfglayout", /* name */
3768 OPTGROUP_NONE, /* optinfo_flags */
3769 TV_CFG, /* tv_id */
3770 0, /* properties_required */
3771 0, /* properties_provided */
3772 PROP_cfglayout, /* properties_destroyed */
3773 0, /* todo_flags_start */
3774 0, /* todo_flags_finish */
3775};
3776
3777class pass_outof_cfg_layout_mode : public rtl_opt_pass
3778{
3779public:
3780 pass_outof_cfg_layout_mode (gcc::context *ctxt)
3781 : rtl_opt_pass (pass_data_outof_cfg_layout_mode, ctxt)
3782 {}
3783
3784 /* opt_pass methods: */
3785 unsigned int execute (function *) final override;
3786
3787}; // class pass_outof_cfg_layout_mode
3788
3789unsigned int
3790pass_outof_cfg_layout_mode::execute (function *fun)
3791{
3792 basic_block bb;
3793
3794 FOR_EACH_BB_FN (bb, fun)
3795 if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (fun))
3796 bb->aux = bb->next_bb;
3797
3798 cfg_layout_finalize ();
3799
3800 return 0;
3801}
3802
3803} // anon namespace
3804
3805rtl_opt_pass *
3806make_pass_outof_cfg_layout_mode (gcc::context *ctxt)
3807{
3808 return new pass_outof_cfg_layout_mode (ctxt);
3809}
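
/* Illustrative note (an assumption, not original text): these two passes
   bracket the cfglayout-mode portion of the RTL pipeline; the pass
   manager schedules them, conceptually as in

     NEXT_PASS (pass_into_cfg_layout_mode);
       ...passes that reorder blocks via the bb->aux chain...
     NEXT_PASS (pass_outof_cfg_layout_mode);

   with pass_outof_cfg_layout_mode recording the current physical block
   order in bb->aux so that cfg_layout_finalize can linearize it back
   into the insn stream.  */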


/* Link the basic blocks in the correct order, compacting the basic
   block queue while at it.  If STAY_IN_CFGLAYOUT_MODE is false, this
   function also clears the basic block header and footer fields.

   This function is usually called after a pass (e.g. tracer) finishes
   some transformations while in cfglayout mode.  The required sequence
   of the basic blocks is in a linked list along the bb->aux field.
   This function re-links the basic block prev_bb and next_bb pointers
   accordingly, and it compacts and renumbers the blocks.

   FIXME: This currently works only for RTL, but the only RTL-specific
   bits are the STAY_IN_CFGLAYOUT_MODE bits.  The tracer pass was moved
   to GIMPLE a long time ago, but it doesn't relink the basic block
   chain.  It could do that (to give better initial RTL) if this function
   is made IR-agnostic (and moved to cfganal.cc or cfg.cc while at it).  */

void
relink_block_chain (bool stay_in_cfglayout_mode)
{
  basic_block bb, prev_bb;
  int index;

  /* Maybe dump the re-ordered sequence.  */
  if (dump_file)
    {
      fprintf (dump_file, "Reordered sequence:\n");
      for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb,
	   index = NUM_FIXED_BLOCKS;
	   bb;
	   bb = (basic_block) bb->aux, index++)
	{
	  fprintf (dump_file, " %i ", index);
	  if (get_bb_original (bb))
	    fprintf (dump_file, "duplicate of %i\n",
		     get_bb_original (bb)->index);
	  else if (forwarder_block_p (bb)
		   && !LABEL_P (BB_HEAD (bb)))
	    fprintf (dump_file, "compensation\n");
	  else
	    fprintf (dump_file, "bb %i\n", bb->index);
	}
    }

  /* Now reorder the blocks.  */
  prev_bb = ENTRY_BLOCK_PTR_FOR_FN (cfun);
  bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;
  for (; bb; prev_bb = bb, bb = (basic_block) bb->aux)
    {
      bb->prev_bb = prev_bb;
      prev_bb->next_bb = bb;
    }
  prev_bb->next_bb = EXIT_BLOCK_PTR_FOR_FN (cfun);
  EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb = prev_bb;

  /* Then, clean up the aux fields.  */
  FOR_ALL_BB_FN (bb, cfun)
    {
      bb->aux = NULL;
      if (!stay_in_cfglayout_mode)
	BB_HEADER (bb) = BB_FOOTER (bb) = NULL;
    }

  /* Maybe reset the original copy tables, they are not valid anymore
     when we renumber the basic blocks in compact_blocks.  If we are
     going out of cfglayout mode, don't re-allocate the tables.  */
  if (original_copy_tables_initialized_p ())
    free_original_copy_tables ();
  if (stay_in_cfglayout_mode)
    initialize_original_copy_tables ();

  /* Finally, put basic_block_info in the new order.  */
  compact_blocks ();
}
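
/* Usage sketch (illustrative only): a cfglayout-mode pass that wants
   blocks laid out as B1, B3, B2 chains them through bb->aux and then
   calls relink_block_chain, e.g.

     b1->aux = b3;
     b3->aux = b2;
     b2->aux = NULL;                 the chain ends before EXIT
     relink_block_chain (true);      stay in cfglayout mode

   where b1 must be ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb and every
   block appears on the chain exactly once.  */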


/* Given a reorder chain, rearrange the code to match.  */

static void
fixup_reorder_chain (void)
{
  basic_block bb;
  rtx_insn *insn = NULL;

  if (cfg_layout_function_header)
    {
      set_first_insn (cfg_layout_function_header);
      insn = cfg_layout_function_header;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
    }

  /* First do the bulk reordering -- rechain the blocks without regard to
     the needed changes to jumps and labels.  */

  for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb;
       bb = (basic_block) bb->aux)
    {
      if (BB_HEADER (bb))
	{
	  if (insn)
	    SET_NEXT_INSN (insn) = BB_HEADER (bb);
	  else
	    set_first_insn (BB_HEADER (bb));
	  SET_PREV_INSN (BB_HEADER (bb)) = insn;
	  insn = BB_HEADER (bb);
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
      if (insn)
	SET_NEXT_INSN (insn) = BB_HEAD (bb);
      else
	set_first_insn (BB_HEAD (bb));
      SET_PREV_INSN (BB_HEAD (bb)) = insn;
      insn = BB_END (bb);
      if (BB_FOOTER (bb))
	{
	  SET_NEXT_INSN (insn) = BB_FOOTER (bb);
	  SET_PREV_INSN (BB_FOOTER (bb)) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	}
    }

  SET_NEXT_INSN (insn) = cfg_layout_function_footer;
  if (cfg_layout_function_footer)
    SET_PREV_INSN (cfg_layout_function_footer) = insn;

  while (NEXT_INSN (insn))
    insn = NEXT_INSN (insn);

  set_last_insn (insn);
  if (flag_checking)
    verify_insn_chain ();

  /* Now add jumps and labels as needed to match the blocks' new
     outgoing edges.  */

  bool remove_unreachable_blocks = false;
  for (bb = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb; bb;
       bb = (basic_block) bb->aux)
    {
      edge e_fall, e_taken, e;
      rtx_insn *bb_end_insn;
      rtx ret_label = NULL_RTX;
      basic_block nb;
      edge_iterator ei;
      bool asm_goto = false;

      if (EDGE_COUNT (bb->succs) == 0)
	continue;

      /* Find the old fallthru edge, and another non-EH edge for
	 a taken jump.  */
      e_taken = e_fall = NULL;

      FOR_EACH_EDGE (e, ei, bb->succs)
	if (e->flags & EDGE_FALLTHRU)
	  e_fall = e;
	else if (! (e->flags & EDGE_EH))
	  e_taken = e;

      bb_end_insn = BB_END (bb);
      if (rtx_jump_insn *bb_end_jump = dyn_cast <rtx_jump_insn *> (bb_end_insn))
	{
	  ret_label = JUMP_LABEL (bb_end_jump);
	  if (any_condjump_p (bb_end_jump))
	    {
	      /* This might happen if the conditional jump has side
		 effects and could therefore not be optimized away.
		 Make the basic block end with a barrier in order
		 to prevent rtl_verify_flow_info from complaining.  */
	      if (!e_fall)
		{
		  gcc_assert (!onlyjump_p (bb_end_jump)
			      || returnjump_p (bb_end_jump)
			      || (e_taken->flags & EDGE_CROSSING));
		  emit_barrier_after (bb_end_jump);
		  continue;
		}

	      /* If the old fallthru is still next, nothing to do.  */
	      if (bb->aux == e_fall->dest
		  || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
		continue;

	      /* The degenerate case of a conditional jump jumping to the
		 next instruction can happen for jumps with side effects.
		 We need to construct a forwarder block and this will be
		 done just fine by force_nonfallthru below.  */
	      if (!e_taken)
		;

	      /* There is another special case: if *neither* block is next,
		 such as happens at the very end of a function, then we'll
		 need to add a new unconditional jump.  Choose the taken
		 edge based on known or assumed probability.  */
	      else if (bb->aux != e_taken->dest)
		{
		  rtx note = find_reg_note (bb_end_jump, REG_BR_PROB, 0);

		  if (note
		      && profile_probability::from_reg_br_prob_note
			   (XINT (note, 0)) < profile_probability::even ()
		      && invert_jump (bb_end_jump,
				      (e_fall->dest
				       == EXIT_BLOCK_PTR_FOR_FN (cfun)
				       ? NULL_RTX
				       : label_for_bb (e_fall->dest)), 0))
		    {
		      e_fall->flags &= ~EDGE_FALLTHRU;
		      gcc_checking_assert (could_fall_through
					   (e_taken->src, e_taken->dest));
		      e_taken->flags |= EDGE_FALLTHRU;
		      update_br_prob_note (bb);
		      e = e_fall, e_fall = e_taken, e_taken = e;
		    }
		}

	      /* If the "jumping" edge is a crossing edge, and the fall
		 through edge is non-crossing, leave things as they are.  */
	      else if ((e_taken->flags & EDGE_CROSSING)
		       && !(e_fall->flags & EDGE_CROSSING))
		continue;

	      /* Otherwise we can try to invert the jump.  This will
		 basically never fail, however, keep up the pretense.  */
	      else if (invert_jump (bb_end_jump,
				    (e_fall->dest
				     == EXIT_BLOCK_PTR_FOR_FN (cfun)
				     ? NULL_RTX
				     : label_for_bb (e_fall->dest)), 0))
		{
		  e_fall->flags &= ~EDGE_FALLTHRU;
		  gcc_checking_assert (could_fall_through
				       (e_taken->src, e_taken->dest));
		  e_taken->flags |= EDGE_FALLTHRU;
		  update_br_prob_note (bb);
		  if (LABEL_NUSES (ret_label) == 0
		      && single_pred_p (e_taken->dest))
		    delete_insn (as_a<rtx_insn *> (ret_label));
		  continue;
		}
	    }
	  else if (extract_asm_operands (PATTERN (bb_end_insn)) != NULL)
	    {
	      /* If the old fallthru is still next or if
		 asm goto doesn't have a fallthru (e.g. when followed by
		 __builtin_unreachable ()), nothing to do.  */
	      if (! e_fall
		  || bb->aux == e_fall->dest
		  || e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
		continue;

	      /* Otherwise we'll have to use the fallthru fixup below.
		 But avoid redirecting asm goto to EXIT.  */
	      asm_goto = true;
	    }
	  else
	    {
	      /* Otherwise we have some return, switch or computed
		 jump.  In the 99% case, there should not have been a
		 fallthru edge.  */
	      gcc_assert (returnjump_p (bb_end_insn) || !e_fall);
	      continue;
	    }
	}
      else
	{
	  /* No fallthru implies a noreturn function with EH edges, or
	     something similarly bizarre.  In any case, we don't need to
	     do anything.  */
	  if (! e_fall)
	    continue;

	  /* If the fallthru block is still next, nothing to do.  */
	  if (bb->aux == e_fall->dest)
	    continue;

	  /* A fallthru to exit block.  */
	  if (e_fall->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	    continue;
	}

      /* If E_FALL->dest is just a return block, then we can emit a
	 return rather than a jump to the return block.  */
      rtx_insn *ret, *use;
      basic_block dest;
      if (!asm_goto
	  && bb_is_just_return (e_fall->dest, &ret, &use)
	  && ((PATTERN (ret) == simple_return_rtx
	       && targetm.have_simple_return ())
	      || (PATTERN (ret) == ret_rtx && targetm.have_return ())))
	{
	  ret_label = PATTERN (ret);
	  dest = EXIT_BLOCK_PTR_FOR_FN (cfun);

	  e_fall->flags &= ~EDGE_CROSSING;
	  /* E_FALL->dest might become unreachable as a result of
	     replacing the jump with a return.  So arrange to remove
	     unreachable blocks.  */
	  remove_unreachable_blocks = true;
	}
      else
	{
	  dest = e_fall->dest;
	}

      /* We got here if we need to add a new jump insn.
	 Note force_nonfallthru can delete E_FALL and thus we have to
	 save E_FALL->src prior to the call to force_nonfallthru.  */
      nb = force_nonfallthru_and_redirect (e_fall, dest, ret_label);
      if (nb)
	{
	  nb->aux = bb->aux;
	  bb->aux = nb;
	  /* Don't process this new block.  */
	  bb = nb;
	}
    }

  relink_block_chain (/*stay_in_cfglayout_mode=*/false);

  /* Annoying special case - jump around dead jumptables left in the code.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      edge e = find_fallthru_edge (bb->succs);

      if (e && !can_fallthru (e->src, e->dest))
	force_nonfallthru (e);
    }

  /* Ensure goto_locus from edges has some instructions with that locus in RTL
     when not optimizing.  */
  if (!optimize && !DECL_IGNORED_P (current_function_decl))
    FOR_EACH_BB_FN (bb, cfun)
      {
	edge e;
	edge_iterator ei;

	FOR_EACH_EDGE (e, ei, bb->succs)
	  if (LOCATION_LOCUS (e->goto_locus) != UNKNOWN_LOCATION
	      && !(e->flags & EDGE_ABNORMAL))
	    {
	      edge e2;
	      edge_iterator ei2;
	      basic_block dest, nb;
	      rtx_insn *end;

	      insn = BB_END (e->src);
	      end = PREV_INSN (BB_HEAD (e->src));
	      while (insn != end
		     && (!NONDEBUG_INSN_P (insn) || !INSN_HAS_LOCATION (insn)))
		insn = PREV_INSN (insn);
	      if (insn != end
		  && loc_equal (INSN_LOCATION (insn), e->goto_locus))
		continue;
	      if (simplejump_p (BB_END (e->src))
		  && !INSN_HAS_LOCATION (BB_END (e->src)))
		{
		  INSN_LOCATION (BB_END (e->src)) = e->goto_locus;
		  continue;
		}
	      dest = e->dest;
	      if (dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
		{
		  /* Non-fallthru edges to the exit block cannot be split.  */
		  if (!(e->flags & EDGE_FALLTHRU))
		    continue;
		}
	      else
		{
		  insn = BB_HEAD (dest);
		  end = NEXT_INSN (BB_END (dest));
		  while (insn != end && !NONDEBUG_INSN_P (insn))
		    insn = NEXT_INSN (insn);
		  if (insn != end && INSN_HAS_LOCATION (insn)
		      && loc_equal (INSN_LOCATION (insn), e->goto_locus))
		    continue;
		}
	      nb = split_edge (e);
	      if (!INSN_P (BB_END (nb)))
		BB_END (nb) = emit_insn_after_noloc (gen_nop (), BB_END (nb),
						     nb);
	      INSN_LOCATION (BB_END (nb)) = e->goto_locus;

	      /* If there are other incoming edges to the destination block
		 with the same goto locus, redirect them to the new block as
		 well, this can prevent other such blocks from being created
		 in subsequent iterations of the loop.  */
	      for (ei2 = ei_start (dest->preds); (e2 = ei_safe_edge (ei2)); )
		if (LOCATION_LOCUS (e2->goto_locus) != UNKNOWN_LOCATION
		    && !(e2->flags & (EDGE_ABNORMAL | EDGE_FALLTHRU))
		    && e->goto_locus == e2->goto_locus)
		  redirect_edge_and_branch (e2, nb);
		else
		  ei_next (&ei2);
	    }
      }

  /* Replacing a jump with a return may have exposed an unreachable
     block.  Conditionally remove such blocks if any transformation of
     that kind was made.  */
  if (remove_unreachable_blocks)
    delete_unreachable_blocks ();
}

/* Perform sanity checks on the insn chain.
   1. Check that next/prev pointers are consistent in both the forward and
      reverse direction.
   2. Count insns in chain, going both directions, and check if equal.
   3. Check that get_last_insn () returns the actual end of chain.  */

DEBUG_FUNCTION void
verify_insn_chain (void)
{
  rtx_insn *x, *prevx, *nextx;
  int insn_cnt1, insn_cnt2;

  for (prevx = NULL, insn_cnt1 = 1, x = get_insns ();
       x != 0;
       prevx = x, insn_cnt1++, x = NEXT_INSN (x))
    gcc_assert (PREV_INSN (x) == prevx);

  gcc_assert (prevx == get_last_insn ());

  for (nextx = NULL, insn_cnt2 = 1, x = get_last_insn ();
       x != 0;
       nextx = x, insn_cnt2++, x = PREV_INSN (x))
    gcc_assert (NEXT_INSN (x) == nextx);

  gcc_assert (insn_cnt1 == insn_cnt2);
}

/* If we have assembler epilogues, the block falling through to exit must
   be the last one in the reordered chain when we reach final.  Ensure
   that this condition is met.  */
static void
fixup_fallthru_exit_predecessor (void)
{
  edge e;
  basic_block bb = NULL;

  /* This transformation is not valid before reload, because we might
     separate a call from the instruction that copies the return
     value.  */
  gcc_assert (reload_completed);

  e = find_fallthru_edge (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
  if (e)
    bb = e->src;

  if (bb && bb->aux)
    {
      basic_block c = ENTRY_BLOCK_PTR_FOR_FN (cfun)->next_bb;

      /* If the very first block is the one with the fall-through exit
	 edge, we have to split that block.  */
      if (c == bb)
	{
	  bb = split_block_after_labels (bb)->dest;
	  bb->aux = c->aux;
	  c->aux = bb;
	  BB_FOOTER (bb) = BB_FOOTER (c);
	  BB_FOOTER (c) = NULL;
	}

      while (c->aux != bb)
	c = (basic_block) c->aux;

      c->aux = bb->aux;
      while (c->aux)
	c = (basic_block) c->aux;

      c->aux = bb;
      bb->aux = NULL;
    }
}
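
/* Illustrative sketch (not original text): with blocks chained through
   bb->aux as A -> B -> C -> D and B the fallthru predecessor of the
   exit block, the loops above splice B out and re-append it:

     before:  A -> B -> C -> D
     after:   A -> C -> D -> B

   so B is laid out last and its fallthru edge reaches EXIT at the end
   of the function, as assembler epilogues require.  */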

/* In case there is more than one fallthru predecessor of exit, force
   that there is only one.  */

static void
force_one_exit_fallthru (void)
{
  edge e, predecessor = NULL;
  bool more = false;
  edge_iterator ei;
  basic_block forwarder, bb;

  FOR_EACH_EDGE (e, ei, EXIT_BLOCK_PTR_FOR_FN (cfun)->preds)
    if (e->flags & EDGE_FALLTHRU)
      {
	if (predecessor == NULL)
	  predecessor = e;
	else
	  {
	    more = true;
	    break;
	  }
      }

  if (!more)
    return;

  /* Exit has several fallthru predecessors.  Create a forwarder block for
     them.  */
  forwarder = split_edge (predecessor);
  for (ei = ei_start (EXIT_BLOCK_PTR_FOR_FN (cfun)->preds);
       (e = ei_safe_edge (ei)); )
    {
      if (e->src == forwarder
	  || !(e->flags & EDGE_FALLTHRU))
	ei_next (&ei);
      else
	redirect_edge_and_branch_force (e, forwarder);
    }

  /* Fix up the chain of blocks -- make FORWARDER immediately precede the
     exit block.  */
  FOR_EACH_BB_FN (bb, cfun)
    {
      if (bb->aux == NULL && bb != forwarder)
	{
	  bb->aux = forwarder;
	  break;
	}
    }
}
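
/* Illustrative sketch (not original text): two fallthru predecessors
   of exit

     B1 --fallthru--> EXIT
     B2 --fallthru--> EXIT

   become, after force_one_exit_fallthru,

     B1 --fallthru--> F --fallthru--> EXIT
     B2 --jump------> F

   where F is the new forwarder block; only F keeps a fallthru edge
   into the exit block, which a linear layout can satisfy.  */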

/* Return true in case it is possible to duplicate the basic block BB.  */

static bool
cfg_layout_can_duplicate_bb_p (const_basic_block bb)
{
  /* Do not attempt to duplicate tablejumps, as we need to unshare
     the dispatch table.  This is difficult to do, as the instructions
     computing jump destination may be hoisted outside the basic block.  */
  if (tablejump_p (BB_END (bb), NULL, NULL))
    return false;

  /* Do not duplicate blocks containing insns that can't be copied.  */
  if (targetm.cannot_copy_insn_p)
    {
      rtx_insn *insn = BB_HEAD (bb);
      while (1)
	{
	  if (INSN_P (insn) && targetm.cannot_copy_insn_p (insn))
	    return false;
	  if (insn == BB_END (bb))
	    break;
	  insn = NEXT_INSN (insn);
	}
    }

  return true;
}
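
/* Illustrative example (not original text): a C switch lowered to a
   tablejump ends its block with something like

     (jump_insn (set (pc) (mem (plus (reg idx) (label_ref L_table)))))

   followed by a JUMP_TABLE_DATA insn holding the dispatch table.
   Duplicating such a block would require a second, unshared copy of
   the table, which is why cfg_layout_can_duplicate_bb_p refuses.  */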

rtx_insn *
duplicate_insn_chain (rtx_insn *from, rtx_insn *to,
		      class loop *loop, copy_bb_data *id)
{
  rtx_insn *insn, *next, *copy;
  rtx_note *last;

  /* Avoid updating of boundaries of previous basic block.  The
     note will get removed from insn stream in fixup.  */
  last = emit_note (NOTE_INSN_DELETED);

  /* Create copy at the end of INSN chain.  The chain will
     be reordered later.  */
  for (insn = from; insn != NEXT_INSN (to); insn = NEXT_INSN (insn))
    {
      switch (GET_CODE (insn))
	{
	case DEBUG_INSN:
	  /* Don't duplicate label debug insns.  */
	  if (DEBUG_BIND_INSN_P (insn)
	      && TREE_CODE (INSN_VAR_LOCATION_DECL (insn)) == LABEL_DECL)
	    break;
	  /* FALLTHRU */
	case INSN:
	case CALL_INSN:
	case JUMP_INSN:
	  copy = emit_copy_of_insn_after (insn, get_last_insn ());
	  if (JUMP_P (insn) && JUMP_LABEL (insn) != NULL_RTX
	      && ANY_RETURN_P (JUMP_LABEL (insn)))
	    JUMP_LABEL (copy) = JUMP_LABEL (insn);
	  maybe_copy_prologue_epilogue_insn (insn, copy);
	  /* If requested remap dependence info of cliques brought in
	     via inlining.  */
	  if (id)
	    {
	      subrtx_iterator::array_type array;
	      FOR_EACH_SUBRTX (iter, array, PATTERN (insn), ALL)
		if (MEM_P (*iter) && MEM_EXPR (*iter))
		  {
		    tree op = MEM_EXPR (*iter);
		    if (TREE_CODE (op) == WITH_SIZE_EXPR)
		      op = TREE_OPERAND (op, 0);
		    while (handled_component_p (op))
		      op = TREE_OPERAND (op, 0);
		    if ((TREE_CODE (op) == MEM_REF
			 || TREE_CODE (op) == TARGET_MEM_REF)
			&& MR_DEPENDENCE_CLIQUE (op) > 1
			&& (!loop
			    || (MR_DEPENDENCE_CLIQUE (op)
				!= loop->owned_clique)))
		      {
			if (!id->dependence_map)
			  id->dependence_map = new hash_map<dependence_hash,
							    unsigned short>;
			bool existed;
			unsigned short &newc
			  = id->dependence_map->get_or_insert
			      (MR_DEPENDENCE_CLIQUE (op), &existed);
			if (!existed)
			  {
			    gcc_assert
			      (MR_DEPENDENCE_CLIQUE (op) <= cfun->last_clique);
			    newc = get_new_clique (cfun);
			  }
			/* We cannot adjust MR_DEPENDENCE_CLIQUE in-place
			   since MEM_EXPR is shared so make a copy and
			   walk to the subtree again.  */
			tree new_expr = unshare_expr (MEM_EXPR (*iter));
			if (TREE_CODE (new_expr) == WITH_SIZE_EXPR)
			  new_expr = TREE_OPERAND (new_expr, 0);
			while (handled_component_p (new_expr))
			  new_expr = TREE_OPERAND (new_expr, 0);
			MR_DEPENDENCE_CLIQUE (new_expr) = newc;
			set_mem_expr (const_cast <rtx> (*iter), new_expr);
		      }
		  }
	    }
	  break;

	case JUMP_TABLE_DATA:
	  /* Avoid copying of dispatch tables.  We never duplicate
	     tablejumps, so this can hit only in case the table got
	     moved far from original jump.
	     Avoid copying following barrier as well if any
	     (and debug insns in between).  */
	  for (next = NEXT_INSN (insn);
	       next != NEXT_INSN (to);
	       next = NEXT_INSN (next))
	    if (!DEBUG_INSN_P (next))
	      break;
	  if (next != NEXT_INSN (to) && BARRIER_P (next))
	    insn = next;
	  break;

	case CODE_LABEL:
	  break;

	case BARRIER:
	  emit_barrier ();
	  break;

	case NOTE:
	  switch (NOTE_KIND (insn))
	    {
	    /* In case the prologue is empty and the function contains a
	       label in its first BB, we may want to copy the block.  */
	    case NOTE_INSN_PROLOGUE_END:

	    case NOTE_INSN_DELETED:
	    case NOTE_INSN_DELETED_LABEL:
	    case NOTE_INSN_DELETED_DEBUG_LABEL:
	      /* No problem to strip these.  */
	    case NOTE_INSN_FUNCTION_BEG:
	      /* There is always just single entry to function.  */
	    case NOTE_INSN_BASIC_BLOCK:
	      /* We should only switch text sections once.  */
	    case NOTE_INSN_SWITCH_TEXT_SECTIONS:
	      break;

	    case NOTE_INSN_EPILOGUE_BEG:
	    case NOTE_INSN_UPDATE_SJLJ_CONTEXT:
	      emit_note_copy (as_a <rtx_note *> (insn));
	      break;

	    default:
	      /* All other notes should have already been eliminated.  */
	      gcc_unreachable ();
	    }
	  break;
	default:
	  gcc_unreachable ();
	}
    }
  insn = NEXT_INSN (last);
  delete_insn (last);
  return insn;
}
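
/* Usage sketch (illustrative only): the typical caller copies a block's
   body to the end of the insn chain and then wraps the copy in a fresh
   block, much as cfg_layout_duplicate_bb below does:

     rtx_insn *first = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb),
					     NULL, NULL);
     basic_block copy
       = create_basic_block (first, get_last_insn (),
			     EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);

   The returned insn is the first insn of the copy; the copy always
   extends to get_last_insn ().  */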

/* Create a duplicate of the basic block BB.  */

static basic_block
cfg_layout_duplicate_bb (basic_block bb, copy_bb_data *id)
{
  rtx_insn *insn;
  basic_block new_bb;

  class loop *loop = (id && current_loops) ? bb->loop_father : NULL;

  insn = duplicate_insn_chain (BB_HEAD (bb), BB_END (bb), loop, id);
  new_bb = create_basic_block (insn,
			       insn ? get_last_insn () : NULL,
			       EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);

  BB_COPY_PARTITION (new_bb, bb);
  if (BB_HEADER (bb))
    {
      insn = BB_HEADER (bb);
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
      insn = duplicate_insn_chain (BB_HEADER (bb), insn, loop, id);
      if (insn)
	BB_HEADER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
    }

  if (BB_FOOTER (bb))
    {
      insn = BB_FOOTER (bb);
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
      insn = duplicate_insn_chain (BB_FOOTER (bb), insn, loop, id);
      if (insn)
	BB_FOOTER (new_bb) = unlink_insn_chain (insn, get_last_insn ());
    }

  return new_bb;
}


/* Main entry point to this module - initialize the data structures for
   CFG layout changes.  It keeps LOOPS up-to-date if not null.

   FLAGS is a set of additional flags to pass to cleanup_cfg().  */

void
cfg_layout_initialize (int flags)
{
  rtx_insn_list *x;
  basic_block bb;

  /* Once bb partitioning is complete, cfg layout mode should not be
     re-entered.  Entering cfg layout mode may require fixups.  As an
     example, edge forwarding performed when optimizing the cfg layout
     may require moving a block from the hot to the cold section; this
     would create an illegal partitioning unless some manual fixup was
     performed.  */
  gcc_assert (!crtl->bb_reorder_complete || !crtl->has_bb_partition);

  initialize_original_copy_tables ();

  cfg_layout_rtl_register_cfg_hooks ();

  record_effective_endpoints ();

  /* Make sure that the targets of non local gotos are marked.  */
  for (x = nonlocal_goto_handler_labels; x; x = x->next ())
    {
      bb = BLOCK_FOR_INSN (x->insn ());
      bb->flags |= BB_NON_LOCAL_GOTO_TARGET;
    }

  cleanup_cfg (CLEANUP_CFGLAYOUT | flags);
}

/* Splits superblocks.  */
void
break_superblocks (void)
{
  bool need = false;
  basic_block bb;

  auto_sbitmap superblocks (last_basic_block_for_fn (cfun));
  bitmap_clear (superblocks);

  FOR_EACH_BB_FN (bb, cfun)
    if (bb->flags & BB_SUPERBLOCK)
      {
	bb->flags &= ~BB_SUPERBLOCK;
	bitmap_set_bit (superblocks, bb->index);
	need = true;
      }

  if (need)
    {
      rebuild_jump_labels (get_insns ());
      find_many_sub_basic_blocks (superblocks);
    }
}

/* Finalize the changes: reorder insn list according to the sequence specified
   by aux pointers, enter compensation code, rebuild scope forest.  */

void
cfg_layout_finalize (void)
{
  free_dominance_info (CDI_DOMINATORS);
  force_one_exit_fallthru ();
  rtl_register_cfg_hooks ();
  if (reload_completed && !targetm.have_epilogue ())
    fixup_fallthru_exit_predecessor ();
  fixup_reorder_chain ();

  rebuild_jump_labels (get_insns ());
  delete_dead_jumptables ();

  if (flag_checking)
    verify_insn_chain ();
  checking_verify_flow_info ();
}
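
/* Usage sketch (illustrative only): passes that rearrange the block
   layout typically bracket their work as

     cfg_layout_initialize (0);
     ...edit the CFG, recording the desired block order in bb->aux...
     FOR_EACH_BB_FN (bb, cfun)
       if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
	 bb->aux = bb->next_bb;      keeps the existing order
     cfg_layout_finalize ();

   Between the into_cfglayout and outof_cfglayout passes this pairing is
   instead performed by the pass manager.  */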


/* Same as split_block but update cfg_layout structures.  */

static basic_block
cfg_layout_split_block (basic_block bb, void *insnp)
{
  rtx insn = (rtx) insnp;
  basic_block new_bb = rtl_split_block (bb, insn);

  BB_FOOTER (new_bb) = BB_FOOTER (bb);
  BB_FOOTER (bb) = NULL;

  return new_bb;
}

/* Redirect edge E to DEST.  */
static edge
cfg_layout_redirect_edge_and_branch (edge e, basic_block dest)
{
  basic_block src = e->src;
  edge ret;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return NULL;

  if (e->dest == dest)
    return e;

  if (e->flags & EDGE_CROSSING
      && BB_PARTITION (e->src) == BB_PARTITION (dest)
      && simplejump_p (BB_END (src)))
    {
      if (dump_file)
	fprintf (dump_file,
		 "Removing crossing jump while redirecting edge from %i to %i\n",
		 e->src->index, dest->index);
      delete_insn (BB_END (src));
      remove_barriers_from_footer (src);
      e->flags |= EDGE_FALLTHRU;
    }

  if (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && (ret = try_redirect_by_replacing_jump (e, dest, true)))
    {
      df_set_bb_dirty (src);
      return ret;
    }

  if (e->src == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      && (e->flags & EDGE_FALLTHRU) && !(e->flags & EDGE_COMPLEX))
    {
      if (dump_file)
	fprintf (dump_file, "Redirecting entry edge from bb %i to %i\n",
		 e->src->index, dest->index);

      df_set_bb_dirty (e->src);
      redirect_edge_succ (e, dest);
      return e;
    }

  /* Redirect_edge_and_branch may decide to turn branch into fallthru edge
     in the case the basic block appears to be in sequence.  Avoid this
     transformation.  */

  if (e->flags & EDGE_FALLTHRU)
    {
      /* Redirect any branch edges unified with the fallthru one.  */
      if (JUMP_P (BB_END (src))
	  && label_is_jump_target_p (BB_HEAD (e->dest),
				     BB_END (src)))
	{
	  edge redirected;

	  if (dump_file)
	    fprintf (dump_file, "Fallthru edge unified with branch "
		     "%i->%i redirected to %i\n",
		     e->src->index, e->dest->index, dest->index);
	  e->flags &= ~EDGE_FALLTHRU;
	  redirected = redirect_branch_edge (e, dest);
	  gcc_assert (redirected);
	  redirected->flags |= EDGE_FALLTHRU;
	  df_set_bb_dirty (redirected->src);
	  return redirected;
	}
      /* In case we are redirecting fallthru edge to the branch edge
	 of conditional jump, remove it.  */
      if (EDGE_COUNT (src->succs) == 2)
	{
	  /* Find the edge that is different from E.  */
	  edge s = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e);

	  if (s->dest == dest
	      && any_condjump_p (BB_END (src))
	      && onlyjump_p (BB_END (src)))
	    delete_insn (BB_END (src));
	}
      if (dump_file)
	fprintf (dump_file, "Redirecting fallthru edge %i->%i to %i\n",
		 e->src->index, e->dest->index, dest->index);
      ret = redirect_edge_succ_nodup (e, dest);
    }
  else
    ret = redirect_branch_edge (e, dest);

  if (!ret)
    return NULL;

  fixup_partition_crossing (ret);
  /* We don't want simplejumps in the insn stream during cfglayout.  */
  gcc_assert (!simplejump_p (BB_END (src)) || CROSSING_JUMP_P (BB_END (src)));

  df_set_bb_dirty (src);
  return ret;
}

/* Simple wrapper as we always can redirect fallthru edges.  */
static basic_block
cfg_layout_redirect_edge_and_branch_force (edge e, basic_block dest)
{
  edge redirected = cfg_layout_redirect_edge_and_branch (e, dest);

  gcc_assert (redirected);
  return NULL;
}

/* Same as delete_basic_block but update cfg_layout structures.  */

static void
cfg_layout_delete_block (basic_block bb)
{
  rtx_insn *insn, *next, *prev = PREV_INSN (BB_HEAD (bb)), *remaints;
  rtx_insn **to;

  if (BB_HEADER (bb))
    {
      next = BB_HEAD (bb);
      if (prev)
	SET_NEXT_INSN (prev) = BB_HEADER (bb);
      else
	set_first_insn (BB_HEADER (bb));
      SET_PREV_INSN (BB_HEADER (bb)) = prev;
      insn = BB_HEADER (bb);
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
      SET_NEXT_INSN (insn) = next;
      SET_PREV_INSN (next) = insn;
    }
  next = NEXT_INSN (BB_END (bb));
  if (BB_FOOTER (bb))
    {
      insn = BB_FOOTER (bb);
      while (insn)
	{
	  if (BARRIER_P (insn))
	    {
	      if (PREV_INSN (insn))
		SET_NEXT_INSN (PREV_INSN (insn)) = NEXT_INSN (insn);
	      else
		BB_FOOTER (bb) = NEXT_INSN (insn);
	      if (NEXT_INSN (insn))
		SET_PREV_INSN (NEXT_INSN (insn)) = PREV_INSN (insn);
	    }
	  if (LABEL_P (insn))
	    break;
	  insn = NEXT_INSN (insn);
	}
      if (BB_FOOTER (bb))
	{
	  insn = BB_END (bb);
	  SET_NEXT_INSN (insn) = BB_FOOTER (bb);
	  SET_PREV_INSN (BB_FOOTER (bb)) = insn;
	  while (NEXT_INSN (insn))
	    insn = NEXT_INSN (insn);
	  SET_NEXT_INSN (insn) = next;
	  if (next)
	    SET_PREV_INSN (next) = insn;
	  else
	    set_last_insn (insn);
	}
    }
  if (bb->next_bb != EXIT_BLOCK_PTR_FOR_FN (cfun))
    to = &BB_HEADER (bb->next_bb);
  else
    to = &cfg_layout_function_footer;

  rtl_delete_block (bb);

  if (prev)
    prev = NEXT_INSN (prev);
  else
    prev = get_insns ();
  if (next)
    next = PREV_INSN (next);
  else
    next = get_last_insn ();

  if (next && NEXT_INSN (next) != prev)
    {
      remaints = unlink_insn_chain (prev, next);
      insn = remaints;
      while (NEXT_INSN (insn))
	insn = NEXT_INSN (insn);
      SET_NEXT_INSN (insn) = *to;
      if (*to)
	SET_PREV_INSN (*to) = insn;
      *to = remaints;
    }
}

/* Return true when blocks A and B can be safely merged.  */

static bool
cfg_layout_can_merge_blocks_p (basic_block a, basic_block b)
{
  /* If we are partitioning hot/cold basic blocks, we don't want to
     mess up unconditional or indirect jumps that cross between hot
     and cold sections.

     Basic block partitioning may result in some jumps that appear to
     be optimizable (or blocks that appear to be mergeable), but which really
     must be left untouched (they are required to make it safely across
     partition boundaries).  See the comments at the top of
     bb-reorder.cc:partition_hot_cold_basic_blocks for complete details.  */

  if (BB_PARTITION (a) != BB_PARTITION (b))
    return false;

  /* Protect the loop latches.  */
  if (current_loops && b->loop_father->latch == b)
    return false;

  /* If we would end up moving B's instructions, make sure it doesn't fall
     through into the exit block, since we cannot recover from a fallthrough
     edge into the exit block occurring in the middle of a function.  */
  if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
    {
      edge e = find_fallthru_edge (b->succs);
      if (e && e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
	return false;
    }

  /* There must be exactly one edge in between the blocks.  */
  return (single_succ_p (a)
	  && single_succ (a) == b
	  && single_pred_p (b)
	  && a != b
	  /* Must be simple edge.  */
	  && !(single_succ_edge (a)->flags & EDGE_COMPLEX)
	  && a != ENTRY_BLOCK_PTR_FOR_FN (cfun)
	  && b != EXIT_BLOCK_PTR_FOR_FN (cfun)
	  /* If the jump insn has side effects, we can't kill the edge.
	     When not optimizing, try_redirect_by_replacing_jump will
	     not allow us to redirect an edge by replacing a table jump.  */
	  && (!JUMP_P (BB_END (a))
	      || ((!optimize || reload_completed)
		  ? simplejump_p (BB_END (a)) : onlyjump_p (BB_END (a)))));
}

/* Merge blocks A and B.  The blocks must be mergeable.  */

static void
cfg_layout_merge_blocks (basic_block a, basic_block b)
{
  /* If B is a forwarder block whose outgoing edge has no location, we'll
     propagate the locus of the edge between A and B onto it.  */
  const bool forward_edge_locus
    = (b->flags & BB_FORWARDER_BLOCK) != 0
      && LOCATION_LOCUS (EDGE_SUCC (b, 0)->goto_locus) == UNKNOWN_LOCATION;
  rtx_insn *insn;

  gcc_checking_assert (cfg_layout_can_merge_blocks_p (a, b));

  if (dump_file)
    fprintf (dump_file, "Merging block %d into block %d...\n", b->index,
	     a->index);

  /* If there was a CODE_LABEL beginning B, delete it.  */
  if (LABEL_P (BB_HEAD (b)))
    delete_insn (BB_HEAD (b));

  /* We should have a fallthru edge in A, or we can do dummy redirection
     to get it cleaned up.  */
  if (JUMP_P (BB_END (a)))
    try_redirect_by_replacing_jump (EDGE_SUCC (a, 0), b, true);
  gcc_assert (!JUMP_P (BB_END (a)));

  /* If not optimizing, preserve the locus of the single edge between
     blocks A and B if necessary by emitting a nop.  */
  if (!optimize
      && !forward_edge_locus
      && !DECL_IGNORED_P (current_function_decl))
    emit_nop_for_unique_locus_between (a, b);

  /* Move things from b->footer after a->footer.  */
  if (BB_FOOTER (b))
    {
      if (!BB_FOOTER (a))
	BB_FOOTER (a) = BB_FOOTER (b);
      else
	{
	  rtx_insn *last = BB_FOOTER (a);

	  while (NEXT_INSN (last))
	    last = NEXT_INSN (last);
	  SET_NEXT_INSN (last) = BB_FOOTER (b);
	  SET_PREV_INSN (BB_FOOTER (b)) = last;
	}
      BB_FOOTER (b) = NULL;
    }

  /* Move things from b->header before a->footer.
     Note that this may include dead tablejump data, but we don't clean
     those up until we go out of cfglayout mode.  */
  if (BB_HEADER (b))
    {
      if (! BB_FOOTER (a))
	BB_FOOTER (a) = BB_HEADER (b);
      else
	{
	  rtx_insn *last = BB_HEADER (b);

	  while (NEXT_INSN (last))
	    last = NEXT_INSN (last);
	  SET_NEXT_INSN (last) = BB_FOOTER (a);
	  SET_PREV_INSN (BB_FOOTER (a)) = last;
	  BB_FOOTER (a) = BB_HEADER (b);
	}
      BB_HEADER (b) = NULL;
    }

  /* In the case basic blocks are not adjacent, move them around.  */
  if (NEXT_INSN (BB_END (a)) != BB_HEAD (b))
    {
      insn = unlink_insn_chain (BB_HEAD (b), BB_END (b));

      emit_insn_after_noloc (insn, BB_END (a), a);
    }
  /* Otherwise just re-associate the instructions.  */
  else
    {
      insn = BB_HEAD (b);
      BB_END (a) = BB_END (b);
    }

  /* emit_insn_after_noloc doesn't call df_insn_change_bb.
     We need to call it explicitly.  */
  update_bb_for_insn_chain (insn, BB_END (b), a);

  /* Skip possible DELETED_LABEL insn.  */
  if (!NOTE_INSN_BASIC_BLOCK_P (insn))
    insn = NEXT_INSN (insn);
  gcc_assert (NOTE_INSN_BASIC_BLOCK_P (insn));
  BB_HEAD (b) = BB_END (b) = NULL;
  delete_insn (insn);

  df_bb_delete (b->index);

  if (forward_edge_locus)
    EDGE_SUCC (b, 0)->goto_locus = EDGE_SUCC (a, 0)->goto_locus;

  if (dump_file)
    fprintf (dump_file, "Merged blocks %d and %d.\n", a->index, b->index);
}

/* Split edge E.  */

static basic_block
cfg_layout_split_edge (edge e)
{
  basic_block new_bb
    = create_basic_block (e->src != ENTRY_BLOCK_PTR_FOR_FN (cfun)
			  ? NEXT_INSN (BB_END (e->src)) : get_insns (),
			  NULL_RTX, e->src);

  if (e->dest == EXIT_BLOCK_PTR_FOR_FN (cfun))
    BB_COPY_PARTITION (new_bb, e->src);
  else
    BB_COPY_PARTITION (new_bb, e->dest);
  make_edge (new_bb, e->dest, EDGE_FALLTHRU);
  redirect_edge_and_branch_force (e, new_bb);

  return new_bb;
}

/* Do postprocessing after making a forwarder block joined by edge FALLTHRU.  */

static void
rtl_make_forwarder_block (edge fallthru ATTRIBUTE_UNUSED)
{
}

/* Return true if BB contains only labels or non-executable
   instructions.  */

static bool
rtl_block_empty_p (basic_block bb)
{
  rtx_insn *insn;

  if (bb == ENTRY_BLOCK_PTR_FOR_FN (cfun)
      || bb == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return true;

  FOR_BB_INSNS (bb, insn)
    if (NONDEBUG_INSN_P (insn)
	&& (!any_uncondjump_p (insn) || !onlyjump_p (insn)))
      return false;

  return true;
}

/* Split a basic block if it ends with a conditional branch and if
   the other part of the block is not empty.  */

static basic_block
rtl_split_block_before_cond_jump (basic_block bb)
{
  rtx_insn *insn;
  rtx_insn *split_point = NULL;
  rtx_insn *last = NULL;
  bool found_code = false;

  FOR_BB_INSNS (bb, insn)
    {
      if (any_condjump_p (insn))
	split_point = last;
      else if (NONDEBUG_INSN_P (insn))
	found_code = true;
      last = insn;
    }

  /* Did not find everything.  */
  if (found_code && split_point)
    return split_block (bb, split_point)->dest;
  else
    return NULL;
}

/* Return true if BB ends with a call, possibly followed by some
   instructions that must stay with the call, false otherwise.  */

static bool
rtl_block_ends_with_call_p (basic_block bb)
{
  rtx_insn *insn = BB_END (bb);

  while (!CALL_P (insn)
	 && insn != BB_HEAD (bb)
	 && (keep_with_call_p (insn)
	     || NOTE_P (insn)
	     || DEBUG_INSN_P (insn)))
    insn = PREV_INSN (insn);
  return (CALL_P (insn));
}

/* Return true if BB ends with a conditional branch, false otherwise.  */

static bool
rtl_block_ends_with_condjump_p (const_basic_block bb)
{
  return any_condjump_p (BB_END (bb));
}

/* Return true if we need to add fake edge to exit.
   Helper function for rtl_flow_call_edges_add.  */

static bool
need_fake_edge_p (const rtx_insn *insn)
{
  if (!INSN_P (insn))
    return false;

  if ((CALL_P (insn)
       && !SIBLING_CALL_P (insn)
       && !find_reg_note (insn, REG_NORETURN, NULL)
       && !(RTL_CONST_OR_PURE_CALL_P (insn))))
    return true;

  return ((GET_CODE (PATTERN (insn)) == ASM_OPERANDS
	   && MEM_VOLATILE_P (PATTERN (insn)))
	  || (GET_CODE (PATTERN (insn)) == PARALLEL
	      && asm_noperands (insn) != -1
	      && MEM_VOLATILE_P (XVECEXP (PATTERN (insn), 0, 0)))
	  || GET_CODE (PATTERN (insn)) == ASM_INPUT);
}
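
/* Illustrative example (not original text): in source like

     void f (void)
     {
       g ();                    may call exit/longjmp and never return
       asm volatile ("hlt");    may stop the program
     }

   neither statement is known to return, so profiling-style passes must
   not assume control reaches the end of the block; need_fake_edge_p
   flags such insns so rtl_flow_call_edges_add can attach EDGE_FAKE
   edges from their blocks to the exit block.  */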

/* Add fake edges to the function exit for any non-constant and
   non-noreturn calls (and for volatile inline assembly) in the bitmap
   of blocks specified by BLOCKS, or to the whole CFG if BLOCKS is zero.
   Return the number of blocks that were split.

   The goal is to expose cases in which entering a basic block does not imply
   that all subsequent instructions must be executed.  */

static int
rtl_flow_call_edges_add (sbitmap blocks)
{
  int i;
  int blocks_split = 0;
  int last_bb = last_basic_block_for_fn (cfun);
  bool check_last_block = false;

  if (n_basic_blocks_for_fn (cfun) == NUM_FIXED_BLOCKS)
    return 0;

  if (! blocks)
    check_last_block = true;
  else
    check_last_block
      = bitmap_bit_p (blocks, EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb->index);

  /* In the last basic block, before epilogue generation, there will be
     a fallthru edge to EXIT.  Special care is required if the last insn
     of the last basic block is a call because make_edge folds duplicate
     edges, which would result in the fallthru edge also being marked
     fake, which would result in the fallthru edge being removed by
     remove_fake_edges, which would result in an invalid CFG.

     Moreover, we can't elide the outgoing fake edge, since the block
     profiler needs to take this into account in order to solve the minimal
     spanning tree in the case that the call doesn't return.

     Handle this by adding a dummy instruction in a new last basic block.  */
  if (check_last_block)
    {
      basic_block bb = EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb;
      rtx_insn *insn = BB_END (bb);

      /* Back up past insns that must be kept in the same block as a call.  */
      while (insn != BB_HEAD (bb)
	     && keep_with_call_p (insn))
	insn = PREV_INSN (insn);

      if (need_fake_edge_p (insn))
	{
	  edge e;

	  e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
	  if (e)
	    {
	      insert_insn_on_edge (gen_use (const0_rtx), e);
	      commit_edge_insertions ();
	    }
	}
    }

  /* Now add fake edges to the function exit for any non constant
     calls since there is no way that we can determine if they will
     return or not...  */

  for (i = NUM_FIXED_BLOCKS; i < last_bb; i++)
    {
      basic_block bb = BASIC_BLOCK_FOR_FN (cfun, i);
      rtx_insn *insn;
      rtx_insn *prev_insn;

      if (!bb)
	continue;

      if (blocks && !bitmap_bit_p (blocks, i))
	continue;

      for (insn = BB_END (bb); ; insn = prev_insn)
	{
	  prev_insn = PREV_INSN (insn);
	  if (need_fake_edge_p (insn))
	    {
	      edge e;
	      rtx_insn *split_at_insn = insn;

	      /* Don't split the block between a call and an insn that should
		 remain in the same block as the call.  */
	      if (CALL_P (insn))
		while (split_at_insn != BB_END (bb)
		       && keep_with_call_p (NEXT_INSN (split_at_insn)))
		  split_at_insn = NEXT_INSN (split_at_insn);

	      /* The handling above of the final block before the epilogue
		 should be enough to verify that there is no edge to the exit
		 block in CFG already.  Calling make_edge in such case would
		 cause us to mark that edge as fake and remove it later.  */

	      if (flag_checking && split_at_insn == BB_END (bb))
		{
		  e = find_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun));
		  gcc_assert (e == NULL);
		}

	      /* Note that the following may create a new basic block
		 and renumber the existing basic blocks.  */
	      if (split_at_insn != BB_END (bb))
		{
		  e = split_block (bb, split_at_insn);
		  if (e)
		    blocks_split++;
		}

	      edge ne = make_edge (bb, EXIT_BLOCK_PTR_FOR_FN (cfun), EDGE_FAKE);
	      ne->probability = profile_probability::guessed_never ();
	    }

	  if (insn == BB_HEAD (bb))
	    break;
	}
    }

  if (blocks_split)
    verify_flow_info ();

  return blocks_split;
}

/* Add COMP_RTX as a condition at the end of COND_BB.  FIRST_HEAD is
   the conditional branch target, SECOND_HEAD the fall-thru; there is
   no need to handle the latter here, the loop versioning code handles
   it.  The reason SECOND_HEAD is a parameter at all is that it is
   needed for the condition in trees, and the signature should be the
   same for both IRs since this is a hook.  */
static void
rtl_lv_add_condition_to_bb (basic_block first_head,
			    basic_block second_head ATTRIBUTE_UNUSED,
			    basic_block cond_bb, void *comp_rtx)
{
  rtx_code_label *label;
  rtx_insn *seq, *jump;
  rtx op0 = XEXP ((rtx) comp_rtx, 0);
  rtx op1 = XEXP ((rtx) comp_rtx, 1);
  enum rtx_code comp = GET_CODE ((rtx) comp_rtx);
  machine_mode mode;

  label = block_label (first_head);
  mode = GET_MODE (op0);
  if (mode == VOIDmode)
    mode = GET_MODE (op1);

  start_sequence ();
  op0 = force_operand (op0, NULL_RTX);
  op1 = force_operand (op1, NULL_RTX);
  do_compare_rtx_and_jump (op0, op1, comp, 0, mode, NULL_RTX, NULL, label,
			   profile_probability::uninitialized ());
  jump = get_last_insn ();
  JUMP_LABEL (jump) = label;
  LABEL_NUSES (label)++;
  seq = get_insns ();
  end_sequence ();

  /* Add the new cond, in the new head.  */
  emit_insn_after (seq, BB_END (cond_bb));
}

/* Given a block B with a conditional branch at its end, store the branch
   edge and the fall-thru edge in BRANCH_EDGE and FALLTHRU_EDGE
   respectively.  */
static void
rtl_extract_cond_bb_edges (basic_block b, edge *branch_edge,
			   edge *fallthru_edge)
{
  edge e = EDGE_SUCC (b, 0);

  if (e->flags & EDGE_FALLTHRU)
    {
      *fallthru_edge = e;
      *branch_edge = EDGE_SUCC (b, 1);
    }
  else
    {
      *branch_edge = e;
      *fallthru_edge = EDGE_SUCC (b, 1);
    }
}

void
init_rtl_bb_info (basic_block bb)
{
  gcc_assert (!bb->il.x.rtl);
  bb->il.x.head_ = NULL;
  bb->il.x.rtl = ggc_cleared_alloc<rtl_bb_info> ();
}

static bool
rtl_bb_info_initialized_p (basic_block bb)
{
  return bb->il.x.rtl;
}

/* Returns true if it is possible to remove edge E by redirecting
   it to the destination of the other edge from E->src.  */

static bool
rtl_can_remove_branch_p (const_edge e)
{
  const_basic_block src = e->src;
  const_basic_block target = EDGE_SUCC (src, EDGE_SUCC (src, 0) == e)->dest;
  const rtx_insn *insn = BB_END (src);
  rtx set;

  /* The conditions are taken from try_redirect_by_replacing_jump.  */
  if (target == EXIT_BLOCK_PTR_FOR_FN (cfun))
    return false;

  if (e->flags & (EDGE_ABNORMAL_CALL | EDGE_EH))
    return false;

  if (BB_PARTITION (src) != BB_PARTITION (target))
    return false;

  if (!onlyjump_p (insn)
      || tablejump_p (insn, NULL, NULL))
    return false;

  set = single_set (insn);
  if (!set || side_effects_p (set))
    return false;

  return true;
}

static basic_block
rtl_duplicate_bb (basic_block bb, copy_bb_data *id)
{
  bb = cfg_layout_duplicate_bb (bb, id);
  bb->aux = NULL;
  return bb;
}

/* Do book-keeping of basic block BB for the profile consistency checker.
   Store the counting in RECORD.  */
static void
rtl_account_profile_record (basic_block bb, struct profile_record *record)
{
  rtx_insn *insn;
  FOR_BB_INSNS (bb, insn)
    if (INSN_P (insn))
      {
	record->size += insn_cost (insn, false);
	if (profile_info)
	  {
	    if (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().initialized_p ()
		&& ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.ipa ().nonzero_p ()
		&& bb->count.ipa ().initialized_p ())
	      record->time
		+= insn_cost (insn, true) * bb->count.ipa ().to_gcov_type ();
	  }
	else if (bb->count.initialized_p ()
		 && ENTRY_BLOCK_PTR_FOR_FN (cfun)->count.initialized_p ())
	  record->time
	    += insn_cost (insn, true)
	       * bb->count.to_sreal_scale
		   (ENTRY_BLOCK_PTR_FOR_FN (cfun)->count).to_double ();
	else
	  record->time += insn_cost (insn, true);
      }
}
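
/* Illustrative summary (not original text): per insn the checker
   accumulates, roughly,

     record->size += insn_cost (insn, false);
     record->time += insn_cost (insn, true) * weight (bb);

   where weight (bb) is the IPA execution count when a real profile is
   available, the block count scaled relative to the entry block when
   only a local estimate exists, and 1 otherwise.  */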

/* Implementation of CFG manipulation for linearized RTL.  */
struct cfg_hooks rtl_cfg_hooks = {
  "rtl",
  rtl_verify_flow_info,
  rtl_dump_bb,
  rtl_dump_bb_for_graph,
  rtl_create_basic_block,
  rtl_redirect_edge_and_branch,
  rtl_redirect_edge_and_branch_force,
  rtl_can_remove_branch_p,
  rtl_delete_block,
  rtl_split_block,
  rtl_move_block_after,
  rtl_can_merge_blocks, /* can_merge_blocks_p */
  rtl_merge_blocks,
  rtl_predict_edge,
  rtl_predicted_by_p,
  cfg_layout_can_duplicate_bb_p,
  rtl_duplicate_bb,
  rtl_split_edge,
  rtl_make_forwarder_block,
  rtl_tidy_fallthru_edge,
  rtl_force_nonfallthru,
  rtl_block_ends_with_call_p,
  rtl_block_ends_with_condjump_p,
  rtl_flow_call_edges_add,
  NULL, /* execute_on_growing_pred */
  NULL, /* execute_on_shrinking_pred */
  NULL, /* duplicate loop for trees */
  NULL, /* lv_add_condition_to_bb */
  NULL, /* lv_adjust_loop_header_phi */
  NULL, /* extract_cond_bb_edges */
  NULL, /* flush_pending_stmts */
  rtl_block_empty_p, /* block_empty_p */
  rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
  rtl_account_profile_record,
};

/* Implementation of CFG manipulation for cfglayout RTL, where basic
   blocks connected via fallthru edges do not have to be adjacent.
   This representation will hopefully become the default one in future
   versions of the compiler.  */

struct cfg_hooks cfg_layout_rtl_cfg_hooks = {
  "cfglayout mode",
  rtl_verify_flow_info_1,
  rtl_dump_bb,
  rtl_dump_bb_for_graph,
  cfg_layout_create_basic_block,
  cfg_layout_redirect_edge_and_branch,
  cfg_layout_redirect_edge_and_branch_force,
  rtl_can_remove_branch_p,
  cfg_layout_delete_block,
  cfg_layout_split_block,
  rtl_move_block_after,
  cfg_layout_can_merge_blocks_p,
  cfg_layout_merge_blocks,
  rtl_predict_edge,
  rtl_predicted_by_p,
  cfg_layout_can_duplicate_bb_p,
  cfg_layout_duplicate_bb,
  cfg_layout_split_edge,
  rtl_make_forwarder_block,
  NULL, /* tidy_fallthru_edge */
  rtl_force_nonfallthru,
  rtl_block_ends_with_call_p,
  rtl_block_ends_with_condjump_p,
  rtl_flow_call_edges_add,
  NULL, /* execute_on_growing_pred */
  NULL, /* execute_on_shrinking_pred */
  duplicate_loop_body_to_header_edge, /* duplicate loop for rtl */
  rtl_lv_add_condition_to_bb, /* lv_add_condition_to_bb */
  NULL, /* lv_adjust_loop_header_phi */
  rtl_extract_cond_bb_edges, /* extract_cond_bb_edges */
  NULL, /* flush_pending_stmts */
  rtl_block_empty_p, /* block_empty_p */
  rtl_split_block_before_cond_jump, /* split_block_before_cond_jump */
  rtl_account_profile_record,
};
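
/* Usage sketch (illustrative only): generic CFG code dispatches through
   whichever hook table is currently installed, so switching IL views is
   just a matter of registering the right table:

     rtl_register_cfg_hooks ();             installs rtl_cfg_hooks
     cfg_layout_rtl_register_cfg_hooks ();  installs cfg_layout_rtl_cfg_hooks

   after which calls such as split_edge () or redirect_edge_and_branch ()
   from cfghooks.cc invoke the matching rtl_* or cfg_layout_* worker
   defined in this file.  */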

#include "gt-cfgrtl.h"

#if __GNUC__ >= 10
# pragma GCC diagnostic pop
#endif
