/* Instruction scheduling pass.  This file computes dependencies between
   instructions.
   Copyright (C) 1992-2024 Free Software Foundation, Inc.
   Contributed by Michael Tiemann (tiemann@cygnus.com) Enhanced by,
   and currently maintained by, Jim Wilson (wilson@cygnus.com)

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "df.h"
#include "insn-config.h"
#include "regs.h"
#include "memmodel.h"
#include "ira.h"
#include "ira-int.h"
#include "insn-attr.h"
#include "cfgbuild.h"
#include "sched-int.h"
#include "cselib.h"
#include "function-abi.h"

#ifdef INSN_SCHEDULING

/* Holds current parameters for the dependency analyzer.  */
struct sched_deps_info_def *sched_deps_info;

/* The data is specific to the Haifa scheduler.  */
vec<haifa_deps_insn_data_def> h_d_i_d = vNULL;

/* Return the major type present in the DS.  */
enum reg_note
ds_to_dk (ds_t ds)
{
  if (ds & DEP_TRUE)
    return REG_DEP_TRUE;

  if (ds & DEP_OUTPUT)
    return REG_DEP_OUTPUT;

  if (ds & DEP_CONTROL)
    return REG_DEP_CONTROL;

  gcc_assert (ds & DEP_ANTI);

  return REG_DEP_ANTI;
}

/* Return equivalent dep_status.  */
ds_t
dk_to_ds (enum reg_note dk)
{
  switch (dk)
    {
    case REG_DEP_TRUE:
      return DEP_TRUE;

    case REG_DEP_OUTPUT:
      return DEP_OUTPUT;

    case REG_DEP_CONTROL:
      return DEP_CONTROL;

    default:
      gcc_assert (dk == REG_DEP_ANTI);
      return DEP_ANTI;
    }
}

/* Functions to operate with dependence information container - dep_t.  */

/* Init DEP with the arguments.  */
void
init_dep_1 (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note type, ds_t ds)
{
  DEP_PRO (dep) = pro;
  DEP_CON (dep) = con;
  DEP_TYPE (dep) = type;
  DEP_STATUS (dep) = ds;
  DEP_COST (dep) = UNKNOWN_DEP_COST;
  DEP_NONREG (dep) = 0;
  DEP_MULTIPLE (dep) = 0;
  DEP_REPLACE (dep) = NULL;
  dep->unused = 0;
}

/* Init DEP with the arguments.
   While most of the scheduler (including targets) only need the major type
   of the dependency, it is convenient to hide full dep_status from them.  */
void
init_dep (dep_t dep, rtx_insn *pro, rtx_insn *con, enum reg_note kind)
{
  ds_t ds;

  if ((current_sched_info->flags & USE_DEPS_LIST))
    ds = dk_to_ds (kind);
  else
    ds = 0;

  init_dep_1 (dep, pro, con, kind, ds);
}

/* Make a copy of FROM in TO.  */
static void
copy_dep (dep_t to, dep_t from)
{
  memcpy (to, from, sizeof (*to));
}

static void dump_ds (FILE *, ds_t);

/* Define flags for dump_dep ().  */

/* Dump producer of the dependence.  */
#define DUMP_DEP_PRO (2)

/* Dump consumer of the dependence.  */
#define DUMP_DEP_CON (4)

/* Dump type of the dependence.  */
#define DUMP_DEP_TYPE (8)

/* Dump status of the dependence.  */
#define DUMP_DEP_STATUS (16)

/* Dump all information about the dependence.  */
#define DUMP_DEP_ALL (DUMP_DEP_PRO | DUMP_DEP_CON | DUMP_DEP_TYPE \
                      | DUMP_DEP_STATUS)

/* Dump DEP to DUMP.
   FLAGS is a bit mask specifying what information about DEP needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about DEP
   and propagate this bit into the callee dump functions.  */
static void
dump_dep (FILE *dump, dep_t dep, int flags)
{
  if (flags & 1)
    flags |= DUMP_DEP_ALL;

  fprintf (dump, "<");

  if (flags & DUMP_DEP_PRO)
    fprintf (dump, "%d; ", INSN_UID (DEP_PRO (dep)));

  if (flags & DUMP_DEP_CON)
    fprintf (dump, "%d; ", INSN_UID (DEP_CON (dep)));

  if (flags & DUMP_DEP_TYPE)
    {
      char t;
      enum reg_note type = DEP_TYPE (dep);

      switch (type)
        {
        case REG_DEP_TRUE:
          t = 't';
          break;

        case REG_DEP_OUTPUT:
          t = 'o';
          break;

        case REG_DEP_CONTROL:
          t = 'c';
          break;

        case REG_DEP_ANTI:
          t = 'a';
          break;

        default:
          gcc_unreachable ();
          break;
        }

      fprintf (dump, "%c; ", t);
    }

  if (flags & DUMP_DEP_STATUS)
    {
      if (current_sched_info->flags & USE_DEPS_LIST)
        dump_ds (dump, DEP_STATUS (dep));
    }

  fprintf (dump, ">");
}

/* Default flags for dump_dep ().  */
static int dump_dep_flags = (DUMP_DEP_PRO | DUMP_DEP_CON);

/* Dump all fields of DEP to STDERR.  */
void
sd_debug_dep (dep_t dep)
{
  dump_dep (stderr, dep, 1);
  fprintf (stderr, "\n");
}

/* Determine whether DEP is a dependency link of a non-debug insn on a
   debug insn.  */

static inline bool
depl_on_debug_p (dep_link_t dep)
{
  return (DEBUG_INSN_P (DEP_LINK_PRO (dep))
          && !DEBUG_INSN_P (DEP_LINK_CON (dep)));
}

/* Functions to operate with a single link from the dependencies lists -
   dep_link_t.  */

/* Attach L to appear after link X whose &DEP_LINK_NEXT (X) is given by
   PREV_NEXT_P.  */
static void
attach_dep_link (dep_link_t l, dep_link_t *prev_nextp)
{
  dep_link_t next = *prev_nextp;

  gcc_assert (DEP_LINK_PREV_NEXTP (l) == NULL
              && DEP_LINK_NEXT (l) == NULL);

  /* Init node being inserted.  */
  DEP_LINK_PREV_NEXTP (l) = prev_nextp;
  DEP_LINK_NEXT (l) = next;

  /* Fix next node.  */
  if (next != NULL)
    {
      gcc_assert (DEP_LINK_PREV_NEXTP (next) == prev_nextp);

      DEP_LINK_PREV_NEXTP (next) = &DEP_LINK_NEXT (l);
    }

  /* Fix prev node.  */
  *prev_nextp = l;
}

/* Add dep_link LINK to deps_list L.  */
static void
add_to_deps_list (dep_link_t link, deps_list_t l)
{
  attach_dep_link (link, &DEPS_LIST_FIRST (l));

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    ++DEPS_LIST_N_LINKS (l);
}

/* Detach dep_link L from the list.  */
static void
detach_dep_link (dep_link_t l)
{
  dep_link_t *prev_nextp = DEP_LINK_PREV_NEXTP (l);
  dep_link_t next = DEP_LINK_NEXT (l);

  *prev_nextp = next;

  if (next != NULL)
    DEP_LINK_PREV_NEXTP (next) = prev_nextp;

  DEP_LINK_PREV_NEXTP (l) = NULL;
  DEP_LINK_NEXT (l) = NULL;
}

/* Remove link LINK from list LIST.  */
static void
remove_from_deps_list (dep_link_t link, deps_list_t list)
{
  detach_dep_link (link);

  /* Don't count debug deps.  */
  if (!depl_on_debug_p (link))
    --DEPS_LIST_N_LINKS (list);
}

/* Move link LINK from list FROM to list TO.  */
static void
move_dep_link (dep_link_t link, deps_list_t from, deps_list_t to)
{
  remove_from_deps_list (link, from);
  add_to_deps_list (link, to);
}

/* Return true if LINK is not attached to any list.  */
static bool
dep_link_is_detached_p (dep_link_t link)
{
  return DEP_LINK_PREV_NEXTP (link) == NULL;
}

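/* An illustrative note (not used by the pass): these links are intrusive
   and doubly linked through DEP_LINK_NEXT plus a pointer to the previous
   node's next field (DEP_LINK_PREV_NEXTP), so a node can unlink itself
   without knowing which list heads it.  Conceptually:

     head -> [A] -> [B] -> NULL
     DEP_LINK_PREV_NEXTP (B) == &DEP_LINK_NEXT (A)

   which is why detach_dep_link needs only the node itself.  */
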
/* Pool to hold all dependency nodes (dep_node_t).  */
static object_allocator<_dep_node> *dn_pool;

/* Number of dep_nodes out there.  */
static int dn_pool_diff = 0;

/* Create a dep_node.  */
static dep_node_t
create_dep_node (void)
{
  dep_node_t n = dn_pool->allocate ();
  dep_link_t back = DEP_NODE_BACK (n);
  dep_link_t forw = DEP_NODE_FORW (n);

  DEP_LINK_NODE (back) = n;
  DEP_LINK_NEXT (back) = NULL;
  DEP_LINK_PREV_NEXTP (back) = NULL;

  DEP_LINK_NODE (forw) = n;
  DEP_LINK_NEXT (forw) = NULL;
  DEP_LINK_PREV_NEXTP (forw) = NULL;

  ++dn_pool_diff;

  return n;
}

/* Delete dep_node N.  N must not be connected to any deps_list.  */
static void
delete_dep_node (dep_node_t n)
{
  gcc_assert (dep_link_is_detached_p (DEP_NODE_BACK (n))
              && dep_link_is_detached_p (DEP_NODE_FORW (n)));

  XDELETE (DEP_REPLACE (DEP_NODE_DEP (n)));

  --dn_pool_diff;

  dn_pool->remove (n);
}

/* Pool to hold dependencies lists (deps_list_t).  */
static object_allocator<_deps_list> *dl_pool;

/* Number of deps_lists out there.  */
static int dl_pool_diff = 0;

/* Functions to operate with dependences lists - deps_list_t.  */

/* Return true if list L is empty.  */
static bool
deps_list_empty_p (deps_list_t l)
{
  return DEPS_LIST_N_LINKS (l) == 0;
}

/* Create a new deps_list.  */
static deps_list_t
create_deps_list (void)
{
  deps_list_t l = dl_pool->allocate ();

  DEPS_LIST_FIRST (l) = NULL;
  DEPS_LIST_N_LINKS (l) = 0;

  ++dl_pool_diff;
  return l;
}

/* Free deps_list L.  */
static void
free_deps_list (deps_list_t l)
{
  gcc_assert (deps_list_empty_p (l));

  --dl_pool_diff;

  dl_pool->remove (l);
}

/* Return true if there are no dep_nodes or deps_lists out there.
   After the region is scheduled all the dependency nodes and lists
   should [generally] be returned to the pool.  */
bool
deps_pools_are_empty_p (void)
{
  return dn_pool_diff == 0 && dl_pool_diff == 0;
}

/* Remove all elements from L.  */
static void
clear_deps_list (deps_list_t l)
{
  do
    {
      dep_link_t link = DEPS_LIST_FIRST (l);

      if (link == NULL)
        break;

      remove_from_deps_list (link, l);
    }
  while (1);
}

/* Decide whether a dependency should be treated as a hard or a speculative
   dependency.  */
static bool
dep_spec_p (dep_t dep)
{
  if (current_sched_info->flags & DO_SPECULATION)
    {
      if (DEP_STATUS (dep) & SPECULATIVE)
        return true;
    }
  if (current_sched_info->flags & DO_PREDICATION)
    {
      if (DEP_TYPE (dep) == REG_DEP_CONTROL)
        return true;
    }
  if (DEP_REPLACE (dep) != NULL)
    return true;
  return false;
}

static regset reg_pending_sets;
static regset reg_pending_clobbers;
static regset reg_pending_uses;
static regset reg_pending_control_uses;
static enum reg_pending_barrier_mode reg_pending_barrier;

/* Hard registers implicitly clobbered or used (or may be implicitly
   clobbered or used) by the currently analyzed insn.  For example, an
   insn may have a constraint that accepts only one register class;
   even if no hard register appears in the insn now, the required hard
   register will appear there after the reload pass because the
   constraint demands it.  */
static HARD_REG_SET implicit_reg_pending_clobbers;
static HARD_REG_SET implicit_reg_pending_uses;

/* To speed up the test for duplicate dependency links we keep a
   record of dependencies created by add_dependence when the average
   number of instructions in a basic block is very large.

   Studies have shown that there is typically around 5 instructions between
   branches for typical C code.  So we can make a guess that the average
   basic block is approximately 5 instructions long; we will choose 100X
   the average size as a very large basic block.

   Each insn has associated bitmaps for its dependencies.  Each bitmap
   has enough entries to represent a dependency on any other insn in
   the insn chain.  Once the bitmap for the true-dependency cache is
   allocated, the remaining caches are allocated as well.  */
static bitmap true_dependency_cache = NULL;
static bitmap output_dependency_cache = NULL;
static bitmap anti_dependency_cache = NULL;
static bitmap control_dependency_cache = NULL;
static bitmap spec_dependency_cache = NULL;
static int cache_size;

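/* Illustrative note: each cache above is an array of bitmaps indexed by
   the consumer's luid; bit ELEM_LUID being set in
   true_dependency_cache[INSN_LUID (con)] means that CON has a true
   dependence on the insn whose luid is ELEM_LUID.  See
   ask_dependency_caches below for the actual lookups.  */
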
/* True if we should mark added dependencies as non-register deps.  */
static bool mark_as_hard;

static bool deps_may_trap_p (const_rtx);
static void add_dependence_1 (rtx_insn *, rtx_insn *, enum reg_note);
static void add_dependence_list (rtx_insn *, rtx_insn_list *, int,
                                 enum reg_note, bool);
static void add_dependence_list_and_free (class deps_desc *, rtx_insn *,
                                          rtx_insn_list **, int, enum reg_note,
                                          bool);
static void delete_all_dependences (rtx_insn *);
static void chain_to_prev_insn (rtx_insn *);

static void flush_pending_lists (class deps_desc *, rtx_insn *, int, int);
static void sched_analyze_1 (class deps_desc *, rtx, rtx_insn *);
static void sched_analyze_2 (class deps_desc *, rtx, rtx_insn *);
static void sched_analyze_insn (class deps_desc *, rtx, rtx_insn *);

static bool sched_has_condition_p (const rtx_insn *);
static bool conditions_mutex_p (const_rtx, const_rtx, bool, bool);

static enum DEPS_ADJUST_RESULT maybe_add_or_update_dep_1 (dep_t, bool,
                                                          rtx, rtx);
static enum DEPS_ADJUST_RESULT add_or_update_dep_1 (dep_t, bool, rtx, rtx);

static void check_dep (dep_t, bool);


/* Return true if a load of the memory reference MEM can cause a trap.  */

static bool
deps_may_trap_p (const_rtx mem)
{
  const_rtx addr = XEXP (mem, 0);

  if (REG_P (addr) && REGNO (addr) >= FIRST_PSEUDO_REGISTER)
    {
      const_rtx t = get_reg_known_value (REGNO (addr));
      if (t)
        addr = t;
    }
  return rtx_addr_can_trap_p (addr);
}


/* Find the condition under which INSN is executed.  If REV is not NULL,
   it is set to TRUE when the returned comparison should be reversed
   to get the actual condition.  */
static rtx
sched_get_condition_with_rev_uncached (const rtx_insn *insn, bool *rev)
{
  rtx pat = PATTERN (insn);
  rtx src;

  if (rev)
    *rev = false;

  if (GET_CODE (pat) == COND_EXEC)
    return COND_EXEC_TEST (pat);

  if (!any_condjump_p (insn) || !onlyjump_p (insn))
    return 0;

  src = SET_SRC (pc_set (insn));

  if (XEXP (src, 2) == pc_rtx)
    return XEXP (src, 0);
  else if (XEXP (src, 1) == pc_rtx)
    {
      rtx cond = XEXP (src, 0);
      enum rtx_code revcode = reversed_comparison_code (cond, insn);

      if (revcode == UNKNOWN)
        return 0;

      if (rev)
        *rev = true;
      return cond;
    }

  return 0;
}

/* Return the condition under which INSN does not execute (i.e. the
   not-taken condition for a conditional branch), or NULL if we cannot
   find such a condition.  The caller should make a copy of the condition
   before using it.  */
rtx
sched_get_reverse_condition_uncached (const rtx_insn *insn)
{
  bool rev;
  rtx cond = sched_get_condition_with_rev_uncached (insn, &rev);
  if (cond == NULL_RTX)
    return cond;
  if (!rev)
    {
      enum rtx_code revcode = reversed_comparison_code (cond, insn);
      cond = gen_rtx_fmt_ee (revcode, GET_MODE (cond),
                             XEXP (cond, 0),
                             XEXP (cond, 1));
    }
  return cond;
}

/* Caching variant of sched_get_condition_with_rev_uncached.
   We only do actual work the first time we come here for an insn; the
   results are cached in INSN_CACHED_COND and INSN_REVERSE_COND.  */
static rtx
sched_get_condition_with_rev (const rtx_insn *insn, bool *rev)
{
  bool tmp;

  if (INSN_LUID (insn) == 0)
    return sched_get_condition_with_rev_uncached (insn, rev);

  if (INSN_CACHED_COND (insn) == const_true_rtx)
    return NULL_RTX;

  if (INSN_CACHED_COND (insn) != NULL_RTX)
    {
      if (rev)
        *rev = INSN_REVERSE_COND (insn);
      return INSN_CACHED_COND (insn);
    }

  INSN_CACHED_COND (insn) = sched_get_condition_with_rev_uncached (insn, &tmp);
  INSN_REVERSE_COND (insn) = tmp;

  if (INSN_CACHED_COND (insn) == NULL_RTX)
    {
      INSN_CACHED_COND (insn) = const_true_rtx;
      return NULL_RTX;
    }

  if (rev)
    *rev = INSN_REVERSE_COND (insn);
  return INSN_CACHED_COND (insn);
}

/* True when we can find a condition under which INSN is executed.  */
static bool
sched_has_condition_p (const rtx_insn *insn)
{
  return !! sched_get_condition_with_rev (insn, NULL);
}



/* Return true if conditions COND1 and COND2 can never be both true.  */
static bool
conditions_mutex_p (const_rtx cond1, const_rtx cond2, bool rev1, bool rev2)
{
  if (COMPARISON_P (cond1)
      && COMPARISON_P (cond2)
      && GET_CODE (cond1) ==
         (rev1 == rev2
          ? reversed_comparison_code (cond2, NULL)
          : GET_CODE (cond2))
      && rtx_equal_p (XEXP (cond1, 0), XEXP (cond2, 0))
      && XEXP (cond1, 1) == XEXP (cond2, 1))
    return true;
  return false;
}

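/* For instance, COND1 == (eq (reg 1) (const_int 0)) and
   COND2 == (ne (reg 1) (const_int 0)) with REV1 == REV2 are mutually
   exclusive: the code of one is the reversed comparison code of the
   other on identical operands.  (Illustration only.)  */
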
/* Return true if insn1 and insn2 can never depend on one another because
   the conditions under which they are executed are mutually exclusive.  */
bool
sched_insns_conditions_mutex_p (const rtx_insn *insn1, const rtx_insn *insn2)
{
  rtx cond1, cond2;
  bool rev1 = false, rev2 = false;

  /* df doesn't handle conditional lifetimes entirely correctly;
     calls mess up the conditional lifetimes.  */
  if (!CALL_P (insn1) && !CALL_P (insn2))
    {
      cond1 = sched_get_condition_with_rev (insn1, &rev1);
      cond2 = sched_get_condition_with_rev (insn2, &rev2);
      if (cond1 && cond2
          && conditions_mutex_p (cond1, cond2, rev1, rev2)
          /* Make sure first instruction doesn't affect condition of second
             instruction if switched.  */
          && !modified_in_p (cond1, insn2)
          /* Make sure second instruction doesn't affect condition of first
             instruction if switched.  */
          && !modified_in_p (cond2, insn1))
        return true;
    }
  return false;
}


/* Return true if INSN can potentially be speculated with type DS.  */
bool
sched_insn_is_legitimate_for_speculation_p (const rtx_insn *insn, ds_t ds)
{
  if (HAS_INTERNAL_DEP (insn))
    return false;

  if (!NONJUMP_INSN_P (insn))
    return false;

  if (SCHED_GROUP_P (insn))
    return false;

  if (IS_SPECULATION_CHECK_P (CONST_CAST_RTX_INSN (insn)))
    return false;

  if (side_effects_p (PATTERN (insn)))
    return false;

  if (ds & BE_IN_SPEC)
    /* The following instructions, which depend on a speculatively scheduled
       instruction, cannot be speculatively scheduled along.  */
    {
      if (may_trap_or_fault_p (PATTERN (insn)))
        /* If instruction might fault, it cannot be speculatively scheduled.
           For control speculation it's obvious why and for data speculation
           it's because the insn might get wrong input if speculation
           wasn't successful.  */
        return false;

      if ((ds & BE_IN_DATA)
          && sched_has_condition_p (insn))
        /* If this is a predicated instruction, then it cannot be
           speculatively scheduled.  See PR35659.  */
        return false;
    }

  return true;
}

/* Initialize LIST_PTR to point to one of the lists present in TYPES_PTR,
   initialize RESOLVED_P_PTR with true if that list consists of resolved deps,
   and remove the type of returned [through LIST_PTR] list from TYPES_PTR.
   This function is used to switch sd_iterator to the next list.
   !!! For internal use only.  Might consider moving it to sched-int.h.  */
void
sd_next_list (const_rtx insn, sd_list_types_def *types_ptr,
              deps_list_t *list_ptr, bool *resolved_p_ptr)
{
  sd_list_types_def types = *types_ptr;

  if (types & SD_LIST_HARD_BACK)
    {
      *list_ptr = INSN_HARD_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_HARD_BACK;
    }
  else if (types & SD_LIST_SPEC_BACK)
    {
      *list_ptr = INSN_SPEC_BACK_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_SPEC_BACK;
    }
  else if (types & SD_LIST_FORW)
    {
      *list_ptr = INSN_FORW_DEPS (insn);
      *resolved_p_ptr = false;
      *types_ptr = types & ~SD_LIST_FORW;
    }
  else if (types & SD_LIST_RES_BACK)
    {
      *list_ptr = INSN_RESOLVED_BACK_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_BACK;
    }
  else if (types & SD_LIST_RES_FORW)
    {
      *list_ptr = INSN_RESOLVED_FORW_DEPS (insn);
      *resolved_p_ptr = true;
      *types_ptr = types & ~SD_LIST_RES_FORW;
    }
  else
    {
      *list_ptr = NULL;
      *resolved_p_ptr = false;
      *types_ptr = SD_LIST_NONE;
    }
}

/* Return the summary size of INSN's lists defined by LIST_TYPES.  */
int
sd_lists_size (const_rtx insn, sd_list_types_def list_types)
{
  int size = 0;

  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (list)
        size += DEPS_LIST_N_LINKS (list);
    }

  return size;
}

/* Return true if INSN's lists defined by LIST_TYPES are all empty.  */

bool
sd_lists_empty_p (const_rtx insn, sd_list_types_def list_types)
{
  while (list_types != SD_LIST_NONE)
    {
      deps_list_t list;
      bool resolved_p;

      sd_next_list (insn, &list_types, &list, &resolved_p);
      if (!deps_list_empty_p (list))
        return false;
    }

  return true;
}

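/* Illustrative sketch: the usual way to walk these lists is the iterator
   interface; e.g. to print every producer of INSN's back dependencies:

     sd_iterator_def sd_it;
     dep_t dep;

     FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
       fprintf (stderr, "%d ", INSN_UID (DEP_PRO (dep)));

   (Sketch only; see sd_find_dep_between_no_cache below for a real use.)  */
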
/* Initialize data for INSN.  */
void
sd_init_insn (rtx_insn *insn)
{
  INSN_HARD_BACK_DEPS (insn) = create_deps_list ();
  INSN_SPEC_BACK_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_BACK_DEPS (insn) = create_deps_list ();
  INSN_FORW_DEPS (insn) = create_deps_list ();
  INSN_RESOLVED_FORW_DEPS (insn) = create_deps_list ();

  /* ??? It would be nice to allocate dependency caches here.  */
}

/* Free data for INSN.  */
void
sd_finish_insn (rtx_insn *insn)
{
  /* ??? It would be nice to deallocate dependency caches here.  */

  free_deps_list (INSN_HARD_BACK_DEPS (insn));
  INSN_HARD_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_SPEC_BACK_DEPS (insn));
  INSN_SPEC_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_BACK_DEPS (insn));
  INSN_RESOLVED_BACK_DEPS (insn) = NULL;

  free_deps_list (INSN_FORW_DEPS (insn));
  INSN_FORW_DEPS (insn) = NULL;

  free_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
  INSN_RESOLVED_FORW_DEPS (insn) = NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Search through resolved dependency lists if RESOLVED_P is true.
   If no such dependency is found return NULL,
   otherwise return the dependency and initialize SD_IT_PTR [if it is nonnull]
   with an iterator pointing to it.  */
static dep_t
sd_find_dep_between_no_cache (rtx pro, rtx con, bool resolved_p,
                              sd_iterator_def *sd_it_ptr)
{
  sd_list_types_def pro_list_type;
  sd_list_types_def con_list_type;
  sd_iterator_def sd_it;
  dep_t dep;
  bool found_p = false;

  if (resolved_p)
    {
      pro_list_type = SD_LIST_RES_FORW;
      con_list_type = SD_LIST_RES_BACK;
    }
  else
    {
      pro_list_type = SD_LIST_FORW;
      con_list_type = SD_LIST_BACK;
    }

  /* Walk through either back list of INSN or forw list of ELEM
     depending on which one is shorter.  */
  if (sd_lists_size (con, con_list_type) < sd_lists_size (pro, pro_list_type))
    {
      /* Find the dep_link with producer PRO in consumer's back_deps.  */
      FOR_EACH_DEP (con, con_list_type, sd_it, dep)
        if (DEP_PRO (dep) == pro)
          {
            found_p = true;
            break;
          }
    }
  else
    {
      /* Find the dep_link with consumer CON in producer's forw_deps.  */
      FOR_EACH_DEP (pro, pro_list_type, sd_it, dep)
        if (DEP_CON (dep) == con)
          {
            found_p = true;
            break;
          }
    }

  if (found_p)
    {
      if (sd_it_ptr != NULL)
        *sd_it_ptr = sd_it;

      return dep;
    }

  return NULL;
}

/* Find a dependency between producer PRO and consumer CON.
   Use the dependency caches [if available] to check whether a dependency
   is present at all.  Search through resolved dependency lists if
   RESOLVED_P is true.  Return the dependency, or NULL if none is found.  */
dep_t
sd_find_dep_between (rtx pro, rtx con, bool resolved_p)
{
  if (true_dependency_cache != NULL)
    /* Avoiding the list walk below can cut compile times dramatically
       for some code.  */
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      if (!bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid)
          && !bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        return NULL;
    }

  return sd_find_dep_between_no_cache (pro, con, resolved_p, NULL);
}

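/* Example usage (sketch): to test whether CON already depends on PRO in
   the unresolved lists and inspect the dependence kind:

     dep_t dep = sd_find_dep_between (pro, con, false);
     if (dep != NULL && DEP_TYPE (dep) == REG_DEP_TRUE)
       ...;
*/
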
/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps.

   This function merely checks whether producer and consumer are the same
   insn and doesn't create a dep in this case.  Actual manipulation of
   dependence data structures is performed in add_or_update_dep_1.  */
static enum DEPS_ADJUST_RESULT
maybe_add_or_update_dep_1 (dep_t dep, bool resolved_p, rtx mem1, rtx mem2)
{
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem));

  /* Don't depend an insn on itself.  */
  if (insn == elem)
    {
      if (sched_deps_info->generate_spec_deps)
        /* INSN has an internal dependence, which we can't overcome.  */
        HAS_INTERNAL_DEP (insn) = 1;

      return DEP_NODEP;
    }

  return add_or_update_dep_1 (dep, resolved_p, mem1, mem2);
}

/* Ask dependency caches what needs to be done for dependence DEP.
   Return DEP_CREATED if new dependence should be created and there is no
   need to try to find one searching the dependencies lists.
   Return DEP_PRESENT if there already is a dependence described by DEP and
   hence nothing is to be done.
   Return DEP_CHANGED if there already is a dependence, but it should be
   updated to incorporate additional information from DEP.  */
static enum DEPS_ADJUST_RESULT
ask_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  gcc_assert (true_dependency_cache != NULL
              && output_dependency_cache != NULL
              && anti_dependency_cache != NULL
              && control_dependency_cache != NULL);

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      enum reg_note present_dep_type;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_TRUE;
      else if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_OUTPUT;
      else if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_ANTI;
      else if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        present_dep_type = REG_DEP_CONTROL;
      else
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if ((int) DEP_TYPE (dep) >= (int) present_dep_type)
        /* DEP does not add anything to the existing dependence.  */
        return DEP_PRESENT;
    }
  else
    {
      ds_t present_dep_types = 0;

      if (bitmap_bit_p (&true_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_TRUE;
      if (bitmap_bit_p (&output_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_OUTPUT;
      if (bitmap_bit_p (&anti_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_ANTI;
      if (bitmap_bit_p (&control_dependency_cache[insn_luid], elem_luid))
        present_dep_types |= DEP_CONTROL;

      if (present_dep_types == 0)
        /* There is no existing dep so it should be created.  */
        return DEP_CREATED;

      if (!(current_sched_info->flags & DO_SPECULATION)
          || !bitmap_bit_p (&spec_dependency_cache[insn_luid], elem_luid))
        {
          if ((present_dep_types | (DEP_STATUS (dep) & DEP_TYPES))
              == present_dep_types)
            /* DEP does not add anything to the existing dependence.  */
            return DEP_PRESENT;
        }
      else
        {
          /* Only true dependencies can be data speculative and
             only anti dependencies can be control speculative.  */
          gcc_assert ((present_dep_types & (DEP_TRUE | DEP_ANTI))
                      == present_dep_types);

          /* if (DEP is SPECULATIVE) then
               ..we should update DEP_STATUS
             else
               ..we should reset existing dep to non-speculative.  */
        }
    }

  return DEP_CHANGED;
}

/* Set dependency caches according to DEP.  */
static void
set_dependency_caches (dep_t dep)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (DEP_TYPE (dep))
        {
        case REG_DEP_TRUE:
          bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_OUTPUT:
          bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_CONTROL:
          bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }
  else
    {
      ds_t ds = DEP_STATUS (dep);

      if (ds & DEP_TRUE)
        bitmap_set_bit (&true_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_OUTPUT)
        bitmap_set_bit (&output_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_ANTI)
        bitmap_set_bit (&anti_dependency_cache[insn_luid], elem_luid);
      if (ds & DEP_CONTROL)
        bitmap_set_bit (&control_dependency_cache[insn_luid], elem_luid);

      if (ds & SPECULATIVE)
        {
          gcc_assert (current_sched_info->flags & DO_SPECULATION);
          bitmap_set_bit (&spec_dependency_cache[insn_luid], elem_luid);
        }
    }
}

/* The type of dependence DEP has changed from OLD_TYPE.  Update the
   dependency caches accordingly.  */
static void
update_dependency_caches (dep_t dep, enum reg_note old_type)
{
  int elem_luid = INSN_LUID (DEP_PRO (dep));
  int insn_luid = INSN_LUID (DEP_CON (dep));

  /* Clear the corresponding cache entry because the type of the link
     may have changed.  Keep the entries if we use a deps list.  */
  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      switch (old_type)
        {
        case REG_DEP_OUTPUT:
          bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_ANTI:
          bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
          break;

        case REG_DEP_CONTROL:
          bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
          break;

        default:
          gcc_unreachable ();
        }
    }

  set_dependency_caches (dep);
}

/* Convert a dependence pointed to by SD_IT to be non-speculative.  */
static void
change_spec_dep_to_hard (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_link_t link = DEP_NODE_BACK (node);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  move_dep_link (link, INSN_SPEC_BACK_DEPS (insn), INSN_HARD_BACK_DEPS (insn));

  DEP_STATUS (dep) &= ~SPECULATIVE;

  if (true_dependency_cache != NULL)
    /* Clear the cache entry.  */
    bitmap_clear_bit (&spec_dependency_cache[INSN_LUID (insn)],
                      INSN_LUID (elem));
}

/* Update DEP to incorporate information from NEW_DEP.
   SD_IT points to DEP in case it should be moved to another list.
   MEM1 and MEM2, if nonnull, correspond to memory locations in case if
   data-speculative dependence should be updated.  */
static enum DEPS_ADJUST_RESULT
update_dep (dep_t dep, dep_t new_dep,
            sd_iterator_def sd_it ATTRIBUTE_UNUSED,
            rtx mem1 ATTRIBUTE_UNUSED,
            rtx mem2 ATTRIBUTE_UNUSED)
{
  enum DEPS_ADJUST_RESULT res = DEP_PRESENT;
  enum reg_note old_type = DEP_TYPE (dep);
  bool was_spec = dep_spec_p (dep);

  DEP_NONREG (dep) |= DEP_NONREG (new_dep);
  DEP_MULTIPLE (dep) = 1;

  /* If this is a more restrictive type of dependence than the
     existing one, then change the existing dependence to this
     type.  */
  if ((int) DEP_TYPE (new_dep) < (int) old_type)
    {
      DEP_TYPE (dep) = DEP_TYPE (new_dep);
      res = DEP_CHANGED;
    }

  if (current_sched_info->flags & USE_DEPS_LIST)
    /* Update DEP_STATUS.  */
    {
      ds_t dep_status = DEP_STATUS (dep);
      ds_t ds = DEP_STATUS (new_dep);
      ds_t new_status = ds | dep_status;

      if (new_status & SPECULATIVE)
        {
          /* Either existing dep or a dep we're adding or both are
             speculative.  */
          if (!(ds & SPECULATIVE)
              || !(dep_status & SPECULATIVE))
            /* The new dep can't be speculative.  */
            new_status &= ~SPECULATIVE;
          else
            {
              /* Both are speculative.  Merge probabilities.  */
              if (mem1 != NULL)
                {
                  dw_t dw;

                  dw = estimate_dep_weak (mem1, mem2);
                  ds = set_dep_weak (ds, BEGIN_DATA, dw);
                }

              new_status = ds_merge (dep_status, ds);
            }
        }

      ds = new_status;

      if (dep_status != ds)
        {
          DEP_STATUS (dep) = ds;
          res = DEP_CHANGED;
        }
    }

  if (was_spec && !dep_spec_p (dep))
    /* The old dep was speculative, but now it isn't.  */
    change_spec_dep_to_hard (sd_it);

  if (true_dependency_cache != NULL
      && res == DEP_CHANGED)
    update_dependency_caches (dep, old_type);

  return res;
}

/* Add or update a dependence described by DEP.
   MEM1 and MEM2, if non-null, correspond to memory locations in case of
   data speculation.

   The function returns a value indicating if an old entry has been changed
   or a new entry has been added to insn's backward deps or nothing has
   been updated at all.  */
static enum DEPS_ADJUST_RESULT
add_or_update_dep_1 (dep_t new_dep, bool resolved_p,
                     rtx mem1 ATTRIBUTE_UNUSED, rtx mem2 ATTRIBUTE_UNUSED)
{
  bool maybe_present_p = true;
  bool present_p = false;

  gcc_assert (INSN_P (DEP_PRO (new_dep)) && INSN_P (DEP_CON (new_dep))
              && DEP_PRO (new_dep) != DEP_CON (new_dep));

  if (flag_checking)
    check_dep (new_dep, mem1 != NULL);

  if (true_dependency_cache != NULL)
    {
      switch (ask_dependency_caches (new_dep))
        {
        case DEP_PRESENT:
          dep_t present_dep;
          sd_iterator_def sd_it;

          present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                      DEP_CON (new_dep),
                                                      resolved_p, &sd_it);
          DEP_MULTIPLE (present_dep) = 1;
          return DEP_PRESENT;

        case DEP_CHANGED:
          maybe_present_p = true;
          present_p = true;
          break;

        case DEP_CREATED:
          maybe_present_p = false;
          present_p = false;
          break;

        default:
          gcc_unreachable ();
          break;
        }
    }

  /* Check that we don't already have this dependence.  */
  if (maybe_present_p)
    {
      dep_t present_dep;
      sd_iterator_def sd_it;

      gcc_assert (true_dependency_cache == NULL || present_p);

      present_dep = sd_find_dep_between_no_cache (DEP_PRO (new_dep),
                                                  DEP_CON (new_dep),
                                                  resolved_p, &sd_it);

      if (present_dep != NULL)
        /* We found an existing dependency between ELEM and INSN.  */
        return update_dep (present_dep, new_dep, sd_it, mem1, mem2);
      else
        /* We didn't find a dep; it shouldn't be present in the cache.  */
        gcc_assert (!present_p);
    }

  /* Might want to check one level of transitivity to save conses.
     This check should be done in maybe_add_or_update_dep_1.
     Since we made it to add_or_update_dep_1, we must create
     (or update) a link.  */

  if (mem1 != NULL_RTX)
    {
      gcc_assert (sched_deps_info->generate_spec_deps);
      DEP_STATUS (new_dep) = set_dep_weak (DEP_STATUS (new_dep), BEGIN_DATA,
                                           estimate_dep_weak (mem1, mem2));
    }

  sd_add_dep (new_dep, resolved_p);

  return DEP_CREATED;
}

/* Initialize BACK_LIST_PTR with consumer's backward list and
   FORW_LIST_PTR with producer's forward list.  If RESOLVED_P is true
   initialize with lists that hold resolved deps.  */
static void
get_back_and_forw_lists (dep_t dep, bool resolved_p,
                         deps_list_t *back_list_ptr,
                         deps_list_t *forw_list_ptr)
{
  rtx_insn *con = DEP_CON (dep);

  if (!resolved_p)
    {
      if (dep_spec_p (dep))
        *back_list_ptr = INSN_SPEC_BACK_DEPS (con);
      else
        *back_list_ptr = INSN_HARD_BACK_DEPS (con);

      *forw_list_ptr = INSN_FORW_DEPS (DEP_PRO (dep));
    }
  else
    {
      *back_list_ptr = INSN_RESOLVED_BACK_DEPS (con);
      *forw_list_ptr = INSN_RESOLVED_FORW_DEPS (DEP_PRO (dep));
    }
}

/* Add dependence described by DEP.
   If RESOLVED_P is true treat the dependence as a resolved one.  */
void
sd_add_dep (dep_t dep, bool resolved_p)
{
  dep_node_t n = create_dep_node ();
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;
  rtx_insn *elem = DEP_PRO (dep);
  rtx_insn *insn = DEP_CON (dep);

  gcc_assert (INSN_P (insn) && INSN_P (elem) && insn != elem);

  if ((current_sched_info->flags & DO_SPECULATION) == 0
      || !sched_insn_is_legitimate_for_speculation_p (insn, DEP_STATUS (dep)))
    DEP_STATUS (dep) &= ~SPECULATIVE;

  copy_dep (DEP_NODE_DEP (n), dep);

  get_back_and_forw_lists (dep, resolved_p, &con_back_deps, &pro_forw_deps);

  add_to_deps_list (DEP_NODE_BACK (n), con_back_deps);

  if (flag_checking)
    check_dep (dep, false);

  add_to_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  /* If we are adding a dependency to INSN's LOG_LINKs, then note that
     in the bitmap caches of dependency information.  */
  if (true_dependency_cache != NULL)
    set_dependency_caches (dep);
}

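/* Example usage (sketch): creating a fresh anti dependence of CON on PRO
   by hand, following the pattern sd_copy_back_deps uses below:

     dep_def _dep, *dep = &_dep;

     init_dep (dep, pro, con, REG_DEP_ANTI);
     sd_add_dep (dep, false);

   (Sketch only.)  */
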
/* Add or update a backward dependence described by DEP, whose type and
   dep_status are taken from DEP itself.  This function is a convenience
   wrapper around add_or_update_dep_1.  */
enum DEPS_ADJUST_RESULT
sd_add_or_update_dep (dep_t dep, bool resolved_p)
{
  return add_or_update_dep_1 (dep, resolved_p, NULL_RTX, NULL_RTX);
}

/* Resolve the dependence pointed to by SD_IT.
   SD_IT will advance to the next element.  */
void
sd_resolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_SPEC_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                   INSN_RESOLVED_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_FORW_DEPS (pro),
                 INSN_RESOLVED_FORW_DEPS (pro));
}

/* Perform the inverse operation of sd_resolve_dep.  Restore the dependence
   pointed to by SD_IT to unresolved state.  */
void
sd_unresolve_dep (sd_iterator_def sd_it)
{
  dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (node);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);

  if (dep_spec_p (dep))
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_SPEC_BACK_DEPS (con));
  else
    move_dep_link (DEP_NODE_BACK (node), INSN_RESOLVED_BACK_DEPS (con),
                   INSN_HARD_BACK_DEPS (con));

  move_dep_link (DEP_NODE_FORW (node), INSN_RESOLVED_FORW_DEPS (pro),
                 INSN_FORW_DEPS (pro));
}

/* Make TO depend on all the FROM's producers.
   If RESOLVED_P is true add dependencies to the resolved lists.  */
void
sd_copy_back_deps (rtx_insn *to, rtx_insn *from, bool resolved_p)
{
  sd_list_types_def list_type;
  sd_iterator_def sd_it;
  dep_t dep;

  list_type = resolved_p ? SD_LIST_RES_BACK : SD_LIST_BACK;

  FOR_EACH_DEP (from, list_type, sd_it, dep)
    {
      dep_def _new_dep, *new_dep = &_new_dep;

      copy_dep (new_dep, dep);
      DEP_CON (new_dep) = to;
      sd_add_dep (new_dep, resolved_p);
    }
}

/* Remove a dependency referred to by SD_IT.
   SD_IT will point to the next dependence after removal.  */
void
sd_delete_dep (sd_iterator_def sd_it)
{
  dep_node_t n = DEP_LINK_NODE (*sd_it.linkp);
  dep_t dep = DEP_NODE_DEP (n);
  rtx_insn *pro = DEP_PRO (dep);
  rtx_insn *con = DEP_CON (dep);
  deps_list_t con_back_deps;
  deps_list_t pro_forw_deps;

  if (true_dependency_cache != NULL)
    {
      int elem_luid = INSN_LUID (pro);
      int insn_luid = INSN_LUID (con);

      bitmap_clear_bit (&true_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&anti_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&control_dependency_cache[insn_luid], elem_luid);
      bitmap_clear_bit (&output_dependency_cache[insn_luid], elem_luid);

      if (current_sched_info->flags & DO_SPECULATION)
        bitmap_clear_bit (&spec_dependency_cache[insn_luid], elem_luid);
    }

  get_back_and_forw_lists (dep, sd_it.resolved_p,
                           &con_back_deps, &pro_forw_deps);

  remove_from_deps_list (DEP_NODE_BACK (n), con_back_deps);
  remove_from_deps_list (DEP_NODE_FORW (n), pro_forw_deps);

  delete_dep_node (n);
}

/* Dump size of the lists.  */
#define DUMP_LISTS_SIZE (2)

/* Dump dependencies of the lists.  */
#define DUMP_LISTS_DEPS (4)

/* Dump all information about the lists.  */
#define DUMP_LISTS_ALL (DUMP_LISTS_SIZE | DUMP_LISTS_DEPS)

/* Dump deps_lists of INSN specified by TYPES to DUMP.
   FLAGS is a bit mask specifying what information about the lists needs
   to be printed.
   If FLAGS has the very first bit set, then dump all information about
   the lists and propagate this bit into the callee dump functions.  */
static void
dump_lists (FILE *dump, rtx insn, sd_list_types_def types, int flags)
{
  sd_iterator_def sd_it;
  dep_t dep;
  int all;

  all = (flags & 1);

  if (all)
    flags |= DUMP_LISTS_ALL;

  fprintf (dump, "[");

  if (flags & DUMP_LISTS_SIZE)
    fprintf (dump, "%d; ", sd_lists_size (insn, types));

  if (flags & DUMP_LISTS_DEPS)
    {
      FOR_EACH_DEP (insn, types, sd_it, dep)
        {
          dump_dep (dump, dep, dump_dep_flags | all);
          fprintf (dump, " ");
        }
    }
}

/* Dump all information about deps_lists of INSN specified by TYPES
   to STDERR.  */
void
sd_debug_lists (rtx insn, sd_list_types_def types)
{
  dump_lists (stderr, insn, types, 1);
  fprintf (stderr, "\n");
}

/* A wrapper around add_dependence_1, to add a dependence of CON on
   PRO, with type DEP_TYPE.  This function implements special handling
   for REG_DEP_CONTROL dependencies.  For these, we optionally promote
   the type to REG_DEP_ANTI if we can determine that predication is
   impossible; otherwise we add additional true dependencies on the
   INSN_COND_DEPS list of the jump (which PRO must be).  */
void
add_dependence (rtx_insn *con, rtx_insn *pro, enum reg_note dep_type)
{
  if (dep_type == REG_DEP_CONTROL
      && !(current_sched_info->flags & DO_PREDICATION))
    dep_type = REG_DEP_ANTI;

  /* A REG_DEP_CONTROL dependence may be eliminated through predication,
     so we must also make the insn dependent on the setter of the
     condition.  */
  if (dep_type == REG_DEP_CONTROL)
    {
      rtx_insn *real_pro = pro;
      rtx_insn *other = real_insn_for_shadow (real_pro);
      rtx cond;

      if (other != NULL_RTX)
        real_pro = other;
      cond = sched_get_reverse_condition_uncached (real_pro);
      /* Verify that the insn does not use a different value in
         the condition register than the one that was present at
         the jump.  */
      if (cond == NULL_RTX)
        dep_type = REG_DEP_ANTI;
      else if (INSN_CACHED_COND (real_pro) == const_true_rtx)
        {
          HARD_REG_SET uses;
          CLEAR_HARD_REG_SET (uses);
          note_uses (&PATTERN (con), record_hard_reg_uses, &uses);
          if (TEST_HARD_REG_BIT (uses, REGNO (XEXP (cond, 0))))
            dep_type = REG_DEP_ANTI;
        }
      if (dep_type == REG_DEP_CONTROL)
        {
          if (sched_verbose >= 5)
            fprintf (sched_dump, "making DEP_CONTROL for %d\n",
                     INSN_UID (real_pro));
          add_dependence_list (con, INSN_COND_DEPS (real_pro), 0,
                               REG_DEP_TRUE, false);
        }
    }

  add_dependence_1 (con, pro, dep_type);
}

/* A convenience wrapper to operate on an entire list.  HARD should be
   true if DEP_NONREG should be set on newly created dependencies.  */

static void
add_dependence_list (rtx_insn *insn, rtx_insn_list *list, int uncond,
                     enum reg_note dep_type, bool hard)
{
  mark_as_hard = hard;
  for (; list; list = list->next ())
    {
      if (uncond || ! sched_insns_conditions_mutex_p (insn, list->insn ()))
        add_dependence (insn, list->insn (), dep_type);
    }
  mark_as_hard = false;
}

/* Similar, but free *LISTP at the same time, when the context
   is not readonly.  HARD should be true if DEP_NONREG should be set on
   newly created dependencies.  */

static void
add_dependence_list_and_free (class deps_desc *deps, rtx_insn *insn,
                              rtx_insn_list **listp,
                              int uncond, enum reg_note dep_type, bool hard)
{
  add_dependence_list (insn, *listp, uncond, dep_type, hard);

  /* We don't want to short-circuit dependencies involving debug
     insns, because they may cause actual dependencies to be
     disregarded.  */
  if (deps->readonly || DEBUG_INSN_P (insn))
    return;

  free_INSN_LIST_list (listp);
}

/* Remove all occurrences of INSN from LIST.  Return the number of
   occurrences removed.  */

static int
remove_from_dependence_list (rtx_insn *insn, rtx_insn_list **listp)
{
  int removed = 0;

  while (*listp)
    {
      if ((*listp)->insn () == insn)
        {
          remove_free_INSN_LIST_node (listp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
    }

  return removed;
}

/* Same as above, but process two lists at once.  */
static int
remove_from_both_dependence_lists (rtx_insn *insn,
                                   rtx_insn_list **listp,
                                   rtx_expr_list **exprp)
{
  int removed = 0;

  while (*listp)
    {
      if (XEXP (*listp, 0) == insn)
        {
          remove_free_INSN_LIST_node (listp);
          remove_free_EXPR_LIST_node (exprp);
          removed++;
          continue;
        }

      listp = (rtx_insn_list **)&XEXP (*listp, 1);
      exprp = (rtx_expr_list **)&XEXP (*exprp, 1);
    }

  return removed;
}

/* Clear all dependencies for an insn.  */
static void
delete_all_dependences (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;

  /* The below cycle can be optimized to clear the caches and back_deps
     in one call but that would provoke duplication of code from
     delete_dep ().  */

  for (sd_it = sd_iterator_start (insn, SD_LIST_BACK);
       sd_iterator_cond (&sd_it, &dep);)
    sd_delete_dep (sd_it);
}

/* All insns in a scheduling group except the first should only have
   dependencies on the previous insn in the group.  So we find the
   first instruction in the scheduling group by walking the dependence
   chains backwards.  Then we add the dependencies for the group to
   the previous nonnote insn.  */

static void
chain_to_prev_insn (rtx_insn *insn)
{
  sd_iterator_def sd_it;
  dep_t dep;
  rtx_insn *prev_nonnote;

  FOR_EACH_DEP (insn, SD_LIST_BACK, sd_it, dep)
    {
      rtx_insn *i = insn;
      rtx_insn *pro = DEP_PRO (dep);

      do
        {
          i = prev_nonnote_insn (i);

          if (pro == i)
            goto next_link;
        } while (SCHED_GROUP_P (i) || DEBUG_INSN_P (i));

      if (! sched_insns_conditions_mutex_p (i, pro))
        add_dependence (i, pro, DEP_TYPE (dep));
    next_link:;
    }

  delete_all_dependences (insn);

  prev_nonnote = prev_nonnote_nondebug_insn (insn);
  if (BLOCK_FOR_INSN (insn) == BLOCK_FOR_INSN (prev_nonnote)
      && ! sched_insns_conditions_mutex_p (insn, prev_nonnote))
    add_dependence (insn, prev_nonnote, REG_DEP_ANTI);
}

/* Process an insn's memory dependencies.  There are four kinds of
   dependencies:

   (0) read dependence: read follows read
   (1) true dependence: read follows write
   (2) output dependence: write follows write
   (3) anti dependence: write follows read

   We are careful to build only dependencies which actually exist, and
   use transitivity to avoid building too many links.  */

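/* For instance, given

     (1) r1 = [mem A]
     (2) [mem A] = r2

   insn (2) has an anti dependence on insn (1): the store must not be
   moved above the load.  With the order reversed, (1) would instead
   have a true dependence on (2).  (Illustration only.)  */
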
1707/* Add an INSN and MEM reference pair to a pending INSN_LIST and MEM_LIST.
1708 The MEM is a memory reference contained within INSN, which we are saving
1709 so that we can do memory aliasing on it. */
1710
1711static void
1712add_insn_mem_dependence (class deps_desc *deps, bool read_p,
1713 rtx_insn *insn, rtx mem)
1714{
1715 rtx_insn_list **insn_list;
1716 rtx_insn_list *insn_node;
1717 rtx_expr_list **mem_list;
1718 rtx_expr_list *mem_node;
1719
1720 gcc_assert (!deps->readonly);
1721 if (read_p)
1722 {
1723 insn_list = &deps->pending_read_insns;
1724 mem_list = &deps->pending_read_mems;
1725 if (!DEBUG_INSN_P (insn))
1726 deps->pending_read_list_length++;
1727 }
1728 else
1729 {
1730 insn_list = &deps->pending_write_insns;
1731 mem_list = &deps->pending_write_mems;
1732 deps->pending_write_list_length++;
1733 }
1734
1735 insn_node = alloc_INSN_LIST (insn, *insn_list);
1736 *insn_list = insn_node;
1737
1738 if (sched_deps_info->use_cselib && MEM_P (mem))
1739 {
1740 mem = shallow_copy_rtx (mem);
1741 XEXP (mem, 0) = cselib_subst_to_values_from_insn (XEXP (mem, 0),
1742 GET_MODE (mem), insn);
1743 }
1744 mem_node = alloc_EXPR_LIST (VOIDmode, canon_rtx (mem), *mem_list);
1745 *mem_list = mem_node;
1746}
1747
1748/* Make a dependency between every memory reference on the pending lists
1749 and INSN, thus flushing the pending lists. FOR_READ is true if emitting
1750 dependencies for a read operation, similarly with FOR_WRITE. */
1751
1752static void
1753flush_pending_lists (class deps_desc *deps, rtx_insn *insn, int for_read,
1754 int for_write)
1755{
1756 if (for_write)
1757 {
1758 add_dependence_list_and_free (deps, insn, listp: &deps->pending_read_insns,
1759 uncond: 1, dep_type: REG_DEP_ANTI, hard: true);
1760 if (!deps->readonly)
1761 {
1762 free_EXPR_LIST_list (&deps->pending_read_mems);
1763 deps->pending_read_list_length = 0;
1764 }
1765 }
1766
1767 add_dependence_list_and_free (deps, insn, listp: &deps->pending_write_insns, uncond: 1,
1768 dep_type: for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1769 hard: true);
1770
1771 add_dependence_list_and_free (deps, insn,
1772 listp: &deps->last_pending_memory_flush, uncond: 1,
1773 dep_type: for_read ? REG_DEP_ANTI : REG_DEP_OUTPUT,
1774 hard: true);
1775
1776 add_dependence_list_and_free (deps, insn, listp: &deps->pending_jump_insns, uncond: 1,
1777 dep_type: REG_DEP_ANTI, hard: true);
1778
1779 if (DEBUG_INSN_P (insn))
1780 {
1781 if (for_write)
1782 free_INSN_LIST_list (&deps->pending_read_insns);
1783 free_INSN_LIST_list (&deps->pending_write_insns);
1784 free_INSN_LIST_list (&deps->last_pending_memory_flush);
1785 free_INSN_LIST_list (&deps->pending_jump_insns);
1786 }
1787
1788 if (!deps->readonly)
1789 {
1790 free_EXPR_LIST_list (&deps->pending_write_mems);
1791 deps->pending_write_list_length = 0;
1792
1793 deps->last_pending_memory_flush = alloc_INSN_LIST (insn, NULL_RTX);
1794 deps->pending_flush_length = 1;
1795 }
1796 mark_as_hard = false;
1797}
1798
1799/* Instruction which dependencies we are analyzing. */
1800static rtx_insn *cur_insn = NULL;
1801
1802/* Implement hooks for haifa scheduler. */
1803
1804static void
1805haifa_start_insn (rtx_insn *insn)
1806{
1807 gcc_assert (insn && !cur_insn);
1808
1809 cur_insn = insn;
1810}
1811
1812static void
1813haifa_finish_insn (void)
1814{
1815 cur_insn = NULL;
1816}
1817
1818void
1819haifa_note_reg_set (int regno)
1820{
1821 SET_REGNO_REG_SET (reg_pending_sets, regno);
1822}
1823
1824void
1825haifa_note_reg_clobber (int regno)
1826{
1827 SET_REGNO_REG_SET (reg_pending_clobbers, regno);
1828}
1829
1830void
1831haifa_note_reg_use (int regno)
1832{
1833 SET_REGNO_REG_SET (reg_pending_uses, regno);
1834}
1835
1836static void
1837haifa_note_mem_dep (rtx mem, rtx pending_mem, rtx_insn *pending_insn, ds_t ds)
1838{
1839 if (!(ds & SPECULATIVE))
1840 {
1841 mem = NULL_RTX;
1842 pending_mem = NULL_RTX;
1843 }
1844 else
1845 gcc_assert (ds & BEGIN_DATA);
1846
1847 {
1848 dep_def _dep, *dep = &_dep;
1849
1850 init_dep_1 (dep, pro: pending_insn, con: cur_insn, type: ds_to_dt (ds),
1851 ds: current_sched_info->flags & USE_DEPS_LIST ? ds : 0);
1852 DEP_NONREG (dep) = 1;
1853 maybe_add_or_update_dep_1 (dep, resolved_p: false, mem1: pending_mem, mem2: mem);
1854 }
1855
1856}
1857
1858static void
1859haifa_note_dep (rtx_insn *elem, ds_t ds)
1860{
1861 dep_def _dep;
1862 dep_t dep = &_dep;
1863
1864 init_dep (dep, pro: elem, con: cur_insn, kind: ds_to_dt (ds));
1865 if (mark_as_hard)
1866 DEP_NONREG (dep) = 1;
1867 maybe_add_or_update_dep_1 (dep, resolved_p: false, NULL_RTX, NULL_RTX);
1868}
1869
1870static void
1871note_reg_use (int r)
1872{
1873 if (sched_deps_info->note_reg_use)
1874 sched_deps_info->note_reg_use (r);
1875}
1876
1877static void
1878note_reg_set (int r)
1879{
1880 if (sched_deps_info->note_reg_set)
1881 sched_deps_info->note_reg_set (r);
1882}
1883
1884static void
1885note_reg_clobber (int r)
1886{
1887 if (sched_deps_info->note_reg_clobber)
1888 sched_deps_info->note_reg_clobber (r);
1889}
1890
1891static void
1892note_mem_dep (rtx m1, rtx m2, rtx_insn *e, ds_t ds)
1893{
1894 if (sched_deps_info->note_mem_dep)
1895 sched_deps_info->note_mem_dep (m1, m2, e, ds);
1896}
1897
1898static void
1899note_dep (rtx_insn *e, ds_t ds)
1900{
1901 if (sched_deps_info->note_dep)
1902 sched_deps_info->note_dep (e, ds);
1903}
1904
1905/* Return corresponding to DS reg_note. */
1906enum reg_note
1907ds_to_dt (ds_t ds)
1908{
1909 if (ds & DEP_TRUE)
1910 return REG_DEP_TRUE;
1911 else if (ds & DEP_OUTPUT)
1912 return REG_DEP_OUTPUT;
1913 else if (ds & DEP_ANTI)
1914 return REG_DEP_ANTI;
1915 else
1916 {
1917 gcc_assert (ds & DEP_CONTROL);
1918 return REG_DEP_CONTROL;
1919 }
1920}


/* Functions for computation of info needed for register pressure
   sensitive insn scheduling.  */


/* Allocate and return reg_use_data structure for REGNO and INSN.  */
static struct reg_use_data *
create_insn_reg_use (int regno, rtx_insn *insn)
{
  struct reg_use_data *use;

  use = (struct reg_use_data *) xmalloc (sizeof (struct reg_use_data));
  use->regno = regno;
  use->insn = insn;
  use->next_insn_use = INSN_REG_USE_LIST (insn);
  INSN_REG_USE_LIST (insn) = use;
  return use;
}

/* Allocate reg_set_data structure for REGNO and INSN.  */
static void
create_insn_reg_set (int regno, rtx insn)
{
  struct reg_set_data *set;

  set = (struct reg_set_data *) xmalloc (sizeof (struct reg_set_data));
  set->regno = regno;
  set->insn = insn;
  set->next_insn_set = INSN_REG_SET_LIST (insn);
  INSN_REG_SET_LIST (insn) = set;
}

/* Set up insn register uses for INSN and dependency context DEPS.  */
static void
setup_insn_reg_uses (class deps_desc *deps, rtx_insn *insn)
{
  unsigned i;
  reg_set_iterator rsi;
  struct reg_use_data *use, *use2, *next;
  struct deps_reg *reg_last;

  EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
    {
      if (i < FIRST_PSEUDO_REGISTER
          && TEST_HARD_REG_BIT (ira_no_alloc_regs, i))
        continue;

      if (find_regno_note (insn, REG_DEAD, i) == NULL_RTX
          && ! REGNO_REG_SET_P (reg_pending_sets, i)
          && ! REGNO_REG_SET_P (reg_pending_clobbers, i))
        /* Ignore use which is not dying.  */
        continue;

      use = create_insn_reg_use (i, insn);
      use->next_regno_use = use;
      reg_last = &deps->reg_last[i];

      /* Create the cycle list of uses.  */
      for (rtx_insn_list *list = reg_last->uses; list; list = list->next ())
        {
          use2 = create_insn_reg_use (i, list->insn ());
          next = use->next_regno_use;
          use->next_regno_use = use2;
          use2->next_regno_use = next;
        }
    }
}
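
/* To illustrate the cycle list built above: assuming pseudo 100 dies in
   INSN and reg_last->uses also records earlier uses in insns I1 and I2,
   the loop links one reg_use_data node per use into a single circular
   next_regno_use chain containing INSN, I1 and I2, so every use of
   regno 100 is reachable from any other by following next_regno_use.  */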

/* Register pressure info for the currently processed insn.  */
static struct reg_pressure_data reg_pressure_info[N_REG_CLASSES];

/* Return TRUE if INSN has the use structure for REGNO.  */
static bool
insn_use_p (rtx insn, int regno)
{
  struct reg_use_data *use;

  for (use = INSN_REG_USE_LIST (insn); use != NULL; use = use->next_insn_use)
    if (use->regno == regno)
      return true;
  return false;
}

/* Update the register pressure info after birth of pseudo register REGNO
   in INSN.  Arguments CLOBBER_P and UNUSED_P say respectively whether the
   register is clobbered or unused after the insn.  */
static void
mark_insn_pseudo_birth (rtx insn, int regno, bool clobber_p, bool unused_p)
{
  int incr, new_incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      if (clobber_p)
        {
          new_incr = reg_pressure_info[cl].clobber_increase + incr;
          reg_pressure_info[cl].clobber_increase = new_incr;
        }
      else if (unused_p)
        {
          new_incr = reg_pressure_info[cl].unused_set_increase + incr;
          reg_pressure_info[cl].unused_set_increase = new_incr;
        }
      else
        {
          new_incr = reg_pressure_info[cl].set_increase + incr;
          reg_pressure_info[cl].set_increase = new_incr;
          if (! insn_use_p (insn, regno))
            reg_pressure_info[cl].change += incr;
          create_insn_reg_set (regno, insn);
        }
      gcc_assert (new_incr < (1 << INCREASE_BITS));
    }
}
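
/* A worked example of the bookkeeping above, under the assumption of a
   32-bit target where a DImode pseudo in pressure class CL needs two
   hard registers: for a plain set of such a pseudo, incr is 2, so
   set_increase for CL grows by 2, and if the pseudo is not also used by
   the insn, the net pressure change for CL grows by 2 as well.  */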

/* Like mark_insn_pseudo_birth except that NREGS says how many hard
   registers are involved in the birth.  */
static void
mark_insn_hard_regno_birth (rtx insn, int regno, int nregs,
                            bool clobber_p, bool unused_p)
{
  enum reg_class cl;
  int new_incr, last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
        {
          cl = sched_regno_pressure_class[regno];
          if (cl != NO_REGS)
            {
              if (clobber_p)
                {
                  new_incr = reg_pressure_info[cl].clobber_increase + 1;
                  reg_pressure_info[cl].clobber_increase = new_incr;
                }
              else if (unused_p)
                {
                  new_incr = reg_pressure_info[cl].unused_set_increase + 1;
                  reg_pressure_info[cl].unused_set_increase = new_incr;
                }
              else
                {
                  new_incr = reg_pressure_info[cl].set_increase + 1;
                  reg_pressure_info[cl].set_increase = new_incr;
                  if (! insn_use_p (insn, regno))
                    reg_pressure_info[cl].change += 1;
                  create_insn_reg_set (regno, insn);
                }
              gcc_assert (new_incr < (1 << INCREASE_BITS));
            }
        }
      regno++;
    }
}

/* Update the register pressure info after birth of pseudo or hard
   register REG in INSN.  Arguments CLOBBER_P and UNUSED_P say
   respectively whether the register is clobbered or unused after the
   insn.  */
static void
mark_insn_reg_birth (rtx insn, rtx reg, bool clobber_p, bool unused_p)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_insn_hard_regno_birth (insn, regno, REG_NREGS (reg),
                                clobber_p, unused_p);
  else
    mark_insn_pseudo_birth (insn, regno, clobber_p, unused_p);
}

/* Update the register pressure info after death of pseudo register
   REGNO.  */
static void
mark_pseudo_death (int regno)
{
  int incr;
  enum reg_class cl;

  gcc_assert (regno >= FIRST_PSEUDO_REGISTER);
  cl = sched_regno_pressure_class[regno];
  if (cl != NO_REGS)
    {
      incr = ira_reg_class_max_nregs[cl][PSEUDO_REGNO_MODE (regno)];
      reg_pressure_info[cl].change -= incr;
    }
}

/* Like mark_pseudo_death except that NREGS says how many hard
   registers are involved in the death.  */
static void
mark_hard_regno_death (int regno, int nregs)
{
  enum reg_class cl;
  int last = regno + nregs;

  while (regno < last)
    {
      gcc_assert (regno < FIRST_PSEUDO_REGISTER);
      if (! TEST_HARD_REG_BIT (ira_no_alloc_regs, regno))
        {
          cl = sched_regno_pressure_class[regno];
          if (cl != NO_REGS)
            reg_pressure_info[cl].change -= 1;
        }
      regno++;
    }
}

/* Update the register pressure info after death of pseudo or hard
   register REG.  */
static void
mark_reg_death (rtx reg)
{
  int regno;

  if (GET_CODE (reg) == SUBREG)
    reg = SUBREG_REG (reg);

  if (! REG_P (reg))
    return;

  regno = REGNO (reg);
  if (regno < FIRST_PSEUDO_REGISTER)
    mark_hard_regno_death (regno, REG_NREGS (reg));
  else
    mark_pseudo_death (regno);
}

/* Process SETTER of REG.  DATA is an insn containing the setter.  */
static void
mark_insn_reg_store (rtx reg, const_rtx setter, void *data)
{
  if (setter != NULL_RTX && GET_CODE (setter) != SET)
    return;
  mark_insn_reg_birth
    ((rtx) data, reg, false,
     find_reg_note ((const_rtx) data, REG_UNUSED, reg) != NULL_RTX);
}

/* Like mark_insn_reg_store except notice just CLOBBERs; ignore SETs.  */
static void
mark_insn_reg_clobber (rtx reg, const_rtx setter, void *data)
{
  if (GET_CODE (setter) == CLOBBER)
    mark_insn_reg_birth ((rtx) data, reg, true, false);
}

/* Set up reg pressure info related to INSN.  */
void
init_insn_reg_pressure_info (rtx_insn *insn)
{
  int i, len;
  enum reg_class cl;
  static struct reg_pressure_data *pressure_info;
  rtx link;

  gcc_assert (sched_pressure != SCHED_PRESSURE_NONE);

  if (! INSN_P (insn))
    return;

  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      reg_pressure_info[cl].clobber_increase = 0;
      reg_pressure_info[cl].set_increase = 0;
      reg_pressure_info[cl].unused_set_increase = 0;
      reg_pressure_info[cl].change = 0;
    }

  note_stores (insn, mark_insn_reg_clobber, insn);

  note_stores (insn, mark_insn_reg_store, insn);

  if (AUTO_INC_DEC)
    for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
      if (REG_NOTE_KIND (link) == REG_INC)
        mark_insn_reg_store (XEXP (link, 0), NULL_RTX, insn);

  for (link = REG_NOTES (insn); link; link = XEXP (link, 1))
    if (REG_NOTE_KIND (link) == REG_DEAD)
      mark_reg_death (XEXP (link, 0));

  len = sizeof (struct reg_pressure_data) * ira_pressure_classes_num;
  pressure_info
    = INSN_REG_PRESSURE (insn) = (struct reg_pressure_data *) xmalloc (len);
  if (sched_pressure == SCHED_PRESSURE_WEIGHTED)
    INSN_MAX_REG_PRESSURE (insn) = (int *) xcalloc (ira_pressure_classes_num
                                                    * sizeof (int), 1);
  for (i = 0; i < ira_pressure_classes_num; i++)
    {
      cl = ira_pressure_classes[i];
      pressure_info[i].clobber_increase
        = reg_pressure_info[cl].clobber_increase;
      pressure_info[i].set_increase = reg_pressure_info[cl].set_increase;
      pressure_info[i].unused_set_increase
        = reg_pressure_info[cl].unused_set_increase;
      pressure_info[i].change = reg_pressure_info[cl].change;
    }
}
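
/* To sketch the flow above on a hypothetical insn
   (set (reg:SI 100) (reg:SI 101)) carrying a REG_DEAD note for pseudo
   101: the note_stores walks record the birth of 100 (set_increase for
   its class grows by 1), the REG_DEAD walk records the death of 101
   (change for its class shrinks by 1), and the per-class totals are
   then copied into the INSN_REG_PRESSURE array, indexed by pressure
   class number.  The register numbers are only for illustration.  */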



/* Internal variable for the sched_analyze_[12] () functions.
   If it is true, sched_analyze_[12] is looking at the outermost SET
   of the insn.  */
static bool can_start_lhs_rhs_p;

/* Extend reg info for the deps context DEPS given that
   we have just generated a register numbered REGNO.  */
static void
extend_deps_reg_info (class deps_desc *deps, int regno)
{
  int max_regno = regno + 1;

  gcc_assert (!reload_completed);

  /* In a readonly context, it would not hurt to extend info,
     but it should not be needed.  */
  if (reload_completed && deps->readonly)
    {
      deps->max_reg = max_regno;
      return;
    }

  if (max_regno > deps->max_reg)
    {
      deps->reg_last = XRESIZEVEC (struct deps_reg, deps->reg_last,
                                   max_regno);
      memset (&deps->reg_last[deps->max_reg],
              0, (max_regno - deps->max_reg)
              * sizeof (struct deps_reg));
      deps->max_reg = max_regno;
    }
}

/* Extends REG_INFO_P if needed.  */
void
maybe_extend_reg_info_p (void)
{
  /* Extend REG_INFO_P, if needed.  */
  if ((unsigned int) max_regno - 1 >= reg_info_p_size)
    {
      size_t new_reg_info_p_size = max_regno + 128;

      gcc_assert (!reload_completed && sel_sched_p ());

      reg_info_p = (struct reg_info_t *) xrecalloc (reg_info_p,
                                                    new_reg_info_p_size,
                                                    reg_info_p_size,
                                                    sizeof (*reg_info_p));
      reg_info_p_size = new_reg_info_p_size;
    }
}

/* Analyze a single reference to register (reg:MODE REGNO) in INSN.
   The type of the reference is specified by REF and can be SET,
   CLOBBER, PRE_DEC, POST_DEC, PRE_INC, POST_INC or USE.  */

static void
sched_analyze_reg (class deps_desc *deps, int regno, machine_mode mode,
                   enum rtx_code ref, rtx_insn *insn)
{
  /* We could emit new pseudos in renaming.  Extend the reg structures.  */
  if (!reload_completed && sel_sched_p ()
      && (regno >= max_reg_num () - 1 || regno >= deps->max_reg))
    extend_deps_reg_info (deps, regno);

  maybe_extend_reg_info_p ();

  /* A hard reg in a wide mode may really be multiple registers.
     If so, mark all of them just like the first.  */
  if (regno < FIRST_PSEUDO_REGISTER)
    {
      int i = hard_regno_nregs (regno, mode);
      if (ref == SET)
        {
          while (--i >= 0)
            note_reg_set (regno + i);
        }
      else if (ref == USE)
        {
          while (--i >= 0)
            note_reg_use (regno + i);
        }
      else
        {
          while (--i >= 0)
            note_reg_clobber (regno + i);
        }
    }

  /* ??? Reload sometimes emits USEs and CLOBBERs of pseudos that
     it does not reload.  Ignore these as they have served their
     purpose already.  */
  else if (regno >= deps->max_reg)
    {
      enum rtx_code code = GET_CODE (PATTERN (insn));
      gcc_assert (code == USE || code == CLOBBER);
    }

  else
    {
      if (ref == SET)
        note_reg_set (regno);
      else if (ref == USE)
        note_reg_use (regno);
      else
        note_reg_clobber (regno);

      /* Pseudos that are REG_EQUIV to something may be replaced
         by that during reloading.  We need only add dependencies for
         the address in the REG_EQUIV note.  */
      if (!reload_completed && get_reg_known_equiv_p (regno))
        {
          rtx t = get_reg_known_value (regno);
          if (MEM_P (t))
            sched_analyze_2 (deps, XEXP (t, 0), insn);
        }

      /* Don't let it cross a call after scheduling if it doesn't
         already cross one.  */
      if (REG_N_CALLS_CROSSED (regno) == 0)
        {
          if (!deps->readonly && ref == USE && !DEBUG_INSN_P (insn))
            deps->sched_before_next_call
              = alloc_INSN_LIST (insn, deps->sched_before_next_call);
          else
            add_dependence_list (insn, deps->last_function_call, 1,
                                 REG_DEP_ANTI, false);
        }
    }
}
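
/* For example, on a hypothetical 32-bit target where (reg:DI 0) spans
   hard registers 0 and 1, a SET reference to it takes the multi-register
   path above and calls note_reg_set for both regno 1 and regno 0, so
   later insns reading either half see the dependence.  */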

/* Analyze a single SET, CLOBBER, PRE_DEC, POST_DEC, PRE_INC or POST_INC
   rtx, X, creating all dependencies generated by the write to the
   destination of X, and reads of everything mentioned.  */

static void
sched_analyze_1 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  rtx dest = XEXP (x, 0);
  enum rtx_code code = GET_CODE (x);
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (dest);
  if (dest == 0)
    return;

  if (cslr_p && sched_deps_info->start_lhs)
    sched_deps_info->start_lhs (dest);

  if (GET_CODE (dest) == PARALLEL)
    {
      int i;

      for (i = XVECLEN (dest, 0) - 1; i >= 0; i--)
        if (XEXP (XVECEXP (dest, 0, i), 0) != 0)
          sched_analyze_1 (deps,
                           gen_rtx_CLOBBER (VOIDmode,
                                            XEXP (XVECEXP (dest, 0, i), 0)),
                           insn);

      if (cslr_p && sched_deps_info->finish_lhs)
        sched_deps_info->finish_lhs ();

      if (code == SET)
        {
          can_start_lhs_rhs_p = cslr_p;

          sched_analyze_2 (deps, SET_SRC (x), insn);

          can_start_lhs_rhs_p = false;
        }

      return;
    }

  while (GET_CODE (dest) == STRICT_LOW_PART || GET_CODE (dest) == SUBREG
         || GET_CODE (dest) == ZERO_EXTRACT)
    {
      if (GET_CODE (dest) == STRICT_LOW_PART
          || GET_CODE (dest) == ZERO_EXTRACT
          || read_modify_subreg_p (dest))
        {
          /* These both read and modify the result.  We must handle
             them as writes to get proper dependencies for following
             instructions.  We must handle them as reads to get proper
             dependencies from this to previous instructions.
             Thus we need to call sched_analyze_2.  */

          sched_analyze_2 (deps, XEXP (dest, 0), insn);
        }
      if (GET_CODE (dest) == ZERO_EXTRACT)
        {
          /* The second and third arguments are values read by this insn.  */
          sched_analyze_2 (deps, XEXP (dest, 1), insn);
          sched_analyze_2 (deps, XEXP (dest, 2), insn);
        }
      dest = XEXP (dest, 0);
    }

  if (REG_P (dest))
    {
      int regno = REGNO (dest);
      machine_mode mode = GET_MODE (dest);

      sched_analyze_reg (deps, regno, mode, code, insn);

#ifdef STACK_REGS
      /* Treat all writes to a stack register as modifying the TOS.  */
      if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
        {
          /* Avoid analyzing the same register twice.  */
          if (regno != FIRST_STACK_REG)
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, code, insn);

          add_to_hard_reg_set (&implicit_reg_pending_uses, mode,
                               FIRST_STACK_REG);
        }
#endif
      if (!deps->readonly && regno == STACK_POINTER_REGNUM)
        {
          /* Please see PR114115.  We have an insn that modifies memory on
             the stack without addressing it via the stack pointer, and an
             insn that reserves the stack space.  If the insn modifying
             memory is moved before the insn reserving the stack space, it
             can write memory outside the red zone.  Even worse, some
             optimizations (e.g. peephole) can add insns that use temporary
             stack slots before the insn reserving the stack space but
             after the insn modifying memory, which would corrupt the
             modified memory.  Therefore we treat an insn changing the
             stack as reading unknown memory, which creates an
             anti-dependence.  We need not treat the insn as writing
             memory, because GCC itself does not generate code that reads
             undefined stack memory.  */
          if ((deps->pending_read_list_length + deps->pending_write_list_length)
              >= param_max_pending_list_length
              && !DEBUG_INSN_P (insn))
            flush_pending_lists (deps, insn, true, true);
          add_insn_mem_dependence (deps, true, insn, dest);
        }
    }
  else if (MEM_P (dest))
    {
      /* Writing memory.  */
      rtx t = dest;

      if (sched_deps_info->use_cselib)
        {
          machine_mode address_mode = get_address_mode (dest);

          t = shallow_copy_rtx (dest);
          cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
                                   GET_MODE (t), insn);
          XEXP (t, 0)
            = cselib_subst_to_values_from_insn (XEXP (t, 0), GET_MODE (t),
                                                insn);
        }
      t = canon_rtx (t);

      /* Pending lists can't get larger with a readonly context.  */
      if (!deps->readonly
          && ((deps->pending_read_list_length + deps->pending_write_list_length)
              >= param_max_pending_list_length))
        {
          /* Flush all pending reads and writes to prevent the pending lists
             from getting any larger.  Insn scheduling runs too slowly when
             these lists get long.  When compiling GCC with itself,
             this flush occurs 8 times for sparc, and 10 times for m88k using
             the default value of 32.  */
          flush_pending_lists (deps, insn, false, true);
        }
      else
        {
          rtx_insn_list *pending;
          rtx_expr_list *pending_mem;

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              rtx mem = pending_mem->element ();
              if (REG_P (mem)
                  || (anti_dependence (mem, t)
                      && ! sched_insns_conditions_mutex_p (insn,
                                                           pending->insn ())))
                note_mem_dep (t, mem, pending->insn (), DEP_ANTI);

              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (output_dependence (pending_mem->element (), t)
                  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                note_mem_dep (t, pending_mem->element (),
                              pending->insn (),
                              DEP_OUTPUT);

              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI, true);
          add_dependence_list (insn, deps->pending_jump_insns, 1,
                               REG_DEP_CONTROL, true);

          if (!deps->readonly)
            add_insn_mem_dependence (deps, false, insn, dest);
        }
      sched_analyze_2 (deps, XEXP (dest, 0), insn);
    }

  if (cslr_p && sched_deps_info->finish_lhs)
    sched_deps_info->finish_lhs ();

  /* Analyze reads.  */
  if (GET_CODE (x) == SET)
    {
      can_start_lhs_rhs_p = cslr_p;

      sched_analyze_2 (deps, SET_SRC (x), insn);

      can_start_lhs_rhs_p = false;
    }
}
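
/* As an example of the store handling above: for a hypothetical insn
   (set (mem:SI (reg:SI 100)) (reg:SI 101)) with a possibly overlapping
   read still on deps->pending_read_insns, the anti_dependence test
   fires and note_mem_dep records an anti-dependence from the pending
   read to this store; a pending aliased write would instead yield an
   output dependence.  The register numbers are only for illustration.  */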

/* Analyze the uses of memory and registers in rtx X in INSN.  */
static void
sched_analyze_2 (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  int i;
  int j;
  enum rtx_code code;
  const char *fmt;
  bool cslr_p = can_start_lhs_rhs_p;

  can_start_lhs_rhs_p = false;

  gcc_assert (x);
  if (x == 0)
    return;

  if (cslr_p && sched_deps_info->start_rhs)
    sched_deps_info->start_rhs (x);

  code = GET_CODE (x);

  switch (code)
    {
    CASE_CONST_ANY:
    case SYMBOL_REF:
    case CONST:
    case LABEL_REF:
      /* Ignore constants.  */
      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    case REG:
      {
        int regno = REGNO (x);
        machine_mode mode = GET_MODE (x);

        sched_analyze_reg (deps, regno, mode, USE, insn);

#ifdef STACK_REGS
        /* Treat all reads of a stack register as modifying the TOS.  */
        if (regno >= FIRST_STACK_REG && regno <= LAST_STACK_REG)
          {
            /* Avoid analyzing the same register twice.  */
            if (regno != FIRST_STACK_REG)
              sched_analyze_reg (deps, FIRST_STACK_REG, mode, USE, insn);
            sched_analyze_reg (deps, FIRST_STACK_REG, mode, SET, insn);
          }
#endif

        if (cslr_p && sched_deps_info->finish_rhs)
          sched_deps_info->finish_rhs ();

        return;
      }

    case MEM:
      {
        if (DEBUG_INSN_P (insn) && sched_deps_info->use_cselib)
          {
            machine_mode address_mode = get_address_mode (x);

            cselib_lookup_from_insn (XEXP (x, 0), address_mode, 1,
                                     GET_MODE (x), insn);
          }
        else if (!DEBUG_INSN_P (insn))
          {
            /* Reading memory.  */
            rtx_insn_list *u;
            rtx_insn_list *pending;
            rtx_expr_list *pending_mem;
            rtx t = x;

            if (sched_deps_info->use_cselib)
              {
                machine_mode address_mode = get_address_mode (t);

                t = shallow_copy_rtx (t);
                cselib_lookup_from_insn (XEXP (t, 0), address_mode, 1,
                                         GET_MODE (t), insn);
                XEXP (t, 0)
                  = cselib_subst_to_values_from_insn (XEXP (t, 0),
                                                      GET_MODE (t), insn);
              }

            t = canon_rtx (t);
            pending = deps->pending_read_insns;
            pending_mem = deps->pending_read_mems;
            while (pending)
              {
                rtx mem = pending_mem->element ();
                if (MEM_P (mem) && read_dependence (mem, t)
                    && ! sched_insns_conditions_mutex_p (insn,
                                                         pending->insn ()))
                  note_mem_dep (t, mem, pending->insn (), DEP_ANTI);

                pending = pending->next ();
                pending_mem = pending_mem->next ();
              }

            pending = deps->pending_write_insns;
            pending_mem = deps->pending_write_mems;
            while (pending)
              {
                if (true_dependence (pending_mem->element (), VOIDmode, t)
                    && ! sched_insns_conditions_mutex_p (insn,
                                                         pending->insn ()))
                  note_mem_dep (t, pending_mem->element (),
                                pending->insn (),
                                sched_deps_info->generate_spec_deps
                                ? BEGIN_DATA | DEP_TRUE : DEP_TRUE);

                pending = pending->next ();
                pending_mem = pending_mem->next ();
              }

            for (u = deps->last_pending_memory_flush; u; u = u->next ())
              add_dependence (insn, u->insn (), REG_DEP_ANTI);

            for (u = deps->pending_jump_insns; u; u = u->next ())
              if (deps_may_trap_p (x))
                {
                  if ((sched_deps_info->generate_spec_deps)
                      && sel_sched_p () && (spec_info->mask & BEGIN_CONTROL))
                    {
                      ds_t ds = set_dep_weak (DEP_ANTI, BEGIN_CONTROL,
                                              MAX_DEP_WEAK);

                      note_dep (u->insn (), ds);
                    }
                  else
                    add_dependence (insn, u->insn (), REG_DEP_CONTROL);
                }
          }

        /* Always add these dependencies to pending_reads, since
           this insn may be followed by a write.  */
        if (!deps->readonly)
          {
            if ((deps->pending_read_list_length
                 + deps->pending_write_list_length)
                >= param_max_pending_list_length
                && !DEBUG_INSN_P (insn))
              flush_pending_lists (deps, insn, true, true);
            add_insn_mem_dependence (deps, true, insn, x);
          }

        sched_analyze_2 (deps, XEXP (x, 0), insn);

        if (cslr_p && sched_deps_info->finish_rhs)
          sched_deps_info->finish_rhs ();

        return;
      }

    /* Force pending stores to memory in case a trap handler needs them.
       Also force pending loads from memory; loads and stores can segfault,
       and the signal handler won't be triggered if the trap insn was moved
       above a load or store insn.  */
    case TRAP_IF:
      flush_pending_lists (deps, insn, true, true);
      break;

    case PREFETCH:
      if (PREFETCH_SCHEDULE_BARRIER_P (x))
        reg_pending_barrier = TRUE_BARRIER;
      /* A prefetch insn contains addresses only.  So if the prefetch
         address has no registers, there will be no dependencies on the
         prefetch insn.  This is wrong from the correctness point of
         view, since such a prefetch can be moved below a jump insn,
         which usually generates a MOVE_BARRIER that prevents insns
         containing registers or memory references from moving through
         the barrier.  It is also wrong from the performance point of
         view, since a prefetch without dependencies will tend to be
         issued later instead of earlier.  It is hard to generate
         accurate dependencies for prefetch insns, since a prefetch has
         only the start address, but it is better to have something
         than nothing.  */
      if (!deps->readonly)
        {
          rtx x = gen_rtx_MEM (Pmode, XEXP (PATTERN (insn), 0));
          if (sched_deps_info->use_cselib)
            cselib_lookup_from_insn (x, Pmode, true, VOIDmode, insn);
          add_insn_mem_dependence (deps, true, insn, x);
        }
      break;

    case UNSPEC_VOLATILE:
      flush_pending_lists (deps, insn, true, true);
      /* FALLTHRU */

    case ASM_OPERANDS:
    case ASM_INPUT:
      {
        /* Traditional and volatile asm instructions must be considered to use
           and clobber all hard registers, all pseudo-registers and all of
           memory.  So must TRAP_IF and UNSPEC_VOLATILE operations.

           Consider for instance a volatile asm that changes the fpu rounding
           mode.  An insn should not be moved across this even if it only uses
           pseudo-regs because it might give an incorrectly rounded result.  */
        if ((code != ASM_OPERANDS || MEM_VOLATILE_P (x))
            && !DEBUG_INSN_P (insn))
          reg_pending_barrier = TRUE_BARRIER;

        /* For all ASM_OPERANDS, we must traverse the vector of input
           operands.  We cannot just fall through here since then we would
           be confused by the ASM_INPUT rtx inside ASM_OPERANDS, which does
           not indicate a traditional asm, unlike its normal usage.  */

        if (code == ASM_OPERANDS)
          {
            for (j = 0; j < ASM_OPERANDS_INPUT_LENGTH (x); j++)
              sched_analyze_2 (deps, ASM_OPERANDS_INPUT (x, j), insn);

            if (cslr_p && sched_deps_info->finish_rhs)
              sched_deps_info->finish_rhs ();

            return;
          }
        break;
      }

    case PRE_DEC:
    case POST_DEC:
    case PRE_INC:
    case POST_INC:
      /* These both read and modify the result.  We must handle them as
         writes to get proper dependencies for following instructions.  We
         must handle them as reads to get proper dependencies from this to
         previous instructions.  Thus we need to pass them to both
         sched_analyze_1 and sched_analyze_2.  We must call sched_analyze_2
         first in order to get the proper antecedent for the read.  */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    case POST_MODIFY:
    case PRE_MODIFY:
      /* op0 = op0 + op1 */
      sched_analyze_2 (deps, XEXP (x, 0), insn);
      sched_analyze_2 (deps, XEXP (x, 1), insn);
      sched_analyze_1 (deps, x, insn);

      if (cslr_p && sched_deps_info->finish_rhs)
        sched_deps_info->finish_rhs ();

      return;

    default:
      break;
    }

  /* Other cases: walk the insn.  */
  fmt = GET_RTX_FORMAT (code);
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        sched_analyze_2 (deps, XEXP (x, i), insn);
      else if (fmt[i] == 'E')
        for (j = 0; j < XVECLEN (x, i); j++)
          sched_analyze_2 (deps, XVECEXP (x, i, j), insn);
    }

  if (cslr_p && sched_deps_info->finish_rhs)
    sched_deps_info->finish_rhs ();
}

/* Try to group two fusible insns together to prevent scheduler
   from scheduling them apart.  */

static void
sched_macro_fuse_insns (rtx_insn *insn)
{
  rtx_insn *prev;
  /* No target hook would return true with a debug insn as either hook
     operand, and with very long sequences consisting only of debug
     insns, calling sched_macro_fuse_insns on each of them gives
     quadratic compile-time complexity.  */
  if (DEBUG_INSN_P (insn))
    return;
  prev = prev_nonnote_nondebug_insn_bb (insn);
  if (!prev)
    return;

  if (any_condjump_p (insn))
    {
      unsigned int condreg1, condreg2;
      rtx cc_reg_1;
      if (targetm.fixed_condition_code_regs (&condreg1, &condreg2))
        {
          cc_reg_1 = gen_rtx_REG (CCmode, condreg1);
          if (reg_referenced_p (cc_reg_1, PATTERN (insn))
              && modified_in_p (cc_reg_1, prev))
            {
              if (targetm.sched.macro_fusion_pair_p (prev, insn))
                SCHED_GROUP_P (insn) = 1;
              return;
            }
        }
    }

  if (single_set (insn) && single_set (prev))
    {
      if (targetm.sched.macro_fusion_pair_p (prev, insn))
        SCHED_GROUP_P (insn) = 1;
    }
}
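
/* As an illustration: on a target whose macro_fusion_pair_p hook accepts
   compare-and-branch pairs, a sequence like

     (set (reg:CCZ flags) (compare:CCZ (reg:SI 100) (const_int 0)))
     (set (pc) (if_then_else (eq (reg:CCZ flags) (const_int 0)) ...))

   takes the condjump path above: the jump references the condition-code
   register modified by PREV, so SCHED_GROUP_P keeps the pair adjacent.
   The mode and register numbers here are only for illustration.  */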

/* Get the implicit reg pending clobbers for INSN and save them in TEMP.  */
void
get_implicit_reg_pending_clobbers (HARD_REG_SET *temp, rtx_insn *insn)
{
  extract_insn (insn);
  preprocess_constraints (insn);
  alternative_mask preferred = get_preferred_alternatives (insn);
  ira_implicitly_set_insn_hard_regs (temp, preferred);
  *temp &= ~ira_no_alloc_regs;
}

/* Analyze an INSN with pattern X to find all dependencies.  */
static void
sched_analyze_insn (class deps_desc *deps, rtx x, rtx_insn *insn)
{
  RTX_CODE code = GET_CODE (x);
  rtx link;
  unsigned i;
  reg_set_iterator rsi;

  if (! reload_completed)
    {
      HARD_REG_SET temp;
      get_implicit_reg_pending_clobbers (&temp, insn);
      implicit_reg_pending_clobbers |= temp;
    }

  can_start_lhs_rhs_p = (NONJUMP_INSN_P (insn)
                         && code == SET);

  /* Group compare and branch insns for macro-fusion.  */
  if (!deps->readonly
      && targetm.sched.macro_fusion_p
      && targetm.sched.macro_fusion_p ())
    sched_macro_fuse_insns (insn);

  if (may_trap_p (x))
    /* Avoid moving trapping instructions across function calls that might
       not always return.  */
    add_dependence_list (insn, deps->last_function_call_may_noreturn,
                         1, REG_DEP_ANTI, true);

  /* We must avoid creating a situation in which two successors of the
     current block have different unwind info after scheduling.  If at any
     point the two paths re-join this leads to incorrect unwind info.  */
  /* ??? There are certain situations involving a forced frame pointer in
     which, with extra effort, we could fix up the unwind info at a later
     CFG join.  However, it seems better to notice these cases earlier
     during prologue generation and avoid marking the frame pointer setup
     as frame-related at all.  */
  if (RTX_FRAME_RELATED_P (insn))
    {
      /* Make sure prologue insn is scheduled before next jump.  */
      deps->sched_before_next_jump
        = alloc_INSN_LIST (insn, deps->sched_before_next_jump);

      /* Make sure epilogue insn is scheduled after preceding jumps.  */
      add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                           REG_DEP_ANTI, true);
      add_dependence_list (insn, deps->pending_jump_insns, 1, REG_DEP_ANTI,
                           true);
    }

  if (code == COND_EXEC)
    {
      sched_analyze_2 (deps, COND_EXEC_TEST (x), insn);

      /* ??? Should be recording conditions so we reduce the number of
         false dependencies.  */
      x = COND_EXEC_CODE (x);
      code = GET_CODE (x);
    }
  if (code == SET || code == CLOBBER)
    {
      sched_analyze_1 (deps, x, insn);

      /* Bare clobber insns are used for letting life analysis, reg-stack
         and others know that a value is dead.  Depend on the last call
         instruction so that reg-stack won't get confused.  */
      if (code == CLOBBER)
        add_dependence_list (insn, deps->last_function_call, 1,
                             REG_DEP_OUTPUT, true);
    }
  else if (code == PARALLEL)
    {
      for (i = XVECLEN (x, 0); i--;)
        {
          rtx sub = XVECEXP (x, 0, i);
          code = GET_CODE (sub);

          if (code == COND_EXEC)
            {
              sched_analyze_2 (deps, COND_EXEC_TEST (sub), insn);
              sub = COND_EXEC_CODE (sub);
              code = GET_CODE (sub);
            }
          if (code == SET || code == CLOBBER)
            sched_analyze_1 (deps, sub, insn);
          else
            sched_analyze_2 (deps, sub, insn);
        }
    }
  else
    sched_analyze_2 (deps, x, insn);

  /* Mark registers CLOBBERED or used by the called function.  */
  if (CALL_P (insn))
    {
      for (link = CALL_INSN_FUNCTION_USAGE (insn); link; link = XEXP (link, 1))
        {
          if (GET_CODE (XEXP (link, 0)) == CLOBBER)
            sched_analyze_1 (deps, XEXP (link, 0), insn);
          else if (GET_CODE (XEXP (link, 0)) != SET)
            sched_analyze_2 (deps, XEXP (link, 0), insn);
        }
      /* Don't schedule anything after a tail call; a tail call needs
         to use at least all call-saved registers.  */
      if (SIBLING_CALL_P (insn))
        reg_pending_barrier = TRUE_BARRIER;
      else if (find_reg_note (insn, REG_SETJMP, NULL))
        reg_pending_barrier = MOVE_BARRIER;
    }

  if (JUMP_P (insn))
    {
      rtx_insn *next = next_nonnote_nondebug_insn (insn);
      /* ??? For tablejumps, the barrier may appear not immediately after
         the jump, but after a label and a jump_table_data insn.  */
      if (next && LABEL_P (next) && NEXT_INSN (next)
          && JUMP_TABLE_DATA_P (NEXT_INSN (next)))
        next = NEXT_INSN (NEXT_INSN (next));
      if (next && BARRIER_P (next))
        reg_pending_barrier = MOVE_BARRIER;
      else
        {
          rtx_insn_list *pending;
          rtx_expr_list *pending_mem;

          if (sched_deps_info->compute_jump_reg_dependencies)
            {
              (*sched_deps_info->compute_jump_reg_dependencies)
                (insn, reg_pending_control_uses);

              /* Make latency of jump equal to 0 by using anti-dependence.  */
              EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
                {
                  struct deps_reg *reg_last = &deps->reg_last[i];
                  add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI,
                                       false);
                  add_dependence_list (insn, reg_last->implicit_sets,
                                       0, REG_DEP_ANTI, false);
                  add_dependence_list (insn, reg_last->clobbers, 0,
                                       REG_DEP_ANTI, false);
                }
            }

          /* All memory writes and volatile reads must happen before the
             jump.  Non-volatile reads must happen before the jump only if
             their result is needed by the register use mask computed
             above.  */

          pending = deps->pending_write_insns;
          pending_mem = deps->pending_write_mems;
          while (pending)
            {
              if (! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                add_dependence (insn, pending->insn (), REG_DEP_OUTPUT);
              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          pending = deps->pending_read_insns;
          pending_mem = deps->pending_read_mems;
          while (pending)
            {
              rtx mem = pending_mem->element ();
              if (MEM_P (mem) && MEM_VOLATILE_P (mem)
                  && ! sched_insns_conditions_mutex_p (insn, pending->insn ()))
                add_dependence (insn, pending->insn (), REG_DEP_OUTPUT);
              pending = pending->next ();
              pending_mem = pending_mem->next ();
            }

          add_dependence_list (insn, deps->last_pending_memory_flush, 1,
                               REG_DEP_ANTI, true);
          add_dependence_list (insn, deps->pending_jump_insns, 1,
                               REG_DEP_ANTI, true);
        }
    }

  /* If this instruction can throw an exception, then moving it changes
     where block boundaries fall.  This is mighty confusing elsewhere.
     Therefore, prevent such an instruction from being moved.  Same for
     non-jump instructions that define block boundaries.
     ??? Unclear whether this is still necessary in EBB mode.  If not,
     add_branch_dependences should be adjusted for RGN mode instead.  */
  if (((CALL_P (insn) || JUMP_P (insn)) && can_throw_internal (insn))
      || (NONJUMP_INSN_P (insn) && control_flow_insn_p (insn)))
    reg_pending_barrier = MOVE_BARRIER;

  if (sched_pressure != SCHED_PRESSURE_NONE)
    {
      setup_insn_reg_uses (deps, insn);
      init_insn_reg_pressure_info (insn);
    }

  /* Add register dependencies for insn.  */
  if (DEBUG_INSN_P (insn))
    {
      rtx_insn *prev = deps->last_debug_insn;
      rtx_insn_list *u;

      if (!deps->readonly)
        deps->last_debug_insn = insn;

      if (prev)
        add_dependence (insn, prev, REG_DEP_ANTI);

      add_dependence_list (insn, deps->last_function_call, 1,
                           REG_DEP_ANTI, false);

      if (!sel_sched_p ())
        for (u = deps->last_pending_memory_flush; u; u = u->next ())
          add_dependence (insn, u->insn (), REG_DEP_ANTI);

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          add_dependence_list (insn, reg_last->sets, 1, REG_DEP_ANTI, false);
          /* There's no point in making REG_DEP_CONTROL dependencies for
             debug insns.  */
          add_dependence_list (insn, reg_last->clobbers, 1, REG_DEP_ANTI,
                               false);

          if (!deps->readonly)
            reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
        }
      CLEAR_REG_SET (reg_pending_uses);

      /* Quite often, a debug insn will refer to stuff in the
         previous instruction, but the reason we want this
         dependency here is to make sure the scheduler doesn't
         gratuitously move a debug insn ahead.  This could dirty
         DF flags and cause additional analysis that wouldn't have
         occurred in compilation without debug insns, and such
         additional analysis can modify the generated code.  */
      prev = PREV_INSN (insn);

      if (prev && NONDEBUG_INSN_P (prev))
        add_dependence (insn, prev, REG_DEP_ANTI);
    }
  else
    {
      regset_head set_or_clobbered;

      EXECUTE_IF_SET_IN_REG_SET (reg_pending_uses, 0, i, rsi)
        {
          struct deps_reg *reg_last = &deps->reg_last[i];
          add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
          add_dependence_list (insn, reg_last->implicit_sets, 0, REG_DEP_ANTI,
                               false);
          add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
                               false);

          if (!deps->readonly)
            {
              reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
              reg_last->uses_length++;
            }
        }

      for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
        if (TEST_HARD_REG_BIT (implicit_reg_pending_uses, i))
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            add_dependence_list (insn, reg_last->sets, 0, REG_DEP_TRUE, false);
            add_dependence_list (insn, reg_last->implicit_sets, 0,
                                 REG_DEP_ANTI, false);
            add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_TRUE,
                                 false);

            if (!deps->readonly)
              {
                reg_last->uses = alloc_INSN_LIST (insn, reg_last->uses);
                reg_last->uses_length++;
              }
          }

      if (targetm.sched.exposed_pipeline)
        {
          INIT_REG_SET (&set_or_clobbered);
          bitmap_ior (&set_or_clobbered, reg_pending_clobbers,
                      reg_pending_sets);
          EXECUTE_IF_SET_IN_REG_SET (&set_or_clobbered, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              rtx list;
              for (list = reg_last->uses; list; list = XEXP (list, 1))
                {
                  rtx other = XEXP (list, 0);
                  if (INSN_CACHED_COND (other) != const_true_rtx
                      && refers_to_regno_p (i, INSN_CACHED_COND (other)))
                    INSN_CACHED_COND (other) = const_true_rtx;
                }
            }
        }

      /* If the current insn is conditional, we can't free any
         of the lists.  */
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                {
                  reg_last->clobbers
                    = alloc_INSN_LIST (insn, reg_last->clobbers);
                  reg_last->clobbers_length++;
                }
            }
          EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->sets, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, false);
              add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_OUTPUT,
                                   false);
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   false);
              add_dependence_list (insn, reg_last->control_uses, 0,
                                   REG_DEP_CONTROL, false);

              if (!deps->readonly)
                reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            }
        }
3230 {
3231 EXECUTE_IF_SET_IN_REG_SET (reg_pending_clobbers, 0, i, rsi)
3232 {
3233 struct deps_reg *reg_last = &deps->reg_last[i];
3234 if (reg_last->uses_length >= param_max_pending_list_length
3235 || reg_last->clobbers_length >= param_max_pending_list_length)
3236 {
3237 add_dependence_list_and_free (deps, insn, listp: &reg_last->sets, uncond: 0,
3238 dep_type: REG_DEP_OUTPUT, hard: false);
3239 add_dependence_list_and_free (deps, insn,
3240 listp: &reg_last->implicit_sets, uncond: 0,
3241 dep_type: REG_DEP_ANTI, hard: false);
3242 add_dependence_list_and_free (deps, insn, listp: &reg_last->uses, uncond: 0,
3243 dep_type: REG_DEP_ANTI, hard: false);
3244 add_dependence_list_and_free (deps, insn,
3245 listp: &reg_last->control_uses, uncond: 0,
3246 dep_type: REG_DEP_ANTI, hard: false);
3247 add_dependence_list_and_free (deps, insn,
3248 listp: &reg_last->clobbers, uncond: 0,
3249 dep_type: REG_DEP_OUTPUT, hard: false);
3250
3251 if (!deps->readonly)
3252 {
3253 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3254 reg_last->clobbers_length = 0;
3255 reg_last->uses_length = 0;
3256 }
3257 }
3258 else
3259 {
3260 add_dependence_list (insn, list: reg_last->sets, uncond: 0, dep_type: REG_DEP_OUTPUT,
3261 hard: false);
3262 add_dependence_list (insn, list: reg_last->implicit_sets, uncond: 0,
3263 dep_type: REG_DEP_ANTI, hard: false);
3264 add_dependence_list (insn, list: reg_last->uses, uncond: 0, dep_type: REG_DEP_ANTI,
3265 hard: false);
3266 add_dependence_list (insn, list: reg_last->control_uses, uncond: 0,
3267 dep_type: REG_DEP_CONTROL, hard: false);
3268 }
3269
3270 if (!deps->readonly)
3271 {
3272 reg_last->clobbers_length++;
3273 reg_last->clobbers
3274 = alloc_INSN_LIST (insn, reg_last->clobbers);
3275 }
3276 }
3277 EXECUTE_IF_SET_IN_REG_SET (reg_pending_sets, 0, i, rsi)
3278 {
3279 struct deps_reg *reg_last = &deps->reg_last[i];
3280
3281 add_dependence_list_and_free (deps, insn, listp: &reg_last->sets, uncond: 0,
3282 dep_type: REG_DEP_OUTPUT, hard: false);
3283 add_dependence_list_and_free (deps, insn,
3284 listp: &reg_last->implicit_sets,
3285 uncond: 0, dep_type: REG_DEP_ANTI, hard: false);
3286 add_dependence_list_and_free (deps, insn, listp: &reg_last->clobbers, uncond: 0,
3287 dep_type: REG_DEP_OUTPUT, hard: false);
3288 add_dependence_list_and_free (deps, insn, listp: &reg_last->uses, uncond: 0,
3289 dep_type: REG_DEP_ANTI, hard: false);
3290 add_dependence_list (insn, list: reg_last->control_uses, uncond: 0,
3291 dep_type: REG_DEP_CONTROL, hard: false);
3292
3293 if (!deps->readonly)
3294 {
3295 reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
3296 reg_last->uses_length = 0;
3297 reg_last->clobbers_length = 0;
3298 }
3299 }
3300 }
3301 if (!deps->readonly)
3302 {
3303 EXECUTE_IF_SET_IN_REG_SET (reg_pending_control_uses, 0, i, rsi)
3304 {
3305 struct deps_reg *reg_last = &deps->reg_last[i];
3306 reg_last->control_uses
3307 = alloc_INSN_LIST (insn, reg_last->control_uses);
3308 }
3309 }
3310 }
3311

  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
    if (TEST_HARD_REG_BIT (implicit_reg_pending_clobbers, i))
      {
        struct deps_reg *reg_last = &deps->reg_last[i];
        add_dependence_list (insn, reg_last->sets, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->clobbers, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI, false);
        add_dependence_list (insn, reg_last->control_uses, 0, REG_DEP_ANTI,
                             false);

        if (!deps->readonly)
          reg_last->implicit_sets
            = alloc_INSN_LIST (insn, reg_last->implicit_sets);
      }

  if (!deps->readonly)
    {
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_uses);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_clobbers);
      IOR_REG_SET (&deps->reg_last_in_use, reg_pending_sets);
      IOR_REG_SET_HRS (&deps->reg_last_in_use,
                       implicit_reg_pending_uses
                       | implicit_reg_pending_clobbers);

      /* Set up the pending barrier found.  */
      deps->last_reg_pending_barrier = reg_pending_barrier;
    }

  CLEAR_REG_SET (reg_pending_uses);
  CLEAR_REG_SET (reg_pending_clobbers);
  CLEAR_REG_SET (reg_pending_sets);
  CLEAR_REG_SET (reg_pending_control_uses);
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);

  /* Add dependencies if a scheduling barrier was found.  */
  if (reg_pending_barrier)
    {
      /* In the case of a barrier, most of the added dependencies are not
         real, so we use anti-dependence here.  */
      if (sched_has_condition_p (insn))
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list (insn, reg_last->uses, 0, REG_DEP_ANTI,
                                   true);
              add_dependence_list (insn, reg_last->sets, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
              add_dependence_list (insn, reg_last->implicit_sets, 0,
                                   REG_DEP_ANTI, true);
              add_dependence_list (insn, reg_last->clobbers, 0,
                                   reg_pending_barrier == TRUE_BARRIER
                                   ? REG_DEP_TRUE : REG_DEP_ANTI, true);
            }
        }
      else
        {
          EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
            {
              struct deps_reg *reg_last = &deps->reg_last[i];
              add_dependence_list_and_free (deps, insn, &reg_last->uses, 0,
                                            REG_DEP_ANTI, true);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->control_uses, 0,
                                            REG_DEP_CONTROL, true);
              add_dependence_list_and_free (deps, insn, &reg_last->sets, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI,
                                            true);
              add_dependence_list_and_free (deps, insn,
                                            &reg_last->implicit_sets, 0,
                                            REG_DEP_ANTI, true);
              add_dependence_list_and_free (deps, insn, &reg_last->clobbers, 0,
                                            reg_pending_barrier == TRUE_BARRIER
                                            ? REG_DEP_TRUE : REG_DEP_ANTI,
                                            true);

              if (!deps->readonly)
                {
                  reg_last->uses_length = 0;
                  reg_last->clobbers_length = 0;
                }
            }
        }

      if (!deps->readonly)
        for (i = 0; i < (unsigned) deps->max_reg; i++)
          {
            struct deps_reg *reg_last = &deps->reg_last[i];
            reg_last->sets = alloc_INSN_LIST (insn, reg_last->sets);
            SET_REGNO_REG_SET (&deps->reg_last_in_use, i);
          }

      /* Don't flush pending lists on speculative checks for
         selective scheduling.  */
      if (!sel_sched_p () || !sel_insn_is_speculation_check (insn))
        flush_pending_lists (deps, insn, true, true);

      reg_pending_barrier = NOT_A_BARRIER;
    }

  /* If a post-call group is still open, see if it should remain so.
     This insn must be a simple move of a hard reg to a pseudo or
     vice-versa.

     We must avoid moving these insns for correctness on targets
     with small register classes, and for special registers like
     PIC_OFFSET_TABLE_REGNUM.  For simplicity, extend this to all
     hard regs for all targets.  */

  if (deps->in_post_call_group_p)
    {
      rtx tmp, set = single_set (insn);
      int src_regno, dest_regno;

      if (set == NULL)
        {
          if (DEBUG_INSN_P (insn))
            /* We don't want to mark debug insns as part of the same
               sched group.  We know they really aren't, but if we use
               debug insns to tell that a call group is over, we'll
               get different code if debug insns are not there and
               instructions that follow seem like they should be part
               of the call group.

               Also, if we did, chain_to_prev_insn would move the
               deps of the debug insn to the call insn, modifying
               non-debug post-dependency counts of the debug insn
               dependencies and otherwise messing with the scheduling
               order.

               Instead, let such debug insns be scheduled freely, but
               keep the call group open in case there are insns that
               should be part of it afterwards.  Since we grant debug
               insns higher priority than even sched group insns, it
               will all turn out all right.  */
            goto debug_dont_end_call_group;
          else
            goto end_call_group;
        }

      tmp = SET_DEST (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if (REG_P (tmp))
        dest_regno = REGNO (tmp);
      else
        goto end_call_group;

      tmp = SET_SRC (set);
      if (GET_CODE (tmp) == SUBREG)
        tmp = SUBREG_REG (tmp);
      if ((GET_CODE (tmp) == PLUS
           || GET_CODE (tmp) == MINUS)
          && REG_P (XEXP (tmp, 0))
          && REGNO (XEXP (tmp, 0)) == STACK_POINTER_REGNUM
          && dest_regno == STACK_POINTER_REGNUM)
        src_regno = STACK_POINTER_REGNUM;
      else if (REG_P (tmp))
        src_regno = REGNO (tmp);
      else
        goto end_call_group;

      if (src_regno < FIRST_PSEUDO_REGISTER
          || dest_regno < FIRST_PSEUDO_REGISTER)
        {
          if (!deps->readonly
              && deps->in_post_call_group_p == post_call_initial)
            deps->in_post_call_group_p = post_call;

          if (!sel_sched_p () || sched_emulate_haifa_p)
            {
              SCHED_GROUP_P (insn) = 1;
              CANT_MOVE (insn) = 1;
            }
        }
      else
        {
        end_call_group:
          if (!deps->readonly)
            deps->in_post_call_group_p = not_post_call;
        }
    }

 debug_dont_end_call_group:
  if ((current_sched_info->flags & DO_SPECULATION)
      && !sched_insn_is_legitimate_for_speculation_p (insn, 0))
    /* INSN has an internal dependency (e.g. r14 = [r14]) and thus cannot
       be speculated.  */
    {
      if (sel_sched_p ())
        sel_mark_hard_insn (insn);
      else
        {
          sd_iterator_def sd_it;
          dep_t dep;

          for (sd_it = sd_iterator_start (insn, SD_LIST_SPEC_BACK);
               sd_iterator_cond (&sd_it, &dep);)
            change_spec_dep_to_hard (sd_it);
        }
    }

  /* We do not yet have code to adjust REG_ARGS_SIZE, therefore we must
     honor their original ordering.  */
  if (find_reg_note (insn, REG_ARGS_SIZE, NULL))
    {
      if (deps->last_args_size)
        add_dependence (insn, deps->last_args_size, REG_DEP_OUTPUT);
      if (!deps->readonly)
        deps->last_args_size = insn;
    }

  /* We must not mix prologue and epilogue insns.  See PR78029.  */
  if (prologue_contains (insn))
    {
      add_dependence_list (insn, deps->last_epilogue, true, REG_DEP_ANTI,
                           true);
      if (!deps->readonly)
        {
          if (deps->last_logue_was_epilogue)
            free_INSN_LIST_list (&deps->last_prologue);
          deps->last_prologue = alloc_INSN_LIST (insn, deps->last_prologue);
          deps->last_logue_was_epilogue = false;
        }
    }

  if (epilogue_contains (insn))
    {
      add_dependence_list (insn, deps->last_prologue, true, REG_DEP_ANTI,
                           true);
      if (!deps->readonly)
        {
          if (!deps->last_logue_was_epilogue)
            free_INSN_LIST_list (&deps->last_epilogue);
          deps->last_epilogue = alloc_INSN_LIST (insn, deps->last_epilogue);
          deps->last_logue_was_epilogue = true;
        }
    }
}

/* Return TRUE if INSN might not always return normally (e.g. call exit,
   longjmp, loop forever, ...).  */
/* FIXME: Why can't this function just use flags_from_decl_or_type and
   test for ECF_NORETURN?  */
static bool
call_may_noreturn_p (rtx_insn *insn)
{
  rtx call;

  /* const or pure calls that aren't looping will always return.  */
  if (RTL_CONST_OR_PURE_CALL_P (insn)
      && !RTL_LOOPING_CONST_OR_PURE_CALL_P (insn))
    return false;

  call = get_call_rtx_from (insn);
  if (call && GET_CODE (XEXP (XEXP (call, 0), 0)) == SYMBOL_REF)
    {
      rtx symbol = XEXP (XEXP (call, 0), 0);
      if (SYMBOL_REF_DECL (symbol)
          && TREE_CODE (SYMBOL_REF_DECL (symbol)) == FUNCTION_DECL)
        {
          if (DECL_BUILT_IN_CLASS (SYMBOL_REF_DECL (symbol))
              == BUILT_IN_NORMAL)
            switch (DECL_FUNCTION_CODE (SYMBOL_REF_DECL (symbol)))
              {
              case BUILT_IN_BCMP:
              case BUILT_IN_BCOPY:
              case BUILT_IN_BZERO:
              case BUILT_IN_INDEX:
              case BUILT_IN_MEMCHR:
              case BUILT_IN_MEMCMP:
              case BUILT_IN_MEMCPY:
              case BUILT_IN_MEMMOVE:
              case BUILT_IN_MEMPCPY:
              case BUILT_IN_MEMSET:
              case BUILT_IN_RINDEX:
              case BUILT_IN_STPCPY:
              case BUILT_IN_STPNCPY:
              case BUILT_IN_STRCAT:
              case BUILT_IN_STRCHR:
              case BUILT_IN_STRCMP:
              case BUILT_IN_STRCPY:
              case BUILT_IN_STRCSPN:
              case BUILT_IN_STRLEN:
              case BUILT_IN_STRNCAT:
              case BUILT_IN_STRNCMP:
              case BUILT_IN_STRNCPY:
              case BUILT_IN_STRPBRK:
              case BUILT_IN_STRRCHR:
              case BUILT_IN_STRSPN:
              case BUILT_IN_STRSTR:
                /* Assume certain string/memory builtins always return.  */
                return false;
              default:
                break;
              }
        }
    }

  /* For all other calls assume that they might not always return.  */
  return true;
}
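
/* For instance, a direct call to the memcpy builtin is classified as
   always returning by the list above, while a call through a function
   pointer, or to a function not on the list, is conservatively assumed
   to possibly not return, so trapping insns are not moved across it.  */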

/* Return true if INSN should be made dependent on the previous instruction
   group, and if all INSN's dependencies should be moved to the first
   instruction of that group.  */

static bool
chain_to_prev_insn_p (rtx_insn *insn)
{
  /* INSN forms a group with the previous instruction.  */
  if (SCHED_GROUP_P (insn))
    return true;

  /* If the previous instruction clobbers a register R and this one sets
     part of R, the clobber was added specifically to help us track the
     liveness of R.  There's no point scheduling the clobber and leaving
     INSN behind, especially if we move the clobber to another block.  */
  rtx_insn *prev = prev_nonnote_nondebug_insn (insn);
  if (prev
      && INSN_P (prev)
      && BLOCK_FOR_INSN (prev) == BLOCK_FOR_INSN (insn)
      && GET_CODE (PATTERN (prev)) == CLOBBER)
    {
      rtx x = XEXP (PATTERN (prev), 0);
      if (set_of (x, insn))
        return true;
    }

  return false;
}
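
/* A sketch of the clobber case above: assuming pseudo 100 is DImode and
   the insn stream contains

     (clobber (reg:DI 100))
     (set (subreg:SI (reg:DI 100) 0) ...)

   the partial set only makes sense together with the preceding clobber,
   so chain_to_prev_insn_p returns true and the pair stays together.  */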
3644
3645/* Analyze INSN with DEPS as a context. */
3646void
3647deps_analyze_insn (class deps_desc *deps, rtx_insn *insn)
3648{
3649 if (sched_deps_info->start_insn)
3650 sched_deps_info->start_insn (insn);
3651
3652 /* Record the condition for this insn. */
3653 if (NONDEBUG_INSN_P (insn))
3654 {
3655 rtx t;
3656 sched_get_condition_with_rev (insn, NULL);
3657 t = INSN_CACHED_COND (insn);
3658 INSN_COND_DEPS (insn) = NULL;
      if (reload_completed
          && (current_sched_info->flags & DO_PREDICATION)
          && COMPARISON_P (t)
          && REG_P (XEXP (t, 0))
          && CONSTANT_P (XEXP (t, 1)))
        {
          unsigned int regno;
          int nregs;
          rtx_insn_list *cond_deps = NULL;
          t = XEXP (t, 0);
          regno = REGNO (t);
          nregs = REG_NREGS (t);
          while (nregs-- > 0)
            {
              struct deps_reg *reg_last = &deps->reg_last[regno + nregs];
              cond_deps = concat_INSN_LIST (reg_last->sets, cond_deps);
              cond_deps = concat_INSN_LIST (reg_last->clobbers, cond_deps);
              cond_deps = concat_INSN_LIST (reg_last->implicit_sets, cond_deps);
            }
          INSN_COND_DEPS (insn) = cond_deps;
        }
    }

  if (JUMP_P (insn))
    {
      /* Make each JUMP_INSN (but not a speculative check)
         a scheduling barrier for memory references.  */
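      /* Moving a memory access past a conditional jump could make it
         execute on paths where it originally did not, so memory references
         must not be reordered across jumps.  */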
      if (!deps->readonly
          && !(sel_sched_p ()
               && sel_insn_is_speculation_check (insn)))
        {
          /* Keep the list a reasonable size.  */
          if (deps->pending_flush_length++ >= param_max_pending_list_length)
            flush_pending_lists (deps, insn, true, true);
          else
            deps->pending_jump_insns
              = alloc_INSN_LIST (insn, deps->pending_jump_insns);
        }

      /* For each insn which shouldn't cross a jump, add a dependence.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_jump, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (NONJUMP_INSN_P (insn) || DEBUG_INSN_P (insn))
    {
      sched_analyze_insn (deps, PATTERN (insn), insn);
    }
  else if (CALL_P (insn))
    {
      int i;

      CANT_MOVE (insn) = 1;

      if (!reload_completed)
        {
          /* Scheduling across calls may increase register pressure by
             extending live ranges of pseudos over the call.  Worse, in the
             presence of setjmp it may incorrectly move up an assignment
             over a longjmp.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else if (find_reg_note (insn, REG_SETJMP, NULL))
        {
          /* This is setjmp.  Assume that all registers, not just
             hard registers, may be clobbered by this call.  */
          reg_pending_barrier = MOVE_BARRIER;
        }
      else
        {
          function_abi callee_abi = insn_callee_abi (insn);
          for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
            /* A call may read and modify global register variables.  */
            if (global_regs[i])
              {
                SET_REGNO_REG_SET (reg_pending_sets, i);
                SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
              }
            /* Other call-clobbered hard regs may be clobbered.
               Since we only have a choice between 'might be clobbered'
               and 'definitely not clobbered', we must include all
               partly call-clobbered registers here.  */
            else if (callee_abi.clobbers_at_least_part_of_reg_p (i))
              SET_REGNO_REG_SET (reg_pending_clobbers, i);
            /* We don't know which fixed registers the function might use,
               but the stack pointer is certainly among them, so be
               conservative and mark them all as implicitly used.  */
            else if (fixed_regs[i])
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
            /* The frame pointer is normally not used by the function
               itself, but by the debugger.  */
            /* ??? MIPS o32 is an exception.  It uses the frame pointer
               in the macro expansion of jal but does not represent this
               fact in the call_insn rtl.  */
            else if (i == FRAME_POINTER_REGNUM
                     || (i == HARD_FRAME_POINTER_REGNUM
                         && (! reload_completed || frame_pointer_needed)))
              SET_HARD_REG_BIT (implicit_reg_pending_uses, i);
        }

      /* For each insn which shouldn't cross a call, add a dependence
         between that insn and this call insn.  */
      add_dependence_list_and_free (deps, insn,
                                    &deps->sched_before_next_call, 1,
                                    REG_DEP_ANTI, true);

      sched_analyze_insn (deps, PATTERN (insn), insn);

      /* If CALL were in a sched group, that would violate the convention
         that sched group insns have dependencies only on the previous
         instruction.

         Of course one can say: "Hey!  What about the head of the sched
         group?"  And I will answer: "Basic principles (one dep per insn)
         are always the same."  */
      gcc_assert (!SCHED_GROUP_P (insn));

      /* In the absence of interprocedural alias analysis, we must flush
         all pending reads and writes, and start new dependencies starting
         from here.  But only flush writes for constant calls (which may
         be passed a pointer to something we haven't written yet).  */
      flush_pending_lists (deps, insn, true, ! RTL_CONST_OR_PURE_CALL_P (insn));

      if (!deps->readonly)
        {
          /* Remember the last function call for limiting lifetimes.  */
          free_INSN_LIST_list (&deps->last_function_call);
          deps->last_function_call = alloc_INSN_LIST (insn, NULL_RTX);

          if (call_may_noreturn_p (insn))
            {
              /* Remember the last function call that might not always return
                 normally for limiting moves of trapping insns.  */
              free_INSN_LIST_list (&deps->last_function_call_may_noreturn);
              deps->last_function_call_may_noreturn
                = alloc_INSN_LIST (insn, NULL_RTX);
            }

          /* Before reload, begin a post-call group, so as to keep the
             lifetimes of hard registers correct.  */
          if (! reload_completed)
            deps->in_post_call_group_p = post_call;
        }
    }

  if (sched_deps_info->use_cselib)
    cselib_process_insn (insn);

  if (sched_deps_info->finish_insn)
    sched_deps_info->finish_insn ();

  /* Fix up the dependencies in the sched group.  */
  if ((NONJUMP_INSN_P (insn) || JUMP_P (insn))
      && chain_to_prev_insn_p (insn)
      && !sel_sched_p ())
    chain_to_prev_insn (insn);
}

/* Initialize DEPS for the new block beginning with HEAD.  */
void
deps_start_bb (class deps_desc *deps, rtx_insn *head)
{
  gcc_assert (!deps->readonly);

  /* Before reload, if the previous block ended in a call, show that
     we are inside a post-call group, so as to keep the lifetimes of
     hard registers correct.  */
  if (! reload_completed && !LABEL_P (head))
    {
      rtx_insn *insn = prev_nonnote_nondebug_insn (head);

      if (insn && CALL_P (insn))
        deps->in_post_call_group_p = post_call_initial;
    }
}

/* Analyze every insn between HEAD and TAIL inclusive, creating backward
   dependencies for each insn.  */
void
sched_analyze (class deps_desc *deps, rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn;

  if (sched_deps_info->use_cselib)
    cselib_init (CSELIB_RECORD_MEMORY);

  deps_start_bb (deps, head);

  for (insn = head;; insn = NEXT_INSN (insn))
    {
      if (INSN_P (insn))
        {
          /* And initialize deps_lists.  */
          sd_init_insn (insn);
          /* Clean up SCHED_GROUP_P, which may have been set by the last
             scheduler pass.  */
          if (SCHED_GROUP_P (insn))
            SCHED_GROUP_P (insn) = 0;
        }

      deps_analyze_insn (deps, insn);

      if (insn == tail)
        {
          if (sched_deps_info->use_cselib)
            cselib_finish ();
          return;
        }
    }
}

/* Helper for sched_free_deps ().
   Delete INSN's (RESOLVED_P) backward dependencies.  */
static void
delete_dep_nodes_in_back_deps (rtx_insn *insn, bool resolved_p)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def types;

  if (resolved_p)
    types = SD_LIST_RES_BACK;
  else
    types = SD_LIST_BACK;

  for (sd_it = sd_iterator_start (insn, types);
       sd_iterator_cond (&sd_it, &dep);)
    {
      dep_link_t link = *sd_it.linkp;
      dep_node_t node = DEP_LINK_NODE (link);
      deps_list_t back_list;
      deps_list_t forw_list;

      get_back_and_forw_lists (dep, resolved_p, &back_list, &forw_list);
      remove_from_deps_list (link, back_list);
      delete_dep_node (node);
    }
}

/* Delete (RESOLVED_P) dependencies between HEAD and TAIL together with
   deps_lists.  */
void
sched_free_deps (rtx_insn *head, rtx_insn *tail, bool resolved_p)
{
  rtx_insn *insn;
  rtx_insn *next_tail = NEXT_INSN (tail);

  /* We make two passes since some insns may be scheduled before their
     dependencies are resolved.  */
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear forward deps and leave the dep_nodes to the
           corresponding back_deps list.  */
        if (resolved_p)
          clear_deps_list (INSN_RESOLVED_FORW_DEPS (insn));
        else
          clear_deps_list (INSN_FORW_DEPS (insn));
      }
  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    if (INSN_P (insn) && INSN_LUID (insn) > 0)
      {
        /* Clear resolved back deps together with their dep_nodes.  */
        delete_dep_nodes_in_back_deps (insn, resolved_p);

        sd_finish_insn (insn);
      }
}

/* Initialize variables for region data dependence analysis.
   When LAZY_REG_LAST is true, do not allocate the reg_last array
   of class deps_desc immediately.  */

void
init_deps (class deps_desc *deps, bool lazy_reg_last)
{
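  /* After reload only hard registers remain, so the reg_last array does
     not need to cover pseudo registers.  */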
  int max_reg = (reload_completed ? FIRST_PSEUDO_REGISTER : max_reg_num ());

  deps->max_reg = max_reg;
  if (lazy_reg_last)
    deps->reg_last = NULL;
  else
    deps->reg_last = XCNEWVEC (struct deps_reg, max_reg);
  INIT_REG_SET (&deps->reg_last_in_use);

  deps->pending_read_insns = 0;
  deps->pending_read_mems = 0;
  deps->pending_write_insns = 0;
  deps->pending_write_mems = 0;
  deps->pending_jump_insns = 0;
  deps->pending_read_list_length = 0;
  deps->pending_write_list_length = 0;
  deps->pending_flush_length = 0;
  deps->last_pending_memory_flush = 0;
  deps->last_function_call = 0;
  deps->last_function_call_may_noreturn = 0;
  deps->sched_before_next_call = 0;
  deps->sched_before_next_jump = 0;
  deps->in_post_call_group_p = not_post_call;
  deps->last_debug_insn = 0;
  deps->last_args_size = 0;
  deps->last_prologue = 0;
  deps->last_epilogue = 0;
  deps->last_logue_was_epilogue = false;
  deps->last_reg_pending_barrier = NOT_A_BARRIER;
  deps->readonly = 0;
}

/* Initialize only the reg_last field of DEPS, which was not allocated
   before because DEPS was initialized lazily.  */
void
init_deps_reg_last (class deps_desc *deps)
{
  gcc_assert (deps && deps->max_reg > 0);
  gcc_assert (deps->reg_last == NULL);

  deps->reg_last = XCNEWVEC (struct deps_reg, deps->max_reg);
}


/* Free insn lists found in DEPS.  */

void
free_deps (class deps_desc *deps)
{
  unsigned i;
  reg_set_iterator rsi;

  /* We set max_reg to 0 when this context was already freed.  */
  if (deps->max_reg == 0)
    {
      gcc_assert (deps->reg_last == NULL);
      return;
    }
  deps->max_reg = 0;

  free_INSN_LIST_list (&deps->pending_read_insns);
  free_EXPR_LIST_list (&deps->pending_read_mems);
  free_INSN_LIST_list (&deps->pending_write_insns);
  free_EXPR_LIST_list (&deps->pending_write_mems);
  free_INSN_LIST_list (&deps->last_pending_memory_flush);

  /* Without the EXECUTE_IF_SET, this loop is executed max_reg * nr_regions
     times.  For a testcase with 42000 regs and 8000 small basic blocks,
     this loop accounted for nearly 60% (84 sec) of the total -O2 runtime.  */
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        free_INSN_LIST_list (&reg_last->uses);
      if (reg_last->sets)
        free_INSN_LIST_list (&reg_last->sets);
      if (reg_last->implicit_sets)
        free_INSN_LIST_list (&reg_last->implicit_sets);
      if (reg_last->control_uses)
        free_INSN_LIST_list (&reg_last->control_uses);
      if (reg_last->clobbers)
        free_INSN_LIST_list (&reg_last->clobbers);
    }
  CLEAR_REG_SET (&deps->reg_last_in_use);

  /* As we initialize reg_last lazily, it is possible that we didn't allocate
     it at all.  */
  free (deps->reg_last);
  deps->reg_last = NULL;

  deps = NULL;
}

/* Remove INSN from the dependence context DEPS.  */
void
remove_from_deps (class deps_desc *deps, rtx_insn *insn)
{
  int removed;
  unsigned i;
  reg_set_iterator rsi;

  removed = remove_from_both_dependence_lists (insn, &deps->pending_read_insns,
                                               &deps->pending_read_mems);
  if (!DEBUG_INSN_P (insn))
    deps->pending_read_list_length -= removed;
  removed = remove_from_both_dependence_lists (insn, &deps->pending_write_insns,
                                               &deps->pending_write_mems);
  deps->pending_write_list_length -= removed;

  removed = remove_from_dependence_list (insn, &deps->pending_jump_insns);
  deps->pending_flush_length -= removed;
  removed = remove_from_dependence_list (insn, &deps->last_pending_memory_flush);
  deps->pending_flush_length -= removed;

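  /* Clearing a bit in REG_LAST_IN_USE while EXECUTE_IF_SET_IN_REG_SET is
     iterating over it would disturb the iteration, so remember a regno
     whose lists became empty and clear it on the following iteration (or
     after the loop).  */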
  unsigned to_clear = -1U;
  EXECUTE_IF_SET_IN_REG_SET (&deps->reg_last_in_use, 0, i, rsi)
    {
      if (to_clear != -1U)
        {
          CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);
          to_clear = -1U;
        }
      struct deps_reg *reg_last = &deps->reg_last[i];
      if (reg_last->uses)
        remove_from_dependence_list (insn, &reg_last->uses);
      if (reg_last->sets)
        remove_from_dependence_list (insn, &reg_last->sets);
      if (reg_last->implicit_sets)
        remove_from_dependence_list (insn, &reg_last->implicit_sets);
      if (reg_last->clobbers)
        remove_from_dependence_list (insn, &reg_last->clobbers);
      if (!reg_last->uses && !reg_last->sets && !reg_last->implicit_sets
          && !reg_last->clobbers)
        to_clear = i;
    }
  if (to_clear != -1U)
    CLEAR_REGNO_REG_SET (&deps->reg_last_in_use, to_clear);

  if (CALL_P (insn))
    {
      remove_from_dependence_list (insn, &deps->last_function_call);
      remove_from_dependence_list (insn,
                                   &deps->last_function_call_may_noreturn);
    }
  remove_from_dependence_list (insn, &deps->sched_before_next_call);
}

/* Ensure the deps data vector H_D_I_D has entries for every insn LUID,
   growing it to one and a half times SCHED_MAX_LUID when more room is
   needed.  */
static void
init_deps_data_vector (void)
{
  int reserve = (sched_max_luid + 1 - h_d_i_d.length ());
  if (reserve > 0 && ! h_d_i_d.space (reserve))
    h_d_i_d.safe_grow_cleared (3 * sched_max_luid / 2, true);
}

/* If it is profitable to use them, initialize or extend (depending on
   GLOBAL_P) dependency data.  */
void
sched_deps_init (bool global_p)
{
  /* Average number of insns in the basic block.
     '+ 1' is used to make it nonzero.  */
  int insns_in_block = sched_max_luid / n_basic_blocks_for_fn (cfun) + 1;

  init_deps_data_vector ();

  /* We use another caching mechanism for selective scheduling, so
     we don't use this one.  */
  if (!sel_sched_p () && global_p && insns_in_block > 100 * 5)
    {
      /* ?!? We could save some memory by computing a per-region luid mapping
         which could reduce both the number of vectors in the cache and the
         size of each vector.  Instead we just avoid the cache entirely unless
         the average number of instructions in a basic block is very high.  See
         the comment before the declaration of true_dependency_cache for
         what we consider "very high".  */
      cache_size = 0;
      extend_dependency_caches (sched_max_luid, true);
    }

  if (global_p)
    {
      dl_pool = new object_allocator<_deps_list> ("deps_list");
      /* Allocate lists for one block at a time.  */
      dn_pool = new object_allocator<_dep_node> ("dep_node");
      /* Allocate nodes for one block at a time.  */
    }
}


/* Create or extend (depending on CREATE_P) dependency caches by N more
   entries.  */
void
extend_dependency_caches (int n, bool create_p)
{
  if (create_p || true_dependency_cache)
    {
      int i, luid = cache_size + n;

      true_dependency_cache = XRESIZEVEC (bitmap_head, true_dependency_cache,
                                          luid);
      output_dependency_cache = XRESIZEVEC (bitmap_head,
                                            output_dependency_cache, luid);
      anti_dependency_cache = XRESIZEVEC (bitmap_head, anti_dependency_cache,
                                          luid);
      control_dependency_cache = XRESIZEVEC (bitmap_head,
                                             control_dependency_cache, luid);

      if (current_sched_info->flags & DO_SPECULATION)
        spec_dependency_cache = XRESIZEVEC (bitmap_head, spec_dependency_cache,
                                            luid);

      for (i = cache_size; i < luid; i++)
        {
          bitmap_initialize (&true_dependency_cache[i], 0);
          bitmap_initialize (&output_dependency_cache[i], 0);
          bitmap_initialize (&anti_dependency_cache[i], 0);
          bitmap_initialize (&control_dependency_cache[i], 0);

          if (current_sched_info->flags & DO_SPECULATION)
            bitmap_initialize (&spec_dependency_cache[i], 0);
        }
      cache_size = luid;
    }
}

/* Finalize dependency information for the whole function.  */
void
sched_deps_finish (void)
{
  gcc_assert (deps_pools_are_empty_p ());
  delete dn_pool;
  delete dl_pool;
  dn_pool = NULL;
  dl_pool = NULL;

  h_d_i_d.release ();

  if (true_dependency_cache)
    {
      int i;

      for (i = 0; i < cache_size; i++)
        {
          bitmap_clear (&true_dependency_cache[i]);
          bitmap_clear (&output_dependency_cache[i]);
          bitmap_clear (&anti_dependency_cache[i]);
          bitmap_clear (&control_dependency_cache[i]);

          if (sched_deps_info->generate_spec_deps)
            bitmap_clear (&spec_dependency_cache[i]);
        }
      free (true_dependency_cache);
      true_dependency_cache = NULL;
      free (output_dependency_cache);
      output_dependency_cache = NULL;
      free (anti_dependency_cache);
      anti_dependency_cache = NULL;
      free (control_dependency_cache);
      control_dependency_cache = NULL;

      if (sched_deps_info->generate_spec_deps)
        {
          free (spec_dependency_cache);
          spec_dependency_cache = NULL;
        }
    }

  /* Reset cache_size only after the caches have been cleared; resetting
     it before the loop above would turn that loop into a no-op.  */
  cache_size = 0;
}

/* Initialize some global variables needed by the dependency analysis
   code.  */

void
init_deps_global (void)
{
  CLEAR_HARD_REG_SET (implicit_reg_pending_clobbers);
  CLEAR_HARD_REG_SET (implicit_reg_pending_uses);
  reg_pending_sets = ALLOC_REG_SET (&reg_obstack);
  reg_pending_clobbers = ALLOC_REG_SET (&reg_obstack);
  reg_pending_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_control_uses = ALLOC_REG_SET (&reg_obstack);
  reg_pending_barrier = NOT_A_BARRIER;

  if (!sel_sched_p () || sched_emulate_haifa_p)
    {
      sched_deps_info->start_insn = haifa_start_insn;
      sched_deps_info->finish_insn = haifa_finish_insn;

      sched_deps_info->note_reg_set = haifa_note_reg_set;
      sched_deps_info->note_reg_clobber = haifa_note_reg_clobber;
      sched_deps_info->note_reg_use = haifa_note_reg_use;

      sched_deps_info->note_mem_dep = haifa_note_mem_dep;
      sched_deps_info->note_dep = haifa_note_dep;
    }
}

/* Free everything used by the dependency analysis code.  */

void
finish_deps_global (void)
{
  FREE_REG_SET (reg_pending_sets);
  FREE_REG_SET (reg_pending_clobbers);
  FREE_REG_SET (reg_pending_uses);
  FREE_REG_SET (reg_pending_control_uses);
}

/* Estimate the weakness of the dependence between MEM1 and MEM2, i.e. the
   likelihood that speculating one access past the other will succeed.  */
dw_t
estimate_dep_weak (rtx mem1, rtx mem2)
{
  if (mem1 == mem2)
    /* MEMs are the same - don't speculate.  */
    return MIN_DEP_WEAK;

  rtx r1 = XEXP (mem1, 0);
  rtx r2 = XEXP (mem2, 0);

  if (sched_deps_info->use_cselib)
    {
      /* We cannot call rtx_equal_for_cselib_p because the VALUEs might be
         dangling at this point, since we never preserve them.  Instead we
         canonicalize manually to get stable VALUEs out of hashing.  */
      if (GET_CODE (r1) == VALUE && CSELIB_VAL_PTR (r1))
        r1 = canonical_cselib_val (CSELIB_VAL_PTR (r1))->val_rtx;
      if (GET_CODE (r2) == VALUE && CSELIB_VAL_PTR (r2))
        r2 = canonical_cselib_val (CSELIB_VAL_PTR (r2))->val_rtx;
    }

  if (r1 == r2
      || (REG_P (r1) && REG_P (r2) && REGNO (r1) == REGNO (r2)))
    /* Again, MEMs are the same.  */
    return MIN_DEP_WEAK;
  else if ((REG_P (r1) && !REG_P (r2)) || (!REG_P (r1) && REG_P (r2)))
    /* Different addressing modes - reason to be more speculative
       than usual.  */
    return NO_DEP_WEAK - (NO_DEP_WEAK - UNCERTAIN_DEP_WEAK) / 2;
  else
    /* We can't say anything about the dependence.  */
    return UNCERTAIN_DEP_WEAK;
}

/* Add or update a backward dependence between INSN and ELEM with type
   DEP_TYPE.  This function can handle the case where INSN and ELEM are
   the same (INSN == ELEM).  It is a convenience wrapper.  */
static void
add_dependence_1 (rtx_insn *insn, rtx_insn *elem, enum reg_note dep_type)
{
  ds_t ds;
  bool internal;

  if (dep_type == REG_DEP_TRUE)
    ds = DEP_TRUE;
  else if (dep_type == REG_DEP_OUTPUT)
    ds = DEP_OUTPUT;
  else if (dep_type == REG_DEP_CONTROL)
    ds = DEP_CONTROL;
  else
    {
      gcc_assert (dep_type == REG_DEP_ANTI);
      ds = DEP_ANTI;
    }

  /* When add_dependence is called from inside sched-deps.cc, we expect
     cur_insn to be non-null.  */
  internal = cur_insn != NULL;
  if (internal)
    gcc_assert (insn == cur_insn);
  else
    cur_insn = insn;

  note_dep (elem, ds);
  if (!internal)
    cur_insn = NULL;
}

/* Return the weakness of speculative type TYPE in the dep_status DS,
   without range-checking the result, so that it is safe to call on
   malformed input (e.g. from the dump routines).  */
static dw_t
get_dep_weak_1 (ds_t ds, ds_t type)
{
  ds = ds & type;

  switch (type)
    {
    case BEGIN_DATA: ds >>= BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds >>= BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds >>= BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds >>= BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }

  return (dw_t) ds;
}
/* Return weakness of speculative type TYPE in the dep_status DS.  */
dw_t
get_dep_weak (ds_t ds, ds_t type)
{
  dw_t dw = get_dep_weak_1 (ds, type);

  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);
  return dw;
}

/* Return the dep_status, which has the same parameters as DS, except for
   speculative type TYPE, that will have weakness DW.  */
ds_t
set_dep_weak (ds_t ds, ds_t type, dw_t dw)
{
  gcc_assert (MIN_DEP_WEAK <= dw && dw <= MAX_DEP_WEAK);

  ds &= ~type;
  switch (type)
    {
    case BEGIN_DATA: ds |= ((ds_t) dw) << BEGIN_DATA_BITS_OFFSET; break;
    case BE_IN_DATA: ds |= ((ds_t) dw) << BE_IN_DATA_BITS_OFFSET; break;
    case BEGIN_CONTROL: ds |= ((ds_t) dw) << BEGIN_CONTROL_BITS_OFFSET; break;
    case BE_IN_CONTROL: ds |= ((ds_t) dw) << BE_IN_CONTROL_BITS_OFFSET; break;
    default: gcc_unreachable ();
    }
  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   If MAX_P is true then choose the greater probability,
   otherwise multiply probabilities.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
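/* For instance, if both statuses carry a BEGIN_DATA weakness of
   MAX_DEP_WEAK / 2, multiplying yields a joint weakness of MAX_DEP_WEAK / 4
   (the probability that both speculations succeed), while merging with
   MAX_P keeps MAX_DEP_WEAK / 2.  */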
static ds_t
ds_merge_1 (ds_t ds1, ds_t ds2, bool max_p)
{
  ds_t ds, t;

  gcc_assert ((ds1 & SPECULATIVE) && (ds2 & SPECULATIVE));

  ds = (ds1 & DEP_TYPES) | (ds2 & DEP_TYPES);

  t = FIRST_SPEC_TYPE;
  do
    {
      if ((ds1 & t) && !(ds2 & t))
        ds |= ds1 & t;
      else if (!(ds1 & t) && (ds2 & t))
        ds |= ds2 & t;
      else if ((ds1 & t) && (ds2 & t))
        {
          dw_t dw1 = get_dep_weak (ds1, t);
          dw_t dw2 = get_dep_weak (ds2, t);
          ds_t dw;

          if (!max_p)
            {
              dw = ((ds_t) dw1) * ((ds_t) dw2);
              dw /= MAX_DEP_WEAK;
              if (dw < MIN_DEP_WEAK)
                dw = MIN_DEP_WEAK;
            }
          else
            {
              if (dw1 >= dw2)
                dw = dw1;
              else
                dw = dw2;
            }

          ds = set_dep_weak (ds, t, (dw_t) dw);
        }

      if (t == LAST_SPEC_TYPE)
        break;
      t <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  return ds;
}

/* Return the join of two dep_statuses DS1 and DS2.
   This function assumes that both DS1 and DS2 contain speculative bits.  */
ds_t
ds_merge (ds_t ds1, ds_t ds2)
{
  return ds_merge_1 (ds1, ds2, false);
}

/* Return the join of two dep_statuses DS1 and DS2.  */
ds_t
ds_full_merge (ds_t ds, ds_t ds2, rtx mem1, rtx mem2)
{
  ds_t new_status = ds | ds2;

  if (new_status & SPECULATIVE)
    {
      if ((ds && !(ds & SPECULATIVE))
          || (ds2 && !(ds2 & SPECULATIVE)))
        /* Then this dep can't be speculative.  */
        new_status &= ~SPECULATIVE;
      else
        {
          /* Both are speculative.  Merge probabilities.  */
          if (mem1)
            {
              dw_t dw;

              dw = estimate_dep_weak (mem1, mem2);
              ds = set_dep_weak (ds, BEGIN_DATA, dw);
            }

          if (!ds)
            new_status = ds2;
          else if (!ds2)
            new_status = ds;
          else
            new_status = ds_merge (ds2, ds);
        }
    }

  return new_status;
}

/* Return the join of DS1 and DS2.  Use maximum instead of multiplying
   probabilities.  */
ds_t
ds_max_merge (ds_t ds1, ds_t ds2)
{
  if (ds1 == 0 && ds2 == 0)
    return 0;

  if (ds1 == 0 && ds2 != 0)
    return ds2;

  if (ds1 != 0 && ds2 == 0)
    return ds1;

  return ds_merge_1 (ds1, ds2, true);
}

/* Return the probability of speculation success for the speculation
   status DS.  */
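/* The result is the product of the weaknesses of all speculative types
   present in DS, rescaled back into the [MIN_DEP_WEAK, MAX_DEP_WEAK] range;
   DS must contain at least one speculative type (enforced by the
   gcc_assert below).  */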
dw_t
ds_weak (ds_t ds)
{
  ds_t res = 1, dt;
  int n = 0;

  dt = FIRST_SPEC_TYPE;
  do
    {
      if (ds & dt)
        {
          res *= (ds_t) get_dep_weak (ds, dt);
          n++;
        }

      if (dt == LAST_SPEC_TYPE)
        break;
      dt <<= SPEC_TYPE_SHIFT;
    }
  while (1);

  gcc_assert (n);
  while (--n)
    res /= MAX_DEP_WEAK;

  if (res < MIN_DEP_WEAK)
    res = MIN_DEP_WEAK;

  gcc_assert (res <= MAX_DEP_WEAK);

  return (dw_t) res;
}

/* Return a dep status that contains all speculation types of DS.  */
ds_t
ds_get_speculation_types (ds_t ds)
{
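  /* Each speculation type is a multi-bit weakness field, so OR-ing in the
     full mask marks the whole field as present no matter which of its bits
     were set.  */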
  if (ds & BEGIN_DATA)
    ds |= BEGIN_DATA;
  if (ds & BE_IN_DATA)
    ds |= BE_IN_DATA;
  if (ds & BEGIN_CONTROL)
    ds |= BEGIN_CONTROL;
  if (ds & BE_IN_CONTROL)
    ds |= BE_IN_CONTROL;

  return ds & SPECULATIVE;
}

/* Return a dep status that contains maximal weakness for each speculation
   type present in DS.  */
ds_t
ds_get_max_dep_weak (ds_t ds)
{
  if (ds & BEGIN_DATA)
    ds = set_dep_weak (ds, BEGIN_DATA, MAX_DEP_WEAK);
  if (ds & BE_IN_DATA)
    ds = set_dep_weak (ds, BE_IN_DATA, MAX_DEP_WEAK);
  if (ds & BEGIN_CONTROL)
    ds = set_dep_weak (ds, BEGIN_CONTROL, MAX_DEP_WEAK);
  if (ds & BE_IN_CONTROL)
    ds = set_dep_weak (ds, BE_IN_CONTROL, MAX_DEP_WEAK);

  return ds;
}

/* Dump information about the dependence status S.  */
static void
dump_ds (FILE *f, ds_t s)
{
  fprintf (f, "{");

  if (s & BEGIN_DATA)
    fprintf (f, "BEGIN_DATA: %d; ", get_dep_weak_1 (s, BEGIN_DATA));
  if (s & BE_IN_DATA)
    fprintf (f, "BE_IN_DATA: %d; ", get_dep_weak_1 (s, BE_IN_DATA));
  if (s & BEGIN_CONTROL)
    fprintf (f, "BEGIN_CONTROL: %d; ", get_dep_weak_1 (s, BEGIN_CONTROL));
  if (s & BE_IN_CONTROL)
    fprintf (f, "BE_IN_CONTROL: %d; ", get_dep_weak_1 (s, BE_IN_CONTROL));

  if (s & HARD_DEP)
    fprintf (f, "HARD_DEP; ");

  if (s & DEP_TRUE)
    fprintf (f, "DEP_TRUE; ");
  if (s & DEP_OUTPUT)
    fprintf (f, "DEP_OUTPUT; ");
  if (s & DEP_ANTI)
    fprintf (f, "DEP_ANTI; ");
  if (s & DEP_CONTROL)
    fprintf (f, "DEP_CONTROL; ");

  fprintf (f, "}");
}

DEBUG_FUNCTION void
debug_ds (ds_t s)
{
  dump_ds (stderr, s);
  fprintf (stderr, "\n");
}

/* Verify that dependence type and status are consistent.
   If RELAXED_P is true, then skip dep_weakness checks.  */
static void
check_dep (dep_t dep, bool relaxed_p)
{
  enum reg_note dt = DEP_TYPE (dep);
  ds_t ds = DEP_STATUS (dep);

  gcc_assert (DEP_PRO (dep) != DEP_CON (dep));

  if (!(current_sched_info->flags & USE_DEPS_LIST))
    {
      gcc_assert (ds == 0);
      return;
    }

  /* Check that dependence type contains the same bits as the status.  */
  if (dt == REG_DEP_TRUE)
    gcc_assert (ds & DEP_TRUE);
  else if (dt == REG_DEP_OUTPUT)
    gcc_assert ((ds & DEP_OUTPUT)
                && !(ds & DEP_TRUE));
  else if (dt == REG_DEP_ANTI)
    gcc_assert ((ds & DEP_ANTI)
                && !(ds & (DEP_OUTPUT | DEP_TRUE)));
  else
    gcc_assert (dt == REG_DEP_CONTROL
                && (ds & DEP_CONTROL)
                && !(ds & (DEP_OUTPUT | DEP_ANTI | DEP_TRUE)));

  /* HARD_DEP cannot appear in dep_status of a link.  */
  gcc_assert (!(ds & HARD_DEP));

  /* Check that dependence status is set correctly when speculation is not
     supported.  */
  if (!sched_deps_info->generate_spec_deps)
    gcc_assert (!(ds & SPECULATIVE));
  else if (ds & SPECULATIVE)
    {
      if (!relaxed_p)
        {
          ds_t type = FIRST_SPEC_TYPE;

          /* Check that dependence weakness is in proper range.  */
          do
            {
              if (ds & type)
                get_dep_weak (ds, type);

              if (type == LAST_SPEC_TYPE)
                break;
              type <<= SPEC_TYPE_SHIFT;
            }
          while (1);
        }

      if (ds & BEGIN_SPEC)
        {
          /* Only true dependence can be data speculative.  */
          if (ds & BEGIN_DATA)
            gcc_assert (ds & DEP_TRUE);

          /* Control dependencies in the insn scheduler are represented by
             anti-dependencies, therefore only anti dependence can be
             control speculative.  */
          if (ds & BEGIN_CONTROL)
            gcc_assert (ds & DEP_ANTI);
        }
      else
        {
          /* Subsequent speculations should resolve true dependencies.  */
          gcc_assert ((ds & DEP_TYPES) == DEP_TRUE);
        }

      /* Check that true and anti dependencies can't have other speculative
         statuses.  */
      if (ds & DEP_TRUE)
        gcc_assert (ds & (BEGIN_DATA | BE_IN_SPEC));
      /* An output dependence can't be speculative at all.  */
      gcc_assert (!(ds & DEP_OUTPUT));
      if (ds & DEP_ANTI)
        gcc_assert (ds & BEGIN_CONTROL);
    }
}

/* The following code discovers opportunities to switch a memory reference
   and an increment by modifying the address.  We ensure that this is done
   only for dependencies that are only used to show a single register
   dependence (using DEP_NONREG and DEP_MULTIPLE), and so that every memory
   instruction involved is subject to only one dep that can cause a pattern
   change.

   When we discover a suitable dependency, we fill in the dep_replacement
   structure to show how to modify the memory reference.  */
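/* As an illustration (with made-up register numbers), given

       inc:  (set (reg 100) (plus (reg 100) (const_int 8)))
       mem:  ... (mem (plus (reg 100) (const_int 4))) ...

   the memory reference can be rewritten to use address
   (plus (reg 100) (const_int 12)), after which it no longer depends on
   the increment and the two instructions may be interchanged.  */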

/* Holds information about a pair of memory reference and register increment
   insns which depend on each other, but could possibly be interchanged.  */
struct mem_inc_info
{
  rtx_insn *inc_insn;
  rtx_insn *mem_insn;

  rtx *mem_loc;
  /* A register occurring in the memory address for which we wish to break
     the dependence.  This must be identical to the destination register of
     the increment.  */
  rtx mem_reg0;
  /* Any kind of index that is added to that register.  */
  rtx mem_index;
  /* The constant offset used in the memory address.  */
  HOST_WIDE_INT mem_constant;
  /* The constant added in the increment insn.  Negated if the increment is
     after the memory address.  */
  HOST_WIDE_INT inc_constant;
  /* The source register used in the increment.  May be different from
     mem_reg0 if the increment occurs before the memory address.  */
  rtx inc_input;
};

/* Verify that the memory location described in MII can be replaced with
   one using NEW_ADDR.  Return the new memory reference or NULL_RTX.  The
   insn remains unchanged by this function.  */

static rtx
attempt_change (struct mem_inc_info *mii, rtx new_addr)
{
  rtx mem = *mii->mem_loc;
  rtx new_mem;

  if (!targetm.new_address_profitable_p (mem, mii->mem_insn, new_addr))
    return NULL_RTX;

  /* Jump through a lot of hoops to keep the attributes up to date.  We
     do not want to call one of the change address variants that take
     an offset even though we know the offset in many cases.  These
     assume you are changing where the address is pointing by the
     offset.  */
  new_mem = replace_equiv_address_nv (mem, new_addr);
  if (! validate_change (mii->mem_insn, mii->mem_loc, new_mem, 0))
    {
      if (sched_verbose >= 5)
        fprintf (sched_dump, "validation failure\n");
      return NULL_RTX;
    }

  /* Put back the old one.  */
  validate_change (mii->mem_insn, mii->mem_loc, mem, 0);

  return new_mem;
}

/* Return true if INSN is of a form "a = b op c" where a and b are
   regs.  op is + if c is a reg and +|- if c is a const.  Fill in
   information in MII about what is found.
   BEFORE_MEM indicates whether the increment is found before or after
   a corresponding memory reference.  */

static bool
parse_add_or_inc (struct mem_inc_info *mii, rtx_insn *insn, bool before_mem)
{
  rtx pat = single_set (insn);
  rtx src, cst;
  bool regs_equal;

  if (RTX_FRAME_RELATED_P (insn) || !pat)
    return false;

  /* Do not allow breaking data dependencies for insns that are marked
     with REG_STACK_CHECK.  */
  if (find_reg_note (insn, REG_STACK_CHECK, NULL))
    return false;

  /* Result must be single reg.  */
  if (!REG_P (SET_DEST (pat)))
    return false;

  if (GET_CODE (SET_SRC (pat)) != PLUS)
    return false;

  mii->inc_insn = insn;
  src = SET_SRC (pat);
  mii->inc_input = XEXP (src, 0);

  if (!REG_P (XEXP (src, 0)))
    return false;

  if (!rtx_equal_p (SET_DEST (pat), mii->mem_reg0))
    return false;

  cst = XEXP (src, 1);
  if (!CONST_INT_P (cst))
    return false;
  mii->inc_constant = INTVAL (cst);

  regs_equal = rtx_equal_p (mii->inc_input, mii->mem_reg0);

  if (!before_mem)
    {
      mii->inc_constant = -mii->inc_constant;
      if (!regs_equal)
        return false;
    }

  if (regs_equal && REGNO (SET_DEST (pat)) == STACK_POINTER_REGNUM)
    {
      /* Note that the sign has already been reversed for !before_mem.  */
      if (STACK_GROWS_DOWNWARD)
        return mii->inc_constant > 0;
      else
        return mii->inc_constant < 0;
    }
  return true;
}

/* Once a suitable mem reference has been found and the corresponding data
   in MII has been filled in, this function is called to find a suitable
   add or inc insn involving the register we found in the memory
   reference.
   If successful, this function will create additional dependencies between
   - mii->inc_insn's producers and mii->mem_insn as a consumer (if backwards)
   - mii->inc_insn's consumers and mii->mem_insn as a producer
     (if !backwards).  */

static bool
find_inc (struct mem_inc_info *mii, bool backwards)
{
  sd_iterator_def sd_it;
  dep_t dep;
  sd_list_types_def mem_deps = backwards ? SD_LIST_HARD_BACK : SD_LIST_FORW;
  int n_mem_deps = dep_list_size (mii->mem_insn, mem_deps);

  sd_it = sd_iterator_start (mii->mem_insn, mem_deps);
  while (sd_iterator_cond (&sd_it, &dep))
    {
      dep_node_t node = DEP_LINK_NODE (*sd_it.linkp);
      rtx_insn *pro = DEP_PRO (dep);
      rtx_insn *con = DEP_CON (dep);
      rtx_insn *inc_cand;
      int n_inc_deps;

      if (DEP_NONREG (dep) || DEP_MULTIPLE (dep))
        goto next;

      if (backwards)
        {
          inc_cand = pro;
          n_inc_deps = dep_list_size (inc_cand, SD_LIST_BACK);
        }
      else
        {
          inc_cand = con;
          n_inc_deps = dep_list_size (inc_cand, SD_LIST_FORW);
        }

      /* In the FOR_EACH_DEP loop below we will create additional n_inc_deps
         for mem_insn.  This by itself is not a problem, since each mem_insn
         will have only a few inc_insns associated with it.  However, if
         we consider that a single inc_insn may have a lot of mem_insns, AND,
         on top of that, a few other inc_insns associated with it --
         those _other inc_insns_ will get (n_mem_deps * number of MEM insns)
         dependencies created for them.  This may cause an exponential
         growth of memory usage and scheduling time.
         See PR96388 for details.
         We [heuristically] use n_inc_deps as a proxy for the number of MEM
         insns, and drop opportunities for breaking modifiable_mem
         dependencies when dependency lists grow beyond reasonable size.  */
      if (n_mem_deps * n_inc_deps
          >= param_max_pending_list_length * param_max_pending_list_length)
        goto next;

      if (parse_add_or_inc (mii, inc_cand, backwards))
        {
          struct dep_replacement *desc;
          df_ref def;
          rtx newaddr, newmem;

          if (sched_verbose >= 5)
            fprintf (sched_dump, "candidate mem/inc pair: %d %d\n",
                     INSN_UID (mii->mem_insn), INSN_UID (inc_cand));

          /* Need to ensure that none of the operands of the inc
             instruction are assigned to by the mem insn.  */
          FOR_EACH_INSN_DEF (def, mii->mem_insn)
            if (reg_overlap_mentioned_p (DF_REF_REG (def), mii->inc_input)
                || reg_overlap_mentioned_p (DF_REF_REG (def), mii->mem_reg0))
              {
                if (sched_verbose >= 5)
                  fprintf (sched_dump,
                           "inc conflicts with store failure.\n");
                goto next;
              }

          newaddr = mii->inc_input;
          if (mii->mem_index != NULL_RTX)
            newaddr = gen_rtx_PLUS (GET_MODE (newaddr), newaddr,
                                    mii->mem_index);
          newaddr = plus_constant (GET_MODE (newaddr), newaddr,
                                   mii->mem_constant + mii->inc_constant);
          newmem = attempt_change (mii, newaddr);
          if (newmem == NULL_RTX)
            goto next;
          if (sched_verbose >= 5)
            fprintf (sched_dump, "successful address replacement\n");
          desc = XCNEW (struct dep_replacement);
          DEP_REPLACE (dep) = desc;
          desc->loc = mii->mem_loc;
          desc->newval = newmem;
          desc->orig = *desc->loc;
          desc->insn = mii->mem_insn;
          move_dep_link (DEP_NODE_BACK (node), INSN_HARD_BACK_DEPS (con),
                         INSN_SPEC_BACK_DEPS (con));

          /* Make sure that n_inc_deps above is consistent with dependencies
             we create.  */
          gcc_assert (mii->inc_insn == inc_cand);

          if (backwards)
            {
              FOR_EACH_DEP (mii->inc_insn, SD_LIST_BACK, sd_it, dep)
                add_dependence_1 (mii->mem_insn, DEP_PRO (dep),
                                  REG_DEP_TRUE);
            }
          else
            {
              FOR_EACH_DEP (mii->inc_insn, SD_LIST_FORW, sd_it, dep)
                add_dependence_1 (DEP_CON (dep), mii->mem_insn,
                                  REG_DEP_ANTI);
            }
          return true;
        }
    next:
      sd_iterator_next (&sd_it);
    }
  return false;
}

/* A recursive function that walks ADDRESS_OF_X to find memory references
   which could be modified during scheduling.  We call find_inc for each
   one we find that has a recognizable form.  MII holds information about
   the pair of memory/increment instructions.
   We ensure that every instruction with a memory reference (which will be
   the location of the replacement) is assigned at most one breakable
   dependency.  */
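/* The addresses recognized below are of the form (reg),
   (plus (reg) (const_int)), (plus (reg) (index)) and
   (plus (plus (reg) (index)) (const_int)).  */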

static bool
find_mem (struct mem_inc_info *mii, rtx *address_of_x)
{
  rtx x = *address_of_x;
  enum rtx_code code = GET_CODE (x);
  const char *const fmt = GET_RTX_FORMAT (code);
  int i;

  if (code == MEM)
    {
      rtx reg0 = XEXP (x, 0);

      mii->mem_loc = address_of_x;
      mii->mem_index = NULL_RTX;
      mii->mem_constant = 0;
      if (GET_CODE (reg0) == PLUS && CONST_INT_P (XEXP (reg0, 1)))
        {
          mii->mem_constant = INTVAL (XEXP (reg0, 1));
          reg0 = XEXP (reg0, 0);
        }
      if (GET_CODE (reg0) == PLUS)
        {
          mii->mem_index = XEXP (reg0, 1);
          reg0 = XEXP (reg0, 0);
        }
      if (REG_P (reg0))
        {
          df_ref use;
          int occurrences = 0;

          /* Make sure this reg appears only once in this insn.  Can't use
             count_occurrences since that only works for pseudos.  */
          FOR_EACH_INSN_USE (use, mii->mem_insn)
            if (reg_overlap_mentioned_p (reg0, DF_REF_REG (use)))
              if (++occurrences > 1)
                {
                  if (sched_verbose >= 5)
                    fprintf (sched_dump, "mem count failure\n");
                  return false;
                }

          mii->mem_reg0 = reg0;
          return find_inc (mii, true) || find_inc (mii, false);
        }
      return false;
    }

  if (code == SIGN_EXTRACT || code == ZERO_EXTRACT)
    {
      /* If REG occurs inside a MEM used in a bit-field reference,
         that is unacceptable.  */
      return false;
    }

  /* Time for some deep diving.  */
  for (i = GET_RTX_LENGTH (code) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'e')
        {
          if (find_mem (mii, &XEXP (x, i)))
            return true;
        }
      else if (fmt[i] == 'E')
        {
          int j;
          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (find_mem (mii, &XVECEXP (x, i, j)))
              return true;
        }
    }
  return false;
}


/* Examine the instructions between HEAD and TAIL and try to find
   dependencies that can be broken by modifying one of the patterns.  */

void
find_modifiable_mems (rtx_insn *head, rtx_insn *tail)
{
  rtx_insn *insn, *next_tail = NEXT_INSN (tail);
  int success_in_block = 0;

  for (insn = head; insn != next_tail; insn = NEXT_INSN (insn))
    {
      struct mem_inc_info mii;

      if (!NONDEBUG_INSN_P (insn) || RTX_FRAME_RELATED_P (insn))
        continue;

      mii.mem_insn = insn;
      if (find_mem (&mii, &PATTERN (insn)))
        success_in_block++;
    }
  if (success_in_block && sched_verbose >= 5)
    fprintf (sched_dump, "%d candidates for address modification found.\n",
             success_in_block);
}

#endif /* INSN_SCHEDULING */