1 | /* Instruction scheduling pass. Selective scheduler and pipeliner. |
2 | Copyright (C) 2006-2017 Free Software Foundation, Inc. |
3 | |
4 | This file is part of GCC. |
5 | |
6 | GCC is free software; you can redistribute it and/or modify it under |
7 | the terms of the GNU General Public License as published by the Free |
8 | Software Foundation; either version 3, or (at your option) any later |
9 | version. |
10 | |
11 | GCC is distributed in the hope that it will be useful, but WITHOUT ANY |
12 | WARRANTY; without even the implied warranty of MERCHANTABILITY or |
13 | FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License |
14 | for more details. |
15 | |
16 | You should have received a copy of the GNU General Public License |
17 | along with GCC; see the file COPYING3. If not see |
18 | <http://www.gnu.org/licenses/>. */ |
19 | |
20 | #include "config.h" |
21 | #include "system.h" |
22 | #include "coretypes.h" |
23 | #include "backend.h" |
24 | #include "cfghooks.h" |
25 | #include "tree.h" |
26 | #include "rtl.h" |
27 | #include "df.h" |
28 | #include "memmodel.h" |
29 | #include "tm_p.h" |
30 | #include "cfgrtl.h" |
31 | #include "cfganal.h" |
32 | #include "cfgbuild.h" |
33 | #include "insn-config.h" |
34 | #include "insn-attr.h" |
35 | #include "recog.h" |
36 | #include "params.h" |
37 | #include "target.h" |
38 | #include "sched-int.h" |
39 | #include "emit-rtl.h" /* FIXME: Can go away once crtl is moved to rtl.h. */ |
40 | |
41 | #ifdef INSN_SCHEDULING |
42 | #include "regset.h" |
43 | #include "cfgloop.h" |
44 | #include "sel-sched-ir.h" |
45 | /* We don't have to use it except for sel_print_insn. */ |
46 | #include "sel-sched-dump.h" |
47 | |
/* A vector holding bb info for the whole scheduling pass. */
49 | vec<sel_global_bb_info_def> sel_global_bb_info; |
50 | |
/* A vector holding bb info for the current region. */
52 | vec<sel_region_bb_info_def> sel_region_bb_info; |
53 | |
54 | /* A pool for allocating all lists. */ |
object_allocator<_list_node> sched_lists_pool ("sel-sched-lists");
56 | |
57 | /* This contains information about successors for compute_av_set. */ |
58 | struct succs_info current_succs; |
59 | |
60 | /* Data structure to describe interaction with the generic scheduler utils. */ |
61 | static struct common_sched_info_def sel_common_sched_info; |
62 | |
63 | /* The loop nest being pipelined. */ |
64 | struct loop *current_loop_nest; |
65 | |
66 | /* LOOP_NESTS is a vector containing the corresponding loop nest for |
67 | each region. */ |
68 | static vec<loop_p> loop_nests; |
69 | |
70 | /* Saves blocks already in loop regions, indexed by bb->index. */ |
71 | static sbitmap bbs_in_loop_rgns = NULL; |
72 | |
73 | /* CFG hooks that are saved before changing create_basic_block hook. */ |
74 | static struct cfg_hooks orig_cfg_hooks; |
75 | |
76 | |
77 | /* Array containing reverse topological index of function basic blocks, |
78 | indexed by BB->INDEX. */ |
79 | static int *rev_top_order_index = NULL; |
80 | |
81 | /* Length of the above array. */ |
82 | static int rev_top_order_index_len = -1; |
83 | |
84 | /* A regset pool structure. */ |
85 | static struct |
86 | { |
87 | /* The stack to which regsets are returned. */ |
88 | regset *v; |
89 | |
/* Its stack pointer, i.e. the number of regsets currently in the stack. */
int n;
92 | |
93 | /* Its size. */ |
94 | int s; |
95 | |
/* In VV we save all generated regsets so that, when destructing the
   pool, we can compare it with V and check that every regset was
   returned back to the pool. */
99 | regset *vv; |
100 | |
/* The stack pointer of VV. */
int nn;
103 | |
104 | /* Its size. */ |
105 | int ss; |
106 | |
107 | /* The difference between allocated and returned regsets. */ |
108 | int diff; |
109 | } regset_pool = { NULL, 0, 0, NULL, 0, 0, 0 }; |
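
/* For illustration (not a real call site): the DIFF counter stays balanced
   by pairing the two pool entry points, e.g.

     regset rs = get_regset_from_pool ();
     ...
     return_regset_to_pool (rs);

   so that free_regset_pool can assert that DIFF is zero and thus catch
   leaked regsets. */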
110 | |
111 | /* This represents the nop pool. */ |
112 | static struct |
113 | { |
114 | /* The vector which holds previously emitted nops. */ |
115 | insn_t *v; |
116 | |
/* Its stack pointer, i.e. the number of nops currently in the vector. */
int n;
119 | |
120 | /* Its size. */ |
121 | int s; |
122 | } nop_pool = { NULL, 0, 0 }; |
123 | |
124 | /* The pool for basic block notes. */ |
125 | static vec<rtx_note *> bb_note_pool; |
126 | |
127 | /* A NOP pattern used to emit placeholder insns. */ |
128 | rtx nop_pattern = NULL_RTX; |
129 | /* A special instruction that resides in EXIT_BLOCK. |
   EXIT_INSN is a successor of the insns that lead to EXIT_BLOCK. */
131 | rtx_insn *exit_insn = NULL; |
132 | |
/* TRUE if, while scheduling the current region (which is a loop), its
   preheader was removed. */
bool preheader_removed = false;
136 | |
137 | |
138 | /* Forward static declarations. */ |
139 | static void fence_clear (fence_t); |
140 | |
141 | static void deps_init_id (idata_t, insn_t, bool); |
142 | static void init_id_from_df (idata_t, insn_t, bool); |
143 | static expr_t set_insn_init (expr_t, vinsn_t, int); |
144 | |
145 | static void cfg_preds (basic_block, insn_t **, int *); |
146 | static void prepare_insn_expr (insn_t, int); |
147 | static void free_history_vect (vec<expr_history_def> &); |
148 | |
149 | static void move_bb_info (basic_block, basic_block); |
150 | static void remove_empty_bb (basic_block, bool); |
151 | static void sel_merge_blocks (basic_block, basic_block); |
152 | static void sel_remove_loop_preheader (void); |
153 | static bool bb_has_removable_jump_to_p (basic_block, basic_block); |
154 | |
155 | static bool insn_is_the_only_one_in_bb_p (insn_t); |
156 | static void create_initial_data_sets (basic_block); |
157 | |
158 | static void free_av_set (basic_block); |
159 | static void invalidate_av_set (basic_block); |
160 | static void extend_insn_data (void); |
161 | static void sel_init_new_insn (insn_t, int, int = -1); |
162 | static void finish_insns (void); |
163 | |
164 | /* Various list functions. */ |
165 | |
166 | /* Copy an instruction list L. */ |
167 | ilist_t |
168 | ilist_copy (ilist_t l) |
169 | { |
170 | ilist_t head = NULL, *tailp = &head; |
171 | |
172 | while (l) |
173 | { |
174 | ilist_add (tailp, ILIST_INSN (l)); |
175 | tailp = &ILIST_NEXT (*tailp); |
176 | l = ILIST_NEXT (l); |
177 | } |
178 | |
179 | return head; |
180 | } |
181 | |
182 | /* Invert an instruction list L. */ |
183 | ilist_t |
184 | ilist_invert (ilist_t l) |
185 | { |
186 | ilist_t res = NULL; |
187 | |
188 | while (l) |
189 | { |
190 | ilist_add (&res, ILIST_INSN (l)); |
191 | l = ILIST_NEXT (l); |
192 | } |
193 | |
194 | return res; |
195 | } |
196 | |
197 | /* Add a new boundary to the LP list with parameters TO, PTR, and DC. */ |
198 | void |
199 | blist_add (blist_t *lp, insn_t to, ilist_t ptr, deps_t dc) |
200 | { |
201 | bnd_t bnd; |
202 | |
203 | _list_add (lp); |
204 | bnd = BLIST_BND (*lp); |
205 | |
206 | BND_TO (bnd) = to; |
207 | BND_PTR (bnd) = ptr; |
208 | BND_AV (bnd) = NULL; |
209 | BND_AV1 (bnd) = NULL; |
210 | BND_DC (bnd) = dc; |
211 | } |
212 | |
/* Remove the list node pointed to by LP. */
214 | void |
215 | blist_remove (blist_t *lp) |
216 | { |
217 | bnd_t b = BLIST_BND (*lp); |
218 | |
219 | av_set_clear (&BND_AV (b)); |
220 | av_set_clear (&BND_AV1 (b)); |
221 | ilist_clear (&BND_PTR (b)); |
222 | |
223 | _list_remove (lp); |
224 | } |
225 | |
/* Initialize the fence list tail L. */
227 | void |
228 | flist_tail_init (flist_tail_t l) |
229 | { |
230 | FLIST_TAIL_HEAD (l) = NULL; |
231 | FLIST_TAIL_TAILP (l) = &FLIST_TAIL_HEAD (l); |
232 | } |
233 | |
/* Try to find a fence corresponding to INSN in L. */
235 | fence_t |
236 | flist_lookup (flist_t l, insn_t insn) |
237 | { |
238 | while (l) |
239 | { |
240 | if (FENCE_INSN (FLIST_FENCE (l)) == insn) |
241 | return FLIST_FENCE (l); |
242 | |
243 | l = FLIST_NEXT (l); |
244 | } |
245 | |
246 | return NULL; |
247 | } |
248 | |
249 | /* Init the fields of F before running fill_insns. */ |
250 | static void |
251 | init_fence_for_scheduling (fence_t f) |
252 | { |
253 | FENCE_BNDS (f) = NULL; |
254 | FENCE_PROCESSED_P (f) = false; |
255 | FENCE_SCHEDULED_P (f) = false; |
256 | } |
257 | |
258 | /* Add new fence consisting of INSN and STATE to the list pointed to by LP. */ |
259 | static void |
260 | flist_add (flist_t *lp, insn_t insn, state_t state, deps_t dc, void *tc, |
261 | insn_t last_scheduled_insn, vec<rtx_insn *, va_gc> *executing_insns, |
262 | int *ready_ticks, int ready_ticks_size, insn_t sched_next, |
263 | int cycle, int cycle_issued_insns, int issue_more, |
264 | bool starts_cycle_p, bool after_stall_p) |
265 | { |
266 | fence_t f; |
267 | |
268 | _list_add (lp); |
269 | f = FLIST_FENCE (*lp); |
270 | |
271 | FENCE_INSN (f) = insn; |
272 | |
273 | gcc_assert (state != NULL); |
274 | FENCE_STATE (f) = state; |
275 | |
276 | FENCE_CYCLE (f) = cycle; |
277 | FENCE_ISSUED_INSNS (f) = cycle_issued_insns; |
278 | FENCE_STARTS_CYCLE_P (f) = starts_cycle_p; |
279 | FENCE_AFTER_STALL_P (f) = after_stall_p; |
280 | |
281 | gcc_assert (dc != NULL); |
282 | FENCE_DC (f) = dc; |
283 | |
284 | gcc_assert (tc != NULL || targetm.sched.alloc_sched_context == NULL); |
285 | FENCE_TC (f) = tc; |
286 | |
287 | FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; |
288 | FENCE_ISSUE_MORE (f) = issue_more; |
289 | FENCE_EXECUTING_INSNS (f) = executing_insns; |
290 | FENCE_READY_TICKS (f) = ready_ticks; |
291 | FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; |
292 | FENCE_SCHED_NEXT (f) = sched_next; |
293 | |
294 | init_fence_for_scheduling (f); |
295 | } |
296 | |
297 | /* Remove the head node of the list pointed to by LP. */ |
298 | static void |
299 | flist_remove (flist_t *lp) |
300 | { |
301 | if (FENCE_INSN (FLIST_FENCE (*lp))) |
302 | fence_clear (FLIST_FENCE (*lp)); |
303 | _list_remove (lp); |
304 | } |
305 | |
306 | /* Clear the fence list pointed to by LP. */ |
307 | void |
308 | flist_clear (flist_t *lp) |
309 | { |
310 | while (*lp) |
311 | flist_remove (lp); |
312 | } |
313 | |
/* Add ORIGINAL_INSN to the def list DL, honoring CROSSES_CALL. */
315 | void |
316 | def_list_add (def_list_t *dl, insn_t original_insn, bool crosses_call) |
317 | { |
318 | def_t d; |
319 | |
320 | _list_add (dl); |
321 | d = DEF_LIST_DEF (*dl); |
322 | |
323 | d->orig_insn = original_insn; |
324 | d->crosses_call = crosses_call; |
325 | } |
326 | |
327 | |
328 | /* Functions to work with target contexts. */ |
329 | |
330 | /* Bulk target context. It is convenient for debugging purposes to ensure |
331 | that there are no uninitialized (null) target contexts. */ |
332 | static tc_t bulk_tc = (tc_t) 1; |
333 | |
334 | /* Target hooks wrappers. In the future we can provide some default |
335 | implementations for them. */ |
336 | |
337 | /* Allocate a store for the target context. */ |
338 | static tc_t |
339 | alloc_target_context (void) |
340 | { |
341 | return (targetm.sched.alloc_sched_context |
342 | ? targetm.sched.alloc_sched_context () : bulk_tc); |
343 | } |
344 | |
/* Init target context TC.
   If CLEAN_P is true, then make TC as it is at the beginning of the
   scheduler. Otherwise, copy the current backend context to TC. */
348 | static void |
349 | init_target_context (tc_t tc, bool clean_p) |
350 | { |
351 | if (targetm.sched.init_sched_context) |
352 | targetm.sched.init_sched_context (tc, clean_p); |
353 | } |
354 | |
/* Allocate and initialize a target context. The meaning of CLEAN_P is
   the same as in init_target_context (). */
357 | tc_t |
358 | create_target_context (bool clean_p) |
359 | { |
360 | tc_t tc = alloc_target_context (); |
361 | |
362 | init_target_context (tc, clean_p); |
363 | return tc; |
364 | } |
365 | |
366 | /* Copy TC to the current backend context. */ |
367 | void |
368 | set_target_context (tc_t tc) |
369 | { |
370 | if (targetm.sched.set_sched_context) |
371 | targetm.sched.set_sched_context (tc); |
372 | } |
373 | |
374 | /* TC is about to be destroyed. Free any internal data. */ |
375 | static void |
376 | clear_target_context (tc_t tc) |
377 | { |
378 | if (targetm.sched.clear_sched_context) |
379 | targetm.sched.clear_sched_context (tc); |
380 | } |
381 | |
/* Clear and free target context TC. */
383 | static void |
384 | delete_target_context (tc_t tc) |
385 | { |
386 | clear_target_context (tc); |
387 | |
388 | if (targetm.sched.free_sched_context) |
389 | targetm.sched.free_sched_context (tc); |
390 | } |
391 | |
/* Make a copy of FROM in TO.
   NB: Maybe this should be a hook. */
static void
copy_target_context (tc_t to, tc_t from)
{
  /* TMP saves the current backend context. */
  tc_t tmp = create_target_context (false);

  /* Make FROM the current backend context, initialize TO from it, and
     then restore the saved context. */
  set_target_context (from);
  init_target_context (to, false);

  set_target_context (tmp);
  delete_target_context (tmp);
}
405 | |
406 | /* Create a copy of TC. */ |
407 | static tc_t |
408 | create_copy_of_target_context (tc_t tc) |
409 | { |
410 | tc_t copy = alloc_target_context (); |
411 | |
412 | copy_target_context (copy, tc); |
413 | |
414 | return copy; |
415 | } |
416 | |
417 | /* Clear TC and initialize it according to CLEAN_P. The meaning of CLEAN_P |
418 | is the same as in init_target_context (). */ |
419 | void |
420 | reset_target_context (tc_t tc, bool clean_p) |
421 | { |
422 | clear_target_context (tc); |
423 | init_target_context (tc, clean_p); |
424 | } |
425 | |
/* Functions to work with dependence contexts.
   DC (aka deps context, aka deps_t, aka struct deps_desc *) is short for
   dependence context. It accumulates information about processed insns
   to decide whether the current insn depends on them. */
430 | |
431 | /* Make a copy of FROM in TO. */ |
432 | static void |
433 | copy_deps_context (deps_t to, deps_t from) |
434 | { |
435 | init_deps (to, false); |
436 | deps_join (to, from); |
437 | } |
438 | |
439 | /* Allocate store for dep context. */ |
440 | static deps_t |
441 | alloc_deps_context (void) |
442 | { |
443 | return XNEW (struct deps_desc); |
444 | } |
445 | |
446 | /* Allocate and initialize dep context. */ |
447 | static deps_t |
448 | create_deps_context (void) |
449 | { |
450 | deps_t dc = alloc_deps_context (); |
451 | |
452 | init_deps (dc, false); |
453 | return dc; |
454 | } |
455 | |
456 | /* Create a copy of FROM. */ |
457 | static deps_t |
458 | create_copy_of_deps_context (deps_t from) |
459 | { |
460 | deps_t to = alloc_deps_context (); |
461 | |
462 | copy_deps_context (to, from); |
463 | return to; |
464 | } |
465 | |
466 | /* Clean up internal data of DC. */ |
467 | static void |
468 | clear_deps_context (deps_t dc) |
469 | { |
470 | free_deps (dc); |
471 | } |
472 | |
473 | /* Clear and free DC. */ |
474 | static void |
475 | delete_deps_context (deps_t dc) |
476 | { |
477 | clear_deps_context (dc); |
478 | free (dc); |
479 | } |
480 | |
481 | /* Clear and init DC. */ |
482 | static void |
483 | reset_deps_context (deps_t dc) |
484 | { |
485 | clear_deps_context (dc); |
486 | init_deps (dc, false); |
487 | } |
488 | |
489 | /* This structure describes the dependence analysis hooks for advancing |
490 | dependence context. */ |
491 | static struct sched_deps_info_def advance_deps_context_sched_deps_info = |
492 | { |
493 | NULL, |
494 | |
495 | NULL, /* start_insn */ |
496 | NULL, /* finish_insn */ |
497 | NULL, /* start_lhs */ |
498 | NULL, /* finish_lhs */ |
499 | NULL, /* start_rhs */ |
500 | NULL, /* finish_rhs */ |
501 | haifa_note_reg_set, |
502 | haifa_note_reg_clobber, |
503 | haifa_note_reg_use, |
504 | NULL, /* note_mem_dep */ |
505 | NULL, /* note_dep */ |
506 | |
507 | 0, 0, 0 |
508 | }; |
509 | |
510 | /* Process INSN and add its impact on DC. */ |
511 | void |
512 | advance_deps_context (deps_t dc, insn_t insn) |
513 | { |
514 | sched_deps_info = &advance_deps_context_sched_deps_info; |
515 | deps_analyze_insn (dc, insn); |
516 | } |
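
/* For illustration (a sketch, not a specific call site): a dependence
   context is typically created once, advanced over each processed insn,
   and freed when it is no longer needed:

     deps_t dc = create_deps_context ();
     advance_deps_context (dc, insn);
     ...
     delete_deps_context (dc);  */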
517 | |
518 | |
519 | /* Functions to work with DFA states. */ |
520 | |
521 | /* Allocate store for a DFA state. */ |
522 | static state_t |
523 | state_alloc (void) |
524 | { |
525 | return xmalloc (dfa_state_size); |
526 | } |
527 | |
528 | /* Allocate and initialize DFA state. */ |
529 | static state_t |
530 | state_create (void) |
531 | { |
532 | state_t state = state_alloc (); |
533 | |
534 | state_reset (state); |
535 | advance_state (state); |
536 | return state; |
537 | } |
538 | |
539 | /* Free DFA state. */ |
540 | static void |
541 | state_free (state_t state) |
542 | { |
543 | free (state); |
544 | } |
545 | |
546 | /* Make a copy of FROM in TO. */ |
547 | static void |
548 | state_copy (state_t to, state_t from) |
549 | { |
550 | memcpy (to, from, dfa_state_size); |
551 | } |
552 | |
553 | /* Create a copy of FROM. */ |
554 | static state_t |
555 | state_create_copy (state_t from) |
556 | { |
557 | state_t to = state_alloc (); |
558 | |
559 | state_copy (to, from); |
560 | return to; |
561 | } |
562 | |
563 | |
564 | /* Functions to work with fences. */ |
565 | |
566 | /* Clear the fence. */ |
567 | static void |
568 | fence_clear (fence_t f) |
569 | { |
570 | state_t s = FENCE_STATE (f); |
571 | deps_t dc = FENCE_DC (f); |
572 | void *tc = FENCE_TC (f); |
573 | |
574 | ilist_clear (&FENCE_BNDS (f)); |
575 | |
576 | gcc_assert ((s != NULL && dc != NULL && tc != NULL) |
577 | || (s == NULL && dc == NULL && tc == NULL)); |
578 | |
579 | free (s); |
580 | |
581 | if (dc != NULL) |
582 | delete_deps_context (dc); |
583 | |
584 | if (tc != NULL) |
585 | delete_target_context (tc); |
586 | vec_free (FENCE_EXECUTING_INSNS (f)); |
587 | free (FENCE_READY_TICKS (f)); |
588 | FENCE_READY_TICKS (f) = NULL; |
589 | } |
590 | |
591 | /* Init a list of fences with successors of OLD_FENCE. */ |
592 | void |
593 | init_fences (insn_t old_fence) |
594 | { |
595 | insn_t succ; |
596 | succ_iterator si; |
597 | bool first = true; |
598 | int ready_ticks_size = get_max_uid () + 1; |
599 | |
600 | FOR_EACH_SUCC_1 (succ, si, old_fence, |
601 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
602 | { |
603 | |
604 | if (first) |
605 | first = false; |
606 | else |
607 | gcc_assert (flag_sel_sched_pipelining_outer_loops); |
608 | |
609 | flist_add (&fences, succ, |
610 | state_create (), |
611 | create_deps_context () /* dc */, |
612 | create_target_context (true) /* tc */, |
613 | NULL /* last_scheduled_insn */, |
614 | NULL, /* executing_insns */ |
615 | XCNEWVEC (int, ready_ticks_size), /* ready_ticks */ |
616 | ready_ticks_size, |
617 | NULL /* sched_next */, |
618 | 1 /* cycle */, 0 /* cycle_issued_insns */, |
619 | issue_rate, /* issue_more */ |
620 | 1 /* starts_cycle_p */, 0 /* after_stall_p */); |
621 | } |
622 | } |
623 | |
/* Merge two fences (filling the fields of fence F with the resulting
   values) by the following rules: 1) state, target context and last
   scheduled insn are propagated from the fallthrough edge if it is
   available; 2) deps context and cycle are propagated from the more
   probable edge; 3) all other fields are set to the corresponding
   constant values.
629 | |
630 | INSN, STATE, DC, TC, LAST_SCHEDULED_INSN, EXECUTING_INSNS, |
631 | READY_TICKS, READY_TICKS_SIZE, SCHED_NEXT, CYCLE, ISSUE_MORE |
632 | and AFTER_STALL_P are the corresponding fields of the second fence. */ |
633 | static void |
634 | merge_fences (fence_t f, insn_t insn, |
635 | state_t state, deps_t dc, void *tc, |
636 | rtx_insn *last_scheduled_insn, |
637 | vec<rtx_insn *, va_gc> *executing_insns, |
638 | int *ready_ticks, int ready_ticks_size, |
639 | rtx sched_next, int cycle, int issue_more, bool after_stall_p) |
640 | { |
641 | insn_t last_scheduled_insn_old = FENCE_LAST_SCHEDULED_INSN (f); |
642 | |
643 | gcc_assert (sel_bb_head_p (FENCE_INSN (f)) |
644 | && !sched_next && !FENCE_SCHED_NEXT (f)); |
645 | |
/* Check if we can decide along which path the fences came.
   If we can't (or don't want to), reset everything. */
648 | if (last_scheduled_insn == NULL |
649 | || last_scheduled_insn_old == NULL |
/* This is the case when INSN is reachable on several paths from
   one insn (this can happen when pipelining of outer loops is on and
   there are two edges: one going around the inner loop and the other
   going right through it; in such a case just reset everything). */
654 | || last_scheduled_insn == last_scheduled_insn_old) |
655 | { |
656 | state_reset (FENCE_STATE (f)); |
657 | state_free (state); |
658 | |
659 | reset_deps_context (FENCE_DC (f)); |
660 | delete_deps_context (dc); |
661 | |
662 | reset_target_context (FENCE_TC (f), true); |
663 | delete_target_context (tc); |
664 | |
665 | if (cycle > FENCE_CYCLE (f)) |
666 | FENCE_CYCLE (f) = cycle; |
667 | |
668 | FENCE_LAST_SCHEDULED_INSN (f) = NULL; |
669 | FENCE_ISSUE_MORE (f) = issue_rate; |
670 | vec_free (executing_insns); |
671 | free (ready_ticks); |
672 | if (FENCE_EXECUTING_INSNS (f)) |
673 | FENCE_EXECUTING_INSNS (f)->block_remove (0, |
674 | FENCE_EXECUTING_INSNS (f)->length ()); |
675 | if (FENCE_READY_TICKS (f)) |
676 | memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); |
677 | } |
678 | else |
679 | { |
680 | edge edge_old = NULL, edge_new = NULL; |
681 | edge candidate; |
682 | succ_iterator si; |
683 | insn_t succ; |
684 | |
685 | /* Find fallthrough edge. */ |
686 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb); |
687 | candidate = find_fallthru_edge_from (BLOCK_FOR_INSN (insn)->prev_bb); |
688 | |
689 | if (!candidate |
690 | || (candidate->src != BLOCK_FOR_INSN (last_scheduled_insn) |
691 | && candidate->src != BLOCK_FOR_INSN (last_scheduled_insn_old))) |
692 | { |
693 | /* No fallthrough edge leading to basic block of INSN. */ |
694 | state_reset (FENCE_STATE (f)); |
695 | state_free (state); |
696 | |
697 | reset_target_context (FENCE_TC (f), true); |
698 | delete_target_context (tc); |
699 | |
700 | FENCE_LAST_SCHEDULED_INSN (f) = NULL; |
701 | FENCE_ISSUE_MORE (f) = issue_rate; |
702 | } |
703 | else |
704 | if (candidate->src == BLOCK_FOR_INSN (last_scheduled_insn)) |
705 | { |
/* It would be weird if the same insn were the successor of several
   fallthrough edges. */
708 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb |
709 | != BLOCK_FOR_INSN (last_scheduled_insn_old)); |
710 | |
711 | state_free (FENCE_STATE (f)); |
712 | FENCE_STATE (f) = state; |
713 | |
714 | delete_target_context (FENCE_TC (f)); |
715 | FENCE_TC (f) = tc; |
716 | |
717 | FENCE_LAST_SCHEDULED_INSN (f) = last_scheduled_insn; |
718 | FENCE_ISSUE_MORE (f) = issue_more; |
719 | } |
720 | else |
721 | { |
722 | /* Leave STATE, TC and LAST_SCHEDULED_INSN fields untouched. */ |
723 | state_free (state); |
724 | delete_target_context (tc); |
725 | |
726 | gcc_assert (BLOCK_FOR_INSN (insn)->prev_bb |
727 | != BLOCK_FOR_INSN (last_scheduled_insn)); |
728 | } |
729 | |
730 | /* Find edge of first predecessor (last_scheduled_insn_old->insn). */ |
731 | FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn_old, |
732 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
733 | { |
734 | if (succ == insn) |
735 | { |
736 | /* No same successor allowed from several edges. */ |
737 | gcc_assert (!edge_old); |
738 | edge_old = si.e1; |
739 | } |
740 | } |
741 | /* Find edge of second predecessor (last_scheduled_insn->insn). */ |
742 | FOR_EACH_SUCC_1 (succ, si, last_scheduled_insn, |
743 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
744 | { |
745 | if (succ == insn) |
746 | { |
747 | /* No same successor allowed from several edges. */ |
748 | gcc_assert (!edge_new); |
749 | edge_new = si.e1; |
750 | } |
751 | } |
752 | |
753 | /* Check if we can choose most probable predecessor. */ |
754 | if (edge_old == NULL || edge_new == NULL) |
755 | { |
756 | reset_deps_context (FENCE_DC (f)); |
757 | delete_deps_context (dc); |
758 | vec_free (executing_insns); |
759 | free (ready_ticks); |
760 | |
761 | FENCE_CYCLE (f) = MAX (FENCE_CYCLE (f), cycle); |
762 | if (FENCE_EXECUTING_INSNS (f)) |
763 | FENCE_EXECUTING_INSNS (f)->block_remove (0, |
764 | FENCE_EXECUTING_INSNS (f)->length ()); |
765 | if (FENCE_READY_TICKS (f)) |
766 | memset (FENCE_READY_TICKS (f), 0, FENCE_READY_TICKS_SIZE (f)); |
767 | } |
768 | else |
769 | if (edge_new->probability > edge_old->probability) |
770 | { |
771 | delete_deps_context (FENCE_DC (f)); |
772 | FENCE_DC (f) = dc; |
773 | vec_free (FENCE_EXECUTING_INSNS (f)); |
774 | FENCE_EXECUTING_INSNS (f) = executing_insns; |
775 | free (FENCE_READY_TICKS (f)); |
776 | FENCE_READY_TICKS (f) = ready_ticks; |
777 | FENCE_READY_TICKS_SIZE (f) = ready_ticks_size; |
778 | FENCE_CYCLE (f) = cycle; |
779 | } |
780 | else |
781 | { |
782 | /* Leave DC and CYCLE untouched. */ |
783 | delete_deps_context (dc); |
784 | vec_free (executing_insns); |
785 | free (ready_ticks); |
786 | } |
787 | } |
788 | |
789 | /* Fill remaining invariant fields. */ |
790 | if (after_stall_p) |
791 | FENCE_AFTER_STALL_P (f) = 1; |
792 | |
793 | FENCE_ISSUED_INSNS (f) = 0; |
794 | FENCE_STARTS_CYCLE_P (f) = 1; |
795 | FENCE_SCHED_NEXT (f) = NULL; |
796 | } |
797 | |
798 | /* Add a new fence to NEW_FENCES list, initializing it from all |
799 | other parameters. */ |
800 | static void |
801 | add_to_fences (flist_tail_t new_fences, insn_t insn, |
802 | state_t state, deps_t dc, void *tc, |
803 | rtx_insn *last_scheduled_insn, |
804 | vec<rtx_insn *, va_gc> *executing_insns, int *ready_ticks, |
805 | int ready_ticks_size, rtx_insn *sched_next, int cycle, |
806 | int cycle_issued_insns, int issue_rate, |
807 | bool starts_cycle_p, bool after_stall_p) |
808 | { |
809 | fence_t f = flist_lookup (FLIST_TAIL_HEAD (new_fences), insn); |
810 | |
811 | if (! f) |
812 | { |
813 | flist_add (FLIST_TAIL_TAILP (new_fences), insn, state, dc, tc, |
814 | last_scheduled_insn, executing_insns, ready_ticks, |
815 | ready_ticks_size, sched_next, cycle, cycle_issued_insns, |
816 | issue_rate, starts_cycle_p, after_stall_p); |
817 | |
818 | FLIST_TAIL_TAILP (new_fences) |
819 | = &FLIST_NEXT (*FLIST_TAIL_TAILP (new_fences)); |
820 | } |
821 | else |
822 | { |
823 | merge_fences (f, insn, state, dc, tc, last_scheduled_insn, |
824 | executing_insns, ready_ticks, ready_ticks_size, |
825 | sched_next, cycle, issue_rate, after_stall_p); |
826 | } |
827 | } |
828 | |
829 | /* Move the first fence in the OLD_FENCES list to NEW_FENCES. */ |
830 | void |
831 | move_fence_to_fences (flist_t old_fences, flist_tail_t new_fences) |
832 | { |
833 | fence_t f, old; |
834 | flist_t *tailp = FLIST_TAIL_TAILP (new_fences); |
835 | |
836 | old = FLIST_FENCE (old_fences); |
837 | f = flist_lookup (FLIST_TAIL_HEAD (new_fences), |
838 | FENCE_INSN (FLIST_FENCE (old_fences))); |
839 | if (f) |
840 | { |
841 | merge_fences (f, old->insn, old->state, old->dc, old->tc, |
842 | old->last_scheduled_insn, old->executing_insns, |
843 | old->ready_ticks, old->ready_ticks_size, |
844 | old->sched_next, old->cycle, old->issue_more, |
845 | old->after_stall_p); |
846 | } |
847 | else |
848 | { |
849 | _list_add (tailp); |
850 | FLIST_TAIL_TAILP (new_fences) = &FLIST_NEXT (*tailp); |
851 | *FLIST_FENCE (*tailp) = *old; |
852 | init_fence_for_scheduling (FLIST_FENCE (*tailp)); |
853 | } |
854 | FENCE_INSN (old) = NULL; |
855 | } |
856 | |
857 | /* Add a new fence to NEW_FENCES list and initialize most of its data |
858 | as a clean one. */ |
859 | void |
860 | add_clean_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) |
861 | { |
862 | int ready_ticks_size = get_max_uid () + 1; |
863 | |
864 | add_to_fences (new_fences, |
865 | succ, state_create (), create_deps_context (), |
866 | create_target_context (true), |
867 | NULL, NULL, |
868 | XCNEWVEC (int, ready_ticks_size), ready_ticks_size, |
869 | NULL, FENCE_CYCLE (fence) + 1, |
870 | 0, issue_rate, 1, FENCE_AFTER_STALL_P (fence)); |
871 | } |
872 | |
873 | /* Add a new fence to NEW_FENCES list and initialize all of its data |
874 | from FENCE and SUCC. */ |
875 | void |
876 | add_dirty_fence_to_fences (flist_tail_t new_fences, insn_t succ, fence_t fence) |
877 | { |
878 | int * new_ready_ticks |
879 | = XNEWVEC (int, FENCE_READY_TICKS_SIZE (fence)); |
880 | |
881 | memcpy (new_ready_ticks, FENCE_READY_TICKS (fence), |
882 | FENCE_READY_TICKS_SIZE (fence) * sizeof (int)); |
883 | add_to_fences (new_fences, |
884 | succ, state_create_copy (FENCE_STATE (fence)), |
885 | create_copy_of_deps_context (FENCE_DC (fence)), |
886 | create_copy_of_target_context (FENCE_TC (fence)), |
887 | FENCE_LAST_SCHEDULED_INSN (fence), |
888 | vec_safe_copy (FENCE_EXECUTING_INSNS (fence)), |
889 | new_ready_ticks, |
890 | FENCE_READY_TICKS_SIZE (fence), |
891 | FENCE_SCHED_NEXT (fence), |
892 | FENCE_CYCLE (fence), |
893 | FENCE_ISSUED_INSNS (fence), |
894 | FENCE_ISSUE_MORE (fence), |
895 | FENCE_STARTS_CYCLE_P (fence), |
896 | FENCE_AFTER_STALL_P (fence)); |
897 | } |
898 | |
899 | |
900 | /* Functions to work with regset and nop pools. */ |
901 | |
/* Return a regset from the pool. It might have some of the bits set
   from previous usage. */
904 | regset |
905 | get_regset_from_pool (void) |
906 | { |
907 | regset rs; |
908 | |
909 | if (regset_pool.n != 0) |
910 | rs = regset_pool.v[--regset_pool.n]; |
911 | else |
912 | /* We need to create the regset. */ |
913 | { |
914 | rs = ALLOC_REG_SET (®_obstack); |
915 | |
916 | if (regset_pool.nn == regset_pool.ss) |
917 | regset_pool.vv = XRESIZEVEC (regset, regset_pool.vv, |
918 | (regset_pool.ss = 2 * regset_pool.ss + 1)); |
919 | regset_pool.vv[regset_pool.nn++] = rs; |
920 | } |
921 | |
922 | regset_pool.diff++; |
923 | |
924 | return rs; |
925 | } |
926 | |
/* Same as above, but returns an empty regset. */
928 | regset |
929 | get_clear_regset_from_pool (void) |
930 | { |
931 | regset rs = get_regset_from_pool (); |
932 | |
933 | CLEAR_REG_SET (rs); |
934 | return rs; |
935 | } |
936 | |
937 | /* Return regset RS to the pool for future use. */ |
938 | void |
939 | return_regset_to_pool (regset rs) |
940 | { |
941 | gcc_assert (rs); |
942 | regset_pool.diff--; |
943 | |
944 | if (regset_pool.n == regset_pool.s) |
945 | regset_pool.v = XRESIZEVEC (regset, regset_pool.v, |
946 | (regset_pool.s = 2 * regset_pool.s + 1)); |
947 | regset_pool.v[regset_pool.n++] = rs; |
948 | } |
949 | |
950 | /* This is used as a qsort callback for sorting regset pool stacks. |
951 | X and XX are addresses of two regsets. They are never equal. */ |
952 | static int |
953 | cmp_v_in_regset_pool (const void *x, const void *xx) |
954 | { |
955 | uintptr_t r1 = (uintptr_t) *((const regset *) x); |
956 | uintptr_t r2 = (uintptr_t) *((const regset *) xx); |
957 | if (r1 > r2) |
958 | return 1; |
959 | else if (r1 < r2) |
960 | return -1; |
961 | gcc_unreachable (); |
962 | } |
963 | |
964 | /* Free the regset pool possibly checking for memory leaks. */ |
965 | void |
966 | free_regset_pool (void) |
967 | { |
968 | if (flag_checking) |
969 | { |
970 | regset *v = regset_pool.v; |
971 | int i = 0; |
972 | int n = regset_pool.n; |
973 | |
974 | regset *vv = regset_pool.vv; |
975 | int ii = 0; |
976 | int nn = regset_pool.nn; |
977 | |
978 | int diff = 0; |
979 | |
980 | gcc_assert (n <= nn); |
981 | |
982 | /* Sort both vectors so it will be possible to compare them. */ |
983 | qsort (v, n, sizeof (*v), cmp_v_in_regset_pool); |
984 | qsort (vv, nn, sizeof (*vv), cmp_v_in_regset_pool); |
985 | |
986 | while (ii < nn) |
987 | { |
988 | if (v[i] == vv[ii]) |
989 | i++; |
990 | else |
991 | /* VV[II] was lost. */ |
992 | diff++; |
993 | |
994 | ii++; |
995 | } |
996 | |
997 | gcc_assert (diff == regset_pool.diff); |
998 | } |
999 | |
/* If this does not hold, we have a memory leak. */
1001 | gcc_assert (regset_pool.diff == 0); |
1002 | |
1003 | while (regset_pool.n) |
1004 | { |
1005 | --regset_pool.n; |
1006 | FREE_REG_SET (regset_pool.v[regset_pool.n]); |
1007 | } |
1008 | |
1009 | free (regset_pool.v); |
1010 | regset_pool.v = NULL; |
1011 | regset_pool.s = 0; |
1012 | |
1013 | free (regset_pool.vv); |
1014 | regset_pool.vv = NULL; |
1015 | regset_pool.nn = 0; |
1016 | regset_pool.ss = 0; |
1017 | |
1018 | regset_pool.diff = 0; |
1019 | } |
1020 | |
1021 | |
1022 | /* Functions to work with nop pools. NOP insns are used as temporary |
1023 | placeholders of the insns being scheduled to allow correct update of |
1024 | the data sets. When update is finished, NOPs are deleted. */ |
1025 | |
1026 | /* A vinsn that is used to represent a nop. This vinsn is shared among all |
1027 | nops sel-sched generates. */ |
1028 | static vinsn_t nop_vinsn = NULL; |
1029 | |
/* Emit a nop before INSN, taking it from the pool. */
1031 | insn_t |
1032 | get_nop_from_pool (insn_t insn) |
1033 | { |
1034 | rtx nop_pat; |
1035 | insn_t nop; |
1036 | bool old_p = nop_pool.n != 0; |
1037 | int flags; |
1038 | |
1039 | if (old_p) |
1040 | nop_pat = nop_pool.v[--nop_pool.n]; |
1041 | else |
1042 | nop_pat = nop_pattern; |
1043 | |
1044 | nop = emit_insn_before (nop_pat, insn); |
1045 | |
1046 | if (old_p) |
1047 | flags = INSN_INIT_TODO_SSID; |
1048 | else |
1049 | flags = INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID; |
1050 | |
1051 | set_insn_init (INSN_EXPR (insn), nop_vinsn, INSN_SEQNO (insn)); |
1052 | sel_init_new_insn (nop, flags); |
1053 | |
1054 | return nop; |
1055 | } |
1056 | |
1057 | /* Remove NOP from the instruction stream and return it to the pool. */ |
1058 | void |
1059 | return_nop_to_pool (insn_t nop, bool full_tidying) |
1060 | { |
1061 | gcc_assert (INSN_IN_STREAM_P (nop)); |
1062 | sel_remove_insn (nop, false, full_tidying); |
1063 | |
1064 | /* We'll recycle this nop. */ |
1065 | nop->set_undeleted (); |
1066 | |
1067 | if (nop_pool.n == nop_pool.s) |
1068 | nop_pool.v = XRESIZEVEC (rtx_insn *, nop_pool.v, |
1069 | (nop_pool.s = 2 * nop_pool.s + 1)); |
1070 | nop_pool.v[nop_pool.n++] = nop; |
1071 | } |
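
/* For illustration (a sketch): a nop obtained while updating data sets is
   later recycled, so the pool only grows up to the maximum number of nops
   live at any one time:

     insn_t nop = get_nop_from_pool (insn);
     ...update the data sets...
     return_nop_to_pool (nop, true);  */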
1072 | |
1073 | /* Free the nop pool. */ |
1074 | void |
1075 | free_nop_pool (void) |
1076 | { |
1077 | nop_pool.n = 0; |
1078 | nop_pool.s = 0; |
1079 | free (nop_pool.v); |
1080 | nop_pool.v = NULL; |
1081 | } |
1082 | |
1083 | |
/* Skip an UNSPEC to support ia64 speculation. Called from rtx_equal_p_cb.
   The callback is given two rtxes XX and YY and writes the new rtxes
   to NX and NY in case something needs to be skipped. */
1087 | static int |
1088 | skip_unspecs_callback (const_rtx *xx, const_rtx *yy, rtx *nx, rtx* ny) |
1089 | { |
1090 | const_rtx x = *xx; |
1091 | const_rtx y = *yy; |
1092 | |
1093 | if (GET_CODE (x) == UNSPEC |
1094 | && (targetm.sched.skip_rtx_p == NULL |
1095 | || targetm.sched.skip_rtx_p (x))) |
1096 | { |
1097 | *nx = XVECEXP (x, 0, 0); |
1098 | *ny = CONST_CAST_RTX (y); |
1099 | return 1; |
1100 | } |
1101 | |
1102 | if (GET_CODE (y) == UNSPEC |
1103 | && (targetm.sched.skip_rtx_p == NULL |
1104 | || targetm.sched.skip_rtx_p (y))) |
1105 | { |
1106 | *nx = CONST_CAST_RTX (x); |
1107 | *ny = XVECEXP (y, 0, 0); |
1108 | return 1; |
1109 | } |
1110 | |
1111 | return 0; |
1112 | } |
1113 | |
/* Callback, called from hash_rtx_cb. Helps to hash an UNSPEC rtx X
   correctly to support ia64 speculation. When changes are needed, the new
   rtx NX and new mode NMODE are written, and the callback returns true. */
1117 | static int |
1118 | hash_with_unspec_callback (const_rtx x, machine_mode mode ATTRIBUTE_UNUSED, |
1119 | rtx *nx, machine_mode* nmode) |
1120 | { |
1121 | if (GET_CODE (x) == UNSPEC |
1122 | && targetm.sched.skip_rtx_p |
1123 | && targetm.sched.skip_rtx_p (x)) |
1124 | { |
*nx = XVECEXP (x, 0, 0);
1126 | *nmode = VOIDmode; |
1127 | return 1; |
1128 | } |
1129 | |
1130 | return 0; |
1131 | } |
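
/* For illustration (assuming the target's skip_rtx_p hook accepts the
   unspec): with the two callbacks above, an rtx like (unspec [(reg:DI r1)] N)
   hashes and compares equal to plain (reg:DI r1), so speculative variants of
   an insn do not break expression equality. */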
1132 | |
/* Return true if LHS and RHS are ok to be scheduled separately. */
1134 | static bool |
1135 | lhs_and_rhs_separable_p (rtx lhs, rtx rhs) |
1136 | { |
1137 | if (lhs == NULL || rhs == NULL) |
1138 | return false; |
1139 | |
/* Do not schedule constants as rhs: there is no point in using a reg
   if a const can be used. Moreover, scheduling a const as rhs may lead
   to a mode mismatch because consts don't have modes, but they could be
   merged from branches where the same const is used in different modes. */
1144 | if (CONSTANT_P (rhs)) |
1145 | return false; |
1146 | |
1147 | /* ??? Do not rename predicate registers to avoid ICEs in bundling. */ |
1148 | if (COMPARISON_P (rhs)) |
1149 | return false; |
1150 | |
1151 | /* Do not allow single REG to be an rhs. */ |
1152 | if (REG_P (rhs)) |
1153 | return false; |
1154 | |
1155 | /* See comment at find_used_regs_1 (*1) for explanation of this |
1156 | restriction. */ |
1157 | /* FIXME: remove this later. */ |
1158 | if (MEM_P (lhs)) |
1159 | return false; |
1160 | |
1161 | /* This will filter all tricky things like ZERO_EXTRACT etc. |
1162 | For now we don't handle it. */ |
1163 | if (!REG_P (lhs) && !MEM_P (lhs)) |
1164 | return false; |
1165 | |
1166 | return true; |
1167 | } |
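
/* For example, (set (reg:SI r1) (plus:SI (reg:SI r2) (const_int 4))) has a
   separable rhs, while a lone register, a constant, or a comparison as the
   rhs is rejected by the checks above. */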
1168 | |
1169 | /* Initialize vinsn VI for INSN. Only for use from vinsn_create (). When |
1170 | FORCE_UNIQUE_P is true, the resulting vinsn will not be clonable. This is |
1171 | used e.g. for insns from recovery blocks. */ |
1172 | static void |
1173 | vinsn_init (vinsn_t vi, insn_t insn, bool force_unique_p) |
1174 | { |
1175 | hash_rtx_callback_function hrcf; |
1176 | int insn_class; |
1177 | |
1178 | VINSN_INSN_RTX (vi) = insn; |
1179 | VINSN_COUNT (vi) = 0; |
1180 | vi->cost = -1; |
1181 | |
1182 | if (INSN_NOP_P (insn)) |
1183 | return; |
1184 | |
1185 | if (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL) |
1186 | init_id_from_df (VINSN_ID (vi), insn, force_unique_p); |
1187 | else |
1188 | deps_init_id (VINSN_ID (vi), insn, force_unique_p); |
1189 | |
1190 | /* Hash vinsn depending on whether it is separable or not. */ |
1191 | hrcf = targetm.sched.skip_rtx_p ? hash_with_unspec_callback : NULL; |
1192 | if (VINSN_SEPARABLE_P (vi)) |
1193 | { |
1194 | rtx rhs = VINSN_RHS (vi); |
1195 | |
1196 | VINSN_HASH (vi) = hash_rtx_cb (rhs, GET_MODE (rhs), |
1197 | NULL, NULL, false, hrcf); |
1198 | VINSN_HASH_RTX (vi) = hash_rtx_cb (VINSN_PATTERN (vi), |
1199 | VOIDmode, NULL, NULL, |
1200 | false, hrcf); |
1201 | } |
1202 | else |
1203 | { |
1204 | VINSN_HASH (vi) = hash_rtx_cb (VINSN_PATTERN (vi), VOIDmode, |
1205 | NULL, NULL, false, hrcf); |
1206 | VINSN_HASH_RTX (vi) = VINSN_HASH (vi); |
1207 | } |
1208 | |
1209 | insn_class = haifa_classify_insn (insn); |
1210 | if (insn_class >= 2 |
1211 | && (!targetm.sched.get_insn_spec_ds |
1212 | || ((targetm.sched.get_insn_spec_ds (insn) & BEGIN_CONTROL) |
1213 | == 0))) |
1214 | VINSN_MAY_TRAP_P (vi) = true; |
1215 | else |
1216 | VINSN_MAY_TRAP_P (vi) = false; |
1217 | } |
1218 | |
1219 | /* Indicate that VI has become the part of an rtx object. */ |
1220 | void |
1221 | vinsn_attach (vinsn_t vi) |
1222 | { |
1223 | /* Assert that VI is not pending for deletion. */ |
1224 | gcc_assert (VINSN_INSN_RTX (vi)); |
1225 | |
1226 | VINSN_COUNT (vi)++; |
1227 | } |
1228 | |
/* Create and init VI from INSN. Use FORCE_UNIQUE_P for determining the
   correct VINSN_TYPE (VI). */
1231 | static vinsn_t |
1232 | vinsn_create (insn_t insn, bool force_unique_p) |
1233 | { |
1234 | vinsn_t vi = XCNEW (struct vinsn_def); |
1235 | |
1236 | vinsn_init (vi, insn, force_unique_p); |
1237 | return vi; |
1238 | } |
1239 | |
1240 | /* Return a copy of VI. When REATTACH_P is true, detach VI and attach |
1241 | the copy. */ |
1242 | vinsn_t |
1243 | vinsn_copy (vinsn_t vi, bool reattach_p) |
1244 | { |
1245 | rtx_insn *copy; |
1246 | bool unique = VINSN_UNIQUE_P (vi); |
1247 | vinsn_t new_vi; |
1248 | |
1249 | copy = create_copy_of_insn_rtx (VINSN_INSN_RTX (vi)); |
1250 | new_vi = create_vinsn_from_insn_rtx (copy, unique); |
1251 | if (reattach_p) |
1252 | { |
1253 | vinsn_detach (vi); |
1254 | vinsn_attach (new_vi); |
1255 | } |
1256 | |
1257 | return new_vi; |
1258 | } |
1259 | |
1260 | /* Delete the VI vinsn and free its data. */ |
1261 | static void |
1262 | vinsn_delete (vinsn_t vi) |
1263 | { |
1264 | gcc_assert (VINSN_COUNT (vi) == 0); |
1265 | |
1266 | if (!INSN_NOP_P (VINSN_INSN_RTX (vi))) |
1267 | { |
1268 | return_regset_to_pool (VINSN_REG_SETS (vi)); |
1269 | return_regset_to_pool (VINSN_REG_USES (vi)); |
1270 | return_regset_to_pool (VINSN_REG_CLOBBERS (vi)); |
1271 | } |
1272 | |
1273 | free (vi); |
1274 | } |
1275 | |
1276 | /* Indicate that VI is no longer a part of some rtx object. |
1277 | Remove VI if it is no longer needed. */ |
1278 | void |
1279 | vinsn_detach (vinsn_t vi) |
1280 | { |
1281 | gcc_assert (VINSN_COUNT (vi) > 0); |
1282 | |
1283 | if (--VINSN_COUNT (vi) == 0) |
1284 | vinsn_delete (vi); |
1285 | } |
1286 | |
1287 | /* Returns TRUE if VI is a branch. */ |
1288 | bool |
1289 | vinsn_cond_branch_p (vinsn_t vi) |
1290 | { |
1291 | insn_t insn; |
1292 | |
1293 | if (!VINSN_UNIQUE_P (vi)) |
1294 | return false; |
1295 | |
1296 | insn = VINSN_INSN_RTX (vi); |
1297 | if (BB_END (BLOCK_FOR_INSN (insn)) != insn) |
1298 | return false; |
1299 | |
1300 | return control_flow_insn_p (insn); |
1301 | } |
1302 | |
1303 | /* Return latency of INSN. */ |
1304 | static int |
1305 | sel_insn_rtx_cost (rtx_insn *insn) |
1306 | { |
1307 | int cost; |
1308 | |
1309 | /* A USE insn, or something else we don't need to |
1310 | understand. We can't pass these directly to |
1311 | result_ready_cost or insn_default_latency because it will |
1312 | trigger a fatal error for unrecognizable insns. */ |
1313 | if (recog_memoized (insn) < 0) |
1314 | cost = 0; |
1315 | else |
1316 | { |
1317 | cost = insn_default_latency (insn); |
1318 | |
1319 | if (cost < 0) |
1320 | cost = 0; |
1321 | } |
1322 | |
1323 | return cost; |
1324 | } |
1325 | |
1326 | /* Return the cost of the VI. |
1327 | !!! FIXME: Unify with haifa-sched.c: insn_sched_cost (). */ |
1328 | int |
1329 | sel_vinsn_cost (vinsn_t vi) |
1330 | { |
1331 | int cost = vi->cost; |
1332 | |
1333 | if (cost < 0) |
1334 | { |
1335 | cost = sel_insn_rtx_cost (VINSN_INSN_RTX (vi)); |
1336 | vi->cost = cost; |
1337 | } |
1338 | |
1339 | return cost; |
1340 | } |
1341 | |
1342 | |
1343 | /* Functions for insn emitting. */ |
1344 | |
1345 | /* Emit new insn after AFTER based on PATTERN and initialize its data from |
1346 | EXPR and SEQNO. */ |
1347 | insn_t |
1348 | sel_gen_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, insn_t after) |
1349 | { |
1350 | insn_t new_insn; |
1351 | |
1352 | gcc_assert (EXPR_TARGET_AVAILABLE (expr) == true); |
1353 | |
1354 | new_insn = emit_insn_after (pattern, after); |
1355 | set_insn_init (expr, NULL, seqno); |
1356 | sel_init_new_insn (new_insn, INSN_INIT_TODO_LUID | INSN_INIT_TODO_SSID); |
1357 | |
1358 | return new_insn; |
1359 | } |
1360 | |
1361 | /* Force newly generated vinsns to be unique. */ |
1362 | static bool init_insn_force_unique_p = false; |
1363 | |
1364 | /* Emit new speculation recovery insn after AFTER based on PATTERN and |
1365 | initialize its data from EXPR and SEQNO. */ |
1366 | insn_t |
1367 | sel_gen_recovery_insn_from_rtx_after (rtx pattern, expr_t expr, int seqno, |
1368 | insn_t after) |
1369 | { |
1370 | insn_t insn; |
1371 | |
1372 | gcc_assert (!init_insn_force_unique_p); |
1373 | |
1374 | init_insn_force_unique_p = true; |
1375 | insn = sel_gen_insn_from_rtx_after (pattern, expr, seqno, after); |
1376 | CANT_MOVE (insn) = 1; |
1377 | init_insn_force_unique_p = false; |
1378 | |
1379 | return insn; |
1380 | } |
1381 | |
1382 | /* Emit new insn after AFTER based on EXPR and SEQNO. If VINSN is not NULL, |
1383 | take it as a new vinsn instead of EXPR's vinsn. |
1384 | We simplify insns later, after scheduling region in |
1385 | simplify_changed_insns. */ |
1386 | insn_t |
1387 | sel_gen_insn_from_expr_after (expr_t expr, vinsn_t vinsn, int seqno, |
1388 | insn_t after) |
1389 | { |
1390 | expr_t emit_expr; |
1391 | insn_t insn; |
1392 | int flags; |
1393 | |
1394 | emit_expr = set_insn_init (expr, vinsn ? vinsn : EXPR_VINSN (expr), |
1395 | seqno); |
1396 | insn = EXPR_INSN_RTX (emit_expr); |
1397 | |
1398 | /* The insn may come from the transformation cache, which may hold already |
1399 | deleted insns, so mark it as not deleted. */ |
1400 | insn->set_undeleted (); |
1401 | |
1402 | add_insn_after (insn, after, BLOCK_FOR_INSN (insn)); |
1403 | |
1404 | flags = INSN_INIT_TODO_SSID; |
1405 | if (INSN_LUID (insn) == 0) |
1406 | flags |= INSN_INIT_TODO_LUID; |
1407 | sel_init_new_insn (insn, flags); |
1408 | |
1409 | return insn; |
1410 | } |
1411 | |
1412 | /* Move insn from EXPR after AFTER. */ |
1413 | insn_t |
1414 | sel_move_insn (expr_t expr, int seqno, insn_t after) |
1415 | { |
1416 | insn_t insn = EXPR_INSN_RTX (expr); |
1417 | basic_block bb = BLOCK_FOR_INSN (after); |
1418 | insn_t next = NEXT_INSN (after); |
1419 | |
1420 | /* Assert that in move_op we disconnected this insn properly. */ |
1421 | gcc_assert (EXPR_VINSN (INSN_EXPR (insn)) != NULL); |
1422 | SET_PREV_INSN (insn) = after; |
1423 | SET_NEXT_INSN (insn) = next; |
1424 | |
1425 | SET_NEXT_INSN (after) = insn; |
1426 | SET_PREV_INSN (next) = insn; |
1427 | |
1428 | /* Update links from insn to bb and vice versa. */ |
1429 | df_insn_change_bb (insn, bb); |
1430 | if (BB_END (bb) == after) |
1431 | BB_END (bb) = insn; |
1432 | |
1433 | prepare_insn_expr (insn, seqno); |
1434 | return insn; |
1435 | } |
1436 | |
1437 | |
1438 | /* Functions to work with right-hand sides. */ |
1439 | |
1440 | /* Search for a hash value determined by UID/NEW_VINSN in a sorted vector |
1441 | VECT and return true when found. Use NEW_VINSN for comparison only when |
1442 | COMPARE_VINSNS is true. Write to INDP the index on which |
1443 | the search has stopped, such that inserting the new element at INDP will |
1444 | retain VECT's sort order. */ |
1445 | static bool |
1446 | find_in_history_vect_1 (vec<expr_history_def> vect, |
1447 | unsigned uid, vinsn_t new_vinsn, |
1448 | bool compare_vinsns, int *indp) |
1449 | { |
1450 | expr_history_def *arr; |
1451 | int i, j, len = vect.length (); |
1452 | |
1453 | if (len == 0) |
1454 | { |
1455 | *indp = 0; |
1456 | return false; |
1457 | } |
1458 | |
1459 | arr = vect.address (); |
1460 | i = 0, j = len - 1; |
1461 | |
1462 | while (i <= j) |
1463 | { |
1464 | unsigned auid = arr[i].uid; |
1465 | vinsn_t avinsn = arr[i].new_expr_vinsn; |
1466 | |
1467 | if (auid == uid |
1468 | /* When undoing transformation on a bookkeeping copy, the new vinsn |
1469 | may not be exactly equal to the one that is saved in the vector. |
1470 | This is because the insn whose copy we're checking was possibly |
1471 | substituted itself. */ |
1472 | && (! compare_vinsns |
1473 | || vinsn_equal_p (avinsn, new_vinsn))) |
1474 | { |
1475 | *indp = i; |
1476 | return true; |
1477 | } |
1478 | else if (auid > uid) |
1479 | break; |
1480 | i++; |
1481 | } |
1482 | |
1483 | *indp = i; |
1484 | return false; |
1485 | } |
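
/* E.g. for a vector whose uids are {2, 5, 9} and UID == 7, the loop above
   stops with *INDP == 2 (the slot before uid 9), so inserting the new
   element at *INDP keeps the vector sorted. */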
1486 | |
1487 | /* Search for a uid of INSN and NEW_VINSN in a sorted vector VECT. Return |
1488 | the position found or -1, if no such value is in vector. |
1489 | Search also for UIDs of insn's originators, if ORIGINATORS_P is true. */ |
1490 | int |
1491 | find_in_history_vect (vec<expr_history_def> vect, rtx insn, |
1492 | vinsn_t new_vinsn, bool originators_p) |
1493 | { |
1494 | int ind; |
1495 | |
1496 | if (find_in_history_vect_1 (vect, INSN_UID (insn), new_vinsn, |
1497 | false, &ind)) |
1498 | return ind; |
1499 | |
1500 | if (INSN_ORIGINATORS (insn) && originators_p) |
1501 | { |
1502 | unsigned uid; |
1503 | bitmap_iterator bi; |
1504 | |
1505 | EXECUTE_IF_SET_IN_BITMAP (INSN_ORIGINATORS (insn), 0, uid, bi) |
1506 | if (find_in_history_vect_1 (vect, uid, new_vinsn, false, &ind)) |
1507 | return ind; |
1508 | } |
1509 | |
1510 | return -1; |
1511 | } |
1512 | |
1513 | /* Insert new element in a sorted history vector pointed to by PVECT, |
1514 | if it is not there already. The element is searched using |
1515 | UID/NEW_EXPR_VINSN pair. TYPE, OLD_EXPR_VINSN and SPEC_DS save |
1516 | the history of a transformation. */ |
1517 | void |
1518 | insert_in_history_vect (vec<expr_history_def> *pvect, |
1519 | unsigned uid, enum local_trans_type type, |
1520 | vinsn_t old_expr_vinsn, vinsn_t new_expr_vinsn, |
1521 | ds_t spec_ds) |
1522 | { |
1523 | vec<expr_history_def> vect = *pvect; |
1524 | expr_history_def temp; |
1525 | bool res; |
1526 | int ind; |
1527 | |
1528 | res = find_in_history_vect_1 (vect, uid, new_expr_vinsn, true, &ind); |
1529 | |
1530 | if (res) |
1531 | { |
1532 | expr_history_def *phist = &vect[ind]; |
1533 | |
1534 | /* It is possible that speculation types of expressions that were |
1535 | propagated through different paths will be different here. In this |
1536 | case, merge the status to get the correct check later. */ |
1537 | if (phist->spec_ds != spec_ds) |
1538 | phist->spec_ds = ds_max_merge (phist->spec_ds, spec_ds); |
1539 | return; |
1540 | } |
1541 | |
1542 | temp.uid = uid; |
1543 | temp.old_expr_vinsn = old_expr_vinsn; |
1544 | temp.new_expr_vinsn = new_expr_vinsn; |
1545 | temp.spec_ds = spec_ds; |
1546 | temp.type = type; |
1547 | |
1548 | vinsn_attach (old_expr_vinsn); |
1549 | vinsn_attach (new_expr_vinsn); |
1550 | vect.safe_insert (ind, temp); |
1551 | *pvect = vect; |
1552 | } |
1553 | |
1554 | /* Free history vector PVECT. */ |
1555 | static void |
1556 | free_history_vect (vec<expr_history_def> &pvect) |
1557 | { |
1558 | unsigned i; |
1559 | expr_history_def *phist; |
1560 | |
1561 | if (! pvect.exists ()) |
1562 | return; |
1563 | |
1564 | for (i = 0; pvect.iterate (i, &phist); i++) |
1565 | { |
1566 | vinsn_detach (phist->old_expr_vinsn); |
1567 | vinsn_detach (phist->new_expr_vinsn); |
1568 | } |
1569 | |
1570 | pvect.release (); |
1571 | } |
1572 | |
1573 | /* Merge vector FROM to PVECT. */ |
1574 | static void |
1575 | merge_history_vect (vec<expr_history_def> *pvect, |
1576 | vec<expr_history_def> from) |
1577 | { |
1578 | expr_history_def *phist; |
1579 | int i; |
1580 | |
1581 | /* We keep this vector sorted. */ |
1582 | for (i = 0; from.iterate (i, &phist); i++) |
1583 | insert_in_history_vect (pvect, phist->uid, phist->type, |
1584 | phist->old_expr_vinsn, phist->new_expr_vinsn, |
1585 | phist->spec_ds); |
1586 | } |
1587 | |
1588 | /* Compare two vinsns as rhses if possible and as vinsns otherwise. */ |
1589 | bool |
1590 | vinsn_equal_p (vinsn_t x, vinsn_t y) |
1591 | { |
1592 | rtx_equal_p_callback_function repcf; |
1593 | |
1594 | if (x == y) |
1595 | return true; |
1596 | |
1597 | if (VINSN_TYPE (x) != VINSN_TYPE (y)) |
1598 | return false; |
1599 | |
1600 | if (VINSN_HASH (x) != VINSN_HASH (y)) |
1601 | return false; |
1602 | |
1603 | repcf = targetm.sched.skip_rtx_p ? skip_unspecs_callback : NULL; |
1604 | if (VINSN_SEPARABLE_P (x)) |
1605 | { |
1606 | /* Compare RHSes of VINSNs. */ |
1607 | gcc_assert (VINSN_RHS (x)); |
1608 | gcc_assert (VINSN_RHS (y)); |
1609 | |
1610 | return rtx_equal_p_cb (VINSN_RHS (x), VINSN_RHS (y), repcf); |
1611 | } |
1612 | |
1613 | return rtx_equal_p_cb (VINSN_PATTERN (x), VINSN_PATTERN (y), repcf); |
1614 | } |
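
/* For illustration: for separable vinsns only the rhs is compared, so
   (set (reg:SI r1) (plus:SI (reg:SI r2) (reg:SI r3))) and
   (set (reg:SI r4) (plus:SI (reg:SI r2) (reg:SI r3))) are considered equal,
   while non-separable vinsns are compared as whole patterns. */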
1615 | |
1616 | |
1617 | /* Functions for working with expressions. */ |
1618 | |
1619 | /* Initialize EXPR. */ |
1620 | static void |
1621 | init_expr (expr_t expr, vinsn_t vi, int spec, int use, int priority, |
1622 | int sched_times, int orig_bb_index, ds_t spec_done_ds, |
1623 | ds_t spec_to_check_ds, int orig_sched_cycle, |
1624 | vec<expr_history_def> history, |
1625 | signed char target_available, |
1626 | bool was_substituted, bool was_renamed, bool needs_spec_check_p, |
1627 | bool cant_move) |
1628 | { |
1629 | vinsn_attach (vi); |
1630 | |
1631 | EXPR_VINSN (expr) = vi; |
1632 | EXPR_SPEC (expr) = spec; |
1633 | EXPR_USEFULNESS (expr) = use; |
1634 | EXPR_PRIORITY (expr) = priority; |
1635 | EXPR_PRIORITY_ADJ (expr) = 0; |
1636 | EXPR_SCHED_TIMES (expr) = sched_times; |
1637 | EXPR_ORIG_BB_INDEX (expr) = orig_bb_index; |
1638 | EXPR_ORIG_SCHED_CYCLE (expr) = orig_sched_cycle; |
1639 | EXPR_SPEC_DONE_DS (expr) = spec_done_ds; |
1640 | EXPR_SPEC_TO_CHECK_DS (expr) = spec_to_check_ds; |
1641 | |
1642 | if (history.exists ()) |
1643 | EXPR_HISTORY_OF_CHANGES (expr) = history; |
1644 | else |
1645 | EXPR_HISTORY_OF_CHANGES (expr).create (0); |
1646 | |
1647 | EXPR_TARGET_AVAILABLE (expr) = target_available; |
1648 | EXPR_WAS_SUBSTITUTED (expr) = was_substituted; |
1649 | EXPR_WAS_RENAMED (expr) = was_renamed; |
1650 | EXPR_NEEDS_SPEC_CHECK_P (expr) = needs_spec_check_p; |
1651 | EXPR_CANT_MOVE (expr) = cant_move; |
1652 | } |
1653 | |
1654 | /* Make a copy of the expr FROM into the expr TO. */ |
1655 | void |
1656 | copy_expr (expr_t to, expr_t from) |
1657 | { |
1658 | vec<expr_history_def> temp = vNULL; |
1659 | |
1660 | if (EXPR_HISTORY_OF_CHANGES (from).exists ()) |
1661 | { |
1662 | unsigned i; |
1663 | expr_history_def *phist; |
1664 | |
1665 | temp = EXPR_HISTORY_OF_CHANGES (from).copy (); |
1666 | for (i = 0; |
1667 | temp.iterate (i, &phist); |
1668 | i++) |
1669 | { |
1670 | vinsn_attach (phist->old_expr_vinsn); |
1671 | vinsn_attach (phist->new_expr_vinsn); |
1672 | } |
1673 | } |
1674 | |
1675 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), |
1676 | EXPR_USEFULNESS (from), EXPR_PRIORITY (from), |
1677 | EXPR_SCHED_TIMES (from), EXPR_ORIG_BB_INDEX (from), |
1678 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), |
1679 | EXPR_ORIG_SCHED_CYCLE (from), temp, |
1680 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
1681 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
1682 | EXPR_CANT_MOVE (from)); |
1683 | } |
1684 | |
1685 | /* Same, but the final expr will not ever be in av sets, so don't copy |
1686 | "uninteresting" data such as bitmap cache. */ |
1687 | void |
1688 | copy_expr_onside (expr_t to, expr_t from) |
1689 | { |
1690 | init_expr (to, EXPR_VINSN (from), EXPR_SPEC (from), EXPR_USEFULNESS (from), |
1691 | EXPR_PRIORITY (from), EXPR_SCHED_TIMES (from), 0, |
1692 | EXPR_SPEC_DONE_DS (from), EXPR_SPEC_TO_CHECK_DS (from), 0, |
1693 | vNULL, |
1694 | EXPR_TARGET_AVAILABLE (from), EXPR_WAS_SUBSTITUTED (from), |
1695 | EXPR_WAS_RENAMED (from), EXPR_NEEDS_SPEC_CHECK_P (from), |
1696 | EXPR_CANT_MOVE (from)); |
1697 | } |
1698 | |
1699 | /* Prepare the expr of INSN for scheduling. Used when moving insn and when |
1700 | initializing new insns. */ |
1701 | static void |
1702 | prepare_insn_expr (insn_t insn, int seqno) |
1703 | { |
1704 | expr_t expr = INSN_EXPR (insn); |
1705 | ds_t ds; |
1706 | |
1707 | INSN_SEQNO (insn) = seqno; |
1708 | EXPR_ORIG_BB_INDEX (expr) = BLOCK_NUM (insn); |
1709 | EXPR_SPEC (expr) = 0; |
1710 | EXPR_ORIG_SCHED_CYCLE (expr) = 0; |
1711 | EXPR_WAS_SUBSTITUTED (expr) = 0; |
1712 | EXPR_WAS_RENAMED (expr) = 0; |
1713 | EXPR_TARGET_AVAILABLE (expr) = 1; |
1714 | INSN_LIVE_VALID_P (insn) = false; |
1715 | |
1716 | /* ??? If this expression is speculative, make its dependence |
1717 | as weak as possible. We can filter this expression later |
1718 | in process_spec_exprs, because we do not distinguish |
1719 | between the status we got during compute_av_set and the |
1720 | existing status. To be fixed. */ |
1721 | ds = EXPR_SPEC_DONE_DS (expr); |
1722 | if (ds) |
1723 | EXPR_SPEC_DONE_DS (expr) = ds_get_max_dep_weak (ds); |
1724 | |
1725 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
1726 | } |
1727 | |
1728 | /* Update target_available bits when merging exprs TO and FROM. SPLIT_POINT |
1729 | is non-null when expressions are merged from different successors at |
1730 | a split point. */ |
1731 | static void |
1732 | update_target_availability (expr_t to, expr_t from, insn_t split_point) |
1733 | { |
1734 | if (EXPR_TARGET_AVAILABLE (to) < 0 |
1735 | || EXPR_TARGET_AVAILABLE (from) < 0) |
1736 | EXPR_TARGET_AVAILABLE (to) = -1; |
1737 | else |
1738 | { |
1739 | /* We try to detect the case when one of the expressions |
1740 | can only be reached through another one. In this case, |
1741 | we can do better. */ |
1742 | if (split_point == NULL) |
1743 | { |
1744 | int toind, fromind; |
1745 | |
1746 | toind = EXPR_ORIG_BB_INDEX (to); |
1747 | fromind = EXPR_ORIG_BB_INDEX (from); |
1748 | |
1749 | if (toind && toind == fromind) |
1750 | /* Do nothing -- everything is done in |
1751 | merge_with_other_exprs. */ |
1752 | ; |
1753 | else |
1754 | EXPR_TARGET_AVAILABLE (to) = -1; |
1755 | } |
1756 | else if (EXPR_TARGET_AVAILABLE (from) == 0 |
1757 | && EXPR_LHS (from) |
1758 | && REG_P (EXPR_LHS (from)) |
1759 | && REGNO (EXPR_LHS (to)) != REGNO (EXPR_LHS (from))) |
1760 | EXPR_TARGET_AVAILABLE (to) = -1; |
1761 | else |
1762 | EXPR_TARGET_AVAILABLE (to) &= EXPR_TARGET_AVAILABLE (from); |
1763 | } |
1764 | } |
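
/* For illustration: at a split point, merging two exprs that are both
   available (1) keeps 1; merging with a FROM expr that is unavailable (0)
   yields 0 when both set the same register, but -1 (unknown) when FROM sets
   a different one. Without a split point, availability is preserved only
   when both exprs originate from the same block. */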
1765 | |
1766 | /* Update speculation bits when merging exprs TO and FROM. SPLIT_POINT |
1767 | is non-null when expressions are merged from different successors at |
1768 | a split point. */ |
1769 | static void |
1770 | update_speculative_bits (expr_t to, expr_t from, insn_t split_point) |
1771 | { |
1772 | ds_t old_to_ds, old_from_ds; |
1773 | |
1774 | old_to_ds = EXPR_SPEC_DONE_DS (to); |
1775 | old_from_ds = EXPR_SPEC_DONE_DS (from); |
1776 | |
1777 | EXPR_SPEC_DONE_DS (to) = ds_max_merge (old_to_ds, old_from_ds); |
1778 | EXPR_SPEC_TO_CHECK_DS (to) |= EXPR_SPEC_TO_CHECK_DS (from); |
1779 | EXPR_NEEDS_SPEC_CHECK_P (to) |= EXPR_NEEDS_SPEC_CHECK_P (from); |
1780 | |
1781 | /* When merging e.g. control & data speculative exprs, or a control |
1782 | speculative with a control&data speculative one, we really have |
1783 | to change vinsn too. Also, when speculative status is changed, |
1784 | we also need to record this as a transformation in expr's history. */ |
1785 | if ((old_to_ds & SPECULATIVE) || (old_from_ds & SPECULATIVE)) |
1786 | { |
1787 | old_to_ds = ds_get_speculation_types (old_to_ds); |
1788 | old_from_ds = ds_get_speculation_types (old_from_ds); |
1789 | |
1790 | if (old_to_ds != old_from_ds) |
1791 | { |
1792 | ds_t record_ds; |
1793 | |
1794 | /* When both expressions are speculative, we need to change |
1795 | the vinsn first. */ |
1796 | if ((old_to_ds & SPECULATIVE) && (old_from_ds & SPECULATIVE)) |
1797 | { |
1798 | int res; |
1799 | |
1800 | res = speculate_expr (to, EXPR_SPEC_DONE_DS (to)); |
1801 | gcc_assert (res >= 0); |
1802 | } |
1803 | |
1804 | if (split_point != NULL) |
1805 | { |
1806 | /* Record the change with proper status. */ |
1807 | record_ds = EXPR_SPEC_DONE_DS (to) & SPECULATIVE; |
1808 | record_ds &= ~(old_to_ds & SPECULATIVE); |
1809 | record_ds &= ~(old_from_ds & SPECULATIVE); |
1810 | |
1811 | insert_in_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
1812 | INSN_UID (split_point), TRANS_SPECULATION, |
1813 | EXPR_VINSN (from), EXPR_VINSN (to), |
1814 | record_ds); |
1815 | } |
1816 | } |
1817 | } |
1818 | } |
1819 | |
1820 | |
1821 | /* Merge bits of FROM expr to TO expr. When SPLIT_POINT is not NULL, |
1822 | this is done along different paths. */ |
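/* For example, usefulness at a split point is additive: the per-successor
   values will normally have been scaled by branch probabilities (see
   av_set_split_usefulness), so their sum is expected to stay within
   REG_BR_PROB_BASE, which merge_expr asserts.  Along a single path the
   maximum of the two values is kept instead.  */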
1823 | void |
1824 | merge_expr_data (expr_t to, expr_t from, insn_t split_point) |
1825 | { |
1826 | /* Choose the maximum of the specs of merged exprs. This is required |
1827 | for correctness of bookkeeping. */ |
1828 | if (EXPR_SPEC (to) < EXPR_SPEC (from)) |
1829 | EXPR_SPEC (to) = EXPR_SPEC (from); |
1830 | |
1831 | if (split_point) |
1832 | EXPR_USEFULNESS (to) += EXPR_USEFULNESS (from); |
1833 | else |
1834 | EXPR_USEFULNESS (to) = MAX (EXPR_USEFULNESS (to), |
1835 | EXPR_USEFULNESS (from)); |
1836 | |
1837 | if (EXPR_PRIORITY (to) < EXPR_PRIORITY (from)) |
1838 | EXPR_PRIORITY (to) = EXPR_PRIORITY (from); |
1839 | |
1840 | if (EXPR_SCHED_TIMES (to) > EXPR_SCHED_TIMES (from)) |
1841 | EXPR_SCHED_TIMES (to) = EXPR_SCHED_TIMES (from); |
1842 | |
1843 | if (EXPR_ORIG_BB_INDEX (to) != EXPR_ORIG_BB_INDEX (from)) |
1844 | EXPR_ORIG_BB_INDEX (to) = 0; |
1845 | |
1846 | EXPR_ORIG_SCHED_CYCLE (to) = MIN (EXPR_ORIG_SCHED_CYCLE (to), |
1847 | EXPR_ORIG_SCHED_CYCLE (from)); |
1848 | |
1849 | EXPR_WAS_SUBSTITUTED (to) |= EXPR_WAS_SUBSTITUTED (from); |
1850 | EXPR_WAS_RENAMED (to) |= EXPR_WAS_RENAMED (from); |
1851 | EXPR_CANT_MOVE (to) |= EXPR_CANT_MOVE (from); |
1852 | |
1853 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (to), |
1854 | EXPR_HISTORY_OF_CHANGES (from)); |
1855 | update_target_availability (to, from, split_point); |
1856 | update_speculative_bits (to, from, split_point); |
1857 | } |
1858 | |
1859 | /* Merge bits of FROM expr to TO expr. Vinsns in the exprs should be equal |
1860 | in terms of vinsn_equal_p. SPLIT_POINT is non-null when expressions |
1861 | are merged from different successors at a split point. */ |
1862 | void |
1863 | merge_expr (expr_t to, expr_t from, insn_t split_point) |
1864 | { |
1865 | vinsn_t to_vi = EXPR_VINSN (to); |
1866 | vinsn_t from_vi = EXPR_VINSN (from); |
1867 | |
1868 | gcc_assert (vinsn_equal_p (to_vi, from_vi)); |
1869 | |
1870 | /* Make sure that speculative pattern is propagated into exprs that |
1871 | have non-speculative one. This will provide us with consistent |
1872 | speculative bits and speculative patterns inside expr. */ |
1873 | if (EXPR_SPEC_DONE_DS (to) == 0 |
1874 | && (EXPR_SPEC_DONE_DS (from) != 0 |
1875 | /* Do likewise for volatile insns, so that we always retain |
1876 | the may_trap_p bit on the resulting expression. However, |
1877 | avoid propagating the trapping bit into the instructions |
1878 | already speculated. This would result in replacing the |
1879 | speculative pattern with the non-speculative one and breaking |
1880 | the speculation support. */ |
1881 | || (!VINSN_MAY_TRAP_P (EXPR_VINSN (to)) |
1882 | && VINSN_MAY_TRAP_P (EXPR_VINSN (from))))) |
1883 | change_vinsn_in_expr (to, EXPR_VINSN (from)); |
1884 | |
1885 | merge_expr_data (to, from, split_point); |
1886 | gcc_assert (EXPR_USEFULNESS (to) <= REG_BR_PROB_BASE); |
1887 | } |
1888 | |
1889 | /* Clear the information of this EXPR. */ |
1890 | void |
1891 | clear_expr (expr_t expr) |
1892 | { |
1894 | vinsn_detach (EXPR_VINSN (expr)); |
1895 | EXPR_VINSN (expr) = NULL; |
1896 | |
1897 | free_history_vect (EXPR_HISTORY_OF_CHANGES (expr)); |
1898 | } |
1899 | |
1900 | /* For a given LV_SET, mark EXPR having unavailable target register. */ |
1901 | static void |
1902 | set_unavailable_target_for_expr (expr_t expr, regset lv_set) |
1903 | { |
1904 | if (EXPR_SEPARABLE_P (expr)) |
1905 | { |
1906 | if (REG_P (EXPR_LHS (expr)) |
1907 | && register_unavailable_p (lv_set, EXPR_LHS (expr))) |
1908 | { |
1909 | /* If it's an insn like r1 = use (r1, ...), and it exists in |
1910 | different forms in each of the av_sets being merged, we can't say |
1911 | whether original destination register is available or not. |
However, this still works if the destination register is not used
in the original expression: if the branch whose LV_SET we're
looking at here is not actually the 'other branch' in the sense that the
same expression is available through it (which can't be determined
at computation stage because of transformations on one of the
branches), it still won't affect the availability.
Liveness of a register somewhere on a code motion path means
it's either read somewhere on a code motion path, live on the
'other' branch, live at the point immediately following
the original operation, or is read by the original operation.
The latter case is filtered out in the condition below.
It still doesn't cover the case when a register is defined and used
somewhere within the code motion path; in this case we could
miss a unifying code motion along both branches using a renamed
register, but it won't affect code correctness since upon
an actual code motion bookkeeping code would be generated. */
1928 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
1929 | EXPR_LHS (expr))) |
1930 | EXPR_TARGET_AVAILABLE (expr) = -1; |
1931 | else |
1932 | EXPR_TARGET_AVAILABLE (expr) = false; |
1933 | } |
1934 | } |
1935 | else |
1936 | { |
1937 | unsigned regno; |
1938 | reg_set_iterator rsi; |
1939 | |
1940 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_SETS (EXPR_VINSN (expr)), |
1941 | 0, regno, rsi) |
1942 | if (bitmap_bit_p (lv_set, regno)) |
1943 | { |
1944 | EXPR_TARGET_AVAILABLE (expr) = false; |
1945 | break; |
1946 | } |
1947 | |
1948 | EXECUTE_IF_SET_IN_REG_SET (VINSN_REG_CLOBBERS (EXPR_VINSN (expr)), |
1949 | 0, regno, rsi) |
1950 | if (bitmap_bit_p (lv_set, regno)) |
1951 | { |
1952 | EXPR_TARGET_AVAILABLE (expr) = false; |
1953 | break; |
1954 | } |
1955 | } |
1956 | } |
1957 | |
/* Try to make EXPR speculative.  Return 1 when EXPR's pattern
   or dependence status has changed, 2 when additionally the target register
   became unavailable, 0 if nothing had to be changed, or -1 when
   speculation is not possible.  */
1961 | int |
1962 | speculate_expr (expr_t expr, ds_t ds) |
1963 | { |
1964 | int res; |
1965 | rtx_insn *orig_insn_rtx; |
1966 | rtx spec_pat; |
1967 | ds_t target_ds, current_ds; |
1968 | |
1969 | /* Obtain the status we need to put on EXPR. */ |
1970 | target_ds = (ds & SPECULATIVE); |
1971 | current_ds = EXPR_SPEC_DONE_DS (expr); |
1972 | ds = ds_full_merge (current_ds, target_ds, NULL_RTX, NULL_RTX); |
1973 | |
1974 | orig_insn_rtx = EXPR_INSN_RTX (expr); |
1975 | |
1976 | res = sched_speculate_insn (orig_insn_rtx, ds, &spec_pat); |
1977 | |
1978 | switch (res) |
1979 | { |
1980 | case 0: |
1981 | EXPR_SPEC_DONE_DS (expr) = ds; |
1982 | return current_ds != ds ? 1 : 0; |
1983 | |
1984 | case 1: |
1985 | { |
1986 | rtx_insn *spec_insn_rtx = |
1987 | create_insn_rtx_from_pattern (spec_pat, NULL_RTX); |
1988 | vinsn_t spec_vinsn = create_vinsn_from_insn_rtx (spec_insn_rtx, false); |
1989 | |
1990 | change_vinsn_in_expr (expr, spec_vinsn); |
1991 | EXPR_SPEC_DONE_DS (expr) = ds; |
1992 | EXPR_NEEDS_SPEC_CHECK_P (expr) = true; |
1993 | |
1994 | /* Do not allow clobbering the address register of speculative |
1995 | insns. */ |
1996 | if (register_unavailable_p (VINSN_REG_USES (EXPR_VINSN (expr)), |
1997 | expr_dest_reg (expr))) |
1998 | { |
1999 | EXPR_TARGET_AVAILABLE (expr) = false; |
2000 | return 2; |
2001 | } |
2002 | |
2003 | return 1; |
2004 | } |
2005 | |
2006 | case -1: |
2007 | return -1; |
2008 | |
2009 | default: |
2010 | gcc_unreachable (); |
2011 | return -1; |
2012 | } |
2013 | } |
2014 | |
2015 | /* Return a destination register, if any, of EXPR. */ |
2016 | rtx |
2017 | expr_dest_reg (expr_t expr) |
2018 | { |
2019 | rtx dest = VINSN_LHS (EXPR_VINSN (expr)); |
2020 | |
2021 | if (dest != NULL_RTX && REG_P (dest)) |
2022 | return dest; |
2023 | |
2024 | return NULL_RTX; |
2025 | } |
2026 | |
/* Return the REGNO of EXPR's destination register.  */
2028 | unsigned |
2029 | expr_dest_regno (expr_t expr) |
2030 | { |
2031 | rtx dest = expr_dest_reg (expr); |
2032 | |
2033 | gcc_assert (dest != NULL_RTX); |
2034 | return REGNO (dest); |
2035 | } |
2036 | |
2037 | /* For a given LV_SET, mark all expressions in JOIN_SET, but not present in |
2038 | AV_SET having unavailable target register. */ |
2039 | void |
2040 | mark_unavailable_targets (av_set_t join_set, av_set_t av_set, regset lv_set) |
2041 | { |
2042 | expr_t expr; |
2043 | av_set_iterator avi; |
2044 | |
2045 | FOR_EACH_EXPR (expr, avi, join_set) |
2046 | if (av_set_lookup (av_set, EXPR_VINSN (expr)) == NULL) |
2047 | set_unavailable_target_for_expr (expr, lv_set); |
2048 | } |
2049 | |
2050 | |
2051 | /* Returns true if REG (at least partially) is present in REGS. */ |
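/* E.g. for a multi-word value living in a pair of hard registers, both REGNO
   and REGNO + 1 are checked: END_REGNO returns the first regno past the
   register, so the loop below covers every hard register the value
   occupies.  */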
2052 | bool |
2053 | register_unavailable_p (regset regs, rtx reg) |
2054 | { |
2055 | unsigned regno, end_regno; |
2056 | |
2057 | regno = REGNO (reg); |
2058 | if (bitmap_bit_p (regs, regno)) |
2059 | return true; |
2060 | |
2061 | end_regno = END_REGNO (reg); |
2062 | |
2063 | while (++regno < end_regno) |
2064 | if (bitmap_bit_p (regs, regno)) |
2065 | return true; |
2066 | |
2067 | return false; |
2068 | } |
2069 | |
2070 | /* Av set functions. */ |
2071 | |
2072 | /* Add a new element to av set SETP. |
2073 | Return the element added. */ |
2074 | static av_set_t |
2075 | av_set_add_element (av_set_t *setp) |
2076 | { |
2077 | /* Insert at the beginning of the list. */ |
2078 | _list_add (setp); |
2079 | return *setp; |
2080 | } |
2081 | |
2082 | /* Add EXPR to SETP. */ |
2083 | void |
2084 | av_set_add (av_set_t *setp, expr_t expr) |
2085 | { |
2086 | av_set_t elem; |
2087 | |
2088 | gcc_assert (!INSN_NOP_P (EXPR_INSN_RTX (expr))); |
2089 | elem = av_set_add_element (setp); |
2090 | copy_expr (_AV_SET_EXPR (elem), expr); |
2091 | } |
2092 | |
2093 | /* Same, but do not copy EXPR. */ |
2094 | static void |
2095 | av_set_add_nocopy (av_set_t *setp, expr_t expr) |
2096 | { |
2097 | av_set_t elem; |
2098 | |
2099 | elem = av_set_add_element (setp); |
2100 | *_AV_SET_EXPR (elem) = *expr; |
2101 | } |
2102 | |
2103 | /* Remove expr pointed to by IP from the av_set. */ |
2104 | void |
2105 | av_set_iter_remove (av_set_iterator *ip) |
2106 | { |
2107 | clear_expr (_AV_SET_EXPR (*ip->lp)); |
2108 | _list_iter_remove (ip); |
2109 | } |
2110 | |
/* Search for an expr in SET such that it's equivalent to SOUGHT_VINSN in
   the sense of the vinsn_equal_p function.  Return NULL if no such expr
   is found in SET.  */
2114 | expr_t |
2115 | av_set_lookup (av_set_t set, vinsn_t sought_vinsn) |
2116 | { |
2117 | expr_t expr; |
2118 | av_set_iterator i; |
2119 | |
2120 | FOR_EACH_EXPR (expr, i, set) |
2121 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) |
2122 | return expr; |
2123 | return NULL; |
2124 | } |
2125 | |
2126 | /* Same, but also remove the EXPR found. */ |
2127 | static expr_t |
2128 | av_set_lookup_and_remove (av_set_t *setp, vinsn_t sought_vinsn) |
2129 | { |
2130 | expr_t expr; |
2131 | av_set_iterator i; |
2132 | |
2133 | FOR_EACH_EXPR_1 (expr, i, setp) |
2134 | if (vinsn_equal_p (EXPR_VINSN (expr), sought_vinsn)) |
2135 | { |
2136 | _list_iter_remove_nofree (&i); |
2137 | return expr; |
2138 | } |
2139 | return NULL; |
2140 | } |
2141 | |
/* Search for an expr in SET whose vinsn is equivalent to EXPR's vinsn in
   the sense of vinsn_equal_p, but which is not EXPR itself.
   Return NULL if no such expr is found in SET.  */
2145 | static expr_t |
2146 | av_set_lookup_other_equiv_expr (av_set_t set, expr_t expr) |
2147 | { |
2148 | expr_t cur_expr; |
2149 | av_set_iterator i; |
2150 | |
2151 | FOR_EACH_EXPR (cur_expr, i, set) |
2152 | { |
2153 | if (cur_expr == expr) |
2154 | continue; |
2155 | if (vinsn_equal_p (EXPR_VINSN (cur_expr), EXPR_VINSN (expr))) |
2156 | return cur_expr; |
2157 | } |
2158 | |
2159 | return NULL; |
2160 | } |
2161 | |
/* If an expression equivalent to EXPR is already in AVP, merge EXPR into it,
   remove the element pointed to by IP and return the merged expression;
   otherwise return EXPR.  */
2163 | expr_t |
2164 | merge_with_other_exprs (av_set_t *avp, av_set_iterator *ip, expr_t expr) |
2165 | { |
2166 | expr_t expr2; |
2167 | |
2168 | expr2 = av_set_lookup_other_equiv_expr (*avp, expr); |
2169 | if (expr2 != NULL) |
2170 | { |
/* Reset target availability on merge, since taking it from only one
   of the exprs could be wrong -- the two were computed for different code.  */
2173 | EXPR_TARGET_AVAILABLE (expr2) = -1; |
2174 | EXPR_USEFULNESS (expr2) = 0; |
2175 | |
2176 | merge_expr (expr2, expr, NULL); |
2177 | |
/* Fix usefulness, as it should now be REG_BR_PROB_BASE.  */
2179 | EXPR_USEFULNESS (expr2) = REG_BR_PROB_BASE; |
2180 | |
2181 | av_set_iter_remove (ip); |
2182 | return expr2; |
2183 | } |
2184 | |
2185 | return expr; |
2186 | } |
2187 | |
2188 | /* Return true if there is an expr that correlates to VI in SET. */ |
2189 | bool |
2190 | av_set_is_in_p (av_set_t set, vinsn_t vi) |
2191 | { |
2192 | return av_set_lookup (set, vi) != NULL; |
2193 | } |
2194 | |
2195 | /* Return a copy of SET. */ |
2196 | av_set_t |
2197 | av_set_copy (av_set_t set) |
2198 | { |
2199 | expr_t expr; |
2200 | av_set_iterator i; |
2201 | av_set_t res = NULL; |
2202 | |
2203 | FOR_EACH_EXPR (expr, i, set) |
2204 | av_set_add (&res, expr); |
2205 | |
2206 | return res; |
2207 | } |
2208 | |
/* Join two av sets that do not have common elements by attaching the second
   set (pointed to by FROMP) to the end of the first set (TO_TAILP must point
   to the _AV_SET_NEXT of the first set's last element).  */
2212 | static void |
2213 | join_distinct_sets (av_set_t *to_tailp, av_set_t *fromp) |
2214 | { |
2215 | gcc_assert (*to_tailp == NULL); |
2216 | *to_tailp = *fromp; |
2217 | *fromp = NULL; |
2218 | } |
2219 | |
/* Make the set pointed to by TOP the union of TOP and FROMP.  Clear the
   av_set pointed to by FROMP afterwards.  */
2222 | void |
2223 | av_set_union_and_clear (av_set_t *top, av_set_t *fromp, insn_t insn) |
2224 | { |
2225 | expr_t expr1; |
2226 | av_set_iterator i; |
2227 | |
/* Delete from TOP all exprs that are present in FROMP.  */
2229 | FOR_EACH_EXPR_1 (expr1, i, top) |
2230 | { |
2231 | expr_t expr2 = av_set_lookup (*fromp, EXPR_VINSN (expr1)); |
2232 | |
2233 | if (expr2) |
2234 | { |
2235 | merge_expr (expr2, expr1, insn); |
2236 | av_set_iter_remove (&i); |
2237 | } |
2238 | } |
2239 | |
2240 | join_distinct_sets (i.lp, fromp); |
2241 | } |
2242 | |
2243 | /* Same as above, but also update availability of target register in |
2244 | TOP judging by TO_LV_SET and FROM_LV_SET. */ |
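/* The sets are handled in three groups below: exprs found in both TOP and
   FROMP are merged and collected separately, exprs present only in TOP are
   checked against FROM_LV_SET, exprs present only in FROMP are checked
   against TO_LV_SET, and finally all three groups are chained back
   into TOP.  */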
2245 | void |
2246 | av_set_union_and_live (av_set_t *top, av_set_t *fromp, regset to_lv_set, |
2247 | regset from_lv_set, insn_t insn) |
2248 | { |
2249 | expr_t expr1; |
2250 | av_set_iterator i; |
2251 | av_set_t *to_tailp, in_both_set = NULL; |
2252 | |
/* Delete from TOP all exprs that are present in FROMP.  */
2254 | FOR_EACH_EXPR_1 (expr1, i, top) |
2255 | { |
2256 | expr_t expr2 = av_set_lookup_and_remove (fromp, EXPR_VINSN (expr1)); |
2257 | |
2258 | if (expr2) |
2259 | { |
2260 | /* It may be that the expressions have different destination |
2261 | registers, in which case we need to check liveness here. */ |
2262 | if (EXPR_SEPARABLE_P (expr1)) |
2263 | { |
2264 | int regno1 = (REG_P (EXPR_LHS (expr1)) |
2265 | ? (int) expr_dest_regno (expr1) : -1); |
2266 | int regno2 = (REG_P (EXPR_LHS (expr2)) |
2267 | ? (int) expr_dest_regno (expr2) : -1); |
2268 | |
2269 | /* ??? We don't have a way to check restrictions for |
2270 | *other* register on the current path, we did it only |
2271 | for the current target register. Give up. */ |
2272 | if (regno1 != regno2) |
2273 | EXPR_TARGET_AVAILABLE (expr2) = -1; |
2274 | } |
2275 | else if (EXPR_INSN_RTX (expr1) != EXPR_INSN_RTX (expr2)) |
2276 | EXPR_TARGET_AVAILABLE (expr2) = -1; |
2277 | |
2278 | merge_expr (expr2, expr1, insn); |
2279 | av_set_add_nocopy (&in_both_set, expr2); |
2280 | av_set_iter_remove (&i); |
2281 | } |
2282 | else |
2283 | /* EXPR1 is present in TOP, but not in FROMP. Check it on |
2284 | FROM_LV_SET. */ |
2285 | set_unavailable_target_for_expr (expr1, from_lv_set); |
2286 | } |
2287 | to_tailp = i.lp; |
2288 | |
2289 | /* These expressions are not present in TOP. Check liveness |
2290 | restrictions on TO_LV_SET. */ |
2291 | FOR_EACH_EXPR (expr1, i, *fromp) |
2292 | set_unavailable_target_for_expr (expr1, to_lv_set); |
2293 | |
2294 | join_distinct_sets (i.lp, &in_both_set); |
2295 | join_distinct_sets (to_tailp, fromp); |
2296 | } |
2297 | |
2298 | /* Clear av_set pointed to by SETP. */ |
2299 | void |
2300 | av_set_clear (av_set_t *setp) |
2301 | { |
2302 | expr_t expr; |
2303 | av_set_iterator i; |
2304 | |
2305 | FOR_EACH_EXPR_1 (expr, i, setp) |
2306 | av_set_iter_remove (&i); |
2307 | |
2308 | gcc_assert (*setp == NULL); |
2309 | } |
2310 | |
2311 | /* Leave only one non-speculative element in the SETP. */ |
2312 | void |
2313 | av_set_leave_one_nonspec (av_set_t *setp) |
2314 | { |
2315 | expr_t expr; |
2316 | av_set_iterator i; |
2317 | bool has_one_nonspec = false; |
2318 | |
2319 | /* Keep all speculative exprs, and leave one non-speculative |
2320 | (the first one). */ |
2321 | FOR_EACH_EXPR_1 (expr, i, setp) |
2322 | { |
2323 | if (!EXPR_SPEC_DONE_DS (expr)) |
2324 | { |
2325 | if (has_one_nonspec) |
2326 | av_set_iter_remove (&i); |
2327 | else |
2328 | has_one_nonspec = true; |
2329 | } |
2330 | } |
2331 | } |
2332 | |
2333 | /* Return the N'th element of the SET. */ |
2334 | expr_t |
2335 | av_set_element (av_set_t set, int n) |
2336 | { |
2337 | expr_t expr; |
2338 | av_set_iterator i; |
2339 | |
2340 | FOR_EACH_EXPR (expr, i, set) |
2341 | if (n-- == 0) |
2342 | return expr; |
2343 | |
2344 | gcc_unreachable (); |
2345 | return NULL; |
2346 | } |
2347 | |
2348 | /* Deletes all expressions from AVP that are conditional branches (IFs). */ |
2349 | void |
2350 | av_set_substract_cond_branches (av_set_t *avp) |
2351 | { |
2352 | av_set_iterator i; |
2353 | expr_t expr; |
2354 | |
2355 | FOR_EACH_EXPR_1 (expr, i, avp) |
2356 | if (vinsn_cond_branch_p (EXPR_VINSN (expr))) |
2357 | av_set_iter_remove (&i); |
2358 | } |
2359 | |
/* Multiply the usefulness attribute of each member of the av-set AV by
   PROB / ALL_PROB.  */
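/* For example, with ALL_PROB = REG_BR_PROB_BASE and PROB = REG_BR_PROB_BASE / 4
   (a successor taken a quarter of the time), an expr whose usefulness was
   REG_BR_PROB_BASE is scaled down to REG_BR_PROB_BASE / 4; with ALL_PROB == 0
   the usefulness is simply zeroed.  */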
2362 | void |
2363 | av_set_split_usefulness (av_set_t av, int prob, int all_prob) |
2364 | { |
2365 | av_set_iterator i; |
2366 | expr_t expr; |
2367 | |
2368 | FOR_EACH_EXPR (expr, i, av) |
2369 | EXPR_USEFULNESS (expr) = (all_prob |
2370 | ? (EXPR_USEFULNESS (expr) * prob) / all_prob |
2371 | : 0); |
2372 | } |
2373 | |
/* Leave in AVP only those expressions that are also present in AV,
   merging the history of their transformations.  */
2376 | void |
2377 | av_set_code_motion_filter (av_set_t *avp, av_set_t av) |
2378 | { |
2379 | av_set_iterator i; |
2380 | expr_t expr, expr2; |
2381 | |
2382 | FOR_EACH_EXPR_1 (expr, i, avp) |
2383 | if ((expr2 = av_set_lookup (av, EXPR_VINSN (expr))) == NULL) |
2384 | av_set_iter_remove (&i); |
2385 | else |
2386 | /* When updating av sets in bookkeeping blocks, we can add more insns |
2387 | there which will be transformed but the upper av sets will not |
2388 | reflect those transformations. We then fail to undo those |
2389 | when searching for such insns. So merge the history saved |
2390 | in the av set of the block we are processing. */ |
2391 | merge_history_vect (&EXPR_HISTORY_OF_CHANGES (expr), |
2392 | EXPR_HISTORY_OF_CHANGES (expr2)); |
2393 | } |
2394 | |
2395 | |
2396 | |
2397 | /* Dependence hooks to initialize insn data. */ |
2398 | |
2399 | /* This is used in hooks callable from dependence analysis when initializing |
2400 | instruction's data. */ |
2401 | static struct |
2402 | { |
2403 | /* Where the dependence was found (lhs/rhs). */ |
2404 | deps_where_t where; |
2405 | |
2406 | /* The actual data object to initialize. */ |
2407 | idata_t id; |
2408 | |
2409 | /* True when the insn should not be made clonable. */ |
2410 | bool force_unique_p; |
2411 | |
2412 | /* True when insn should be treated as of type USE, i.e. never renamed. */ |
2413 | bool force_use_p; |
2414 | } deps_init_id_data; |
2415 | |
2416 | |
2417 | /* Setup ID for INSN. FORCE_UNIQUE_P is true when INSN should not be |
2418 | clonable. */ |
2419 | static void |
2420 | setup_id_for_insn (idata_t id, insn_t insn, bool force_unique_p) |
2421 | { |
2422 | int type; |
2423 | |
/* Determine whether INSN could be cloned and return the appropriate vinsn
   type.  Clonable insns that can be separated into lhs and rhs have type
   SET.  Other clonable insns have type USE.  */
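/* For instance, a plain arithmetic insn gets type SET, a simple
   unconditional jump gets type PC, and a regular insn that must stay unique
   (FORCE_UNIQUE_P set, e.g. an asm or a call) keeps its INSN code.  */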
2427 | type = GET_CODE (insn); |
2428 | |
/* Only regular insns can be cloned.  */
2430 | if (type == INSN && !force_unique_p) |
2431 | type = SET; |
2432 | else if (type == JUMP_INSN && simplejump_p (insn)) |
2433 | type = PC; |
2434 | else if (type == DEBUG_INSN) |
2435 | type = !force_unique_p ? USE : INSN; |
2436 | |
2437 | IDATA_TYPE (id) = type; |
2438 | IDATA_REG_SETS (id) = get_clear_regset_from_pool (); |
2439 | IDATA_REG_USES (id) = get_clear_regset_from_pool (); |
2440 | IDATA_REG_CLOBBERS (id) = get_clear_regset_from_pool (); |
2441 | } |
2442 | |
2443 | /* Start initializing insn data. */ |
2444 | static void |
2445 | deps_init_id_start_insn (insn_t insn) |
2446 | { |
2447 | gcc_assert (deps_init_id_data.where == DEPS_IN_NOWHERE); |
2448 | |
2449 | setup_id_for_insn (deps_init_id_data.id, insn, |
2450 | deps_init_id_data.force_unique_p); |
2451 | deps_init_id_data.where = DEPS_IN_INSN; |
2452 | } |
2453 | |
2454 | /* Start initializing lhs data. */ |
2455 | static void |
2456 | deps_init_id_start_lhs (rtx lhs) |
2457 | { |
2458 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); |
2459 | gcc_assert (IDATA_LHS (deps_init_id_data.id) == NULL); |
2460 | |
2461 | if (IDATA_TYPE (deps_init_id_data.id) == SET) |
2462 | { |
2463 | IDATA_LHS (deps_init_id_data.id) = lhs; |
2464 | deps_init_id_data.where = DEPS_IN_LHS; |
2465 | } |
2466 | } |
2467 | |
2468 | /* Finish initializing lhs data. */ |
2469 | static void |
2470 | deps_init_id_finish_lhs (void) |
2471 | { |
2472 | deps_init_id_data.where = DEPS_IN_INSN; |
2473 | } |
2474 | |
2475 | /* Note a set of REGNO. */ |
2476 | static void |
2477 | deps_init_id_note_reg_set (int regno) |
2478 | { |
2479 | haifa_note_reg_set (regno); |
2480 | |
2481 | if (deps_init_id_data.where == DEPS_IN_RHS) |
2482 | deps_init_id_data.force_use_p = true; |
2483 | |
2484 | if (IDATA_TYPE (deps_init_id_data.id) != PC) |
2485 | SET_REGNO_REG_SET (IDATA_REG_SETS (deps_init_id_data.id), regno); |
2486 | |
2487 | #ifdef STACK_REGS |
2488 | /* Make instructions that set stack registers to be ineligible for |
2489 | renaming to avoid issues with find_used_regs. */ |
2490 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) |
2491 | deps_init_id_data.force_use_p = true; |
2492 | #endif |
2493 | } |
2494 | |
2495 | /* Note a clobber of REGNO. */ |
2496 | static void |
2497 | deps_init_id_note_reg_clobber (int regno) |
2498 | { |
2499 | haifa_note_reg_clobber (regno); |
2500 | |
2501 | if (deps_init_id_data.where == DEPS_IN_RHS) |
2502 | deps_init_id_data.force_use_p = true; |
2503 | |
2504 | if (IDATA_TYPE (deps_init_id_data.id) != PC) |
2505 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (deps_init_id_data.id), regno); |
2506 | } |
2507 | |
2508 | /* Note a use of REGNO. */ |
2509 | static void |
2510 | deps_init_id_note_reg_use (int regno) |
2511 | { |
2512 | haifa_note_reg_use (regno); |
2513 | |
2514 | if (IDATA_TYPE (deps_init_id_data.id) != PC) |
2515 | SET_REGNO_REG_SET (IDATA_REG_USES (deps_init_id_data.id), regno); |
2516 | } |
2517 | |
2518 | /* Start initializing rhs data. */ |
2519 | static void |
2520 | deps_init_id_start_rhs (rtx rhs) |
2521 | { |
2522 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); |
2523 | |
2524 | /* And there was no sel_deps_reset_to_insn (). */ |
2525 | if (IDATA_LHS (deps_init_id_data.id) != NULL) |
2526 | { |
2527 | IDATA_RHS (deps_init_id_data.id) = rhs; |
2528 | deps_init_id_data.where = DEPS_IN_RHS; |
2529 | } |
2530 | } |
2531 | |
2532 | /* Finish initializing rhs data. */ |
2533 | static void |
2534 | deps_init_id_finish_rhs (void) |
2535 | { |
2536 | gcc_assert (deps_init_id_data.where == DEPS_IN_RHS |
2537 | || deps_init_id_data.where == DEPS_IN_INSN); |
2538 | deps_init_id_data.where = DEPS_IN_INSN; |
2539 | } |
2540 | |
2541 | /* Finish initializing insn data. */ |
2542 | static void |
2543 | deps_init_id_finish_insn (void) |
2544 | { |
2545 | gcc_assert (deps_init_id_data.where == DEPS_IN_INSN); |
2546 | |
2547 | if (IDATA_TYPE (deps_init_id_data.id) == SET) |
2548 | { |
2549 | rtx lhs = IDATA_LHS (deps_init_id_data.id); |
2550 | rtx rhs = IDATA_RHS (deps_init_id_data.id); |
2551 | |
2552 | if (lhs == NULL || rhs == NULL || !lhs_and_rhs_separable_p (lhs, rhs) |
2553 | || deps_init_id_data.force_use_p) |
2554 | { |
/* This should be a USE, as we don't want to schedule its RHS
   separately.  However, we still want the lhs and rhs to be recorded
   for the purposes of substitution.  That's why we don't
   simply call downgrade_to_use () here.  */
2559 | gcc_assert (IDATA_TYPE (deps_init_id_data.id) == SET); |
2560 | gcc_assert (!lhs == !rhs); |
2561 | |
2562 | IDATA_TYPE (deps_init_id_data.id) = USE; |
2563 | } |
2564 | } |
2565 | |
2566 | deps_init_id_data.where = DEPS_IN_NOWHERE; |
2567 | } |
2568 | |
2569 | /* This is dependence info used for initializing insn's data. */ |
2570 | static struct sched_deps_info_def deps_init_id_sched_deps_info; |
2571 | |
2572 | /* This initializes most of the static part of the above structure. */ |
2573 | static const struct sched_deps_info_def const_deps_init_id_sched_deps_info = |
2574 | { |
2575 | NULL, |
2576 | |
2577 | deps_init_id_start_insn, |
2578 | deps_init_id_finish_insn, |
2579 | deps_init_id_start_lhs, |
2580 | deps_init_id_finish_lhs, |
2581 | deps_init_id_start_rhs, |
2582 | deps_init_id_finish_rhs, |
2583 | deps_init_id_note_reg_set, |
2584 | deps_init_id_note_reg_clobber, |
2585 | deps_init_id_note_reg_use, |
2586 | NULL, /* note_mem_dep */ |
2587 | NULL, /* note_dep */ |
2588 | |
2589 | 0, /* use_cselib */ |
2590 | 0, /* use_deps_list */ |
2591 | 0 /* generate_spec_deps */ |
2592 | }; |
2593 | |
2594 | /* Initialize INSN's lhs and rhs in ID. When FORCE_UNIQUE_P is true, |
2595 | we don't actually need information about lhs and rhs. */ |
2596 | static void |
2597 | setup_id_lhs_rhs (idata_t id, insn_t insn, bool force_unique_p) |
2598 | { |
2599 | rtx pat = PATTERN (insn); |
2600 | |
2601 | if (NONJUMP_INSN_P (insn) |
2602 | && GET_CODE (pat) == SET |
2603 | && !force_unique_p) |
2604 | { |
2605 | IDATA_RHS (id) = SET_SRC (pat); |
2606 | IDATA_LHS (id) = SET_DEST (pat); |
2607 | } |
2608 | else |
2609 | IDATA_LHS (id) = IDATA_RHS (id) = NULL; |
2610 | } |
2611 | |
2612 | /* Possibly downgrade INSN to USE. */ |
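/* E.g. an insn whose rhs is not separable from its lhs, an insn whose rhs
   mentions the location of one of its own pre/post-modify defs, or (with
   STACK_REGS) an insn that writes a stack register must not have its rhs
   scheduled separately, so it is downgraded to USE below.  */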
2613 | static void |
2614 | maybe_downgrade_id_to_use (idata_t id, insn_t insn) |
2615 | { |
2616 | bool must_be_use = false; |
2617 | df_ref def; |
2618 | rtx lhs = IDATA_LHS (id); |
2619 | rtx rhs = IDATA_RHS (id); |
2620 | |
2621 | /* We downgrade only SETs. */ |
2622 | if (IDATA_TYPE (id) != SET) |
2623 | return; |
2624 | |
2625 | if (!lhs || !lhs_and_rhs_separable_p (lhs, rhs)) |
2626 | { |
2627 | IDATA_TYPE (id) = USE; |
2628 | return; |
2629 | } |
2630 | |
2631 | FOR_EACH_INSN_DEF (def, insn) |
2632 | { |
2633 | if (DF_REF_INSN (def) |
2634 | && DF_REF_FLAGS_IS_SET (def, DF_REF_PRE_POST_MODIFY) |
2635 | && loc_mentioned_in_p (DF_REF_LOC (def), IDATA_RHS (id))) |
2636 | { |
2637 | must_be_use = true; |
2638 | break; |
2639 | } |
2640 | |
2641 | #ifdef STACK_REGS |
2642 | /* Make instructions that set stack registers to be ineligible for |
2643 | renaming to avoid issues with find_used_regs. */ |
2644 | if (IN_RANGE (DF_REF_REGNO (def), FIRST_STACK_REG, LAST_STACK_REG)) |
2645 | { |
2646 | must_be_use = true; |
2647 | break; |
2648 | } |
2649 | #endif |
2650 | } |
2651 | |
2652 | if (must_be_use) |
2653 | IDATA_TYPE (id) = USE; |
2654 | } |
2655 | |
2656 | /* Setup implicit register clobbers calculated by sched-deps for INSN |
2657 | before reload and save them in ID. */ |
2658 | static void |
2659 | setup_id_implicit_regs (idata_t id, insn_t insn) |
2660 | { |
2661 | if (reload_completed) |
2662 | return; |
2663 | |
2664 | HARD_REG_SET temp; |
2665 | unsigned regno; |
2666 | hard_reg_set_iterator hrsi; |
2667 | |
2668 | get_implicit_reg_pending_clobbers (&temp, insn); |
2669 | EXECUTE_IF_SET_IN_HARD_REG_SET (temp, 0, regno, hrsi) |
2670 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); |
2671 | } |
2672 | |
2673 | /* Setup register sets describing INSN in ID. */ |
2674 | static void |
2675 | setup_id_reg_sets (idata_t id, insn_t insn) |
2676 | { |
2677 | struct df_insn_info *insn_info = DF_INSN_INFO_GET (insn); |
2678 | df_ref def, use; |
2679 | regset tmp = get_clear_regset_from_pool (); |
2680 | |
2681 | FOR_EACH_INSN_INFO_DEF (def, insn_info) |
2682 | { |
2683 | unsigned int regno = DF_REF_REGNO (def); |
2684 | |
2685 | /* Post modifies are treated like clobbers by sched-deps.c. */ |
2686 | if (DF_REF_FLAGS_IS_SET (def, (DF_REF_MUST_CLOBBER |
2687 | | DF_REF_PRE_POST_MODIFY))) |
2688 | SET_REGNO_REG_SET (IDATA_REG_CLOBBERS (id), regno); |
2689 | else if (! DF_REF_FLAGS_IS_SET (def, DF_REF_MAY_CLOBBER)) |
2690 | { |
2691 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), regno); |
2692 | |
2693 | #ifdef STACK_REGS |
2694 | /* For stack registers, treat writes to them as writes |
2695 | to the first one to be consistent with sched-deps.c. */ |
2696 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) |
2697 | SET_REGNO_REG_SET (IDATA_REG_SETS (id), FIRST_STACK_REG); |
2698 | #endif |
2699 | } |
2700 | /* Mark special refs that generate read/write def pair. */ |
2701 | if (DF_REF_FLAGS_IS_SET (def, DF_REF_CONDITIONAL) |
2702 | || regno == STACK_POINTER_REGNUM) |
2703 | bitmap_set_bit (tmp, regno); |
2704 | } |
2705 | |
2706 | FOR_EACH_INSN_INFO_USE (use, insn_info) |
2707 | { |
2708 | unsigned int regno = DF_REF_REGNO (use); |
2709 | |
2710 | /* When these refs are met for the first time, skip them, as |
2711 | these uses are just counterparts of some defs. */ |
2712 | if (bitmap_bit_p (tmp, regno)) |
2713 | bitmap_clear_bit (tmp, regno); |
2714 | else if (! DF_REF_FLAGS_IS_SET (use, DF_REF_CALL_STACK_USAGE)) |
2715 | { |
2716 | SET_REGNO_REG_SET (IDATA_REG_USES (id), regno); |
2717 | |
2718 | #ifdef STACK_REGS |
2719 | /* For stack registers, treat reads from them as reads from |
2720 | the first one to be consistent with sched-deps.c. */ |
2721 | if (IN_RANGE (regno, FIRST_STACK_REG, LAST_STACK_REG)) |
2722 | SET_REGNO_REG_SET (IDATA_REG_USES (id), FIRST_STACK_REG); |
2723 | #endif |
2724 | } |
2725 | } |
2726 | |
2727 | /* Also get implicit reg clobbers from sched-deps. */ |
2728 | setup_id_implicit_regs (id, insn); |
2729 | |
2730 | return_regset_to_pool (tmp); |
2731 | } |
2732 | |
2733 | /* Initialize instruction data for INSN in ID using DF's data. */ |
2734 | static void |
2735 | init_id_from_df (idata_t id, insn_t insn, bool force_unique_p) |
2736 | { |
2737 | gcc_assert (DF_INSN_UID_SAFE_GET (INSN_UID (insn)) != NULL); |
2738 | |
2739 | setup_id_for_insn (id, insn, force_unique_p); |
2740 | setup_id_lhs_rhs (id, insn, force_unique_p); |
2741 | |
2742 | if (INSN_NOP_P (insn)) |
2743 | return; |
2744 | |
2745 | maybe_downgrade_id_to_use (id, insn); |
2746 | setup_id_reg_sets (id, insn); |
2747 | } |
2748 | |
2749 | /* Initialize instruction data for INSN in ID. */ |
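/* This works by running the full sched-deps analysis of INSN in a scratch
   dependence context with the deps_init_id_* hooks installed, so ID is
   filled as a side effect of deps_analyze_insn; the context is freed
   afterwards and implicit register clobbers are added separately.  */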
2750 | static void |
2751 | deps_init_id (idata_t id, insn_t insn, bool force_unique_p) |
2752 | { |
2753 | struct deps_desc _dc, *dc = &_dc; |
2754 | |
2755 | deps_init_id_data.where = DEPS_IN_NOWHERE; |
2756 | deps_init_id_data.id = id; |
2757 | deps_init_id_data.force_unique_p = force_unique_p; |
2758 | deps_init_id_data.force_use_p = false; |
2759 | |
2760 | init_deps (dc, false); |
2761 | memcpy (&deps_init_id_sched_deps_info, |
2762 | &const_deps_init_id_sched_deps_info, |
2763 | sizeof (deps_init_id_sched_deps_info)); |
2764 | if (spec_info != NULL) |
2765 | deps_init_id_sched_deps_info.generate_spec_deps = 1; |
2766 | sched_deps_info = &deps_init_id_sched_deps_info; |
2767 | |
2768 | deps_analyze_insn (dc, insn); |
/* Implicit reg clobbers are received from sched-deps separately.  */
2770 | setup_id_implicit_regs (id, insn); |
2771 | |
2772 | free_deps (dc); |
2773 | deps_init_id_data.id = NULL; |
2774 | } |
2775 | |
2776 | |
2777 | struct sched_scan_info_def |
2778 | { |
/* This hook notifies the scheduler frontend to extend its internal per
   basic block data structures.  This hook should be called once before a
   series of calls to bb_init ().  */
2782 | void (*extend_bb) (void); |
2783 | |
/* This hook makes the scheduler frontend initialize its internal data
   structures for the passed basic block.  */
2786 | void (*init_bb) (basic_block); |
2787 | |
/* This hook notifies the scheduler frontend to extend its internal per
   insn data structures.  This hook should be called once before a series
   of calls to insn_init ().  */
2791 | void (*extend_insn) (void); |
2792 | |
/* This hook makes the scheduler frontend initialize its internal data
   structures for the passed insn.  */
2795 | void (*init_insn) (insn_t); |
2796 | }; |
2797 | |
/* A driver function that walks the basic blocks BBS and initializes the
   scheduler's per-bb and per-insn data via the hooks in SSI.  */
2800 | static void |
2801 | sched_scan (const struct sched_scan_info_def *ssi, bb_vec_t bbs) |
2802 | { |
2803 | unsigned i; |
2804 | basic_block bb; |
2805 | |
2806 | if (ssi->extend_bb) |
2807 | ssi->extend_bb (); |
2808 | |
2809 | if (ssi->init_bb) |
2810 | FOR_EACH_VEC_ELT (bbs, i, bb) |
2811 | ssi->init_bb (bb); |
2812 | |
2813 | if (ssi->extend_insn) |
2814 | ssi->extend_insn (); |
2815 | |
2816 | if (ssi->init_insn) |
2817 | FOR_EACH_VEC_ELT (bbs, i, bb) |
2818 | { |
2819 | rtx_insn *insn; |
2820 | |
2821 | FOR_BB_INSNS (bb, insn) |
2822 | ssi->init_insn (insn); |
2823 | } |
2824 | } |
2825 | |
/* Implement hooks for collecting fundamental insn properties, such as
   whether the insn is an ASM or is within a SCHED_GROUP.  */
2828 | |
/* Return true when the "one-time init" data for INSN has not been
   initialized yet, i.e. when we see INSN for the first time.  */
2830 | static bool |
2831 | first_time_insn_init (insn_t insn) |
2832 | { |
2833 | return INSN_LIVE (insn) == NULL; |
2834 | } |
2835 | |
2836 | /* Hash an entry in a transformed_insns hashtable. */ |
2837 | static hashval_t |
2838 | hash_transformed_insns (const void *p) |
2839 | { |
2840 | return VINSN_HASH_RTX (((const struct transformed_insns *) p)->vinsn_old); |
2841 | } |
2842 | |
2843 | /* Compare the entries in a transformed_insns hashtable. */ |
2844 | static int |
2845 | eq_transformed_insns (const void *p, const void *q) |
2846 | { |
2847 | rtx_insn *i1 = |
2848 | VINSN_INSN_RTX (((const struct transformed_insns *) p)->vinsn_old); |
2849 | rtx_insn *i2 = |
2850 | VINSN_INSN_RTX (((const struct transformed_insns *) q)->vinsn_old); |
2851 | |
2852 | if (INSN_UID (i1) == INSN_UID (i2)) |
2853 | return 1; |
2854 | return rtx_equal_p (PATTERN (i1), PATTERN (i2)); |
2855 | } |
2856 | |
2857 | /* Free an entry in a transformed_insns hashtable. */ |
2858 | static void |
2859 | free_transformed_insns (void *p) |
2860 | { |
2861 | struct transformed_insns *pti = (struct transformed_insns *) p; |
2862 | |
2863 | vinsn_detach (pti->vinsn_old); |
2864 | vinsn_detach (pti->vinsn_new); |
2865 | free (pti); |
2866 | } |
2867 | |
2868 | /* Init the s_i_d data for INSN which should be inited just once, when |
2869 | we first see the insn. */ |
2870 | static void |
2871 | init_first_time_insn_data (insn_t insn) |
2872 | { |
2873 | /* This should not be set if this is the first time we init data for |
2874 | insn. */ |
2875 | gcc_assert (first_time_insn_init (insn)); |
2876 | |
2877 | /* These are needed for nops too. */ |
2878 | INSN_LIVE (insn) = get_regset_from_pool (); |
2879 | INSN_LIVE_VALID_P (insn) = false; |
2880 | |
2881 | if (!INSN_NOP_P (insn)) |
2882 | { |
2883 | INSN_ANALYZED_DEPS (insn) = BITMAP_ALLOC (NULL); |
2884 | INSN_FOUND_DEPS (insn) = BITMAP_ALLOC (NULL); |
2885 | INSN_TRANSFORMED_INSNS (insn) |
2886 | = htab_create (16, hash_transformed_insns, |
2887 | eq_transformed_insns, free_transformed_insns); |
2888 | init_deps (&INSN_DEPS_CONTEXT (insn), true); |
2889 | } |
2890 | } |
2891 | |
2892 | /* Free almost all above data for INSN that is scheduled already. |
2893 | Used for extra-large basic blocks. */ |
2894 | void |
2895 | free_data_for_scheduled_insn (insn_t insn) |
2896 | { |
2897 | gcc_assert (! first_time_insn_init (insn)); |
2898 | |
2899 | if (! INSN_ANALYZED_DEPS (insn)) |
2900 | return; |
2901 | |
2902 | BITMAP_FREE (INSN_ANALYZED_DEPS (insn)); |
2903 | BITMAP_FREE (INSN_FOUND_DEPS (insn)); |
2904 | htab_delete (INSN_TRANSFORMED_INSNS (insn)); |
2905 | |
2906 | /* This is allocated only for bookkeeping insns. */ |
2907 | if (INSN_ORIGINATORS (insn)) |
2908 | BITMAP_FREE (INSN_ORIGINATORS (insn)); |
2909 | free_deps (&INSN_DEPS_CONTEXT (insn)); |
2910 | |
2911 | INSN_ANALYZED_DEPS (insn) = NULL; |
2912 | |
2913 | /* Clear the readonly flag so we would ICE when trying to recalculate |
2914 | the deps context (as we believe that it should not happen). */ |
2915 | (&INSN_DEPS_CONTEXT (insn))->readonly = 0; |
2916 | } |
2917 | |
2918 | /* Free the same data as above for INSN. */ |
2919 | static void |
2920 | free_first_time_insn_data (insn_t insn) |
2921 | { |
2922 | gcc_assert (! first_time_insn_init (insn)); |
2923 | |
2924 | free_data_for_scheduled_insn (insn); |
2925 | return_regset_to_pool (INSN_LIVE (insn)); |
2926 | INSN_LIVE (insn) = NULL; |
2927 | INSN_LIVE_VALID_P (insn) = false; |
2928 | } |
2929 | |
2930 | /* Initialize region-scope data structures for basic blocks. */ |
2931 | static void |
2932 | init_global_and_expr_for_bb (basic_block bb) |
2933 | { |
2934 | if (sel_bb_empty_p (bb)) |
2935 | return; |
2936 | |
2937 | invalidate_av_set (bb); |
2938 | } |
2939 | |
2940 | /* Data for global dependency analysis (to initialize CANT_MOVE and |
2941 | SCHED_GROUP_P). */ |
2942 | static struct |
2943 | { |
2944 | /* Previous insn. */ |
2945 | insn_t prev_insn; |
2946 | } init_global_data; |
2947 | |
/* Determine if INSN is in a sched_group, is an asm, or should not be
   cloned.  After that, initialize its expr.  */
2950 | static void |
2951 | init_global_and_expr_for_insn (insn_t insn) |
2952 | { |
2953 | if (LABEL_P (insn)) |
2954 | return; |
2955 | |
2956 | if (NOTE_INSN_BASIC_BLOCK_P (insn)) |
2957 | { |
2958 | init_global_data.prev_insn = NULL; |
2959 | return; |
2960 | } |
2961 | |
2962 | gcc_assert (INSN_P (insn)); |
2963 | |
2964 | if (SCHED_GROUP_P (insn)) |
2965 | /* Setup a sched_group. */ |
2966 | { |
2967 | insn_t prev_insn = init_global_data.prev_insn; |
2968 | |
2969 | if (prev_insn) |
2970 | INSN_SCHED_NEXT (prev_insn) = insn; |
2971 | |
2972 | init_global_data.prev_insn = insn; |
2973 | } |
2974 | else |
2975 | init_global_data.prev_insn = NULL; |
2976 | |
2977 | if (GET_CODE (PATTERN (insn)) == ASM_INPUT |
2978 | || asm_noperands (PATTERN (insn)) >= 0) |
2979 | /* Mark INSN as an asm. */ |
2980 | INSN_ASM_P (insn) = true; |
2981 | |
2982 | { |
2983 | bool force_unique_p; |
2984 | ds_t spec_done_ds; |
2985 | |
2986 | /* Certain instructions cannot be cloned, and frame related insns and |
2987 | the insn adjacent to NOTE_INSN_EPILOGUE_BEG cannot be moved out of |
2988 | their block. */ |
2989 | if (prologue_epilogue_contains (insn)) |
2990 | { |
2991 | if (RTX_FRAME_RELATED_P (insn)) |
2992 | CANT_MOVE (insn) = 1; |
2993 | else |
2994 | { |
2995 | rtx note; |
2996 | for (note = REG_NOTES (insn); note; note = XEXP (note, 1)) |
2997 | if (REG_NOTE_KIND (note) == REG_SAVE_NOTE |
2998 | && ((enum insn_note) INTVAL (XEXP (note, 0)) |
2999 | == NOTE_INSN_EPILOGUE_BEG)) |
3000 | { |
3001 | CANT_MOVE (insn) = 1; |
3002 | break; |
3003 | } |
3004 | } |
3005 | force_unique_p = true; |
3006 | } |
3007 | else |
3008 | if (CANT_MOVE (insn) |
3009 | || INSN_ASM_P (insn) |
3010 | || SCHED_GROUP_P (insn) |
3011 | || CALL_P (insn) |
3012 | /* Exception handling insns are always unique. */ |
3013 | || (cfun->can_throw_non_call_exceptions && can_throw_internal (insn)) |
/* TRAP_IF, though it has an INSN code, is control_flow_insn_p ().  */
3015 | || control_flow_insn_p (insn) |
3016 | || volatile_insn_p (PATTERN (insn)) |
3017 | || (targetm.cannot_copy_insn_p |
3018 | && targetm.cannot_copy_insn_p (insn))) |
3019 | force_unique_p = true; |
3020 | else |
3021 | force_unique_p = false; |
3022 | |
3023 | if (targetm.sched.get_insn_spec_ds) |
3024 | { |
3025 | spec_done_ds = targetm.sched.get_insn_spec_ds (insn); |
3026 | spec_done_ds = ds_get_max_dep_weak (spec_done_ds); |
3027 | } |
3028 | else |
3029 | spec_done_ds = 0; |
3030 | |
3031 | /* Initialize INSN's expr. */ |
3032 | init_expr (INSN_EXPR (insn), vinsn_create (insn, force_unique_p), 0, |
3033 | REG_BR_PROB_BASE, INSN_PRIORITY (insn), 0, BLOCK_NUM (insn), |
3034 | spec_done_ds, 0, 0, vNULL, true, |
3035 | false, false, false, CANT_MOVE (insn)); |
3036 | } |
3037 | |
3038 | init_first_time_insn_data (insn); |
3039 | } |
3040 | |
3041 | /* Scan the region and initialize instruction data for basic blocks BBS. */ |
3042 | void |
3043 | sel_init_global_and_expr (bb_vec_t bbs) |
3044 | { |
3045 | /* ??? It would be nice to implement push / pop scheme for sched_infos. */ |
3046 | const struct sched_scan_info_def ssi = |
3047 | { |
3048 | NULL, /* extend_bb */ |
3049 | init_global_and_expr_for_bb, /* init_bb */ |
3050 | extend_insn_data, /* extend_insn */ |
3051 | init_global_and_expr_for_insn /* init_insn */ |
3052 | }; |
3053 | |
3054 | sched_scan (&ssi, bbs); |
3055 | } |
3056 | |
3057 | /* Finalize region-scope data structures for basic blocks. */ |
3058 | static void |
3059 | finish_global_and_expr_for_bb (basic_block bb) |
3060 | { |
3061 | av_set_clear (&BB_AV_SET (bb)); |
3062 | BB_AV_LEVEL (bb) = 0; |
3063 | } |
3064 | |
3065 | /* Finalize INSN's data. */ |
3066 | static void |
3067 | finish_global_and_expr_insn (insn_t insn) |
3068 | { |
3069 | if (LABEL_P (insn) || NOTE_INSN_BASIC_BLOCK_P (insn)) |
3070 | return; |
3071 | |
3072 | gcc_assert (INSN_P (insn)); |
3073 | |
3074 | if (INSN_LUID (insn) > 0) |
3075 | { |
3076 | free_first_time_insn_data (insn); |
3077 | INSN_WS_LEVEL (insn) = 0; |
3078 | CANT_MOVE (insn) = 0; |
3079 | |
/* We can no longer assert this, as vinsns of this insn could easily be
   live in other insns' caches.  This should be changed to
   a counter-like approach among all vinsns.  */
3083 | gcc_assert (true || VINSN_COUNT (INSN_VINSN (insn)) == 1); |
3084 | clear_expr (INSN_EXPR (insn)); |
3085 | } |
3086 | } |
3087 | |
3088 | /* Finalize per instruction data for the whole region. */ |
3089 | void |
3090 | sel_finish_global_and_expr (void) |
3091 | { |
3092 | { |
3093 | bb_vec_t bbs; |
3094 | int i; |
3095 | |
3096 | bbs.create (current_nr_blocks); |
3097 | |
3098 | for (i = 0; i < current_nr_blocks; i++) |
3099 | bbs.quick_push (BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))); |
3100 | |
3101 | /* Clear AV_SETs and INSN_EXPRs. */ |
3102 | { |
3103 | const struct sched_scan_info_def ssi = |
3104 | { |
3105 | NULL, /* extend_bb */ |
3106 | finish_global_and_expr_for_bb, /* init_bb */ |
3107 | NULL, /* extend_insn */ |
3108 | finish_global_and_expr_insn /* init_insn */ |
3109 | }; |
3110 | |
3111 | sched_scan (&ssi, bbs); |
3112 | } |
3113 | |
3114 | bbs.release (); |
3115 | } |
3116 | |
3117 | finish_insns (); |
3118 | } |
3119 | |
3120 | |
/* In the hooks below, we merely calculate whether or not a dependence
   exists, and in what part of the insn.  However, we will need more data
   when we start caching dependence requests.  */
3124 | |
3125 | /* Container to hold information for dependency analysis. */ |
3126 | static struct |
3127 | { |
3128 | deps_t dc; |
3129 | |
3130 | /* A variable to track which part of rtx we are scanning in |
3131 | sched-deps.c: sched_analyze_insn (). */ |
3132 | deps_where_t where; |
3133 | |
3134 | /* Current producer. */ |
3135 | insn_t pro; |
3136 | |
3137 | /* Current consumer. */ |
3138 | vinsn_t con; |
3139 | |
/* If SEL_DEPS_HAS_DEP_P[DEPS_IN_X] is true, then X has a dependence.
   X is from { INSN, LHS, RHS }.  */
3142 | ds_t has_dep_p[DEPS_IN_NOWHERE]; |
3143 | } has_dependence_data; |
3144 | |
3145 | /* Start analyzing dependencies of INSN. */ |
3146 | static void |
3147 | has_dependence_start_insn (insn_t insn ATTRIBUTE_UNUSED) |
3148 | { |
3149 | gcc_assert (has_dependence_data.where == DEPS_IN_NOWHERE); |
3150 | |
3151 | has_dependence_data.where = DEPS_IN_INSN; |
3152 | } |
3153 | |
3154 | /* Finish analyzing dependencies of an insn. */ |
3155 | static void |
3156 | has_dependence_finish_insn (void) |
3157 | { |
3158 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
3159 | |
3160 | has_dependence_data.where = DEPS_IN_NOWHERE; |
3161 | } |
3162 | |
3163 | /* Start analyzing dependencies of LHS. */ |
3164 | static void |
3165 | has_dependence_start_lhs (rtx lhs ATTRIBUTE_UNUSED) |
3166 | { |
3167 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
3168 | |
3169 | if (VINSN_LHS (has_dependence_data.con) != NULL) |
3170 | has_dependence_data.where = DEPS_IN_LHS; |
3171 | } |
3172 | |
3173 | /* Finish analyzing dependencies of an lhs. */ |
3174 | static void |
3175 | has_dependence_finish_lhs (void) |
3176 | { |
3177 | has_dependence_data.where = DEPS_IN_INSN; |
3178 | } |
3179 | |
3180 | /* Start analyzing dependencies of RHS. */ |
3181 | static void |
3182 | has_dependence_start_rhs (rtx rhs ATTRIBUTE_UNUSED) |
3183 | { |
3184 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
3185 | |
3186 | if (VINSN_RHS (has_dependence_data.con) != NULL) |
3187 | has_dependence_data.where = DEPS_IN_RHS; |
3188 | } |
3189 | |
/* Finish analyzing dependencies of an rhs.  */
3191 | static void |
3192 | has_dependence_finish_rhs (void) |
3193 | { |
3194 | gcc_assert (has_dependence_data.where == DEPS_IN_RHS |
3195 | || has_dependence_data.where == DEPS_IN_INSN); |
3196 | |
3197 | has_dependence_data.where = DEPS_IN_INSN; |
3198 | } |
3199 | |
3200 | /* Note a set of REGNO. */ |
3201 | static void |
3202 | has_dependence_note_reg_set (int regno) |
3203 | { |
3204 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; |
3205 | |
3206 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
3207 | VINSN_INSN_RTX |
3208 | (has_dependence_data.con))) |
3209 | { |
3210 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
3211 | |
3212 | if (reg_last->sets != NULL |
3213 | || reg_last->clobbers != NULL) |
3214 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; |
3215 | |
3216 | if (reg_last->uses || reg_last->implicit_sets) |
3217 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3218 | } |
3219 | } |
3220 | |
3221 | /* Note a clobber of REGNO. */ |
3222 | static void |
3223 | has_dependence_note_reg_clobber (int regno) |
3224 | { |
3225 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; |
3226 | |
3227 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
3228 | VINSN_INSN_RTX |
3229 | (has_dependence_data.con))) |
3230 | { |
3231 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
3232 | |
3233 | if (reg_last->sets) |
3234 | *dsp = (*dsp & ~SPECULATIVE) | DEP_OUTPUT; |
3235 | |
3236 | if (reg_last->uses || reg_last->implicit_sets) |
3237 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3238 | } |
3239 | } |
3240 | |
3241 | /* Note a use of REGNO. */ |
3242 | static void |
3243 | has_dependence_note_reg_use (int regno) |
3244 | { |
3245 | struct deps_reg *reg_last = &has_dependence_data.dc->reg_last[regno]; |
3246 | |
3247 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
3248 | VINSN_INSN_RTX |
3249 | (has_dependence_data.con))) |
3250 | { |
3251 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
3252 | |
3253 | if (reg_last->sets) |
3254 | *dsp = (*dsp & ~SPECULATIVE) | DEP_TRUE; |
3255 | |
3256 | if (reg_last->clobbers || reg_last->implicit_sets) |
3257 | *dsp = (*dsp & ~SPECULATIVE) | DEP_ANTI; |
3258 | |
3259 | /* Merge BE_IN_SPEC bits into *DSP when the dependency producer |
3260 | is actually a check insn. We need to do this for any register |
3261 | read-read dependency with the check unless we track properly |
3262 | all registers written by BE_IN_SPEC-speculated insns, as |
3263 | we don't have explicit dependence lists. See PR 53975. */ |
3264 | if (reg_last->uses) |
3265 | { |
3266 | ds_t pro_spec_checked_ds; |
3267 | |
3268 | pro_spec_checked_ds = INSN_SPEC_CHECKED_DS (has_dependence_data.pro); |
3269 | pro_spec_checked_ds = ds_get_max_dep_weak (pro_spec_checked_ds); |
3270 | |
3271 | if (pro_spec_checked_ds != 0) |
3272 | *dsp = ds_full_merge (*dsp, pro_spec_checked_ds, |
3273 | NULL_RTX, NULL_RTX); |
3274 | } |
3275 | } |
3276 | } |
3277 | |
3278 | /* Note a memory dependence. */ |
3279 | static void |
3280 | has_dependence_note_mem_dep (rtx mem ATTRIBUTE_UNUSED, |
3281 | rtx pending_mem ATTRIBUTE_UNUSED, |
3282 | insn_t pending_insn ATTRIBUTE_UNUSED, |
3283 | ds_t ds ATTRIBUTE_UNUSED) |
3284 | { |
3285 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
3286 | VINSN_INSN_RTX (has_dependence_data.con))) |
3287 | { |
3288 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
3289 | |
3290 | *dsp = ds_full_merge (ds, *dsp, pending_mem, mem); |
3291 | } |
3292 | } |
3293 | |
3294 | /* Note a dependence. */ |
3295 | static void |
3296 | has_dependence_note_dep (insn_t pro ATTRIBUTE_UNUSED, |
3297 | ds_t ds ATTRIBUTE_UNUSED) |
3298 | { |
3299 | if (!sched_insns_conditions_mutex_p (has_dependence_data.pro, |
3300 | VINSN_INSN_RTX (has_dependence_data.con))) |
3301 | { |
3302 | ds_t *dsp = &has_dependence_data.has_dep_p[has_dependence_data.where]; |
3303 | |
3304 | *dsp = ds_full_merge (ds, *dsp, NULL_RTX, NULL_RTX); |
3305 | } |
3306 | } |
3307 | |
3308 | /* Mark the insn as having a hard dependence that prevents speculation. */ |
3309 | void |
3310 | sel_mark_hard_insn (rtx insn) |
3311 | { |
3312 | int i; |
3313 | |
3314 | /* Only work when we're in has_dependence_p mode. |
3315 | ??? This is a hack, this should actually be a hook. */ |
3316 | if (!has_dependence_data.dc || !has_dependence_data.pro) |
3317 | return; |
3318 | |
3319 | gcc_assert (insn == VINSN_INSN_RTX (has_dependence_data.con)); |
3320 | gcc_assert (has_dependence_data.where == DEPS_IN_INSN); |
3321 | |
3322 | for (i = 0; i < DEPS_IN_NOWHERE; i++) |
3323 | has_dependence_data.has_dep_p[i] &= ~SPECULATIVE; |
3324 | } |
3325 | |
3326 | /* This structure holds the hooks for the dependency analysis used when |
3327 | actually processing dependencies in the scheduler. */ |
3328 | static struct sched_deps_info_def has_dependence_sched_deps_info; |
3329 | |
3330 | /* This initializes most of the fields of the above structure. */ |
3331 | static const struct sched_deps_info_def const_has_dependence_sched_deps_info = |
3332 | { |
3333 | NULL, |
3334 | |
3335 | has_dependence_start_insn, |
3336 | has_dependence_finish_insn, |
3337 | has_dependence_start_lhs, |
3338 | has_dependence_finish_lhs, |
3339 | has_dependence_start_rhs, |
3340 | has_dependence_finish_rhs, |
3341 | has_dependence_note_reg_set, |
3342 | has_dependence_note_reg_clobber, |
3343 | has_dependence_note_reg_use, |
3344 | has_dependence_note_mem_dep, |
3345 | has_dependence_note_dep, |
3346 | |
3347 | 0, /* use_cselib */ |
3348 | 0, /* use_deps_list */ |
3349 | 0 /* generate_spec_deps */ |
3350 | }; |
3351 | |
3352 | /* Initialize has_dependence_sched_deps_info with extra spec field. */ |
3353 | static void |
3354 | setup_has_dependence_sched_deps_info (void) |
3355 | { |
3356 | memcpy (&has_dependence_sched_deps_info, |
3357 | &const_has_dependence_sched_deps_info, |
3358 | sizeof (has_dependence_sched_deps_info)); |
3359 | |
3360 | if (spec_info != NULL) |
3361 | has_dependence_sched_deps_info.generate_spec_deps = 1; |
3362 | |
3363 | sched_deps_info = &has_dependence_sched_deps_info; |
3364 | } |
3365 | |
3366 | /* Remove all dependences found and recorded in has_dependence_data array. */ |
3367 | void |
3368 | sel_clear_has_dependence (void) |
3369 | { |
3370 | int i; |
3371 | |
3372 | for (i = 0; i < DEPS_IN_NOWHERE; i++) |
3373 | has_dependence_data.has_dep_p[i] = 0; |
3374 | } |
3375 | |
/* Return nonzero if EXPR is dependent upon PRED.  Return the pointer
   to the dependence information array in HAS_DEP_PP.  */
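/* The check works by (lazily) filling PRED's dependence context with
   information about PRED and marking it read-only, installing the
   has_dependence_* hooks, and analyzing EXPR's insn against that context;
   the per-part bits collected in has_dependence_data.has_dep_p are then
   merged into the returned status.  */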
3378 | ds_t |
3379 | has_dependence_p (expr_t expr, insn_t pred, ds_t **has_dep_pp) |
3380 | { |
3381 | int i; |
3382 | ds_t ds; |
3383 | struct deps_desc *dc; |
3384 | |
3385 | if (INSN_SIMPLEJUMP_P (pred)) |
3386 | /* Unconditional jump is just a transfer of control flow. |
3387 | Ignore it. */ |
3388 | return false; |
3389 | |
3390 | dc = &INSN_DEPS_CONTEXT (pred); |
3391 | |
3392 | /* We init this field lazily. */ |
3393 | if (dc->reg_last == NULL) |
3394 | init_deps_reg_last (dc); |
3395 | |
3396 | if (!dc->readonly) |
3397 | { |
3398 | has_dependence_data.pro = NULL; |
3399 | /* Initialize empty dep context with information about PRED. */ |
3400 | advance_deps_context (dc, pred); |
3401 | dc->readonly = 1; |
3402 | } |
3403 | |
3404 | has_dependence_data.where = DEPS_IN_NOWHERE; |
3405 | has_dependence_data.pro = pred; |
3406 | has_dependence_data.con = EXPR_VINSN (expr); |
3407 | has_dependence_data.dc = dc; |
3408 | |
3409 | sel_clear_has_dependence (); |
3410 | |
3411 | /* Now catch all dependencies that would be generated between PRED and |
3412 | INSN. */ |
3413 | setup_has_dependence_sched_deps_info (); |
3414 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); |
3415 | has_dependence_data.dc = NULL; |
3416 | |
3417 | /* When a barrier was found, set DEPS_IN_INSN bits. */ |
3418 | if (dc->last_reg_pending_barrier == TRUE_BARRIER) |
3419 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_TRUE; |
3420 | else if (dc->last_reg_pending_barrier == MOVE_BARRIER) |
3421 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; |
3422 | |
3423 | /* Do not allow stores to memory to move through checks. Currently |
3424 | we don't move this to sched-deps.c as the check doesn't have |
3425 | obvious places to which this dependence can be attached. |
FIXME: this should go to a hook.  */
3427 | if (EXPR_LHS (expr) |
3428 | && MEM_P (EXPR_LHS (expr)) |
3429 | && sel_insn_is_speculation_check (pred)) |
3430 | has_dependence_data.has_dep_p[DEPS_IN_INSN] = DEP_ANTI; |
3431 | |
3432 | *has_dep_pp = has_dependence_data.has_dep_p; |
3433 | ds = 0; |
3434 | for (i = 0; i < DEPS_IN_NOWHERE; i++) |
3435 | ds = ds_full_merge (ds, has_dependence_data.has_dep_p[i], |
3436 | NULL_RTX, NULL_RTX); |
3437 | |
3438 | return ds; |
3439 | } |
3440 | |
3441 | |
3442 | /* Dependence hooks implementation that checks dependence latency constraints |
3443 | on the insns being scheduled. The entry point for these routines is |
3444 | tick_check_p predicate. */ |
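/* For example, if a producer was scheduled on cycle 3 and the dependence
   latency to the consumer is 2, the consumer cannot be issued before cycle 5;
   tick_check_p then reports how many cycles past the fence's current cycle
   are still needed (0 if the expr is already ready).  */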
3445 | |
3446 | static struct |
3447 | { |
3448 | /* An expr we are currently checking. */ |
3449 | expr_t expr; |
3450 | |
3451 | /* A minimal cycle for its scheduling. */ |
3452 | int cycle; |
3453 | |
3454 | /* Whether we have seen a true dependence while checking. */ |
3455 | bool seen_true_dep_p; |
3456 | } tick_check_data; |
3457 | |
/* Update the minimal scheduling cycle for the expr being checked
   (tick_check_data.expr), given that it depends on PRO_INSN with status DS
   and weight DW.  */
3460 | static void |
3461 | tick_check_dep_with_dw (insn_t pro_insn, ds_t ds, dw_t dw) |
3462 | { |
3463 | expr_t con_expr = tick_check_data.expr; |
3464 | insn_t con_insn = EXPR_INSN_RTX (con_expr); |
3465 | |
3466 | if (con_insn != pro_insn) |
3467 | { |
3468 | enum reg_note dt; |
3469 | int tick; |
3470 | |
3471 | if (/* PROducer was removed from above due to pipelining. */ |
3472 | !INSN_IN_STREAM_P (pro_insn) |
3473 | /* Or PROducer was originally on the next iteration regarding the |
3474 | CONsumer. */ |
3475 | || (INSN_SCHED_TIMES (pro_insn) |
3476 | - EXPR_SCHED_TIMES (con_expr)) > 1) |
3477 | /* Don't count this dependence. */ |
3478 | return; |
3479 | |
3480 | dt = ds_to_dt (ds); |
3481 | if (dt == REG_DEP_TRUE) |
3482 | tick_check_data.seen_true_dep_p = true; |
3483 | |
3484 | gcc_assert (INSN_SCHED_CYCLE (pro_insn) > 0); |
3485 | |
3486 | { |
3487 | dep_def _dep, *dep = &_dep; |
3488 | |
3489 | init_dep (dep, pro_insn, con_insn, dt); |
3490 | |
3491 | tick = INSN_SCHED_CYCLE (pro_insn) + dep_cost_1 (dep, dw); |
3492 | } |
3493 | |
3494 | /* When there are several kinds of dependencies between pro and con, |
3495 | only REG_DEP_TRUE should be taken into account. */ |
3496 | if (tick > tick_check_data.cycle |
3497 | && (dt == REG_DEP_TRUE || !tick_check_data.seen_true_dep_p)) |
3498 | tick_check_data.cycle = tick; |
3499 | } |
3500 | } |
3501 | |
3502 | /* An implementation of note_dep hook. */ |
3503 | static void |
3504 | tick_check_note_dep (insn_t pro, ds_t ds) |
3505 | { |
3506 | tick_check_dep_with_dw (pro, ds, 0); |
3507 | } |
3508 | |
3509 | /* An implementation of note_mem_dep hook. */ |
3510 | static void |
3511 | tick_check_note_mem_dep (rtx mem1, rtx mem2, insn_t pro, ds_t ds) |
3512 | { |
3513 | dw_t dw; |
3514 | |
3515 | dw = (ds_to_dt (ds) == REG_DEP_TRUE |
3516 | ? estimate_dep_weak (mem1, mem2) |
3517 | : 0); |
3518 | |
3519 | tick_check_dep_with_dw (pro, ds, dw); |
3520 | } |
3521 | |
3522 | /* This structure contains hooks for dependence analysis used when determining |
3523 | whether an insn is ready for scheduling. */ |
3524 | static struct sched_deps_info_def tick_check_sched_deps_info = |
3525 | { |
3526 | NULL, |
3527 | |
3528 | NULL, |
3529 | NULL, |
3530 | NULL, |
3531 | NULL, |
3532 | NULL, |
3533 | NULL, |
3534 | haifa_note_reg_set, |
3535 | haifa_note_reg_clobber, |
3536 | haifa_note_reg_use, |
3537 | tick_check_note_mem_dep, |
3538 | tick_check_note_dep, |
3539 | |
3540 | 0, 0, 0 |
3541 | }; |
3542 | |
3543 | /* Estimate number of cycles from the current cycle of FENCE until EXPR can be |
3544 | scheduled. Return 0 if all data from producers in DC is ready. */ |
3545 | int |
3546 | tick_check_p (expr_t expr, deps_t dc, fence_t fence) |
3547 | { |
3548 | int cycles_left; |
3549 | /* Initialize variables. */ |
3550 | tick_check_data.expr = expr; |
3551 | tick_check_data.cycle = 0; |
3552 | tick_check_data.seen_true_dep_p = false; |
3553 | sched_deps_info = &tick_check_sched_deps_info; |
3554 | |
3555 | gcc_assert (!dc->readonly); |
3556 | dc->readonly = 1; |
3557 | deps_analyze_insn (dc, EXPR_INSN_RTX (expr)); |
3558 | dc->readonly = 0; |
3559 | |
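/* TICK_CHECK_DATA.CYCLE now holds the earliest cycle on which EXPR can be
   scheduled; convert it into a number of cycles relative to the fence.  */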
3560 | cycles_left = tick_check_data.cycle - FENCE_CYCLE (fence); |
3561 | |
3562 | return cycles_left >= 0 ? cycles_left : 0; |
3563 | } |
3564 | |
3565 | |
3566 | /* Functions to work with insns. */ |
3567 | |
3568 | /* Returns true if LHS of INSN is the same as DEST of an insn |
3569 | being moved. */ |
3570 | bool |
3571 | lhs_of_insn_equals_to_dest_p (insn_t insn, rtx dest) |
3572 | { |
3573 | rtx lhs = INSN_LHS (insn); |
3574 | |
3575 | if (lhs == NULL || dest == NULL) |
3576 | return false; |
3577 | |
3578 | return rtx_equal_p (lhs, dest); |
3579 | } |
3580 | |
3581 | /* Return s_i_d entry of INSN. Callable from debugger. */ |
3582 | sel_insn_data_def |
3583 | insn_sid (insn_t insn) |
3584 | { |
3585 | return *SID (insn); |
3586 | } |
3587 | |
3588 | /* True when INSN is a speculative check. We can tell this by looking |
3589 | at the data structures of the selective scheduler, not by examining |
3590 | the pattern. */ |
3591 | bool |
3592 | sel_insn_is_speculation_check (rtx insn) |
3593 | { |
3594 | return s_i_d.exists () && !! INSN_SPEC_CHECKED_DS (insn); |
3595 | } |
3596 | |
3597 | /* Extracts machine mode MODE and destination location DST_LOC |
3598 | for given INSN. */ |
3599 | void |
3600 | get_dest_and_mode (rtx insn, rtx *dst_loc, machine_mode *mode) |
3601 | { |
3602 | rtx pat = PATTERN (insn); |
3603 | |
3604 | gcc_assert (dst_loc); |
3605 | gcc_assert (GET_CODE (pat) == SET); |
3606 | |
3607 | *dst_loc = SET_DEST (pat); |
3608 | |
3609 | gcc_assert (*dst_loc); |
3610 | gcc_assert (MEM_P (*dst_loc) || REG_P (*dst_loc)); |
3611 | |
3612 | if (mode) |
3613 | *mode = GET_MODE (*dst_loc); |
3614 | } |
3615 | |
3616 | /* Returns true when moving through JUMP will result in bookkeeping |
3617 | creation. */ |
3618 | bool |
3619 | bookkeeping_can_be_created_if_moved_through_p (insn_t jump) |
3620 | { |
3621 | insn_t succ; |
3622 | succ_iterator si; |
3623 | |
3624 | FOR_EACH_SUCC (succ, si, jump) |
3625 | if (sel_num_cfg_preds_gt_1 (succ)) |
3626 | return true; |
3627 | |
3628 | return false; |
3629 | } |
3630 | |
3631 | /* Return 'true' if INSN is the only one in its basic block. */ |
3632 | static bool |
3633 | insn_is_the_only_one_in_bb_p (insn_t insn) |
3634 | { |
3635 | return sel_bb_head_p (insn) && sel_bb_end_p (insn); |
3636 | } |
3637 | |
3638 | /* Check that the region we're scheduling still has at most one |
3639 | backedge. */ |
3640 | static void |
3641 | verify_backedges (void) |
3642 | { |
3643 | if (pipelining_p) |
3644 | { |
3645 | int i, n = 0; |
3646 | edge e; |
3647 | edge_iterator ei; |
3648 | |
3649 | for (i = 0; i < current_nr_blocks; i++) |
3650 | FOR_EACH_EDGE (e, ei, BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i))->succs) |
3651 | if (in_current_region_p (e->dest) |
3652 | && BLOCK_TO_BB (e->dest->index) < i) |
3653 | n++; |
3654 | |
3655 | gcc_assert (n <= 1); |
3656 | } |
3657 | } |
3658 | |
3659 | |
3660 | /* Functions to work with control flow. */ |
3661 | |
/* Recompute BLOCK_TO_BB and BB_TO_BLOCK for the current region so that blocks
   are sorted in topological order (it might have been invalidated by
   redirecting an edge).  */
3665 | static void |
3666 | sel_recompute_toporder (void) |
3667 | { |
3668 | int i, n, rgn; |
3669 | int *postorder, n_blocks; |
3670 | |
3671 | postorder = XALLOCAVEC (int, n_basic_blocks_for_fn (cfun)); |
3672 | n_blocks = post_order_compute (postorder, false, false); |
3673 | |
3674 | rgn = CONTAINING_RGN (BB_TO_BLOCK (0)); |
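/* Walk the postorder in reverse (i.e. in topological order) and renumber
   the blocks that belong to the current region.  */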
3675 | for (n = 0, i = n_blocks - 1; i >= 0; i--) |
3676 | if (CONTAINING_RGN (postorder[i]) == rgn) |
3677 | { |
3678 | BLOCK_TO_BB (postorder[i]) = n; |
3679 | BB_TO_BLOCK (n) = postorder[i]; |
3680 | n++; |
3681 | } |
3682 | |
3683 | /* Assert that we updated info for all blocks. We may miss some blocks if |
3684 | this function is called when redirecting an edge made a block |
3685 | unreachable, but that block is not deleted yet. */ |
3686 | gcc_assert (n == RGN_NR_BLOCKS (rgn)); |
3687 | } |
3688 | |
3689 | /* Tidy the possibly empty block BB. */ |
3690 | static bool |
3691 | maybe_tidy_empty_bb (basic_block bb) |
3692 | { |
3693 | basic_block succ_bb, pred_bb, note_bb; |
3694 | vec<basic_block> dom_bbs; |
3695 | edge e; |
3696 | edge_iterator ei; |
3697 | bool rescan_p; |
3698 | |
3699 | /* Keep empty bb only if this block immediately precedes EXIT and |
3700 | has incoming non-fallthrough edge, or it has no predecessors or |
3701 | successors. Otherwise remove it. */ |
3702 | if (!sel_bb_empty_p (bb) |
3703 | || (single_succ_p (bb) |
3704 | && single_succ (bb) == EXIT_BLOCK_PTR_FOR_FN (cfun) |
3705 | && (!single_pred_p (bb) |
3706 | || !(single_pred_edge (bb)->flags & EDGE_FALLTHRU))) |
3707 | || EDGE_COUNT (bb->preds) == 0 |
3708 | || EDGE_COUNT (bb->succs) == 0) |
3709 | return false; |
3710 | |
3711 | /* Do not attempt to redirect complex edges. */ |
3712 | FOR_EACH_EDGE (e, ei, bb->preds) |
3713 | if (e->flags & EDGE_COMPLEX) |
3714 | return false; |
3715 | else if (e->flags & EDGE_FALLTHRU) |
3716 | { |
3717 | rtx note; |
3718 | /* If prev bb ends with asm goto, see if any of the |
3719 | ASM_OPERANDS_LABELs don't point to the fallthru |
3720 | label. Do not attempt to redirect it in that case. */ |
3721 | if (JUMP_P (BB_END (e->src)) |
3722 | && (note = extract_asm_operands (PATTERN (BB_END (e->src))))) |
3723 | { |
3724 | int i, n = ASM_OPERANDS_LABEL_LENGTH (note); |
3725 | |
3726 | for (i = 0; i < n; ++i) |
3727 | if (XEXP (ASM_OPERANDS_LABEL (note, i), 0) == BB_HEAD (bb)) |
3728 | return false; |
3729 | } |
3730 | } |
3731 | |
3732 | free_data_sets (bb); |
3733 | |
/* Do not delete BB if it has more than one successor.
   That can occur when we are moving a jump.  */
3736 | if (!single_succ_p (bb)) |
3737 | { |
3738 | gcc_assert (can_merge_blocks_p (bb->prev_bb, bb)); |
3739 | sel_merge_blocks (bb->prev_bb, bb); |
3740 | return true; |
3741 | } |
3742 | |
3743 | succ_bb = single_succ (bb); |
3744 | rescan_p = true; |
3745 | pred_bb = NULL; |
3746 | dom_bbs.create (0); |
3747 | |
3748 | /* Save a pred/succ from the current region to attach the notes to. */ |
3749 | note_bb = NULL; |
3750 | FOR_EACH_EDGE (e, ei, bb->preds) |
3751 | if (in_current_region_p (e->src)) |
3752 | { |
3753 | note_bb = e->src; |
3754 | break; |
3755 | } |
3756 | if (note_bb == NULL) |
3757 | note_bb = succ_bb; |
3758 | |
3759 | /* Redirect all non-fallthru edges to the next bb. */ |
3760 | while (rescan_p) |
3761 | { |
3762 | rescan_p = false; |
3763 | |
3764 | FOR_EACH_EDGE (e, ei, bb->preds) |
3765 | { |
3766 | pred_bb = e->src; |
3767 | |
3768 | if (!(e->flags & EDGE_FALLTHRU)) |
3769 | { |
/* We cannot invalidate the computed topological order by moving
   the edge destination block (E->dest) along a fallthru edge.

   We will update dominators here only when redirecting leaves
   an unreachable block; otherwise sel_redirect_edge_and_branch
   will take care of it.  */
3776 | if (e->dest != bb |
3777 | && single_pred_p (e->dest)) |
3778 | dom_bbs.safe_push (e->dest); |
3779 | sel_redirect_edge_and_branch (e, succ_bb); |
3780 | rescan_p = true; |
3781 | break; |
3782 | } |
3783 | /* If the edge is fallthru, but PRED_BB ends in a conditional jump |
3784 | to BB (so there is no non-fallthru edge from PRED_BB to BB), we |
3785 | still have to adjust it. */ |
3786 | else if (single_succ_p (pred_bb) && any_condjump_p (BB_END (pred_bb))) |
3787 | { |
3788 | /* If possible, try to remove the unneeded conditional jump. */ |
3789 | if (INSN_SCHED_TIMES (BB_END (pred_bb)) == 0 |
3790 | && !IN_CURRENT_FENCE_P (BB_END (pred_bb))) |
3791 | { |
3792 | if (!sel_remove_insn (BB_END (pred_bb), false, false)) |
3793 | tidy_fallthru_edge (e); |
3794 | } |
3795 | else |
3796 | sel_redirect_edge_and_branch (e, succ_bb); |
3797 | rescan_p = true; |
3798 | break; |
3799 | } |
3800 | } |
3801 | } |
3802 | |
3803 | if (can_merge_blocks_p (bb->prev_bb, bb)) |
3804 | sel_merge_blocks (bb->prev_bb, bb); |
3805 | else |
3806 | { |
3807 | /* This is a block without fallthru predecessor. Just delete it. */ |
3808 | gcc_assert (note_bb); |
3809 | move_bb_info (note_bb, bb); |
3810 | remove_empty_bb (bb, true); |
3811 | } |
3812 | |
3813 | if (!dom_bbs.is_empty ()) |
3814 | { |
3815 | dom_bbs.safe_push (succ_bb); |
3816 | iterate_fix_dominators (CDI_DOMINATORS, dom_bbs, false); |
3817 | dom_bbs.release (); |
3818 | } |
3819 | |
3820 | return true; |
3821 | } |
3822 | |
/* Tidy the control flow after we have removed the original insn from
   XBB.  Return true if we have removed some blocks.  When FULL_TIDYING
   is true, also try to optimize control flow on non-empty blocks.  */
3826 | bool |
3827 | tidy_control_flow (basic_block xbb, bool full_tidying) |
3828 | { |
3829 | bool changed = true; |
3830 | insn_t first, last; |
3831 | |
3832 | /* First check whether XBB is empty. */ |
3833 | changed = maybe_tidy_empty_bb (xbb); |
3834 | if (changed || !full_tidying) |
3835 | return changed; |
3836 | |
/* Check if there is an unnecessary jump left after the insn.  */
3838 | if (bb_has_removable_jump_to_p (xbb, xbb->next_bb) |
3839 | && INSN_SCHED_TIMES (BB_END (xbb)) == 0 |
3840 | && !IN_CURRENT_FENCE_P (BB_END (xbb))) |
3841 | { |
3842 | if (sel_remove_insn (BB_END (xbb), false, false)) |
3843 | return true; |
3844 | tidy_fallthru_edge (EDGE_SUCC (xbb, 0)); |
3845 | } |
3846 | |
3847 | first = sel_bb_head (xbb); |
3848 | last = sel_bb_end (xbb); |
3849 | if (MAY_HAVE_DEBUG_INSNS) |
3850 | { |
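/* Skip leading and trailing debug insns and notes when looking for
   the single remaining insn in the block.  */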
3851 | if (first != last && DEBUG_INSN_P (first)) |
3852 | do |
3853 | first = NEXT_INSN (first); |
3854 | while (first != last && (DEBUG_INSN_P (first) || NOTE_P (first))); |
3855 | |
3856 | if (first != last && DEBUG_INSN_P (last)) |
3857 | do |
3858 | last = PREV_INSN (last); |
3859 | while (first != last && (DEBUG_INSN_P (last) || NOTE_P (last))); |
3860 | } |
/* Check if an unnecessary jump to the next basic block is left in the
   previous basic block after removing INSN from the stream.
   If so, remove that jump and redirect the edge to the current
   basic block (where INSN was before deletion).  This way,
   when the NOP is deleted several instructions later together with its
   basic block, we will not get a jump to the next instruction, which
   can be harmful.  */
3868 | if (first == last |
3869 | && !sel_bb_empty_p (xbb) |
3870 | && INSN_NOP_P (last) |
3871 | /* Flow goes fallthru from current block to the next. */ |
3872 | && EDGE_COUNT (xbb->succs) == 1 |
3873 | && (EDGE_SUCC (xbb, 0)->flags & EDGE_FALLTHRU) |
3874 | /* When successor is an EXIT block, it may not be the next block. */ |
3875 | && single_succ (xbb) != EXIT_BLOCK_PTR_FOR_FN (cfun) |
3876 | /* And unconditional jump in previous basic block leads to |
3877 | next basic block of XBB and this jump can be safely removed. */ |
3878 | && in_current_region_p (xbb->prev_bb) |
3879 | && bb_has_removable_jump_to_p (xbb->prev_bb, xbb->next_bb) |
3880 | && INSN_SCHED_TIMES (BB_END (xbb->prev_bb)) == 0 |
3881 | /* Also this jump is not at the scheduling boundary. */ |
3882 | && !IN_CURRENT_FENCE_P (BB_END (xbb->prev_bb))) |
3883 | { |
3884 | bool recompute_toporder_p; |
3885 | /* Clear data structures of jump - jump itself will be removed |
3886 | by sel_redirect_edge_and_branch. */ |
3887 | clear_expr (INSN_EXPR (BB_END (xbb->prev_bb))); |
3888 | recompute_toporder_p |
3889 | = sel_redirect_edge_and_branch (EDGE_SUCC (xbb->prev_bb, 0), xbb); |
3890 | |
3891 | gcc_assert (EDGE_SUCC (xbb->prev_bb, 0)->flags & EDGE_FALLTHRU); |
3892 | |
/* It can turn out that, after removing the unused jump, the basic block
   that contained that jump becomes empty too.  In such a case
   remove it too.  */
3896 | if (sel_bb_empty_p (xbb->prev_bb)) |
3897 | changed = maybe_tidy_empty_bb (xbb->prev_bb); |
3898 | if (recompute_toporder_p) |
3899 | sel_recompute_toporder (); |
3900 | } |
3901 | |
3902 | /* TODO: use separate flag for CFG checking. */ |
3903 | if (flag_checking) |
3904 | { |
3905 | verify_backedges (); |
3906 | verify_dominators (CDI_DOMINATORS); |
3907 | } |
3908 | |
3909 | return changed; |
3910 | } |
3911 | |
3912 | /* Purge meaningless empty blocks in the middle of a region. */ |
3913 | void |
3914 | purge_empty_blocks (void) |
3915 | { |
3916 | int i; |
3917 | |
3918 | /* Do not attempt to delete the first basic block in the region. */ |
3919 | for (i = 1; i < current_nr_blocks; ) |
3920 | { |
3921 | basic_block b = BASIC_BLOCK_FOR_FN (cfun, BB_TO_BLOCK (i)); |
3922 | |
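/* If B was removed, the block with index I is now a different one,
   so process it again without advancing I.  */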
3923 | if (maybe_tidy_empty_bb (b)) |
3924 | continue; |
3925 | |
3926 | i++; |
3927 | } |
3928 | } |
3929 | |
/* Rip INSN off the insn stream.  When ONLY_DISCONNECT is true,
   do not delete the insn's data, because it will be re-emitted later.
   Return true if we have removed some blocks afterwards.  */
3933 | bool |
3934 | sel_remove_insn (insn_t insn, bool only_disconnect, bool full_tidying) |
3935 | { |
3936 | basic_block bb = BLOCK_FOR_INSN (insn); |
3937 | |
3938 | gcc_assert (INSN_IN_STREAM_P (insn)); |
3939 | |
3940 | if (DEBUG_INSN_P (insn) && BB_AV_SET_VALID_P (bb)) |
3941 | { |
3942 | expr_t expr; |
3943 | av_set_iterator i; |
3944 | |
3945 | /* When we remove a debug insn that is head of a BB, it remains |
3946 | in the AV_SET of the block, but it shouldn't. */ |
3947 | FOR_EACH_EXPR_1 (expr, i, &BB_AV_SET (bb)) |
3948 | if (EXPR_INSN_RTX (expr) == insn) |
3949 | { |
3950 | av_set_iter_remove (&i); |
3951 | break; |
3952 | } |
3953 | } |
3954 | |
3955 | if (only_disconnect) |
3956 | remove_insn (insn); |
3957 | else |
3958 | { |
3959 | delete_insn (insn); |
3960 | clear_expr (INSN_EXPR (insn)); |
3961 | } |
3962 | |
/* It is necessary to NULL these fields in case we are going to re-insert
   INSN into the insn stream, as will usually happen in the ONLY_DISCONNECT
   case, but also for NOPs that we will return to the nop pool.  */
3966 | SET_PREV_INSN (insn) = NULL_RTX; |
3967 | SET_NEXT_INSN (insn) = NULL_RTX; |
3968 | set_block_for_insn (insn, NULL); |
3969 | |
3970 | return tidy_control_flow (bb, full_tidying); |
3971 | } |
3972 | |
3973 | /* Estimate number of the insns in BB. */ |
3974 | static int |
3975 | sel_estimate_number_of_insns (basic_block bb) |
3976 | { |
3977 | int res = 0; |
3978 | insn_t insn = NEXT_INSN (BB_HEAD (bb)), next_tail = NEXT_INSN (BB_END (bb)); |
3979 | |
3980 | for (; insn != next_tail; insn = NEXT_INSN (insn)) |
3981 | if (NONDEBUG_INSN_P (insn)) |
3982 | res++; |
3983 | |
3984 | return res; |
3985 | } |
3986 | |
3987 | /* We don't need separate luids for notes or labels. */ |
3988 | static int |
3989 | sel_luid_for_non_insn (rtx x) |
3990 | { |
3991 | gcc_assert (NOTE_P (x) || LABEL_P (x)); |
3992 | |
3993 | return -1; |
3994 | } |
3995 | |
3996 | /* Find the proper seqno for inserting at INSN by successors. |
3997 | Return -1 if no successors with positive seqno exist. */ |
3998 | static int |
3999 | get_seqno_by_succs (rtx_insn *insn) |
4000 | { |
4001 | basic_block bb = BLOCK_FOR_INSN (insn); |
4002 | rtx_insn *tmp = insn, *end = BB_END (bb); |
4003 | int seqno; |
4004 | insn_t succ = NULL; |
4005 | succ_iterator si; |
4006 | |
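/* First see if there is a real insn after INSN within the same block;
   if so, its seqno will do.  */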
4007 | while (tmp != end) |
4008 | { |
4009 | tmp = NEXT_INSN (tmp); |
4010 | if (INSN_P (tmp)) |
4011 | return INSN_SEQNO (tmp); |
4012 | } |
4013 | |
4014 | seqno = INT_MAX; |
4015 | |
4016 | FOR_EACH_SUCC_1 (succ, si, end, SUCCS_NORMAL) |
4017 | if (INSN_SEQNO (succ) > 0) |
4018 | seqno = MIN (seqno, INSN_SEQNO (succ)); |
4019 | |
4020 | if (seqno == INT_MAX) |
4021 | return -1; |
4022 | |
4023 | return seqno; |
4024 | } |
4025 | |
4026 | /* Compute seqno for INSN by its preds or succs. Use OLD_SEQNO to compute |
4027 | seqno in corner cases. */ |
4028 | static int |
4029 | get_seqno_for_a_jump (insn_t insn, int old_seqno) |
4030 | { |
4031 | int seqno; |
4032 | |
4033 | gcc_assert (INSN_SIMPLEJUMP_P (insn)); |
4034 | |
4035 | if (!sel_bb_head_p (insn)) |
4036 | seqno = INSN_SEQNO (PREV_INSN (insn)); |
4037 | else |
4038 | { |
4039 | basic_block bb = BLOCK_FOR_INSN (insn); |
4040 | |
4041 | if (single_pred_p (bb) |
4042 | && !in_current_region_p (single_pred (bb))) |
4043 | { |
4044 | /* We can have preds outside a region when splitting edges |
4045 | for pipelining of an outer loop. Use succ instead. |
4046 | There should be only one of them. */ |
4047 | insn_t succ = NULL; |
4048 | succ_iterator si; |
4049 | bool first = true; |
4050 | |
4051 | gcc_assert (flag_sel_sched_pipelining_outer_loops |
4052 | && current_loop_nest); |
4053 | FOR_EACH_SUCC_1 (succ, si, insn, |
4054 | SUCCS_NORMAL | SUCCS_SKIP_TO_LOOP_EXITS) |
4055 | { |
4056 | gcc_assert (first); |
4057 | first = false; |
4058 | } |
4059 | |
4060 | gcc_assert (succ != NULL); |
4061 | seqno = INSN_SEQNO (succ); |
4062 | } |
4063 | else |
4064 | { |
4065 | insn_t *preds; |
4066 | int n; |
4067 | |
4068 | cfg_preds (BLOCK_FOR_INSN (insn), &preds, &n); |
4069 | |
4070 | gcc_assert (n > 0); |
/* For one predecessor, use the simple method.  */
4072 | if (n == 1) |
4073 | seqno = INSN_SEQNO (preds[0]); |
4074 | else |
4075 | seqno = get_seqno_by_preds (insn); |
4076 | |
4077 | free (preds); |
4078 | } |
4079 | } |
4080 | |
4081 | /* We were unable to find a good seqno among preds. */ |
4082 | if (seqno < 0) |
4083 | seqno = get_seqno_by_succs (insn); |
4084 | |
4085 | if (seqno < 0) |
4086 | { |
/* The only case where this can legally happen is when the only
   unscheduled insn was a conditional jump that got removed and turned
   into this unconditional one.  Initialize from the old seqno
   of that jump passed down to here.  */
4091 | seqno = old_seqno; |
4092 | } |
4093 | |
4094 | gcc_assert (seqno >= 0); |
4095 | return seqno; |
4096 | } |
4097 | |
4098 | /* Find the proper seqno for inserting at INSN. Returns -1 if no predecessors |
4099 | with positive seqno exist. */ |
4100 | int |
4101 | get_seqno_by_preds (rtx_insn *insn) |
4102 | { |
4103 | basic_block bb = BLOCK_FOR_INSN (insn); |
4104 | rtx_insn *tmp = insn, *head = BB_HEAD (bb); |
4105 | insn_t *preds; |
4106 | int n, i, seqno; |
4107 | |
4108 | /* Loop backwards from INSN to HEAD including both. */ |
4109 | while (1) |
4110 | { |
4111 | if (INSN_P (tmp)) |
4112 | return INSN_SEQNO (tmp); |
4113 | if (tmp == head) |
4114 | break; |
4115 | tmp = PREV_INSN (tmp); |
4116 | } |
4117 | |
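/* No real insn was found before INSN in this block -- take the maximal
   seqno among the block's predecessors.  */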
4118 | cfg_preds (bb, &preds, &n); |
4119 | for (i = 0, seqno = -1; i < n; i++) |
4120 | seqno = MAX (seqno, INSN_SEQNO (preds[i])); |
4121 | |
4122 | return seqno; |
4123 | } |
4124 | |
4125 | |
4126 | |
4127 | /* Extend pass-scope data structures for basic blocks. */ |
4128 | void |
4129 | sel_extend_global_bb_info (void) |
4130 | { |
4131 | sel_global_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
4132 | } |
4133 | |
4134 | /* Extend region-scope data structures for basic blocks. */ |
4135 | static void |
4136 | extend_region_bb_info (void) |
4137 | { |
4138 | sel_region_bb_info.safe_grow_cleared (last_basic_block_for_fn (cfun)); |
4139 | } |
4140 | |
4141 | /* Extend all data structures to fit for all basic blocks. */ |
4142 | static void |
4143 | extend_bb_info (void) |
4144 | { |
4145 | sel_extend_global_bb_info (); |
4146 | extend_region_bb_info (); |
4147 | } |
4148 | |
4149 | /* Finalize pass-scope data structures for basic blocks. */ |
4150 | void |
4151 | sel_finish_global_bb_info (void) |
4152 | { |
4153 | sel_global_bb_info.release (); |
4154 | } |
4155 | |
4156 | /* Finalize region-scope data structures for basic blocks. */ |
4157 | static void |
4158 | finish_region_bb_info (void) |
4159 | { |
4160 | sel_region_bb_info.release (); |
4161 | } |
4162 | |
4163 | |
4164 | /* Data for each insn in current region. */ |
4165 | vec<sel_insn_data_def> s_i_d; |
4166 | |
4167 | /* Extend data structures for insns from current region. */ |
4168 | static void |
4169 | extend_insn_data (void) |
4170 | { |
4171 | int reserve; |
4172 | |
4173 | sched_extend_target (); |
4174 | sched_deps_init (false); |
4175 | |
4176 | /* Extend data structures for insns from current region. */ |
4177 | reserve = (sched_max_luid + 1 - s_i_d.length ()); |
4178 | if (reserve > 0 && ! s_i_d.space (reserve)) |
4179 | { |
4180 | int size; |
4181 | |
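/* Grow to about 1.5 * sched_max_luid, but cap the extra headroom
   at 1024 entries for large functions.  */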
4182 | if (sched_max_luid / 2 > 1024) |
4183 | size = sched_max_luid + 1024; |
4184 | else |
4185 | size = 3 * sched_max_luid / 2; |
4186 | |
4188 | s_i_d.safe_grow_cleared (size); |
4189 | } |
4190 | } |
4191 | |
4192 | /* Finalize data structures for insns from current region. */ |
4193 | static void |
4194 | finish_insns (void) |
4195 | { |
4196 | unsigned i; |
4197 | |
/* Clear here all dependence contexts that may have been left over from insns
   that were removed during the scheduling.  */
4200 | for (i = 0; i < s_i_d.length (); i++) |
4201 | { |
4202 | sel_insn_data_def *sid_entry = &s_i_d[i]; |
4203 | |
4204 | if (sid_entry->live) |
4205 | return_regset_to_pool (sid_entry->live); |
4206 | if (sid_entry->analyzed_deps) |
4207 | { |
4208 | BITMAP_FREE (sid_entry->analyzed_deps); |
4209 | BITMAP_FREE (sid_entry->found_deps); |
4210 | htab_delete (sid_entry->transformed_insns); |
4211 | free_deps (&sid_entry->deps_context); |
4212 | } |
4213 | if (EXPR_VINSN (&sid_entry->expr)) |
4214 | { |
4215 | clear_expr (&sid_entry->expr); |
4216 | |
4217 | /* Also, clear CANT_MOVE bit here, because we really don't want it |
4218 | to be passed to the next region. */ |
4219 | CANT_MOVE_BY_LUID (i) = 0; |
4220 | } |
4221 | } |
4222 | |
4223 | s_i_d.release (); |
4224 | } |
4225 | |
4226 | /* A proxy to pass initialization data to init_insn (). */ |
4227 | static sel_insn_data_def _insn_init_ssid; |
4228 | static sel_insn_data_t insn_init_ssid = &_insn_init_ssid; |
4229 | |
4230 | /* If true create a new vinsn. Otherwise use the one from EXPR. */ |
4231 | static bool insn_init_create_new_vinsn_p; |
4232 | |
4233 | /* Set all necessary data for initialization of the new insn[s]. */ |
4234 | static expr_t |
4235 | set_insn_init (expr_t expr, vinsn_t vi, int seqno) |
4236 | { |
4237 | expr_t x = &insn_init_ssid->expr; |
4238 | |
4239 | copy_expr_onside (x, expr); |
4240 | if (vi != NULL) |
4241 | { |
4242 | insn_init_create_new_vinsn_p = false; |
4243 | change_vinsn_in_expr (x, vi); |
4244 | } |
4245 | else |
4246 | insn_init_create_new_vinsn_p = true; |
4247 | |
4248 | insn_init_ssid->seqno = seqno; |
4249 | return x; |
4250 | } |
4251 | |
4252 | /* Init data for INSN. */ |
4253 | static void |
4254 | init_insn_data (insn_t insn) |
4255 | { |
4256 | expr_t expr; |
4257 | sel_insn_data_t ssid = insn_init_ssid; |
4258 | |
4259 | /* The fields mentioned below are special and hence are not being |
4260 | propagated to the new insns. */ |
4261 | gcc_assert (!ssid->asm_p && ssid->sched_next == NULL |
4262 | && !ssid->after_stall_p && ssid->sched_cycle == 0); |
4263 | gcc_assert (INSN_P (insn) && INSN_LUID (insn) > 0); |
4264 | |
4265 | expr = INSN_EXPR (insn); |
4266 | copy_expr (expr, &ssid->expr); |
4267 | prepare_insn_expr (insn, ssid->seqno); |
4268 | |
4269 | if (insn_init_create_new_vinsn_p) |
4270 | change_vinsn_in_expr (expr, vinsn_create (insn, init_insn_force_unique_p)); |
4271 | |
4272 | if (first_time_insn_init (insn)) |
4273 | init_first_time_insn_data (insn); |
4274 | } |
4275 | |
4276 | /* This is used to initialize spurious jumps generated by |
4277 | sel_redirect_edge (). OLD_SEQNO is used for initializing seqnos |
4278 | in corner cases within get_seqno_for_a_jump. */ |
4279 | static void |
4280 | init_simplejump_data (insn_t insn, int old_seqno) |
4281 | { |
4282 | init_expr (INSN_EXPR (insn), vinsn_create (insn, false), 0, |
4283 | REG_BR_PROB_BASE, 0, 0, 0, 0, 0, 0, |
4284 | vNULL, true, false, false, |
4285 | false, true); |
4286 | INSN_SEQNO (insn) = get_seqno_for_a_jump (insn, old_seqno); |
4287 | init_first_time_insn_data (insn); |
4288 | } |
4289 | |
4290 | /* Perform deferred initialization of insns. This is used to process |
4291 | a new jump that may be created by redirect_edge. OLD_SEQNO is used |
4292 | for initializing simplejumps in init_simplejump_data. */ |
4293 | static void |
4294 | sel_init_new_insn (insn_t insn, int flags, int old_seqno) |
4295 | { |
4296 | /* We create data structures for bb when the first insn is emitted in it. */ |
4297 | if (INSN_P (insn) |
4298 | && INSN_IN_STREAM_P (insn) |
4299 | && insn_is_the_only_one_in_bb_p (insn)) |
4300 | { |
4301 | extend_bb_info (); |
4302 | create_initial_data_sets (BLOCK_FOR_INSN (insn)); |
4303 | } |
4304 | |
4305 | if (flags & INSN_INIT_TODO_LUID) |
4306 | { |
4307 | sched_extend_luids (); |
4308 | sched_init_insn_luid (insn); |
4309 | } |
4310 | |
4311 | if (flags & INSN_INIT_TODO_SSID) |
4312 | { |
4313 | extend_insn_data (); |
4314 | init_insn_data (insn); |
4315 | clear_expr (&insn_init_ssid->expr); |
4316 | } |
4317 | |
4318 | if (flags & INSN_INIT_TODO_SIMPLEJUMP) |
4319 | { |
4320 | extend_insn_data (); |
4321 | init_simplejump_data (insn, old_seqno); |
4322 | } |
4323 | |
4324 | gcc_assert (CONTAINING_RGN (BLOCK_NUM (insn)) |
4325 | == CONTAINING_RGN (BB_TO_BLOCK (0))); |
4326 | } |
4327 | |
4328 | |
4329 | /* Functions to init/finish work with lv sets. */ |
4330 | |
4331 | /* Init BB_LV_SET of BB from DF_LR_IN set of BB. */ |
4332 | static void |
4333 | init_lv_set (basic_block bb) |
4334 | { |
4335 | gcc_assert (!BB_LV_SET_VALID_P (bb)); |
4336 | |
4337 | BB_LV_SET (bb) = get_regset_from_pool (); |
4338 | COPY_REG_SET (BB_LV_SET (bb), DF_LR_IN (bb)); |
4339 | BB_LV_SET_VALID_P (bb) = true; |
4340 | } |
4341 | |
4342 | /* Copy liveness information to BB from FROM_BB. */ |
4343 | static void |
4344 | copy_lv_set_from (basic_block bb, basic_block from_bb) |
4345 | { |
4346 | gcc_assert (!BB_LV_SET_VALID_P (bb)); |
4347 | |
4348 | COPY_REG_SET (BB_LV_SET (bb), BB_LV_SET (from_bb)); |
4349 | BB_LV_SET_VALID_P (bb) = true; |
4350 | } |
4351 | |
4352 | /* Initialize lv set of all bb headers. */ |
4353 | void |
4354 | init_lv_sets (void) |
4355 | { |
4356 | basic_block bb; |
4357 | |
/* Initialization of LV sets.  */
4359 | FOR_EACH_BB_FN (bb, cfun) |
4360 | init_lv_set (bb); |
4361 | |
4362 | /* Don't forget EXIT_BLOCK. */ |
4363 | init_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
4364 | } |
4365 | |
4366 | /* Release lv set of HEAD. */ |
4367 | static void |
4368 | free_lv_set (basic_block bb) |
4369 | { |
4370 | gcc_assert (BB_LV_SET (bb) != NULL); |
4371 | |
4372 | return_regset_to_pool (BB_LV_SET (bb)); |
4373 | BB_LV_SET (bb) = NULL; |
4374 | BB_LV_SET_VALID_P (bb) = false; |
4375 | } |
4376 | |
4377 | /* Finalize lv sets of all bb headers. */ |
4378 | void |
4379 | free_lv_sets (void) |
4380 | { |
4381 | basic_block bb; |
4382 | |
4383 | /* Don't forget EXIT_BLOCK. */ |
4384 | free_lv_set (EXIT_BLOCK_PTR_FOR_FN (cfun)); |
4385 | |
4386 | /* Free LV sets. */ |
4387 | FOR_EACH_BB_FN (bb, cfun) |
4388 | if (BB_LV_SET (bb)) |
4389 | free_lv_set (bb); |
4390 | } |
4391 | |
4392 | /* Mark AV_SET for BB as invalid, so this set will be updated the next time |
4393 | compute_av() processes BB. This function is called when creating new basic |
4394 | blocks, as well as for blocks (either new or existing) where new jumps are |
4395 | created when the control flow is being updated. */ |
4396 | static void |
4397 | invalidate_av_set (basic_block bb) |
4398 | { |
4399 | BB_AV_LEVEL (bb) = -1; |
4400 | } |
4401 | |
4402 | /* Create initial data sets for BB (they will be invalid). */ |
4403 | static void |
4404 | create_initial_data_sets (basic_block bb) |
4405 | { |
4406 | if (BB_LV_SET (bb)) |
4407 | BB_LV_SET_VALID_P (bb) = false; |
4408 | else |
4409 | BB_LV_SET (bb) = get_regset_from_pool (); |
4410 | invalidate_av_set (bb); |
4411 | } |
4412 | |
4413 | /* Free av set of BB. */ |
4414 | static void |
4415 | free_av_set (basic_block bb) |
4416 | { |
4417 | av_set_clear (&BB_AV_SET (bb)); |
4418 | BB_AV_LEVEL (bb) = 0; |
4419 | } |
4420 | |
4421 | /* Free data sets of BB. */ |
4422 | void |
4423 | free_data_sets (basic_block bb) |
4424 | { |
4425 | free_lv_set (bb); |
4426 | free_av_set (bb); |
4427 | } |
4428 | |
4429 | /* Exchange data sets of TO and FROM. */ |
4430 | void |
4431 | exchange_data_sets (basic_block to, basic_block from) |
4432 | { |
4433 | /* Exchange lv sets of TO and FROM. */ |
4434 | std::swap (BB_LV_SET (from), BB_LV_SET (to)); |
4435 | std::swap (BB_LV_SET_VALID_P (from), BB_LV_SET_VALID_P (to)); |
4436 | |
4437 | /* Exchange av sets of TO and FROM. */ |
4438 | std::swap (BB_AV_SET (from), BB_AV_SET (to)); |
4439 | std::swap (BB_AV_LEVEL (from), BB_AV_LEVEL (to)); |
4440 | } |
4441 | |
4442 | /* Copy data sets of FROM to TO. */ |
4443 | void |
4444 | copy_data_sets (basic_block to, basic_block from) |
4445 | { |
4446 | gcc_assert (!BB_LV_SET_VALID_P (to) && !BB_AV_SET_VALID_P (to)); |
4447 | gcc_assert (BB_AV_SET (to) == NULL); |
4448 | |
4449 | BB_AV_LEVEL (to) = BB_AV_LEVEL (from); |
4450 | BB_LV_SET_VALID_P (to) = BB_LV_SET_VALID_P (from); |
4451 | |
4452 | if (BB_AV_SET_VALID_P (from)) |
4453 | { |
4454 | BB_AV_SET (to) = av_set_copy (BB_AV_SET (from)); |
4455 | } |
4456 | if (BB_LV_SET_VALID_P (from)) |
4457 | { |
4458 | gcc_assert (BB_LV_SET (to) != NULL); |
4459 | COPY_REG_SET (BB_LV_SET (to), BB_LV_SET (from)); |
4460 | } |
4461 | } |
4462 | |
4463 | /* Return an av set for INSN, if any. */ |
4464 | av_set_t |
4465 | get_av_set (insn_t insn) |
4466 | { |
4467 | av_set_t av_set; |
4468 | |
4469 | gcc_assert (AV_SET_VALID_P (insn)); |
4470 | |
4471 | if (sel_bb_head_p (insn)) |
4472 | av_set = BB_AV_SET (BLOCK_FOR_INSN (insn)); |
4473 | else |
4474 | av_set = NULL; |
4475 | |
4476 | return av_set; |
4477 | } |
4478 | |
4479 | /* Implementation of AV_LEVEL () macro. Return AV_LEVEL () of INSN. */ |
4480 | int |
4481 | get_av_level (insn_t insn) |
4482 | { |
4483 | int av_level; |
4484 | |
4485 | gcc_assert (INSN_P (insn)); |
4486 | |
4487 | if (sel_bb_head_p (insn)) |
4488 | av_level = BB_AV_LEVEL (BLOCK_FOR_INSN (insn)); |
4489 | else |
4490 | av_level = INSN_WS_LEVEL (insn); |
4491 | |
4492 | return av_level; |
4493 | } |
4494 | |
4495 | |
4496 | |
4497 | /* Variables to work with control-flow graph. */ |
4498 | |
/* The basic blocks that have already been processed by sched_data_update (),
   but haven't been in sel_add_bb () yet.  */
4501 | static vec<basic_block> last_added_blocks; |
4502 | |
4503 | /* A pool for allocating successor infos. */ |
4504 | static struct |
4505 | { |
4506 | /* A stack for saving succs_info structures. */ |
4507 | struct succs_info *stack; |
4508 | |
4509 | /* Its size. */ |
4510 | int size; |
4511 | |
4512 | /* Top of the stack. */ |
4513 | int top; |
4514 | |
4515 | /* Maximal value of the top. */ |
4516 | int max_top; |
4517 | } succs_info_pool; |
4518 | |
4519 | /* Functions to work with control-flow graph. */ |
4520 | |
/* Return the head insn of BB: the first real insn after the bb note,
   or NULL if BB is empty.  */
4522 | rtx_insn * |
4523 | sel_bb_head (basic_block bb) |
4524 | { |
4525 | rtx_insn *head; |
4526 | |
4527 | if (bb == EXIT_BLOCK_PTR_FOR_FN (cfun)) |
4528 | { |
4529 | gcc_assert (exit_insn != NULL_RTX); |
4530 | head = exit_insn; |
4531 | } |
4532 | else |
4533 | { |
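/* Take the first non-note insn after the bb note; if it is a barrier
   or belongs to another block, BB has no insns of its own.  */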
4534 | rtx_note *note = bb_note (bb); |
4535 | head = next_nonnote_insn (note); |
4536 | |
4537 | if (head && (BARRIER_P (head) || BLOCK_FOR_INSN (head) != bb)) |
4538 | head = NULL; |
4539 | } |
4540 | |
4541 | return head; |
4542 | } |
4543 | |
4544 | /* Return true if INSN is a basic block header. */ |
4545 | bool |
4546 | sel_bb_head_p (insn_t insn) |
4547 | { |
4548 | return sel_bb_head (BLOCK_FOR_INSN (insn)) == insn; |
4549 | } |
4550 | |
4551 | /* Return last insn of BB. */ |
4552 | rtx_insn * |
4553 | sel_bb_end (basic_block bb) |
4554 | { |
4555 | if (sel_bb_empty_p (bb)) |
4556 | return NULL; |
4557 | |
4558 | gcc_assert (bb != EXIT_BLOCK_PTR_FOR_FN (cfun)); |
4559 | |
4560 | return BB_END (bb); |
4561 | } |
4562 | |
4563 | /* Return true if INSN is the last insn in its basic block. */ |
4564 | bool |
4565 | sel_bb_end_p (insn_t insn) |
4566 | { |
4567 | return insn == sel_bb_end (BLOCK_FOR_INSN (insn)); |
4568 | } |
4569 | |
/* Return true if BB consists of a single NOTE_INSN_BASIC_BLOCK.  */
4571 | bool |
4572 | sel_bb_empty_p (basic_block bb) |
4573 | { |
4574 | return sel_bb_head (bb) == NULL; |
4575 | } |
4576 | |
4577 | /* True when BB belongs to the current scheduling region. */ |
4578 | bool |
4579 | in_current_region_p (basic_block bb) |
4580 | { |
4581 | if (bb->index < NUM_FIXED_BLOCKS) |
4582 | return false; |
4583 | |
4584 | return CONTAINING_RGN (bb->index) == CONTAINING_RGN (BB_TO_BLOCK (0)); |
4585 | } |
4586 | |
4587 | /* Return the block which is a fallthru bb of a conditional jump JUMP. */ |
4588 | basic_block |
4589 | fallthru_bb_of_jump (const rtx_insn *jump) |
4590 | { |
4591 | if (!JUMP_P (jump)) |
4592 | return NULL; |
4593 | |
4594 | if (!any_condjump_p (jump)) |
4595 | return NULL; |
4596 | |
/* A basic block that ends with a conditional jump may still have one successor
   (and be followed by a barrier); we are not interested in such blocks.  */
4599 | if (single_succ_p (BLOCK_FOR_INSN (jump))) |
4600 | return NULL; |
4601 | |
4602 | return FALLTHRU_EDGE (BLOCK_FOR_INSN (jump))->dest; |
4603 | } |
4604 | |
/* Remove all notes from BB and save them in BB_NOTE_LIST.  */
4606 | static void |
4607 | init_bb (basic_block bb) |
4608 | { |
4609 | remove_notes (bb_note (bb), BB_END (bb)); |
4610 | BB_NOTE_LIST (bb) = note_list; |
4611 | } |
4612 | |
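/* Initialize the blocks in BBS for scheduling: extend per-bb data
   structures and stash away the notes of each block.  */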
4613 | void |
4614 | sel_init_bbs (bb_vec_t bbs) |
4615 | { |
4616 | const struct sched_scan_info_def ssi = |
4617 | { |
4618 | extend_bb_info, /* extend_bb */ |
4619 | init_bb, /* init_bb */ |
4620 | NULL, /* extend_insn */ |
4621 | NULL /* init_insn */ |
4622 | }; |
4623 | |
4624 | sched_scan (&ssi, bbs); |
4625 | } |
4626 | |
4627 | /* Restore notes for the whole region. */ |
4628 | static void |
4629 | sel_restore_notes (void) |
4630 | { |
4631 | int bb; |
4632 | insn_t insn; |
4633 | |
4634 | for (bb = 0; bb < current_nr_blocks; bb++) |
4635 | { |
4636 | basic_block first, last; |
4637 | |
4638 | first = EBB_FIRST_BB (bb); |
4639 | last = EBB_LAST_BB (bb)->next_bb; |
4640 | |
4641 | do |
4642 | { |
4643 | note_list = BB_NOTE_LIST (first); |
4644 | restore_other_notes (NULL, first); |
4645 | BB_NOTE_LIST (first) = NULL; |
4646 | |
4647 | FOR_BB_INSNS (first, insn) |
4648 | if (NONDEBUG_INSN_P (insn)) |
4649 | reemit_notes (insn); |
4650 | |
4651 | first = first->next_bb; |
4652 | } |
4653 | while (first != last); |
4654 | } |
4655 | } |
4656 | |
4657 | /* Free per-bb data structures. */ |
4658 | void |
4659 | sel_finish_bbs (void) |
4660 | { |
4661 | sel_restore_notes (); |
4662 | |
4663 | /* Remove current loop preheader from this loop. */ |
4664 | if (current_loop_nest) |
4665 | sel_remove_loop_preheader (); |
4666 | |
4667 | finish_region_bb_info (); |
4668 | } |
4669 | |
4670 | /* Return true if INSN has a single successor of type FLAGS. */ |
4671 | bool |
4672 | sel_insn_has_single_succ_p (insn_t insn, int flags) |
4673 | { |
4674 | insn_t succ; |
4675 | succ_iterator si; |
4676 | bool first_p = true; |
4677 | |
4678 | FOR_EACH_SUCC_1 (succ, si, insn, flags) |
4679 | { |
4680 | if (first_p) |
4681 | first_p = false; |
4682 | else |
4683 | return false; |
4684 | } |
4685 | |
4686 | return true; |
4687 | } |
4688 | |
/* Allocate a successors info structure from the pool.  */
4690 | static struct succs_info * |
4691 | alloc_succs_info (void) |
4692 | { |
4693 | if (succs_info_pool.top == succs_info_pool.max_top) |
4694 | { |
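/* This pool entry has never been used before -- create its vectors
   lazily.  */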
4695 | int i; |
4696 | |
4697 | if (++succs_info_pool.max_top >= succs_info_pool.size) |
4698 | gcc_unreachable (); |
4699 | |
4700 | i = ++succs_info_pool.top; |
4701 | succs_info_pool.stack[i].succs_ok.create (10); |
4702 | succs_info_pool.stack[i].succs_other.create (10); |
4703 | succs_info_pool.stack[i].probs_ok.create (10); |
4704 | } |
4705 | else |
4706 | succs_info_pool.top++; |
4707 | |
4708 | return &succs_info_pool.stack[succs_info_pool.top]; |
4709 | } |
4710 | |
/* Return the successors info structure SINFO to the pool.  */
4712 | void |
4713 | free_succs_info (struct succs_info * sinfo) |
4714 | { |
4715 | gcc_assert (succs_info_pool.top >= 0 |
4716 | && &succs_info_pool.stack[succs_info_pool.top] == sinfo); |
4717 | succs_info_pool.top--; |
4718 | |
4719 | /* Clear stale info. */ |
4720 | sinfo->succs_ok.block_remove (0, sinfo->succs_ok.length ()); |
4721 | sinfo->succs_other.block_remove (0, sinfo->succs_other.length ()); |
4722 | sinfo->probs_ok.block_remove (0, sinfo->probs_ok.length ()); |
4723 | sinfo->all_prob = 0; |
4724 | sinfo->succs_ok_n = 0; |
4725 | sinfo->all_succs_n = 0; |
4726 | } |
4727 | |
4728 | /* Compute successor info for INSN. FLAGS are the flags passed |
4729 | to the FOR_EACH_SUCC_1 iterator. */ |
4730 | struct succs_info * |
4731 | compute_succs_info (insn_t insn, short flags) |
4732 | { |
4733 | succ_iterator si; |
4734 | insn_t succ; |
4735 | struct succs_info *sinfo = alloc_succs_info (); |
4736 | |
4737 | /* Traverse *all* successors and decide what to do with each. */ |
4738 | FOR_EACH_SUCC_1 (succ, si, insn, SUCCS_ALL) |
4739 | { |
4740 | /* FIXME: this doesn't work for skipping to loop exits, as we don't |
4741 | perform code motion through inner loops. */ |
4742 | short current_flags = si.current_flags & ~SUCCS_SKIP_TO_LOOP_EXITS; |
4743 | |
4744 | if (current_flags & flags) |
4745 | { |
4746 | sinfo->succs_ok.safe_push (succ); |
4747 | sinfo->probs_ok.safe_push ( |
4748 | /* FIXME: Improve calculation when skipping |
4749 | inner loop to exits. */ |
4750 | si.bb_end |
4751 | ? (si.e1->probability.initialized_p () |
4752 | ? si.e1->probability.to_reg_br_prob_base () |
4753 | : 0) |
4754 | : REG_BR_PROB_BASE); |
4755 | sinfo->succs_ok_n++; |
4756 | } |
4757 | else |
4758 | sinfo->succs_other.safe_push (succ); |
4759 | |
4760 | /* Compute all_prob. */ |
4761 | if (!si.bb_end) |
4762 | sinfo->all_prob = REG_BR_PROB_BASE; |
4763 | else if (si.e1->probability.initialized_p ()) |
4764 | sinfo->all_prob += si.e1->probability.to_reg_br_prob_base (); |
4765 | |
4766 | sinfo->all_succs_n++; |
4767 | } |
4768 | |
4769 | return sinfo; |
4770 | } |
4771 | |
4772 | /* Return the predecessors of BB in PREDS and their number in N. |
4773 | Empty blocks are skipped. SIZE is used to allocate PREDS. */ |
4774 | static void |
4775 | cfg_preds_1 (basic_block bb, insn_t **preds, int *n, int *size) |
4776 | { |
4777 | edge e; |
4778 | edge_iterator ei; |
4779 | |
4780 | gcc_assert (BLOCK_TO_BB (bb->index) != 0); |
4781 | |
4782 | FOR_EACH_EDGE (e, ei, bb->preds) |
4783 | { |
4784 | basic_block pred_bb = e->src; |
4785 | insn_t bb_end = BB_END (pred_bb); |
4786 | |
4787 | if (!in_current_region_p (pred_bb)) |
4788 | { |
4789 | gcc_assert (flag_sel_sched_pipelining_outer_loops |
4790 | && current_loop_nest); |
4791 | continue; |
4792 | } |
4793 | |
4794 | if (sel_bb_empty_p (pred_bb)) |
4795 | cfg_preds_1 (pred_bb, preds, n, size); |
4796 | else |
4797 | { |
4798 | if (*n == *size) |
4799 | *preds = XRESIZEVEC (insn_t, *preds, |
4800 | (*size = 2 * *size + 1)); |
4801 | (*preds)[(*n)++] = bb_end; |
4802 | } |
4803 | } |
4804 | |
4805 | gcc_assert (*n != 0 |
4806 | || (flag_sel_sched_pipelining_outer_loops |
4807 |
---|