/* Code sinking for trees
   Copyright (C) 2001-2024 Free Software Foundation, Inc.
   Contributed by Daniel Berlin <dan@dberlin.org>

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 3, or (at your option)
any later version.

GCC is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
GNU General Public License for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "gimple-pretty-print.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "gimple-iterator.h"
#include "tree-cfg.h"
#include "cfgloop.h"
#include "tree-eh.h"
#include "tree-ssa-live.h"

/* TODO:
   1. Sinking a store using only scalar promotion (i.e. without moving the
      RHS):

      *q = p;
      p = p + 1;
      if (something)
        *q = <not p>;
      else
        y = *q;

      should become

      sinktemp = p;
      p = p + 1;
      if (something)
        *q = <not p>;
      else
        {
          *q = sinktemp;
          y = *q;
        }
      Store copy propagation will take care of the store elimination above.

   2. Sinking using Partial Dead Code Elimination.  */


static struct
{
  /* The number of statements sunk down the flowgraph by code sinking.  */
  int sunk;

  /* The number of stores commoned and sunk down by store commoning.  */
  int commoned;
} sink_stats;


/* Given a PHI and one of its arguments (DEF), find the edge for
   that argument and return its source block.  If the argument occurs
   twice in the PHI node, return NULL.  */
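
/* As a hypothetical illustration (SSA names and block numbers invented
   here, not part of the original comment): for
     x_3 = PHI <a_1 (3), b_2 (4)>
   find_bb_for_arg (phi, a_1) returns basic block 3, whereas for
     x_3 = PHI <a_1 (3), a_1 (4)>
   a_1 occurs twice and NULL is returned.  */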

static basic_block
find_bb_for_arg (gphi *phi, tree def)
{
  size_t i;
  bool foundone = false;
  basic_block result = NULL;
  for (i = 0; i < gimple_phi_num_args (phi); i++)
    if (PHI_ARG_DEF (phi, i) == def)
      {
        if (foundone)
          return NULL;
        foundone = true;
        result = gimple_phi_arg_edge (phi, i)->src;
      }
  return result;
}

/* When the first immediate use is in a statement, then return true if all
   immediate uses of DEF_P are in the same statement.  We could also handle
   the case where the first immediate use is in a phi node, and all the
   other uses are in phis in the same basic block, but this requires some
   expensive checking later (you have to make sure no def/vdef in the
   statement occurs for multiple edges in the various phi nodes it's used
   in, so that you only have one place you can sink it to).  */
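
/* For instance (SSA names hypothetical), given
     a_2 = b_1 + 1;
     c_3 = a_2 * a_2;
   both uses of a_2 occur in the single statement defining c_3, so the
   predicate holds; one more use of a_2 in a different statement would
   make it fail.  Debug statements are ignored.  */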

static bool
all_immediate_uses_same_place (def_operand_p def_p)
{
  tree var = DEF_FROM_PTR (def_p);
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  gimple *firstuse = NULL;
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      if (is_gimple_debug (USE_STMT (use_p)))
        continue;
      if (firstuse == NULL)
        firstuse = USE_STMT (use_p);
      else if (firstuse != USE_STMT (use_p))
        return false;
    }

  return true;
}

/* Find the nearest common dominator of all of the immediate uses of DEF_P,
   setting *DEBUG_STMTS if a debug-stmt use is seen.  */
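
/* Sketch of the intent (block numbers hypothetical): if DEF_P is used in
   blocks 5 and 7 and block 3 is the deepest block dominating both, block 3
   is returned.  A use in a PHI counts as a use in the corresponding
   predecessor block, since that is where code would have to be placed.  */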

static basic_block
nearest_common_dominator_of_uses (def_operand_p def_p, bool *debug_stmts)
{
  tree var = DEF_FROM_PTR (def_p);
  auto_bitmap blocks;
  basic_block commondom;
  unsigned int j;
  bitmap_iterator bi;
  imm_use_iterator imm_iter;
  use_operand_p use_p;

  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, var)
    {
      gimple *usestmt = USE_STMT (use_p);
      basic_block useblock;

      if (gphi *phi = dyn_cast <gphi *> (usestmt))
        {
          int idx = PHI_ARG_INDEX_FROM_USE (use_p);

          useblock = gimple_phi_arg_edge (phi, idx)->src;
        }
      else if (is_gimple_debug (usestmt))
        {
          *debug_stmts = true;
          continue;
        }
      else
        {
          useblock = gimple_bb (usestmt);
        }

      /* Short circuit.  Nothing dominates the entry block.  */
      if (useblock == ENTRY_BLOCK_PTR_FOR_FN (cfun))
        return NULL;

      bitmap_set_bit (blocks, useblock->index);
    }
  commondom = BASIC_BLOCK_FOR_FN (cfun, bitmap_first_set_bit (blocks));
  EXECUTE_IF_SET_IN_BITMAP (blocks, 0, j, bi)
    commondom = nearest_common_dominator (CDI_DOMINATORS, commondom,
                                          BASIC_BLOCK_FOR_FN (cfun, j));
  return commondom;
}

/* Given EARLY_BB and LATE_BB, two blocks in a path through the dominator
   tree, return the best basic block between them (inclusive) to place
   statements.

   We want the most control dependent block in the shallowest loop nest.

   If the resulting block is in a shallower loop nest, then use it.  Else
   only use the resulting block if it has significantly lower execution
   frequency than EARLY_BB to avoid gratuitous statement movement.  We
   consider statements with VOPS more desirable to move.

   This pass would obviously benefit from PDO as it utilizes block
   frequencies.  It would also benefit from recomputing frequencies
   if profile data is not available since frequencies often get out
   of sync with reality.  */
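
/* A hypothetical example of the heuristic: suppose

     EARLY_BB (loop depth 1):  x_1 = a_2 + b_3;
     LATE_BB  (loop depth 0):  use of x_1

   Walking up the dominator tree from LATE_BB, the shallowest block found
   is at depth 0, which is always preferred over the depth-1 EARLY_BB.
   When both blocks sit at the same loop depth, LATE_BB wins only if its
   execution count is sufficiently below EARLY_BB's.  */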

static basic_block
select_best_block (basic_block early_bb,
                   basic_block late_bb,
                   gimple *stmt)
{
  basic_block best_bb = late_bb;
  basic_block temp_bb = late_bb;
  int threshold;

  while (temp_bb != early_bb)
    {
      /* If we've moved into a shallower loop nest, then that becomes
         our best block.  */
      if (bb_loop_depth (temp_bb) < bb_loop_depth (best_bb))
        best_bb = temp_bb;

      /* Walk up the dominator tree, hopefully we'll find a shallower
         loop nest.  */
      temp_bb = get_immediate_dominator (CDI_DOMINATORS, temp_bb);
    }

  /* Placing a statement before a setjmp-like function would be invalid
     (it cannot be reevaluated when execution follows an abnormal edge).
     If we selected a block with abnormal predecessors, just punt.  */
  if (bb_has_abnormal_pred (best_bb))
    return early_bb;

  /* If we found a shallower loop nest, then we always consider that
     a win.  This will always give us the most control dependent block
     within that loop nest.  */
  if (bb_loop_depth (best_bb) < bb_loop_depth (early_bb))
    return best_bb;

  /* Avoid turning an unconditional read into a conditional one when we
     still might want to perform vectorization.  */
  if (best_bb->loop_father == early_bb->loop_father
      && loop_outer (best_bb->loop_father)
      && !best_bb->loop_father->inner
      && gimple_vuse (stmt)
      && flag_tree_loop_vectorize
      && !(cfun->curr_properties & PROP_loop_opts_done)
      && dominated_by_p (CDI_DOMINATORS, best_bb->loop_father->latch, early_bb)
      && !dominated_by_p (CDI_DOMINATORS, best_bb->loop_father->latch, best_bb))
    return early_bb;

  /* Get the sinking threshold.  If the statement to be moved has memory
     operands, then increase the threshold by 7 percentage points, clamped
     at 100%, since avoiding the execution of such statements is even more
     profitable.  */
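  /* Worked example (assuming the documented default of 75 for
     --param sink-frequency-threshold): a statement with virtual operands
     uses a threshold of 82, so at equal loop depth it is only sunk to a
     block whose execution count is below 82% of EARLY_BB's.  */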
  threshold = param_sink_frequency_threshold;
  if (gimple_vuse (stmt) || gimple_vdef (stmt))
    {
      threshold += 7;
      if (threshold > 100)
        threshold = 100;
    }

  /* If BEST_BB is at the same nesting level, then require it to have
     significantly lower execution frequency to avoid gratuitous movement.  */
  if (bb_loop_depth (best_bb) == bb_loop_depth (early_bb)
      /* If the result of the comparison is unknown, prefer EARLY_BB.
         Thus use !(...>=..) rather than (...<...)  */
      && !(best_bb->count * 100 >= early_bb->count * threshold))
    return best_bb;

  /* No better block found, so return EARLY_BB, which happens to be the
     statement's original block.  */
  return early_bb;
}

/* Given a statement (STMT) and the basic block it is currently in (FROMBB),
   determine the location to sink the statement to, if any.
   Return true if there is such a location; in that case, TOGSI points to
   the statement before which STMT should be moved.  */

static bool
statement_sink_location (gimple *stmt, basic_block frombb,
                         gimple_stmt_iterator *togsi, bool *zero_uses_p,
                         virtual_operand_live &vop_live)
{
  gimple *use;
  use_operand_p one_use = NULL_USE_OPERAND_P;
  basic_block sinkbb;
  use_operand_p use_p;
  def_operand_p def_p;
  ssa_op_iter iter;
  imm_use_iterator imm_iter;

  *zero_uses_p = false;

  /* We can only sink assignments and const/pure calls that are guaranteed
     to return exactly once.  */
  int cf;
  if (!is_gimple_assign (stmt)
      && (!is_gimple_call (stmt)
          || !((cf = gimple_call_flags (stmt)) & (ECF_CONST|ECF_PURE))
          || (cf & (ECF_LOOPING_CONST_OR_PURE|ECF_RETURNS_TWICE))))
    return false;

  /* We can only sink stmts with a single definition.  */
  def_p = single_ssa_def_operand (stmt, SSA_OP_ALL_DEFS);
  if (def_p == NULL_DEF_OPERAND_P)
    return false;

  /* There are a few classes of things we can't or don't move, some because we
     don't have code to handle it, some because it's not profitable and some
     because it's not legal.

     We can't sink things that may be global stores, at least not without
     calculating a lot more information, because we may cause it to no longer
     be seen by an external routine that needs it depending on where it gets
     moved to.

     We can't sink statements that end basic blocks without splitting the
     incoming edge for the sink location to place it there.

     We can't sink statements that have volatile operands.

     We don't want to sink dead code, so anything with 0 immediate uses is not
     sunk.

     Don't sink BLKmode assignments if current function has any local explicit
     register variables, as BLKmode assignments may involve memcpy or memset
     calls or, on some targets, inline expansion thereof that sometimes need
     to use specific hard registers.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_side_effects (stmt)
      || (cfun->has_local_explicit_reg_vars
          && TYPE_MODE (TREE_TYPE (gimple_get_lhs (stmt))) == BLKmode))
    return false;

  /* Return if there are no immediate uses of this stmt.  */
  if (has_zero_uses (DEF_FROM_PTR (def_p)))
    {
      *zero_uses_p = true;
      return false;
    }

  if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (DEF_FROM_PTR (def_p)))
    return false;

  FOR_EACH_SSA_USE_OPERAND (use_p, stmt, iter, SSA_OP_ALL_USES)
    {
      tree use = USE_FROM_PTR (use_p);
      if (SSA_NAME_OCCURS_IN_ABNORMAL_PHI (use))
        return false;
    }

  use = NULL;

  /* If STMT is a store, the one and only use needs to be the VOP
     merging PHI node.  */
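  /* E.g. (hypothetical GIMPLE): for a store  *p_1 = x_2  defining .MEM_4,
     the only non-killing use of .MEM_4 we accept is a single virtual PHI;
     the predecessor block feeding that PHI argument then determines where
     the store may be sunk to.  */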
  if (virtual_operand_p (DEF_FROM_PTR (def_p)))
    {
      FOR_EACH_IMM_USE_FAST (use_p, imm_iter, DEF_FROM_PTR (def_p))
        {
          gimple *use_stmt = USE_STMT (use_p);

          /* A killing definition is not a use.  */
          if ((gimple_has_lhs (use_stmt)
               && operand_equal_p (gimple_get_lhs (stmt),
                                   gimple_get_lhs (use_stmt), 0))
              || stmt_kills_ref_p (use_stmt, gimple_get_lhs (stmt)))
            {
              /* If use_stmt is or might be a nop assignment then USE_STMT
                 acts as a use as well as a definition.  */
              if (stmt != use_stmt
                  && ref_maybe_used_by_stmt_p (use_stmt,
                                               gimple_get_lhs (stmt)))
                return false;
              continue;
            }

          if (gimple_code (use_stmt) != GIMPLE_PHI)
            return false;

          if (use
              && use != use_stmt)
            return false;

          use = use_stmt;
        }
      if (!use)
        return false;
    }
  /* If all the immediate uses are not in the same place, find the nearest
     common dominator of all the immediate uses.  For PHI nodes, we have to
     find the nearest common dominator of all of the predecessor blocks, since
     that is where insertion would have to take place.  */
  else if (gimple_vuse (stmt)
           || !all_immediate_uses_same_place (def_p))
    {
      bool debug_stmts = false;
      basic_block commondom = nearest_common_dominator_of_uses (def_p,
                                                                &debug_stmts);

      if (commondom == frombb)
        return false;

      /* If this is a load then do not sink past any stores.  */
      if (gimple_vuse (stmt))
        {
          /* Do not sink loads from hard registers.  */
          if (gimple_assign_single_p (stmt)
              && VAR_P (gimple_assign_rhs1 (stmt))
              && DECL_HARD_REGISTER (gimple_assign_rhs1 (stmt)))
            return false;

          /* When the live virtual operand at the intended sink location is
             not the same as the one from the load, walk up the dominator
             tree for a new candidate location.  */
          while (commondom != frombb
                 && vop_live.get_live_in (commondom) != gimple_vuse (stmt))
            commondom = get_immediate_dominator (CDI_DOMINATORS, commondom);
          if (commondom == frombb)
            return false;
        }

      /* Our common dominator has to be dominated by frombb in order to be a
         trivially safe place to put this statement, since it has multiple
         uses.  */
      if (!dominated_by_p (CDI_DOMINATORS, commondom, frombb))
        return false;

      commondom = select_best_block (frombb, commondom, stmt);

      if (commondom == frombb)
        return false;

      *togsi = gsi_after_labels (commondom);

      return true;
    }
  else
    {
      FOR_EACH_IMM_USE_FAST (one_use, imm_iter, DEF_FROM_PTR (def_p))
        {
          if (is_gimple_debug (USE_STMT (one_use)))
            continue;
          break;
        }
      use = USE_STMT (one_use);

      if (gimple_code (use) != GIMPLE_PHI)
        {
          sinkbb = select_best_block (frombb, gimple_bb (use), stmt);

          if (sinkbb == frombb)
            return false;

          if (sinkbb == gimple_bb (use))
            *togsi = gsi_for_stmt (use);
          else
            *togsi = gsi_after_labels (sinkbb);

          return true;
        }
    }

  sinkbb = find_bb_for_arg (as_a <gphi *> (use), DEF_FROM_PTR (def_p));

  /* This can happen if there are multiple uses in a PHI.  */
  if (!sinkbb)
    return false;

  sinkbb = select_best_block (frombb, sinkbb, stmt);
  if (!sinkbb || sinkbb == frombb)
    return false;

  /* If the latch block is empty, don't make it non-empty by sinking
     something into it.  */
  if (sinkbb == frombb->loop_father->latch
      && empty_block_p (sinkbb))
    return false;

  *togsi = gsi_after_labels (sinkbb);

  return true;
}

/* Very simplistic code to sink common stores from the predecessors through
   our virtual PHI.  We do this before sinking stmts from BB as it might
   expose sinking opportunities of the merged stores.
   Once we have partial dead code elimination through something like SSU-PRE
   this should be moved there.  */
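
/* A sketch of the transform (hypothetical GIMPLE):

     if (c_1)            if (c_1)
       *p_2 = x_3;         ;
     else          =>    else
       *p_2 = y_4;         ;
     <merge bb>          <merge bb>
                         _5 = PHI <x_3, y_4>
                         *p_2 = _5;

   When the stored values differ a PHI for the stored value is inserted;
   when they are all equal the single store is simply placed at the start
   of the merge block.  */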

static unsigned
sink_common_stores_to_bb (basic_block bb)
{
  unsigned todo = 0;
  gphi *phi;

  if (EDGE_COUNT (bb->preds) > 1
      && (phi = get_virtual_phi (bb)))
    {
      /* Repeat until no more common stores are found.  */
      while (1)
        {
          gimple *first_store = NULL;
          auto_vec <tree, 5> vdefs;
          gimple_stmt_iterator gsi;

          /* Search for common stores defined by all virtual PHI args.
             ??? Common stores not present in all predecessors could
             be handled by inserting a forwarder to sink to.  Generally
             this involves deciding which stores to do this for if
             multiple common stores are present for different sets of
             predecessors.  See PR11832 for an interesting case.  */
          for (unsigned i = 0; i < gimple_phi_num_args (phi); ++i)
            {
              tree arg = gimple_phi_arg_def (phi, i);
              gimple *def = SSA_NAME_DEF_STMT (arg);
              if (! is_gimple_assign (def)
                  || stmt_can_throw_internal (cfun, def)
                  || (gimple_phi_arg_edge (phi, i)->flags & EDGE_ABNORMAL))
                {
                  /* ??? We could handle some cascading with the def being
                     another PHI.  We'd have to insert multiple PHIs for
                     the rhs then though (if they are not all equal).  */
                  first_store = NULL;
                  break;
                }
              /* ??? Do not try to do anything fancy with aliasing, thus
                 do not sink across non-aliased loads (or even stores,
                 so different store order will make the sinking fail).  */
              bool all_uses_on_phi = true;
              imm_use_iterator iter;
              use_operand_p use_p;
              FOR_EACH_IMM_USE_FAST (use_p, iter, arg)
                if (USE_STMT (use_p) != phi)
                  {
                    all_uses_on_phi = false;
                    break;
                  }
              if (! all_uses_on_phi)
                {
                  first_store = NULL;
                  break;
                }
              /* Check all stores are to the same LHS.  */
              if (! first_store)
                first_store = def;
              /* ??? We could handle differing SSA uses in the LHS by inserting
                 PHIs for them.  */
              else if (! operand_equal_p (gimple_assign_lhs (first_store),
                                          gimple_assign_lhs (def), 0)
                       || (gimple_clobber_p (first_store)
                           != gimple_clobber_p (def)))
                {
                  first_store = NULL;
                  break;
                }
              vdefs.safe_push (arg);
            }
          if (! first_store)
            break;

          /* Check if we need a PHI node to merge the stored values.  */
          bool allsame = true;
          if (!gimple_clobber_p (first_store))
            for (unsigned i = 1; i < vdefs.length (); ++i)
              {
                gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
                if (! operand_equal_p (gimple_assign_rhs1 (first_store),
                                       gimple_assign_rhs1 (def), 0))
                  {
                    allsame = false;
                    break;
                  }
              }

          /* We cannot handle aggregate values if we need to merge them.  */
          tree type = TREE_TYPE (gimple_assign_lhs (first_store));
          if (! allsame
              && ! is_gimple_reg_type (type))
            break;

          if (dump_enabled_p ())
            {
              dump_printf_loc (MSG_OPTIMIZED_LOCATIONS,
                               first_store,
                               "sinking common stores %sto ",
                               allsame ? "with same value " : "");
              dump_generic_expr (MSG_OPTIMIZED_LOCATIONS, TDF_SLIM,
                                 gimple_assign_lhs (first_store));
              dump_printf (MSG_OPTIMIZED_LOCATIONS, "\n");
            }

          /* Insert a PHI to merge differing stored values if necessary.
             Note that in general inserting PHIs isn't a very good idea as
             it makes the job of coalescing and register allocation harder.
             Even common SSA uses on the rhs/lhs might extend their lifetime
             across multiple edges by this code motion which makes
             register allocation harder.  */
          tree from;
          if (! allsame)
            {
              from = make_ssa_name (type);
              gphi *newphi = create_phi_node (from, bb);
              for (unsigned i = 0; i < vdefs.length (); ++i)
                {
                  gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
                  add_phi_arg (newphi, gimple_assign_rhs1 (def),
                               EDGE_PRED (bb, i), UNKNOWN_LOCATION);
                }
            }
          else
            from = gimple_assign_rhs1 (first_store);

          /* Remove all stores.  */
          for (unsigned i = 0; i < vdefs.length (); ++i)
            TREE_VISITED (vdefs[i]) = 1;
          for (unsigned i = 0; i < vdefs.length (); ++i)
            /* If we have more than one use of a VDEF on the PHI make sure
               we remove the defining stmt only once.  */
            if (TREE_VISITED (vdefs[i]))
              {
                TREE_VISITED (vdefs[i]) = 0;
                gimple *def = SSA_NAME_DEF_STMT (vdefs[i]);
                gsi = gsi_for_stmt (def);
                unlink_stmt_vdef (def);
                gsi_remove (&gsi, true);
                release_defs (def);
              }

          /* Insert the first store at the beginning of the merge BB.  */
          gimple_set_vdef (first_store, gimple_phi_result (phi));
          SSA_NAME_DEF_STMT (gimple_vdef (first_store)) = first_store;
          gimple_phi_set_result (phi, make_ssa_name (gimple_vop (cfun)));
          gimple_set_vuse (first_store, gimple_phi_result (phi));
          gimple_assign_set_rhs1 (first_store, from);
          /* ??? Should we reset first_store's location?  */
          gsi = gsi_after_labels (bb);
          gsi_insert_before (&gsi, first_store, GSI_SAME_STMT);
          sink_stats.commoned++;

          todo |= TODO_cleanup_cfg;
        }

      /* We could now have empty predecessors that we could remove,
         forming a proper CFG for further sinking.  Note that even
         CFG cleanup doesn't do this fully at the moment and it
         doesn't preserve post-dominators in the process either.
         The mergephi pass might do it though.  gcc.dg/tree-ssa/ssa-sink-13.c
         shows this nicely if you disable tail merging or (same effect)
         make the stored values unequal.  */
    }

  return todo;
}

/* Perform code sinking on BB.  */

static unsigned
sink_code_in_bb (basic_block bb, virtual_operand_live &vop_live)
{
  gimple_stmt_iterator gsi;
  edge_iterator ei;
  edge e;
  bool last = true;
  unsigned todo = 0;

  /* Sink common stores from the predecessors through our virtual PHI.  */
  todo |= sink_common_stores_to_bb (bb);

  /* If this block doesn't dominate anything, there can't be any place to sink
     the statements to.  */
  if (first_dom_son (CDI_DOMINATORS, bb) == NULL)
    return todo;

  /* We can't move things across abnormal edges, so don't try.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & EDGE_ABNORMAL)
      return todo;

  for (gsi = gsi_last_bb (bb); !gsi_end_p (gsi);)
    {
      gimple *stmt = gsi_stmt (gsi);
      gimple_stmt_iterator togsi;
      bool zero_uses_p;

      if (!statement_sink_location (stmt, bb, &togsi, &zero_uses_p, vop_live))
        {
          gimple_stmt_iterator saved = gsi;
          if (!gsi_end_p (gsi))
            gsi_prev (&gsi);
          /* If we face a dead stmt, remove it as it possibly blocks
             sinking of the statements feeding it.  */
          if (zero_uses_p
              && !gimple_vdef (stmt)
              && (cfun->can_delete_dead_exceptions
                  || !stmt_could_throw_p (cfun, stmt)))
            {
              gsi_remove (&saved, true);
              release_defs (stmt);
            }
          else
            last = false;
          continue;
        }
      if (dump_file)
        {
          fprintf (dump_file, "Sinking ");
          print_gimple_stmt (dump_file, stmt, 0, TDF_VOPS);
          fprintf (dump_file, " from bb %d to bb %d\n",
                   bb->index, (gsi_bb (togsi))->index);
        }

      /* Update virtual operands of statements in the path we
         do not sink to.  */
      if (gimple_vdef (stmt))
        {
          imm_use_iterator iter;
          use_operand_p use_p;
          gimple *vuse_stmt;

          FOR_EACH_IMM_USE_STMT (vuse_stmt, iter, gimple_vdef (stmt))
            if (gimple_code (vuse_stmt) != GIMPLE_PHI)
              FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
                SET_USE (use_p, gimple_vuse (stmt));
        }

      /* If this is the end of the basic block, we need to insert at the end
         of the basic block.  */
      if (gsi_end_p (togsi))
        gsi_move_to_bb_end (&gsi, gsi_bb (togsi));
      else
        gsi_move_before (&gsi, &togsi);

      sink_stats.sunk++;

      /* If we've just removed the last statement of the BB, the
         gsi_end_p () test below would fail, but gsi_prev () would have
         succeeded, and we want it to succeed.  So we keep track of
         whether we're at the last statement and pick up the new last
         statement.  */
      if (last)
        {
          gsi = gsi_last_bb (bb);
          continue;
        }

      last = false;
      if (!gsi_end_p (gsi))
        gsi_prev (&gsi);
    }

  return todo;
}

/* Perform code sinking.
   This moves code down the flowgraph when we know it would be
   profitable to do so, or it wouldn't increase the number of
   executions of the statement.

   IE given

   a_1 = b + c;
   if (<something>)
     {
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   we'll transform this into:

   if (<something>)
     {
       a_1 = b + c;
     }
   else
     {
       foo (&b, &c);
       a_5 = b + c;
     }
   a_6 = PHI (a_5, a_1);
   USE a_6.

   Note that this reduces the number of computations of a = b + c to 1
   when we take the else edge, instead of 2.  */
namespace {

const pass_data pass_data_sink_code =
{
  GIMPLE_PASS, /* type */
  "sink", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_SINK, /* tv_id */
  /* PROP_no_crit_edges is ensured by running split_edges_for_insertion in
     pass_data_sink_code::execute ().  */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  TODO_update_ssa, /* todo_flags_finish */
};

class pass_sink_code : public gimple_opt_pass
{
public:
  pass_sink_code (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_sink_code, ctxt), unsplit_edges (false)
  {}

  /* opt_pass methods: */
  bool gate (function *) final override { return flag_tree_sink != 0; }
  unsigned int execute (function *) final override;
  opt_pass *clone (void) final override { return new pass_sink_code (m_ctxt); }
  void set_pass_param (unsigned n, bool param) final override
  {
    gcc_assert (n == 0);
    unsplit_edges = param;
  }

private:
  bool unsplit_edges;
}; // class pass_sink_code

unsigned int
pass_sink_code::execute (function *fun)
{
  loop_optimizer_init (LOOPS_NORMAL);
  split_edges_for_insertion ();
  /* Arrange for the critical edge splitting to be undone if requested.  */
  unsigned todo = unsplit_edges ? TODO_cleanup_cfg : 0;
  connect_infinite_loops_to_exit ();
  mark_dfs_back_edges (fun);
  memset (&sink_stats, 0, sizeof (sink_stats));
  calculate_dominance_info (CDI_DOMINATORS);

  virtual_operand_live vop_live;

  int *rpo = XNEWVEC (int, n_basic_blocks_for_fn (cfun));
  int n = inverted_rev_post_order_compute (fun, rpo);
  for (int i = 0; i < n; ++i)
    todo |= sink_code_in_bb (BASIC_BLOCK_FOR_FN (fun, rpo[i]), vop_live);
  free (rpo);

  statistics_counter_event (fun, "Sunk statements", sink_stats.sunk);
  statistics_counter_event (fun, "Commoned stores", sink_stats.commoned);
  remove_fake_exit_edges ();
  loop_optimizer_finalize ();

  return todo;
}

} // anon namespace

gimple_opt_pass *
make_pass_sink_code (gcc::context *ctxt)
{
  return new pass_sink_code (ctxt);
}