1/* If-conversion for vectorizer.
2 Copyright (C) 2004-2017 Free Software Foundation, Inc.
3 Contributed by Devang Patel <dpatel@apple.com>
4
5This file is part of GCC.
6
7GCC is free software; you can redistribute it and/or modify it under
8the terms of the GNU General Public License as published by the Free
9Software Foundation; either version 3, or (at your option) any later
10version.
11
12GCC is distributed in the hope that it will be useful, but WITHOUT ANY
13WARRANTY; without even the implied warranty of MERCHANTABILITY or
14FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15for more details.
16
17You should have received a copy of the GNU General Public License
18along with GCC; see the file COPYING3. If not see
19<http://www.gnu.org/licenses/>. */
20
21/* This pass implements a tree level if-conversion of loops. Its
22 initial goal is to help the vectorizer to vectorize loops with
23 conditions.
24
25 A short description of if-conversion:
26
27 o Decide if a loop is if-convertible or not.
28 o Walk all loop basic blocks in breadth first order (BFS order).
29 o Remove conditional statements (at the end of basic block)
30 and propagate condition into destination basic blocks'
31 predicate list.
32 o Replace modify expression with conditional modify expression
33 using current basic block's condition.
34 o Merge all basic blocks
35 o Replace phi nodes with conditional modify expr
36 o Merge all basic blocks into header
37
38 Sample transformation:
39
40 INPUT
41 -----
42
43 # i_23 = PHI <0(0), i_18(10)>;
44 <L0>:;
45 j_15 = A[i_23];
46 if (j_15 > 41) goto <L1>; else goto <L17>;
47
48 <L17>:;
49 goto <bb 3> (<L3>);
50
51 <L1>:;
52
53 # iftmp.2_4 = PHI <0(8), 42(2)>;
54 <L3>:;
55 A[i_23] = iftmp.2_4;
56 i_18 = i_23 + 1;
57 if (i_18 <= 15) goto <L19>; else goto <L18>;
58
59 <L19>:;
60 goto <bb 1> (<L0>);
61
62 <L18>:;
63
64 OUTPUT
65 ------
66
67 # i_23 = PHI <0(0), i_18(10)>;
68 <L0>:;
69 j_15 = A[i_23];
70
71 <L3>:;
72 iftmp.2_4 = j_15 > 41 ? 42 : 0;
73 A[i_23] = iftmp.2_4;
74 i_18 = i_23 + 1;
75 if (i_18 <= 15) goto <L19>; else goto <L18>;
76
77 <L19>:;
78 goto <bb 1> (<L0>);
79
80 <L18>:;
81*/
82
83#include "config.h"
84#include "system.h"
85#include "coretypes.h"
86#include "backend.h"
87#include "rtl.h"
88#include "tree.h"
89#include "gimple.h"
90#include "cfghooks.h"
91#include "tree-pass.h"
92#include "ssa.h"
93#include "expmed.h"
94#include "optabs-query.h"
95#include "gimple-pretty-print.h"
96#include "alias.h"
97#include "fold-const.h"
98#include "stor-layout.h"
99#include "gimple-fold.h"
100#include "gimplify.h"
101#include "gimple-iterator.h"
102#include "gimplify-me.h"
103#include "tree-cfg.h"
104#include "tree-into-ssa.h"
105#include "tree-ssa.h"
106#include "cfgloop.h"
107#include "tree-data-ref.h"
108#include "tree-scalar-evolution.h"
109#include "tree-ssa-loop.h"
110#include "tree-ssa-loop-niter.h"
111#include "tree-ssa-loop-ivopts.h"
112#include "tree-ssa-address.h"
113#include "dbgcnt.h"
114#include "tree-hash-traits.h"
115#include "varasm.h"
116#include "builtins.h"
117#include "params.h"
118#include "cfganal.h"
119
/* Only handle PHIs with no more arguments than this limit, unless we
   are asked to by simd pragma.  The limit comes from the
   --param max-tree-if-conversion-phi-args knob.  */
#define MAX_PHI_ARG_NUM \
  ((unsigned) PARAM_VALUE (PARAM_MAX_TREE_IF_CONVERSION_PHI_ARGS))
124
/* Indicate if new load/store that needs to be predicated is introduced
   during if conversion.  Set by if_convertible_gimple_assign_stmt_p
   (presumably consulted by the pass driver outside this chunk — TODO
   confirm).  */
static bool any_pred_load_store;

/* Indicate if there are any complicated PHIs that need to be handled in
   if-conversion.  Complicated PHI has more than two arguments and can't
   be degenerated to two arguments PHI.  See more information in comment
   before phi_convertible_by_degenerating_args.  */
static bool any_complicated_phi;
134
/* Hash traits for struct innermost_loop_behavior, keying the
   innermost_DR_map declared below.  nofree_ptr_hash means the table
   does not own the pointed-to objects: it depends on the user to
   free the memory.  */

struct innermost_loop_behavior_hash : nofree_ptr_hash <innermost_loop_behavior>
{
  /* Combined hash of the base_address, offset, init and step fields.  */
  static inline hashval_t hash (const value_type &);
  /* Field-wise structural equality of two behaviors.  */
  static inline bool equal (const value_type &,
			    const compare_type &);
};
144
145inline hashval_t
146innermost_loop_behavior_hash::hash (const value_type &e)
147{
148 hashval_t hash;
149
150 hash = iterative_hash_expr (e->base_address, 0);
151 hash = iterative_hash_expr (e->offset, hash);
152 hash = iterative_hash_expr (e->init, hash);
153 return iterative_hash_expr (e->step, hash);
154}
155
156inline bool
157innermost_loop_behavior_hash::equal (const value_type &e1,
158 const compare_type &e2)
159{
160 if ((e1->base_address && !e2->base_address)
161 || (!e1->base_address && e2->base_address)
162 || (!e1->offset && e2->offset)
163 || (e1->offset && !e2->offset)
164 || (!e1->init && e2->init)
165 || (e1->init && !e2->init)
166 || (!e1->step && e2->step)
167 || (e1->step && !e2->step))
168 return false;
169
170 if (e1->base_address && e2->base_address
171 && !operand_equal_p (e1->base_address, e2->base_address, 0))
172 return false;
173 if (e1->offset && e2->offset
174 && !operand_equal_p (e1->offset, e2->offset, 0))
175 return false;
176 if (e1->init && e2->init
177 && !operand_equal_p (e1->init, e2->init, 0))
178 return false;
179 if (e1->step && e2->step
180 && !operand_equal_p (e1->step, e2->step, 0))
181 return false;
182
183 return true;
184}
185
/* List of basic blocks in if-conversion-suitable order.  */
static basic_block *ifc_bbs;

/* Hash table to store <DR's innermost loop behavior, DR> pairs.  The
   first data reference inserted for a given behavior acts as the
   "master" DR on which access predicates are accumulated.  */
static hash_map<innermost_loop_behavior_hash,
		data_reference_p> *innermost_DR_map;

/* Hash table to store <base reference, DR> pairs.  As above, the first
   DR inserted for a given base reference is that base's "master".  */
static hash_map<tree_operand_hash, data_reference_p> *baseref_DR_map;
195
/* Structure used to predicate basic blocks.  This is attached to the
   ->aux field of the BBs in the loop to be if-converted.  Allocated
   by init_bb_predicate and released by free_bb_predicate.  */
struct bb_predicate {

  /* The condition under which this basic block is executed.  */
  tree predicate;

  /* PREDICATE is gimplified, and the sequence of statements is
     recorded here, in order to avoid the duplication of computations
     that occur in previous conditions.  See PR44483.  */
  gimple_seq predicate_gimplified_stmts;
};
208
209/* Returns true when the basic block BB has a predicate. */
210
211static inline bool
212bb_has_predicate (basic_block bb)
213{
214 return bb->aux != NULL;
215}
216
217/* Returns the gimplified predicate for basic block BB. */
218
219static inline tree
220bb_predicate (basic_block bb)
221{
222 return ((struct bb_predicate *) bb->aux)->predicate;
223}
224
225/* Sets the gimplified predicate COND for basic block BB. */
226
227static inline void
228set_bb_predicate (basic_block bb, tree cond)
229{
230 gcc_assert ((TREE_CODE (cond) == TRUTH_NOT_EXPR
231 && is_gimple_condexpr (TREE_OPERAND (cond, 0)))
232 || is_gimple_condexpr (cond));
233 ((struct bb_predicate *) bb->aux)->predicate = cond;
234}
235
236/* Returns the sequence of statements of the gimplification of the
237 predicate for basic block BB. */
238
239static inline gimple_seq
240bb_predicate_gimplified_stmts (basic_block bb)
241{
242 return ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts;
243}
244
245/* Sets the sequence of statements STMTS of the gimplification of the
246 predicate for basic block BB. */
247
248static inline void
249set_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
250{
251 ((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts = stmts;
252}
253
254/* Adds the sequence of statements STMTS to the sequence of statements
255 of the predicate for basic block BB. */
256
257static inline void
258add_bb_predicate_gimplified_stmts (basic_block bb, gimple_seq stmts)
259{
260 gimple_seq_add_seq_without_update
261 (&(((struct bb_predicate *) bb->aux)->predicate_gimplified_stmts), stmts);
262}
263
264/* Initializes to TRUE the predicate of basic block BB. */
265
266static inline void
267init_bb_predicate (basic_block bb)
268{
269 bb->aux = XNEW (struct bb_predicate);
270 set_bb_predicate_gimplified_stmts (bb, NULL);
271 set_bb_predicate (bb, boolean_true_node);
272}
273
274/* Release the SSA_NAMEs associated with the predicate of basic block BB,
275 but don't actually free it. */
276
277static inline void
278release_bb_predicate (basic_block bb)
279{
280 gimple_seq stmts = bb_predicate_gimplified_stmts (bb);
281 if (stmts)
282 {
283 if (flag_checking)
284 for (gimple_stmt_iterator i = gsi_start (stmts);
285 !gsi_end_p (i); gsi_next (&i))
286 gcc_assert (! gimple_use_ops (gsi_stmt (i)));
287
288 set_bb_predicate_gimplified_stmts (bb, NULL);
289 }
290}
291
292/* Free the predicate of basic block BB. */
293
294static inline void
295free_bb_predicate (basic_block bb)
296{
297 if (!bb_has_predicate (bb))
298 return;
299
300 release_bb_predicate (bb);
301 free (bb->aux);
302 bb->aux = NULL;
303}
304
305/* Reinitialize predicate of BB with the true predicate. */
306
307static inline void
308reset_bb_predicate (basic_block bb)
309{
310 if (!bb_has_predicate (bb))
311 init_bb_predicate (bb);
312 else
313 {
314 release_bb_predicate (bb);
315 set_bb_predicate (bb, boolean_true_node);
316 }
317}
318
319/* Returns a new SSA_NAME of type TYPE that is assigned the value of
320 the expression EXPR. Inserts the statement created for this
321 computation before GSI and leaves the iterator GSI at the same
322 statement. */
323
324static tree
325ifc_temp_var (tree type, tree expr, gimple_stmt_iterator *gsi)
326{
327 tree new_name = make_temp_ssa_name (type, NULL, "_ifc_");
328 gimple *stmt = gimple_build_assign (new_name, expr);
329 gimple_set_vuse (stmt, gimple_vuse (gsi_stmt (*gsi)));
330 gsi_insert_before (gsi, stmt, GSI_SAME_STMT);
331 return new_name;
332}
333
334/* Return true when COND is a false predicate. */
335
336static inline bool
337is_false_predicate (tree cond)
338{
339 return (cond != NULL_TREE
340 && (cond == boolean_false_node
341 || integer_zerop (cond)));
342}
343
344/* Return true when COND is a true predicate. */
345
346static inline bool
347is_true_predicate (tree cond)
348{
349 return (cond == NULL_TREE
350 || cond == boolean_true_node
351 || integer_onep (cond));
352}
353
354/* Returns true when BB has a predicate that is not trivial: true or
355 NULL_TREE. */
356
357static inline bool
358is_predicated (basic_block bb)
359{
360 return !is_true_predicate (bb_predicate (bb));
361}
362
/* Parses the predicate COND and returns its comparison code and
   operands OP0 and OP1.  Returns ERROR_MARK (with OP0/OP1 untouched)
   when COND cannot be decomposed into a single comparison.  Handles
   a bare comparison tree as well as an SSA_NAME defined by either a
   comparison or a TRUTH_NOT_EXPR of one (which is looked through and
   the resulting comparison inverted).  */

static enum tree_code
parse_predicate (tree cond, tree *op0, tree *op1)
{
  gimple *s;

  /* Look through an SSA name to its defining assignment.  */
  if (TREE_CODE (cond) == SSA_NAME
      && is_gimple_assign (s = SSA_NAME_DEF_STMT (cond)))
    {
      if (TREE_CODE_CLASS (gimple_assign_rhs_code (s)) == tcc_comparison)
	{
	  *op0 = gimple_assign_rhs1 (s);
	  *op1 = gimple_assign_rhs2 (s);
	  return gimple_assign_rhs_code (s);
	}

      else if (gimple_assign_rhs_code (s) == TRUTH_NOT_EXPR)
	{
	  /* Recurse into the negated operand and invert the resulting
	     code; HONOR_NANS matters because e.g. !(a < b) is not
	     (a >= b) in the presence of NaNs.  */
	  tree op = gimple_assign_rhs1 (s);
	  tree type = TREE_TYPE (op);
	  enum tree_code code = parse_predicate (op, op0, op1);

	  return code == ERROR_MARK ? ERROR_MARK
	    : invert_tree_comparison (code, HONOR_NANS (type));
	}

      return ERROR_MARK;
    }

  /* A bare comparison tree: return its operands directly.  */
  if (COMPARISON_CLASS_P (cond))
    {
      *op0 = TREE_OPERAND (cond, 0);
      *op1 = TREE_OPERAND (cond, 1);
      return TREE_CODE (cond);
    }

  return ERROR_MARK;
}
403
404/* Returns the fold of predicate C1 OR C2 at location LOC. */
405
406static tree
407fold_or_predicates (location_t loc, tree c1, tree c2)
408{
409 tree op1a, op1b, op2a, op2b;
410 enum tree_code code1 = parse_predicate (c1, &op1a, &op1b);
411 enum tree_code code2 = parse_predicate (c2, &op2a, &op2b);
412
413 if (code1 != ERROR_MARK && code2 != ERROR_MARK)
414 {
415 tree t = maybe_fold_or_comparisons (code1, op1a, op1b,
416 code2, op2a, op2b);
417 if (t)
418 return t;
419 }
420
421 return fold_build2_loc (loc, TRUTH_OR_EXPR, boolean_type_node, c1, c2);
422}
423
424/* Returns either a COND_EXPR or the folded expression if the folded
425 expression is a MIN_EXPR, a MAX_EXPR, an ABS_EXPR,
426 a constant or a SSA_NAME. */
427
428static tree
429fold_build_cond_expr (tree type, tree cond, tree rhs, tree lhs)
430{
431 tree rhs1, lhs1, cond_expr;
432
433 /* If COND is comparison r != 0 and r has boolean type, convert COND
434 to SSA_NAME to accept by vect bool pattern. */
435 if (TREE_CODE (cond) == NE_EXPR)
436 {
437 tree op0 = TREE_OPERAND (cond, 0);
438 tree op1 = TREE_OPERAND (cond, 1);
439 if (TREE_CODE (op0) == SSA_NAME
440 && TREE_CODE (TREE_TYPE (op0)) == BOOLEAN_TYPE
441 && (integer_zerop (op1)))
442 cond = op0;
443 }
444 cond_expr = fold_ternary (COND_EXPR, type, cond, rhs, lhs);
445
446 if (cond_expr == NULL_TREE)
447 return build3 (COND_EXPR, type, cond, rhs, lhs);
448
449 STRIP_USELESS_TYPE_CONVERSION (cond_expr);
450
451 if (is_gimple_val (cond_expr))
452 return cond_expr;
453
454 if (TREE_CODE (cond_expr) == ABS_EXPR)
455 {
456 rhs1 = TREE_OPERAND (cond_expr, 1);
457 STRIP_USELESS_TYPE_CONVERSION (rhs1);
458 if (is_gimple_val (rhs1))
459 return build1 (ABS_EXPR, type, rhs1);
460 }
461
462 if (TREE_CODE (cond_expr) == MIN_EXPR
463 || TREE_CODE (cond_expr) == MAX_EXPR)
464 {
465 lhs1 = TREE_OPERAND (cond_expr, 0);
466 STRIP_USELESS_TYPE_CONVERSION (lhs1);
467 rhs1 = TREE_OPERAND (cond_expr, 1);
468 STRIP_USELESS_TYPE_CONVERSION (rhs1);
469 if (is_gimple_val (rhs1) && is_gimple_val (lhs1))
470 return build2 (TREE_CODE (cond_expr), type, lhs1, rhs1);
471 }
472 return build3 (COND_EXPR, type, cond, rhs, lhs);
473}
474
/* Add condition NC to the predicate list of basic block BB.  LOOP is
   the loop to be if-converted.  Use predicate of cd-equivalent block
   for join bb if it exists: we call basic blocks bb1 and bb2
   cd-equivalent if they are executed under the same condition.  */

static inline void
add_to_predicate_list (struct loop *loop, basic_block bb, tree nc)
{
  tree bc, *tp;
  basic_block dom_bb;

  /* A true condition adds no information.  */
  if (is_true_predicate (nc))
    return;

  /* If dominance tells us this basic block is always executed,
     don't record any predicates for it.  */
  if (dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
    return;

  dom_bb = get_immediate_dominator (CDI_DOMINATORS, bb);
  /* We use notion of cd equivalence to get simpler predicate for
     join block, e.g. if join block has 2 predecessors with predicates
     p1 & p2 and p1 & !p2, we'd like to get p1 for it instead of
     p1 & p2 | p1 & !p2.  BB is cd-equivalent to its immediate
     dominator when the latter is post-dominated by BB.  */
  if (dom_bb != loop->header
      && get_immediate_dominator (CDI_POST_DOMINATORS, dom_bb) == bb)
    {
      gcc_assert (flow_bb_inside_loop_p (loop, dom_bb));
      bc = bb_predicate (dom_bb);
      if (!is_true_predicate (bc))
	set_bb_predicate (bb, bc);
      else
	gcc_assert (is_true_predicate (bb_predicate (bb)));
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Use predicate of bb#%d for bb#%d\n",
		 dom_bb->index, bb->index);
      return;
    }

  /* OR the new condition into any predicate already recorded for BB.  */
  if (!is_predicated (bb))
    bc = nc;
  else
    {
      bc = bb_predicate (bb);
      bc = fold_or_predicates (EXPR_LOCATION (bc), nc, bc);
      if (is_true_predicate (bc))
	{
	  /* The predicates OR up to true: BB executes unconditionally.  */
	  reset_bb_predicate (bb);
	  return;
	}
    }

  /* Allow a TRUTH_NOT_EXPR around the main predicate.  */
  if (TREE_CODE (bc) == TRUTH_NOT_EXPR)
    tp = &TREE_OPERAND (bc, 0);
  else
    tp = &bc;
  /* Gimplify the predicate if it is not a valid gimple condexpr yet,
     recording the generated statements on BB for later emission.  */
  if (!is_gimple_condexpr (*tp))
    {
      gimple_seq stmts;
      *tp = force_gimple_operand_1 (*tp, &stmts, is_gimple_condexpr, NULL_TREE);
      add_bb_predicate_gimplified_stmts (bb, stmts);
    }
  set_bb_predicate (bb, bc);
}
540
541/* Add the condition COND to the previous condition PREV_COND, and add
542 this to the predicate list of the destination of edge E. LOOP is
543 the loop to be if-converted. */
544
545static void
546add_to_dst_predicate_list (struct loop *loop, edge e,
547 tree prev_cond, tree cond)
548{
549 if (!flow_bb_inside_loop_p (loop, e->dest))
550 return;
551
552 if (!is_true_predicate (prev_cond))
553 cond = fold_build2 (TRUTH_AND_EXPR, boolean_type_node,
554 prev_cond, cond);
555
556 if (!dominated_by_p (CDI_DOMINATORS, loop->latch, e->dest))
557 add_to_predicate_list (loop, e->dest, cond);
558}
559
560/* Return true if one of the successor edges of BB exits LOOP. */
561
562static bool
563bb_with_exit_edge_p (struct loop *loop, basic_block bb)
564{
565 edge e;
566 edge_iterator ei;
567
568 FOR_EACH_EDGE (e, ei, bb->succs)
569 if (loop_exit_edge_p (loop, e))
570 return true;
571
572 return false;
573}
574
/* Given PHI which has more than two arguments, this function checks if
   it's if-convertible by degenerating its arguments.  Specifically, if
   below two conditions are satisfied:

     1) Number of PHI arguments with different values equals to 2 and one
	argument has the only occurrence.
     2) The edge corresponding to the unique argument isn't critical edge.

   Such PHI can be handled as PHIs have only two arguments.  For example,
   below PHI:

     res = PHI <A_1(e1), A_1(e2), A_2(e3)>;

   can be transformed into:

     res = (predicate of e3) ? A_2 : A_1;

   Return TRUE if it is the case, FALSE otherwise.  */

static bool
phi_convertible_by_degenerating_args (gphi *phi)
{
  edge e;
  tree arg, t1 = NULL, t2 = NULL;
  /* i1/i2 remember the last index at which t1/t2 were seen, n1/n2 count
     their occurrences.  */
  unsigned int i, i1 = 0, i2 = 0, n1 = 0, n2 = 0;
  unsigned int num_args = gimple_phi_num_args (phi);

  gcc_assert (num_args > 2);

  /* Bucket every argument into one of at most two distinct values.  */
  for (i = 0; i < num_args; i++)
    {
      arg = gimple_phi_arg_def (phi, i);
      if (t1 == NULL || operand_equal_p (t1, arg, 0))
	{
	  n1++;
	  i1 = i;
	  t1 = arg;
	}
      else if (t2 == NULL || operand_equal_p (t2, arg, 0))
	{
	  n2++;
	  i2 = i;
	  t2 = arg;
	}
      else
	/* A third distinct value: cannot degenerate.  */
	return false;
    }

  /* One of the two values must occur exactly once.  */
  if (n1 != 1 && n2 != 1)
    return false;

  /* Check if the edge corresponding to the unique arg is critical.  */
  e = gimple_phi_arg_edge (phi, (n1 == 1) ? i1 : i2);
  if (EDGE_COUNT (e->src->succs) > 1)
    return false;

  return true;
}
633
634/* Return true when PHI is if-convertible. PHI is part of loop LOOP
635 and it belongs to basic block BB. Note at this point, it is sure
636 that PHI is if-convertible. This function updates global variable
637 ANY_COMPLICATED_PHI if PHI is complicated. */
638
639static bool
640if_convertible_phi_p (struct loop *loop, basic_block bb, gphi *phi)
641{
642 if (dump_file && (dump_flags & TDF_DETAILS))
643 {
644 fprintf (dump_file, "-------------------------\n");
645 print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
646 }
647
648 if (bb != loop->header
649 && gimple_phi_num_args (phi) > 2
650 && !phi_convertible_by_degenerating_args (phi))
651 any_complicated_phi = true;
652
653 return true;
654}
655
/* Records the status of a data reference.  This struct is attached to
   each DR->aux field.  */

struct ifc_dr {
  /* True when the reference is read or written unconditionally on every
     iteration (its access predicates OR up to true).  */
  bool rw_unconditionally;
  /* True when the reference is written unconditionally.  */
  bool w_unconditionally;
  /* True when the base reference is written at least once
     unconditionally.  */
  bool written_at_least_once;

  /* OR of the predicates under which the reference is accessed.  */
  tree rw_predicate;
  /* OR of the predicates under which the reference is written.  */
  tree w_predicate;
  /* OR of the predicates under which the base reference is written.  */
  tree base_w_predicate;
};

/* Accessors for the flags above, given a data_reference_p DR.  */
#define IFC_DR(DR) ((struct ifc_dr *) (DR)->aux)
#define DR_BASE_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->written_at_least_once)
#define DR_RW_UNCONDITIONALLY(DR) (IFC_DR (DR)->rw_unconditionally)
#define DR_W_UNCONDITIONALLY(DR) (IFC_DR (DR)->w_unconditionally)
673
/* Iterates over DR's and stores refs, DR and base refs, DR pairs in
   HASH tables.  While storing them in HASH table, it checks if the
   reference is unconditionally read or written and stores that as a flag
   information.  For base reference it checks if it is written at least
   once unconditionally and stores it as flag information along with DR.
   In other words for every data reference A in STMT there exist other
   accesses to a data reference with the same base with predicates that
   add up (OR-up) to the true predicate: this ensures that the data
   reference A is touched (read or written) on every iteration of the
   if-converted loop.  */
static void
hash_memrefs_baserefs_and_store_DRs_read_written_info (data_reference_p a)
{

  data_reference_p *master_dr, *base_master_dr;
  tree base_ref = DR_BASE_OBJECT (a);
  innermost_loop_behavior *innermost = &DR_INNERMOST (a);
  /* Predicate of the block containing the access.  */
  tree ca = bb_predicate (gimple_bb (DR_STMT (a)));
  bool exist1, exist2;

  /* The first DR seen for this innermost behavior becomes the master on
     which predicates accumulate.  */
  master_dr = &innermost_DR_map->get_or_insert (innermost, &exist1);
  if (!exist1)
    *master_dr = a;

  if (DR_IS_WRITE (a))
    {
      IFC_DR (*master_dr)->w_predicate
	= fold_or_predicates (UNKNOWN_LOCATION, ca,
			      IFC_DR (*master_dr)->w_predicate);
      if (is_true_predicate (IFC_DR (*master_dr)->w_predicate))
	DR_W_UNCONDITIONALLY (*master_dr) = true;
    }
  /* Any access (read or write) contributes to the rw predicate.  */
  IFC_DR (*master_dr)->rw_predicate
    = fold_or_predicates (UNKNOWN_LOCATION, ca,
			  IFC_DR (*master_dr)->rw_predicate);
  if (is_true_predicate (IFC_DR (*master_dr)->rw_predicate))
    DR_RW_UNCONDITIONALLY (*master_dr) = true;

  if (DR_IS_WRITE (a))
    {
      /* Likewise accumulate write predicates on the base reference's
	 master DR.  */
      base_master_dr = &baseref_DR_map->get_or_insert (base_ref, &exist2);
      if (!exist2)
	*base_master_dr = a;
      IFC_DR (*base_master_dr)->base_w_predicate
	= fold_or_predicates (UNKNOWN_LOCATION, ca,
			      IFC_DR (*base_master_dr)->base_w_predicate);
      if (is_true_predicate (IFC_DR (*base_master_dr)->base_w_predicate))
	DR_BASE_W_UNCONDITIONALLY (*base_master_dr) = true;
    }
}
724
/* Return TRUE if can prove the index IDX of an array reference REF is
   within array bound.  Return false otherwise.  DTA is the loop being
   analyzed (callback data for for_each_index).  */

static bool
idx_within_array_bound (tree ref, tree *idx, void *dta)
{
  bool overflow;
  widest_int niter, valid_niter, delta, wi_step;
  tree ev, init, step;
  tree low, high;
  struct loop *loop = (struct loop*) dta;

  /* Only support within-bound access for array references.  */
  if (TREE_CODE (ref) != ARRAY_REF)
    return false;

  /* For arrays at the end of the structure, we are not guaranteed that they
     do not really extend over their declared size.  However, for arrays of
     size greater than one, this is unlikely to be intended.  */
  if (array_at_struct_end_p (ref))
    return false;

  /* Analyze IDX as an affine evolution INIT + i * STEP in LOOP.  */
  ev = analyze_scalar_evolution (loop, *idx);
  ev = instantiate_parameters (loop, ev);
  init = initial_condition (ev);
  step = evolution_part_in_loop_num (ev, loop->num);

  if (!init || TREE_CODE (init) != INTEGER_CST
      || (step && TREE_CODE (step) != INTEGER_CST))
    return false;

  low = array_ref_low_bound (ref);
  high = array_ref_up_bound (ref);

  /* The case of nonconstant bounds could be handled, but it would be
     complicated.  */
  if (TREE_CODE (low) != INTEGER_CST
      || !high || TREE_CODE (high) != INTEGER_CST)
    return false;

  /* Check if the initial idx is within bound.  */
  if (wi::to_widest (init) < wi::to_widest (low)
      || wi::to_widest (init) > wi::to_widest (high))
    return false;

  /* The idx is always within bound.  */
  if (!step || integer_zerop (step))
    return true;

  if (!max_loop_iterations (loop, &niter))
    return false;

  /* Compute the distance from INIT to the bound being approached, and
     the magnitude of the per-iteration step.  */
  if (wi::to_widest (step) < 0)
    {
      delta = wi::to_widest (init) - wi::to_widest (low);
      wi_step = -wi::to_widest (step);
    }
  else
    {
      delta = wi::to_widest (high) - wi::to_widest (init);
      wi_step = wi::to_widest (step);
    }

  valid_niter = wi::div_floor (delta, wi_step, SIGNED, &overflow);
  /* The iteration space of idx is within array bound.  */
  if (!overflow && niter <= valid_niter)
    return true;

  return false;
}
795
796/* Return TRUE if ref is a within bound array reference. */
797
798static bool
799ref_within_array_bound (gimple *stmt, tree ref)
800{
801 struct loop *loop = loop_containing_stmt (stmt);
802
803 gcc_assert (loop != NULL);
804 return for_each_index (&ref, idx_within_array_bound, loop);
805}
806
807
808/* Given a memory reference expression T, return TRUE if base object
809 it refers to is writable. The base object of a memory reference
810 is the main object being referenced, which is returned by function
811 get_base_address. */
812
813static bool
814base_object_writable (tree ref)
815{
816 tree base_tree = get_base_address (ref);
817
818 return (base_tree
819 && DECL_P (base_tree)
820 && decl_binds_to_current_def_p (base_tree)
821 && !TREE_READONLY (base_tree));
822}
823
/* Return true when the memory references of STMT won't trap in the
   if-converted code.  There are two things that we have to check for:

   - writes to memory occur to writable memory: if-conversion of
   memory writes transforms the conditional memory writes into
   unconditional writes, i.e. "if (cond) A[i] = foo" is transformed
   into "A[i] = cond ? foo : A[i]", and as the write to memory may not
   be executed at all in the original code, it may be a readonly
   memory.  To check that A is not const-qualified, we check that
   there exists at least an unconditional write to A in the current
   function.

   - reads or writes to memory are valid memory accesses for every
   iteration.  To check that the memory accesses are correctly formed
   and that we are allowed to read and write in these locations, we
   check that the memory accesses to be if-converted occur at every
   iteration unconditionally.

   Returns true for the memory reference in STMT, same memory reference
   is read or written unconditionally at least once and the base memory
   reference is written unconditionally once.  This is to check reference
   will not write fault.  Also returns true if the memory reference is
   unconditionally read once then we are conditionally writing to memory
   which is defined as read and write and is bound to the definition
   we are seeing.  */
static bool
ifcvt_memrefs_wont_trap (gimple *stmt, vec<data_reference_p> drs)
{
  data_reference_p *master_dr, *base_master_dr;
  /* gimple_uid holds a 1-based index into DRS for this statement
     (presumably assigned by the caller that collected the DRs — set up
     outside this chunk).  */
  data_reference_p a = drs[gimple_uid (stmt) - 1];

  tree base = DR_BASE_OBJECT (a);
  innermost_loop_behavior *innermost = &DR_INNERMOST (a);

  gcc_assert (DR_STMT (a) == stmt);
  gcc_assert (DR_BASE_ADDRESS (a) || DR_OFFSET (a)
              || DR_INIT (a) || DR_STEP (a));

  master_dr = innermost_DR_map->get (innermost);
  gcc_assert (master_dr != NULL);

  base_master_dr = baseref_DR_map->get (base);

  /* If a is unconditionally written to it doesn't trap.  */
  if (DR_W_UNCONDITIONALLY (*master_dr))
    return true;

  /* If a is unconditionally accessed then ...

     Even a is conditional access, we can treat it as an unconditional
     one if it's an array reference and all its index are within array
     bound.  */
  if (DR_RW_UNCONDITIONALLY (*master_dr)
      || ref_within_array_bound (stmt, DR_REF (a)))
    {
      /* an unconditional read won't trap.  */
      if (DR_IS_READ (a))
	return true;

      /* an unconditionaly write won't trap if the base is written
         to unconditionally.  */
      if (base_master_dr
	  && DR_BASE_W_UNCONDITIONALLY (*base_master_dr))
	return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
      /* or the base is known to be not readonly.  */
      else if (base_object_writable (DR_REF (a)))
	return PARAM_VALUE (PARAM_ALLOW_STORE_DATA_RACES);
    }

  return false;
}
895
/* Return true if STMT could be converted into a masked load or store
   (conditional load or store based on a mask computed from bb predicate).  */

static bool
ifcvt_can_use_mask_load_store (gimple *stmt)
{
  tree lhs, ref;
  machine_mode mode;
  basic_block bb = gimple_bb (stmt);
  bool is_load;

  /* Masked load/store only makes sense when we will vectorize.  */
  if (!(flag_tree_loop_vectorize || bb->loop_father->force_vectorize)
      || bb->loop_father->dont_vectorize
      || !gimple_assign_single_p (stmt)
      || gimple_has_volatile_ops (stmt))
    return false;

  /* Check whether this is a load or store.  */
  lhs = gimple_assign_lhs (stmt);
  if (gimple_store_p (stmt))
    {
      if (!is_gimple_val (gimple_assign_rhs1 (stmt)))
	return false;
      is_load = false;
      ref = lhs;
    }
  else if (gimple_assign_load_p (stmt))
    {
      is_load = true;
      ref = gimple_assign_rhs1 (stmt);
    }
  else
    return false;

  if (may_be_nonaddressable_p (ref))
    return false;

  /* Mask should be integer mode of the same size as the load/store
     mode.  */
  mode = TYPE_MODE (TREE_TYPE (lhs));
  if (!int_mode_for_mode (mode).exists () || VECTOR_MODE_P (mode))
    return false;

  /* Finally ask the target whether it supports the masked operation.  */
  if (can_vec_mask_load_store_p (mode, VOIDmode, is_load))
    return true;

  return false;
}
944
/* Return true when STMT is if-convertible.

   GIMPLE_ASSIGN statement is not if-convertible if,
   - it is not movable,
   - it could trap,
   - LHS is not var decl.  */

static bool
if_convertible_gimple_assign_stmt_p (gimple *stmt,
				     vec<data_reference_p> refs)
{
  tree lhs = gimple_assign_lhs (stmt);

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "-------------------------\n");
      print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
    }

  if (!is_gimple_reg_type (TREE_TYPE (lhs)))
    return false;

  /* Some of these constraints might be too conservative.  */
  if (stmt_ends_bb_p (stmt)
      || gimple_has_volatile_ops (stmt)
      || (TREE_CODE (lhs) == SSA_NAME
          && SSA_NAME_OCCURS_IN_ABNORMAL_PHI (lhs))
      || gimple_has_side_effects (stmt))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
        fprintf (dump_file, "stmt not suitable for ifcvt\n");
      return false;
    }

  /* tree-into-ssa.c uses GF_PLF_1, so avoid it, because
     in between if_convertible_loop_p and combine_blocks
     we can perform loop versioning.  GF_PLF_2 marks statements that
     will become masked loads/stores.  */
  gimple_set_plf (stmt, GF_PLF_2, false);

  /* The statement needs predication when it may trap and the access is
     not provably safe (no virtual use, a possibly trapping reference,
     or memrefs that can fault).  */
  if ((! gimple_vuse (stmt)
       || gimple_could_trap_p_1 (stmt, false, false)
       || ! ifcvt_memrefs_wont_trap (stmt, refs))
      && gimple_could_trap_p (stmt))
    {
      /* A trapping load/store may still be handled by masking it.  */
      if (ifcvt_can_use_mask_load_store (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  any_pred_load_store = true;
	  return true;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "tree could trap...\n");
      return false;
    }

  /* When if-converting stores force versioning, likewise if we
     ended up generating store data races.  */
  if (gimple_vdef (stmt))
    any_pred_load_store = true;

  return true;
}
1007
/* Return true when STMT is if-convertible.

   A statement is if-convertible if:
   - it is an if-convertible GIMPLE_ASSIGN,
   - it is a GIMPLE_LABEL, GIMPLE_DEBUG or a GIMPLE_COND,
   - it is a call to a const builtin.  */

static bool
if_convertible_stmt_p (gimple *stmt, vec<data_reference_p> refs)
{
  switch (gimple_code (stmt))
    {
    case GIMPLE_LABEL:
    case GIMPLE_DEBUG:
    case GIMPLE_COND:
      /* These carry no computation that could trap.  */
      return true;

    case GIMPLE_ASSIGN:
      return if_convertible_gimple_assign_stmt_p (stmt, refs);

    case GIMPLE_CALL:
      {
	tree fndecl = gimple_call_fndecl (stmt);
	if (fndecl)
	  {
	    int flags = gimple_call_flags (stmt);
	    /* Only const, non-looping calls are safe to execute
	       unconditionally.  */
	    if ((flags & ECF_CONST)
		&& !(flags & ECF_LOOPING_CONST_OR_PURE)
		/* We can only vectorize some builtins at the moment,
		   so restrict if-conversion to those.  */
		&& DECL_BUILT_IN (fndecl))
	      return true;
	  }
	return false;
      }

    default:
      /* Don't know what to do with 'em so don't do anything.  */
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "don't know what to do\n");
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      return false;
    }

  /* Not reached: every switch case returns.  */
  return true;
}
1056
/* Assumes that BB has more than 1 predecessor.
   Returns false if at least one predecessor edge of BB is non-critical
   (its source has a single successor), and true otherwise, i.e. when
   every predecessor edge is critical.  (Note: the code walks bb->preds;
   the historic comment mentioning "successors" was misleading.)  */

static inline bool
all_preds_critical_p (basic_block bb)
{
  edge e;
  edge_iterator ei;

  FOR_EACH_EDGE (e, ei, bb->preds)
    if (EDGE_COUNT (e->src->succs) == 1)
      return false;
  return true;
}
1072
1073/* Returns true if at least one successor in on critical edge. */
1074static inline bool
1075has_pred_critical_p (basic_block bb)
1076{
1077 edge e;
1078 edge_iterator ei;
1079
1080 FOR_EACH_EDGE (e, ei, bb->preds)
1081 if (EDGE_COUNT (e->src->succs) > 1)
1082 return true;
1083 return false;
1084}
1085
/* Return true when BB is if-convertible.  This routine does not check
   basic block's statements and phis.

   A basic block is not if-convertible if:
   - it is non-empty and it is after the exit block (in BFS order),
   - it is after the exit block but before the latch,
   - its edges are not normal.

   EXIT_BB is the basic block containing the exit of the LOOP.  BB is
   inside LOOP.  */

static bool
if_convertible_bb_p (struct loop *loop, basic_block bb, basic_block exit_bb)
{
  edge e;
  edge_iterator ei;

  if (dump_file && (dump_flags & TDF_DETAILS))
    fprintf (dump_file, "----------[%d]-------------\n", bb->index);

  /* More than two successors cannot be collapsed into a single
     predicate.  */
  if (EDGE_COUNT (bb->succs) > 2)
    return false;

  /* EXIT_BB is non-NULL once the caller has already seen the exit
     block, i.e. BB comes after it in the visiting order.  */
  if (exit_bb)
    {
      if (bb != loop->latch)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "basic block after exit bb but before latch\n");
	  return false;
	}
      else if (!empty_block_p (bb))
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file, "non empty basic block after exit bb\n");
	  return false;
	}
      /* NOTE(review): the "bb == loop->latch" test below is always true
	 here, since the first branch already returned when BB was not
	 the latch.  */
      else if (bb == loop->latch
	       && bb != exit_bb
	       && !dominated_by_p (CDI_DOMINATORS, bb, exit_bb))
	  {
	    if (dump_file && (dump_flags & TDF_DETAILS))
	      fprintf (dump_file, "latch is not dominated by exit_block\n");
	    return false;
	  }
    }

  /* Be less adventurous and handle only normal edges.  */
  FOR_EACH_EDGE (e, ei, bb->succs)
    if (e->flags & (EDGE_EH | EDGE_ABNORMAL | EDGE_IRREDUCIBLE_LOOP))
      {
	if (dump_file && (dump_flags & TDF_DETAILS))
	  fprintf (dump_file, "Difficult to handle edges\n");
	return false;
      }

  return true;
}
1144
1145/* Return true when all predecessor blocks of BB are visited. The
1146 VISITED bitmap keeps track of the visited blocks. */
1147
1148static bool
1149pred_blocks_visited_p (basic_block bb, bitmap *visited)
1150{
1151 edge e;
1152 edge_iterator ei;
1153 FOR_EACH_EDGE (e, ei, bb->preds)
1154 if (!bitmap_bit_p (*visited, e->src->index))
1155 return false;
1156
1157 return true;
1158}
1159
/* Get body of a LOOP in suitable order for if-conversion.  It is
   caller's responsibility to deallocate basic block list.
   If-conversion suitable order is, breadth first sort (BFS) order
   with an additional constraint: select a block only if all its
   predecessors are already selected.

   Returns NULL if the loop contains an irreducible region, in which
   case all intermediate allocations are freed here.  */

static basic_block *
get_loop_body_in_if_conv_order (const struct loop *loop)
{
  basic_block *blocks, *blocks_in_bfs_order;
  basic_block bb;
  bitmap visited;
  unsigned int index = 0;
  unsigned int visited_count = 0;

  gcc_assert (loop->num_nodes);
  gcc_assert (loop->latch != EXIT_BLOCK_PTR_FOR_FN (cfun));

  blocks = XCNEWVEC (basic_block, loop->num_nodes);
  visited = BITMAP_ALLOC (NULL);

  blocks_in_bfs_order = get_loop_body_in_bfs_order (loop);

  index = 0;
  while (index < loop->num_nodes)
    {
      bb = blocks_in_bfs_order [index];

      /* Irreducible regions cannot be if-converted: give up and free
	 everything allocated so far.  */
      if (bb->flags & BB_IRREDUCIBLE_LOOP)
	{
	  free (blocks_in_bfs_order);
	  BITMAP_FREE (visited);
	  free (blocks);
	  return NULL;
	}

      if (!bitmap_bit_p (visited, bb->index))
	{
	  /* Select a block only once all its predecessors have been
	     selected; the loop header has no such constraint as its
	     in-loop predecessor (the latch) comes after it.  */
	  if (pred_blocks_visited_p (bb, &visited)
	      || bb == loop->header)
	    {
	      /* This block is now visited.  */
	      bitmap_set_bit (visited, bb->index);
	      blocks[visited_count++] = bb;
	    }
	}

      index++;

      /* Some blocks may have been deferred because a predecessor was
	 not yet selected: rescan the BFS list from the start until
	 every block has been placed.  */
      if (index == loop->num_nodes
	  && visited_count != loop->num_nodes)
	/* Not done yet.  */
	index = 0;
    }
  free (blocks_in_bfs_order);
  BITMAP_FREE (visited);
  return blocks;
}
1218
/* Returns true when the analysis of the predicates for all the basic
   blocks in LOOP succeeded.

   predicate_bbs first allocates the predicates of the basic blocks.
   These fields are then initialized with the tree expressions
   representing the predicates under which a basic block is executed
   in the LOOP.  As the loop->header is executed at each iteration, it
   has the "true" predicate.  Other statements executed under a
   condition are predicated with that condition, for example

   | if (x)
   |   S1;
   | else
   |   S2;

   S1 will be predicated with "x", and
   S2 will be predicated with "!x".  */

static void
predicate_bbs (loop_p loop)
{
  unsigned int i;

  /* Start with a clean predicate on every block of the loop.  */
  for (i = 0; i < loop->num_nodes; i++)
    init_bb_predicate (ifc_bbs[i]);

  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      tree cond;
      gimple *stmt;

      /* The loop latch and loop exit block are always executed and
	 have no extra conditions to be processed: skip them.  */
      if (bb == loop->latch
	  || bb_with_exit_edge_p (loop, bb))
	{
	  reset_bb_predicate (bb);
	  continue;
	}

      cond = bb_predicate (bb);
      stmt = last_stmt (bb);
      if (stmt && gimple_code (stmt) == GIMPLE_COND)
	{
	  /* Rebuild the comparison of the GIMPLE_COND as a tree
	     expression so it can be combined into the successors'
	     predicates.  */
	  tree c2;
	  edge true_edge, false_edge;
	  location_t loc = gimple_location (stmt);
	  tree c = build2_loc (loc, gimple_cond_code (stmt),
			       boolean_type_node,
			       gimple_cond_lhs (stmt),
			       gimple_cond_rhs (stmt));

	  /* Add new condition into destination's predicate list.  */
	  extract_true_false_edges_from_block (gimple_bb (stmt),
					       &true_edge, &false_edge);

	  /* If C is true, then TRUE_EDGE is taken.  */
	  add_to_dst_predicate_list (loop, true_edge, unshare_expr (cond),
				     unshare_expr (c));

	  /* If C is false, then FALSE_EDGE is taken.  */
	  c2 = build1_loc (loc, TRUTH_NOT_EXPR, boolean_type_node,
			   unshare_expr (c));
	  add_to_dst_predicate_list (loop, false_edge,
				     unshare_expr (cond), c2);

	  cond = NULL_TREE;
	}

      /* If current bb has only one successor, then consider it as an
	 unconditional goto.  */
      if (single_succ_p (bb))
	{
	  basic_block bb_n = single_succ (bb);

	  /* The successor bb inherits the predicate of its
	     predecessor.  If there is no predicate in the predecessor
	     bb, then consider the successor bb as always executed.  */
	  if (cond == NULL_TREE)
	    cond = boolean_true_node;

	  add_to_predicate_list (loop, bb_n, cond);
	}
    }

  /* The loop header is always executed.  */
  reset_bb_predicate (loop->header);
  gcc_assert (bb_predicate_gimplified_stmts (loop->header) == NULL
	      && bb_predicate_gimplified_stmts (loop->latch) == NULL);
}
1310
1311/* Build region by adding loop pre-header and post-header blocks. */
1312
1313static vec<basic_block>
1314build_region (struct loop *loop)
1315{
1316 vec<basic_block> region = vNULL;
1317 basic_block exit_bb = NULL;
1318
1319 gcc_assert (ifc_bbs);
1320 /* The first element is loop pre-header. */
1321 region.safe_push (loop_preheader_edge (loop)->src);
1322
1323 for (unsigned int i = 0; i < loop->num_nodes; i++)
1324 {
1325 basic_block bb = ifc_bbs[i];
1326 region.safe_push (bb);
1327 /* Find loop postheader. */
1328 edge e;
1329 edge_iterator ei;
1330 FOR_EACH_EDGE (e, ei, bb->succs)
1331 if (loop_exit_edge_p (loop, e))
1332 {
1333 exit_bb = e->dest;
1334 break;
1335 }
1336 }
1337 /* The last element is loop post-header. */
1338 gcc_assert (exit_bb);
1339 region.safe_push (exit_bb);
1340 return region;
1341}
1342
/* Return true when LOOP is if-convertible.  This is a helper function
   for if_convertible_loop_p.  REFS and DDRS are initialized and freed
   in if_convertible_loop_p.

   On success this also leaves IFC_BBS, INNERMOST_DR_MAP and
   BASEREF_DR_MAP set up for the actual transformation; the caller is
   responsible for releasing them.  */

static bool
if_convertible_loop_p_1 (struct loop *loop, vec<data_reference_p> *refs)
{
  unsigned int i;
  basic_block exit_bb = NULL;
  vec<basic_block> region;

  /* Collect the data references of LOOP; an unanalyzable reference
     makes the loop not if-convertible.  */
  if (find_data_references_in_loop (loop, refs) == chrec_dont_know)
    return false;

  calculate_dominance_info (CDI_DOMINATORS);

  /* Allow statements that can be handled during if-conversion.  */
  ifc_bbs = get_loop_body_in_if_conv_order (loop);
  if (!ifc_bbs)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "Irreducible loop\n");
      return false;
    }

  /* Check each basic block; EXIT_BB stays NULL until the block with
     the loop exit edge has been seen, which constrains the blocks
     that may follow it.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];

      if (!if_convertible_bb_p (loop, bb, exit_bb))
	return false;

      if (bb_with_exit_edge_p (loop, bb))
	exit_bb = bb;
    }

  /* Clear the statement UIDs (they are reused below to link
     statements to their data references) and reject any statement
     kind we do not handle.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator gsi;

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	switch (gimple_code (gsi_stmt (gsi)))
	  {
	  case GIMPLE_LABEL:
	  case GIMPLE_ASSIGN:
	  case GIMPLE_CALL:
	  case GIMPLE_DEBUG:
	  case GIMPLE_COND:
	    gimple_set_uid (gsi_stmt (gsi), 0);
	    break;
	  default:
	    return false;
	  }
    }

  data_reference_p dr;

  innermost_DR_map
	  = new hash_map<innermost_loop_behavior_hash, data_reference_p>;
  baseref_DR_map = new hash_map<tree_operand_hash, data_reference_p>;

  /* Compute post-dominator tree locally.  */
  region = build_region (loop);
  calculate_dominance_info_for_region (CDI_POST_DOMINATORS, region);

  predicate_bbs (loop);

  /* Free post-dominator tree since it is not used after predication.  */
  free_dominance_info_for_region (cfun, CDI_POST_DOMINATORS, region);
  region.release ();

  /* Initialize the per-data-reference if-conversion bookkeeping and
     record which references are read or written unconditionally.  */
  for (i = 0; refs->iterate (i, &dr); i++)
    {
      tree ref = DR_REF (dr);

      dr->aux = XNEW (struct ifc_dr);
      DR_BASE_W_UNCONDITIONALLY (dr) = false;
      DR_RW_UNCONDITIONALLY (dr) = false;
      DR_W_UNCONDITIONALLY (dr) = false;
      IFC_DR (dr)->rw_predicate = boolean_false_node;
      IFC_DR (dr)->w_predicate = boolean_false_node;
      IFC_DR (dr)->base_w_predicate = boolean_false_node;
      /* UID i + 1 links DR_STMT back to its first data reference;
	 0 means "no data reference".  */
      if (gimple_uid (DR_STMT (dr)) == 0)
	gimple_set_uid (DR_STMT (dr), i + 1);

      /* If DR doesn't have innermost loop behavior or it's a compound
	 memory reference, we synthesize its innermost loop behavior
	 for hashing.  */
      if (TREE_CODE (ref) == COMPONENT_REF
	  || TREE_CODE (ref) == IMAGPART_EXPR
	  || TREE_CODE (ref) == REALPART_EXPR
	  || !(DR_BASE_ADDRESS (dr) || DR_OFFSET (dr)
	       || DR_INIT (dr) || DR_STEP (dr)))
	{
	  while (TREE_CODE (ref) == COMPONENT_REF
	         || TREE_CODE (ref) == IMAGPART_EXPR
	         || TREE_CODE (ref) == REALPART_EXPR)
	    ref = TREE_OPERAND (ref, 0);

	  memset (&DR_INNERMOST (dr), 0, sizeof (DR_INNERMOST (dr)));
	  DR_BASE_ADDRESS (dr) = ref;
	}
      hash_memrefs_baserefs_and_store_DRs_read_written_info (dr);
    }

  /* Check the if-convertibility of statements in predicated BBs;
     blocks dominating the latch are executed unconditionally and need
     no check.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gimple_stmt_iterator itr;

      /* Check the if-convertibility of statements in predicated BBs.  */
      if (!dominated_by_p (CDI_DOMINATORS, loop->latch, bb))
	for (itr = gsi_start_bb (bb); !gsi_end_p (itr); gsi_next (&itr))
	  if (!if_convertible_stmt_p (gsi_stmt (itr), *refs))
	    return false;
    }

  /* Checking PHIs needs to be done after stmts, as the fact whether there
     are any masked loads or stores affects the tests.  */
  for (i = 0; i < loop->num_nodes; i++)
    {
      basic_block bb = ifc_bbs[i];
      gphi_iterator itr;

      for (itr = gsi_start_phis (bb); !gsi_end_p (itr); gsi_next (&itr))
	if (!if_convertible_phi_p (loop, bb, itr.phi ()))
	  return false;
    }

  if (dump_file)
    fprintf (dump_file, "Applying if-conversion\n");

  return true;
}
1478
/* Return true when LOOP is if-convertible.
   LOOP is if-convertible if:
   - it is innermost,
   - it has two or more basic blocks,
   - it has only one exit,
   - loop header is not the exit edge,
   - if its basic blocks and phi nodes are if convertible.

   Allocates the data references and the DR hash maps for the helper
   if_convertible_loop_p_1 and releases them again before returning.  */

static bool
if_convertible_loop_p (struct loop *loop)
{
  edge e;
  edge_iterator ei;
  bool res = false;
  vec<data_reference_p> refs;

  /* Handle only innermost loop.  */
  if (!loop || loop->inner)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "not innermost loop\n");
      return false;
    }

  /* If only one block, no need for if-conversion.  */
  if (loop->num_nodes <= 2)
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "less than 2 basic blocks\n");
      return false;
    }

  /* More than one loop exit is too much to handle.  */
  if (!single_exit (loop))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file, "multiple exits\n");
      return false;
    }

  /* If one of the loop header's edge is an exit edge then do not
     apply if-conversion.  */
  FOR_EACH_EDGE (e, ei, loop->header->succs)
    if (loop_exit_edge_p (loop, e))
      return false;

  refs.create (5);
  res = if_convertible_loop_p_1 (loop, &refs);

  /* Release the per-DR bookkeeping allocated by the helper.  */
  data_reference_p dr;
  unsigned int i;
  for (i = 0; refs.iterate (i, &dr); i++)
    free (dr->aux);

  free_data_refs (refs);

  delete innermost_DR_map;
  innermost_DR_map = NULL;

  delete baseref_DR_map;
  baseref_DR_map = NULL;

  return res;
}
1543
/* Returns true if def-stmt for phi argument ARG is simple increment/decrement
   which is in predicated basic block.
   In fact, the following PHI pattern is searching:
      loop-header:
	reduc_1 = PHI <..., reduc_2>
      ...
	if (...)
	  reduc_3 = ...
	reduc_2 = PHI <reduc_1, reduc_3>

   ARG_0 and ARG_1 are correspondent PHI arguments.
   REDUC, OP0 and OP1 contain reduction stmt and its operands.
   EXTENDED is true if PHI has > 2 arguments.  */

static bool
is_cond_scalar_reduction (gimple *phi, gimple **reduc, tree arg_0, tree arg_1,
			  tree *op0, tree *op1, bool extended)
{
  tree lhs, r_op1, r_op2;
  gimple *stmt;
  gimple *header_phi = NULL;
  enum tree_code reduction_op;
  basic_block bb = gimple_bb (phi);
  struct loop *loop = bb->loop_father;
  edge latch_e = loop_latch_edge (loop);
  imm_use_iterator imm_iter;
  use_operand_p use_p;
  edge e;
  edge_iterator ei;
  bool result = false;
  if (TREE_CODE (arg_0) != SSA_NAME || TREE_CODE (arg_1) != SSA_NAME)
    return false;

  /* Identify which argument is defined by the loop-header PHI
     (reduc_1) and which by the reduction statement (reduc_3).  For
     extended PHIs only the ARG_1-is-header-phi form is accepted.  */
  if (!extended && gimple_code (SSA_NAME_DEF_STMT (arg_0)) == GIMPLE_PHI)
    {
      lhs = arg_1;
      header_phi = SSA_NAME_DEF_STMT (arg_0);
      stmt = SSA_NAME_DEF_STMT (arg_1);
    }
  else if (gimple_code (SSA_NAME_DEF_STMT (arg_1)) == GIMPLE_PHI)
    {
      lhs = arg_0;
      header_phi = SSA_NAME_DEF_STMT (arg_1);
      stmt = SSA_NAME_DEF_STMT (arg_0);
    }
  else
    return false;
  if (gimple_bb (header_phi) != loop->header)
    return false;

  /* The header PHI must be fed by this PHI through the latch, closing
     the reduction cycle.  */
  if (PHI_ARG_DEF_FROM_EDGE (header_phi, latch_e) != PHI_RESULT (phi))
    return false;

  if (gimple_code (stmt) != GIMPLE_ASSIGN
      || gimple_has_volatile_ops (stmt))
    return false;

  if (!flow_bb_inside_loop_p (loop, gimple_bb (stmt)))
    return false;

  /* The reduction statement must be conditional.  */
  if (!is_predicated (gimple_bb (stmt)))
    return false;

  /* Check that stmt-block is predecessor of phi-block.  */
  FOR_EACH_EDGE (e, ei, gimple_bb (stmt)->succs)
    if (e->dest == bb)
      {
	result = true;
	break;
      }
  if (!result)
    return false;

  if (!has_single_use (lhs))
    return false;

  /* Only simple increments/decrements are recognized.  */
  reduction_op = gimple_assign_rhs_code (stmt);
  if (reduction_op != PLUS_EXPR && reduction_op != MINUS_EXPR)
    return false;
  r_op1 = gimple_assign_rhs1 (stmt);
  r_op2 = gimple_assign_rhs2 (stmt);

  /* Make R_OP1 to hold reduction variable.  */
  if (r_op2 == PHI_RESULT (header_phi)
      && reduction_op == PLUS_EXPR)
    std::swap (r_op1, r_op2);
  else if (r_op1 != PHI_RESULT (header_phi))
    return false;

  /* Check that R_OP1 is used in reduction stmt or in PHI only.  */
  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, r_op1)
    {
      gimple *use_stmt = USE_STMT (use_p);
      if (is_gimple_debug (use_stmt))
	continue;
      if (use_stmt == stmt)
	continue;
      if (gimple_code (use_stmt) != GIMPLE_PHI)
	return false;
    }

  *op0 = r_op1; *op1 = r_op2;
  *reduc = stmt;
  return true;
}
1649
/* Converts conditional scalar reduction into unconditional form, e.g.
     bb_4
       if (_5 != 0) goto bb_5 else goto bb_6
     end_bb_4
     bb_5
       res_6 = res_13 + 1;
     end_bb_5
     bb_6
       # res_2 = PHI <res_13(4), res_6(5)>
     end_bb_6

   will be converted into sequence
    _ifc__1 = _5 != 0 ? 1 : 0;
    res_2 = res_13 + _ifc__1;
  Argument SWAP tells that arguments of conditional expression should be
  swapped.
  Returns rhs of resulting PHI assignment.  */

static tree
convert_scalar_cond_reduction (gimple *reduc, gimple_stmt_iterator *gsi,
			       tree cond, tree op0, tree op1, bool swap)
{
  gimple_stmt_iterator stmt_it;
  gimple *new_assign;
  tree rhs;
  tree rhs1 = gimple_assign_rhs1 (reduc);
  tree tmp = make_temp_ssa_name (TREE_TYPE (rhs1), NULL, "_ifc_");
  tree c;
  tree zero = build_zero_cst (TREE_TYPE (rhs1));

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "Found cond scalar reduction.\n");
      print_gimple_stmt (dump_file, reduc, 0, TDF_SLIM);
    }

  /* Build cond expression using COND and constant operand
     of reduction rhs.  */
  c = fold_build_cond_expr (TREE_TYPE (rhs1),
			    unshare_expr (cond),
			    swap ? zero : op1,
			    swap ? op1 : zero);

  /* Create assignment stmt and insert it at GSI.  */
  new_assign = gimple_build_assign (tmp, c);
  gsi_insert_before (gsi, new_assign, GSI_SAME_STMT);
  /* Build rhs for unconditional increment/decrement: the conditional
     operand TMP is zero when the reduction should not happen, leaving
     OP0 unchanged.  */
  rhs = fold_build2 (gimple_assign_rhs_code (reduc),
		     TREE_TYPE (rhs1), op0, tmp);

  /* Delete original reduction stmt.  */
  stmt_it = gsi_for_stmt (reduc);
  gsi_remove (&stmt_it, true);
  release_defs (reduc);
  return rhs;
}
1706
1707/* Produce condition for all occurrences of ARG in PHI node. */
1708
1709static tree
1710gen_phi_arg_condition (gphi *phi, vec<int> *occur,
1711 gimple_stmt_iterator *gsi)
1712{
1713 int len;
1714 int i;
1715 tree cond = NULL_TREE;
1716 tree c;
1717 edge e;
1718
1719 len = occur->length ();
1720 gcc_assert (len > 0);
1721 for (i = 0; i < len; i++)
1722 {
1723 e = gimple_phi_arg_edge (phi, (*occur)[i]);
1724 c = bb_predicate (e->src);
1725 if (is_true_predicate (c))
1726 {
1727 cond = c;
1728 break;
1729 }
1730 c = force_gimple_operand_gsi_1 (gsi, unshare_expr (c),
1731 is_gimple_condexpr, NULL_TREE,
1732 true, GSI_SAME_STMT);
1733 if (cond != NULL_TREE)
1734 {
1735 /* Must build OR expression. */
1736 cond = fold_or_predicates (EXPR_LOCATION (c), c, cond);
1737 cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
1738 is_gimple_condexpr, NULL_TREE,
1739 true, GSI_SAME_STMT);
1740 }
1741 else
1742 cond = c;
1743 }
1744 gcc_assert (cond != NULL_TREE);
1745 return cond;
1746}
1747
/* Local valueization callback that follows all-use SSA edges.
   Passed to fold_stmt; returning VAL unchanged signals that every
   SSA use-def edge may be followed during folding.  */

static tree
ifcvt_follow_ssa_use_edges (tree val)
{
  return val;
}
1755
/* Replace a scalar PHI node with a COND_EXPR using COND as condition.
   This routine can handle PHI nodes with more than two arguments.

   For example,
     S1: A = PHI <x1(1), x2(5)>
   is converted into,
     S2: A = cond ? x1 : x2;

   The generated code is inserted at GSI that points to the top of
   basic block's statement list.
   If PHI node has more than two arguments a chain of conditional
   expression is produced.  */


static void
predicate_scalar_phi (gphi *phi, gimple_stmt_iterator *gsi)
{
  gimple *new_stmt = NULL, *reduc;
  tree rhs, res, arg0, arg1, op0, op1, scev;
  tree cond;
  unsigned int index0;
  unsigned int max, args_len;
  edge e;
  basic_block bb;
  unsigned int i;

  res = gimple_phi_result (phi);
  /* Virtual PHIs are not replaced.  */
  if (virtual_operand_p (res))
    return;

  /* A degenerate PHI (all arguments equal) or a PHI whose result has
     a known scalar evolution needs no condition: emit a plain copy.  */
  if ((rhs = degenerate_phi_result (phi))
      || ((scev = analyze_scalar_evolution (gimple_bb (phi)->loop_father,
					    res))
	  && !chrec_contains_undetermined (scev)
	  && scev != res
	  && (rhs = gimple_phi_arg_def (phi, 0))))
    {
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Degenerate phi!\n");
	  print_gimple_stmt (dump_file, phi, 0, TDF_SLIM);
	}
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      update_stmt (new_stmt);
      return;
    }

  bb = gimple_bb (phi);
  if (EDGE_COUNT (bb->preds) == 2)
    {
      /* Predicate ordinary PHI node with 2 arguments.  */
      edge first_edge, second_edge;
      basic_block true_bb;
      first_edge = EDGE_PRED (bb, 0);
      second_edge = EDGE_PRED (bb, 1);
      cond = bb_predicate (first_edge->src);
      /* Prefer the edge whose predicate is not negated, so the
	 COND_EXPR condition stays simple.  */
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	std::swap (first_edge, second_edge);
      if (EDGE_COUNT (first_edge->src->succs) > 1)
	{
	  cond = bb_predicate (second_edge->src);
	  if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	    cond = TREE_OPERAND (cond, 0);
	  else
	    first_edge = second_edge;
	}
      else
	cond = bb_predicate (first_edge->src);
      /* Gimplify the condition to a valid cond-expr conditonal operand.  */
      cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					 is_gimple_condexpr, NULL_TREE,
					 true, GSI_SAME_STMT);
      true_bb = first_edge->src;
      /* Pick the PHI arguments so ARG0 corresponds to TRUE_BB.  */
      if (EDGE_PRED (bb, 1)->src == true_bb)
	{
	  arg0 = gimple_phi_arg_def (phi, 1);
	  arg1 = gimple_phi_arg_def (phi, 0);
	}
      else
	{
	  arg0 = gimple_phi_arg_def (phi, 0);
	  arg1 = gimple_phi_arg_def (phi, 1);
	}
      if (is_cond_scalar_reduction (phi, &reduc, arg0, arg1,
				    &op0, &op1, false))
	/* Convert reduction stmt into vectorizable form.  */
	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
					     true_bb != gimple_bb (reduc));
      else
	/* Build new RHS using selected condition and arguments.  */
	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
				    arg0, arg1);
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      gimple_stmt_iterator new_gsi = gsi_for_stmt (new_stmt);
      if (fold_stmt (&new_gsi, ifcvt_follow_ssa_use_edges))
	{
	  new_stmt = gsi_stmt (new_gsi);
	  update_stmt (new_stmt);
	}

      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "new phi replacement stmt\n");
	  print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
	}
      return;
    }

  /* Create hashmap for PHI node which contain vector of argument indexes
     having the same value.  */
  bool swap = false;
  hash_map<tree_operand_hash, auto_vec<int> > phi_arg_map;
  unsigned int num_args = gimple_phi_num_args (phi);
  int max_ind = -1;
  /* Vector of different PHI argument values.  */
  auto_vec<tree> args (num_args);

  /* Compute phi_arg_map.  */
  for (i = 0; i < num_args; i++)
    {
      tree arg;

      arg = gimple_phi_arg_def (phi, i);
      if (!phi_arg_map.get (arg))
	args.quick_push (arg);
      phi_arg_map.get_or_insert (arg).safe_push (i);
    }

  /* Determine element with max number of occurrences.  */
  /* NOTE(review): MAX_IND was already initialized to -1 above; this
     reassignment is a harmless dead store.  */
  max_ind = -1;
  max = 1;
  args_len = args.length ();
  for (i = 0; i < args_len; i++)
    {
      unsigned int len;
      if ((len = phi_arg_map.get (args[i])->length ()) > max)
	{
	  max_ind = (int) i;
	  max = len;
	}
    }

  /* Put element with max number of occurences to the end of ARGS.  */
  if (max_ind != -1 && max_ind +1 != (int) args_len)
    std::swap (args[args_len - 1], args[max_ind]);

  /* Handle one special case when number of arguments with different values
     is equal 2 and one argument has the only occurrence.  Such PHI can be
     handled as if would have only 2 arguments.  */
  if (args_len == 2 && phi_arg_map.get (args[0])->length () == 1)
    {
      vec<int> *indexes;
      indexes = phi_arg_map.get (args[0]);
      index0 = (*indexes)[0];
      arg0 = args[0];
      arg1 = args[1];
      e = gimple_phi_arg_edge (phi, index0);
      cond = bb_predicate (e->src);
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	{
	  swap = true;
	  cond = TREE_OPERAND (cond, 0);
	}
      /* Gimplify the condition to a valid cond-expr conditonal operand.  */
      cond = force_gimple_operand_gsi_1 (gsi, unshare_expr (cond),
					 is_gimple_condexpr, NULL_TREE,
					 true, GSI_SAME_STMT);
      if (!(is_cond_scalar_reduction (phi, &reduc, arg0 , arg1,
				      &op0, &op1, true)))
	rhs = fold_build_cond_expr (TREE_TYPE (res), unshare_expr (cond),
				    swap? arg1 : arg0,
				    swap? arg0 : arg1);
      else
	/* Convert reduction stmt into vectorizable form.  */
	rhs = convert_scalar_cond_reduction (reduc, gsi, cond, op0, op1,
					     swap);
      new_stmt = gimple_build_assign (res, rhs);
      gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
      update_stmt (new_stmt);
    }
  else
    {
      /* Common case.  Build a chain of COND_EXPRs, one per distinct
	 argument value, accumulating into ARG1; the last one assigns
	 to the PHI result.  */
      vec<int> *indexes;
      tree type = TREE_TYPE (gimple_phi_result (phi));
      tree lhs;
      arg1 = args[1];
      for (i = 0; i < args_len; i++)
	{
	  arg0 = args[i];
	  indexes = phi_arg_map.get (args[i]);
	  if (i != args_len - 1)
	    lhs = make_temp_ssa_name (type, NULL, "_ifc_");
	  else
	    lhs = res;
	  cond = gen_phi_arg_condition (phi, indexes, gsi);
	  rhs = fold_build_cond_expr (type, unshare_expr (cond),
				      arg0, arg1);
	  new_stmt = gimple_build_assign (lhs, rhs);
	  gsi_insert_before (gsi, new_stmt, GSI_SAME_STMT);
	  update_stmt (new_stmt);
	  arg1 = lhs;
	}
    }

  if (dump_file && (dump_flags & TDF_DETAILS))
    {
      fprintf (dump_file, "new extended phi replacement stmt\n");
      print_gimple_stmt (dump_file, new_stmt, 0, TDF_SLIM);
    }
}
1969
1970/* Replaces in LOOP all the scalar phi nodes other than those in the
1971 LOOP->header block with conditional modify expressions. */
1972
1973static void
1974predicate_all_scalar_phis (struct loop *loop)
1975{
1976 basic_block bb;
1977 unsigned int orig_loop_num_nodes = loop->num_nodes;
1978 unsigned int i;
1979
1980 for (i = 1; i < orig_loop_num_nodes; i++)
1981 {
1982 gphi *phi;
1983 gimple_stmt_iterator gsi;
1984 gphi_iterator phi_gsi;
1985 bb = ifc_bbs[i];
1986
1987 if (bb == loop->header)
1988 continue;
1989
1990 phi_gsi = gsi_start_phis (bb);
1991 if (gsi_end_p (phi_gsi))
1992 continue;
1993
1994 gsi = gsi_after_labels (bb);
1995 while (!gsi_end_p (phi_gsi))
1996 {
1997 phi = phi_gsi.phi ();
1998 if (virtual_operand_p (gimple_phi_result (phi)))
1999 gsi_next (&phi_gsi);
2000 else
2001 {
2002 predicate_scalar_phi (phi, &gsi);
2003 remove_phi_node (&phi_gsi, false);
2004 }
2005 }
2006 }
2007}
2008
2009/* Insert in each basic block of LOOP the statements produced by the
2010 gimplification of the predicates. */
2011
2012static void
2013insert_gimplified_predicates (loop_p loop)
2014{
2015 unsigned int i;
2016
2017 for (i = 0; i < loop->num_nodes; i++)
2018 {
2019 basic_block bb = ifc_bbs[i];
2020 gimple_seq stmts;
2021 if (!is_predicated (bb))
2022 gcc_assert (bb_predicate_gimplified_stmts (bb) == NULL);
2023 if (!is_predicated (bb))
2024 {
2025 /* Do not insert statements for a basic block that is not
2026 predicated. Also make sure that the predicate of the
2027 basic block is set to true. */
2028 reset_bb_predicate (bb);
2029 continue;
2030 }
2031
2032 stmts = bb_predicate_gimplified_stmts (bb);
2033 if (stmts)
2034 {
2035 if (any_pred_load_store)
2036 {
2037 /* Insert the predicate of the BB just after the label,
2038 as the if-conversion of memory writes will use this
2039 predicate. */
2040 gimple_stmt_iterator gsi = gsi_after_labels (bb);
2041 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2042 }
2043 else
2044 {
2045 /* Insert the predicate of the BB at the end of the BB
2046 as this would reduce the register pressure: the only
2047 use of this predicate will be in successor BBs. */
2048 gimple_stmt_iterator gsi = gsi_last_bb (bb);
2049
2050 if (gsi_end_p (gsi)
2051 || stmt_ends_bb_p (gsi_stmt (gsi)))
2052 gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);
2053 else
2054 gsi_insert_seq_after (&gsi, stmts, GSI_SAME_STMT);
2055 }
2056
2057 /* Once the sequence is code generated, set it to NULL. */
2058 set_bb_predicate_gimplified_stmts (bb, NULL);
2059 }
2060 }
2061}
2062
2063/* Helper function for predicate_mem_writes. Returns index of existent
2064 mask if it was created for given SIZE and -1 otherwise. */
2065
2066static int
2067mask_exists (int size, vec<int> vec)
2068{
2069 unsigned int ix;
2070 int v;
2071 FOR_EACH_VEC_ELT (vec, ix, v)
2072 if (v == size)
2073 return (int) ix;
2074 return -1;
2075}
2076
2077/* Predicate each write to memory in LOOP.
2078
2079 This function transforms control flow constructs containing memory
2080 writes of the form:
2081
2082 | for (i = 0; i < N; i++)
2083 | if (cond)
2084 | A[i] = expr;
2085
2086 into the following form that does not contain control flow:
2087
2088 | for (i = 0; i < N; i++)
2089 | A[i] = cond ? expr : A[i];
2090
2091 The original CFG looks like this:
2092
2093 | bb_0
2094 | i = 0
2095 | end_bb_0
2096 |
2097 | bb_1
2098 | if (i < N) goto bb_5 else goto bb_2
2099 | end_bb_1
2100 |
2101 | bb_2
2102 | cond = some_computation;
2103 | if (cond) goto bb_3 else goto bb_4
2104 | end_bb_2
2105 |
2106 | bb_3
2107 | A[i] = expr;
2108 | goto bb_4
2109 | end_bb_3
2110 |
2111 | bb_4
2112 | goto bb_1
2113 | end_bb_4
2114
2115 insert_gimplified_predicates inserts the computation of the COND
2116 expression at the beginning of the destination basic block:
2117
2118 | bb_0
2119 | i = 0
2120 | end_bb_0
2121 |
2122 | bb_1
2123 | if (i < N) goto bb_5 else goto bb_2
2124 | end_bb_1
2125 |
2126 | bb_2
2127 | cond = some_computation;
2128 | if (cond) goto bb_3 else goto bb_4
2129 | end_bb_2
2130 |
2131 | bb_3
2132 | cond = some_computation;
2133 | A[i] = expr;
2134 | goto bb_4
2135 | end_bb_3
2136 |
2137 | bb_4
2138 | goto bb_1
2139 | end_bb_4
2140
2141 predicate_mem_writes is then predicating the memory write as follows:
2142
2143 | bb_0
2144 | i = 0
2145 | end_bb_0
2146 |
2147 | bb_1
2148 | if (i < N) goto bb_5 else goto bb_2
2149 | end_bb_1
2150 |
2151 | bb_2
2152 | if (cond) goto bb_3 else goto bb_4
2153 | end_bb_2
2154 |
2155 | bb_3
2156 | cond = some_computation;
2157 | A[i] = cond ? expr : A[i];
2158 | goto bb_4
2159 | end_bb_3
2160 |
2161 | bb_4
2162 | goto bb_1
2163 | end_bb_4
2164
2165 and finally combine_blocks removes the basic block boundaries making
2166 the loop vectorizable:
2167
2168 | bb_0
2169 | i = 0
2170 | if (i < N) goto bb_5 else goto bb_1
2171 | end_bb_0
2172 |
2173 | bb_1
2174 | cond = some_computation;
2175 | A[i] = cond ? expr : A[i];
2176 | if (i < N) goto bb_5 else goto bb_4
2177 | end_bb_1
2178 |
2179 | bb_4
2180 | goto bb_1
2181 | end_bb_4
2182*/
2183
/* Predicate each write to memory in LOOP; see the overview comment
   above for the full transformation.  Accesses marked with GF_PLF_2
   become internal MASK_LOAD/MASK_STORE calls; any other store is
   predicated by rewriting it as lhs = cond ? rhs : lhs.  */

static void
predicate_mem_writes (loop_p loop)
{
  unsigned int i, orig_loop_num_nodes = loop->num_nodes;
  /* Per-block cache of masks already built, keyed by access bitsize.  */
  auto_vec<int, 1> vect_sizes;
  auto_vec<tree, 1> vect_masks;

  /* ifc_bbs[0] is skipped -- presumably the loop header, which is
     never executed conditionally; TODO confirm against the order
     ifc_bbs is built in.  */
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      basic_block bb = ifc_bbs[i];
      tree cond = bb_predicate (bb);
      bool swap;
      gimple *stmt;
      int index;

      /* Unconditionally executed statements need no predication.  */
      if (is_true_predicate (cond))
	continue;

      /* Strip a top-level negation; remember it and swap the arms of
	 the conditionals built below instead.  */
      swap = false;
      if (TREE_CODE (cond) == TRUTH_NOT_EXPR)
	{
	  swap = true;
	  cond = TREE_OPERAND (cond, 0);
	}

      /* Masks depend on the block predicate, so reset the cache.  */
      vect_sizes.truncate (0);
      vect_masks.truncate (0);

      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi);)
	{
	  if (!gimple_assign_single_p (stmt = gsi_stmt (gsi)))
	    ;
	  else if (is_false_predicate (cond)
		   && gimple_vdef (stmt))
	    {
	      /* A memory write under a false predicate can never
		 execute: delete it outright.  */
	      unlink_stmt_vdef (stmt);
	      gsi_remove (&gsi, true);
	      release_defs (stmt);
	      /* gsi_remove already advanced GSI; skip gsi_next.  */
	      continue;
	    }
	  else if (gimple_plf (stmt, GF_PLF_2))
	    {
	      /* Access flagged (earlier in the pass) for conversion
		 into a masked load or store.  */
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      tree ref, addr, ptr, mask;
	      gcall *new_stmt;
	      gimple_seq stmts = NULL;
	      int bitsize = GET_MODE_BITSIZE (TYPE_MODE (TREE_TYPE (lhs)));
	      /* An SSA name on the LHS means this is a load; the
		 memory reference is then the RHS.  */
	      ref = TREE_CODE (lhs) == SSA_NAME ? rhs : lhs;
	      mark_addressable (ref);
	      addr = force_gimple_operand_gsi (&gsi, build_fold_addr_expr (ref),
					       true, NULL_TREE, true,
					       GSI_SAME_STMT);
	      if (!vect_sizes.is_empty ()
		  && (index = mask_exists (bitsize, vect_sizes)) != -1)
		/* Use created mask.  */
		mask = vect_masks[index];
	      else
		{
		  /* Materialize the block predicate as a boolean
		     mask value.  */
		  if (COMPARISON_CLASS_P (cond))
		    mask = gimple_build (&stmts, TREE_CODE (cond),
					 boolean_type_node,
					 TREE_OPERAND (cond, 0),
					 TREE_OPERAND (cond, 1));
		  else
		    mask = cond;

		  if (swap)
		    {
		      /* The predicate was negated: invert the mask
			 (XOR with true) rather than swapping arms.  */
		      tree true_val
			= constant_boolean_node (true, TREE_TYPE (mask));
		      mask = gimple_build (&stmts, BIT_XOR_EXPR,
					   TREE_TYPE (mask), mask, true_val);
		    }
		  gsi_insert_seq_before (&gsi, stmts, GSI_SAME_STMT);

		  mask = ifc_temp_var (TREE_TYPE (mask), mask, &gsi);
		  /* Save mask and its size for further use.  */
		  vect_sizes.safe_push (bitsize);
		  vect_masks.safe_push (mask);
		}
	      /* PTR carries the alias pointer type of REF and its
		 known alignment as the constant value.  */
	      ptr = build_int_cst (reference_alias_ptr_type (ref),
				   get_object_alignment (ref));
	      /* Copy points-to info if possible.  */
	      if (TREE_CODE (addr) == SSA_NAME && !SSA_NAME_PTR_INFO (addr))
		copy_ref_info (build2 (MEM_REF, TREE_TYPE (ref), addr, ptr),
			       ref);
	      if (TREE_CODE (lhs) == SSA_NAME)
		{
		  /* Load: lhs = MASK_LOAD (addr, ptr, mask).  */
		  new_stmt
		    = gimple_build_call_internal (IFN_MASK_LOAD, 3, addr,
						  ptr, mask);
		  gimple_call_set_lhs (new_stmt, lhs);
		  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		}
	      else
		{
		  /* Store: MASK_STORE (addr, ptr, mask, rhs).  Carry
		     over the virtual operands of the old statement.  */
		  new_stmt
		    = gimple_build_call_internal (IFN_MASK_STORE, 4, addr, ptr,
						  mask, rhs);
		  gimple_set_vuse (new_stmt, gimple_vuse (stmt));
		  gimple_set_vdef (new_stmt, gimple_vdef (stmt));
		  SSA_NAME_DEF_STMT (gimple_vdef (new_stmt)) = new_stmt;
		}
	      gimple_call_set_nothrow (new_stmt, true);

	      gsi_replace (&gsi, new_stmt, true);
	    }
	  else if (gimple_vdef (stmt))
	    {
	      /* Any other memory write: predicate it by rewriting the
		 RHS to cond ? rhs : lhs, i.e. storing back the old
		 value when the predicate is false.  */
	      tree lhs = gimple_assign_lhs (stmt);
	      tree rhs = gimple_assign_rhs1 (stmt);
	      tree type = TREE_TYPE (lhs);

	      lhs = ifc_temp_var (type, unshare_expr (lhs), &gsi);
	      rhs = ifc_temp_var (type, unshare_expr (rhs), &gsi);
	      if (swap)
		std::swap (lhs, rhs);
	      cond = force_gimple_operand_gsi_1 (&gsi, unshare_expr (cond),
						 is_gimple_condexpr, NULL_TREE,
						 true, GSI_SAME_STMT);
	      rhs = fold_build_cond_expr (type, unshare_expr (cond), rhs, lhs);
	      gimple_assign_set_rhs1 (stmt, ifc_temp_var (type, rhs, &gsi));
	      update_stmt (stmt);
	    }
	  gsi_next (&gsi);
	}
    }
}
2314
2315/* Remove all GIMPLE_CONDs and GIMPLE_LABELs of all the basic blocks
2316 other than the exit and latch of the LOOP. Also resets the
2317 GIMPLE_DEBUG information. */
2318
2319static void
2320remove_conditions_and_labels (loop_p loop)
2321{
2322 gimple_stmt_iterator gsi;
2323 unsigned int i;
2324
2325 for (i = 0; i < loop->num_nodes; i++)
2326 {
2327 basic_block bb = ifc_bbs[i];
2328
2329 if (bb_with_exit_edge_p (loop, bb)
2330 || bb == loop->latch)
2331 continue;
2332
2333 for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); )
2334 switch (gimple_code (gsi_stmt (gsi)))
2335 {
2336 case GIMPLE_COND:
2337 case GIMPLE_LABEL:
2338 gsi_remove (&gsi, true);
2339 break;
2340
2341 case GIMPLE_DEBUG:
2342 /* ??? Should there be conditional GIMPLE_DEBUG_BINDs? */
2343 if (gimple_debug_bind_p (gsi_stmt (gsi)))
2344 {
2345 gimple_debug_bind_reset_value (gsi_stmt (gsi));
2346 update_stmt (gsi_stmt (gsi));
2347 }
2348 gsi_next (&gsi);
2349 break;
2350
2351 default:
2352 gsi_next (&gsi);
2353 }
2354 }
2355}
2356
/* Combine all the basic blocks from LOOP into one or two super basic
   blocks.  Replace PHI nodes with conditional modify expressions.  */

static void
combine_blocks (struct loop *loop)
{
  basic_block bb, exit_bb, merge_target_bb;
  unsigned int orig_loop_num_nodes = loop->num_nodes;
  unsigned int i;
  edge e;
  edge_iterator ei;

  /* Rewrite the control-flow-dependent constructs first, while the
     per-block predicates are still available.  */
  remove_conditions_and_labels (loop);
  insert_gimplified_predicates (loop);
  predicate_all_scalar_phis (loop);

  if (any_pred_load_store)
    predicate_mem_writes (loop);

  /* Merge basic blocks: first remove all the edges in the loop,
     except for those from the exit block.  */
  exit_bb = NULL;
  bool *predicated = XNEWVEC (bool, orig_loop_num_nodes);
  for (i = 0; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];
      /* Remember which blocks ran under a non-trivial predicate;
	 range info on their defs is reset further below.  */
      predicated[i] = !is_true_predicate (bb_predicate (bb));
      free_bb_predicate (bb);
      if (bb_with_exit_edge_p (loop, bb))
	{
	  /* At most one block may carry the loop's exit edge here.  */
	  gcc_assert (exit_bb == NULL);
	  exit_bb = bb;
	}
    }
  gcc_assert (exit_bb != loop->latch);

  /* Detach every non-header block from its predecessors, keeping only
     edges coming from the exit block.  */
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      bb = ifc_bbs[i];

      for (ei = ei_start (bb->preds); (e = ei_safe_edge (ei));)
	{
	  if (e->src == exit_bb)
	    ei_next (&ei);
	  else
	    remove_edge (e);
	}
    }

  if (exit_bb != NULL)
    {
      if (exit_bb != loop->header)
	{
	  /* Connect this node to loop header.  */
	  make_single_succ_edge (loop->header, exit_bb, EDGE_FALLTHRU);
	  set_immediate_dominator (CDI_DOMINATORS, exit_bb, loop->header);
	}

      /* Redirect non-exit edges to loop->latch.  */
      FOR_EACH_EDGE (e, ei, exit_bb->succs)
	{
	  if (!loop_exit_edge_p (loop, e))
	    redirect_edge_and_branch (e, loop->latch);
	}
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, exit_bb);
    }
  else
    {
      /* If the loop does not have an exit, reconnect header and latch.  */
      make_edge (loop->header, loop->latch, EDGE_FALLTHRU);
      set_immediate_dominator (CDI_DOMINATORS, loop->latch, loop->header);
    }

  merge_target_bb = loop->header;

  /* Get at the virtual def valid for uses starting at the first block
     we merge into the header.  Without a virtual PHI the loop has the
     same virtual use on all stmts.  */
  gphi *vphi = get_virtual_phi (loop->header);
  tree last_vdef = NULL_TREE;
  if (vphi)
    {
      last_vdef = gimple_phi_result (vphi);
      for (gimple_stmt_iterator gsi = gsi_start_bb (loop->header);
	   ! gsi_end_p (gsi); gsi_next (&gsi))
	if (gimple_vdef (gsi_stmt (gsi)))
	  last_vdef = gimple_vdef (gsi_stmt (gsi));
    }
  for (i = 1; i < orig_loop_num_nodes; i++)
    {
      gimple_stmt_iterator gsi;
      gimple_stmt_iterator last;

      bb = ifc_bbs[i];

      if (bb == exit_bb || bb == loop->latch)
	continue;

      /* We release virtual PHIs late because we have to propagate them
         out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (bb);
      if (vphi)
	{
	  /* Replace all uses of the virtual PHI result with the vdef
	     live at the end of what has been merged so far.  */
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      /* Make stmts member of loop->header and clear range info from all stmts
	 in BB which is now no longer executed conditional on a predicate we
	 could have derived it from.  */
      for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *stmt = gsi_stmt (gsi);
	  gimple_set_bb (stmt, merge_target_bb);
	  /* Update virtual operands.  */
	  if (last_vdef)
	    {
	      use_operand_p use_p = ssa_vuse_operand (stmt);
	      if (use_p
		  && USE_FROM_PTR (use_p) != last_vdef)
		SET_USE (use_p, last_vdef);
	      if (gimple_vdef (stmt))
		last_vdef = gimple_vdef (stmt);
	    }
	  if (predicated[i])
	    {
	      /* Defs of a formerly-conditional block may carry range
		 info derived from the predicate; it no longer holds
		 once the statement executes unconditionally.  */
	      ssa_op_iter i;
	      tree op;
	      FOR_EACH_SSA_TREE_OPERAND (op, stmt, i, SSA_OP_DEF)
		reset_flow_sensitive_info (op);
	    }
	}

      /* Update stmt list.  */
      last = gsi_last_bb (merge_target_bb);
      gsi_insert_seq_after_without_update (&last, bb_seq (bb), GSI_NEW_STMT);
      set_bb_seq (bb, NULL);

      delete_basic_block (bb);
    }

  /* If possible, merge loop header to the block with the exit edge.
     This reduces the number of basic blocks to two, to please the
     vectorizer that handles only loops with two nodes.  */
  if (exit_bb
      && exit_bb != loop->header)
    {
      /* We release virtual PHIs late because we have to propagate them
         out using the current VUSE.  The def might be the one used
	 after the loop.  */
      vphi = get_virtual_phi (exit_bb);
      if (vphi)
	{
	  imm_use_iterator iter;
	  use_operand_p use_p;
	  gimple *use_stmt;
	  FOR_EACH_IMM_USE_STMT (use_stmt, iter, gimple_phi_result (vphi))
	    {
	      FOR_EACH_IMM_USE_ON_STMT (use_p, iter)
		SET_USE (use_p, last_vdef);
	    }
	  gimple_stmt_iterator gsi = gsi_for_stmt (vphi);
	  remove_phi_node (&gsi, true);
	}

      if (can_merge_blocks_p (loop->header, exit_bb))
	merge_blocks (loop->header, exit_bb);
    }

  free (ifc_bbs);
  ifc_bbs = NULL;
  free (predicated);
}
2539
/* Version LOOP before if-converting it; the original loop
   will be if-converted, the new copy of the loop will not,
   and the LOOP_VECTORIZED internal call will be guarding which
   loop to execute.  The vectorizer pass will fold this
   internal call into either true or false.

   Note that this function intentionally invalidates profile.  Both edges
   out of LOOP_VECTORIZED must have 100% probability so the profile remains
   consistent after the condition is folded in the vectorizer.

   Returns the new (non-if-converted) copy of LOOP, or NULL when
   versioning failed.  */

static struct loop *
version_loop_for_if_conversion (struct loop *loop)
{
  basic_block cond_bb;
  tree cond = make_ssa_name (boolean_type_node);
  struct loop *new_loop;
  gimple *g;
  gimple_stmt_iterator gsi;
  unsigned int save_length;

  /* Build cond = IFN_LOOP_VECTORIZED (loop->num, 0); the second
     argument is patched below once the copy's number is known.  */
  g = gimple_build_call_internal (IFN_LOOP_VECTORIZED, 2,
				  build_int_cst (integer_type_node, loop->num),
				  integer_zero_node);
  gimple_call_set_lhs (g, cond);

  /* Save BB->aux around loop_version as that uses the same field.  */
  save_length = loop->inner ? loop->inner->num_nodes : loop->num_nodes;
  void **saved_preds = XALLOCAVEC (void *, save_length);
  for (unsigned i = 0; i < save_length; i++)
    saved_preds[i] = ifc_bbs[i]->aux;

  initialize_original_copy_tables ();
  /* At this point we invalidate profile consistency until
     IFN_LOOP_VECTORIZED is re-merged in the vectorizer.  */
  new_loop = loop_version (loop, cond, &cond_bb,
			   profile_probability::always (),
			   profile_probability::always (),
			   profile_probability::always (),
			   profile_probability::always (), true);
  free_original_copy_tables ();

  /* Restore the saved BB->aux contents.  */
  for (unsigned i = 0; i < save_length; i++)
    ifc_bbs[i]->aux = saved_preds[i];

  if (new_loop == NULL)
    return NULL;

  /* The copy stays scalar; make sure the vectorizer leaves it alone.  */
  new_loop->dont_vectorize = true;
  new_loop->force_vectorize = false;
  gsi = gsi_last_bb (cond_bb);
  /* Fill in the copy's loop number and emit the call just before the
     versioning condition.  */
  gimple_call_set_arg (g, 1, build_int_cst (integer_type_node, new_loop->num));
  gsi_insert_before (&gsi, g, GSI_SAME_STMT);
  update_ssa (TODO_update_ssa);
  return new_loop;
}
2595
2596/* Return true when LOOP satisfies the follow conditions that will
2597 allow it to be recognized by the vectorizer for outer-loop
2598 vectorization:
2599 - The loop is not the root node of the loop tree.
2600 - The loop has exactly one inner loop.
2601 - The loop has a single exit.
2602 - The loop header has a single successor, which is the inner
2603 loop header.
2604 - Each of the inner and outer loop latches have a single
2605 predecessor.
2606 - The loop exit block has a single predecessor, which is the
2607 inner loop's exit block. */
2608
2609static bool
2610versionable_outer_loop_p (struct loop *loop)
2611{
2612 if (!loop_outer (loop)
2613 || loop->dont_vectorize
2614 || !loop->inner
2615 || loop->inner->next
2616 || !single_exit (loop)
2617 || !single_succ_p (loop->header)
2618 || single_succ (loop->header) != loop->inner->header
2619 || !single_pred_p (loop->latch)
2620 || !single_pred_p (loop->inner->latch))
2621 return false;
2622
2623 basic_block outer_exit = single_pred (loop->latch);
2624 basic_block inner_exit = single_pred (loop->inner->latch);
2625
2626 if (!single_pred_p (outer_exit) || single_pred (outer_exit) != inner_exit)
2627 return false;
2628
2629 if (dump_file)
2630 fprintf (dump_file, "Found vectorizable outer loop for versioning\n");
2631
2632 return true;
2633}
2634
/* Performs splitting of critical edges.  Skip splitting and return false
   if LOOP will not be converted because:

   - LOOP is not well formed.
   - LOOP has PHI with more than MAX_PHI_ARG_NUM arguments.

   Last restriction is valid only if AGGRESSIVE_IF_CONV is false.  */

static bool
ifcvt_split_critical_edges (struct loop *loop, bool aggressive_if_conv)
{
  basic_block *body;
  basic_block bb;
  unsigned int num = loop->num_nodes;
  unsigned int i;
  gimple *stmt;
  edge e;
  edge_iterator ei;
  auto_vec<edge> critical_edges;

  /* Loop is not well formed: needs more than two blocks, no inner
     loop, and a single exit.  */
  if (num <= 2 || loop->inner || !single_exit (loop))
    return false;

  body = get_loop_body (loop);
  for (i = 0; i < num; i++)
    {
      bb = body[i];
      /* Without aggressive if-conversion, give up on blocks whose
	 PHIs have too many arguments.  */
      if (!aggressive_if_conv
	  && phi_nodes (bb)
	  && EDGE_COUNT (bb->preds) > MAX_PHI_ARG_NUM)
	{
	  if (dump_file && (dump_flags & TDF_DETAILS))
	    fprintf (dump_file,
		     "BB %d has complicated PHI with more than %u args.\n",
		     bb->index, MAX_PHI_ARG_NUM);

	  free (body);
	  return false;
	}
      if (bb == loop->latch || bb_with_exit_edge_p (loop, bb))
	continue;

      stmt = last_stmt (bb);
      /* Skip basic blocks not ending with conditional branch.  */
      if (!stmt || gimple_code (stmt) != GIMPLE_COND)
	continue;

      /* Collect critical successor edges that stay inside LOOP.  */
      FOR_EACH_EDGE (e, ei, bb->succs)
	if (EDGE_CRITICAL_P (e) && e->dest->loop_father == loop)
	  critical_edges.safe_push (e);
    }
  free (body);

  while (critical_edges.length () > 0)
    {
      e = critical_edges.pop ();
      /* Don't split if bb can be predicated along non-critical edge.  */
      if (EDGE_COUNT (e->dest->preds) > 2 || all_preds_critical_p (e->dest))
	split_edge (e);
    }

  return true;
}
2699
/* Delete redundant statements produced by predication which prevents
   loop vectorization.  This is a simple mark-and-sweep dead code
   elimination restricted to basic block BB; the GF_PLF_2 statement
   flag is used as the "live" mark.  */

static void
ifcvt_local_dce (basic_block bb)
{
  gimple *stmt;
  gimple *stmt1;
  gimple *phi;
  gimple_stmt_iterator gsi;
  auto_vec<gimple *> worklist;
  enum gimple_code code;
  use_operand_p use_p;
  imm_use_iterator imm_iter;

  worklist.create (64);
  /* Consider all phi as live statements.  */
  for (gsi = gsi_start_phis (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      phi = gsi_stmt (gsi);
      gimple_set_plf (phi, GF_PLF_2, true);
      worklist.safe_push (phi);
    }
  /* Consider load/store statements, CALL and COND as live.  */
  for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_store_p (stmt)
	  || gimple_assign_load_p (stmt)
	  || is_gimple_debug (stmt))
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      code = gimple_code (stmt);
      if (code == GIMPLE_COND || code == GIMPLE_CALL)
	{
	  gimple_set_plf (stmt, GF_PLF_2, true);
	  worklist.safe_push (stmt);
	  continue;
	}
      /* Everything else starts out dead ...  */
      gimple_set_plf (stmt, GF_PLF_2, false);

      /* ... unless its result is used outside of BB, where this local
	 analysis cannot see the use.  */
      if (code == GIMPLE_ASSIGN)
	{
	  tree lhs = gimple_assign_lhs (stmt);
	  FOR_EACH_IMM_USE_FAST (use_p, imm_iter, lhs)
	    {
	      stmt1 = USE_STMT (use_p);
	      if (gimple_bb (stmt1) != bb)
		{
		  gimple_set_plf (stmt, GF_PLF_2, true);
		  worklist.safe_push (stmt);
		  break;
		}
	    }
	}
    }
  /* Propagate liveness through arguments of live stmt.  */
  while (worklist.length () > 0)
    {
      ssa_op_iter iter;
      use_operand_p use_p;
      tree use;

      stmt = worklist.pop ();
      FOR_EACH_PHI_OR_STMT_USE (use_p, stmt, iter, SSA_OP_USE)
	{
	  use = USE_FROM_PTR (use_p);
	  if (TREE_CODE (use) != SSA_NAME)
	    continue;
	  stmt1 = SSA_NAME_DEF_STMT (use);
	  /* Defs from outside BB or already-marked stmts need no work.  */
	  if (gimple_bb (stmt1) != bb
	      || gimple_plf (stmt1, GF_PLF_2))
	    continue;
	  gimple_set_plf (stmt1, GF_PLF_2, true);
	  worklist.safe_push (stmt1);
	}
    }
  /* Delete dead statements.  */
  gsi = gsi_start_bb (bb);
  while (!gsi_end_p (gsi))
    {
      stmt = gsi_stmt (gsi);
      if (gimple_plf (stmt, GF_PLF_2))
	{
	  /* Live: keep it and move on.  */
	  gsi_next (&gsi);
	  continue;
	}
      if (dump_file && (dump_flags & TDF_DETAILS))
	{
	  fprintf (dump_file, "Delete dead stmt in bb#%d\n", bb->index);
	  print_gimple_stmt (dump_file, stmt, 0, TDF_SLIM);
	}
      gsi_remove (&gsi, true);
      release_defs (stmt);
    }
}
2799
/* If-convert LOOP when it is legal.  For the moment this pass has no
   profitability analysis.  Returns non-zero todo flags when something
   changed.  */

unsigned int
tree_if_conversion (struct loop *loop)
{
  unsigned int todo = 0;
  bool aggressive_if_conv;
  struct loop *rloop;

 again:
  /* Reset per-loop state; the AGAIN label re-enters here for the
     inner loop of a versioned outer loop (see below).  */
  rloop = NULL;
  ifc_bbs = NULL;
  any_pred_load_store = false;
  any_complicated_phi = false;

  /* Apply more aggressive if-conversion when loop or its outer loop were
     marked with simd pragma.  When that's the case, we try to if-convert
     loop containing PHIs with more than MAX_PHI_ARG_NUM arguments.  */
  aggressive_if_conv = loop->force_vectorize;
  if (!aggressive_if_conv)
    {
      struct loop *outer_loop = loop_outer (loop);
      if (outer_loop && outer_loop->force_vectorize)
	aggressive_if_conv = true;
    }

  if (!ifcvt_split_critical_edges (loop, aggressive_if_conv))
    goto cleanup;

  if (!if_convertible_loop_p (loop)
      || !dbg_cnt (if_conversion_tree))
    goto cleanup;

  /* Predicated loads/stores and complicated PHIs are only worthwhile
     if the result will actually be vectorized.  */
  if ((any_pred_load_store || any_complicated_phi)
      && ((!flag_tree_loop_vectorize && !loop->force_vectorize)
	  || loop->dont_vectorize))
    goto cleanup;

  /* Since we have no cost model, always version loops unless the user
     specified -ftree-loop-if-convert or unless versioning is required.
     Either version this loop, or if the pattern is right for outer-loop
     vectorization, version the outer loop.  In the latter case we will
     still if-convert the original inner loop.  */
  if (any_pred_load_store
      || any_complicated_phi
      || flag_tree_loop_if_convert != 1)
    {
      struct loop *vloop
	= (versionable_outer_loop_p (loop_outer (loop))
	   ? loop_outer (loop) : loop);
      struct loop *nloop = version_loop_for_if_conversion (vloop);
      if (nloop == NULL)
	goto cleanup;
      if (vloop != loop)
	{
	  /* If versionable_outer_loop_p decided to version the
	     outer loop, version also the inner loop of the non-vectorized
	     loop copy.  So we transform:
	      loop1
		loop2
	     into:
	      if (LOOP_VECTORIZED (1, 3))
		{
		  loop1
		    loop2
		}
	      else
		loop3 (copy of loop1)
		  if (LOOP_VECTORIZED (4, 5))
		    loop4 (copy of loop2)
		  else
		    loop5 (copy of loop4)  */
	  gcc_assert (nloop->inner && nloop->inner->next == NULL);
	  rloop = nloop->inner;
	}
    }

  /* Now all statements are if-convertible.  Combine all the basic
     blocks into one huge basic block doing the if-conversion
     on-the-fly.  */
  combine_blocks (loop);

  /* Delete dead predicate computations.  */
  ifcvt_local_dce (loop->header);

  todo |= TODO_cleanup_cfg;

 cleanup:
  if (ifc_bbs)
    {
      unsigned int i;

      for (i = 0; i < loop->num_nodes; i++)
	free_bb_predicate (ifc_bbs[i]);

      free (ifc_bbs);
      ifc_bbs = NULL;
    }
  /* In the outer-loop-versioning case above, also process the inner
     loop of the scalar copy by restarting from the top.  */
  if (rloop != NULL)
    {
      loop = rloop;
      goto again;
    }

  return todo;
}
2908
2909/* Tree if-conversion pass management. */
2910
2911namespace {
2912
/* Static descriptor for the if-conversion pass; consumed by the
   gimple_opt_pass constructor below.  */
const pass_data pass_data_if_conversion =
{
  GIMPLE_PASS, /* type */
  "ifcvt", /* name */
  OPTGROUP_NONE, /* optinfo_flags */
  TV_TREE_LOOP_IFCVT, /* tv_id */
  ( PROP_cfg | PROP_ssa ), /* properties_required */
  0, /* properties_provided */
  0, /* properties_destroyed */
  0, /* todo_flags_start */
  0, /* todo_flags_finish */
};
2925
/* Pass wrapper: gate () decides whether to run for a function,
   execute () does the per-function work.  */

class pass_if_conversion : public gimple_opt_pass
{
public:
  pass_if_conversion (gcc::context *ctxt)
    : gimple_opt_pass (pass_data_if_conversion, ctxt)
  {}

  /* opt_pass methods: */
  virtual bool gate (function *);
  virtual unsigned int execute (function *);

}; // class pass_if_conversion
2938
2939bool
2940pass_if_conversion::gate (function *fun)
2941{
2942 return (((flag_tree_loop_vectorize || fun->has_force_vectorize_loops)
2943 && flag_tree_loop_if_convert != 0)
2944 || flag_tree_loop_if_convert == 1);
2945}
2946
/* Run if-conversion on every eligible loop of FUN and return the
   accumulated todo flags.  */

unsigned int
pass_if_conversion::execute (function *fun)
{
  struct loop *loop;
  unsigned todo = 0;

  /* Nothing to do without real loops (loop 0 is the function body).  */
  if (number_of_loops (fun) <= 1)
    return 0;

  /* Convert a loop when if-conversion was requested explicitly, or
     when vectorization wants it and the loop is not opted out.  */
  FOR_EACH_LOOP (loop, 0)
    if (flag_tree_loop_if_convert == 1
	|| ((flag_tree_loop_vectorize || loop->force_vectorize)
	    && !loop->dont_vectorize))
      todo |= tree_if_conversion (loop);

  if (todo)
    {
      /* The CFG changed; cached niter estimates and SCEV results
	 may be stale.  */
      free_numbers_of_iterations_estimates (fun);
      scev_reset ();
    }

  if (flag_checking)
    {
      /* No pass should leave anything behind in BB->aux.  */
      basic_block bb;
      FOR_EACH_BB_FN (bb, fun)
	gcc_assert (!bb->aux);
    }

  return todo;
}
2977
2978} // anon namespace
2979
2980gimple_opt_pass *
2981make_pass_if_conversion (gcc::context *ctxt)
2982{
2983 return new pass_if_conversion (ctxt);
2984}
2985