/* Expansion pass for OMP directives.  Outlines regions of certain OMP
   directives to separate functions, converts others into explicit calls to the
   runtime library (libgomp) and so forth.

Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "memmodel.h"
#include "backend.h"
#include "target.h"
#include "rtl.h"
#include "tree.h"
#include "gimple.h"
#include "cfghooks.h"
#include "tree-pass.h"
#include "ssa.h"
#include "optabs.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "cfganal.h"
#include "internal-fn.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-cfg.h"
#include "tree-into-ssa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "cfgloop.h"
#include "omp-general.h"
#include "omp-offload.h"
#include "tree-cfgcleanup.h"
#include "symbol-summary.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
#include "debug.h"
#include "stringpool.h"
#include "attribs.h"

/* OMP region information.  Every parallel and workshare
   directive is enclosed between two markers, the OMP_* directive
   and a corresponding GIMPLE_OMP_RETURN statement.  */

struct omp_region
{
  /* The enclosing region.  */
  struct omp_region *outer;

  /* First child region.  */
  struct omp_region *inner;

  /* Next peer region.  */
  struct omp_region *next;

  /* Block containing the omp directive as its last stmt.  */
  basic_block entry;

  /* Block containing the GIMPLE_OMP_RETURN as its last stmt.  */
  basic_block exit;

  /* Block containing the GIMPLE_OMP_CONTINUE as its last stmt.  */
  basic_block cont;

  /* If this is a combined parallel+workshare region, this is a list
     of additional arguments needed by the combined parallel+workshare
     library call.  */
  vec<tree, va_gc> *ws_args;

  /* The code for the omp directive of this region.  */
  enum gimple_code type;

  /* Schedule kind, only used for GIMPLE_OMP_FOR type regions.  */
  enum omp_clause_schedule_kind sched_kind;

  /* Schedule modifiers.  */
  unsigned char sched_modifiers;

  /* True if this is a combined parallel+workshare region.  */
  bool is_combined_parallel;

  /* The ordered stmt if type is GIMPLE_OMP_ORDERED and it has
     a depend clause.  */
  gomp_ordered *ord_stmt;
};

static struct omp_region *root_omp_region;
static bool omp_any_child_fn_dumped;

static void expand_omp_build_assign (gimple_stmt_iterator *, tree, tree,
				     bool = false);
static gphi *find_phi_with_arg_on_edge (tree, edge);
static void expand_omp (struct omp_region *region);

/* Return true if REGION is a combined parallel+workshare region.  */

static inline bool
is_combined_parallel (struct omp_region *region)
{
  return region->is_combined_parallel;
}

/* Given two blocks PAR_ENTRY_BB and WS_ENTRY_BB such that WS_ENTRY_BB
   is the immediate dominator of PAR_ENTRY_BB, return true if there
   are no data dependencies that would prevent expanding the parallel
   directive at PAR_ENTRY_BB as a combined parallel+workshare region.

   When expanding a combined parallel+workshare region, the call to
   the child function may need additional arguments in the case of
   GIMPLE_OMP_FOR regions.  In some cases, these arguments are
   computed out of variables passed in from the parent to the child
   via 'struct .omp_data_s'.  For instance:

	#pragma omp parallel for schedule (guided, i * 4)
	for (j ...)

   Is lowered into:

	# BLOCK 2 (PAR_ENTRY_BB)
	.omp_data_o.i = i;
	#pragma omp parallel [child fn: bar.omp_fn.0 ( ..., D.1598)]

	# BLOCK 3 (WS_ENTRY_BB)
	.omp_data_i = &.omp_data_o;
	D.1667 = .omp_data_i->i;
	D.1598 = D.1667 * 4;
	#pragma omp for schedule (guided, D.1598)

   When we outline the parallel region, the call to the child function
   'bar.omp_fn.0' will need the value D.1598 in its argument list, but
   that value is computed *after* the call site.  So, in principle we
   cannot do the transformation.

   To see whether the code in WS_ENTRY_BB blocks the combined
   parallel+workshare call, we collect all the variables used in the
   GIMPLE_OMP_FOR header and check whether they appear on the LHS of
   any statement in WS_ENTRY_BB.  If so, then we cannot emit the
   combined call.

   FIXME.  If we had the SSA form built at this point, we could merely
   hoist the code in block 3 into block 2 and be done with it.  But at
   this point we don't have dataflow information and though we could
   hack something up here, it is really not worth the aggravation.  */

static bool
workshare_safe_to_combine_p (basic_block ws_entry_bb)
{
  struct omp_for_data fd;
  gimple *ws_stmt = last_stmt (ws_entry_bb);

  if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    return true;

  gcc_assert (gimple_code (ws_stmt) == GIMPLE_OMP_FOR);

  omp_extract_for_data (as_a <gomp_for *> (ws_stmt), &fd, NULL);

  if (fd.collapse > 1 && TREE_CODE (fd.loop.n2) != INTEGER_CST)
    return false;
  if (fd.iter_type != long_integer_type_node)
    return false;

  /* FIXME.  We give up too easily here.  If any of these arguments
     are not constants, they will likely involve variables that have
     been mapped into fields of .omp_data_s for sharing with the child
     function.  With appropriate data flow, it would be possible to
     see through this.  */
  if (!is_gimple_min_invariant (fd.loop.n1)
      || !is_gimple_min_invariant (fd.loop.n2)
      || !is_gimple_min_invariant (fd.loop.step)
      || (fd.chunk_size && !is_gimple_min_invariant (fd.chunk_size)))
    return false;

  return true;
}

/* Adjust CHUNK_SIZE from SCHEDULE clause, depending on simd modifier
   presence (SIMD_SCHEDULE).  */
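
/* A worked example, with illustrative numbers only: if omp_max_vf ()
   returns 4, a chunk size of 5 becomes (5 + (4 - 1)) & -4 == 8 below,
   i.e. chunk sizes are rounded up to a multiple of the vectorization
   factor.  */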

static tree
omp_adjust_chunk_size (tree chunk_size, bool simd_schedule)
{
  if (!simd_schedule)
    return chunk_size;

  int vf = omp_max_vf ();
  if (vf == 1)
    return chunk_size;

  tree type = TREE_TYPE (chunk_size);
  chunk_size = fold_build2 (PLUS_EXPR, type, chunk_size,
			    build_int_cst (type, vf - 1));
  return fold_build2 (BIT_AND_EXPR, type, chunk_size,
		      build_int_cst (type, -vf));
}

/* Collect additional arguments needed to emit a combined
   parallel+workshare call.  WS_STMT is the workshare directive being
   expanded.  */

static vec<tree, va_gc> *
get_ws_args_for (gimple *par_stmt, gimple *ws_stmt)
{
  tree t;
  location_t loc = gimple_location (ws_stmt);
  vec<tree, va_gc> *ws_args;

  if (gomp_for *for_stmt = dyn_cast <gomp_for *> (ws_stmt))
    {
      struct omp_for_data fd;
      tree n1, n2;

      omp_extract_for_data (for_stmt, &fd, NULL);
      n1 = fd.loop.n1;
      n2 = fd.loop.n2;

      if (gimple_omp_for_combined_into_p (for_stmt))
	{
	  tree innerc
	    = omp_find_clause (gimple_omp_parallel_clauses (par_stmt),
			       OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n1 = OMP_CLAUSE_DECL (innerc);
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  n2 = OMP_CLAUSE_DECL (innerc);
	}

      vec_alloc (ws_args, 3 + (fd.chunk_size != 0));

      t = fold_convert_loc (loc, long_integer_type_node, n1);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, n2);
      ws_args->quick_push (t);

      t = fold_convert_loc (loc, long_integer_type_node, fd.loop.step);
      ws_args->quick_push (t);

      if (fd.chunk_size)
	{
	  t = fold_convert_loc (loc, long_integer_type_node, fd.chunk_size);
	  t = omp_adjust_chunk_size (t, fd.simd_schedule);
	  ws_args->quick_push (t);
	}

      return ws_args;
    }
  else if (gimple_code (ws_stmt) == GIMPLE_OMP_SECTIONS)
    {
      /* Number of sections is equal to the number of edges from the
	 GIMPLE_OMP_SECTIONS_SWITCH statement, except for the one to
	 the exit of the sections region.  */
      basic_block bb = single_succ (gimple_bb (ws_stmt));
      t = build_int_cst (unsigned_type_node, EDGE_COUNT (bb->succs) - 1);
      vec_alloc (ws_args, 1);
      ws_args->quick_push (t);
      return ws_args;
    }

  gcc_unreachable ();
}

/* Discover whether REGION is a combined parallel+workshare region.  */

static void
determine_parallel_type (struct omp_region *region)
{
  basic_block par_entry_bb, par_exit_bb;
  basic_block ws_entry_bb, ws_exit_bb;

  if (region == NULL || region->inner == NULL
      || region->exit == NULL || region->inner->exit == NULL
      || region->inner->cont == NULL)
    return;

  /* We only support parallel+for and parallel+sections.  */
  if (region->type != GIMPLE_OMP_PARALLEL
      || (region->inner->type != GIMPLE_OMP_FOR
	  && region->inner->type != GIMPLE_OMP_SECTIONS))
    return;

  /* Check for perfect nesting PAR_ENTRY_BB -> WS_ENTRY_BB and
     WS_EXIT_BB -> PAR_EXIT_BB.  */
  par_entry_bb = region->entry;
  par_exit_bb = region->exit;
  ws_entry_bb = region->inner->entry;
  ws_exit_bb = region->inner->exit;

  if (single_succ (par_entry_bb) == ws_entry_bb
      && single_succ (ws_exit_bb) == par_exit_bb
      && workshare_safe_to_combine_p (ws_entry_bb)
      && (gimple_omp_parallel_combined_p (last_stmt (par_entry_bb))
	  || (last_and_only_stmt (ws_entry_bb)
	      && last_and_only_stmt (par_exit_bb))))
    {
      gimple *par_stmt = last_stmt (par_entry_bb);
      gimple *ws_stmt = last_stmt (ws_entry_bb);

      if (region->inner->type == GIMPLE_OMP_FOR)
	{
	  /* If this is a combined parallel loop, we need to determine
	     whether or not to use the combined library calls.  There
	     are two cases where we do not apply the transformation:
	     static loops and any kind of ordered loop.  In the first
	     case, we already open code the loop so there is no need
	     to do anything else.  In the latter case, the combined
	     parallel loop call would still need extra synchronization
	     to implement ordered semantics, so there would not be any
	     gain in using the combined call.  */
	  tree clauses = gimple_omp_for_clauses (ws_stmt);
	  tree c = omp_find_clause (clauses, OMP_CLAUSE_SCHEDULE);
	  if (c == NULL
	      || ((OMP_CLAUSE_SCHEDULE_KIND (c) & OMP_CLAUSE_SCHEDULE_MASK)
		  == OMP_CLAUSE_SCHEDULE_STATIC)
	      || omp_find_clause (clauses, OMP_CLAUSE_ORDERED))
	    {
	      region->is_combined_parallel = false;
	      region->inner->is_combined_parallel = false;
	      return;
	    }
	}

      region->is_combined_parallel = true;
      region->inner->is_combined_parallel = true;
      region->ws_args = get_ws_args_for (par_stmt, ws_stmt);
    }
}

/* Debugging dumps for parallel regions.  */
void dump_omp_region (FILE *, struct omp_region *, int);
void debug_omp_region (struct omp_region *);
void debug_all_omp_regions (void);

/* Dump the parallel region tree rooted at REGION.  */

void
dump_omp_region (FILE *file, struct omp_region *region, int indent)
{
  fprintf (file, "%*sbb %d: %s\n", indent, "", region->entry->index,
	   gimple_code_name[region->type]);

  if (region->inner)
    dump_omp_region (file, region->inner, indent + 4);

  if (region->cont)
    {
      fprintf (file, "%*sbb %d: GIMPLE_OMP_CONTINUE\n", indent, "",
	       region->cont->index);
    }

  if (region->exit)
    fprintf (file, "%*sbb %d: GIMPLE_OMP_RETURN\n", indent, "",
	     region->exit->index);
  else
    fprintf (file, "%*s[no exit marker]\n", indent, "");

  if (region->next)
    dump_omp_region (file, region->next, indent);
}

DEBUG_FUNCTION void
debug_omp_region (struct omp_region *region)
{
  dump_omp_region (stderr, region, 0);
}

DEBUG_FUNCTION void
debug_all_omp_regions (void)
{
  dump_omp_region (stderr, root_omp_region, 0);
}

/* Create a new parallel region starting at STMT inside region PARENT.  */

static struct omp_region *
new_omp_region (basic_block bb, enum gimple_code type,
		struct omp_region *parent)
{
  struct omp_region *region = XCNEW (struct omp_region);

  region->outer = parent;
  region->entry = bb;
  region->type = type;

  if (parent)
    {
      /* This is a nested region.  Add it to the list of inner
	 regions in PARENT.  */
      region->next = parent->inner;
      parent->inner = region;
    }
  else
    {
      /* This is a toplevel region.  Add it to the list of toplevel
	 regions in ROOT_OMP_REGION.  */
      region->next = root_omp_region;
      root_omp_region = region;
    }

  return region;
}

/* Release the memory associated with the region tree rooted at REGION.  */

static void
free_omp_region_1 (struct omp_region *region)
{
  struct omp_region *i, *n;

  for (i = region->inner; i ; i = n)
    {
      n = i->next;
      free_omp_region_1 (i);
    }

  free (region);
}

/* Release the memory for the entire omp region tree.  */

void
omp_free_regions (void)
{
  struct omp_region *r, *n;
  for (r = root_omp_region; r ; r = n)
    {
      n = r->next;
      free_omp_region_1 (r);
    }
  root_omp_region = NULL;
}

/* A convenience function to build an empty GIMPLE_COND with just the
   condition.  */

static gcond *
gimple_build_cond_empty (tree cond)
{
  enum tree_code pred_code;
  tree lhs, rhs;

  gimple_cond_get_ops_from_tree (cond, &pred_code, &lhs, &rhs);
  return gimple_build_cond (pred_code, lhs, rhs, NULL_TREE, NULL_TREE);
}

/* Return true if a parallel REGION is within a declare target function or
   within a target region and is not a part of a gridified target.  */

static bool
parallel_needs_hsa_kernel_p (struct omp_region *region)
{
  bool indirect = false;
  for (region = region->outer; region; region = region->outer)
    {
      if (region->type == GIMPLE_OMP_PARALLEL)
	indirect = true;
      else if (region->type == GIMPLE_OMP_TARGET)
	{
	  gomp_target *tgt_stmt
	    = as_a <gomp_target *> (last_stmt (region->entry));

	  if (omp_find_clause (gimple_omp_target_clauses (tgt_stmt),
			       OMP_CLAUSE__GRIDDIM_))
	    return indirect;
	  else
	    return true;
	}
    }

  if (lookup_attribute ("omp declare target",
			DECL_ATTRIBUTES (current_function_decl)))
    return true;

  return false;
}

/* Change DECL_CONTEXT of CHILD_FNDECL to that of the parent function.
   Add CHILD_FNDECL to decl chain of the supercontext of the block
   ENTRY_BLOCK - this is the block which originally contained the
   code from which CHILD_FNDECL was created.

   Together, these actions ensure that the debug info for the outlined
   function will be emitted with the correct lexical scope.  */

static void
adjust_context_and_scope (tree entry_block, tree child_fndecl)
{
  if (entry_block != NULL_TREE && TREE_CODE (entry_block) == BLOCK)
    {
      tree b = BLOCK_SUPERCONTEXT (entry_block);

      if (TREE_CODE (b) == BLOCK)
	{
	  tree parent_fndecl;

	  /* Follow supercontext chain until the parent fndecl
	     is found.  */
	  for (parent_fndecl = BLOCK_SUPERCONTEXT (b);
	       TREE_CODE (parent_fndecl) == BLOCK;
	       parent_fndecl = BLOCK_SUPERCONTEXT (parent_fndecl))
	    ;

	  gcc_assert (TREE_CODE (parent_fndecl) == FUNCTION_DECL);

	  DECL_CONTEXT (child_fndecl) = parent_fndecl;

	  DECL_CHAIN (child_fndecl) = BLOCK_VARS (b);
	  BLOCK_VARS (b) = child_fndecl;
	}
    }
}

/* Build the function calls to GOMP_parallel_start etc to actually
   generate the parallel operation.  REGION is the parallel region
   being expanded.  BB is the block where to insert the code.  WS_ARGS
   will be set if this is a call to a combined parallel+workshare
   construct, it contains the list of additional arguments needed by
   the workshare construct.  */
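
/* As a rough sketch (not a verbatim quote of the generated GIMPLE),
   the call emitted below for a plain parallel region is

     GOMP_parallel (child_fn, &.omp_data_o, num_threads, flags);

   For combined parallel+workshare regions one of the
   GOMP_parallel_loop_* or GOMP_parallel_sections entry points is used
   instead, with the extra WS_ARGS spliced in between NUM_THREADS and
   FLAGS.  */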

static void
expand_parallel_call (struct omp_region *region, basic_block bb,
		      gomp_parallel *entry_stmt,
		      vec<tree, va_gc> *ws_args)
{
  tree t, t1, t2, val, cond, c, clauses, flags;
  gimple_stmt_iterator gsi;
  gimple *stmt;
  enum built_in_function start_ix;
  int start_ix2;
  location_t clause_loc;
  vec<tree, va_gc> *args;

  clauses = gimple_omp_parallel_clauses (entry_stmt);

  /* Determine what flavor of GOMP_parallel we will be
     emitting.  */
  start_ix = BUILT_IN_GOMP_PARALLEL;
  if (is_combined_parallel (region))
    {
      switch (region->inner->type)
	{
	case GIMPLE_OMP_FOR:
	  gcc_assert (region->inner->sched_kind != OMP_CLAUSE_SCHEDULE_AUTO);
	  switch (region->inner->sched_kind)
	    {
	    case OMP_CLAUSE_SCHEDULE_RUNTIME:
	      start_ix2 = 3;
	      break;
	    case OMP_CLAUSE_SCHEDULE_DYNAMIC:
	    case OMP_CLAUSE_SCHEDULE_GUIDED:
	      if (region->inner->sched_modifiers
		  & OMP_CLAUSE_SCHEDULE_NONMONOTONIC)
		{
		  start_ix2 = 3 + region->inner->sched_kind;
		  break;
		}
	      /* FALLTHRU */
	    default:
	      start_ix2 = region->inner->sched_kind;
	      break;
	    }
	  start_ix2 += (int) BUILT_IN_GOMP_PARALLEL_LOOP_STATIC;
	  start_ix = (enum built_in_function) start_ix2;
	  break;
	case GIMPLE_OMP_SECTIONS:
	  start_ix = BUILT_IN_GOMP_PARALLEL_SECTIONS;
	  break;
	default:
	  gcc_unreachable ();
	}
    }

  /* By default, the value of NUM_THREADS is zero (selected at run time)
     and there is no conditional.  */
  cond = NULL_TREE;
  val = build_int_cst (unsigned_type_node, 0);
  flags = build_int_cst (unsigned_type_node, 0);

  c = omp_find_clause (clauses, OMP_CLAUSE_IF);
  if (c)
    cond = OMP_CLAUSE_IF_EXPR (c);

  c = omp_find_clause (clauses, OMP_CLAUSE_NUM_THREADS);
  if (c)
    {
      val = OMP_CLAUSE_NUM_THREADS_EXPR (c);
      clause_loc = OMP_CLAUSE_LOCATION (c);
    }
  else
    clause_loc = gimple_location (entry_stmt);

  c = omp_find_clause (clauses, OMP_CLAUSE_PROC_BIND);
  if (c)
    flags = build_int_cst (unsigned_type_node, OMP_CLAUSE_PROC_BIND_KIND (c));

  /* Ensure 'val' is of the correct type.  */
  val = fold_convert_loc (clause_loc, unsigned_type_node, val);

  /* If we found the clause 'if (cond)', build either
     (cond != 0) or (cond ? val : 1u).  */
  if (cond)
    {
      cond = gimple_boolify (cond);

      if (integer_zerop (val))
	val = fold_build2_loc (clause_loc,
			       EQ_EXPR, unsigned_type_node, cond,
			       build_int_cst (TREE_TYPE (cond), 0));
      else
	{
	  basic_block cond_bb, then_bb, else_bb;
	  edge e, e_then, e_else;
	  tree tmp_then, tmp_else, tmp_join, tmp_var;

	  tmp_var = create_tmp_var (TREE_TYPE (val));
	  if (gimple_in_ssa_p (cfun))
	    {
	      tmp_then = make_ssa_name (tmp_var);
	      tmp_else = make_ssa_name (tmp_var);
	      tmp_join = make_ssa_name (tmp_var);
	    }
	  else
	    {
	      tmp_then = tmp_var;
	      tmp_else = tmp_var;
	      tmp_join = tmp_var;
	    }

	  e = split_block_after_labels (bb);
	  cond_bb = e->src;
	  bb = e->dest;
	  remove_edge (e);

	  then_bb = create_empty_bb (cond_bb);
	  else_bb = create_empty_bb (then_bb);
	  set_immediate_dominator (CDI_DOMINATORS, then_bb, cond_bb);
	  set_immediate_dominator (CDI_DOMINATORS, else_bb, cond_bb);

	  stmt = gimple_build_cond_empty (cond);
	  gsi = gsi_start_bb (cond_bb);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

	  gsi = gsi_start_bb (then_bb);
	  expand_omp_build_assign (&gsi, tmp_then, val, true);

	  gsi = gsi_start_bb (else_bb);
	  expand_omp_build_assign (&gsi, tmp_else,
				   build_int_cst (unsigned_type_node, 1),
				   true);

	  make_edge (cond_bb, then_bb, EDGE_TRUE_VALUE);
	  make_edge (cond_bb, else_bb, EDGE_FALSE_VALUE);
	  add_bb_to_loop (then_bb, cond_bb->loop_father);
	  add_bb_to_loop (else_bb, cond_bb->loop_father);
	  e_then = make_edge (then_bb, bb, EDGE_FALLTHRU);
	  e_else = make_edge (else_bb, bb, EDGE_FALLTHRU);

	  if (gimple_in_ssa_p (cfun))
	    {
	      gphi *phi = create_phi_node (tmp_join, bb);
	      add_phi_arg (phi, tmp_then, e_then, UNKNOWN_LOCATION);
	      add_phi_arg (phi, tmp_else, e_else, UNKNOWN_LOCATION);
	    }

	  val = tmp_join;
	}

      gsi = gsi_start_bb (bb);
      val = force_gimple_operand_gsi (&gsi, val, true, NULL_TREE,
				      false, GSI_CONTINUE_LINKING);
    }

  gsi = gsi_last_nondebug_bb (bb);
  t = gimple_omp_parallel_data_arg (entry_stmt);
  if (t == NULL)
    t1 = null_pointer_node;
  else
    t1 = build_fold_addr_expr (t);
  tree child_fndecl = gimple_omp_parallel_child_fn (entry_stmt);
  t2 = build_fold_addr_expr (child_fndecl);

  adjust_context_and_scope (gimple_block (entry_stmt), child_fndecl);

  vec_alloc (args, 4 + vec_safe_length (ws_args));
  args->quick_push (t2);
  args->quick_push (t1);
  args->quick_push (val);
  if (ws_args)
    args->splice (*ws_args);
  args->quick_push (flags);

  t = build_call_expr_loc_vec (UNKNOWN_LOCATION,
			       builtin_decl_explicit (start_ix), args);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);

  if (hsa_gen_requested_p ()
      && parallel_needs_hsa_kernel_p (region))
    {
      cgraph_node *child_cnode = cgraph_node::get (child_fndecl);
      hsa_register_kernel (child_cnode);
    }
}

/* Build the function call to GOMP_task to actually
   generate the task operation.  BB is the block where to insert the code.  */
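
/* A sketch of the argument order as assembled below (matching, as far
   as we rely on it here, libgomp's entry point):

     GOMP_task (child_fn, data, copy_fn, arg_size, arg_align,
		if_cond, flags, depend, priority);

   For taskloops, GOMP_taskloop{,_ull} is used instead; it drops the
   if/depend arguments and takes num_tasks, priority and the loop's
   start, end and step.  */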

static void
expand_task_call (struct omp_region *region, basic_block bb,
		  gomp_task *entry_stmt)
{
  tree t1, t2, t3;
  gimple_stmt_iterator gsi;
  location_t loc = gimple_location (entry_stmt);

  tree clauses = gimple_omp_task_clauses (entry_stmt);

  tree ifc = omp_find_clause (clauses, OMP_CLAUSE_IF);
  tree untied = omp_find_clause (clauses, OMP_CLAUSE_UNTIED);
  tree mergeable = omp_find_clause (clauses, OMP_CLAUSE_MERGEABLE);
  tree depend = omp_find_clause (clauses, OMP_CLAUSE_DEPEND);
  tree finalc = omp_find_clause (clauses, OMP_CLAUSE_FINAL);
  tree priority = omp_find_clause (clauses, OMP_CLAUSE_PRIORITY);

  unsigned int iflags
    = (untied ? GOMP_TASK_FLAG_UNTIED : 0)
      | (mergeable ? GOMP_TASK_FLAG_MERGEABLE : 0)
      | (depend ? GOMP_TASK_FLAG_DEPEND : 0);

  bool taskloop_p = gimple_omp_task_taskloop_p (entry_stmt);
  tree startvar = NULL_TREE, endvar = NULL_TREE, step = NULL_TREE;
  tree num_tasks = NULL_TREE;
  bool ull = false;
  if (taskloop_p)
    {
      gimple *g = last_stmt (region->outer->entry);
      gcc_assert (gimple_code (g) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (g) == GF_OMP_FOR_KIND_TASKLOOP);
      struct omp_for_data fd;
      omp_extract_for_data (as_a <gomp_for *> (g), &fd, NULL);
      startvar = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      endvar = omp_find_clause (OMP_CLAUSE_CHAIN (startvar),
				OMP_CLAUSE__LOOPTEMP_);
      startvar = OMP_CLAUSE_DECL (startvar);
      endvar = OMP_CLAUSE_DECL (endvar);
      step = fold_convert_loc (loc, fd.iter_type, fd.loop.step);
      if (fd.loop.cond_code == LT_EXPR)
	iflags |= GOMP_TASK_FLAG_UP;
      tree tclauses = gimple_omp_for_clauses (g);
      num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_NUM_TASKS);
      if (num_tasks)
	num_tasks = OMP_CLAUSE_NUM_TASKS_EXPR (num_tasks);
      else
	{
	  num_tasks = omp_find_clause (tclauses, OMP_CLAUSE_GRAINSIZE);
	  if (num_tasks)
	    {
	      iflags |= GOMP_TASK_FLAG_GRAINSIZE;
	      num_tasks = OMP_CLAUSE_GRAINSIZE_EXPR (num_tasks);
	    }
	  else
	    num_tasks = integer_zero_node;
	}
      num_tasks = fold_convert_loc (loc, long_integer_type_node, num_tasks);
      if (ifc == NULL_TREE)
	iflags |= GOMP_TASK_FLAG_IF;
      if (omp_find_clause (tclauses, OMP_CLAUSE_NOGROUP))
	iflags |= GOMP_TASK_FLAG_NOGROUP;
      ull = fd.iter_type == long_long_unsigned_type_node;
    }
  else if (priority)
    iflags |= GOMP_TASK_FLAG_PRIORITY;

  tree flags = build_int_cst (unsigned_type_node, iflags);

  tree cond = boolean_true_node;
  if (ifc)
    {
      if (taskloop_p)
	{
	  tree t = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
	  t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
			       build_int_cst (unsigned_type_node,
					      GOMP_TASK_FLAG_IF),
			       build_int_cst (unsigned_type_node, 0));
	  flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node,
				   flags, t);
	}
      else
	cond = gimple_boolify (OMP_CLAUSE_IF_EXPR (ifc));
    }

  if (finalc)
    {
      tree t = gimple_boolify (OMP_CLAUSE_FINAL_EXPR (finalc));
      t = fold_build3_loc (loc, COND_EXPR, unsigned_type_node, t,
			   build_int_cst (unsigned_type_node,
					  GOMP_TASK_FLAG_FINAL),
			   build_int_cst (unsigned_type_node, 0));
      flags = fold_build2_loc (loc, PLUS_EXPR, unsigned_type_node, flags, t);
    }
  if (depend)
    depend = OMP_CLAUSE_DECL (depend);
  else
    depend = build_int_cst (ptr_type_node, 0);
  if (priority)
    priority = fold_convert (integer_type_node,
			     OMP_CLAUSE_PRIORITY_EXPR (priority));
  else
    priority = integer_zero_node;

  gsi = gsi_last_nondebug_bb (bb);
  tree t = gimple_omp_task_data_arg (entry_stmt);
  if (t == NULL)
    t2 = null_pointer_node;
  else
    t2 = build_fold_addr_expr_loc (loc, t);
  t1 = build_fold_addr_expr_loc (loc, gimple_omp_task_child_fn (entry_stmt));
  t = gimple_omp_task_copy_fn (entry_stmt);
  if (t == NULL)
    t3 = null_pointer_node;
  else
    t3 = build_fold_addr_expr_loc (loc, t);

  if (taskloop_p)
    t = build_call_expr (ull
			 ? builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP_ULL)
			 : builtin_decl_explicit (BUILT_IN_GOMP_TASKLOOP),
			 11, t1, t2, t3,
			 gimple_omp_task_arg_size (entry_stmt),
			 gimple_omp_task_arg_align (entry_stmt), flags,
			 num_tasks, priority, startvar, endvar, step);
  else
    t = build_call_expr (builtin_decl_explicit (BUILT_IN_GOMP_TASK),
			 9, t1, t2, t3,
			 gimple_omp_task_arg_size (entry_stmt),
			 gimple_omp_task_arg_align (entry_stmt), cond, flags,
			 depend, priority);

  force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
			    false, GSI_CONTINUE_LINKING);
}

/* Chain all the DECLs in LIST by their TREE_CHAIN fields.  */

static tree
vec2chain (vec<tree, va_gc> *v)
{
  tree chain = NULL_TREE, t;
  unsigned ix;

  FOR_EACH_VEC_SAFE_ELT_REVERSE (v, ix, t)
    {
      DECL_CHAIN (t) = chain;
      chain = t;
    }

  return chain;
}

/* Remove barriers in REGION->EXIT's block.  Note that this is only
   valid for GIMPLE_OMP_PARALLEL regions.  Since the end of a parallel region
   is an implicit barrier, any workshare inside the GIMPLE_OMP_PARALLEL that
   left a barrier at the end of the GIMPLE_OMP_PARALLEL region can now be
   removed.  */
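
/* For instance, given the illustrative source

     #pragma omp parallel
     {
       #pragma omp for
       for (...)
	 body;
     }

   the implicit barrier that ends the worksharing loop is immediately
   followed by the parallel region's own implicit barrier, so the
   former is redundant and its GIMPLE_OMP_RETURN can be marked
   nowait.  */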

static void
remove_exit_barrier (struct omp_region *region)
{
  gimple_stmt_iterator gsi;
  basic_block exit_bb;
  edge_iterator ei;
  edge e;
  gimple *stmt;
  int any_addressable_vars = -1;

  exit_bb = region->exit;

  /* If the parallel region doesn't return, we don't have REGION->EXIT
     block at all.  */
  if (! exit_bb)
    return;

  /* The last insn in the block will be the parallel's GIMPLE_OMP_RETURN.  The
     workshare's GIMPLE_OMP_RETURN will be in a preceding block.  The kinds of
     statements that can appear in between are extremely limited -- no
     memory operations at all.  Here, we allow nothing at all, so the
     only thing we allow to precede this GIMPLE_OMP_RETURN is a label.  */
  gsi = gsi_last_nondebug_bb (exit_bb);
  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
  gsi_prev_nondebug (&gsi);
  if (!gsi_end_p (gsi) && gimple_code (gsi_stmt (gsi)) != GIMPLE_LABEL)
    return;

  FOR_EACH_EDGE (e, ei, exit_bb->preds)
    {
      gsi = gsi_last_nondebug_bb (e->src);
      if (gsi_end_p (gsi))
	continue;
      stmt = gsi_stmt (gsi);
      if (gimple_code (stmt) == GIMPLE_OMP_RETURN
	  && !gimple_omp_return_nowait_p (stmt))
	{
	  /* OpenMP 3.0 tasks unfortunately prevent this optimization
	     in many cases.  If there could be tasks queued, the barrier
	     might be needed to let the tasks run before some local
	     variable of the parallel that the task uses as shared
	     runs out of scope.  The task can be spawned either
	     from within the current function (this would be easy to check)
	     or from some function it calls and gets passed an address
	     of such a variable.  */
	  if (any_addressable_vars < 0)
	    {
	      gomp_parallel *parallel_stmt
		= as_a <gomp_parallel *> (last_stmt (region->entry));
	      tree child_fun = gimple_omp_parallel_child_fn (parallel_stmt);
	      tree local_decls, block, decl;
	      unsigned ix;

	      any_addressable_vars = 0;
	      FOR_EACH_LOCAL_DECL (DECL_STRUCT_FUNCTION (child_fun), ix, decl)
		if (TREE_ADDRESSABLE (decl))
		  {
		    any_addressable_vars = 1;
		    break;
		  }
	      for (block = gimple_block (stmt);
		   !any_addressable_vars
		   && block
		   && TREE_CODE (block) == BLOCK;
		   block = BLOCK_SUPERCONTEXT (block))
		{
		  for (local_decls = BLOCK_VARS (block);
		       local_decls;
		       local_decls = DECL_CHAIN (local_decls))
		    if (TREE_ADDRESSABLE (local_decls))
		      {
			any_addressable_vars = 1;
			break;
		      }
		  if (block == gimple_block (parallel_stmt))
		    break;
		}
	    }
	  if (!any_addressable_vars)
	    gimple_omp_return_set_nowait (stmt);
	}
    }
}

static void
remove_exit_barriers (struct omp_region *region)
{
  if (region->type == GIMPLE_OMP_PARALLEL)
    remove_exit_barrier (region);

  if (region->inner)
    {
      region = region->inner;
      remove_exit_barriers (region);
      while (region->next)
	{
	  region = region->next;
	  remove_exit_barriers (region);
	}
    }
}

/* Optimize omp_get_thread_num () and omp_get_num_threads ()
   calls.  These can't be declared as const functions, but
   within one parallel body they are constant, so they can be
   transformed there into __builtin_omp_get_{thread_num,num_threads} ()
   which are declared const.  Similarly for a task body, except
   that in an untied task omp_get_thread_num () can change at any task
   scheduling point.  */
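
/* For example, a call to the external omp_get_thread_num () inside an
   outlined parallel body is redirected to
   __builtin_omp_get_thread_num (); being declared const, repeated
   calls can then be CSEd by later passes.  */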

static void
optimize_omp_library_calls (gimple *entry_stmt)
{
  basic_block bb;
  gimple_stmt_iterator gsi;
  tree thr_num_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
  tree thr_num_id = DECL_ASSEMBLER_NAME (thr_num_tree);
  tree num_thr_tree = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
  tree num_thr_id = DECL_ASSEMBLER_NAME (num_thr_tree);
  bool untied_task = (gimple_code (entry_stmt) == GIMPLE_OMP_TASK
		      && omp_find_clause (gimple_omp_task_clauses (entry_stmt),
					  OMP_CLAUSE_UNTIED) != NULL);

  FOR_EACH_BB_FN (bb, cfun)
    for (gsi = gsi_start_bb (bb); !gsi_end_p (gsi); gsi_next (&gsi))
      {
	gimple *call = gsi_stmt (gsi);
	tree decl;

	if (is_gimple_call (call)
	    && (decl = gimple_call_fndecl (call))
	    && DECL_EXTERNAL (decl)
	    && TREE_PUBLIC (decl)
	    && DECL_INITIAL (decl) == NULL)
	  {
	    tree built_in;

	    if (DECL_NAME (decl) == thr_num_id)
	      {
		/* In #pragma omp task untied omp_get_thread_num () can change
		   during the execution of the task region.  */
		if (untied_task)
		  continue;
		built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_THREAD_NUM);
	      }
	    else if (DECL_NAME (decl) == num_thr_id)
	      built_in = builtin_decl_explicit (BUILT_IN_OMP_GET_NUM_THREADS);
	    else
	      continue;

	    if (DECL_ASSEMBLER_NAME (decl) != DECL_ASSEMBLER_NAME (built_in)
		|| gimple_call_num_args (call) != 0)
	      continue;

	    if (flag_exceptions && !TREE_NOTHROW (decl))
	      continue;

	    if (TREE_CODE (TREE_TYPE (decl)) != FUNCTION_TYPE
		|| !types_compatible_p (TREE_TYPE (TREE_TYPE (decl)),
					TREE_TYPE (TREE_TYPE (built_in))))
	      continue;

	    gimple_call_set_fndecl (call, built_in);
	  }
      }
}

/* Callback for expand_omp_build_assign.  Return non-NULL if *tp needs to be
   regimplified.  */

static tree
expand_omp_regimplify_p (tree *tp, int *walk_subtrees, void *)
{
  tree t = *tp;

  /* Any variable with DECL_VALUE_EXPR needs to be regimplified.  */
  if (VAR_P (t) && DECL_HAS_VALUE_EXPR_P (t))
    return t;

  if (TREE_CODE (t) == ADDR_EXPR)
    recompute_tree_invariant_for_addr_expr (t);

  *walk_subtrees = !TYPE_P (t) && !DECL_P (t);
  return NULL_TREE;
}

/* Prepend or append TO = FROM assignment before or after *GSI_P.  */

static void
expand_omp_build_assign (gimple_stmt_iterator *gsi_p, tree to, tree from,
			 bool after)
{
  bool simple_p = DECL_P (to) && TREE_ADDRESSABLE (to);
  from = force_gimple_operand_gsi (gsi_p, from, simple_p, NULL_TREE,
				   !after, after ? GSI_CONTINUE_LINKING
						 : GSI_SAME_STMT);
  gimple *stmt = gimple_build_assign (to, from);
  if (after)
    gsi_insert_after (gsi_p, stmt, GSI_CONTINUE_LINKING);
  else
    gsi_insert_before (gsi_p, stmt, GSI_SAME_STMT);
  if (walk_tree (&from, expand_omp_regimplify_p, NULL, NULL)
      || walk_tree (&to, expand_omp_regimplify_p, NULL, NULL))
    {
      gimple_stmt_iterator gsi = gsi_for_stmt (stmt);
      gimple_regimplify_operands (stmt, &gsi);
    }
}

/* Expand the OpenMP parallel or task directive starting at REGION.  */

static void
expand_omp_taskreg (struct omp_region *region)
{
  basic_block entry_bb, exit_bb, new_bb;
  struct function *child_cfun;
  tree child_fn, block, t;
  gimple_stmt_iterator gsi;
  gimple *entry_stmt, *stmt;
  edge e;
  vec<tree, va_gc> *ws_args;

  entry_stmt = last_stmt (region->entry);
  child_fn = gimple_omp_taskreg_child_fn (entry_stmt);
  child_cfun = DECL_STRUCT_FUNCTION (child_fn);

  entry_bb = region->entry;
  if (gimple_code (entry_stmt) == GIMPLE_OMP_TASK)
    exit_bb = region->cont;
  else
    exit_bb = region->exit;

  if (is_combined_parallel (region))
    ws_args = region->ws_args;
  else
    ws_args = NULL;

  if (child_cfun->cfg)
    {
      /* Due to inlining, it may happen that we have already outlined
	 the region, in which case all we need to do is make the
	 sub-graph unreachable and emit the parallel call.  */
      edge entry_succ_e, exit_succ_e;

      entry_succ_e = single_succ_edge (entry_bb);

      gsi = gsi_last_nondebug_bb (entry_bb);
      gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_PARALLEL
		  || gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_TASK);
      gsi_remove (&gsi, true);

      new_bb = entry_bb;
      if (exit_bb)
	{
	  exit_succ_e = single_succ_edge (exit_bb);
	  make_edge (new_bb, exit_succ_e->dest, EDGE_FALLTHRU);
	}
      remove_edge_and_dominated_blocks (entry_succ_e);
    }
  else
    {
      unsigned srcidx, dstidx, num;

      /* If the parallel region needs data sent from the parent
	 function, then the very first statement (except possible
	 tree profile counter updates) of the parallel body
	 is a copy assignment .OMP_DATA_I = &.OMP_DATA_O.  Since
	 &.OMP_DATA_O is passed as an argument to the child function,
	 we need to replace it with the argument as seen by the child
	 function.

	 In most cases, this will end up being the identity assignment
	 .OMP_DATA_I = .OMP_DATA_I.  However, if the parallel body had
	 a function call that has been inlined, the original PARM_DECL
	 .OMP_DATA_I may have been converted into a different local
	 variable, in which case we need to keep the assignment.  */
      if (gimple_omp_taskreg_data_arg (entry_stmt))
	{
	  basic_block entry_succ_bb
	    = single_succ_p (entry_bb) ? single_succ (entry_bb)
				       : FALLTHRU_EDGE (entry_bb)->dest;
	  tree arg;
	  gimple *parcopy_stmt = NULL;

	  for (gsi = gsi_start_bb (entry_succ_bb); ; gsi_next (&gsi))
	    {
	      gimple *stmt;

	      gcc_assert (!gsi_end_p (gsi));
	      stmt = gsi_stmt (gsi);
	      if (gimple_code (stmt) != GIMPLE_ASSIGN)
		continue;

	      if (gimple_num_ops (stmt) == 2)
		{
		  tree arg = gimple_assign_rhs1 (stmt);

		  /* We're ignoring the subcode because we're
		     effectively doing a STRIP_NOPS.  */

		  if (TREE_CODE (arg) == ADDR_EXPR
		      && TREE_OPERAND (arg, 0)
			 == gimple_omp_taskreg_data_arg (entry_stmt))
		    {
		      parcopy_stmt = stmt;
		      break;
		    }
		}
	    }

	  gcc_assert (parcopy_stmt != NULL);
	  arg = DECL_ARGUMENTS (child_fn);

	  if (!gimple_in_ssa_p (cfun))
	    {
	      if (gimple_assign_lhs (parcopy_stmt) == arg)
		gsi_remove (&gsi, true);
	      else
		{
		  /* ?? Is setting the subcode really necessary ??  */
		  gimple_omp_set_subcode (parcopy_stmt, TREE_CODE (arg));
		  gimple_assign_set_rhs1 (parcopy_stmt, arg);
		}
	    }
	  else
	    {
	      tree lhs = gimple_assign_lhs (parcopy_stmt);
	      gcc_assert (SSA_NAME_VAR (lhs) == arg);
	      /* We'd like to set the rhs to the default def in the child_fn,
		 but it's too early to create ssa names in the child_fn.
		 Instead, we set the rhs to the parm.  In
		 move_sese_region_to_fn, we introduce a default def for the
		 parm, map the parm to its default def, and once we encounter
		 this stmt, replace the parm with the default def.  */
	      gimple_assign_set_rhs1 (parcopy_stmt, arg);
	      update_stmt (parcopy_stmt);
	    }
	}

      /* Declare local variables needed in CHILD_CFUN.  */
      block = DECL_INITIAL (child_fn);
      BLOCK_VARS (block) = vec2chain (child_cfun->local_decls);
      /* The gimplifier could record temporaries in parallel/task block
	 rather than in containing function's local_decls chain,
	 which would mean cgraph missed finalizing them.  Do it now.  */
      for (t = BLOCK_VARS (block); t; t = DECL_CHAIN (t))
	if (VAR_P (t) && TREE_STATIC (t) && !DECL_EXTERNAL (t))
	  varpool_node::finalize_decl (t);
      DECL_SAVED_TREE (child_fn) = NULL;
      /* We'll create a CFG for child_fn, so no gimple body is needed.  */
      gimple_set_body (child_fn, NULL);
      TREE_USED (block) = 1;

      /* Reset DECL_CONTEXT on function arguments.  */
      for (t = DECL_ARGUMENTS (child_fn); t; t = DECL_CHAIN (t))
	DECL_CONTEXT (t) = child_fn;

      /* Split ENTRY_BB at GIMPLE_OMP_PARALLEL or GIMPLE_OMP_TASK,
	 so that it can be moved to the child function.  */
      gsi = gsi_last_nondebug_bb (entry_bb);
      stmt = gsi_stmt (gsi);
      gcc_assert (stmt && (gimple_code (stmt) == GIMPLE_OMP_PARALLEL
			   || gimple_code (stmt) == GIMPLE_OMP_TASK));
      e = split_block (entry_bb, stmt);
      gsi_remove (&gsi, true);
      entry_bb = e->dest;
      edge e2 = NULL;
      if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
	single_succ_edge (entry_bb)->flags = EDGE_FALLTHRU;
      else
	{
	  e2 = make_edge (e->src, BRANCH_EDGE (entry_bb)->dest, EDGE_ABNORMAL);
	  gcc_assert (e2->dest == region->exit);
	  remove_edge (BRANCH_EDGE (entry_bb));
	  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e->src);
	  gsi = gsi_last_nondebug_bb (region->exit);
	  gcc_assert (!gsi_end_p (gsi)
		      && gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_RETURN);
	  gsi_remove (&gsi, true);
	}

      /* Convert GIMPLE_OMP_{RETURN,CONTINUE} into a RETURN_EXPR.  */
      if (exit_bb)
	{
	  gsi = gsi_last_nondebug_bb (exit_bb);
	  gcc_assert (!gsi_end_p (gsi)
		      && (gimple_code (gsi_stmt (gsi))
			  == (e2 ? GIMPLE_OMP_CONTINUE : GIMPLE_OMP_RETURN)));
	  stmt = gimple_build_return (NULL);
	  gsi_insert_after (&gsi, stmt, GSI_SAME_STMT);
	  gsi_remove (&gsi, true);
	}

      /* Move the parallel region into CHILD_CFUN.  */

      if (gimple_in_ssa_p (cfun))
	{
	  init_tree_ssa (child_cfun);
	  init_ssa_operands (child_cfun);
	  child_cfun->gimple_df->in_ssa_p = true;
	  block = NULL_TREE;
	}
      else
	block = gimple_block (entry_stmt);

      /* Make sure to generate early debug for the function before
	 outlining anything.  */
      if (! gimple_in_ssa_p (cfun))
	(*debug_hooks->early_global_decl) (cfun->decl);

      new_bb = move_sese_region_to_fn (child_cfun, entry_bb, exit_bb, block);
      if (exit_bb)
	single_succ_edge (new_bb)->flags = EDGE_FALLTHRU;
      if (e2)
	{
	  basic_block dest_bb = e2->dest;
	  if (!exit_bb)
	    make_edge (new_bb, dest_bb, EDGE_FALLTHRU);
	  remove_edge (e2);
	  set_immediate_dominator (CDI_DOMINATORS, dest_bb, new_bb);
	}
      /* When the OMP expansion process cannot guarantee an up-to-date
	 loop tree, arrange for the child function to fix up loops.  */
      if (loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	child_cfun->x_current_loops->state |= LOOPS_NEED_FIXUP;

      /* Remove non-local VAR_DECLs from child_cfun->local_decls list.  */
      num = vec_safe_length (child_cfun->local_decls);
      for (srcidx = 0, dstidx = 0; srcidx < num; srcidx++)
	{
	  t = (*child_cfun->local_decls)[srcidx];
	  if (DECL_CONTEXT (t) == cfun->decl)
	    continue;
	  if (srcidx != dstidx)
	    (*child_cfun->local_decls)[dstidx] = t;
	  dstidx++;
	}
      if (dstidx != num)
	vec_safe_truncate (child_cfun->local_decls, dstidx);

      /* Inform the callgraph about the new function.  */
      child_cfun->curr_properties = cfun->curr_properties;
      child_cfun->has_simduid_loops |= cfun->has_simduid_loops;
      child_cfun->has_force_vectorize_loops |= cfun->has_force_vectorize_loops;
      cgraph_node *node = cgraph_node::get_create (child_fn);
      node->parallelized_function = 1;
      cgraph_node::add_new_function (child_fn, true);

      bool need_asm = DECL_ASSEMBLER_NAME_SET_P (current_function_decl)
		      && !DECL_ASSEMBLER_NAME_SET_P (child_fn);

      /* Fix the callgraph edges for child_cfun.  Those for cfun will be
	 fixed in a following pass.  */
      push_cfun (child_cfun);
      if (need_asm)
	assign_assembler_name_if_needed (child_fn);

      if (optimize)
	optimize_omp_library_calls (entry_stmt);
      update_max_bb_count ();
      cgraph_edge::rebuild_edges ();

      /* Some EH regions might become dead, see PR34608.  If
	 pass_cleanup_cfg isn't the first pass to happen with the
	 new child, these dead EH edges might cause problems.
	 Clean them up now.  */
      if (flag_exceptions)
	{
	  basic_block bb;
	  bool changed = false;

	  FOR_EACH_BB_FN (bb, cfun)
	    changed |= gimple_purge_dead_eh_edges (bb);
	  if (changed)
	    cleanup_tree_cfg ();
	}
      if (gimple_in_ssa_p (cfun))
	update_ssa (TODO_update_ssa);
      if (flag_checking && !loops_state_satisfies_p (LOOPS_NEED_FIXUP))
	verify_loop_structure ();
      pop_cfun ();

      if (dump_file && !gimple_in_ssa_p (cfun))
	{
	  omp_any_child_fn_dumped = true;
	  dump_function_header (dump_file, child_fn, dump_flags);
	  dump_function_to_file (child_fn, dump_file, dump_flags);
	}
    }

  if (gimple_code (entry_stmt) == GIMPLE_OMP_PARALLEL)
    expand_parallel_call (region, new_bb,
			  as_a <gomp_parallel *> (entry_stmt), ws_args);
  else
    expand_task_call (region, new_bb, as_a <gomp_task *> (entry_stmt));
  if (gimple_in_ssa_p (cfun))
    update_ssa (TODO_update_ssa_only_virtuals);
}

/* Information about members of an OpenACC collapsed loop nest.  */

struct oacc_collapse
{
  tree base;  /* Base value.  */
  tree iters; /* Number of steps.  */
  tree step;  /* Step size.  */
  tree tile;  /* Tile increment (if tiled).  */
  tree outer; /* Tile iterator var.  */
};

/* Helper for expand_oacc_for.  Determine collapsed loop information.
   Fill in COUNTS array.  Emit any initialization code before GSI.
   Return the calculated outer loop bound of BOUND_TYPE.  */
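
/* The iteration count computed below for each member loop is,
   in effect,

     iters = (e - b + s - dir) / s

   with dir being +1 for < loops and -1 for > loops; e.g. (numbers
   purely illustrative) b = 0, e = 10, s = 3 gives
   (10 + 3 - 1) / 3 == 4 iterations: 0, 3, 6, 9.  */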
1405
1406static tree
1407expand_oacc_collapse_init (const struct omp_for_data *fd,
1408 gimple_stmt_iterator *gsi,
1409 oacc_collapse *counts, tree bound_type,
1410 location_t loc)
1411{
1412 tree tiling = fd->tiling;
1413 tree total = build_int_cst (bound_type, 1);
1414 int ix;
1415
1416 gcc_assert (integer_onep (fd->loop.step));
1417 gcc_assert (integer_zerop (fd->loop.n1));
1418
1419 /* When tiling, the first operand of the tile clause applies to the
1420 innermost loop, and we work outwards from there. Seems
1421 backwards, but whatever. */
1422 for (ix = fd->collapse; ix--;)
1423 {
1424 const omp_for_data_loop *loop = &fd->loops[ix];
1425
1426 tree iter_type = TREE_TYPE (loop->v);
1427 tree diff_type = iter_type;
1428 tree plus_type = iter_type;
1429
1430 gcc_assert (loop->cond_code == fd->loop.cond_code);
1431
1432 if (POINTER_TYPE_P (iter_type))
1433 plus_type = sizetype;
1434 if (POINTER_TYPE_P (diff_type) || TYPE_UNSIGNED (diff_type))
1435 diff_type = signed_type_for (diff_type);
1436
1437 if (tiling)
1438 {
1439 tree num = build_int_cst (integer_type_node, fd->collapse);
1440 tree loop_no = build_int_cst (integer_type_node, ix);
1441 tree tile = TREE_VALUE (tiling);
1442 gcall *call
1443 = gimple_build_call_internal (IFN_GOACC_TILE, 5, num, loop_no, tile,
1444 /* gwv-outer=*/integer_zero_node,
1445 /* gwv-inner=*/integer_zero_node);
1446
1447 counts[ix].outer = create_tmp_var (iter_type, ".outer");
1448 counts[ix].tile = create_tmp_var (diff_type, ".tile");
1449 gimple_call_set_lhs (call, counts[ix].tile);
1450 gimple_set_location (call, loc);
1451 gsi_insert_before (gsi, call, GSI_SAME_STMT);
1452
1453 tiling = TREE_CHAIN (tiling);
1454 }
1455 else
1456 {
1457 counts[ix].tile = NULL;
1458 counts[ix].outer = loop->v;
1459 }
1460
1461 tree b = loop->n1;
1462 tree e = loop->n2;
1463 tree s = loop->step;
1464 bool up = loop->cond_code == LT_EXPR;
1465 tree dir = build_int_cst (diff_type, up ? +1 : -1);
1466 bool negating;
1467 tree expr;
1468
1469 b = force_gimple_operand_gsi (gsi, b, true, NULL_TREE,
1470 true, GSI_SAME_STMT);
1471 e = force_gimple_operand_gsi (gsi, e, true, NULL_TREE,
1472 true, GSI_SAME_STMT);
1473
1474 /* Convert the step, avoiding possible unsigned->signed overflow. */
1475 negating = !up && TYPE_UNSIGNED (TREE_TYPE (s));
1476 if (negating)
1477 s = fold_build1 (NEGATE_EXPR, TREE_TYPE (s), s);
1478 s = fold_convert (diff_type, s);
1479 if (negating)
1480 s = fold_build1 (NEGATE_EXPR, diff_type, s);
1481 s = force_gimple_operand_gsi (gsi, s, true, NULL_TREE,
1482 true, GSI_SAME_STMT);
1483
1484 /* Determine the range, avoiding possible unsigned->signed overflow. */
1485 negating = !up && TYPE_UNSIGNED (iter_type);
1486 expr = fold_build2 (MINUS_EXPR, plus_type,
1487 fold_convert (plus_type, negating ? b : e),
1488 fold_convert (plus_type, negating ? e : b));
1489 expr = fold_convert (diff_type, expr);
1490 if (negating)
1491 expr = fold_build1 (NEGATE_EXPR, diff_type, expr);
1492 tree range = force_gimple_operand_gsi
1493 (gsi, expr, true, NULL_TREE, true, GSI_SAME_STMT);
1494
1495 /* Determine number of iterations. */
1496 expr = fold_build2 (MINUS_EXPR, diff_type, range, dir);
1497 expr = fold_build2 (PLUS_EXPR, diff_type, expr, s);
1498 expr = fold_build2 (TRUNC_DIV_EXPR, diff_type, expr, s);
1499
1500 tree iters = force_gimple_operand_gsi (gsi, expr, true, NULL_TREE,
1501 true, GSI_SAME_STMT);
1502
1503 counts[ix].base = b;
1504 counts[ix].iters = iters;
1505 counts[ix].step = s;
1506
1507 total = fold_build2 (MULT_EXPR, bound_type, total,
1508 fold_convert (bound_type, iters));
1509 }
1510
1511 return total;
1512}
1513
1514/* Emit initializers for collapsed loop members. INNER is true if
1515 this is for the element loop of a TILE. IVAR is the outer
1516 loop iteration variable, from which collapsed loop iteration values
1517 are calculated. COUNTS array has been initialized by
1518 expand_oacc_collapse_inits. */
1519
1520static void
1521expand_oacc_collapse_vars (const struct omp_for_data *fd, bool inner,
1522 gimple_stmt_iterator *gsi,
1523 const oacc_collapse *counts, tree ivar)
1524{
1525 tree ivar_type = TREE_TYPE (ivar);
1526
1527 /* The most rapidly changing iteration variable is the innermost
1528 one. */
1529 for (int ix = fd->collapse; ix--;)
1530 {
1531 const omp_for_data_loop *loop = &fd->loops[ix];
1532 const oacc_collapse *collapse = &counts[ix];
1533 tree v = inner ? loop->v : collapse->outer;
1534 tree iter_type = TREE_TYPE (v);
1535 tree diff_type = TREE_TYPE (collapse->step);
1536 tree plus_type = iter_type;
1537 enum tree_code plus_code = PLUS_EXPR;
1538 tree expr;
1539
1540 if (POINTER_TYPE_P (iter_type))
1541 {
1542 plus_code = POINTER_PLUS_EXPR;
1543 plus_type = sizetype;
1544 }
1545
1546 expr = ivar;
1547 if (ix)
1548 {
1549 tree mod = fold_convert (ivar_type, collapse->iters);
1550 ivar = fold_build2 (TRUNC_DIV_EXPR, ivar_type, expr, mod);
1551 expr = fold_build2 (TRUNC_MOD_EXPR, ivar_type, expr, mod);
1552 ivar = force_gimple_operand_gsi (gsi, ivar, true, NULL_TREE,
1553 true, GSI_SAME_STMT);
1554 }
1555
1556 expr = fold_build2 (MULT_EXPR, diff_type, fold_convert (diff_type, expr),
1557 collapse->step);
1558 expr = fold_build2 (plus_code, iter_type,
1559 inner ? collapse->outer : collapse->base,
1560 fold_convert (plus_type, expr));
1561 expr = force_gimple_operand_gsi (gsi, expr, false, NULL_TREE,
1562 true, GSI_SAME_STMT);
1563 gassign *ass = gimple_build_assign (v, expr);
1564 gsi_insert_before (gsi, ass, GSI_SAME_STMT);
1565 }
1566}
1567
1568/* Helper function for expand_omp_{for_*,simd}. If this is the outermost
1569 of the combined collapse > 1 loop constructs, generate code like:
1570 if (__builtin_expect (N32 cond3 N31, 0)) goto ZERO_ITER_BB;
1571 if (cond3 is <)
1572 adj = STEP3 - 1;
1573 else
1574 adj = STEP3 + 1;
1575 count3 = (adj + N32 - N31) / STEP3;
1576 if (__builtin_expect (N22 cond2 N21, 0)) goto ZERO_ITER_BB;
1577 if (cond2 is <)
1578 adj = STEP2 - 1;
1579 else
1580 adj = STEP2 + 1;
1581 count2 = (adj + N22 - N21) / STEP2;
1582 if (__builtin_expect (N12 cond1 N11, 0)) goto ZERO_ITER_BB;
1583 if (cond1 is <)
1584 adj = STEP1 - 1;
1585 else
1586 adj = STEP1 + 1;
1587 count1 = (adj + N12 - N11) / STEP1;
1588 count = count1 * count2 * count3;
1589 Furthermore, if ZERO_ITER_BB is NULL, create a BB which does:
1590 count = 0;
1591 and set ZERO_ITER_BB to that bb. If this isn't the outermost
1592 of the combined loop constructs, just initialize COUNTS array
1593 from the _looptemp_ clauses. */
1594
1595/* NOTE: It *could* be better to moosh all of the BBs together,
1596 creating one larger BB with all the computation and the unexpected
1597 jump at the end. I.e.
1598
1599 bool zero3, zero2, zero1, zero;
1600
1601 zero3 = N32 c3 N31;
1602 count3 = (N32 - N31) /[cl] STEP3;
1603 zero2 = N22 c2 N21;
1604 count2 = (N22 - N21) /[cl] STEP2;
1605 zero1 = N12 c1 N11;
1606 count1 = (N12 - N11) /[cl] STEP1;
1607 zero = zero3 || zero2 || zero1;
1608 count = count1 * count2 * count3;
1609 if (__builtin_expect(zero, false)) goto zero_iter_bb;
1610
1611 After all, we expect the zero=false, and thus we expect to have to
1612 evaluate all of the comparison expressions, so short-circuiting
1613 oughtn't be a win. Since the condition isn't protecting a
1614 denominator, we're not concerned about divide-by-zero, so we can
1615 fully evaluate count even if a numerator turned out to be wrong.
1616
1617 It seems like putting this all together would create much better
1618 scheduling opportunities, and less pressure on the chip's branch
1619 predictor. */
1620
1621static void
1622expand_omp_for_init_counts (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
1623 basic_block &entry_bb, tree *counts,
1624 basic_block &zero_iter1_bb, int &first_zero_iter1,
1625 basic_block &zero_iter2_bb, int &first_zero_iter2,
1626 basic_block &l2_dom_bb)
1627{
1628 tree t, type = TREE_TYPE (fd->loop.v);
1629 edge e, ne;
1630 int i;
1631
1632 /* Collapsed loops need work for expansion into SSA form. */
1633 gcc_assert (!gimple_in_ssa_p (cfun));
1634
1635 if (gimple_omp_for_combined_into_p (fd->for_stmt)
1636 && TREE_CODE (fd->loop.n2) != INTEGER_CST)
1637 {
1638 gcc_assert (fd->ordered == 0);
1639 /* First two _looptemp_ clauses are for istart/iend, counts[0]
1640 isn't supposed to be handled, as the inner loop doesn't
1641 use it. */
1642 tree innerc = omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
1643 OMP_CLAUSE__LOOPTEMP_);
1644 gcc_assert (innerc);
1645 for (i = 0; i < fd->collapse; i++)
1646 {
1647 innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
1648 OMP_CLAUSE__LOOPTEMP_);
1649 gcc_assert (innerc);
1650 if (i)
1651 counts[i] = OMP_CLAUSE_DECL (innerc);
1652 else
1653 counts[0] = NULL_TREE;
1654 }
1655 return;
1656 }
1657
1658 for (i = fd->collapse; i < fd->ordered; i++)
1659 {
1660 tree itype = TREE_TYPE (fd->loops[i].v);
1661 counts[i] = NULL_TREE;
1662 t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
1663 fold_convert (itype, fd->loops[i].n1),
1664 fold_convert (itype, fd->loops[i].n2));
1665 if (t && integer_zerop (t))
1666 {
1667 for (i = fd->collapse; i < fd->ordered; i++)
1668 counts[i] = build_int_cst (type, 0);
1669 break;
1670 }
1671 }
1672 for (i = 0; i < (fd->ordered ? fd->ordered : fd->collapse); i++)
1673 {
1674 tree itype = TREE_TYPE (fd->loops[i].v);
1675
1676 if (i >= fd->collapse && counts[i])
1677 continue;
1678 if ((SSA_VAR_P (fd->loop.n2) || i >= fd->collapse)
1679 && ((t = fold_binary (fd->loops[i].cond_code, boolean_type_node,
1680 fold_convert (itype, fd->loops[i].n1),
1681 fold_convert (itype, fd->loops[i].n2)))
1682 == NULL_TREE || !integer_onep (t)))
1683 {
1684 gcond *cond_stmt;
1685 tree n1, n2;
1686 n1 = fold_convert (itype, unshare_expr (fd->loops[i].n1));
1687 n1 = force_gimple_operand_gsi (gsi, n1, true, NULL_TREE,
1688 true, GSI_SAME_STMT);
1689 n2 = fold_convert (itype, unshare_expr (fd->loops[i].n2));
1690 n2 = force_gimple_operand_gsi (gsi, n2, true, NULL_TREE,
1691 true, GSI_SAME_STMT);
1692 cond_stmt = gimple_build_cond (fd->loops[i].cond_code, n1, n2,
1693 NULL_TREE, NULL_TREE);
1694 gsi_insert_before (gsi, cond_stmt, GSI_SAME_STMT);
1695 if (walk_tree (gimple_cond_lhs_ptr (cond_stmt),
1696 expand_omp_regimplify_p, NULL, NULL)
1697 || walk_tree (gimple_cond_rhs_ptr (cond_stmt),
1698 expand_omp_regimplify_p, NULL, NULL))
1699 {
1700 *gsi = gsi_for_stmt (cond_stmt);
1701 gimple_regimplify_operands (cond_stmt, gsi);
1702 }
1703 e = split_block (entry_bb, cond_stmt);
1704 basic_block &zero_iter_bb
1705 = i < fd->collapse ? zero_iter1_bb : zero_iter2_bb;
1706 int &first_zero_iter
1707 = i < fd->collapse ? first_zero_iter1 : first_zero_iter2;
1708 if (zero_iter_bb == NULL)
1709 {
1710 gassign *assign_stmt;
1711 first_zero_iter = i;
1712 zero_iter_bb = create_empty_bb (entry_bb);
1713 add_bb_to_loop (zero_iter_bb, entry_bb->loop_father);
1714 *gsi = gsi_after_labels (zero_iter_bb);
1715 if (i < fd->collapse)
1716 assign_stmt = gimple_build_assign (fd->loop.n2,
1717 build_zero_cst (type));
1718 else
1719 {
1720 counts[i] = create_tmp_reg (type, ".count");
1721 assign_stmt
1722 = gimple_build_assign (counts[i], build_zero_cst (type));
1723 }
1724 gsi_insert_before (gsi, assign_stmt, GSI_SAME_STMT);
1725 set_immediate_dominator (CDI_DOMINATORS, zero_iter_bb,
1726 entry_bb);
1727 }
1728 ne = make_edge (entry_bb, zero_iter_bb, EDGE_FALSE_VALUE);
1729 ne->probability = profile_probability::very_unlikely ();
1730 e->flags = EDGE_TRUE_VALUE;
1731 e->probability = ne->probability.invert ();
1732 if (l2_dom_bb == NULL)
1733 l2_dom_bb = entry_bb;
1734 entry_bb = e->dest;
1735 *gsi = gsi_last_nondebug_bb (entry_bb);
1736 }
1737
1738 if (POINTER_TYPE_P (itype))
1739 itype = signed_type_for (itype);
1740 t = build_int_cst (itype, (fd->loops[i].cond_code == LT_EXPR
1741 ? -1 : 1));
1742 t = fold_build2 (PLUS_EXPR, itype,
1743 fold_convert (itype, fd->loops[i].step), t);
1744 t = fold_build2 (PLUS_EXPR, itype, t,
1745 fold_convert (itype, fd->loops[i].n2));
1746 t = fold_build2 (MINUS_EXPR, itype, t,
1747 fold_convert (itype, fd->loops[i].n1));
      /* ??? We could probably use CEIL_DIV_EXPR instead of
	 TRUNC_DIV_EXPR and adjust by hand, except that we might not
	 be able to generate the same code in the end, because
	 generically we don't know that the values involved must be
	 negative for GT.  */
      if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	t = fold_build2 (TRUNC_DIV_EXPR, itype,
			 fold_build1 (NEGATE_EXPR, itype, t),
			 fold_build1 (NEGATE_EXPR, itype,
				      fold_convert (itype,
						    fd->loops[i].step)));
      else
	t = fold_build2 (TRUNC_DIV_EXPR, itype, t,
			 fold_convert (itype, fd->loops[i].step));
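      /* For example (illustrative only): for (i = 10; i > 2; i -= 3)
	 the computation above is t = (step + 1) + n2 - n1
	 = (-3 + 1) + 2 - 10 = -10, and -10 trunc-divided by step -3
	 gives 3, matching the three iterations i = 10, 7, 4.  */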
      t = fold_convert (type, t);
      if (TREE_CODE (t) == INTEGER_CST)
	counts[i] = t;
      else
	{
	  if (i < fd->collapse || i != first_zero_iter2)
	    counts[i] = create_tmp_reg (type, ".count");
	  expand_omp_build_assign (gsi, counts[i], t);
	}
      if (SSA_VAR_P (fd->loop.n2) && i < fd->collapse)
	{
	  if (i == 0)
	    t = counts[0];
	  else
	    t = fold_build2 (MULT_EXPR, type, fd->loop.n2, counts[i]);
	  expand_omp_build_assign (gsi, fd->loop.n2, t);
	}
    }
}

/* Helper function for expand_omp_{for_*,simd}.  Generate code like:
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
   if this loop doesn't have an inner loop construct combined with it.
   If it does have an inner loop construct combined with it and the
   iteration count isn't known constant, store values from counts array
   into its _looptemp_ temporaries instead.  */
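/* For instance (an illustrative example): with collapse(2) over
     for (i = 0; i < 4; i++) for (j = 0; j < 3; j++)
   count2 is 3, and logical iteration T = 7 decomposes as
     j = 0 + (7 % 3) * 1 = 1;  T = 7 / 3 = 2;  i = 0 + 2 * 1 = 2,
   i.e. the row-major inverse of T = i * count2 + j.  */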

static void
expand_omp_for_init_vars (struct omp_for_data *fd, gimple_stmt_iterator *gsi,
			  tree *counts, gimple *inner_stmt, tree startvar)
{
  int i;
  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      /* If fd->loop.n2 is constant, then no propagation of the counts
	 is needed, they are constant.  */
      if (TREE_CODE (fd->loop.n2) == INTEGER_CST)
	return;

      tree clauses = gimple_code (inner_stmt) != GIMPLE_OMP_FOR
		     ? gimple_omp_taskreg_clauses (inner_stmt)
		     : gimple_omp_for_clauses (inner_stmt);
      /* First two _looptemp_ clauses are for istart/iend, counts[0]
	 isn't supposed to be handled, as the inner loop doesn't
	 use it.  */
      tree innerc = omp_find_clause (clauses, OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      for (i = 0; i < fd->collapse; i++)
	{
	  innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				    OMP_CLAUSE__LOOPTEMP_);
	  gcc_assert (innerc);
	  if (i)
	    {
	      tree tem = OMP_CLAUSE_DECL (innerc);
	      tree t = fold_convert (TREE_TYPE (tem), counts[i]);
	      t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					    false, GSI_CONTINUE_LINKING);
	      gassign *stmt = gimple_build_assign (tem, t);
	      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	    }
	}
      return;
    }

  tree type = TREE_TYPE (fd->loop.v);
  tree tem = create_tmp_reg (type, ".tem");
  gassign *stmt = gimple_build_assign (tem, startvar);
  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);

  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v), itype, t;
      itype = vtype;
      if (POINTER_TYPE_P (vtype))
	itype = signed_type_for (vtype);
      if (i != 0)
	t = fold_build2 (TRUNC_MOD_EXPR, type, tem, counts[i]);
      else
	t = tem;
      t = fold_convert (itype, t);
      t = fold_build2 (MULT_EXPR, itype, t,
		       fold_convert (itype, fd->loops[i].step));
      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].n1, t);
      else
	t = fold_build2 (PLUS_EXPR, itype, fd->loops[i].n1, t);
      t = force_gimple_operand_gsi (gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false,
				    GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
      if (i != 0)
	{
	  t = fold_build2 (TRUNC_DIV_EXPR, type, tem, counts[i]);
	  t = force_gimple_operand_gsi (gsi, t, false, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (tem, t);
	  gsi_insert_after (gsi, stmt, GSI_CONTINUE_LINKING);
	}
    }
}

/* Helper function for expand_omp_for_*.  Generate code like:
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto BODY_BB; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto BODY_BB; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto BODY_BB;  */
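/* E.g. (illustrative): in a collapse(3) nest the chain is entered at L10
   after the innermost increment; each level either branches straight back
   to BODY_BB or falls through to reset its own variable and bump the
   next-outer one, so at most one reset cascade runs per logical
   iteration.  */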

static basic_block
extract_omp_for_update_vars (struct omp_for_data *fd, basic_block cont_bb,
			     basic_block body_bb)
{
  basic_block last_bb, bb, collapse_bb = NULL;
  int i;
  gimple_stmt_iterator gsi;
  edge e;
  tree t;
  gimple *stmt;

  last_bb = cont_bb;
  for (i = fd->collapse - 1; i >= 0; i--)
    {
      tree vtype = TREE_TYPE (fd->loops[i].v);

      bb = create_empty_bb (last_bb);
      add_bb_to_loop (bb, last_bb->loop_father);
      gsi = gsi_start_bb (bb);

      if (i < fd->collapse - 1)
	{
	  e = make_edge (last_bb, bb, EDGE_FALSE_VALUE);
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (1, 8);

	  t = fd->loops[i + 1].n1;
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (fd->loops[i + 1].v)
					&& TREE_ADDRESSABLE (fd->loops[i
								       + 1].v),
					NULL_TREE, false,
					GSI_CONTINUE_LINKING);
	  stmt = gimple_build_assign (fd->loops[i + 1].v, t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	}
      else
	collapse_bb = bb;

      set_immediate_dominator (CDI_DOMINATORS, bb, last_bb);

      if (POINTER_TYPE_P (vtype))
	t = fold_build_pointer_plus (fd->loops[i].v, fd->loops[i].step);
      else
	t = fold_build2 (PLUS_EXPR, vtype, fd->loops[i].v, fd->loops[i].step);
      t = force_gimple_operand_gsi (&gsi, t,
				    DECL_P (fd->loops[i].v)
				    && TREE_ADDRESSABLE (fd->loops[i].v),
				    NULL_TREE, false, GSI_CONTINUE_LINKING);
      stmt = gimple_build_assign (fd->loops[i].v, t);
      gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);

      if (i > 0)
	{
	  t = fd->loops[i].n2;
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  tree v = fd->loops[i].v;
	  if (DECL_P (v) && TREE_ADDRESSABLE (v))
	    v = force_gimple_operand_gsi (&gsi, v, true, NULL_TREE,
					  false, GSI_CONTINUE_LINKING);
	  t = fold_build2 (fd->loops[i].cond_code, boolean_type_node, v, t);
	  stmt = gimple_build_cond_empty (t);
	  gsi_insert_after (&gsi, stmt, GSI_CONTINUE_LINKING);
	  e = make_edge (bb, body_bb, EDGE_TRUE_VALUE);
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (7, 8);
	}
      else
	make_edge (bb, body_bb, EDGE_FALLTHRU);
      last_bb = bb;
    }

  return collapse_bb;
}

/* Expand #pragma omp ordered depend(source).  */
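/* For illustration (a sketch): with fd->iter_type == long_integer_type_node
   this emits a call such as
     GOMP_doacross_post (&.orditera[0]);
   where .orditera is the counts[fd->ordered] array holding the current
   iteration vector for depend(source).  */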

static void
expand_omp_ordered_source (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			   tree *counts, location_t loc)
{
  enum built_in_function source_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_POST : BUILT_IN_GOMP_DOACROSS_ULL_POST;
  gimple *g
    = gimple_build_call (builtin_decl_explicit (source_ix), 1,
			 build_fold_addr_expr (counts[fd->ordered]));
  gimple_set_location (g, loc);
  gsi_insert_before (gsi, g, GSI_SAME_STMT);
}

/* Expand a single depend from #pragma omp ordered depend(sink:...).  */
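/* E.g. (illustrative sketch): inside a #pragma omp for ordered(2) loop
   over i and j, the directive
     #pragma omp ordered depend(sink: i-1, j+1)
   is lowered to roughly
     if (i - 1 >= N11 && j + 1 < N22)
       GOMP_doacross_wait ((i - 1) - N11, (j + 1) - N21);
   i.e. the offsets are normalized to zero-based iteration counts and the
   wait is skipped for sink iterations outside the iteration space.  */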

static void
expand_omp_ordered_sink (gimple_stmt_iterator *gsi, struct omp_for_data *fd,
			 tree *counts, tree c, location_t loc)
{
  auto_vec<tree, 10> args;
  enum built_in_function sink_ix
    = fd->iter_type == long_integer_type_node
      ? BUILT_IN_GOMP_DOACROSS_WAIT : BUILT_IN_GOMP_DOACROSS_ULL_WAIT;
  tree t, off, coff = NULL_TREE, deps = OMP_CLAUSE_DECL (c), cond = NULL_TREE;
  int i;
  gimple_stmt_iterator gsi2 = *gsi;
  bool warned_step = false;

  for (i = 0; i < fd->ordered; i++)
    {
      tree step = NULL_TREE;
      off = TREE_PURPOSE (deps);
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	}
      if (!integer_zerop (off))
	{
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      || fd->loops[i].cond_code == GT_EXPR);
	  bool forward = fd->loops[i].cond_code == LT_EXPR;
	  if (step)
	    {
	      /* Non-simple Fortran DO loops.  If step is variable, we
		 don't know even the direction at compile time, so we
		 cannot warn.  */
	      if (TREE_CODE (step) != INTEGER_CST)
		break;
	      forward = tree_int_cst_sgn (step) != -1;
	    }
	  if (forward ^ OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    warning_at (loc, 0, "%<depend(sink)%> clause waiting for "
				"lexically later iteration");
	  break;
	}
      deps = TREE_CHAIN (deps);
    }
  /* If all offsets corresponding to the collapsed loops are zero,
     this depend clause can be ignored.  FIXME: a flush is still
     needed, though; we should emit one __sync_synchronize () for it
     (perhaps conditionally).  Solve this together with the
     conservative dependence folding optimization.
  if (i >= fd->collapse)
    return;  */

  deps = OMP_CLAUSE_DECL (c);
  gsi_prev (&gsi2);
  edge e1 = split_block (gsi_bb (gsi2), gsi_stmt (gsi2));
  edge e2 = split_block_after_labels (e1->dest);

  gsi2 = gsi_after_labels (e1->dest);
  *gsi = gsi_last_bb (e1->src);
  for (i = 0; i < fd->ordered; i++)
    {
      tree itype = TREE_TYPE (fd->loops[i].v);
      tree step = NULL_TREE;
      tree orig_off = NULL_TREE;
      if (POINTER_TYPE_P (itype))
	itype = sizetype;
      if (i)
	deps = TREE_CHAIN (deps);
      off = TREE_PURPOSE (deps);
      if (TREE_CODE (off) == TRUNC_DIV_EXPR)
	{
	  step = TREE_OPERAND (off, 1);
	  off = TREE_OPERAND (off, 0);
	  gcc_assert (fd->loops[i].cond_code == LT_EXPR
		      && integer_onep (fd->loops[i].step)
		      && !POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)));
	}
      tree s = fold_convert_loc (loc, itype, step ? step : fd->loops[i].step);
      if (step)
	{
	  off = fold_convert_loc (loc, itype, off);
	  orig_off = off;
	  off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
	}

      if (integer_zerop (off))
	t = boolean_true_node;
      else
	{
	  tree a;
	  tree co = fold_convert_loc (loc, itype, off);
	  if (POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v)))
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		co = fold_build1_loc (loc, NEGATE_EXPR, itype, co);
	      a = fold_build2_loc (loc, POINTER_PLUS_EXPR,
				   TREE_TYPE (fd->loops[i].v), fd->loops[i].v,
				   co);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    a = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  else
	    a = fold_build2_loc (loc, PLUS_EXPR, TREE_TYPE (fd->loops[i].v),
				 fd->loops[i].v, co);
	  if (step)
	    {
	      tree t1, t2;
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t1 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      else
		t1 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t2 = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				      fd->loops[i].n2);
	      else
		t2 = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				      fd->loops[i].n1);
	      t = fold_build2_loc (loc, LT_EXPR, boolean_type_node,
				   step, build_int_cst (TREE_TYPE (step), 0));
	      if (TREE_CODE (step) != INTEGER_CST)
		{
		  t1 = unshare_expr (t1);
		  t1 = force_gimple_operand_gsi (gsi, t1, true, NULL_TREE,
						 false, GSI_CONTINUE_LINKING);
		  t2 = unshare_expr (t2);
		  t2 = force_gimple_operand_gsi (gsi, t2, true, NULL_TREE,
						 false, GSI_CONTINUE_LINKING);
		}
	      t = fold_build3_loc (loc, COND_EXPR, boolean_type_node,
				   t, t2, t1);
	    }
	  else if (fd->loops[i].cond_code == LT_EXPR)
	    {
	      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
		t = fold_build2_loc (loc, GE_EXPR, boolean_type_node, a,
				     fd->loops[i].n1);
	      else
		t = fold_build2_loc (loc, LT_EXPR, boolean_type_node, a,
				     fd->loops[i].n2);
	    }
	  else if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	    t = fold_build2_loc (loc, GT_EXPR, boolean_type_node, a,
				 fd->loops[i].n2);
	  else
	    t = fold_build2_loc (loc, LE_EXPR, boolean_type_node, a,
				 fd->loops[i].n1);
	}
      if (cond)
	cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node, cond, t);
      else
	cond = t;

      off = fold_convert_loc (loc, itype, off);

      if (step
	  || (fd->loops[i].cond_code == LT_EXPR
	      ? !integer_onep (fd->loops[i].step)
	      : !integer_minus_onep (fd->loops[i].step)))
	{
	  if (step == NULL_TREE
	      && TYPE_UNSIGNED (itype)
	      && fd->loops[i].cond_code == GT_EXPR)
	    t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype, off,
				 fold_build1_loc (loc, NEGATE_EXPR, itype,
						  s));
	  else
	    t = fold_build2_loc (loc, TRUNC_MOD_EXPR, itype,
				 orig_off ? orig_off : off, s);
	  t = fold_build2_loc (loc, EQ_EXPR, boolean_type_node, t,
			       build_int_cst (itype, 0));
	  if (integer_zerop (t) && !warned_step)
	    {
	      warning_at (loc, 0, "%<depend(sink)%> refers to iteration never "
				  "in the iteration space");
	      warned_step = true;
	    }
	  cond = fold_build2_loc (loc, BIT_AND_EXPR, boolean_type_node,
				  cond, t);
	}

      if (i <= fd->collapse - 1 && fd->collapse > 1)
	t = fd->loop.v;
      else if (counts[i])
	t = counts[i];
      else
	{
	  t = fold_build2_loc (loc, MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, fd->loops[i].n1);
	  t = fold_convert_loc (loc, fd->iter_type, t);
	}
      if (step)
	/* We have divided off by step already earlier.  */;
      else if (TYPE_UNSIGNED (itype) && fd->loops[i].cond_code == GT_EXPR)
	off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off,
			       fold_build1_loc (loc, NEGATE_EXPR, itype,
						s));
      else
	off = fold_build2_loc (loc, TRUNC_DIV_EXPR, itype, off, s);
      if (OMP_CLAUSE_DEPEND_SINK_NEGATIVE (deps))
	off = fold_build1_loc (loc, NEGATE_EXPR, itype, off);
      off = fold_convert_loc (loc, fd->iter_type, off);
      if (i <= fd->collapse - 1 && fd->collapse > 1)
	{
	  if (i)
	    off = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, coff,
				   off);
	  if (i < fd->collapse - 1)
	    {
	      coff = fold_build2_loc (loc, MULT_EXPR, fd->iter_type, off,
				      counts[i]);
	      continue;
	    }
	}
      off = unshare_expr (off);
      t = fold_build2_loc (loc, PLUS_EXPR, fd->iter_type, t, off);
      t = force_gimple_operand_gsi (&gsi2, t, true, NULL_TREE,
				    true, GSI_SAME_STMT);
      args.safe_push (t);
    }
  gimple *g = gimple_build_call_vec (builtin_decl_explicit (sink_ix), args);
  gimple_set_location (g, loc);
  gsi_insert_before (&gsi2, g, GSI_SAME_STMT);

  cond = unshare_expr (cond);
  cond = force_gimple_operand_gsi (gsi, cond, true, NULL_TREE, false,
				   GSI_CONTINUE_LINKING);
  gsi_insert_after (gsi, gimple_build_cond_empty (cond), GSI_NEW_STMT);
  edge e3 = make_edge (e1->src, e2->dest, EDGE_FALSE_VALUE);
  e3->probability = profile_probability::guessed_always ().apply_scale (1, 8);
  e1->probability = e3->probability.invert ();
  e1->flags = EDGE_TRUE_VALUE;
  set_immediate_dominator (CDI_DOMINATORS, e2->dest, e1->src);

  *gsi = gsi_after_labels (e2->dest);
}

/* Expand all #pragma omp ordered depend(source) and
   #pragma omp ordered depend(sink:...) constructs in the current
   #pragma omp for ordered(n) region.  */
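/* A typical use (illustrative sketch) is a doacross wavefront:
     #pragma omp for ordered(2)
     for (i = 0; i < n; i++)
       for (j = 0; j < m; j++)
	 {
	   #pragma omp ordered depend(sink: i-1, j)
	   work (i, j);
	   #pragma omp ordered depend(source)
	 }
   Each sink becomes a guarded GOMP_doacross_wait and each source a
   GOMP_doacross_post on the shared iteration-count array.  */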

static void
expand_omp_ordered_source_sink (struct omp_region *region,
				struct omp_for_data *fd, tree *counts,
				basic_block cont_bb)
{
  struct omp_region *inner;
  int i;
  for (i = fd->collapse - 1; i < fd->ordered; i++)
    if (i == fd->collapse - 1 && fd->collapse > 1)
      counts[i] = NULL_TREE;
    else if (i >= fd->collapse && !cont_bb)
      counts[i] = build_zero_cst (fd->iter_type);
    else if (!POINTER_TYPE_P (TREE_TYPE (fd->loops[i].v))
	     && integer_onep (fd->loops[i].step))
      counts[i] = NULL_TREE;
    else
      counts[i] = create_tmp_var (fd->iter_type, ".orditer");
  tree atype
    = build_array_type_nelts (fd->iter_type, fd->ordered - fd->collapse + 1);
  counts[fd->ordered] = create_tmp_var (atype, ".orditera");
  TREE_ADDRESSABLE (counts[fd->ordered]) = 1;

  for (inner = region->inner; inner; inner = inner->next)
    if (inner->type == GIMPLE_OMP_ORDERED)
      {
	gomp_ordered *ord_stmt = inner->ord_stmt;
	gimple_stmt_iterator gsi = gsi_for_stmt (ord_stmt);
	location_t loc = gimple_location (ord_stmt);
	tree c;
	for (c = gimple_omp_ordered_clauses (ord_stmt);
	     c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE)
	    break;
	if (c)
	  expand_omp_ordered_source (&gsi, fd, counts, loc);
	for (c = gimple_omp_ordered_clauses (ord_stmt);
	     c; c = OMP_CLAUSE_CHAIN (c))
	  if (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK)
	    expand_omp_ordered_sink (&gsi, fd, counts, c, loc);
	gsi_remove (&gsi, true);
      }
}

/* Wrap the body into fd->ordered - fd->collapse loops that aren't
   collapsed.  */
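/* Rough shape of the code generated below for one such loop V (an
   illustrative sketch only):
     body_bb:	 V = N1;  .orditer = 0;  .orditera[k] = 0;
     new_header: if (V cond N2) goto new_body; else goto exit;
     new_body:	 ... original body ...
     cont_bb:	 V += STEP;  .orditer++;  .orditera[k] = .orditer;
		 goto new_header;  */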

static basic_block
expand_omp_for_ordered_loops (struct omp_for_data *fd, tree *counts,
			      basic_block cont_bb, basic_block body_bb,
			      bool ordered_lastprivate)
{
  if (fd->ordered == fd->collapse)
    return cont_bb;

  if (!cont_bb)
    {
      gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
      for (int i = fd->collapse; i < fd->ordered; i++)
	{
	  tree type = TREE_TYPE (fd->loops[i].v);
	  tree n1 = fold_convert (type, fd->loops[i].n1);
	  expand_omp_build_assign (&gsi, fd->loops[i].v, n1);
	  tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			      size_int (i - fd->collapse + 1),
			      NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
	}
      return NULL;
    }

  for (int i = fd->ordered - 1; i >= fd->collapse; i--)
    {
      tree t, type = TREE_TYPE (fd->loops[i].v);
      gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
      expand_omp_build_assign (&gsi, fd->loops[i].v,
			       fold_convert (type, fd->loops[i].n1));
      if (counts[i])
	expand_omp_build_assign (&gsi, counts[i],
				 build_zero_cst (fd->iter_type));
      tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			  size_int (i - fd->collapse + 1),
			  NULL_TREE, NULL_TREE);
      expand_omp_build_assign (&gsi, aref, build_zero_cst (fd->iter_type));
      if (!gsi_end_p (gsi))
	gsi_prev (&gsi);
      else
	gsi = gsi_last_bb (body_bb);
      edge e1 = split_block (body_bb, gsi_stmt (gsi));
      basic_block new_body = e1->dest;
      if (body_bb == cont_bb)
	cont_bb = new_body;
      edge e2 = NULL;
      basic_block new_header;
      if (EDGE_COUNT (cont_bb->preds) > 0)
	{
	  gsi = gsi_last_bb (cont_bb);
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (fd->loops[i].v,
					 fold_convert (sizetype,
						       fd->loops[i].step));
	  else
	    t = fold_build2 (PLUS_EXPR, type, fd->loops[i].v,
			     fold_convert (type, fd->loops[i].step));
	  expand_omp_build_assign (&gsi, fd->loops[i].v, t);
	  if (counts[i])
	    {
	      t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[i],
			       build_int_cst (fd->iter_type, 1));
	      expand_omp_build_assign (&gsi, counts[i], t);
	      t = counts[i];
	    }
	  else
	    {
	      t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[i].v),
			       fd->loops[i].v, fd->loops[i].n1);
	      t = fold_convert (fd->iter_type, t);
	      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	    }
	  aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			 size_int (i - fd->collapse + 1),
			 NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, t);
	  gsi_prev (&gsi);
	  e2 = split_block (cont_bb, gsi_stmt (gsi));
	  new_header = e2->dest;
	}
      else
	new_header = cont_bb;
      gsi = gsi_after_labels (new_header);
      tree v = force_gimple_operand_gsi (&gsi, fd->loops[i].v, true, NULL_TREE,
					 true, GSI_SAME_STMT);
      tree n2
	= force_gimple_operand_gsi (&gsi, fold_convert (type, fd->loops[i].n2),
				    true, NULL_TREE, true, GSI_SAME_STMT);
      t = build2 (fd->loops[i].cond_code, boolean_type_node, v, n2);
      gsi_insert_before (&gsi, gimple_build_cond_empty (t), GSI_NEW_STMT);
      edge e3 = split_block (new_header, gsi_stmt (gsi));
      cont_bb = e3->dest;
      remove_edge (e1);
      make_edge (body_bb, new_header, EDGE_FALLTHRU);
      e3->flags = EDGE_FALSE_VALUE;
      e3->probability
	= profile_probability::guessed_always ().apply_scale (1, 8);
      e1 = make_edge (new_header, new_body, EDGE_TRUE_VALUE);
      e1->probability = e3->probability.invert ();

      set_immediate_dominator (CDI_DOMINATORS, new_header, body_bb);
      set_immediate_dominator (CDI_DOMINATORS, new_body, new_header);

      if (e2)
	{
	  struct loop *loop = alloc_loop ();
	  loop->header = new_header;
	  loop->latch = e2->src;
	  add_loop (loop, body_bb->loop_father);
	}
    }

  /* If there are any lastprivate clauses and it is possible some loops
     might have zero iterations, ensure all the decls are initialized,
     otherwise we could crash evaluating C++ class iterators with lastprivate
     clauses.  */
  bool need_inits = false;
  for (int i = fd->collapse; ordered_lastprivate && i < fd->ordered; i++)
    if (need_inits)
      {
	tree type = TREE_TYPE (fd->loops[i].v);
	gimple_stmt_iterator gsi = gsi_after_labels (body_bb);
	expand_omp_build_assign (&gsi, fd->loops[i].v,
				 fold_convert (type, fd->loops[i].n1));
      }
    else
      {
	tree type = TREE_TYPE (fd->loops[i].v);
	tree this_cond = fold_build2 (fd->loops[i].cond_code,
				      boolean_type_node,
				      fold_convert (type, fd->loops[i].n1),
				      fold_convert (type, fd->loops[i].n2));
	if (!integer_onep (this_cond))
	  need_inits = true;
      }

  return cont_bb;
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with any schedule.  Given parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	more = GOMP_loop_foo_start (N1, N2, STEP, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	iend = iend0;
    L1:
	BODY;
	V += STEP;
	if (V cond iend) goto L1; else goto L2;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    If this is a combined omp parallel loop, instead of the call to
    GOMP_loop_foo_start, we call GOMP_loop_foo_next.
    If this is gimple_omp_for_combined_p loop, then instead of assigning
    V and iend in L0 we assign the first two _looptemp_ clause decls of the
    inner GIMPLE_OMP_FOR and V += STEP; and
    if (V cond iend) goto L1; else goto L2; are removed.

    For collapsed loops, given parameters:
      collapse(3)
      for (V1 = N11; V1 cond1 N12; V1 += STEP1)
	for (V2 = N21; V2 cond2 N22; V2 += STEP2)
	  for (V3 = N31; V3 cond3 N32; V3 += STEP3)
	    BODY;

    we generate pseudocode

	if (__builtin_expect (N32 cond3 N31, 0)) goto Z0;
	if (cond3 is <)
	  adj = STEP3 - 1;
	else
	  adj = STEP3 + 1;
	count3 = (adj + N32 - N31) / STEP3;
	if (__builtin_expect (N22 cond2 N21, 0)) goto Z0;
	if (cond2 is <)
	  adj = STEP2 - 1;
	else
	  adj = STEP2 + 1;
	count2 = (adj + N22 - N21) / STEP2;
	if (__builtin_expect (N12 cond1 N11, 0)) goto Z0;
	if (cond1 is <)
	  adj = STEP1 - 1;
	else
	  adj = STEP1 + 1;
	count1 = (adj + N12 - N11) / STEP1;
	count = count1 * count2 * count3;
	goto Z1;
    Z0:
	count = 0;
    Z1:
	more = GOMP_loop_foo_start (0, count, 1, CHUNK, &istart0, &iend0);
	if (more) goto L0; else goto L3;
    L0:
	V = istart0;
	T = V;
	V3 = N31 + (T % count3) * STEP3;
	T = T / count3;
	V2 = N21 + (T % count2) * STEP2;
	T = T / count2;
	V1 = N11 + T * STEP1;
	iend = iend0;
    L1:
	BODY;
	V += 1;
	if (V < iend) goto L10; else goto L2;
    L10:
	V3 += STEP3;
	if (V3 cond3 N32) goto L1; else goto L11;
    L11:
	V3 = N31;
	V2 += STEP2;
	if (V2 cond2 N22) goto L1; else goto L12;
    L12:
	V2 = N21;
	V1 += STEP1;
	goto L1;
    L2:
	if (GOMP_loop_foo_next (&istart0, &iend0)) goto L0; else goto L3;
    L3:

    */
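/* Concretely (an illustrative example): for
     #pragma omp for schedule(dynamic, 4)
     for (i = 0; i < n; i++) body;
   START_FN is BUILT_IN_GOMP_LOOP_DYNAMIC_START and NEXT_FN is
   BUILT_IN_GOMP_LOOP_DYNAMIC_NEXT, so the pseudocode above reads
     more = GOMP_loop_dynamic_start (0, n, 1, 4, &istart0, &iend0);
     ...
     if (GOMP_loop_dynamic_next (&istart0, &iend0)) goto L0; else goto L3;  */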

static void
expand_omp_for_generic (struct omp_region *region,
			struct omp_for_data *fd,
			enum built_in_function start_fn,
			enum built_in_function next_fn,
			gimple *inner_stmt)
{
  tree type, istart0, iend0, iend;
  tree t, vmain, vback, bias = NULL_TREE;
  basic_block entry_bb, cont_bb, exit_bb, l0_bb, l1_bb, collapse_bb;
  basic_block l2_bb = NULL, l3_bb = NULL;
  gimple_stmt_iterator gsi;
  gassign *assign_stmt;
  bool in_combined_parallel = is_combined_parallel (region);
  bool broken_loop = region->cont == NULL;
  edge e, ne;
  tree *counts = NULL;
  int i;
  bool ordered_lastprivate = false;

  gcc_assert (!broken_loop || !in_combined_parallel);
  gcc_assert (fd->iter_type == long_integer_type_node
	      || !in_combined_parallel);

  entry_bb = region->entry;
  cont_bb = region->cont;
  collapse_bb = NULL;
  gcc_assert (EDGE_COUNT (entry_bb->succs) == 2);
  gcc_assert (broken_loop
	      || BRANCH_EDGE (entry_bb)->dest == FALLTHRU_EDGE (cont_bb)->dest);
  l0_bb = split_edge (FALLTHRU_EDGE (entry_bb));
  l1_bb = single_succ (l0_bb);
  if (!broken_loop)
    {
      l2_bb = create_empty_bb (cont_bb);
      gcc_assert (BRANCH_EDGE (cont_bb)->dest == l1_bb
		  || (single_succ_edge (BRANCH_EDGE (cont_bb)->dest)->dest
		      == l1_bb));
      gcc_assert (EDGE_COUNT (cont_bb->succs) == 2);
    }
  else
    l2_bb = NULL;
  l3_bb = BRANCH_EDGE (entry_bb)->dest;
  exit_bb = region->exit;

  gsi = gsi_last_nondebug_bb (entry_bb);

  gcc_assert (gimple_code (gsi_stmt (gsi)) == GIMPLE_OMP_FOR);
  if (fd->ordered
      && omp_find_clause (gimple_omp_for_clauses (gsi_stmt (gsi)),
			  OMP_CLAUSE_LASTPRIVATE))
    ordered_lastprivate = true;
  if (fd->collapse > 1 || fd->ordered)
    {
      int first_zero_iter1 = -1, first_zero_iter2 = -1;
      basic_block zero_iter1_bb = NULL, zero_iter2_bb = NULL, l2_dom_bb = NULL;

      counts = XALLOCAVEC (tree, fd->ordered ? fd->ordered + 1 : fd->collapse);
      expand_omp_for_init_counts (fd, &gsi, entry_bb, counts,
				  zero_iter1_bb, first_zero_iter1,
				  zero_iter2_bb, first_zero_iter2, l2_dom_bb);

      if (zero_iter1_bb)
	{
	  /* Some counts[i] vars might be uninitialized if
	     some loop has zero iterations.  But the body shouldn't
	     be executed in that case, so just avoid uninit warnings.  */
	  for (i = first_zero_iter1;
	       i < (fd->ordered ? fd->ordered : fd->collapse); i++)
	    if (SSA_VAR_P (counts[i]))
	      TREE_NO_WARNING (counts[i]) = 1;
	  gsi_prev (&gsi);
	  e = split_block (entry_bb, gsi_stmt (gsi));
	  entry_bb = e->dest;
	  make_edge (zero_iter1_bb, entry_bb, EDGE_FALLTHRU);
	  gsi = gsi_last_nondebug_bb (entry_bb);
	  set_immediate_dominator (CDI_DOMINATORS, entry_bb,
				   get_immediate_dominator (CDI_DOMINATORS,
							    zero_iter1_bb));
	}
      if (zero_iter2_bb)
	{
	  /* Some counts[i] vars might be uninitialized if
	     some loop has zero iterations.  But the body shouldn't
	     be executed in that case, so just avoid uninit warnings.  */
	  for (i = first_zero_iter2; i < fd->ordered; i++)
	    if (SSA_VAR_P (counts[i]))
	      TREE_NO_WARNING (counts[i]) = 1;
	  if (zero_iter1_bb)
	    make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
	  else
	    {
	      gsi_prev (&gsi);
	      e = split_block (entry_bb, gsi_stmt (gsi));
	      entry_bb = e->dest;
	      make_edge (zero_iter2_bb, entry_bb, EDGE_FALLTHRU);
	      gsi = gsi_last_nondebug_bb (entry_bb);
	      set_immediate_dominator (CDI_DOMINATORS, entry_bb,
				       get_immediate_dominator
					 (CDI_DOMINATORS, zero_iter2_bb));
	    }
	}
      if (fd->collapse == 1)
	{
	  counts[0] = fd->loop.n2;
	  fd->loop = fd->loops[0];
	}
    }

  type = TREE_TYPE (fd->loop.v);
  istart0 = create_tmp_var (fd->iter_type, ".istart0");
  iend0 = create_tmp_var (fd->iter_type, ".iend0");
  TREE_ADDRESSABLE (istart0) = 1;
  TREE_ADDRESSABLE (iend0) = 1;

  /* See if we need to bias by LLONG_MIN.  */
  if (fd->iter_type == long_long_unsigned_type_node
      && TREE_CODE (type) == INTEGER_TYPE
      && !TYPE_UNSIGNED (type)
      && fd->ordered == 0)
    {
      tree n1, n2;

      if (fd->loop.cond_code == LT_EXPR)
	{
	  n1 = fd->loop.n1;
	  n2 = fold_build2 (PLUS_EXPR, type, fd->loop.n2, fd->loop.step);
	}
      else
	{
	  n1 = fold_build2 (MINUS_EXPR, type, fd->loop.n2, fd->loop.step);
	  n2 = fd->loop.n1;
	}
      if (TREE_CODE (n1) != INTEGER_CST
	  || TREE_CODE (n2) != INTEGER_CST
	  || ((tree_int_cst_sgn (n1) < 0) ^ (tree_int_cst_sgn (n2) < 0)))
	bias = fold_convert (fd->iter_type, TYPE_MIN_VALUE (type));
    }
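  /* A note on the bias trick (sketch): adding LLONG_MIN in the unsigned
     iterator type maps the signed range [LLONG_MIN, LLONG_MAX] onto
     [0, ULLONG_MAX] while preserving order, so the unsigned
     GOMP_loop_ull_* API can describe bounds of either sign; the bias is
     subtracted again below when istart0/iend0 are read back.  */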

  gimple_stmt_iterator gsif = gsi;
  gsi_prev (&gsif);

  tree arr = NULL_TREE;
  if (in_combined_parallel)
    {
      gcc_assert (fd->ordered == 0);
      /* In a combined parallel loop, emit a call to
	 GOMP_loop_foo_next.  */
      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
    }
  else
    {
      tree t0, t1, t2, t3, t4;
      /* If this is not a combined parallel loop, emit a call to
	 GOMP_loop_foo_start in ENTRY_BB.  */
      t4 = build_fold_addr_expr (iend0);
      t3 = build_fold_addr_expr (istart0);
      if (fd->ordered)
	{
	  t0 = build_int_cst (unsigned_type_node,
			      fd->ordered - fd->collapse + 1);
	  arr = create_tmp_var (build_array_type_nelts (fd->iter_type,
							fd->ordered
							- fd->collapse + 1),
				".omp_counts");
	  DECL_NAMELESS (arr) = 1;
	  TREE_ADDRESSABLE (arr) = 1;
	  TREE_STATIC (arr) = 1;
	  vec<constructor_elt, va_gc> *v;
	  vec_alloc (v, fd->ordered - fd->collapse + 1);
	  int idx;

	  for (idx = 0; idx < fd->ordered - fd->collapse + 1; idx++)
	    {
	      tree c;
	      if (idx == 0 && fd->collapse > 1)
		c = fd->loop.n2;
	      else
		c = counts[idx + fd->collapse - 1];
	      tree purpose = size_int (idx);
	      CONSTRUCTOR_APPEND_ELT (v, purpose, c);
	      if (TREE_CODE (c) != INTEGER_CST)
		TREE_STATIC (arr) = 0;
	    }

	  DECL_INITIAL (arr) = build_constructor (TREE_TYPE (arr), v);
	  if (!TREE_STATIC (arr))
	    force_gimple_operand_gsi (&gsi, build1 (DECL_EXPR,
						    void_type_node, arr),
				      true, NULL_TREE, true, GSI_SAME_STMT);
	  t1 = build_fold_addr_expr (arr);
	  t2 = NULL_TREE;
	}
      else
	{
	  t2 = fold_convert (fd->iter_type, fd->loop.step);
	  t1 = fd->loop.n2;
	  t0 = fd->loop.n1;
	  if (gimple_omp_for_combined_into_p (fd->for_stmt))
	    {
	      tree innerc
		= omp_find_clause (gimple_omp_for_clauses (fd->for_stmt),
				   OMP_CLAUSE__LOOPTEMP_);
	      gcc_assert (innerc);
	      t0 = OMP_CLAUSE_DECL (innerc);
	      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
					OMP_CLAUSE__LOOPTEMP_);
	      gcc_assert (innerc);
	      t1 = OMP_CLAUSE_DECL (innerc);
	    }
	  if (POINTER_TYPE_P (TREE_TYPE (t0))
	      && TYPE_PRECISION (TREE_TYPE (t0))
		 != TYPE_PRECISION (fd->iter_type))
	    {
	      /* Avoid casting pointers to integer of a different size.  */
	      tree itype = signed_type_for (type);
	      t1 = fold_convert (fd->iter_type, fold_convert (itype, t1));
	      t0 = fold_convert (fd->iter_type, fold_convert (itype, t0));
	    }
	  else
	    {
	      t1 = fold_convert (fd->iter_type, t1);
	      t0 = fold_convert (fd->iter_type, t0);
	    }
	  if (bias)
	    {
	      t1 = fold_build2 (PLUS_EXPR, fd->iter_type, t1, bias);
	      t0 = fold_build2 (PLUS_EXPR, fd->iter_type, t0, bias);
	    }
	}
      if (fd->iter_type == long_integer_type_node || fd->ordered)
	{
	  if (fd->chunk_size)
	    {
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = omp_adjust_chunk_size (t, fd->simd_schedule);
	      if (fd->ordered)
		t = build_call_expr (builtin_decl_explicit (start_fn),
				     5, t0, t1, t, t3, t4);
	      else
		t = build_call_expr (builtin_decl_explicit (start_fn),
				     6, t0, t1, t2, t, t3, t4);
	    }
	  else if (fd->ordered)
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 4, t0, t1, t3, t4);
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 5, t0, t1, t2, t3, t4);
	}
      else
	{
	  tree t5;
	  tree c_bool_type;
	  tree bfn_decl;

	  /* The GOMP_loop_ull_*start functions have an additional
	     boolean argument, true for < loops and false for > loops.
	     In Fortran, the C bool type can be different from
	     boolean_type_node.  */
	  bfn_decl = builtin_decl_explicit (start_fn);
	  c_bool_type = TREE_TYPE (TREE_TYPE (bfn_decl));
	  t5 = build_int_cst (c_bool_type,
			      fd->loop.cond_code == LT_EXPR ? 1 : 0);
	  if (fd->chunk_size)
	    {
	      tree bfn_decl = builtin_decl_explicit (start_fn);
	      t = fold_convert (fd->iter_type, fd->chunk_size);
	      t = omp_adjust_chunk_size (t, fd->simd_schedule);
	      t = build_call_expr (bfn_decl, 7, t5, t0, t1, t2, t, t3, t4);
	    }
	  else
	    t = build_call_expr (builtin_decl_explicit (start_fn),
				 6, t5, t0, t1, t2, t3, t4);
	}
    }
  if (TREE_TYPE (t) != boolean_type_node)
    t = fold_build2 (NE_EXPR, boolean_type_node,
		     t, build_int_cst (TREE_TYPE (t), 0));
  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				true, GSI_SAME_STMT);
  if (arr && !TREE_STATIC (arr))
    {
      tree clobber = build_constructor (TREE_TYPE (arr), NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gsi_insert_before (&gsi, gimple_build_assign (arr, clobber),
			 GSI_SAME_STMT);
    }
  gsi_insert_after (&gsi, gimple_build_cond_empty (t), GSI_SAME_STMT);

  /* Remove the GIMPLE_OMP_FOR statement.  */
  gsi_remove (&gsi, true);

  if (gsi_end_p (gsif))
    gsif = gsi_after_labels (gsi_bb (gsif));
  gsi_next (&gsif);

  /* Iteration setup for sequential loop goes in L0_BB.  */
  tree startvar = fd->loop.v;
  tree endvar = NULL_TREE;

  if (gimple_omp_for_combined_p (fd->for_stmt))
    {
      gcc_assert (gimple_code (inner_stmt) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (inner_stmt)
		     == GF_OMP_FOR_KIND_SIMD);
      tree innerc = omp_find_clause (gimple_omp_for_clauses (inner_stmt),
				     OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      startvar = OMP_CLAUSE_DECL (innerc);
      innerc = omp_find_clause (OMP_CLAUSE_CHAIN (innerc),
				OMP_CLAUSE__LOOPTEMP_);
      gcc_assert (innerc);
      endvar = OMP_CLAUSE_DECL (innerc);
    }

  gsi = gsi_start_bb (l0_bb);
  t = istart0;
  if (fd->ordered && fd->collapse == 1)
    t = fold_build2 (MULT_EXPR, fd->iter_type, t,
		     fold_convert (fd->iter_type, fd->loop.step));
  else if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (fd->ordered && fd->collapse == 1)
    {
      if (POINTER_TYPE_P (TREE_TYPE (startvar)))
	t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
			 fd->loop.n1, fold_convert (sizetype, t));
      else
	{
	  t = fold_convert (TREE_TYPE (startvar), t);
	  t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
			   fd->loop.n1, t);
	}
    }
  else
    {
      if (POINTER_TYPE_P (TREE_TYPE (startvar)))
	t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
      t = fold_convert (TREE_TYPE (startvar), t);
    }
  t = force_gimple_operand_gsi (&gsi, t,
				DECL_P (startvar)
				&& TREE_ADDRESSABLE (startvar),
				NULL_TREE, false, GSI_CONTINUE_LINKING);
  assign_stmt = gimple_build_assign (startvar, t);
  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);

  t = iend0;
  if (fd->ordered && fd->collapse == 1)
    t = fold_build2 (MULT_EXPR, fd->iter_type, t,
		     fold_convert (fd->iter_type, fd->loop.step));
  else if (bias)
    t = fold_build2 (MINUS_EXPR, fd->iter_type, t, bias);
  if (fd->ordered && fd->collapse == 1)
    {
      if (POINTER_TYPE_P (TREE_TYPE (startvar)))
	t = fold_build2 (POINTER_PLUS_EXPR, TREE_TYPE (startvar),
			 fd->loop.n1, fold_convert (sizetype, t));
      else
	{
	  t = fold_convert (TREE_TYPE (startvar), t);
	  t = fold_build2 (PLUS_EXPR, TREE_TYPE (startvar),
			   fd->loop.n1, t);
	}
    }
  else
    {
      if (POINTER_TYPE_P (TREE_TYPE (startvar)))
	t = fold_convert (signed_type_for (TREE_TYPE (startvar)), t);
      t = fold_convert (TREE_TYPE (startvar), t);
    }
  iend = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				   false, GSI_CONTINUE_LINKING);
  if (endvar)
    {
      assign_stmt = gimple_build_assign (endvar, iend);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
      if (useless_type_conversion_p (TREE_TYPE (fd->loop.v), TREE_TYPE (iend)))
	assign_stmt = gimple_build_assign (fd->loop.v, iend);
      else
	assign_stmt = gimple_build_assign (fd->loop.v, NOP_EXPR, iend);
      gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
    }
  /* Handle linear clause adjustments.  */
  tree itercnt = NULL_TREE;
  if (gimple_omp_for_kind (fd->for_stmt) == GF_OMP_FOR_KIND_FOR)
    for (tree c = gimple_omp_for_clauses (fd->for_stmt);
	 c; c = OMP_CLAUSE_CHAIN (c))
      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	  && !OMP_CLAUSE_LINEAR_NO_COPYIN (c))
	{
	  tree d = OMP_CLAUSE_DECL (c);
	  bool is_ref = omp_is_reference (d);
	  tree t = d, a, dest;
	  if (is_ref)
	    t = build_simple_mem_ref_loc (OMP_CLAUSE_LOCATION (c), t);
	  tree type = TREE_TYPE (t);
	  if (POINTER_TYPE_P (type))
	    type = sizetype;
	  dest = unshare_expr (t);
	  tree v = create_tmp_var (TREE_TYPE (t), NULL);
	  expand_omp_build_assign (&gsif, v, t);
	  if (itercnt == NULL_TREE)
	    {
	      itercnt = startvar;
	      tree n1 = fd->loop.n1;
	      if (POINTER_TYPE_P (TREE_TYPE (itercnt)))
		{
		  itercnt
		    = fold_convert (signed_type_for (TREE_TYPE (itercnt)),
				    itercnt);
		  n1 = fold_convert (TREE_TYPE (itercnt), n1);
		}
	      itercnt = fold_build2 (MINUS_EXPR, TREE_TYPE (itercnt),
				     itercnt, n1);
	      itercnt = fold_build2 (EXACT_DIV_EXPR, TREE_TYPE (itercnt),
				     itercnt, fd->loop.step);
	      itercnt = force_gimple_operand_gsi (&gsi, itercnt, true,
						  NULL_TREE, false,
						  GSI_CONTINUE_LINKING);
	    }
	  a = fold_build2 (MULT_EXPR, type,
			   fold_convert (type, itercnt),
			   fold_convert (type, OMP_CLAUSE_LINEAR_STEP (c)));
	  t = fold_build2 (type == TREE_TYPE (t) ? PLUS_EXPR
			   : POINTER_PLUS_EXPR, TREE_TYPE (t), v, a);
	  t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					false, GSI_CONTINUE_LINKING);
	  assign_stmt = gimple_build_assign (dest, t);
	  gsi_insert_after (&gsi, assign_stmt, GSI_CONTINUE_LINKING);
	}
  if (fd->collapse > 1)
    expand_omp_for_init_vars (fd, &gsi, counts, inner_stmt, startvar);

  if (fd->ordered)
    {
      /* Until now, the counts array contained the number of iterations
	 (or a variable holding it) for the ith loop.  From now on, we
	 need those counts only for the collapsed loops, and only from
	 the 2nd collapsed one till the last.  Move them one element
	 earlier; we'll use counts[fd->collapse - 1] for the first
	 source/sink iteration counter and so on, and counts[fd->ordered]
	 as the array holding the current counter values for
	 depend(source).  */
      if (fd->collapse > 1)
	memmove (counts, counts + 1, (fd->collapse - 1) * sizeof (counts[0]));
      if (broken_loop)
	{
	  int i;
	  for (i = fd->collapse; i < fd->ordered; i++)
	    {
	      tree type = TREE_TYPE (fd->loops[i].v);
	      tree this_cond
		= fold_build2 (fd->loops[i].cond_code, boolean_type_node,
			       fold_convert (type, fd->loops[i].n1),
			       fold_convert (type, fd->loops[i].n2));
	      if (!integer_onep (this_cond))
		break;
	    }
	  if (i < fd->ordered)
	    {
	      cont_bb
		= create_empty_bb (EXIT_BLOCK_PTR_FOR_FN (cfun)->prev_bb);
	      add_bb_to_loop (cont_bb, l1_bb->loop_father);
	      gimple_stmt_iterator gsi = gsi_after_labels (cont_bb);
	      gimple *g = gimple_build_omp_continue (fd->loop.v, fd->loop.v);
	      gsi_insert_before (&gsi, g, GSI_SAME_STMT);
	      make_edge (cont_bb, l3_bb, EDGE_FALLTHRU);
	      make_edge (cont_bb, l1_bb, 0);
	      l2_bb = create_empty_bb (cont_bb);
	      broken_loop = false;
	    }
	}
      expand_omp_ordered_source_sink (region, fd, counts, cont_bb);
      cont_bb = expand_omp_for_ordered_loops (fd, counts, cont_bb, l1_bb,
					      ordered_lastprivate);
      if (counts[fd->collapse - 1])
	{
	  gcc_assert (fd->collapse == 1);
	  gsi = gsi_last_bb (l0_bb);
	  expand_omp_build_assign (&gsi, counts[fd->collapse - 1],
				   istart0, true);
	  gsi = gsi_last_bb (cont_bb);
	  t = fold_build2 (PLUS_EXPR, fd->iter_type, counts[fd->collapse - 1],
			   build_int_cst (fd->iter_type, 1));
	  expand_omp_build_assign (&gsi, counts[fd->collapse - 1], t);
	  tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			      size_zero_node, NULL_TREE, NULL_TREE);
	  expand_omp_build_assign (&gsi, aref, counts[fd->collapse - 1]);
	  t = counts[fd->collapse - 1];
	}
      else if (fd->collapse > 1)
	t = fd->loop.v;
      else
	{
	  t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
			   fd->loops[0].v, fd->loops[0].n1);
	  t = fold_convert (fd->iter_type, t);
	}
      gsi = gsi_last_bb (l0_bb);
      tree aref = build4 (ARRAY_REF, fd->iter_type, counts[fd->ordered],
			  size_zero_node, NULL_TREE, NULL_TREE);
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      expand_omp_build_assign (&gsi, aref, t, true);
    }

  if (!broken_loop)
    {
      /* Code to control the increment and predicate for the sequential
	 loop goes in the CONT_BB.  */
      gsi = gsi_last_nondebug_bb (cont_bb);
      gomp_continue *cont_stmt = as_a <gomp_continue *> (gsi_stmt (gsi));
      gcc_assert (gimple_code (cont_stmt) == GIMPLE_OMP_CONTINUE);
      vmain = gimple_omp_continue_control_use (cont_stmt);
      vback = gimple_omp_continue_control_def (cont_stmt);

      if (!gimple_omp_for_combined_p (fd->for_stmt))
	{
	  if (POINTER_TYPE_P (type))
	    t = fold_build_pointer_plus (vmain, fd->loop.step);
	  else
	    t = fold_build2 (PLUS_EXPR, type, vmain, fd->loop.step);
	  t = force_gimple_operand_gsi (&gsi, t,
					DECL_P (vback)
					&& TREE_ADDRESSABLE (vback),
					NULL_TREE, true, GSI_SAME_STMT);
	  assign_stmt = gimple_build_assign (vback, t);
	  gsi_insert_before (&gsi, assign_stmt, GSI_SAME_STMT);

	  if (fd->ordered && counts[fd->collapse - 1] == NULL_TREE)
	    {
	      if (fd->collapse > 1)
		t = fd->loop.v;
	      else
		{
		  t = fold_build2 (MINUS_EXPR, TREE_TYPE (fd->loops[0].v),
				   fd->loops[0].v, fd->loops[0].n1);
		  t = fold_convert (fd->iter_type, t);
		}
	      tree aref = build4 (ARRAY_REF, fd->iter_type,
				  counts[fd->ordered], size_zero_node,
				  NULL_TREE, NULL_TREE);
	      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
					    true, GSI_SAME_STMT);
	      expand_omp_build_assign (&gsi, aref, t);
	    }

	  t = build2 (fd->loop.cond_code, boolean_type_node,
		      DECL_P (vback) && TREE_ADDRESSABLE (vback) ? t : vback,
		      iend);
	  gcond *cond_stmt = gimple_build_cond_empty (t);
	  gsi_insert_before (&gsi, cond_stmt, GSI_SAME_STMT);
	}

      /* Remove GIMPLE_OMP_CONTINUE.  */
      gsi_remove (&gsi, true);

      if (fd->collapse > 1 && !gimple_omp_for_combined_p (fd->for_stmt))
	collapse_bb = extract_omp_for_update_vars (fd, cont_bb, l1_bb);

      /* Emit code to get the next parallel iteration in L2_BB.  */
      gsi = gsi_start_bb (l2_bb);

      t = build_call_expr (builtin_decl_explicit (next_fn), 2,
			   build_fold_addr_expr (istart0),
			   build_fold_addr_expr (iend0));
      t = force_gimple_operand_gsi (&gsi, t, true, NULL_TREE,
				    false, GSI_CONTINUE_LINKING);
      if (TREE_TYPE (t) != boolean_type_node)
	t = fold_build2 (NE_EXPR, boolean_type_node,
			 t, build_int_cst (TREE_TYPE (t), 0));
      gcond *cond_stmt = gimple_build_cond_empty (t);
      gsi_insert_after (&gsi, cond_stmt, GSI_CONTINUE_LINKING);
    }

  /* Add the loop cleanup function.  */
  gsi = gsi_last_nondebug_bb (exit_bb);
  if (gimple_omp_return_nowait_p (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_NOWAIT);
  else if (gimple_omp_return_lhs (gsi_stmt (gsi)))
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END_CANCEL);
  else
    t = builtin_decl_explicit (BUILT_IN_GOMP_LOOP_END);
  gcall *call_stmt = gimple_build_call (t, 0);
  if (gimple_omp_return_lhs (gsi_stmt (gsi)))
    gimple_call_set_lhs (call_stmt, gimple_omp_return_lhs (gsi_stmt (gsi)));
  gsi_insert_after (&gsi, call_stmt, GSI_SAME_STMT);
  if (fd->ordered)
    {
      tree arr = counts[fd->ordered];
      tree clobber = build_constructor (TREE_TYPE (arr), NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gsi_insert_after (&gsi, gimple_build_assign (arr, clobber),
			GSI_SAME_STMT);
    }
  gsi_remove (&gsi, true);

  /* Connect the new blocks.  */
  find_edge (entry_bb, l0_bb)->flags = EDGE_TRUE_VALUE;
  find_edge (entry_bb, l3_bb)->flags = EDGE_FALSE_VALUE;

  if (!broken_loop)
    {
      gimple_seq phis;

      e = find_edge (cont_bb, l3_bb);
      ne = make_edge (l2_bb, l3_bb, EDGE_FALSE_VALUE);

      phis = phi_nodes (l3_bb);
      for (gsi = gsi_start (phis); !gsi_end_p (gsi); gsi_next (&gsi))
	{
	  gimple *phi = gsi_stmt (gsi);
	  SET_USE (PHI_ARG_DEF_PTR_FROM_EDGE (phi, ne),
		   PHI_ARG_DEF_FROM_EDGE (phi, e));
	}
      remove_edge (e);

      make_edge (cont_bb, l2_bb, EDGE_FALSE_VALUE);
      e = find_edge (cont_bb, l1_bb);
      if (e == NULL)
	{
	  e = BRANCH_EDGE (cont_bb);
	  gcc_assert (single_succ (e->dest) == l1_bb);
	}
      if (gimple_omp_for_combined_p (fd->for_stmt))
	{
	  remove_edge (e);
	  e = NULL;
	}
      else if (fd->collapse > 1)
	{
	  remove_edge (e);
	  e = make_edge (cont_bb, collapse_bb, EDGE_TRUE_VALUE);
	}
      else
	e->flags = EDGE_TRUE_VALUE;
      if (e)
	{
	  e->probability
	    = profile_probability::guessed_always ().apply_scale (7, 8);
	  find_edge (cont_bb, l2_bb)->probability = e->probability.invert ();
	}
      else
	{
	  e = find_edge (cont_bb, l2_bb);
	  e->flags = EDGE_FALLTHRU;
	}
      make_edge (l2_bb, l0_bb, EDGE_TRUE_VALUE);

      if (gimple_in_ssa_p (cfun))
	{
	  /* Add phis to the outer loop that connect to the phis in the inner,
	     original loop, and move the loop entry value of the inner phi to
	     the loop entry value of the outer phi.  */
	  gphi_iterator psi;
	  for (psi = gsi_start_phis (l3_bb); !gsi_end_p (psi); gsi_next (&psi))
	    {
	      source_location locus;
	      gphi *nphi;
	      gphi *exit_phi = psi.phi ();

	      edge l2_to_l3 = find_edge (l2_bb, l3_bb);
	      tree exit_res = PHI_ARG_DEF_FROM_EDGE (exit_phi, l2_to_l3);

	      basic_block latch = BRANCH_EDGE (cont_bb)->dest;
	      edge latch_to_l1 = find_edge (latch, l1_bb);
	      gphi *inner_phi
		= find_phi_with_arg_on_edge (exit_res, latch_to_l1);

	      tree t = gimple_phi_result (exit_phi);
	      tree new_res = copy_ssa_name (t, NULL);
	      nphi = create_phi_node (new_res, l0_bb);

	      edge l0_to_l1 = find_edge (l0_bb, l1_bb);
	      t = PHI_ARG_DEF_FROM_EDGE (inner_phi, l0_to_l1);
	      locus = gimple_phi_arg_location_from_edge (inner_phi, l0_to_l1);
	      edge entry_to_l0 = find_edge (entry_bb, l0_bb);
	      add_phi_arg (nphi, t, entry_to_l0, locus);

	      edge l2_to_l0 = find_edge (l2_bb, l0_bb);
	      add_phi_arg (nphi, exit_res, l2_to_l0, UNKNOWN_LOCATION);

	      add_phi_arg (inner_phi, new_res, l0_to_l1, UNKNOWN_LOCATION);
	    }
	}

      set_immediate_dominator (CDI_DOMINATORS, l2_bb,
			       recompute_dominator (CDI_DOMINATORS, l2_bb));
      set_immediate_dominator (CDI_DOMINATORS, l3_bb,
			       recompute_dominator (CDI_DOMINATORS, l3_bb));
      set_immediate_dominator (CDI_DOMINATORS, l0_bb,
			       recompute_dominator (CDI_DOMINATORS, l0_bb));
      set_immediate_dominator (CDI_DOMINATORS, l1_bb,
			       recompute_dominator (CDI_DOMINATORS, l1_bb));

      /* We enter expand_omp_for_generic with a loop.  This original loop may
	 have its own loop struct, or it may be part of an outer loop struct
	 (which may be the fake loop).  */
      struct loop *outer_loop = entry_bb->loop_father;
      bool orig_loop_has_loop_struct = l1_bb->loop_father != outer_loop;

      add_bb_to_loop (l2_bb, outer_loop);

      /* We've added a new loop around the original loop.  Allocate the
	 corresponding loop struct.  */
      struct loop *new_loop = alloc_loop ();
      new_loop->header = l0_bb;
      new_loop->latch = l2_bb;
      add_loop (new_loop, outer_loop);

      /* Allocate a loop structure for the original loop unless we already
	 had one.  */
      if (!orig_loop_has_loop_struct
	  && !gimple_omp_for_combined_p (fd->for_stmt))
	{
	  struct loop *orig_loop = alloc_loop ();
	  orig_loop->header = l1_bb;
	  /* The loop may have multiple latches.  */
	  add_loop (orig_loop, new_loop);
	}
    }
}

/* A subroutine of expand_omp_for.  Generate code for a parallel
   loop with static schedule and no specified chunk size.  Given
   parameters:

	for (V = N1; V cond N2; V += STEP) BODY;

   where COND is "<" or ">", we generate pseudocode

	if ((__typeof (V)) -1 > 0 && N2 cond N1) goto L2;
	if (cond is <)
	  adj = STEP - 1;
	else
	  adj = STEP + 1;
	if ((__typeof (V)) -1 > 0 && cond is >)
	  n = -(adj + N2 - N1) / -STEP;
	else
	  n = (adj + N2 - N1) / STEP;
	q = n / nthreads;
	tt = n % nthreads;
	if (threadid < tt) goto L3; else goto L4;
    L3:
	tt = 0;
	q = q + 1;
    L4:
	s0 = q * threadid + tt;
	e0 = s0 + q;
	V = s0 * STEP + N1;
	if (s0 >= e0) goto L2; else goto L0;
    L0:
	e = e0 * STEP + N1;
    L1:
	BODY;
	V += STEP;
	if (V cond e) goto L1;
    L2:
*/
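/* Worked example (illustrative): n = 10 iterations on nthreads = 4 give
   q = 2, tt = 2; threads 0 and 1 take the L3 path (tt = 0, q = 3), so the
   chunks are [0,3), [3,6), [6,8), [8,10) -- the first tt threads each get
   one extra iteration.  */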

static void
expand_omp_for_static_nochunk (struct omp_region *region,
			       struct omp_for_data *fd,
			       gimple *inner_stmt)
{
  tree n, q, s0, e0, e, t, tt, nthreads, threadid;
