/* Lowering pass for OMP directives.  Converts OMP directives into explicit
   calls to the runtime library (libgomp), data marshalling to implement data
   sharing and copying clauses, offloading to accelerators, and more.

   Contributed by Diego Novillo <dnovillo@redhat.com>

   Copyright (C) 2005-2017 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "backend.h"
#include "target.h"
#include "tree.h"
#include "gimple.h"
#include "tree-pass.h"
#include "ssa.h"
#include "cgraph.h"
#include "pretty-print.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "internal-fn.h"
#include "gimple-fold.h"
#include "gimplify.h"
#include "gimple-iterator.h"
#include "gimplify-me.h"
#include "gimple-walk.h"
#include "tree-iterator.h"
#include "tree-inline.h"
#include "langhooks.h"
#include "tree-dfa.h"
#include "tree-ssa.h"
#include "splay-tree.h"
#include "omp-general.h"
#include "omp-low.h"
#include "omp-grid.h"
#include "gimple-low.h"
#include "symbol-summary.h"
#include "tree-nested.h"
#include "context.h"
#include "gomp-constants.h"
#include "gimple-pretty-print.h"
#include "hsa-common.h"
#include "stringpool.h"
#include "attribs.h"

/* Lowering of OMP parallel and workshare constructs proceeds in two
   phases.  The first phase scans the function looking for OMP statements
   and then for variables that must be replaced to satisfy data sharing
   clauses.  The second phase expands code for the constructs, as well as
   re-gimplifying things when variables have been replaced with complex
   expressions.

   Final code generation is done by pass_expand_omp.  The flowgraph is
   scanned for regions which are then moved to a new
   function, to be invoked by the thread library, or offloaded.  */
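
/* As a simplified illustration of the above (the details depend on the
   clauses and on whether the region is offloaded), a construct such as

       #pragma omp parallel shared(x)
       x++;

   becomes a .omp_data_s record carrying X (or its address), a child
   function that receives a pointer to that record as its .omp_data_i
   argument, and, in pass_expand_omp, a call into libgomp (GOMP_parallel)
   that runs the child function on each thread of the team.  */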

/* Context structure.  Used to store information about each parallel
   directive in the code.  */

struct omp_context
{
  /* This field must be at the beginning, as we do "inheritance": Some
     callback functions for tree-inline.c (e.g., omp_copy_decl)
     receive a copy_body_data pointer that is up-casted to an
     omp_context pointer.  */
  copy_body_data cb;

  /* The tree of contexts corresponding to the encountered constructs.  */
  struct omp_context *outer;
  gimple *stmt;

  /* Map variables to fields in a structure that allows communication
     between sending and receiving threads.  */
  splay_tree field_map;
  tree record_type;
  tree sender_decl;
  tree receiver_decl;

  /* These are used just by task contexts, if task firstprivate fn is
     needed.  srecord_type is used to communicate from the thread
     that encountered the task construct to task firstprivate fn;
     record_type is allocated by GOMP_task, initialized by task firstprivate
     fn and passed to the task body fn.  */
  splay_tree sfield_map;
  tree srecord_type;

  /* A chain of variables to add to the top-level block surrounding the
     construct.  In the case of a parallel, this is in the child function.  */
  tree block_vars;

  /* Label to which GOMP_cancel{,lation_point} and explicit and implicit
     barriers should jump during the omplower pass.  */
  tree cancel_label;

  /* The sibling GIMPLE_OMP_FOR simd with _simt_ clause or NULL
     otherwise.  */
  gimple *simt_stmt;

  /* Nesting depth of this context.  Used to beautify error messages re
     invalid gotos.  The outermost ctx is depth 1, with depth 0 being
     reserved for the main body of the function.  */
  int depth;

  /* True if this parallel directive is nested within another.  */
  bool is_nested;

  /* True if this construct can be cancelled.  */
  bool cancellable;
};

static splay_tree all_contexts;
static int taskreg_nesting_level;
static int target_nesting_level;
static bitmap task_shared_vars;
static vec<omp_context *> taskreg_contexts;

static void scan_omp (gimple_seq *, omp_context *);
static tree scan_omp_1_op (tree *, int *, void *);

#define WALK_SUBSTMTS  \
    case GIMPLE_BIND: \
    case GIMPLE_TRY: \
    case GIMPLE_CATCH: \
    case GIMPLE_EH_FILTER: \
    case GIMPLE_TRANSACTION: \
      /* The sub-statements for these should be walked.  */ \
      *handled_ops_p = false; \
      break;
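
/* For example, omp_find_combined_for below includes WALK_SUBSTMTS in its
   switch so that walk_gimple_seq keeps descending through GIMPLE_BIND,
   GIMPLE_TRY and friends while it searches for a combined
   GIMPLE_OMP_FOR.  */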

/* Return true if CTX corresponds to an oacc parallel region.  */

static bool
is_oacc_parallel (omp_context *ctx)
{
  enum gimple_code outer_type = gimple_code (ctx->stmt);
  return ((outer_type == GIMPLE_OMP_TARGET)
	  && (gimple_omp_target_kind (ctx->stmt)
	      == GF_OMP_TARGET_KIND_OACC_PARALLEL));
}

/* Return true if CTX corresponds to an oacc kernels region.  */

static bool
is_oacc_kernels (omp_context *ctx)
{
  enum gimple_code outer_type = gimple_code (ctx->stmt);
  return ((outer_type == GIMPLE_OMP_TARGET)
	  && (gimple_omp_target_kind (ctx->stmt)
	      == GF_OMP_TARGET_KIND_OACC_KERNELS));
}

/* If DECL is the artificial dummy VAR_DECL created for non-static
   data member privatization, return the underlying "this" parameter,
   otherwise return NULL.  */

tree
omp_member_access_dummy_var (tree decl)
{
  if (!VAR_P (decl)
      || !DECL_ARTIFICIAL (decl)
      || !DECL_IGNORED_P (decl)
      || !DECL_HAS_VALUE_EXPR_P (decl)
      || !lang_hooks.decls.omp_disregard_value_expr (decl, false))
    return NULL_TREE;

  tree v = DECL_VALUE_EXPR (decl);
  if (TREE_CODE (v) != COMPONENT_REF)
    return NULL_TREE;

  while (1)
    switch (TREE_CODE (v))
      {
      case COMPONENT_REF:
      case MEM_REF:
      case INDIRECT_REF:
      CASE_CONVERT:
      case POINTER_PLUS_EXPR:
	v = TREE_OPERAND (v, 0);
	continue;
      case PARM_DECL:
	if (DECL_CONTEXT (v) == current_function_decl
	    && DECL_ARTIFICIAL (v)
	    && TREE_CODE (TREE_TYPE (v)) == POINTER_TYPE)
	  return v;
	return NULL_TREE;
      default:
	return NULL_TREE;
      }
}

/* Helper for unshare_and_remap, called through walk_tree.  */

static tree
unshare_and_remap_1 (tree *tp, int *walk_subtrees, void *data)
{
  tree *pair = (tree *) data;
  if (*tp == pair[0])
    {
      *tp = unshare_expr (pair[1]);
      *walk_subtrees = 0;
    }
  else if (IS_TYPE_OR_DECL_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* Return unshare_expr (X) with all occurrences of FROM
   replaced with TO.  */

static tree
unshare_and_remap (tree x, tree from, tree to)
{
  tree pair[2] = { from, to };
  x = unshare_expr (x);
  walk_tree (&x, unshare_and_remap_1, pair, NULL);
  return x;
}
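
/* Usage sketch: build_outer_var_ref below calls
   unshare_and_remap (DECL_VALUE_EXPR (var), t, o) to rewrite the cached
   member-access expression of a privatized non-static data member so
   that the dummy "this" parameter T is replaced by its counterpart O
   found in the enclosing context.  */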

/* Convenience function for calling scan_omp_1_op on tree operands.  */

static inline tree
scan_omp_op (tree *tp, omp_context *ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  return walk_tree (tp, scan_omp_1_op, &wi, NULL);
}

static void lower_omp (gimple_seq *, omp_context *);
static tree lookup_decl_in_outer_ctx (tree, omp_context *);
static tree maybe_lookup_decl_in_outer_ctx (tree, omp_context *);

/* Return true if CTX is for an omp parallel.  */

static inline bool
is_parallel_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL;
}


/* Return true if CTX is for an omp task.  */

static inline bool
is_task_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_TASK;
}


/* Return true if CTX is for an omp taskloop.  */

static inline bool
is_taskloop_ctx (omp_context *ctx)
{
  return gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	 && gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP;
}


/* Return true if CTX is for an omp parallel or omp task.  */

static inline bool
is_taskreg_ctx (omp_context *ctx)
{
  return is_parallel_ctx (ctx) || is_task_ctx (ctx);
}

/* Return true if EXPR is variable sized.  */

static inline bool
is_variable_sized (const_tree expr)
{
  return !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (expr)));
}

/* Lookup variables.  The "maybe" form allows the variable not to have
   been entered; otherwise we assert that the variable must have been
   entered.  */

static inline tree
lookup_decl (tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (var);
  return *n;
}

static inline tree
maybe_lookup_decl (const_tree var, omp_context *ctx)
{
  tree *n = ctx->cb.decl_map->get (const_cast<tree> (var));
  return n ? *n : NULL_TREE;
}

static inline tree
lookup_field (tree var, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, (splay_tree_key) var);
  return (tree) n->value;
}

static inline tree
lookup_sfield (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->sfield_map
			 ? ctx->sfield_map : ctx->field_map, key);
  return (tree) n->value;
}

static inline tree
lookup_sfield (tree var, omp_context *ctx)
{
  return lookup_sfield ((splay_tree_key) var, ctx);
}

static inline tree
maybe_lookup_field (splay_tree_key key, omp_context *ctx)
{
  splay_tree_node n;
  n = splay_tree_lookup (ctx->field_map, key);
  return n ? (tree) n->value : NULL_TREE;
}

static inline tree
maybe_lookup_field (tree var, omp_context *ctx)
{
  return maybe_lookup_field ((splay_tree_key) var, ctx);
}

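/* A note on keys: FIELD_MAP and SFIELD_MAP are normally keyed by the
   VAR_DECL pointer itself, but a variable can additionally be entered
   under (splay_tree_key) &DECL_UID (var) when it needs two distinct
   field mappings in one context; see the mask & 8 case in
   install_var_field and the taskloop lastprivate handling in
   build_outer_var_ref.  */
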
/* Return true if DECL should be copied by pointer.  SHARED_CTX is
   the parallel context if DECL is to be shared.  */

static bool
use_pointer_for_field (tree decl, omp_context *shared_ctx)
{
  if (AGGREGATE_TYPE_P (TREE_TYPE (decl))
      || TYPE_ATOMIC (TREE_TYPE (decl)))
    return true;

  /* We can only use copy-in/copy-out semantics for shared variables
     when we know the value is not accessible from an outer scope.  */
  if (shared_ctx)
    {
      gcc_assert (!is_gimple_omp_oacc (shared_ctx->stmt));

      /* ??? Trivially accessible from anywhere.  But why would we even
	 be passing an address in this case?  Should we simply assert
	 this to be false, or should we have a cleanup pass that removes
	 these from the list of mappings?  */
      if (TREE_STATIC (decl) || DECL_EXTERNAL (decl))
	return true;

      /* For variables with DECL_HAS_VALUE_EXPR_P set, we cannot tell
	 without analyzing the expression whether or not its location
	 is accessible to anyone else.  In the case of nested parallel
	 regions it certainly may be.  */
      if (TREE_CODE (decl) != RESULT_DECL && DECL_HAS_VALUE_EXPR_P (decl))
	return true;

      /* Do not use copy-in/copy-out for variables that have their
	 address taken.  */
      if (TREE_ADDRESSABLE (decl))
	return true;

      /* lower_send_shared_vars only uses copy-in, but not copy-out
	 for these.  */
      if (TREE_READONLY (decl)
	  || ((TREE_CODE (decl) == RESULT_DECL
	       || TREE_CODE (decl) == PARM_DECL)
	      && DECL_BY_REFERENCE (decl)))
	return false;

      /* Disallow copy-in/out in nested parallel if
	 decl is shared in outer parallel, otherwise
	 each thread could store the shared variable
	 in its own copy-in location, making the
	 variable no longer really shared.  */
      if (shared_ctx->is_nested)
	{
	  omp_context *up;

	  for (up = shared_ctx->outer; up; up = up->outer)
	    if (is_taskreg_ctx (up) && maybe_lookup_decl (decl, up))
	      break;

	  if (up)
	    {
	      tree c;

	      for (c = gimple_omp_taskreg_clauses (up->stmt);
		   c; c = OMP_CLAUSE_CHAIN (c))
		if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
		    && OMP_CLAUSE_DECL (c) == decl)
		  break;

	      if (c)
		goto maybe_mark_addressable_and_ret;
	    }
	}

      /* For tasks avoid using copy-in/out.  As tasks can be
	 deferred or executed in a different thread, when GOMP_task
	 returns, the task hasn't necessarily terminated.  */
      if (is_task_ctx (shared_ctx))
	{
	  tree outer;
	maybe_mark_addressable_and_ret:
	  outer = maybe_lookup_decl_in_outer_ctx (decl, shared_ctx);
	  if (is_gimple_reg (outer) && !omp_member_access_dummy_var (outer))
	    {
	      /* Taking address of OUTER in lower_send_shared_vars
		 might need regimplification of everything that uses the
		 variable.  */
	      if (!task_shared_vars)
		task_shared_vars = BITMAP_ALLOC (NULL);
	      bitmap_set_bit (task_shared_vars, DECL_UID (outer));
	      TREE_ADDRESSABLE (outer) = 1;
	    }
	  return true;
	}
    }

  return false;
}
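
/* In other words: a local scalar such as `int i' shared on a parallel
   can use copy-in/copy-out, whereas an aggregate, an atomic or
   addressable variable, or anything shared with a task is always passed
   by reference.  */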

/* Construct a new automatic decl similar to VAR.  */

static tree
omp_copy_decl_2 (tree var, tree name, tree type, omp_context *ctx)
{
  tree copy = copy_var_decl (var, name, type);

  DECL_CONTEXT (copy) = current_function_decl;
  DECL_CHAIN (copy) = ctx->block_vars;
  /* If VAR is listed in task_shared_vars, it means it wasn't
     originally addressable and is only addressable because some task
     needed to take its address.  But we don't need to take the address
     of privatized copies of it.  */
  if (TREE_ADDRESSABLE (var)
      && task_shared_vars
      && bitmap_bit_p (task_shared_vars, DECL_UID (var)))
    TREE_ADDRESSABLE (copy) = 0;
  ctx->block_vars = copy;

  return copy;
}

static tree
omp_copy_decl_1 (tree var, omp_context *ctx)
{
  return omp_copy_decl_2 (var, DECL_NAME (var), TREE_TYPE (var), ctx);
}

/* Build COMPONENT_REF and set TREE_THIS_VOLATILE and TREE_READONLY on it
   as appropriate.  */
static tree
omp_build_component_ref (tree obj, tree field)
{
  tree ret = build3 (COMPONENT_REF, TREE_TYPE (field), obj, field, NULL);
  if (TREE_THIS_VOLATILE (field))
    TREE_THIS_VOLATILE (ret) |= 1;
  if (TREE_READONLY (field))
    TREE_READONLY (ret) |= 1;
  return ret;
}

/* Build tree nodes to access the field for VAR on the receiver side.  */

static tree
build_receiver_ref (tree var, bool by_ref, omp_context *ctx)
{
  tree x, field = lookup_field (var, ctx);

  /* If the receiver record type was remapped in the child function,
     remap the field into the new record type.  */
  x = maybe_lookup_field (field, ctx);
  if (x != NULL)
    field = x;

  x = build_simple_mem_ref (ctx->receiver_decl);
  TREE_THIS_NOTRAP (x) = 1;
  x = omp_build_component_ref (x, field);
  if (by_ref)
    {
      x = build_simple_mem_ref (x);
      TREE_THIS_NOTRAP (x) = 1;
    }

  return x;
}

/* Build tree nodes to access VAR in the scope outer to CTX.  In the case
   of a parallel, this is a component reference; for workshare constructs
   this is some variable.  */

static tree
build_outer_var_ref (tree var, omp_context *ctx,
		     enum omp_clause_code code = OMP_CLAUSE_ERROR)
{
  tree x;

  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx)))
    x = var;
  else if (is_variable_sized (var))
    {
      x = TREE_OPERAND (DECL_VALUE_EXPR (var), 0);
      x = build_outer_var_ref (x, ctx, code);
      x = build_simple_mem_ref (x);
    }
  else if (is_taskreg_ctx (ctx))
    {
      bool by_ref = use_pointer_for_field (var, NULL);
      x = build_receiver_ref (var, by_ref, ctx);
    }
  else if ((gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	    && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	   || (code == OMP_CLAUSE_PRIVATE
	       && (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
		   || gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS
		   || gimple_code (ctx->stmt) == GIMPLE_OMP_SINGLE)))
    {
      /* #pragma omp simd isn't a worksharing construct, and can reference
	 even private vars in its linear etc. clauses.
	 Similarly for OMP_CLAUSE_PRIVATE with outer ref, that can refer
	 to private vars in all worksharing constructs.  */
      x = NULL_TREE;
      if (ctx->outer && is_taskreg_ctx (ctx))
	x = lookup_decl (var, ctx->outer);
      else if (ctx->outer)
	x = maybe_lookup_decl_in_outer_ctx (var, ctx);
      if (x == NULL_TREE)
	x = var;
    }
  else if (code == OMP_CLAUSE_LASTPRIVATE && is_taskloop_ctx (ctx))
    {
      gcc_assert (ctx->outer);
      splay_tree_node n
	= splay_tree_lookup (ctx->outer->field_map,
			     (splay_tree_key) &DECL_UID (var));
      if (n == NULL)
	{
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (var, ctx->outer)))
	    x = var;
	  else
	    x = lookup_decl (var, ctx->outer);
	}
      else
	{
	  tree field = (tree) n->value;
	  /* If the receiver record type was remapped in the child function,
	     remap the field into the new record type.  */
	  x = maybe_lookup_field (field, ctx->outer);
	  if (x != NULL)
	    field = x;

	  x = build_simple_mem_ref (ctx->outer->receiver_decl);
	  x = omp_build_component_ref (x, field);
	  if (use_pointer_for_field (var, ctx->outer))
	    x = build_simple_mem_ref (x);
	}
    }
  else if (ctx->outer)
    {
      omp_context *outer = ctx->outer;
      if (gimple_code (outer->stmt) == GIMPLE_OMP_GRID_BODY)
	{
	  outer = outer->outer;
	  gcc_assert (outer
		      && gimple_code (outer->stmt) != GIMPLE_OMP_GRID_BODY);
	}
      x = lookup_decl (var, outer);
    }
  else if (omp_is_reference (var))
    /* This can happen with orphaned constructs.  If var is a reference, it
       is possible it is shared and as such valid.  */
    x = var;
  else if (omp_member_access_dummy_var (var))
    x = var;
  else
    gcc_unreachable ();

  if (x == var)
    {
      tree t = omp_member_access_dummy_var (var);
      if (t)
	{
	  x = DECL_VALUE_EXPR (var);
	  tree o = maybe_lookup_decl_in_outer_ctx (t, ctx);
	  if (o != t)
	    x = unshare_and_remap (x, t, o);
	  else
	    x = unshare_expr (x);
	}
    }

  if (omp_is_reference (var))
    x = build_simple_mem_ref (x);

  return x;
}

/* Build tree nodes to access the field for VAR on the sender side.  */

static tree
build_sender_ref (splay_tree_key key, omp_context *ctx)
{
  tree field = lookup_sfield (key, ctx);
  return omp_build_component_ref (ctx->sender_decl, field);
}

static tree
build_sender_ref (tree var, omp_context *ctx)
{
  return build_sender_ref ((splay_tree_key) var, ctx);
}

/* Add a new field for VAR inside the structure CTX->SENDER_DECL.  If
   BASE_POINTERS_RESTRICT, declare the field with restrict.  */

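/* MASK is a bitmask assembled by the callers below: bit 0 (value 1)
   requests an entry in FIELD_MAP/RECORD_TYPE, bit 1 (value 2) an entry
   in SFIELD_MAP/SRECORD_TYPE, bit 2 (value 4) wraps an array type in a
   double pointer, and bit 3 (value 8) keys the maps by &DECL_UID (VAR)
   instead of VAR itself.  */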
static void
install_var_field (tree var, bool by_ref, int mask, omp_context *ctx,
		   bool base_pointers_restrict = false)
{
  tree field, type, sfield = NULL_TREE;
  splay_tree_key key = (splay_tree_key) var;

  if ((mask & 8) != 0)
    {
      key = (splay_tree_key) &DECL_UID (var);
      gcc_checking_assert (key != (splay_tree_key) var);
    }
  gcc_assert ((mask & 1) == 0
	      || !splay_tree_lookup (ctx->field_map, key));
  gcc_assert ((mask & 2) == 0 || !ctx->sfield_map
	      || !splay_tree_lookup (ctx->sfield_map, key));
  gcc_assert ((mask & 3) == 3
	      || !is_gimple_omp_oacc (ctx->stmt));

  type = TREE_TYPE (var);
  /* Prevent redeclaring the var in the split-off function with a restrict
     pointer type.  Note that we only clear the type itself; restrict
     qualifiers in the pointed-to type will be ignored by points-to
     analysis.  */
  if (POINTER_TYPE_P (type)
      && TYPE_RESTRICT (type))
    type = build_qualified_type (type, TYPE_QUALS (type) & ~TYPE_QUAL_RESTRICT);

  if (mask & 4)
    {
      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
      type = build_pointer_type (build_pointer_type (type));
    }
  else if (by_ref)
    {
      type = build_pointer_type (type);
      if (base_pointers_restrict)
	type = build_qualified_type (type, TYPE_QUAL_RESTRICT);
    }
  else if ((mask & 3) == 1 && omp_is_reference (var))
    type = TREE_TYPE (type);

  field = build_decl (DECL_SOURCE_LOCATION (var),
		      FIELD_DECL, DECL_NAME (var), type);

  /* Remember what variable this field was created for.  This has the
     side effect of making dwarf2out ignore this member, so to keep the
     debug info helpful we clear it again in delete_omp_context.  */
  DECL_ABSTRACT_ORIGIN (field) = var;
  if (type == TREE_TYPE (var))
    {
      SET_DECL_ALIGN (field, DECL_ALIGN (var));
      DECL_USER_ALIGN (field) = DECL_USER_ALIGN (var);
      TREE_THIS_VOLATILE (field) = TREE_THIS_VOLATILE (var);
    }
  else
    SET_DECL_ALIGN (field, TYPE_ALIGN (type));

  if ((mask & 3) == 3)
    {
      insert_field_into_struct (ctx->record_type, field);
      if (ctx->srecord_type)
	{
	  sfield = build_decl (DECL_SOURCE_LOCATION (var),
			       FIELD_DECL, DECL_NAME (var), type);
	  DECL_ABSTRACT_ORIGIN (sfield) = var;
	  SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
	  DECL_USER_ALIGN (sfield) = DECL_USER_ALIGN (field);
	  TREE_THIS_VOLATILE (sfield) = TREE_THIS_VOLATILE (field);
	  insert_field_into_struct (ctx->srecord_type, sfield);
	}
    }
  else
    {
      if (ctx->srecord_type == NULL_TREE)
	{
	  tree t;

	  ctx->srecord_type = lang_hooks.types.make_type (RECORD_TYPE);
	  ctx->sfield_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
	  for (t = TYPE_FIELDS (ctx->record_type); t ; t = TREE_CHAIN (t))
	    {
	      sfield = build_decl (DECL_SOURCE_LOCATION (t),
				   FIELD_DECL, DECL_NAME (t), TREE_TYPE (t));
	      DECL_ABSTRACT_ORIGIN (sfield) = DECL_ABSTRACT_ORIGIN (t);
	      insert_field_into_struct (ctx->srecord_type, sfield);
	      splay_tree_insert (ctx->sfield_map,
				 (splay_tree_key) DECL_ABSTRACT_ORIGIN (t),
				 (splay_tree_value) sfield);
	    }
	}
      sfield = field;
      insert_field_into_struct ((mask & 1) ? ctx->record_type
				: ctx->srecord_type, field);
    }

  if (mask & 1)
    splay_tree_insert (ctx->field_map, key, (splay_tree_value) field);
  if ((mask & 2) && ctx->sfield_map)
    splay_tree_insert (ctx->sfield_map, key, (splay_tree_value) sfield);
}

static tree
install_var_local (tree var, omp_context *ctx)
{
  tree new_var = omp_copy_decl_1 (var, ctx);
  insert_decl_map (&ctx->cb, var, new_var);
  return new_var;
}

/* Adjust the replacement for DECL in CTX for the new context.  This means
   copying the DECL_VALUE_EXPR, and fixing up the type.  */

static void
fixup_remapped_decl (tree decl, omp_context *ctx, bool private_debug)
{
  tree new_decl, size;

  new_decl = lookup_decl (decl, ctx);

  TREE_TYPE (new_decl) = remap_type (TREE_TYPE (decl), &ctx->cb);

  if ((!TREE_CONSTANT (DECL_SIZE (new_decl)) || private_debug)
      && DECL_HAS_VALUE_EXPR_P (decl))
    {
      tree ve = DECL_VALUE_EXPR (decl);
      walk_tree (&ve, copy_tree_body_r, &ctx->cb, NULL);
      SET_DECL_VALUE_EXPR (new_decl, ve);
      DECL_HAS_VALUE_EXPR_P (new_decl) = 1;
    }

  if (!TREE_CONSTANT (DECL_SIZE (new_decl)))
    {
      size = remap_decl (DECL_SIZE (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE (TREE_TYPE (new_decl));
      DECL_SIZE (new_decl) = size;

      size = remap_decl (DECL_SIZE_UNIT (decl), &ctx->cb);
      if (size == error_mark_node)
	size = TYPE_SIZE_UNIT (TREE_TYPE (new_decl));
      DECL_SIZE_UNIT (new_decl) = size;
    }
}

/* The callback for remap_decl.  Search all containing contexts for a
   mapping of the variable; this avoids having to duplicate the splay
   tree ahead of time.  We know a mapping doesn't already exist in the
   given context.  Create new mappings to implement default semantics.  */

static tree
omp_copy_decl (tree var, copy_body_data *cb)
{
  omp_context *ctx = (omp_context *) cb;
  tree new_var;

  if (TREE_CODE (var) == LABEL_DECL)
    {
      if (FORCED_LABEL (var) || DECL_NONLOCAL (var))
	return var;
      new_var = create_artificial_label (DECL_SOURCE_LOCATION (var));
      DECL_CONTEXT (new_var) = current_function_decl;
      insert_decl_map (&ctx->cb, var, new_var);
      return new_var;
    }

  while (!is_taskreg_ctx (ctx))
    {
      ctx = ctx->outer;
      if (ctx == NULL)
	return var;
      new_var = maybe_lookup_decl (var, ctx);
      if (new_var)
	return new_var;
    }

  if (is_global_var (var) || decl_function_context (var) != ctx->cb.src_fn)
    return var;

  return error_mark_node;
}

/* Create a new context, with OUTER_CTX being the surrounding context.  */

static omp_context *
new_omp_context (gimple *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = XCNEW (omp_context);

  splay_tree_insert (all_contexts, (splay_tree_key) stmt,
		     (splay_tree_value) ctx);
  ctx->stmt = stmt;

  if (outer_ctx)
    {
      ctx->outer = outer_ctx;
      ctx->cb = outer_ctx->cb;
      ctx->cb.block = NULL;
      ctx->depth = outer_ctx->depth + 1;
    }
  else
    {
      ctx->cb.src_fn = current_function_decl;
      ctx->cb.dst_fn = current_function_decl;
      ctx->cb.src_node = cgraph_node::get (current_function_decl);
      gcc_checking_assert (ctx->cb.src_node);
      ctx->cb.dst_node = ctx->cb.src_node;
      ctx->cb.src_cfun = cfun;
      ctx->cb.copy_decl = omp_copy_decl;
      ctx->cb.eh_lp_nr = 0;
      ctx->cb.transform_call_graph_edges = CB_CGE_MOVE;
      ctx->depth = 1;
    }

  ctx->cb.decl_map = new hash_map<tree, tree>;

  return ctx;
}

static gimple_seq maybe_catch_exception (gimple_seq);

/* Finalize task copyfn.  */

static void
finalize_task_copyfn (gomp_task *task_stmt)
{
  struct function *child_cfun;
  tree child_fn;
  gimple_seq seq = NULL, new_seq;
  gbind *bind;

  child_fn = gimple_omp_task_copy_fn (task_stmt);
  if (child_fn == NULL_TREE)
    return;

  child_cfun = DECL_STRUCT_FUNCTION (child_fn);
  DECL_STRUCT_FUNCTION (child_fn)->curr_properties = cfun->curr_properties;

  push_cfun (child_cfun);
  bind = gimplify_body (child_fn, false);
  gimple_seq_add_stmt (&seq, bind);
  new_seq = maybe_catch_exception (seq);
  if (new_seq != seq)
    {
      bind = gimple_build_bind (NULL, new_seq, NULL);
      seq = NULL;
      gimple_seq_add_stmt (&seq, bind);
    }
  gimple_set_body (child_fn, seq);
  pop_cfun ();

  /* Inform the callgraph about the new function.  */
  cgraph_node *node = cgraph_node::get_create (child_fn);
  node->parallelized_function = 1;
  cgraph_node::add_new_function (child_fn, false);
}

/* Destroy an omp_context data structure.  Called through the splay tree
   value delete callback.  */

static void
delete_omp_context (splay_tree_value value)
{
  omp_context *ctx = (omp_context *) value;

  delete ctx->cb.decl_map;

  if (ctx->field_map)
    splay_tree_delete (ctx->field_map);
  if (ctx->sfield_map)
    splay_tree_delete (ctx->sfield_map);

  /* We hijacked DECL_ABSTRACT_ORIGIN earlier.  We need to clear it before
     it produces corrupt debug information.  */
  if (ctx->record_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->record_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }
  if (ctx->srecord_type)
    {
      tree t;
      for (t = TYPE_FIELDS (ctx->srecord_type); t ; t = DECL_CHAIN (t))
	DECL_ABSTRACT_ORIGIN (t) = NULL;
    }

  if (is_task_ctx (ctx))
    finalize_task_copyfn (as_a <gomp_task *> (ctx->stmt));

  XDELETE (ctx);
}

/* Fix up RECEIVER_DECL with a type that has been remapped to the child
   context.  */

static void
fixup_child_record_type (omp_context *ctx)
{
  tree f, type = ctx->record_type;

  if (!ctx->receiver_decl)
    return;
  /* ??? It isn't sufficient to just call remap_type here, because
     variably_modified_type_p doesn't work the way we expect for
     record types.  Testing each field for whether it needs remapping
     and creating a new record by hand works, however.  */
  for (f = TYPE_FIELDS (type); f ; f = DECL_CHAIN (f))
    if (variably_modified_type_p (TREE_TYPE (f), ctx->cb.src_fn))
      break;
  if (f)
    {
      tree name, new_fields = NULL;

      type = lang_hooks.types.make_type (RECORD_TYPE);
      name = DECL_NAME (TYPE_NAME (ctx->record_type));
      name = build_decl (DECL_SOURCE_LOCATION (ctx->receiver_decl),
			 TYPE_DECL, name, type);
      TYPE_NAME (type) = name;

      for (f = TYPE_FIELDS (ctx->record_type); f ; f = DECL_CHAIN (f))
	{
	  tree new_f = copy_node (f);
	  DECL_CONTEXT (new_f) = type;
	  TREE_TYPE (new_f) = remap_type (TREE_TYPE (f), &ctx->cb);
	  DECL_CHAIN (new_f) = new_fields;
	  walk_tree (&DECL_SIZE (new_f), copy_tree_body_r, &ctx->cb, NULL);
	  walk_tree (&DECL_SIZE_UNIT (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  walk_tree (&DECL_FIELD_OFFSET (new_f), copy_tree_body_r,
		     &ctx->cb, NULL);
	  new_fields = new_f;

	  /* Arrange to be able to look up the receiver field
	     given the sender field.  */
	  splay_tree_insert (ctx->field_map, (splay_tree_key) f,
			     (splay_tree_value) new_f);
	}
      TYPE_FIELDS (type) = nreverse (new_fields);
      layout_type (type);
    }

  /* In a target region we never modify any of the pointers in *.omp_data_i,
     so attempt to help the optimizers.  */
  if (is_gimple_omp_offloaded (ctx->stmt))
    type = build_qualified_type (type, TYPE_QUAL_CONST);

  TREE_TYPE (ctx->receiver_decl)
    = build_qualified_type (build_reference_type (type), TYPE_QUAL_RESTRICT);
}

/* Instantiate decls as necessary in CTX to satisfy the data sharing
   specified by CLAUSES.  If BASE_POINTERS_RESTRICT, install var field with
   restrict.  */

static void
scan_sharing_clauses (tree clauses, omp_context *ctx,
		      bool base_pointers_restrict = false)
{
  tree c, decl;
  bool scan_array_reductions = false;

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      bool by_ref;

      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_PRIVATE:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_PRIVATE_OUTER_REF (c))
	    goto do_private;
	  else if (!is_variable_sized (decl))
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_SHARED:
	  decl = OMP_CLAUSE_DECL (c);
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    {
	      /* Global variables don't need to be copied,
		 the receiver side will use them directly.  */
	      tree odecl = maybe_lookup_decl_in_outer_ctx (decl, ctx);
	      if (is_global_var (odecl))
		break;
	      insert_decl_map (&ctx->cb, decl, odecl);
	      break;
	    }
	  gcc_assert (is_taskreg_ctx (ctx));
	  gcc_assert (!COMPLETE_TYPE_P (TREE_TYPE (decl))
		      || !is_variable_sized (decl));
	  /* Global variables don't need to be copied,
	     the receiver side will use them directly.  */
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	    {
	      use_pointer_for_field (decl, ctx);
	      break;
	    }
	  by_ref = use_pointer_for_field (decl, NULL);
	  if ((! TREE_READONLY (decl) && !OMP_CLAUSE_SHARED_READONLY (c))
	      || TREE_ADDRESSABLE (decl)
	      || by_ref
	      || omp_is_reference (decl))
	    {
	      by_ref = use_pointer_for_field (decl, ctx);
	      install_var_field (decl, by_ref, 3, ctx);
	      install_var_local (decl, ctx);
	      break;
	    }
	  /* We don't need to copy const scalar vars back.  */
	  OMP_CLAUSE_SET_CODE (c, OMP_CLAUSE_FIRSTPRIVATE);
	  goto do_private;

	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	      && TREE_CODE (decl) == MEM_REF)
	    {
	      tree t = TREE_OPERAND (decl, 0);
	      if (TREE_CODE (t) == POINTER_PLUS_EXPR)
		t = TREE_OPERAND (t, 0);
	      if (TREE_CODE (t) == INDIRECT_REF
		  || TREE_CODE (t) == ADDR_EXPR)
		t = TREE_OPERAND (t, 0);
	      install_var_local (t, ctx);
	      if (is_taskreg_ctx (ctx)
		  && !is_global_var (maybe_lookup_decl_in_outer_ctx (t, ctx))
		  && !is_variable_sized (t))
		{
		  by_ref = use_pointer_for_field (t, ctx);
		  install_var_field (t, by_ref, 3, ctx);
		}
	      break;
	    }
	  goto do_private;

	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LINEAR:
	  decl = OMP_CLAUSE_DECL (c);
	do_private:
	  if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
	       || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
	      && is_gimple_omp_offloaded (ctx->stmt))
	    {
	      if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE)
		install_var_field (decl, !omp_is_reference (decl), 3, ctx);
	      else if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		install_var_field (decl, true, 3, ctx);
	      else
		install_var_field (decl, false, 3, ctx);
	    }
	  if (is_variable_sized (decl))
	    {
	      if (is_task_ctx (ctx))
		install_var_field (decl, false, 1, ctx);
	      break;
	    }
	  else if (is_taskreg_ctx (ctx))
	    {
	      bool global
		= is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx));
	      by_ref = use_pointer_for_field (decl, NULL);

	      if (is_task_ctx (ctx)
		  && (global || by_ref || omp_is_reference (decl)))
		{
		  install_var_field (decl, false, 1, ctx);
		  if (!global)
		    install_var_field (decl, by_ref, 2, ctx);
		}
	      else if (!global)
		install_var_field (decl, by_ref, 3, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_USE_DEVICE_PTR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_field (decl, true, 3, ctx);
	  else
	    install_var_field (decl, false, 3, ctx);
	  if (DECL_SIZE (decl)
	      && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
	    {
	      tree decl2 = DECL_VALUE_EXPR (decl);
	      gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
	      decl2 = TREE_OPERAND (decl2, 0);
	      gcc_assert (DECL_P (decl2));
	      install_var_local (decl2, ctx);
	    }
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_IS_DEVICE_PTR:
	  decl = OMP_CLAUSE_DECL (c);
	  goto do_private;

	case OMP_CLAUSE__LOOPTEMP_:
	  gcc_assert (is_taskreg_ctx (ctx));
	  decl = OMP_CLAUSE_DECL (c);
	  install_var_field (decl, false, 3, ctx);
	  install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	  decl = OMP_CLAUSE_DECL (c);
	  by_ref = use_pointer_for_field (decl, NULL);
	  install_var_field (decl, by_ref, 3, ctx);
	  break;

	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_OPERAND (c, 0), ctx->outer);
	  break;

	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_MAP:
	  if (ctx->outer)
	    scan_omp_op (&OMP_CLAUSE_SIZE (c), ctx->outer);
	  decl = OMP_CLAUSE_DECL (c);
	  /* Global variables with "omp declare target" attribute
	     don't need to be copied, the receiver side will use them
	     directly.  However, global variables with "omp declare target link"
	     attribute need to be copied.  */
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && DECL_P (decl)
	      && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
		   && (OMP_CLAUSE_MAP_KIND (c)
		       != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
		  || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && varpool_node::get_create (decl)->offloadable
	      && !lookup_attribute ("omp declare target link",
				    DECL_ATTRIBUTES (decl)))
	    break;
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER)
	    {
	      /* Ignore GOMP_MAP_POINTER kind for arrays in regions that are
		 not offloaded; there is nothing to map for those.  */
	      if (!is_gimple_omp_offloaded (ctx->stmt)
		  && !POINTER_TYPE_P (TREE_TYPE (decl))
		  && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c))
		break;
	    }
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
	      && (OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER
		  || (OMP_CLAUSE_MAP_KIND (c)
		      == GOMP_MAP_FIRSTPRIVATE_REFERENCE)))
	    {
	      if (TREE_CODE (decl) == COMPONENT_REF
		  || (TREE_CODE (decl) == INDIRECT_REF
		      && TREE_CODE (TREE_OPERAND (decl, 0)) == COMPONENT_REF
		      && (TREE_CODE (TREE_TYPE (TREE_OPERAND (decl, 0)))
			  == REFERENCE_TYPE)))
		break;
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_local (decl2, ctx);
		}
	      install_var_local (decl, ctx);
	      break;
	    }
	  if (DECL_P (decl))
	    {
	      if (DECL_SIZE (decl)
		  && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_field (decl2, true, 3, ctx);
		  install_var_local (decl2, ctx);
		  install_var_local (decl, ctx);
		}
	      else
		{
		  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_MAP
		      && OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
		      && !OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c)
		      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
		    install_var_field (decl, true, 7, ctx);
		  else
		    install_var_field (decl, true, 3, ctx,
				       base_pointers_restrict);
		  if (is_gimple_omp_offloaded (ctx->stmt)
		      && !OMP_CLAUSE_MAP_IN_REDUCTION (c))
		    install_var_local (decl, ctx);
		}
	    }
	  else
	    {
	      tree base = get_base_address (decl);
	      tree nc = OMP_CLAUSE_CHAIN (c);
	      if (DECL_P (base)
		  && nc != NULL_TREE
		  && OMP_CLAUSE_CODE (nc) == OMP_CLAUSE_MAP
		  && OMP_CLAUSE_DECL (nc) == base
		  && OMP_CLAUSE_MAP_KIND (nc) == GOMP_MAP_POINTER
		  && integer_zerop (OMP_CLAUSE_SIZE (nc)))
		{
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (c) = 1;
		  OMP_CLAUSE_MAP_ZERO_BIAS_ARRAY_SECTION (nc) = 1;
		}
	      else
		{
		  if (ctx->outer)
		    {
		      scan_omp_op (&OMP_CLAUSE_DECL (c), ctx->outer);
		      decl = OMP_CLAUSE_DECL (c);
		    }
		  gcc_assert (!splay_tree_lookup (ctx->field_map,
						  (splay_tree_key) decl));
		  tree field
		    = build_decl (OMP_CLAUSE_LOCATION (c),
				  FIELD_DECL, NULL_TREE, ptr_type_node);
		  SET_DECL_ALIGN (field, TYPE_ALIGN (ptr_type_node));
		  insert_field_into_struct (ctx->record_type, field);
		  splay_tree_insert (ctx->field_map, (splay_tree_key) decl,
				     (splay_tree_value) field);
		}
	    }
	  break;

	case OMP_CLAUSE__GRIDDIM_:
	  if (ctx->outer)
	    {
	      scan_omp_op (&OMP_CLAUSE__GRIDDIM__SIZE (c), ctx->outer);
	      scan_omp_op (&OMP_CLAUSE__GRIDDIM__GROUP (c), ctx->outer);
	    }
	  break;

	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_THREADS:
	case OMP_CLAUSE_SIMD:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE__SIMT_:
	case OMP_CLAUSE_DEFAULT:
	  break;

	case OMP_CLAUSE_ALIGNED:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (decl)
	      && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	    install_var_local (decl, ctx);
	  break;

	case OMP_CLAUSE__CACHE_:
	default:
	  gcc_unreachable ();
	}
    }

  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LASTPRIVATE:
	  /* Let the corresponding firstprivate clause create
	     the variable.  */
	  if (OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
	    break;
	  /* FALLTHRU */

	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_LINEAR:
	case OMP_CLAUSE_IS_DEVICE_PTR:
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_variable_sized (decl))
	    {
	      if ((OMP_CLAUSE_CODE (c) == OMP_CLAUSE_FIRSTPRIVATE
		   || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_IS_DEVICE_PTR)
		  && is_gimple_omp_offloaded (ctx->stmt))
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  install_var_local (decl2, ctx);
		  fixup_remapped_decl (decl2, ctx, false);
		}
	      install_var_local (decl, ctx);
	    }
	  fixup_remapped_decl (decl, ctx,
			       OMP_CLAUSE_CODE (c) == OMP_CLAUSE_PRIVATE
			       && OMP_CLAUSE_PRIVATE_DEBUG (c));
	  if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
	      && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_REDUCTION:
	  decl = OMP_CLAUSE_DECL (c);
	  if (TREE_CODE (decl) != MEM_REF)
	    {
	      if (is_variable_sized (decl))
		install_var_local (decl, ctx);
	      fixup_remapped_decl (decl, ctx, false);
	    }
	  if (OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	    scan_array_reductions = true;
	  break;

	case OMP_CLAUSE_SHARED:
	  /* Ignore shared directives in teams construct.  */
	  if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	    break;
	  if (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	    {
	      if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl,
								 ctx->outer)))
		break;
	      bool by_ref = use_pointer_for_field (decl, ctx);
	      install_var_field (decl, by_ref, 11, ctx);
	      break;
	    }
	  fixup_remapped_decl (decl, ctx, false);
	  break;

	case OMP_CLAUSE_MAP:
	  if (!is_gimple_omp_offloaded (ctx->stmt))
	    break;
	  decl = OMP_CLAUSE_DECL (c);
	  if (DECL_P (decl)
	      && ((OMP_CLAUSE_MAP_KIND (c) != GOMP_MAP_FIRSTPRIVATE_POINTER
		   && (OMP_CLAUSE_MAP_KIND (c)
		       != GOMP_MAP_FIRSTPRIVATE_REFERENCE))
		  || TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE)
	      && is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx))
	      && varpool_node::get_create (decl)->offloadable)
	    break;
	  if (DECL_P (decl))
	    {
	      if ((OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_POINTER
		   || OMP_CLAUSE_MAP_KIND (c) == GOMP_MAP_FIRSTPRIVATE_POINTER)
		  && TREE_CODE (TREE_TYPE (decl)) == ARRAY_TYPE
		  && !COMPLETE_TYPE_P (TREE_TYPE (decl)))
		{
		  tree new_decl = lookup_decl (decl, ctx);
		  TREE_TYPE (new_decl)
		    = remap_type (TREE_TYPE (decl), &ctx->cb);
		}
	      else if (DECL_SIZE (decl)
		       && TREE_CODE (DECL_SIZE (decl)) != INTEGER_CST)
		{
		  tree decl2 = DECL_VALUE_EXPR (decl);
		  gcc_assert (TREE_CODE (decl2) == INDIRECT_REF);
		  decl2 = TREE_OPERAND (decl2, 0);
		  gcc_assert (DECL_P (decl2));
		  fixup_remapped_decl (decl2, ctx, false);
		  fixup_remapped_decl (decl, ctx, true);
		}
	      else
		fixup_remapped_decl (decl, ctx, false);
	    }
	  break;

	case OMP_CLAUSE_COPYPRIVATE:
	case OMP_CLAUSE_COPYIN:
	case OMP_CLAUSE_DEFAULT:
	case OMP_CLAUSE_IF:
	case OMP_CLAUSE_NUM_THREADS:
	case OMP_CLAUSE_NUM_TEAMS:
	case OMP_CLAUSE_THREAD_LIMIT:
	case OMP_CLAUSE_DEVICE:
	case OMP_CLAUSE_SCHEDULE:
	case OMP_CLAUSE_DIST_SCHEDULE:
	case OMP_CLAUSE_NOWAIT:
	case OMP_CLAUSE_ORDERED:
	case OMP_CLAUSE_COLLAPSE:
	case OMP_CLAUSE_UNTIED:
	case OMP_CLAUSE_FINAL:
	case OMP_CLAUSE_MERGEABLE:
	case OMP_CLAUSE_PROC_BIND:
	case OMP_CLAUSE_SAFELEN:
	case OMP_CLAUSE_SIMDLEN:
	case OMP_CLAUSE_ALIGNED:
	case OMP_CLAUSE_DEPEND:
	case OMP_CLAUSE__LOOPTEMP_:
	case OMP_CLAUSE_TO:
	case OMP_CLAUSE_FROM:
	case OMP_CLAUSE_PRIORITY:
	case OMP_CLAUSE_GRAINSIZE:
	case OMP_CLAUSE_NUM_TASKS:
	case OMP_CLAUSE_THREADS:
	case OMP_CLAUSE_SIMD:
	case OMP_CLAUSE_NOGROUP:
	case OMP_CLAUSE_DEFAULTMAP:
	case OMP_CLAUSE_USE_DEVICE_PTR:
	case OMP_CLAUSE_ASYNC:
	case OMP_CLAUSE_WAIT:
	case OMP_CLAUSE_NUM_GANGS:
	case OMP_CLAUSE_NUM_WORKERS:
	case OMP_CLAUSE_VECTOR_LENGTH:
	case OMP_CLAUSE_GANG:
	case OMP_CLAUSE_WORKER:
	case OMP_CLAUSE_VECTOR:
	case OMP_CLAUSE_INDEPENDENT:
	case OMP_CLAUSE_AUTO:
	case OMP_CLAUSE_SEQ:
	case OMP_CLAUSE_TILE:
	case OMP_CLAUSE__GRIDDIM_:
	case OMP_CLAUSE__SIMT_:
	  break;

	case OMP_CLAUSE__CACHE_:
	default:
	  gcc_unreachable ();
	}
    }

  gcc_checking_assert (!scan_array_reductions
		       || !is_gimple_omp_oacc (ctx->stmt));
  if (scan_array_reductions)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_REDUCTION
	    && OMP_CLAUSE_REDUCTION_PLACEHOLDER (c))
	  {
	    scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_INIT (c), ctx);
	    scan_omp (&OMP_CLAUSE_REDUCTION_GIMPLE_MERGE (c), ctx);
	  }
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LASTPRIVATE
		 && OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c))
	  scan_omp (&OMP_CLAUSE_LASTPRIVATE_GIMPLE_SEQ (c), ctx);
	else if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_LINEAR
		 && OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c))
	  scan_omp (&OMP_CLAUSE_LINEAR_GIMPLE_SEQ (c), ctx);
    }
}

/* Create a new name for omp child function.  Returns an identifier.  */

static tree
create_omp_child_function_name (bool task_copy)
{
  return clone_function_name (current_function_decl,
			      task_copy ? "_omp_cpyfn" : "_omp_fn");
}
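
/* E.g. for a containing function `foo', this yields identifiers of the
   form foo._omp_fn.N or foo._omp_cpyfn.N; clone_function_name appends
   the discriminating number.  */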

/* Return true if CTX may belong to offloaded code: either if current function
   is offloaded, or any enclosing context corresponds to a target region.  */

static bool
omp_maybe_offloaded_ctx (omp_context *ctx)
{
  if (cgraph_node::get (current_function_decl)->offloadable)
    return true;
  for (; ctx; ctx = ctx->outer)
    if (is_gimple_omp_offloaded (ctx->stmt))
      return true;
  return false;
}

/* Build a decl for the omp child function.  It'll not contain a body
   yet, just the bare decl.  */

static void
create_omp_child_function (omp_context *ctx, bool task_copy)
{
  tree decl, type, name, t;

  name = create_omp_child_function_name (task_copy);
  if (task_copy)
    type = build_function_type_list (void_type_node, ptr_type_node,
				     ptr_type_node, NULL_TREE);
  else
    type = build_function_type_list (void_type_node, ptr_type_node, NULL_TREE);

  decl = build_decl (gimple_location (ctx->stmt), FUNCTION_DECL, name, type);

  gcc_checking_assert (!is_gimple_omp_oacc (ctx->stmt)
		       || !task_copy);
  if (!task_copy)
    ctx->cb.dst_fn = decl;
  else
    gimple_omp_task_set_copy_fn (ctx->stmt, decl);

  TREE_STATIC (decl) = 1;
  TREE_USED (decl) = 1;
  DECL_ARTIFICIAL (decl) = 1;
  DECL_IGNORED_P (decl) = 0;
  TREE_PUBLIC (decl) = 0;
  DECL_UNINLINABLE (decl) = 1;
  DECL_EXTERNAL (decl) = 0;
  DECL_CONTEXT (decl) = NULL_TREE;
  DECL_INITIAL (decl) = make_node (BLOCK);
  BLOCK_SUPERCONTEXT (DECL_INITIAL (decl)) = decl;
  DECL_ATTRIBUTES (decl) = DECL_ATTRIBUTES (current_function_decl);
  DECL_FUNCTION_SPECIFIC_OPTIMIZATION (decl)
    = DECL_FUNCTION_SPECIFIC_OPTIMIZATION (current_function_decl);
  DECL_FUNCTION_SPECIFIC_TARGET (decl)
    = DECL_FUNCTION_SPECIFIC_TARGET (current_function_decl);
  DECL_FUNCTION_VERSIONED (decl)
    = DECL_FUNCTION_VERSIONED (current_function_decl);

  if (omp_maybe_offloaded_ctx (ctx))
    {
      cgraph_node::get_create (decl)->offloadable = 1;
      if (ENABLE_OFFLOADING)
	g->have_offload = true;
    }

  if (cgraph_node::get_create (decl)->offloadable
      && !lookup_attribute ("omp declare target",
			    DECL_ATTRIBUTES (current_function_decl)))
    {
      const char *target_attr = (is_gimple_omp_offloaded (ctx->stmt)
				 ? "omp target entrypoint"
				 : "omp declare target");
      DECL_ATTRIBUTES (decl)
	= tree_cons (get_identifier (target_attr),
		     NULL_TREE, DECL_ATTRIBUTES (decl));
    }

  t = build_decl (DECL_SOURCE_LOCATION (decl),
		  RESULT_DECL, NULL_TREE, void_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_IGNORED_P (t) = 1;
  DECL_CONTEXT (t) = decl;
  DECL_RESULT (decl) = t;

  tree data_name = get_identifier (".omp_data_i");
  t = build_decl (DECL_SOURCE_LOCATION (decl), PARM_DECL, data_name,
		  ptr_type_node);
  DECL_ARTIFICIAL (t) = 1;
  DECL_NAMELESS (t) = 1;
  DECL_ARG_TYPE (t) = ptr_type_node;
  DECL_CONTEXT (t) = current_function_decl;
  TREE_USED (t) = 1;
  TREE_READONLY (t) = 1;
  DECL_ARGUMENTS (decl) = t;
  if (!task_copy)
    ctx->receiver_decl = t;
  else
    {
      t = build_decl (DECL_SOURCE_LOCATION (decl),
		      PARM_DECL, get_identifier (".omp_data_o"),
		      ptr_type_node);
      DECL_ARTIFICIAL (t) = 1;
      DECL_NAMELESS (t) = 1;
      DECL_ARG_TYPE (t) = ptr_type_node;
      DECL_CONTEXT (t) = current_function_decl;
      TREE_USED (t) = 1;
      TREE_ADDRESSABLE (t) = 1;
      DECL_CHAIN (t) = DECL_ARGUMENTS (decl);
      DECL_ARGUMENTS (decl) = t;
    }

  /* Allocate memory for the function structure.  The call to
     allocate_struct_function clobbers CFUN, so we need to restore
     it afterward.  */
  push_struct_function (decl);
  cfun->function_end_locus = gimple_location (ctx->stmt);
  init_tree_ssa (cfun);
  pop_cfun ();
}

/* Callback for walk_gimple_seq.  Check if combined parallel
   contains gimple_omp_for_combined_into_p OMP_FOR.  */

tree
omp_find_combined_for (gimple_stmt_iterator *gsi_p,
		       bool *handled_ops_p,
		       struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi_p);

  *handled_ops_p = true;
  switch (gimple_code (stmt))
    {
    WALK_SUBSTMTS;

    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_combined_into_p (stmt)
	  && gimple_omp_for_kind (stmt)
	     == *(const enum gf_mask *) (wi->info))
	{
	  wi->info = stmt;
	  return integer_zero_node;
	}
      break;
    default:
      break;
    }
  return NULL;
}
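
/* Callers such as add_taskreg_looptemp_clauses below pass WI->INFO
   pointing at the gf_mask kind being searched for; on a match the
   callback stashes the found statement back into WI->INFO and stops the
   walk by returning integer_zero_node.  */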

/* Add _LOOPTEMP_ clauses on OpenMP parallel or task.  */

static void
add_taskreg_looptemp_clauses (enum gf_mask msk, gimple *stmt,
			      omp_context *outer_ctx)
{
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.val_only = true;
  wi.info = (void *) &msk;
  walk_gimple_seq (gimple_omp_body (stmt), omp_find_combined_for, NULL, &wi);
  if (wi.info != (void *) &msk)
    {
      gomp_for *for_stmt = as_a <gomp_for *> ((gimple *) wi.info);
      struct omp_for_data fd;
      omp_extract_for_data (for_stmt, &fd, NULL);
      /* We need two temporaries with fd.loop.v type (istart/iend)
	 and then (fd.collapse - 1) temporaries with the same
	 type for count2 ... countN-1 vars if not constant.  */
      size_t count = 2, i;
      tree type = fd.iter_type;
      if (fd.collapse > 1
	  && TREE_CODE (fd.loop.n2) != INTEGER_CST)
	{
	  count += fd.collapse - 1;
	  /* If there are lastprivate clauses on the inner
	     GIMPLE_OMP_FOR, add one more temporary for the total number
	     of iterations (product of count1 ... countN-1).  */
	  if (omp_find_clause (gimple_omp_for_clauses (for_stmt),
			       OMP_CLAUSE_LASTPRIVATE))
	    count++;
	  else if (msk == GF_OMP_FOR_KIND_FOR
		   && omp_find_clause (gimple_omp_parallel_clauses (stmt),
				       OMP_CLAUSE_LASTPRIVATE))
	    count++;
	}
      for (i = 0; i < count; i++)
	{
	  tree temp = create_tmp_var (type);
	  tree c = build_omp_clause (UNKNOWN_LOCATION, OMP_CLAUSE__LOOPTEMP_);
	  insert_decl_map (&outer_ctx->cb, temp, temp);
	  OMP_CLAUSE_DECL (c) = temp;
	  OMP_CLAUSE_CHAIN (c) = gimple_omp_taskreg_clauses (stmt);
	  gimple_omp_taskreg_set_clauses (stmt, c);
	}
    }
}
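
/* For example, for a combined `#pragma omp parallel for collapse(2)
   lastprivate(x)' whose outer loop bound is not a compile-time constant,
   the loop above adds four _looptemp_ clauses: istart, iend, the count2
   temporary, and one more for the total iteration count.  */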

/* Scan an OpenMP parallel directive.  */

static void
scan_omp_parallel (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  gomp_parallel *stmt = as_a <gomp_parallel *> (gsi_stmt (*gsi));

  /* Ignore parallel directives with empty bodies, unless there
     are copyin clauses.  */
  if (optimize > 0
      && empty_body_p (gimple_omp_body (stmt))
      && omp_find_clause (gimple_omp_parallel_clauses (stmt),
			  OMP_CLAUSE_COPYIN) == NULL)
    {
      gsi_replace (gsi, gimple_build_nop (), false);
      return;
    }

  if (gimple_omp_parallel_combined_p (stmt))
    add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_FOR, stmt, outer_ctx);

  ctx = new_omp_context (stmt, outer_ctx);
  taskreg_contexts.safe_push (ctx);
  if (taskreg_nesting_level > 1)
    ctx->is_nested = true;
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;
  if (!gimple_omp_parallel_grid_phony (stmt))
    {
      create_omp_child_function (ctx, false);
      gimple_omp_parallel_set_child_fn (stmt, ctx->cb.dst_fn);
    }

  scan_sharing_clauses (gimple_omp_parallel_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
}

/* Scan an OpenMP task directive.  */

static void
scan_omp_task (gimple_stmt_iterator *gsi, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name, t;
  gomp_task *stmt = as_a <gomp_task *> (gsi_stmt (*gsi));

  /* Ignore task directives with empty bodies, unless they have a depend
     clause.  */
1796 if (optimize > 0
1797 && empty_body_p (gimple_omp_body (stmt))
1798 && !omp_find_clause (gimple_omp_task_clauses (stmt), OMP_CLAUSE_DEPEND))
1799 {
1800 gsi_replace (gsi, gimple_build_nop (), false);
1801 return;
1802 }
1803
1804 if (gimple_omp_task_taskloop_p (stmt))
1805 add_taskreg_looptemp_clauses (GF_OMP_FOR_KIND_TASKLOOP, stmt, outer_ctx);
1806
1807 ctx = new_omp_context (stmt, outer_ctx);
1808 taskreg_contexts.safe_push (ctx);
1809 if (taskreg_nesting_level > 1)
1810 ctx->is_nested = true;
1811 ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
1812 ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
1813 name = create_tmp_var_name (".omp_data_s");
1814 name = build_decl (gimple_location (stmt),
1815 TYPE_DECL, name, ctx->record_type);
1816 DECL_ARTIFICIAL (name) = 1;
1817 DECL_NAMELESS (name) = 1;
1818 TYPE_NAME (ctx->record_type) = name;
1819 TYPE_ARTIFICIAL (ctx->record_type) = 1;
1820 create_omp_child_function (ctx, false);
1821 gimple_omp_task_set_child_fn (stmt, ctx->cb.dst_fn);
1822
1823 scan_sharing_clauses (gimple_omp_task_clauses (stmt), ctx);
1824
1825 if (ctx->srecord_type)
1826 {
1827 name = create_tmp_var_name (".omp_data_a");
1828 name = build_decl (gimple_location (stmt),
1829 TYPE_DECL, name, ctx->srecord_type);
1830 DECL_ARTIFICIAL (name) = 1;
1831 DECL_NAMELESS (name) = 1;
1832 TYPE_NAME (ctx->srecord_type) = name;
1833 TYPE_ARTIFICIAL (ctx->srecord_type) = 1;
1834 create_omp_child_function (ctx, true);
1835 }
1836
1837 scan_omp (gimple_omp_body_ptr (stmt), ctx);
1838
1839 if (TYPE_FIELDS (ctx->record_type) == NULL)
1840 {
1841 ctx->record_type = ctx->receiver_decl = NULL;
1842 t = build_int_cst (long_integer_type_node, 0);
1843 gimple_omp_task_set_arg_size (stmt, t);
1844 t = build_int_cst (long_integer_type_node, 1);
1845 gimple_omp_task_set_arg_align (stmt, t);
1846 }
1847}

/* Helper function for finish_taskreg_scan, called through walk_tree.
   If maybe_lookup_decl_in_outer_ctx returns non-NULL for some
   tree, replace it in the expression. */

static tree
finish_taskreg_remap (tree *tp, int *walk_subtrees, void *data)
{
  if (VAR_P (*tp))
    {
      omp_context *ctx = (omp_context *) data;
      tree t = maybe_lookup_decl_in_outer_ctx (*tp, ctx);
      if (t != *tp)
	{
	  if (DECL_HAS_VALUE_EXPR_P (t))
	    t = unshare_expr (DECL_VALUE_EXPR (t));
	  *tp = t;
	}
      *walk_subtrees = 0;
    }
  else if (IS_TYPE_OR_DECL_P (*tp))
    *walk_subtrees = 0;
  return NULL_TREE;
}

/* If any decls have been made addressable during scan_omp,
   adjust their fields if needed, and layout record types
   of parallel/task constructs. */
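/* For illustration only (hypothetical field names, not taken from any
   testcase): if a task record was scanned as
   { char c[n]; long _looptemp_1; long _looptemp_2; int i; }, where c[n]
   has non-constant size, the code below first moves the variable sized
   field to the end, giving { _looptemp_1; _looptemp_2; i; c[n]; }, and
   for taskloop additionally keeps the two _looptemp_ fields at the
   front, since GOMP_taskloop fills them in at fixed positions. */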

static void
finish_taskreg_scan (omp_context *ctx)
{
  if (ctx->record_type == NULL_TREE)
    return;

  /* If any task_shared_vars were needed, check all
     OMP_CLAUSE_SHARED clauses on GIMPLE_OMP_{PARALLEL,TASK}
     statements to see whether use_pointer_for_field has changed
     because of that. If it did, update the field types now. */
  if (task_shared_vars)
    {
      tree c;

      for (c = gimple_omp_taskreg_clauses (ctx->stmt);
	   c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SHARED
	    && !OMP_CLAUSE_SHARED_FIRSTPRIVATE (c))
	  {
	    tree decl = OMP_CLAUSE_DECL (c);

	    /* Global variables don't need to be copied,
	       the receiver side will use them directly. */
	    if (is_global_var (maybe_lookup_decl_in_outer_ctx (decl, ctx)))
	      continue;
	    if (!bitmap_bit_p (task_shared_vars, DECL_UID (decl))
		|| !use_pointer_for_field (decl, ctx))
	      continue;
	    tree field = lookup_field (decl, ctx);
	    if (TREE_CODE (TREE_TYPE (field)) == POINTER_TYPE
		&& TREE_TYPE (TREE_TYPE (field)) == TREE_TYPE (decl))
	      continue;
	    TREE_TYPE (field) = build_pointer_type (TREE_TYPE (decl));
	    TREE_THIS_VOLATILE (field) = 0;
	    DECL_USER_ALIGN (field) = 0;
	    SET_DECL_ALIGN (field, TYPE_ALIGN (TREE_TYPE (field)));
	    if (TYPE_ALIGN (ctx->record_type) < DECL_ALIGN (field))
	      SET_TYPE_ALIGN (ctx->record_type, DECL_ALIGN (field));
	    if (ctx->srecord_type)
	      {
		tree sfield = lookup_sfield (decl, ctx);
		TREE_TYPE (sfield) = TREE_TYPE (field);
		TREE_THIS_VOLATILE (sfield) = 0;
		DECL_USER_ALIGN (sfield) = 0;
		SET_DECL_ALIGN (sfield, DECL_ALIGN (field));
		if (TYPE_ALIGN (ctx->srecord_type) < DECL_ALIGN (sfield))
		  SET_TYPE_ALIGN (ctx->srecord_type, DECL_ALIGN (sfield));
	      }
	  }
    }

  if (gimple_code (ctx->stmt) == GIMPLE_OMP_PARALLEL)
    {
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
    }
  else
    {
      location_t loc = gimple_location (ctx->stmt);
      tree *p, vla_fields = NULL_TREE, *q = &vla_fields;
      /* Move VLA fields to the end. */
      p = &TYPE_FIELDS (ctx->record_type);
      while (*p)
	if (!TYPE_SIZE_UNIT (TREE_TYPE (*p))
	    || !TREE_CONSTANT (TYPE_SIZE_UNIT (TREE_TYPE (*p))))
	  {
	    *q = *p;
	    *p = TREE_CHAIN (*p);
	    TREE_CHAIN (*q) = NULL_TREE;
	    q = &TREE_CHAIN (*q);
	  }
	else
	  p = &DECL_CHAIN (*p);
      *p = vla_fields;
      if (gimple_omp_task_taskloop_p (ctx->stmt))
	{
	  /* Move the fields corresponding to the first and second
	     _looptemp_ clause to the front. These are filled in by
	     GOMP_taskloop and thus need to be at specific positions. */
	  tree c1 = gimple_omp_task_clauses (ctx->stmt);
	  c1 = omp_find_clause (c1, OMP_CLAUSE__LOOPTEMP_);
	  tree c2 = omp_find_clause (OMP_CLAUSE_CHAIN (c1),
				     OMP_CLAUSE__LOOPTEMP_);
	  tree f1 = lookup_field (OMP_CLAUSE_DECL (c1), ctx);
	  tree f2 = lookup_field (OMP_CLAUSE_DECL (c2), ctx);
	  p = &TYPE_FIELDS (ctx->record_type);
	  while (*p)
	    if (*p == f1 || *p == f2)
	      *p = DECL_CHAIN (*p);
	    else
	      p = &DECL_CHAIN (*p);
	  DECL_CHAIN (f1) = f2;
	  DECL_CHAIN (f2) = TYPE_FIELDS (ctx->record_type);
	  TYPE_FIELDS (ctx->record_type) = f1;
	  if (ctx->srecord_type)
	    {
	      f1 = lookup_sfield (OMP_CLAUSE_DECL (c1), ctx);
	      f2 = lookup_sfield (OMP_CLAUSE_DECL (c2), ctx);
	      p = &TYPE_FIELDS (ctx->srecord_type);
	      while (*p)
		if (*p == f1 || *p == f2)
		  *p = DECL_CHAIN (*p);
		else
		  p = &DECL_CHAIN (*p);
	      DECL_CHAIN (f1) = f2;
	      DECL_CHAIN (f2) = TYPE_FIELDS (ctx->srecord_type);
	      TYPE_FIELDS (ctx->srecord_type) = f1;
	    }
	}
      layout_type (ctx->record_type);
      fixup_child_record_type (ctx);
      if (ctx->srecord_type)
	layout_type (ctx->srecord_type);
      tree t = fold_convert_loc (loc, long_integer_type_node,
				 TYPE_SIZE_UNIT (ctx->record_type));
      if (TREE_CODE (t) != INTEGER_CST)
	{
	  t = unshare_expr (t);
	  walk_tree (&t, finish_taskreg_remap, ctx, NULL);
	}
      gimple_omp_task_set_arg_size (ctx->stmt, t);
      t = build_int_cst (long_integer_type_node,
			 TYPE_ALIGN_UNIT (ctx->record_type));
      gimple_omp_task_set_arg_align (ctx->stmt, t);
    }
}

/* Find the enclosing offload context. */

static omp_context *
enclosing_target_ctx (omp_context *ctx)
{
  for (; ctx; ctx = ctx->outer)
    if (gimple_code (ctx->stmt) == GIMPLE_OMP_TARGET)
      break;

  return ctx;
}

/* Return true if CTX is part of an OpenACC kernels region. */

static bool
ctx_in_oacc_kernels_region (omp_context *ctx)
{
  for (; ctx != NULL; ctx = ctx->outer)
    {
      gimple *stmt = ctx->stmt;
      if (gimple_code (stmt) == GIMPLE_OMP_TARGET
	  && gimple_omp_target_kind (stmt) == GF_OMP_TARGET_KIND_OACC_KERNELS)
	return true;
    }

  return false;
}

/* Check the parallelism clauses inside a kernels region.
   Until kernels handling moves to use the same loop indirection
   scheme as parallel, we need to do this checking early. */
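/* As a sketch (hypothetical user code, not from the testsuite), the
   following nest is diagnosed below, because the inner loop reuses the
   gang parallelism already claimed by the containing loop:

     #pragma acc kernels
     {
       #pragma acc loop gang
       for (int i = 0; i < n; i++)
	 #pragma acc loop gang	      // error: inner loop uses same OpenACC
	 for (int j = 0; j < m; j++)  // parallelism as containing loop
	   ...
     }

   The gang/worker/vector masks accumulate outward through the context
   chain via the recursive call on ctx->outer. */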

static unsigned
check_oacc_kernel_gwv (gomp_for *stmt, omp_context *ctx)
{
  bool checking = true;
  unsigned outer_mask = 0;
  unsigned this_mask = 0;
  bool has_seq = false, has_auto = false;

  if (ctx->outer)
    outer_mask = check_oacc_kernel_gwv (NULL, ctx->outer);
  if (!stmt)
    {
      checking = false;
      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR)
	return outer_mask;
      stmt = as_a <gomp_for *> (ctx->stmt);
    }

  for (tree c = gimple_omp_for_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
    {
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_GANG:
	  this_mask |= GOMP_DIM_MASK (GOMP_DIM_GANG);
	  break;
	case OMP_CLAUSE_WORKER:
	  this_mask |= GOMP_DIM_MASK (GOMP_DIM_WORKER);
	  break;
	case OMP_CLAUSE_VECTOR:
	  this_mask |= GOMP_DIM_MASK (GOMP_DIM_VECTOR);
	  break;
	case OMP_CLAUSE_SEQ:
	  has_seq = true;
	  break;
	case OMP_CLAUSE_AUTO:
	  has_auto = true;
	  break;
	default:
	  break;
	}
    }

  if (checking)
    {
      if (has_seq && (this_mask || has_auto))
	error_at (gimple_location (stmt), "%<seq%> overrides other"
		  " OpenACC loop specifiers");
      else if (has_auto && this_mask)
	error_at (gimple_location (stmt), "%<auto%> conflicts with other"
		  " OpenACC loop specifiers");

      if (this_mask & outer_mask)
	error_at (gimple_location (stmt), "inner loop uses same"
		  " OpenACC parallelism as containing loop");
    }

  return outer_mask | this_mask;
}

/* Scan a GIMPLE_OMP_FOR. */

static omp_context *
scan_omp_for (gomp_for *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  size_t i;
  tree clauses = gimple_omp_for_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);

  if (is_gimple_omp_oacc (stmt))
    {
      omp_context *tgt = enclosing_target_ctx (outer_ctx);

      if (!tgt || is_oacc_parallel (tgt))
	for (tree c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	  {
	    char const *check = NULL;

	    switch (OMP_CLAUSE_CODE (c))
	      {
	      case OMP_CLAUSE_GANG:
		check = "gang";
		break;

	      case OMP_CLAUSE_WORKER:
		check = "worker";
		break;

	      case OMP_CLAUSE_VECTOR:
		check = "vector";
		break;

	      default:
		break;
	      }

	    if (check && OMP_CLAUSE_OPERAND (c, 0))
	      error_at (gimple_location (stmt),
			"argument not permitted on %qs clause in"
			" OpenACC %<parallel%>", check);
	  }

      if (tgt && is_oacc_kernels (tgt))
	{
	  /* Strip out reductions, as they are not handled yet. */
	  tree *prev_ptr = &clauses;

	  while (tree probe = *prev_ptr)
	    {
	      tree *next_ptr = &OMP_CLAUSE_CHAIN (probe);

	      if (OMP_CLAUSE_CODE (probe) == OMP_CLAUSE_REDUCTION)
		*prev_ptr = *next_ptr;
	      else
		prev_ptr = next_ptr;
	    }

	  gimple_omp_for_set_clauses (stmt, clauses);
	  check_oacc_kernel_gwv (stmt, ctx);
	}
    }

  scan_sharing_clauses (clauses, ctx);

  scan_omp (gimple_omp_for_pre_body_ptr (stmt), ctx);
  for (i = 0; i < gimple_omp_for_collapse (stmt); i++)
    {
      scan_omp_op (gimple_omp_for_index_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_initial_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_final_ptr (stmt, i), ctx);
      scan_omp_op (gimple_omp_for_incr_ptr (stmt, i), ctx);
    }
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
  return ctx;
}

/* Duplicate #pragma omp simd, producing one copy for the SIMT path and
   another for the regular SIMD path. */
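/* Schematically, the sequence built below has this shape (a sketch, not
   literal GIMPLE dump output; D.cond is an invented name):

     D.cond = GOMP_USE_SIMT ();
     if (D.cond != 0) goto lab1; else goto lab2;
     lab1: copy of the loop, with an added _simt_ clause;
	   goto lab3;
     lab2: the original loop;
     lab3: ...

   all wrapped in a new GIMPLE_BIND, so that the SIMT/SIMD decision can
   be resolved later for the chosen offload target. */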

static void
scan_omp_simd (gimple_stmt_iterator *gsi, gomp_for *stmt,
	       omp_context *outer_ctx)
{
  gbind *bind = gimple_build_bind (NULL, NULL, NULL);
  gsi_replace (gsi, bind, false);
  gimple_seq seq = NULL;
  gimple *g = gimple_build_call_internal (IFN_GOMP_USE_SIMT, 0);
  tree cond = create_tmp_var_raw (integer_type_node);
  DECL_CONTEXT (cond) = current_function_decl;
  DECL_SEEN_IN_BIND_EXPR_P (cond) = 1;
  gimple_bind_set_vars (bind, cond);
  gimple_call_set_lhs (g, cond);
  gimple_seq_add_stmt (&seq, g);
  tree lab1 = create_artificial_label (UNKNOWN_LOCATION);
  tree lab2 = create_artificial_label (UNKNOWN_LOCATION);
  tree lab3 = create_artificial_label (UNKNOWN_LOCATION);
  g = gimple_build_cond (NE_EXPR, cond, integer_zero_node, lab1, lab2);
  gimple_seq_add_stmt (&seq, g);
  g = gimple_build_label (lab1);
  gimple_seq_add_stmt (&seq, g);
  gimple_seq new_seq = copy_gimple_seq_and_replace_locals (stmt);
  gomp_for *new_stmt = as_a <gomp_for *> (new_seq);
  tree clause = build_omp_clause (gimple_location (stmt), OMP_CLAUSE__SIMT_);
  OMP_CLAUSE_CHAIN (clause) = gimple_omp_for_clauses (new_stmt);
  gimple_omp_for_set_clauses (new_stmt, clause);
  gimple_seq_add_stmt (&seq, new_stmt);
  g = gimple_build_goto (lab3);
  gimple_seq_add_stmt (&seq, g);
  g = gimple_build_label (lab2);
  gimple_seq_add_stmt (&seq, g);
  gimple_seq_add_stmt (&seq, stmt);
  g = gimple_build_label (lab3);
  gimple_seq_add_stmt (&seq, g);
  gimple_bind_set_body (bind, seq);
  update_stmt (bind);
  scan_omp_for (new_stmt, outer_ctx);
  scan_omp_for (stmt, outer_ctx)->simt_stmt = new_stmt;
}

/* Scan an OpenMP sections directive. */

static void
scan_omp_sections (gomp_sections *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;

  ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_sections_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Scan an OpenMP single directive. */

static void
scan_omp_single (gomp_single *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_copy_s");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  TYPE_NAME (ctx->record_type) = name;

  scan_sharing_clauses (gimple_omp_single_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = NULL;
  else
    layout_type (ctx->record_type);
}

/* Return true if the CLAUSES of an omp target guarantee that the base
   pointers used in the corresponding offloaded function are restrict. */

static bool
omp_target_base_pointers_restrict_p (tree clauses)
{
  /* The analysis relies on the GOMP_MAP_FORCE_* mapping kinds, which are only
     used by OpenACC. */
  if (flag_openacc == 0)
    return false;

  /* I. Basic example:

       void foo (void)
       {
	 unsigned int a[2], b[2];

	 #pragma acc kernels \
	   copyout (a) \
	   copyout (b)
	 {
	   a[0] = 0;
	   b[0] = 1;
	 }
       }

     After gimplification, we have:

       #pragma omp target oacc_kernels \
	 map(force_from:a [len: 8]) \
	 map(force_from:b [len: 8])
       {
	 a[0] = 0;
	 b[0] = 1;
       }

     Because both mappings have the force prefix, we know that they will be
     allocated when calling the corresponding offloaded function, which means
     we can mark the base pointers for a and b in the offloaded function as
     restrict. */

  tree c;
  for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
    {
      if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_MAP)
	return false;

      switch (OMP_CLAUSE_MAP_KIND (c))
	{
	case GOMP_MAP_FORCE_ALLOC:
	case GOMP_MAP_FORCE_TO:
	case GOMP_MAP_FORCE_FROM:
	case GOMP_MAP_FORCE_TOFROM:
	  break;
	default:
	  return false;
	}
    }

  return true;
}

/* Scan a GIMPLE_OMP_TARGET. */

static void
scan_omp_target (gomp_target *stmt, omp_context *outer_ctx)
{
  omp_context *ctx;
  tree name;
  bool offloaded = is_gimple_omp_offloaded (stmt);
  tree clauses = gimple_omp_target_clauses (stmt);

  ctx = new_omp_context (stmt, outer_ctx);
  ctx->field_map = splay_tree_new (splay_tree_compare_pointers, 0, 0);
  ctx->record_type = lang_hooks.types.make_type (RECORD_TYPE);
  name = create_tmp_var_name (".omp_data_t");
  name = build_decl (gimple_location (stmt),
		     TYPE_DECL, name, ctx->record_type);
  DECL_ARTIFICIAL (name) = 1;
  DECL_NAMELESS (name) = 1;
  TYPE_NAME (ctx->record_type) = name;
  TYPE_ARTIFICIAL (ctx->record_type) = 1;

  bool base_pointers_restrict = false;
  if (offloaded)
    {
      create_omp_child_function (ctx, false);
      gimple_omp_target_set_child_fn (stmt, ctx->cb.dst_fn);

      base_pointers_restrict = omp_target_base_pointers_restrict_p (clauses);
      if (base_pointers_restrict
	  && dump_file && (dump_flags & TDF_DETAILS))
	fprintf (dump_file,
		 "Base pointers in offloaded function are restrict\n");
    }

  scan_sharing_clauses (clauses, ctx, base_pointers_restrict);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);

  if (TYPE_FIELDS (ctx->record_type) == NULL)
    ctx->record_type = ctx->receiver_decl = NULL;
  else
    {
      TYPE_FIELDS (ctx->record_type)
	= nreverse (TYPE_FIELDS (ctx->record_type));
      if (flag_checking)
	{
	  unsigned int align = DECL_ALIGN (TYPE_FIELDS (ctx->record_type));
	  for (tree field = TYPE_FIELDS (ctx->record_type);
	       field;
	       field = DECL_CHAIN (field))
	    gcc_assert (DECL_ALIGN (field) == align);
	}
      layout_type (ctx->record_type);
      if (offloaded)
	fixup_child_record_type (ctx);
    }
}

/* Scan an OpenMP teams directive. */

static void
scan_omp_teams (gomp_teams *stmt, omp_context *outer_ctx)
{
  omp_context *ctx = new_omp_context (stmt, outer_ctx);
  scan_sharing_clauses (gimple_omp_teams_clauses (stmt), ctx);
  scan_omp (gimple_omp_body_ptr (stmt), ctx);
}

/* Check nesting restrictions. */
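/* For instance (hypothetical user code), the checks below reject a
   barrier closely nested in a critical region:

     #pragma omp critical
     {
       #pragma omp barrier  // error: barrier region may not be closely
     }			    // nested inside of ... critical ... region
*/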
static bool
check_omp_nesting_restrictions (gimple *stmt, omp_context *ctx)
{
  tree c;

  if (ctx && gimple_code (ctx->stmt) == GIMPLE_OMP_GRID_BODY)
    /* GRID_BODY is an artificial construct; nesting rules will be checked
       in the original copy of its contents. */
    return true;

  /* No nesting of non-OpenACC STMT (that is, an OpenMP one, or a GOMP builtin)
     inside an OpenACC CTX. */
  if (!(is_gimple_omp (stmt)
	&& is_gimple_omp_oacc (stmt))
      /* Except for atomic codes that we share with OpenMP. */
      && !(gimple_code (stmt) == GIMPLE_OMP_ATOMIC_LOAD
	   || gimple_code (stmt) == GIMPLE_OMP_ATOMIC_STORE))
    {
      if (oacc_get_fn_attrib (cfun->decl) != NULL)
	{
	  error_at (gimple_location (stmt),
		    "non-OpenACC construct inside of OpenACC routine");
	  return false;
	}
      else
	for (omp_context *octx = ctx; octx != NULL; octx = octx->outer)
	  if (is_gimple_omp (octx->stmt)
	      && is_gimple_omp_oacc (octx->stmt))
	    {
	      error_at (gimple_location (stmt),
			"non-OpenACC construct inside of OpenACC region");
	      return false;
	    }
    }

  if (ctx != NULL)
    {
      if (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	  && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	{
	  c = NULL_TREE;
	  if (gimple_code (stmt) == GIMPLE_OMP_ORDERED)
	    {
	      c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
	      if (omp_find_clause (c, OMP_CLAUSE_SIMD))
		{
		  if (omp_find_clause (c, OMP_CLAUSE_THREADS)
		      && (ctx->outer == NULL
			  || !gimple_omp_for_combined_into_p (ctx->stmt)
			  || gimple_code (ctx->outer->stmt) != GIMPLE_OMP_FOR
			  || (gimple_omp_for_kind (ctx->outer->stmt)
			      != GF_OMP_FOR_KIND_FOR)
			  || !gimple_omp_for_combined_p (ctx->outer->stmt)))
		    {
		      error_at (gimple_location (stmt),
				"%<ordered simd threads%> must be closely "
				"nested inside of %<for simd%> region");
		      return false;
		    }
		  return true;
		}
	    }
	  error_at (gimple_location (stmt),
		    "OpenMP constructs other than %<#pragma omp ordered simd%>"
		    " may not be nested inside %<simd%> region");
	  return false;
	}
      else if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
	{
	  if ((gimple_code (stmt) != GIMPLE_OMP_FOR
	       || ((gimple_omp_for_kind (stmt) != GF_OMP_FOR_KIND_DISTRIBUTE)
		   && (gimple_omp_for_kind (stmt)
		       != GF_OMP_FOR_KIND_GRID_LOOP)))
	      && gimple_code (stmt) != GIMPLE_OMP_PARALLEL)
	    {
	      error_at (gimple_location (stmt),
			"only %<distribute%> or %<parallel%> regions are "
			"allowed to be strictly nested inside %<teams%> "
			"region");
	      return false;
	    }
	}
    }
  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_FOR:
      if (gimple_omp_for_kind (stmt) & GF_OMP_FOR_SIMD)
	return true;
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_DISTRIBUTE)
	{
	  if (ctx != NULL && gimple_code (ctx->stmt) != GIMPLE_OMP_TEAMS)
	    {
	      error_at (gimple_location (stmt),
			"%<distribute%> region must be strictly nested "
			"inside %<teams%> construct");
	      return false;
	    }
	  return true;
	}
      /* We split taskloop into a task with a nested taskloop in it. */
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_TASKLOOP)
	return true;
      if (gimple_omp_for_kind (stmt) == GF_OMP_FOR_KIND_OACC_LOOP)
	{
	  bool ok = false;

	  if (ctx)
	    switch (gimple_code (ctx->stmt))
	      {
	      case GIMPLE_OMP_FOR:
		ok = (gimple_omp_for_kind (ctx->stmt)
		      == GF_OMP_FOR_KIND_OACC_LOOP);
		break;

	      case GIMPLE_OMP_TARGET:
		switch (gimple_omp_target_kind (ctx->stmt))
		  {
		  case GF_OMP_TARGET_KIND_OACC_PARALLEL:
		  case GF_OMP_TARGET_KIND_OACC_KERNELS:
		    ok = true;
		    break;

		  default:
		    break;
		  }

	      default:
		break;
	      }
	  else if (oacc_get_fn_attrib (current_function_decl))
	    ok = true;
	  if (!ok)
	    {
	      error_at (gimple_location (stmt),
			"OpenACC loop directive must be associated with"
			" an OpenACC compute region");
	      return false;
	    }
	}
      /* FALLTHRU */
    case GIMPLE_CALL:
      if (is_gimple_call (stmt)
	  && (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
	      == BUILT_IN_GOMP_CANCEL
	      || DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		 == BUILT_IN_GOMP_CANCELLATION_POINT))
	{
	  const char *bad = NULL;
	  const char *kind = NULL;
	  const char *construct
	    = (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
	       == BUILT_IN_GOMP_CANCEL)
	      ? "#pragma omp cancel"
	      : "#pragma omp cancellation point";
	  if (ctx == NULL)
	    {
	      error_at (gimple_location (stmt), "orphaned %qs construct",
			construct);
	      return false;
	    }
	  switch (tree_fits_shwi_p (gimple_call_arg (stmt, 0))
		  ? tree_to_shwi (gimple_call_arg (stmt, 0))
		  : 0)
	    {
	    case 1:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_PARALLEL)
		bad = "#pragma omp parallel";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		ctx->cancellable = true;
	      kind = "parallel";
	      break;
	    case 2:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
		  || gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR)
		bad = "#pragma omp for";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		{
		  ctx->cancellable = true;
		  if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				       OMP_CLAUSE_NOWAIT))
		    warning_at (gimple_location (stmt), 0,
				"%<#pragma omp cancel for%> inside "
				"%<nowait%> for construct");
		  if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				       OMP_CLAUSE_ORDERED))
		    warning_at (gimple_location (stmt), 0,
				"%<#pragma omp cancel for%> inside "
				"%<ordered%> for construct");
		}
	      kind = "for";
	      break;
	    case 4:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_SECTIONS
		  && gimple_code (ctx->stmt) != GIMPLE_OMP_SECTION)
		bad = "#pragma omp sections";
	      else if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		       == BUILT_IN_GOMP_CANCEL
		       && !integer_zerop (gimple_call_arg (stmt, 1)))
		{
		  if (gimple_code (ctx->stmt) == GIMPLE_OMP_SECTIONS)
		    {
		      ctx->cancellable = true;
		      if (omp_find_clause (gimple_omp_sections_clauses
					   (ctx->stmt),
					   OMP_CLAUSE_NOWAIT))
			warning_at (gimple_location (stmt), 0,
				    "%<#pragma omp cancel sections%> inside "
				    "%<nowait%> sections construct");
		    }
		  else
		    {
		      gcc_assert (ctx->outer
				  && gimple_code (ctx->outer->stmt)
				     == GIMPLE_OMP_SECTIONS);
		      ctx->outer->cancellable = true;
		      if (omp_find_clause (gimple_omp_sections_clauses
					   (ctx->outer->stmt),
					   OMP_CLAUSE_NOWAIT))
			warning_at (gimple_location (stmt), 0,
				    "%<#pragma omp cancel sections%> inside "
				    "%<nowait%> sections construct");
		    }
		}
	      kind = "sections";
	      break;
	    case 8:
	      if (gimple_code (ctx->stmt) != GIMPLE_OMP_TASK)
		bad = "#pragma omp task";
	      else
		{
		  for (omp_context *octx = ctx->outer;
		       octx; octx = octx->outer)
		    {
		      switch (gimple_code (octx->stmt))
			{
			case GIMPLE_OMP_TASKGROUP:
			  break;
			case GIMPLE_OMP_TARGET:
			  if (gimple_omp_target_kind (octx->stmt)
			      != GF_OMP_TARGET_KIND_REGION)
			    continue;
			  /* FALLTHRU */
			case GIMPLE_OMP_PARALLEL:
			case GIMPLE_OMP_TEAMS:
			  error_at (gimple_location (stmt),
				    "%<%s taskgroup%> construct not closely "
				    "nested inside of %<taskgroup%> region",
				    construct);
			  return false;
			default:
			  continue;
			}
		      break;
		    }
		  ctx->cancellable = true;
		}
	      kind = "taskgroup";
	      break;
	    default:
	      error_at (gimple_location (stmt), "invalid arguments");
	      return false;
	    }
	  if (bad)
	    {
	      error_at (gimple_location (stmt),
			"%<%s %s%> construct not closely nested inside of %qs",
			construct, kind, bad);
	      return false;
	    }
	}
      /* FALLTHRU */
    case GIMPLE_OMP_SECTIONS:
    case GIMPLE_OMP_SINGLE:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	    if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
		&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
	      break;
	    /* FALLTHRU */
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_ORDERED:
	  case GIMPLE_OMP_MASTER:
	  case GIMPLE_OMP_TASK:
	  case GIMPLE_OMP_CRITICAL:
	    if (is_gimple_call (stmt))
	      {
		if (DECL_FUNCTION_CODE (gimple_call_fndecl (stmt))
		    != BUILT_IN_GOMP_BARRIER)
		  return true;
		error_at (gimple_location (stmt),
			  "barrier region may not be closely nested inside "
			  "of work-sharing, %<critical%>, %<ordered%>, "
			  "%<master%>, explicit %<task%> or %<taskloop%> "
			  "region");
		return false;
	      }
	    error_at (gimple_location (stmt),
		      "work-sharing region may not be closely nested inside "
		      "of work-sharing, %<critical%>, %<ordered%>, "
		      "%<master%>, explicit %<task%> or %<taskloop%> region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	  case GIMPLE_OMP_TEAMS:
	    return true;
	  case GIMPLE_OMP_TARGET:
	    if (gimple_omp_target_kind (ctx->stmt)
		== GF_OMP_TARGET_KIND_REGION)
	      return true;
	    break;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_MASTER:
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_FOR:
	    if (gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_FOR
		&& gimple_omp_for_kind (ctx->stmt) != GF_OMP_FOR_KIND_TASKLOOP)
	      break;
	    /* FALLTHRU */
	  case GIMPLE_OMP_SECTIONS:
	  case GIMPLE_OMP_SINGLE:
	  case GIMPLE_OMP_TASK:
	    error_at (gimple_location (stmt),
		      "%<master%> region may not be closely nested inside "
		      "of work-sharing, explicit %<task%> or %<taskloop%> "
		      "region");
	    return false;
	  case GIMPLE_OMP_PARALLEL:
	  case GIMPLE_OMP_TEAMS:
	    return true;
	  case GIMPLE_OMP_TARGET:
	    if (gimple_omp_target_kind (ctx->stmt)
		== GF_OMP_TARGET_KIND_REGION)
	      return true;
	    break;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_TASK:
      for (c = gimple_omp_task_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
	  {
	    enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<depend(%s)%> is only allowed in %<omp ordered%>",
		      kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
	    return false;
	  }
      break;
    case GIMPLE_OMP_ORDERED:
      for (c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
	   c; c = OMP_CLAUSE_CHAIN (c))
	{
	  if (OMP_CLAUSE_CODE (c) != OMP_CLAUSE_DEPEND)
	    {
	      gcc_assert (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_THREADS
			  || OMP_CLAUSE_CODE (c) == OMP_CLAUSE_SIMD);
	      continue;
	    }
	  enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
	  if (kind == OMP_CLAUSE_DEPEND_SOURCE
	      || kind == OMP_CLAUSE_DEPEND_SINK)
	    {
	      tree oclause;
	      /* Look for containing ordered(N) loop. */
	      if (ctx == NULL
		  || gimple_code (ctx->stmt) != GIMPLE_OMP_FOR
		  || (oclause
		      = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
					 OMP_CLAUSE_ORDERED)) == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%<ordered%> construct with %<depend%> clause "
			    "must be closely nested inside an %<ordered%> "
			    "loop");
		  return false;
		}
	      else if (OMP_CLAUSE_ORDERED_EXPR (oclause) == NULL_TREE)
		{
		  error_at (OMP_CLAUSE_LOCATION (c),
			    "%<ordered%> construct with %<depend%> clause "
			    "must be closely nested inside a loop with "
			    "%<ordered%> clause with a parameter");
		  return false;
		}
	    }
	  else
	    {
	      error_at (OMP_CLAUSE_LOCATION (c),
			"invalid depend kind in omp %<ordered%> %<depend%>");
	      return false;
	    }
	}
      c = gimple_omp_ordered_clauses (as_a <gomp_ordered *> (stmt));
      if (omp_find_clause (c, OMP_CLAUSE_SIMD))
	{
	  /* ordered simd must be closely nested inside of a simd region,
	     and a simd region must not encounter constructs other than
	     ordered simd; therefore ordered simd may be either orphaned,
	     or ctx->stmt must be simd. The latter case was already
	     handled earlier. */
	  if (ctx != NULL)
	    {
	      error_at (gimple_location (stmt),
			"%<ordered%> %<simd%> must be closely nested inside "
			"%<simd%> region");
	      return false;
	    }
	}
      for (; ctx != NULL; ctx = ctx->outer)
	switch (gimple_code (ctx->stmt))
	  {
	  case GIMPLE_OMP_CRITICAL:
	  case GIMPLE_OMP_TASK:
	  case GIMPLE_OMP_ORDERED:
	  ordered_in_taskloop:
	    error_at (gimple_location (stmt),
		      "%<ordered%> region may not be closely nested inside "
		      "of %<critical%>, %<ordered%>, explicit %<task%> or "
		      "%<taskloop%> region");
	    return false;
	  case GIMPLE_OMP_FOR:
	    if (gimple_omp_for_kind (ctx->stmt) == GF_OMP_FOR_KIND_TASKLOOP)
	      goto ordered_in_taskloop;
	    if (omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				 OMP_CLAUSE_ORDERED) == NULL)
	      {
		error_at (gimple_location (stmt),
			  "%<ordered%> region must be closely nested inside "
			  "a loop region with an %<ordered%> clause");
		return false;
	      }
	    return true;
	  case GIMPLE_OMP_TARGET:
	    if (gimple_omp_target_kind (ctx->stmt)
		!= GF_OMP_TARGET_KIND_REGION)
	      break;
	    /* FALLTHRU */
	  case GIMPLE_OMP_PARALLEL:
	  case GIMPLE_OMP_TEAMS:
	    error_at (gimple_location (stmt),
		      "%<ordered%> region must be closely nested inside "
		      "a loop region with an %<ordered%> clause");
	    return false;
	  default:
	    break;
	  }
      break;
    case GIMPLE_OMP_CRITICAL:
      {
	tree this_stmt_name
	  = gimple_omp_critical_name (as_a <gomp_critical *> (stmt));
	for (; ctx != NULL; ctx = ctx->outer)
	  if (gomp_critical *other_crit
	      = dyn_cast <gomp_critical *> (ctx->stmt))
	    if (this_stmt_name == gimple_omp_critical_name (other_crit))
	      {
		error_at (gimple_location (stmt),
			  "%<critical%> region may not be nested inside "
			  "a %<critical%> region with the same name");
		return false;
	      }
      }
      break;
    case GIMPLE_OMP_TEAMS:
      if (ctx == NULL
	  || gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET
	  || gimple_omp_target_kind (ctx->stmt) != GF_OMP_TARGET_KIND_REGION)
	{
	  error_at (gimple_location (stmt),
		    "%<teams%> construct not closely nested inside of "
		    "%<target%> construct");
	  return false;
	}
      break;
    case GIMPLE_OMP_TARGET:
      for (c = gimple_omp_target_clauses (stmt); c; c = OMP_CLAUSE_CHAIN (c))
	if (OMP_CLAUSE_CODE (c) == OMP_CLAUSE_DEPEND
	    && (OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SOURCE
		|| OMP_CLAUSE_DEPEND_KIND (c) == OMP_CLAUSE_DEPEND_SINK))
	  {
	    enum omp_clause_depend_kind kind = OMP_CLAUSE_DEPEND_KIND (c);
	    error_at (OMP_CLAUSE_LOCATION (c),
		      "%<depend(%s)%> is only allowed in %<omp ordered%>",
		      kind == OMP_CLAUSE_DEPEND_SOURCE ? "source" : "sink");
	    return false;
	  }
      if (is_gimple_omp_offloaded (stmt)
	  && oacc_get_fn_attrib (cfun->decl) != NULL)
	{
	  error_at (gimple_location (stmt),
		    "OpenACC region inside of OpenACC routine, nested "
		    "parallelism not supported yet");
	  return false;
	}
      for (; ctx != NULL; ctx = ctx->outer)
	{
	  if (gimple_code (ctx->stmt) != GIMPLE_OMP_TARGET)
	    {
	      if (is_gimple_omp (stmt)
		  && is_gimple_omp_oacc (stmt)
		  && is_gimple_omp (ctx->stmt))
		{
		  error_at (gimple_location (stmt),
			    "OpenACC construct inside of non-OpenACC region");
		  return false;
		}
	      continue;
	    }

	  const char *stmt_name, *ctx_stmt_name;
	  switch (gimple_omp_target_kind (stmt))
	    {
	    case GF_OMP_TARGET_KIND_REGION: stmt_name = "target"; break;
	    case GF_OMP_TARGET_KIND_DATA: stmt_name = "target data"; break;
	    case GF_OMP_TARGET_KIND_UPDATE: stmt_name = "target update"; break;
	    case GF_OMP_TARGET_KIND_ENTER_DATA:
	      stmt_name = "target enter data"; break;
	    case GF_OMP_TARGET_KIND_EXIT_DATA:
	      stmt_name = "target exit data"; break;
	    case GF_OMP_TARGET_KIND_OACC_PARALLEL: stmt_name = "parallel"; break;
	    case GF_OMP_TARGET_KIND_OACC_KERNELS: stmt_name = "kernels"; break;
	    case GF_OMP_TARGET_KIND_OACC_DATA: stmt_name = "data"; break;
	    case GF_OMP_TARGET_KIND_OACC_UPDATE: stmt_name = "update"; break;
	    case GF_OMP_TARGET_KIND_OACC_ENTER_EXIT_DATA:
	      stmt_name = "enter/exit data"; break;
	    case GF_OMP_TARGET_KIND_OACC_HOST_DATA: stmt_name = "host_data";
	      break;
	    default: gcc_unreachable ();
	    }
	  switch (gimple_omp_target_kind (ctx->stmt))
	    {
	    case GF_OMP_TARGET_KIND_REGION: ctx_stmt_name = "target"; break;
	    case GF_OMP_TARGET_KIND_DATA: ctx_stmt_name = "target data"; break;
	    case GF_OMP_TARGET_KIND_OACC_PARALLEL:
	      ctx_stmt_name = "parallel"; break;
	    case GF_OMP_TARGET_KIND_OACC_KERNELS:
	      ctx_stmt_name = "kernels"; break;
	    case GF_OMP_TARGET_KIND_OACC_DATA: ctx_stmt_name = "data"; break;
	    case GF_OMP_TARGET_KIND_OACC_HOST_DATA:
	      ctx_stmt_name = "host_data"; break;
	    default: gcc_unreachable ();
	    }

	  /* OpenACC/OpenMP mismatch? */
	  if (is_gimple_omp_oacc (stmt)
	      != is_gimple_omp_oacc (ctx->stmt))
	    {
	      error_at (gimple_location (stmt),
			"%s %qs construct inside of %s %qs region",
			(is_gimple_omp_oacc (stmt)
			 ? "OpenACC" : "OpenMP"), stmt_name,
			(is_gimple_omp_oacc (ctx->stmt)
			 ? "OpenACC" : "OpenMP"), ctx_stmt_name);
	      return false;
	    }
	  if (is_gimple_omp_offloaded (ctx->stmt))
	    {
	      /* No GIMPLE_OMP_TARGET inside offloaded OpenACC CTX. */
	      if (is_gimple_omp_oacc (ctx->stmt))
		{
		  error_at (gimple_location (stmt),
			    "%qs construct inside of %qs region",
			    stmt_name, ctx_stmt_name);
		  return false;
		}
	      else
		{
		  warning_at (gimple_location (stmt), 0,
			      "%qs construct inside of %qs region",
			      stmt_name, ctx_stmt_name);
		}
	    }
	}
      break;
    default:
      break;
    }
  return true;
}


/* Helper function for scan_omp.

   Callback for walk_tree or operators in walk_gimple_stmt used to
   scan for OMP directives in TP. */

static tree
scan_omp_1_op (tree *tp, int *walk_subtrees, void *data)
{
  struct walk_stmt_info *wi = (struct walk_stmt_info *) data;
  omp_context *ctx = (omp_context *) wi->info;
  tree t = *tp;

  switch (TREE_CODE (t))
    {
    case VAR_DECL:
    case PARM_DECL:
    case LABEL_DECL:
    case RESULT_DECL:
      if (ctx)
	{
	  tree repl = remap_decl (t, &ctx->cb);
	  gcc_checking_assert (TREE_CODE (repl) != ERROR_MARK);
	  *tp = repl;
	}
      break;

    default:
      if (ctx && TYPE_P (t))
	*tp = remap_type (t, &ctx->cb);
      else if (!DECL_P (t))
	{
	  *walk_subtrees = 1;
	  if (ctx)
	    {
	      tree tem = remap_type (TREE_TYPE (t), &ctx->cb);
	      if (tem != TREE_TYPE (t))
		{
		  if (TREE_CODE (t) == INTEGER_CST)
		    *tp = wide_int_to_tree (tem, wi::to_wide (t));
		  else
		    TREE_TYPE (t) = tem;
		}
	    }
	}
      break;
    }

  return NULL_TREE;
}

/* Return true if FNDECL is a setjmp or a longjmp. */

static bool
setjmp_or_longjmp_p (const_tree fndecl)
{
  if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL
      && (DECL_FUNCTION_CODE (fndecl) == BUILT_IN_SETJMP
	  || DECL_FUNCTION_CODE (fndecl) == BUILT_IN_LONGJMP))
    return true;

  tree declname = DECL_NAME (fndecl);
  if (!declname)
    return false;
  const char *name = IDENTIFIER_POINTER (declname);
  return !strcmp (name, "setjmp") || !strcmp (name, "longjmp");
}


/* Helper function for scan_omp.

   Callback for walk_gimple_stmt used to scan for OMP directives in
   the current statement in GSI. */

static tree
scan_omp_1_stmt (gimple_stmt_iterator *gsi, bool *handled_ops_p,
		 struct walk_stmt_info *wi)
{
  gimple *stmt = gsi_stmt (*gsi);
  omp_context *ctx = (omp_context *) wi->info;

  if (gimple_has_location (stmt))
    input_location = gimple_location (stmt);

  /* Check the nesting restrictions. */
  bool remove = false;
  if (is_gimple_omp (stmt))
    remove = !check_omp_nesting_restrictions (stmt, ctx);
  else if (is_gimple_call (stmt))
    {
      tree fndecl = gimple_call_fndecl (stmt);
      if (fndecl)
	{
	  if (setjmp_or_longjmp_p (fndecl)
	      && ctx
	      && gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
	      && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD)
	    {
	      remove = true;
	      error_at (gimple_location (stmt),
			"setjmp/longjmp inside simd construct");
	    }
	  else if (DECL_BUILT_IN_CLASS (fndecl) == BUILT_IN_NORMAL)
	    switch (DECL_FUNCTION_CODE (fndecl))
	      {
	      case BUILT_IN_GOMP_BARRIER:
	      case BUILT_IN_GOMP_CANCEL:
	      case BUILT_IN_GOMP_CANCELLATION_POINT:
	      case BUILT_IN_GOMP_TASKYIELD:
	      case BUILT_IN_GOMP_TASKWAIT:
	      case BUILT_IN_GOMP_TASKGROUP_START:
	      case BUILT_IN_GOMP_TASKGROUP_END:
		remove = !check_omp_nesting_restrictions (stmt, ctx);
		break;
	      default:
		break;
	      }
	}
    }
  if (remove)
    {
      stmt = gimple_build_nop ();
      gsi_replace (gsi, stmt, false);
    }

  *handled_ops_p = true;

  switch (gimple_code (stmt))
    {
    case GIMPLE_OMP_PARALLEL:
      taskreg_nesting_level++;
      scan_omp_parallel (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_TASK:
      taskreg_nesting_level++;
      scan_omp_task (gsi, ctx);
      taskreg_nesting_level--;
      break;

    case GIMPLE_OMP_FOR:
      if (((gimple_omp_for_kind (as_a <gomp_for *> (stmt))
	    & GF_OMP_FOR_KIND_MASK) == GF_OMP_FOR_KIND_SIMD)
	  && omp_maybe_offloaded_ctx (ctx)
	  && omp_max_simt_vf ())
	scan_omp_simd (gsi, as_a <gomp_for *> (stmt), ctx);
      else
	scan_omp_for (as_a <gomp_for *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTIONS:
      scan_omp_sections (as_a <gomp_sections *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SINGLE:
      scan_omp_single (as_a <gomp_single *> (stmt), ctx);
      break;

    case GIMPLE_OMP_SECTION:
    case GIMPLE_OMP_MASTER:
    case GIMPLE_OMP_TASKGROUP:
    case GIMPLE_OMP_ORDERED:
    case GIMPLE_OMP_CRITICAL:
    case GIMPLE_OMP_GRID_BODY:
      ctx = new_omp_context (stmt, ctx);
      scan_omp (gimple_omp_body_ptr (stmt), ctx);
      break;

    case GIMPLE_OMP_TARGET:
      scan_omp_target (as_a <gomp_target *> (stmt), ctx);
      break;

    case GIMPLE_OMP_TEAMS:
      scan_omp_teams (as_a <gomp_teams *> (stmt), ctx);
      break;

    case GIMPLE_BIND:
      {
	tree var;

	*handled_ops_p = false;
	if (ctx)
	  for (var = gimple_bind_vars (as_a <gbind *> (stmt));
	       var;
	       var = DECL_CHAIN (var))
	    insert_decl_map (&ctx->cb, var, var);
      }
      break;
    default:
      *handled_ops_p = false;
      break;
    }

  return NULL_TREE;
}


/* Scan all the statements starting at the current statement. CTX
   contains context information about the OMP directives and
   clauses found during the scan. */

static void
scan_omp (gimple_seq *body_p, omp_context *ctx)
{
  location_t saved_location;
  struct walk_stmt_info wi;

  memset (&wi, 0, sizeof (wi));
  wi.info = ctx;
  wi.want_locations = true;

  saved_location = input_location;
  walk_gimple_seq_mod (body_p, scan_omp_1_stmt, scan_omp_1_op, &wi);
  input_location = saved_location;
}

/* Re-gimplification and code generation routines. */

/* If a context was created for STMT when it was scanned, return it. */

static omp_context *
maybe_lookup_ctx (gimple *stmt)
{
  splay_tree_node n;
  n = splay_tree_lookup (all_contexts, (splay_tree_key) stmt);
  return n ? (omp_context *) n->value : NULL;
}


/* Find the mapping for DECL in CTX or the immediately enclosing
   context that has a mapping for DECL.

   If CTX is a nested parallel directive, we may have to use the decl
   mappings created in CTX's parent context. Suppose that we have the
   following parallel nesting (variable UIDs shown for clarity):

     iD.1562 = 0;
     #omp parallel shared(iD.1562)	-> outer parallel
       iD.1562 = iD.1562 + 1;

       #omp parallel shared (iD.1562)	-> inner parallel
	 iD.1562 = iD.1562 - 1;

   Each parallel structure will create a distinct .omp_data_s structure
   for copying iD.1562 in/out of the directive:

     outer parallel .omp_data_s.1.i -> iD.1562
     inner parallel .omp_data_s.2.i -> iD.1562

   A shared variable mapping will produce a copy-out operation before
   the parallel directive and a copy-in operation after it. So, in
   this case we would have:

     iD.1562 = 0;
     .omp_data_o.1.i = iD.1562;
     #omp parallel shared(iD.1562)	-> outer parallel
       .omp_data_i.1 = &.omp_data_o.1
       .omp_data_i.1->i = .omp_data_i.1->i + 1;

       .omp_data_o.2.i = iD.1562;	-> **
       #omp parallel shared(iD.1562)	-> inner parallel
	 .omp_data_i.2 = &.omp_data_o.2
	 .omp_data_i.2->i = .omp_data_i.2->i - 1;

   ** This is a problem. The symbol iD.1562 cannot be referenced
   inside the body of the outer parallel region. But since we are
   emitting this copy operation while expanding the inner parallel
   directive, we need to access the CTX structure of the outer
   parallel directive to get the correct mapping:

     .omp_data_o.2.i = .omp_data_i.1->i

   Since there may be other workshare or parallel directives enclosing
   the parallel directive, it may be necessary to walk up the context
   parent chain. This is not a problem in general because nested
   parallelism happens only rarely. */

static tree
lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  gcc_assert (!ctx->is_nested || t || is_global_var (decl));

  return t ? t : decl;
}


/* Similar to lookup_decl_in_outer_ctx, but return DECL if not found
   in outer contexts. */

static tree
maybe_lookup_decl_in_outer_ctx (tree decl, omp_context *ctx)
{
  tree t = NULL;
  omp_context *up;

  for (up = ctx->outer, t = NULL; up && t == NULL; up = up->outer)
    t = maybe_lookup_decl (decl, up);

  return t ? t : decl;
}


/* Construct the initialization value for reduction operation OP. */
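/* For instance, the identity values produced by the switch below are:

     reduction (+:x)	->  0
     reduction (*:x)	->  1
     reduction (&:x)	->  ~0 (all bits set)
     reduction (max:x)	->  -inf, or TYPE_MIN_VALUE for integral types
     reduction (min:x)	->  +inf, or TYPE_MAX_VALUE for integral types */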

tree
omp_reduction_init_op (location_t loc, enum tree_code op, tree type)
{
  switch (op)
    {
    case PLUS_EXPR:
    case MINUS_EXPR:
    case BIT_IOR_EXPR:
    case BIT_XOR_EXPR:
    case TRUTH_OR_EXPR:
    case TRUTH_ORIF_EXPR:
    case TRUTH_XOR_EXPR:
    case NE_EXPR:
      return build_zero_cst (type);

    case MULT_EXPR:
    case TRUTH_AND_EXPR:
    case TRUTH_ANDIF_EXPR:
    case EQ_EXPR:
      return fold_convert_loc (loc, type, integer_one_node);

    case BIT_AND_EXPR:
      return fold_convert_loc (loc, type, integer_minus_one_node);

    case MAX_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max, min;
	  if (HONOR_INFINITIES (type))
	    {
	      real_inf (&max);
	      real_arithmetic (&min, NEGATE_EXPR, &max, NULL);
	    }
	  else
	    real_maxval (&min, 1, TYPE_MODE (type));
	  return build_real (type, min);
	}
      else if (POINTER_TYPE_P (type))
	{
	  wide_int min
	    = wi::min_value (TYPE_PRECISION (type), TYPE_SIGN (type));
	  return wide_int_to_tree (type, min);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MIN_VALUE (type);
	}

    case MIN_EXPR:
      if (SCALAR_FLOAT_TYPE_P (type))
	{
	  REAL_VALUE_TYPE max;
	  if (HONOR_INFINITIES (type))
	    real_inf (&max);
	  else
	    real_maxval (&max, 0, TYPE_MODE (type));
	  return build_real (type, max);
	}
      else if (POINTER_TYPE_P (type))
	{
	  wide_int max
	    = wi::max_value (TYPE_PRECISION (type), TYPE_SIGN (type));
	  return wide_int_to_tree (type, max);
	}
      else
	{
	  gcc_assert (INTEGRAL_TYPE_P (type));
	  return TYPE_MAX_VALUE (type);
	}

    default:
      gcc_unreachable ();
    }
}

/* Construct the initialization value for reduction CLAUSE. */

tree
omp_reduction_init (tree clause, tree type)
{
  return omp_reduction_init_op (OMP_CLAUSE_LOCATION (clause),
				OMP_CLAUSE_REDUCTION_CODE (clause), type);
}

/* Return alignment to be assumed for var in CLAUSE, which should be
   OMP_CLAUSE_ALIGNED. */
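/* E.g. on a hypothetical target whose preferred SIMD mode for SFmode is
   V4SF, "#pragma omp simd aligned (p)" without an explicit alignment
   would yield the vector type's unit alignment, 16 bytes, from the mode
   walk below. */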

static tree
omp_clause_aligned_alignment (tree clause)
{
  if (OMP_CLAUSE_ALIGNED_ALIGNMENT (clause))
    return OMP_CLAUSE_ALIGNED_ALIGNMENT (clause);

  /* Otherwise return implementation defined alignment. */
  unsigned int al = 1;
  opt_scalar_mode mode_iter;
  int vs = targetm.vectorize.autovectorize_vector_sizes ();
  if (vs)
    vs = 1 << floor_log2 (vs);
  static enum mode_class classes[]
    = { MODE_INT, MODE_VECTOR_INT, MODE_FLOAT, MODE_VECTOR_FLOAT };
  for (int i = 0; i < 4; i += 2)
    /* The for loop above dictates that we only walk through scalar classes. */
    FOR_EACH_MODE_IN_CLASS (mode_iter, classes[i])
      {
	scalar_mode mode = mode_iter.require ();
	machine_mode vmode = targetm.vectorize.preferred_simd_mode (mode);
	if (GET_MODE_CLASS (vmode) != classes[i + 1])
	  continue;
	while (vs
	       && GET_MODE_SIZE (vmode) < vs
	       && GET_MODE_2XWIDER_MODE (vmode).exists ())
	  vmode = GET_MODE_2XWIDER_MODE (vmode).require ();

	tree type = lang_hooks.types.type_for_mode (mode, 1);
	if (type == NULL_TREE || TYPE_MODE (type) != mode)
	  continue;
	type = build_vector_type (type, GET_MODE_SIZE (vmode)
					/ GET_MODE_SIZE (mode));
	if (TYPE_MODE (type) != vmode)
	  continue;
	if (TYPE_ALIGN_UNIT (type) > al)
	  al = TYPE_ALIGN_UNIT (type);
      }
  return build_int_cst (integer_type_node, al);
}


/* This structure is part of the interface between lower_rec_simd_input_clauses
   and lower_rec_input_clauses. */

struct omplow_simd_context {
  tree idx;
  tree lane;
  vec<tree, va_heap> simt_eargs;
  gimple_seq simt_dlist;
  int max_vf;
  bool is_simt;
};

/* Helper function of lower_rec_input_clauses, used for #pragma omp simd
   privatization. */
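/* Conceptually (a sketch with invented names): a privatized variable x
   in a simd loop becomes an "omp simd array"

     ty D.simdarr[max_vf];

   where uses in the loop body go through D.simdarr[idx] (IVAR) and the
   value that survives the loop goes through D.simdarr[lane] (LVAR).
   Under SIMT, a per-lane private variable is used instead and its
   address is passed to the SIMT region through simt_eargs. */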

static bool
lower_rec_simd_input_clauses (tree new_var, omp_context *ctx,
			      omplow_simd_context *sctx, tree &ivar, tree &lvar)
{
  if (sctx->max_vf == 0)
    {
      sctx->max_vf = sctx->is_simt ? omp_max_simt_vf () : omp_max_vf ();
      if (sctx->max_vf > 1)
	{
	  tree c = omp_find_clause (gimple_omp_for_clauses (ctx->stmt),
				    OMP_CLAUSE_SAFELEN);
	  if (c
	      && (TREE_CODE (OMP_CLAUSE_SAFELEN_EXPR (c)) != INTEGER_CST
		  || tree_int_cst_sgn (OMP_CLAUSE_SAFELEN_EXPR (c)) != 1))
	    sctx->max_vf = 1;
	  else if (c && compare_tree_int (OMP_CLAUSE_SAFELEN_EXPR (c),
					  sctx->max_vf) == -1)
	    sctx->max_vf = tree_to_shwi (OMP_CLAUSE_SAFELEN_EXPR (c));
	}
      if (sctx->max_vf > 1)
	{
	  sctx->idx = create_tmp_var (unsigned_type_node);
	  sctx->lane = create_tmp_var (unsigned_type_node);
	}
    }
  if (sctx->max_vf == 1)
    return false;

  if (sctx->is_simt)
    {
      if (is_gimple_reg (new_var))
	{
	  ivar = lvar = new_var;
	  return true;
	}
      tree type = TREE_TYPE (new_var), ptype = build_pointer_type (type);
      ivar = lvar = create_tmp_var (type);
      TREE_ADDRESSABLE (ivar) = 1;
      DECL_ATTRIBUTES (ivar) = tree_cons (get_identifier ("omp simt private"),
					  NULL, DECL_ATTRIBUTES (ivar));
      sctx->simt_eargs.safe_push (build1 (ADDR_EXPR, ptype, ivar));
      tree clobber = build_constructor (type, NULL);
      TREE_THIS_VOLATILE (clobber) = 1;
      gimple *g = gimple_build_assign (ivar, clobber);
      gimple_seq_add_stmt (&sctx->simt_dlist, g);
    }
  else
    {
      tree atype = build_array_type_nelts (TREE_TYPE (new_var), sctx->max_vf);
      tree avar = create_tmp_var_raw (atype);
      if (TREE_ADDRESSABLE (new_var))
	TREE_ADDRESSABLE (avar) = 1;
      DECL_ATTRIBUTES (avar)
	= tree_cons (get_identifier ("omp simd array"), NULL,
		     DECL_ATTRIBUTES (avar));
      gimple_add_tmp_var (avar);
      ivar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->idx,
		     NULL_TREE, NULL_TREE);
      lvar = build4 (ARRAY_REF, TREE_TYPE (new_var), avar, sctx->lane,
		     NULL_TREE, NULL_TREE);
    }
  if (DECL_P (new_var))
    {
      SET_DECL_VALUE_EXPR (new_var, lvar);
      DECL_HAS_VALUE_EXPR_P (new_var) = 1;
    }
  return true;
}

/* Helper function of lower_rec_input_clauses. For a reference
   in simd reduction, add an underlying variable it will reference. */

static void
handle_simd_reference (location_t loc, tree new_vard, gimple_seq *ilist)
{
  tree z = TYPE_SIZE_UNIT (TREE_TYPE (TREE_TYPE (new_vard)));
  if (TREE_CONSTANT (z))
    {
      z = create_tmp_var_raw (TREE_TYPE (TREE_TYPE (new_vard)),
			      get_name (new_vard));
      gimple_add_tmp_var (z);
      TREE_ADDRESSABLE (z) = 1;
      z = build_fold_addr_expr_loc (loc, z);
      gimplify_assign (new_vard, z, ilist);
    }
}

/* Generate code to implement the input clauses, FIRSTPRIVATE and COPYIN,
   from the receiver (aka child) side and initializers for REFERENCE_TYPE
   private variables. Initialization statements go in ILIST, while calls
   to destructors go in DLIST. */
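/* For example (a sketch, not literal dump output), for
   "#pragma omp parallel firstprivate (x)" the receiver side would get
   in ILIST roughly

     x' = .omp_data_i->x;

   while a privatized C++ object with a non-trivial destructor would
   have the matching destructor call appended to DLIST. */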

static void
lower_rec_input_clauses (tree clauses, gimple_seq *ilist, gimple_seq *dlist,
			 omp_context *ctx, struct omp_for_data *fd)
{
  tree c, dtor, copyin_seq, x, ptr;
  bool copyin_by_ref = false;
  bool lastprivate_firstprivate = false;
  bool reduction_omp_orig_ref = false;
  int pass;
  bool is_simd = (gimple_code (ctx->stmt) == GIMPLE_OMP_FOR
		  && gimple_omp_for_kind (ctx->stmt) & GF_OMP_FOR_SIMD);
  omplow_simd_context sctx = omplow_simd_context ();
  tree simt_lane = NULL_TREE, simtrec = NULL_TREE;
  tree ivar = NULL_TREE, lvar = NULL_TREE, uid = NULL_TREE;
  gimple_seq llist[3] = { };

  copyin_seq = NULL;
  sctx.is_simt = is_simd && omp_find_clause (clauses, OMP_CLAUSE__SIMT_);

  /* Set max_vf=1 (which will later enforce safelen=1) in simd loops
     with data sharing clauses referencing variable sized vars. That
     is unnecessarily hard to support and very unlikely to result in
     vectorized code anyway. */
  if (is_simd)
    for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
      switch (OMP_CLAUSE_CODE (c))
	{
	case OMP_CLAUSE_LINEAR:
	  if (OMP_CLAUSE_LINEAR_ARRAY (c))
	    sctx.max_vf = 1;
	  /* FALLTHRU */
	case OMP_CLAUSE_PRIVATE:
	case OMP_CLAUSE_FIRSTPRIVATE:
	case OMP_CLAUSE_LASTPRIVATE:
	  if (is_variable_sized (OMP_CLAUSE_DECL (c)))
	    sctx.max_vf = 1;
	  break;
	case OMP_CLAUSE_REDUCTION:
	  if (TREE_CODE (OMP_CLAUSE_DECL (c)) == MEM_REF
	      || is_variable_sized (OMP_CLAUSE_DECL (c)))
	    sctx.max_vf = 1;
	  break;
	default:
	  continue;
	}

  /* Add a placeholder for simduid. */
  if (sctx.is_simt && sctx.max_vf != 1)
    sctx.simt_eargs.safe_push (NULL_TREE);

  /* Do all the fixed sized types in the first pass, and the variable sized
     types in the second pass. This makes sure that the scalar arguments to
     the variable sized types are processed before we use them in the
     variable sized operations. */
  for (pass = 0; pass < 2; ++pass)
    {
      for (c = clauses; c; c = OMP_CLAUSE_CHAIN (c))
	{
	  enum omp_clause_code c_kind = OMP_CLAUSE_CODE (c);
	  tree var, new_var;
	  bool by_ref;
	  location_t clause_loc = OMP_CLAUSE_LOCATION (c);

	  switch (c_kind)
	    {
	    case OMP_CLAUSE_PRIVATE:
	      if (OMP_CLAUSE_PRIVATE_DEBUG (c))
		continue;
	      break;
	    case OMP_CLAUSE_SHARED:
	      /* Ignore shared directives in teams construct. */
	      if (gimple_code (ctx->stmt) == GIMPLE_OMP_TEAMS)
		continue;
	      if (maybe_lookup_decl (OMP_CLAUSE_DECL (c), ctx) == NULL)
		{
		  gcc_assert (OMP_CLAUSE_SHARED_FIRSTPRIVATE (c)
			      || is_global_var (OMP_CLAUSE_DECL (c)));
		  continue;
		}
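	      /* FALLTHRU */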
	    case OMP_CLAUSE_FIRSTPRIVATE:
	    case OMP_CLAUSE_COPYIN:
	      break;
	    case OMP_CLAUSE_LINEAR:
	      if (!OMP_CLAUSE_LINEAR_NO_COPYIN (c)
		  && !OMP_CLAUSE_LINEAR_NO_COPYOUT (c))
		lastprivate_firstprivate = true;
	      break;
	    case OMP_CLAUSE_REDUCTION:
	      if (OMP_CLAUSE_REDUCTION_OMP_ORIG_REF (c))
		reduction_omp_orig_ref = true;
	      break;
	    case OMP_CLAUSE__LOOPTEMP_:
	      /* Handle _looptemp_ clauses only on parallel/task. */
	      if (fd)
		continue;
	      break;
	    case OMP_CLAUSE_LASTPRIVATE:
	      if (OMP_CLAUSE_LASTPRIVATE_FIRSTPRIVATE (c))
		{
		  lastprivate_firstprivate = true;
		  if (pass != 0 || is_taskloop_ctx (ctx))
		    continue;
		}
	      /* Even without corresponding firstprivate, if
		 decl is Fortran allocatable, it needs outer var
		 reference. */
	      else if (pass == 0
		       && lang_hooks.decls.omp_private_outer_ref
			  (OMP_CLAUSE_DECL (c)))
		lastprivate_firstprivate = true;
	      break;
	    case OMP_CLAUSE_ALIGNED:
	      if (pass == 0)
		continue;
	      var = OMP_CLAUSE_DECL (c);
	      if (TREE_CODE (TREE_TYPE (var)) == POINTER_TYPE
		  && !is_global_var (var))
		{
		  new_var = maybe_lookup_decl (var, ctx);
		  if (new_var == NULL_TREE)
		    new_var = maybe_lookup_decl_in_outer_ctx (var, ctx);
		  x = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
		  tree alarg = omp_clause_aligned_alignment (c);
		  alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
		  x = build_call_expr_loc (clause_loc, x, 2, new_var, alarg);
		  x = fold_convert_loc (clause_loc, TREE_TYPE (new_var), x);
		  x = build2 (MODIFY_EXPR, TREE_TYPE (new_var), new_var, x);
		  gimplify_and_add (x, ilist);
		}
	      else if (TREE_CODE (TREE_TYPE (var)) == ARRAY_TYPE
		       && is_global_var (var))
		{
		  tree ptype = build_pointer_type (TREE_TYPE (var)), t, t2;
		  new_var = lookup_decl (var, ctx);
		  t = maybe_lookup_decl_in_outer_ctx (var, ctx);
		  t = build_fold_addr_expr_loc (clause_loc, t);
		  t2 = builtin_decl_explicit (BUILT_IN_ASSUME_ALIGNED);
		  tree alarg = omp_clause_aligned_alignment (c);
		  alarg = fold_convert_loc (clause_loc, size_type_node, alarg);
		  t = build_call_expr_loc (clause_loc, t2, 2, t, alarg);
		  t = fold_convert_loc (clause_loc, ptype, t);
		  x = create_tmp_var (ptype);
		  t = build2 (MODIFY_EXPR, ptype, x, t);
		  gimplify_and_add (t, ilist);
		  t = build_simple_mem_ref_loc (clause_loc, x);
		  SET_DECL_VALUE_EXPR (new_var, t);
		  DECL_HAS_VALUE_EXPR_P (new_var) = 1;
		}
	      continue;
	    default:
	      continue;
	    }

	  new_var = var = OMP_CLAUSE_DECL (c);
	  if (c_kind == OMP_CLAUSE_REDUCTION && TREE_CODE (var) == MEM_REF)
	    {
	      var = TREE_OPERAND (var, 0);
	      if (TREE_CODE (var) == POINTER_PLUS_EXPR)
		var = TREE_OPERAND (var, 0);
	      if (TREE_CODE (var) == INDIRECT_REF
		  || TREE_CODE (var) == ADDR_EXPR)
		var = TREE_OPERAND (var, 0);
	      if (is_variable_sized (var))
		{
		  gcc_assert (DECL_HAS_VALUE_EXPR_P (var));
		  var = DECL_VALUE_EXPR (var);
		  gcc_assert (TREE_CODE (var) == INDIRECT_REF);
		  var = TREE_OPERAND (var, 0);
		  gcc_assert (DECL_P (var));
		}
	      new_var = var;
	    }
	  if (c_kind != OMP_CLAUSE_COPYIN)
	    new_var = lookup_decl (var, ctx);

	  if (c_kind == OMP_CLAUSE_SHARED || c_kind == OMP_CLAUSE_COPYIN)
	    {
	      if (pass != 0)
		continue;
	    }
	  /* C/C++ array section reductions. */
	  else if (c_kind == OMP_CLAUSE_REDUCTION
		   && var != OMP_CLAUSE_DECL (c))
	    {
	      if (pass == 0)
		continue;

	      tree bias = TREE_OPERAND (OMP_CLAUSE_DECL (c), 1);
	      tree orig_var = TREE_OPERAND (OMP_CLAUSE_DECL (c), 0);
	      if (TREE_CODE (orig_var) == POINTER_PLUS_EXPR)
		{
		  tree b = TREE_OPERAND (orig_var, 1);
		  b = maybe_lookup_decl (b, ctx);
		  if (b == NULL)
		    {
		      b = TREE_OPERAND (orig_var, 1);
		      b = maybe_lookup_decl_in_outer_ctx (b, ctx);
		    }
		  if (integer_zerop (bias))
		    bias = b;
		  else
		    {
		      bias = fold_convert_loc (clause_loc,
					       TREE_TYPE (b), bias);
		      bias = fold_build2_loc (clause_loc, PLUS_EXPR,
					      TREE_TYPE (b), b, bias);
		    }
		  orig_var = TREE_OPERAND (orig_var, 0);
		}
	      if (TREE_CODE (orig_var) == INDIRECT_REF
		  || TREE_CODE (orig_var) == ADDR_EXPR)
		orig_var = TREE_OPERAND (orig_var, 0);
	      tree d = OMP_CLAUSE_DECL (c);
	      tree type = TREE_TYPE (d);
	      gcc_assert (TREE_CODE (type) == ARRAY_TYPE);
	      tree v = TYPE_MAX_VALUE (TYPE_DOMAIN (type));
	      const char *name = get_name (orig_var);
	      if (TREE_CONSTANT (v))
		{
		  x = create_tmp_var_raw (type, name);
		  gimple_add_tmp_var (x);
		  TREE_ADDRESSABLE (x) = 1;
		  x = build_fold_addr_expr_loc (clause_loc, x);
		}
	      else
		{
		  tree atmp
		    = builtin_decl_explicit (BUILT_IN_ALLOCA_WITH_ALIGN);
		  tree t = maybe_lookup_decl (v, ctx);
		  if (t)
		    v = t;
		  else
		    v = maybe_lookup_decl_in_outer_ctx (v, ctx);
		  gimplify_expr (&v, ilist, NULL, is_gimple_val, fb_rvalue);
		  t = fold_build2_loc (clause_loc, PLUS_EXPR,
				       TREE_TYPE (v), v,
				       build_int_cst (TREE_TYPE (v), 1));
		  t = fold_build2_loc (clause_loc, MULT_EXPR,
				       TREE_TYPE (v), t,
				       TYPE_SIZE_UNIT (TREE_TYPE (type)));
		  tree al = size_int (TYPE_ALIGN (TREE_TYPE (type)));
		  x = build_call_expr_loc (clause_loc, atmp, 2, t, al);
		}

	      tree ptype = build_pointer_type (TREE_TYPE (type));
	      x = fold_convert_loc (clause_loc, ptype, x);
	      tree y = create_tmp_var (ptype, name);
	      gimplify_assign (y, x, ilist);
	      x = y;
	      tree yb = y;

	      if (!integer_zerop (bias))
		{
		  bias = fold_convert_loc (clause_loc, pointer_sized_int_node,
					   bias);
		  yb = fold_convert_loc (clause_loc, pointer_sized_int_node,
					 x);
		  yb = fold_build2_loc (clause_loc, MINUS_EXPR,
					pointer_sized_int_node, yb, bias);
		  x = fold_convert_loc (clause_loc, TREE_TYPE (x), yb);
		  yb = create_tmp_var (ptype, name);
		  gimplify_assign (yb, x, ilist);
		  x = yb;
		}

	      d = TREE_OPERAND (d, 0);
	      if (TREE_CODE (d) == POINTER_PLUS_EXPR)
		d = TREE_OPERAND (d, 0);
	      if (TREE_CODE (d) == ADDR_EXPR)
		{
		  if (orig_var != var)
		    {
		      gcc_assert (is_variable_sized (orig_var));
		      x = fold_convert_loc (clause_loc, TREE_TYPE (new_var),
					    x);
		      gimplify_assign (new_var, x, ilist);
		      tree new_orig_var = lookup_decl (orig_var, ctx);
		      tree t = build_fold_indirect_ref (new_var);
		      DECL_IGNORED_P (new_var) = 0;