1/*
2 * kmp_tasking.cpp -- OpenMP 3.0 tasking support.
3 */
4
5//===----------------------------------------------------------------------===//
6//
7// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8// See https://llvm.org/LICENSE.txt for license information.
9// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
10//
11//===----------------------------------------------------------------------===//
12
13#include "kmp.h"
14#include "kmp_i18n.h"
15#include "kmp_itt.h"
16#include "kmp_stats.h"
17#include "kmp_wait_release.h"
18#include "kmp_taskdeps.h"
19
20#if OMPT_SUPPORT
21#include "ompt-specific.h"
22#endif
23
24#if ENABLE_LIBOMPTARGET
25static void (*tgt_target_nowait_query)(void **);
26
27void __kmp_init_target_task() {
28 *(void **)(&tgt_target_nowait_query) = KMP_DLSYM("__tgt_target_nowait_query");
29}
30#endif
31
32/* forward declaration */
33static void __kmp_enable_tasking(kmp_task_team_t *task_team,
34 kmp_info_t *this_thr);
35static void __kmp_alloc_task_deque(kmp_info_t *thread,
36 kmp_thread_data_t *thread_data);
37static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
38 kmp_task_team_t *task_team);
39static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask);
40#if OMPX_TASKGRAPH
41static kmp_tdg_info_t *__kmp_find_tdg(kmp_int32 tdg_id);
42int __kmp_taskloop_task(int gtid, void *ptask);
43#endif
44
45#ifdef BUILD_TIED_TASK_STACK
46
47// __kmp_trace_task_stack: print the tied tasks from the task stack in order
// from top to bottom
49//
50// gtid: global thread identifier for thread containing stack
51// thread_data: thread data for task team thread containing stack
52// threshold: value above which the trace statement triggers
53// location: string identifying call site of this function (for trace)
54static void __kmp_trace_task_stack(kmp_int32 gtid,
55 kmp_thread_data_t *thread_data,
56 int threshold, char *location) {
57 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
58 kmp_taskdata_t **stack_top = task_stack->ts_top;
59 kmp_int32 entries = task_stack->ts_entries;
60 kmp_taskdata_t *tied_task;
61
62 KA_TRACE(
63 threshold,
64 ("__kmp_trace_task_stack(start): location = %s, gtid = %d, entries = %d, "
65 "first_block = %p, stack_top = %p \n",
66 location, gtid, entries, task_stack->ts_first_block, stack_top));
67
68 KMP_DEBUG_ASSERT(stack_top != NULL);
69 KMP_DEBUG_ASSERT(entries > 0);
70
71 while (entries != 0) {
72 KMP_DEBUG_ASSERT(stack_top != &task_stack->ts_first_block.sb_block[0]);
73 // fix up ts_top if we need to pop from previous block
74 if (entries & TASK_STACK_INDEX_MASK == 0) {
75 kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(stack_top);
76
77 stack_block = stack_block->sb_prev;
78 stack_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
79 }
80
81 // finish bookkeeping
82 stack_top--;
83 entries--;
84
85 tied_task = *stack_top;
86
87 KMP_DEBUG_ASSERT(tied_task != NULL);
88 KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
89
90 KA_TRACE(threshold,
91 ("__kmp_trace_task_stack(%s): gtid=%d, entry=%d, "
92 "stack_top=%p, tied_task=%p\n",
93 location, gtid, entries, stack_top, tied_task));
94 }
95 KMP_DEBUG_ASSERT(stack_top == &task_stack->ts_first_block.sb_block[0]);
96
97 KA_TRACE(threshold,
98 ("__kmp_trace_task_stack(exit): location = %s, gtid = %d\n",
99 location, gtid));
100}
101
102// __kmp_init_task_stack: initialize the task stack for the first time
103// after a thread_data structure is created.
104// It should not be necessary to do this again (assuming the stack works).
105//
106// gtid: global thread identifier of calling thread
107// thread_data: thread data for task team thread containing stack
108static void __kmp_init_task_stack(kmp_int32 gtid,
109 kmp_thread_data_t *thread_data) {
110 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
111 kmp_stack_block_t *first_block;
112
113 // set up the first block of the stack
114 first_block = &task_stack->ts_first_block;
115 task_stack->ts_top = (kmp_taskdata_t **)first_block;
116 memset((void *)first_block, '\0',
117 TASK_STACK_BLOCK_SIZE * sizeof(kmp_taskdata_t *));
118
119 // initialize the stack to be empty
120 task_stack->ts_entries = TASK_STACK_EMPTY;
121 first_block->sb_next = NULL;
122 first_block->sb_prev = NULL;
123}
124
125// __kmp_free_task_stack: free the task stack when thread_data is destroyed.
126//
127// gtid: global thread identifier for calling thread
128// thread_data: thread info for thread containing stack
129static void __kmp_free_task_stack(kmp_int32 gtid,
130 kmp_thread_data_t *thread_data) {
131 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
132 kmp_stack_block_t *stack_block = &task_stack->ts_first_block;
133
134 KMP_DEBUG_ASSERT(task_stack->ts_entries == TASK_STACK_EMPTY);
135 // free from the second block of the stack
136 while (stack_block != NULL) {
137 kmp_stack_block_t *next_block = (stack_block) ? stack_block->sb_next : NULL;
138
139 stack_block->sb_next = NULL;
140 stack_block->sb_prev = NULL;
141 if (stack_block != &task_stack->ts_first_block) {
142 __kmp_thread_free(thread,
143 stack_block); // free the block, if not the first
144 }
145 stack_block = next_block;
146 }
147 // initialize the stack to be empty
148 task_stack->ts_entries = 0;
149 task_stack->ts_top = NULL;
150}
151
152// __kmp_push_task_stack: Push the tied task onto the task stack.
153// Grow the stack if necessary by allocating another block.
154//
155// gtid: global thread identifier for calling thread
156// thread: thread info for thread containing stack
157// tied_task: the task to push on the stack
158static void __kmp_push_task_stack(kmp_int32 gtid, kmp_info_t *thread,
159 kmp_taskdata_t *tied_task) {
160 // GEH - need to consider what to do if tt_threads_data not allocated yet
161 kmp_thread_data_t *thread_data =
162 &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
163 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
164
165 if (tied_task->td_flags.team_serial || tied_task->td_flags.tasking_ser) {
166 return; // Don't push anything on stack if team or team tasks are serialized
167 }
168
169 KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
170 KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
171
172 KA_TRACE(20,
173 ("__kmp_push_task_stack(enter): GTID: %d; THREAD: %p; TASK: %p\n",
174 gtid, thread, tied_task));
175 // Store entry
176 *(task_stack->ts_top) = tied_task;
177
178 // Do bookkeeping for next push
179 task_stack->ts_top++;
180 task_stack->ts_entries++;
181
182 if (task_stack->ts_entries & TASK_STACK_INDEX_MASK == 0) {
183 // Find beginning of this task block
184 kmp_stack_block_t *stack_block =
185 (kmp_stack_block_t *)(task_stack->ts_top - TASK_STACK_BLOCK_SIZE);
186
187 // Check if we already have a block
188 if (stack_block->sb_next !=
189 NULL) { // reset ts_top to beginning of next block
190 task_stack->ts_top = &stack_block->sb_next->sb_block[0];
191 } else { // Alloc new block and link it up
192 kmp_stack_block_t *new_block = (kmp_stack_block_t *)__kmp_thread_calloc(
193 thread, sizeof(kmp_stack_block_t));
194
195 task_stack->ts_top = &new_block->sb_block[0];
196 stack_block->sb_next = new_block;
197 new_block->sb_prev = stack_block;
198 new_block->sb_next = NULL;
199
200 KA_TRACE(
201 30,
202 ("__kmp_push_task_stack(): GTID: %d; TASK: %p; Alloc new block: %p\n",
203 gtid, tied_task, new_block));
204 }
205 }
206 KA_TRACE(20, ("__kmp_push_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
207 tied_task));
208}
209
210// __kmp_pop_task_stack: Pop the tied task from the task stack. Don't return
211// the task, just check to make sure it matches the ending task passed in.
212//
213// gtid: global thread identifier for the calling thread
214// thread: thread info structure containing stack
215// tied_task: the task popped off the stack
216// ending_task: the task that is ending (should match popped task)
217static void __kmp_pop_task_stack(kmp_int32 gtid, kmp_info_t *thread,
218 kmp_taskdata_t *ending_task) {
219 // GEH - need to consider what to do if tt_threads_data not allocated yet
220 kmp_thread_data_t *thread_data =
      &thread->th.th_task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
222 kmp_task_stack_t *task_stack = &thread_data->td.td_susp_tied_tasks;
223 kmp_taskdata_t *tied_task;
224
225 if (ending_task->td_flags.team_serial || ending_task->td_flags.tasking_ser) {
226 // Don't pop anything from stack if team or team tasks are serialized
227 return;
228 }
229
230 KMP_DEBUG_ASSERT(task_stack->ts_top != NULL);
231 KMP_DEBUG_ASSERT(task_stack->ts_entries > 0);
232
233 KA_TRACE(20, ("__kmp_pop_task_stack(enter): GTID: %d; THREAD: %p\n", gtid,
234 thread));
235
236 // fix up ts_top if we need to pop from previous block
237 if (task_stack->ts_entries & TASK_STACK_INDEX_MASK == 0) {
238 kmp_stack_block_t *stack_block = (kmp_stack_block_t *)(task_stack->ts_top);
239
240 stack_block = stack_block->sb_prev;
241 task_stack->ts_top = &stack_block->sb_block[TASK_STACK_BLOCK_SIZE];
242 }
243
244 // finish bookkeeping
245 task_stack->ts_top--;
246 task_stack->ts_entries--;
247
248 tied_task = *(task_stack->ts_top);
249
250 KMP_DEBUG_ASSERT(tied_task != NULL);
251 KMP_DEBUG_ASSERT(tied_task->td_flags.tasktype == TASK_TIED);
252 KMP_DEBUG_ASSERT(tied_task == ending_task); // If we built the stack correctly
253
254 KA_TRACE(20, ("__kmp_pop_task_stack(exit): GTID: %d; TASK: %p\n", gtid,
255 tied_task));
256 return;
257}
258#endif /* BUILD_TIED_TASK_STACK */
259
260// returns 1 if new task is allowed to execute, 0 otherwise
261// checks Task Scheduling constraint (if requested) and
262// mutexinoutset dependencies if any
263static bool __kmp_task_is_allowed(int gtid, const kmp_int32 is_constrained,
264 const kmp_taskdata_t *tasknew,
265 const kmp_taskdata_t *taskcurr) {
266 if (is_constrained && (tasknew->td_flags.tiedness == TASK_TIED)) {
267 // Check if the candidate obeys the Task Scheduling Constraints (TSC)
268 // only descendant of all deferred tied tasks can be scheduled, checking
269 // the last one is enough, as it in turn is the descendant of all others
270 kmp_taskdata_t *current = taskcurr->td_last_tied;
271 KMP_DEBUG_ASSERT(current != NULL);
272 // check if the task is not suspended on barrier
273 if (current->td_flags.tasktype == TASK_EXPLICIT ||
274 current->td_taskwait_thread > 0) { // <= 0 on barrier
275 kmp_int32 level = current->td_level;
276 kmp_taskdata_t *parent = tasknew->td_parent;
277 while (parent != current && parent->td_level > level) {
278 // check generation up to the level of the current task
279 parent = parent->td_parent;
280 KMP_DEBUG_ASSERT(parent != NULL);
281 }
282 if (parent != current)
283 return false;
284 }
285 }
286 // Check mutexinoutset dependencies, acquire locks
287 kmp_depnode_t *node = tasknew->td_depnode;
288#if OMPX_TASKGRAPH
289 if (!tasknew->is_taskgraph && UNLIKELY(node && (node->dn.mtx_num_locks > 0))) {
290#else
291 if (UNLIKELY(node && (node->dn.mtx_num_locks > 0))) {
292#endif
293 for (int i = 0; i < node->dn.mtx_num_locks; ++i) {
294 KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      if (__kmp_test_lock(node->dn.mtx_locks[i], gtid))
296 continue;
297 // could not get the lock, release previous locks
298 for (int j = i - 1; j >= 0; --j)
        __kmp_release_lock(node->dn.mtx_locks[j], gtid);
300 return false;
301 }
302 // negative num_locks means all locks acquired successfully
303 node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
304 }
305 return true;
306}
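
//
// Illustrative sketch (comments only, not compiled): how the TSC check above
// walks a candidate's ancestry. Assume the encountering thread's last deferred
// tied task C sits at td_level 2 and a new tied task N is being considered:
//
//   N -> td_parent -> ... -> C          : N descends from C, scheduling allowed
//   N -> td_parent -> ... -> X (X != C) : the walk reached C's level without
//                                         finding C, so __kmp_task_is_allowed
//                                         returns false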
307
308// __kmp_realloc_task_deque:
309// Re-allocates a task deque for a particular thread, copies the content from
310// the old deque and adjusts the necessary data structures relating to the
311// deque. This operation must be done with the deque_lock being held
312static void __kmp_realloc_task_deque(kmp_info_t *thread,
313 kmp_thread_data_t *thread_data) {
314 kmp_int32 size = TASK_DEQUE_SIZE(thread_data->td);
315 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == size);
316 kmp_int32 new_size = 2 * size;
317
318 KE_TRACE(10, ("__kmp_realloc_task_deque: T#%d reallocating deque[from %d to "
319 "%d] for thread_data %p\n",
320 __kmp_gtid_from_thread(thread), size, new_size, thread_data));
321
322 kmp_taskdata_t **new_deque =
323 (kmp_taskdata_t **)__kmp_allocate(new_size * sizeof(kmp_taskdata_t *));
324
325 int i, j;
326 for (i = thread_data->td.td_deque_head, j = 0; j < size;
327 i = (i + 1) & TASK_DEQUE_MASK(thread_data->td), j++)
328 new_deque[j] = thread_data->td.td_deque[i];
329
330 __kmp_free(thread_data->td.td_deque);
331
332 thread_data->td.td_deque_head = 0;
333 thread_data->td.td_deque_tail = size;
334 thread_data->td.td_deque = new_deque;
335 thread_data->td.td_deque_size = new_size;
336}
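
//
// Worked example (illustrative, not part of the runtime): with a full deque of
// size 4 whose head is at index 2, the loop above copies the entries
// oldest-first, i.e. old[2], old[3], old[0], old[1] become new_deque[0..3];
// afterwards head == 0, tail == 4 (the old size) and td_deque_size == 8, so the
// doubled deque has room for the pending push.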
337
338static kmp_task_pri_t *__kmp_alloc_task_pri_list() {
339 kmp_task_pri_t *l = (kmp_task_pri_t *)__kmp_allocate(sizeof(kmp_task_pri_t));
340 kmp_thread_data_t *thread_data = &l->td;
  __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
342 thread_data->td.td_deque_last_stolen = -1;
343 KE_TRACE(20, ("__kmp_alloc_task_pri_list: T#%d allocating deque[%d] "
344 "for thread_data %p\n",
345 __kmp_get_gtid(), INITIAL_TASK_DEQUE_SIZE, thread_data));
346 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
347 INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
348 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
349 return l;
350}
351
352// The function finds the deque of priority tasks with given priority, or
// allocates a new deque and puts it into the sorted (high -> low) list of deques.
354// Deques of non-default priority tasks are shared between all threads in team,
355// as opposed to per-thread deques of tasks with default priority.
356// The function is called under the lock task_team->tt.tt_task_pri_lock.
357static kmp_thread_data_t *
358__kmp_get_priority_deque_data(kmp_task_team_t *task_team, kmp_int32 pri) {
359 kmp_thread_data_t *thread_data;
360 kmp_task_pri_t *lst = task_team->tt.tt_task_pri_list;
361 if (lst->priority == pri) {
362 // Found queue of tasks with given priority.
363 thread_data = &lst->td;
364 } else if (lst->priority < pri) {
365 // All current priority queues contain tasks with lower priority.
366 // Allocate new one for given priority tasks.
367 kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
368 thread_data = &list->td;
369 list->priority = pri;
370 list->next = lst;
371 task_team->tt.tt_task_pri_list = list;
372 } else { // task_team->tt.tt_task_pri_list->priority > pri
373 kmp_task_pri_t *next_queue = lst->next;
374 while (next_queue && next_queue->priority > pri) {
375 lst = next_queue;
376 next_queue = lst->next;
377 }
378 // lst->priority > pri && (next == NULL || pri >= next->priority)
379 if (next_queue == NULL) {
380 // No queue with pri priority, need to allocate new one.
381 kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
382 thread_data = &list->td;
383 list->priority = pri;
384 list->next = NULL;
385 lst->next = list;
386 } else if (next_queue->priority == pri) {
387 // Found queue of tasks with given priority.
388 thread_data = &next_queue->td;
389 } else { // lst->priority > pri > next->priority
390 // insert newly allocated between existed queues
391 kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
392 thread_data = &list->td;
393 list->priority = pri;
394 list->next = next_queue;
395 lst->next = list;
396 }
397 }
398 return thread_data;
399}
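
//
// Example (illustrative): if the team has already seen tasks with priorities
// 9, 5 and 2, the list is  tt_task_pri_list -> {9} -> {5} -> {2}.  Asking for
// priority 7 allocates a new deque and links it between 9 and 5, giving
// {9} -> {7} -> {5} -> {2}; asking for 5 again simply returns the existing
// {5} entry.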
400
401// __kmp_push_priority_task: Add a task to the team's priority task deque
402static kmp_int32 __kmp_push_priority_task(kmp_int32 gtid, kmp_info_t *thread,
403 kmp_taskdata_t *taskdata,
404 kmp_task_team_t *task_team,
405 kmp_int32 pri) {
406 kmp_thread_data_t *thread_data = NULL;
407 KA_TRACE(20,
408 ("__kmp_push_priority_task: T#%d trying to push task %p, pri %d.\n",
409 gtid, taskdata, pri));
410
411 // Find task queue specific to priority value
412 kmp_task_pri_t *lst = task_team->tt.tt_task_pri_list;
413 if (UNLIKELY(lst == NULL)) {
    __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
415 if (task_team->tt.tt_task_pri_list == NULL) {
416 // List of queues is still empty, allocate one.
417 kmp_task_pri_t *list = __kmp_alloc_task_pri_list();
418 thread_data = &list->td;
419 list->priority = pri;
420 list->next = NULL;
421 task_team->tt.tt_task_pri_list = list;
422 } else {
423 // Other thread initialized a queue. Check if it fits and get thread_data.
424 thread_data = __kmp_get_priority_deque_data(task_team, pri);
425 }
    __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
427 } else {
428 if (lst->priority == pri) {
429 // Found queue of tasks with given priority.
430 thread_data = &lst->td;
431 } else {
      __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
      thread_data = __kmp_get_priority_deque_data(task_team, pri);
      __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
435 }
436 }
437 KMP_DEBUG_ASSERT(thread_data);
438
  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
440 // Check if deque is full
441 if (TCR_4(thread_data->td.td_deque_ntasks) >=
442 TASK_DEQUE_SIZE(thread_data->td)) {
443 if (__kmp_enable_task_throttling &&
        __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
                              thread->th.th_current_task)) {
      __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
447 KA_TRACE(20, ("__kmp_push_priority_task: T#%d deque is full; returning "
448 "TASK_NOT_PUSHED for task %p\n",
449 gtid, taskdata));
450 return TASK_NOT_PUSHED;
451 } else {
452 // expand deque to push the task which is not allowed to execute
453 __kmp_realloc_task_deque(thread, thread_data);
454 }
455 }
456 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
457 TASK_DEQUE_SIZE(thread_data->td));
458 // Push taskdata.
459 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
460 // Wrap index.
461 thread_data->td.td_deque_tail =
462 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
463 TCW_4(thread_data->td.td_deque_ntasks,
464 TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
465 KMP_FSYNC_RELEASING(thread->th.th_current_task); // releasing self
466 KMP_FSYNC_RELEASING(taskdata); // releasing child
467 KA_TRACE(20, ("__kmp_push_priority_task: T#%d returning "
468 "TASK_SUCCESSFULLY_PUSHED: task=%p ntasks=%d head=%u tail=%u\n",
469 gtid, taskdata, thread_data->td.td_deque_ntasks,
470 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
472 task_team->tt.tt_num_task_pri++; // atomic inc
473 return TASK_SUCCESSFULLY_PUSHED;
474}
475
476// __kmp_push_task: Add a task to the thread's deque
477static kmp_int32 __kmp_push_task(kmp_int32 gtid, kmp_task_t *task) {
478 kmp_info_t *thread = __kmp_threads[gtid];
479 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
480
481 // If we encounter a hidden helper task, and the current thread is not a
482 // hidden helper thread, we have to give the task to any hidden helper thread
483 // starting from its shadow one.
484 if (UNLIKELY(taskdata->td_flags.hidden_helper &&
485 !KMP_HIDDEN_HELPER_THREAD(gtid))) {
486 kmp_int32 shadow_gtid = KMP_GTID_TO_SHADOW_GTID(gtid);
    __kmpc_give_task(task, __kmp_tid_from_gtid(shadow_gtid));
488 // Signal the hidden helper threads.
489 __kmp_hidden_helper_worker_thread_signal();
490 return TASK_SUCCESSFULLY_PUSHED;
491 }
492
493 kmp_task_team_t *task_team = thread->th.th_task_team;
494 kmp_int32 tid = __kmp_tid_from_gtid(gtid);
495 kmp_thread_data_t *thread_data;
496
497 KA_TRACE(20,
498 ("__kmp_push_task: T#%d trying to push task %p.\n", gtid, taskdata));
499
500 if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
501 // untied task needs to increment counter so that the task structure is not
502 // freed prematurely
503 kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
504 KMP_DEBUG_USE_VAR(counter);
505 KA_TRACE(
506 20,
507 ("__kmp_push_task: T#%d untied_count (%d) incremented for task %p\n",
508 gtid, counter, taskdata));
509 }
510
511 // The first check avoids building task_team thread data if serialized
512 if (UNLIKELY(taskdata->td_flags.task_serial)) {
513 KA_TRACE(20, ("__kmp_push_task: T#%d team serialized; returning "
514 "TASK_NOT_PUSHED for task %p\n",
515 gtid, taskdata));
516 return TASK_NOT_PUSHED;
517 }
518
519 // Now that serialized tasks have returned, we can assume that we are not in
520 // immediate exec mode
521 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
522 if (UNLIKELY(!KMP_TASKING_ENABLED(task_team))) {
    __kmp_enable_tasking(task_team, thread);
524 }
525 KMP_DEBUG_ASSERT(TCR_4(task_team->tt.tt_found_tasks) == TRUE);
526 KMP_DEBUG_ASSERT(TCR_PTR(task_team->tt.tt_threads_data) != NULL);
527
528 if (taskdata->td_flags.priority_specified && task->data2.priority > 0 &&
529 __kmp_max_task_priority > 0) {
530 int pri = KMP_MIN(task->data2.priority, __kmp_max_task_priority);
531 return __kmp_push_priority_task(gtid, thread, taskdata, task_team, pri);
532 }
533
534 // Find tasking deque specific to encountering thread
535 thread_data = &task_team->tt.tt_threads_data[tid];
536
537 // No lock needed since only owner can allocate. If the task is hidden_helper,
  // we don't need it either because we have initialized the deque for hidden
539 // helper thread data.
540 if (UNLIKELY(thread_data->td.td_deque == NULL)) {
541 __kmp_alloc_task_deque(thread, thread_data);
542 }
543
544 int locked = 0;
545 // Check if deque is full
546 if (TCR_4(thread_data->td.td_deque_ntasks) >=
547 TASK_DEQUE_SIZE(thread_data->td)) {
548 if (__kmp_enable_task_throttling &&
        __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
                              thread->th.th_current_task)) {
551 KA_TRACE(20, ("__kmp_push_task: T#%d deque is full; returning "
552 "TASK_NOT_PUSHED for task %p\n",
553 gtid, taskdata));
554 return TASK_NOT_PUSHED;
555 } else {
      __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
557 locked = 1;
558 if (TCR_4(thread_data->td.td_deque_ntasks) >=
559 TASK_DEQUE_SIZE(thread_data->td)) {
560 // expand deque to push the task which is not allowed to execute
561 __kmp_realloc_task_deque(thread, thread_data);
562 }
563 }
564 }
565 // Lock the deque for the task push operation
566 if (!locked) {
    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
568 // Need to recheck as we can get a proxy task from thread outside of OpenMP
569 if (TCR_4(thread_data->td.td_deque_ntasks) >=
570 TASK_DEQUE_SIZE(thread_data->td)) {
571 if (__kmp_enable_task_throttling &&
          __kmp_task_is_allowed(gtid, __kmp_task_stealing_constraint, taskdata,
                                thread->th.th_current_task)) {
        __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
575 KA_TRACE(20, ("__kmp_push_task: T#%d deque is full on 2nd check; "
576 "returning TASK_NOT_PUSHED for task %p\n",
577 gtid, taskdata));
578 return TASK_NOT_PUSHED;
579 } else {
580 // expand deque to push the task which is not allowed to execute
581 __kmp_realloc_task_deque(thread, thread_data);
582 }
583 }
584 }
585 // Must have room since no thread can add tasks but calling thread
586 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) <
587 TASK_DEQUE_SIZE(thread_data->td));
588
589 thread_data->td.td_deque[thread_data->td.td_deque_tail] =
590 taskdata; // Push taskdata
591 // Wrap index.
592 thread_data->td.td_deque_tail =
593 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
594 TCW_4(thread_data->td.td_deque_ntasks,
595 TCR_4(thread_data->td.td_deque_ntasks) + 1); // Adjust task count
596 KMP_FSYNC_RELEASING(thread->th.th_current_task); // releasing self
597 KMP_FSYNC_RELEASING(taskdata); // releasing child
598 KA_TRACE(20, ("__kmp_push_task: T#%d returning TASK_SUCCESSFULLY_PUSHED: "
599 "task=%p ntasks=%d head=%u tail=%u\n",
600 gtid, taskdata, thread_data->td.td_deque_ntasks,
601 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
602
  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
604
605 return TASK_SUCCESSFULLY_PUSHED;
606}
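
//
// Note on the index arithmetic above (illustrative): the deque size is kept a
// power of two (INITIAL_TASK_DEQUE_SIZE doubled on each realloc), so
// TASK_DEQUE_MASK evaluates to size - 1 and "(tail + 1) & mask" wraps the ring
// buffer without a division; e.g. with size 256, a tail of 255 wraps back to 0.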
607
608// __kmp_pop_current_task_from_thread: set up current task from called thread
609// when team ends
610//
611// this_thr: thread structure to set current_task in.
612void __kmp_pop_current_task_from_thread(kmp_info_t *this_thr) {
613 KF_TRACE(10, ("__kmp_pop_current_task_from_thread(enter): T#%d "
614 "this_thread=%p, curtask=%p, "
615 "curtask_parent=%p\n",
616 0, this_thr, this_thr->th.th_current_task,
617 this_thr->th.th_current_task->td_parent));
618
619 this_thr->th.th_current_task = this_thr->th.th_current_task->td_parent;
620
621 KF_TRACE(10, ("__kmp_pop_current_task_from_thread(exit): T#%d "
622 "this_thread=%p, curtask=%p, "
623 "curtask_parent=%p\n",
624 0, this_thr, this_thr->th.th_current_task,
625 this_thr->th.th_current_task->td_parent));
626}
627
628// __kmp_push_current_task_to_thread: set up current task in called thread for a
629// new team
630//
631// this_thr: thread structure to set up
632// team: team for implicit task data
633// tid: thread within team to set up
634void __kmp_push_current_task_to_thread(kmp_info_t *this_thr, kmp_team_t *team,
635 int tid) {
636 // current task of the thread is a parent of the new just created implicit
637 // tasks of new team
638 KF_TRACE(10, ("__kmp_push_current_task_to_thread(enter): T#%d this_thread=%p "
639 "curtask=%p "
640 "parent_task=%p\n",
641 tid, this_thr, this_thr->th.th_current_task,
642 team->t.t_implicit_task_taskdata[tid].td_parent));
643
644 KMP_DEBUG_ASSERT(this_thr != NULL);
645
646 if (tid == 0) {
647 if (this_thr->th.th_current_task != &team->t.t_implicit_task_taskdata[0]) {
648 team->t.t_implicit_task_taskdata[0].td_parent =
649 this_thr->th.th_current_task;
650 this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[0];
651 }
652 } else {
653 team->t.t_implicit_task_taskdata[tid].td_parent =
654 team->t.t_implicit_task_taskdata[0].td_parent;
655 this_thr->th.th_current_task = &team->t.t_implicit_task_taskdata[tid];
656 }
657
658 KF_TRACE(10, ("__kmp_push_current_task_to_thread(exit): T#%d this_thread=%p "
659 "curtask=%p "
660 "parent_task=%p\n",
661 tid, this_thr, this_thr->th.th_current_task,
662 team->t.t_implicit_task_taskdata[tid].td_parent));
663}
664
665// __kmp_task_start: bookkeeping for a task starting execution
666//
667// GTID: global thread id of calling thread
668// task: task starting execution
669// current_task: task suspending
670static void __kmp_task_start(kmp_int32 gtid, kmp_task_t *task,
671 kmp_taskdata_t *current_task) {
672 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
673 kmp_info_t *thread = __kmp_threads[gtid];
674
675 KA_TRACE(10,
676 ("__kmp_task_start(enter): T#%d starting task %p: current_task=%p\n",
677 gtid, taskdata, current_task));
678
679 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
680
681 // mark currently executing task as suspended
682 // TODO: GEH - make sure root team implicit task is initialized properly.
683 // KMP_DEBUG_ASSERT( current_task -> td_flags.executing == 1 );
684 current_task->td_flags.executing = 0;
685
686// Add task to stack if tied
687#ifdef BUILD_TIED_TASK_STACK
688 if (taskdata->td_flags.tiedness == TASK_TIED) {
689 __kmp_push_task_stack(gtid, thread, taskdata);
690 }
691#endif /* BUILD_TIED_TASK_STACK */
692
693 // mark starting task as executing and as current task
694 thread->th.th_current_task = taskdata;
695
696 KMP_DEBUG_ASSERT(taskdata->td_flags.started == 0 ||
697 taskdata->td_flags.tiedness == TASK_UNTIED);
698 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0 ||
699 taskdata->td_flags.tiedness == TASK_UNTIED);
700 taskdata->td_flags.started = 1;
701 taskdata->td_flags.executing = 1;
702 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
703 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
704
705 // GEH TODO: shouldn't we pass some sort of location identifier here?
706 // APT: yes, we will pass location here.
707 // need to store current thread state (in a thread or taskdata structure)
708 // before setting work_state, otherwise wrong state is set after end of task
709
710 KA_TRACE(10, ("__kmp_task_start(exit): T#%d task=%p\n", gtid, taskdata));
711
712 return;
713}
714
715#if OMPT_SUPPORT
716//------------------------------------------------------------------------------
717// __ompt_task_init:
718// Initialize OMPT fields maintained by a task. This will only be called after
719// ompt_start_tool, so we already know whether ompt is enabled or not.
720
721static inline void __ompt_task_init(kmp_taskdata_t *task, int tid) {
722 // The calls to __ompt_task_init already have the ompt_enabled condition.
723 task->ompt_task_info.task_data.value = 0;
724 task->ompt_task_info.frame.exit_frame = ompt_data_none;
725 task->ompt_task_info.frame.enter_frame = ompt_data_none;
726 task->ompt_task_info.frame.exit_frame_flags =
727 ompt_frame_runtime | ompt_frame_framepointer;
728 task->ompt_task_info.frame.enter_frame_flags =
729 ompt_frame_runtime | ompt_frame_framepointer;
730 task->ompt_task_info.dispatch_chunk.start = 0;
731 task->ompt_task_info.dispatch_chunk.iterations = 0;
732}
733
734// __ompt_task_start:
735// Build and trigger task-begin event
736static inline void __ompt_task_start(kmp_task_t *task,
737 kmp_taskdata_t *current_task,
738 kmp_int32 gtid) {
739 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
740 ompt_task_status_t status = ompt_task_switch;
741 if (__kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded) {
742 status = ompt_task_yield;
743 __kmp_threads[gtid]->th.ompt_thread_info.ompt_task_yielded = 0;
744 }
745 /* let OMPT know that we're about to run this task */
746 if (ompt_enabled.ompt_callback_task_schedule) {
747 ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
748 &(current_task->ompt_task_info.task_data), status,
749 &(taskdata->ompt_task_info.task_data));
750 }
751 taskdata->ompt_task_info.scheduling_parent = current_task;
752}
753
754// __ompt_task_finish:
755// Build and trigger final task-schedule event
756static inline void __ompt_task_finish(kmp_task_t *task,
757 kmp_taskdata_t *resumed_task,
758 ompt_task_status_t status) {
759 if (ompt_enabled.ompt_callback_task_schedule) {
760 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
761 if (__kmp_omp_cancellation && taskdata->td_taskgroup &&
762 taskdata->td_taskgroup->cancel_request == cancel_taskgroup) {
763 status = ompt_task_cancel;
764 }
765
766 /* let OMPT know that we're returning to the callee task */
767 ompt_callbacks.ompt_callback(ompt_callback_task_schedule)(
768 &(taskdata->ompt_task_info.task_data), status,
769 (resumed_task ? &(resumed_task->ompt_task_info.task_data) : NULL));
770 }
771}
772#endif
773
774template <bool ompt>
775static void __kmpc_omp_task_begin_if0_template(ident_t *loc_ref, kmp_int32 gtid,
776 kmp_task_t *task,
777 void *frame_address,
778 void *return_address) {
779 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
780 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
781
782 KA_TRACE(10, ("__kmpc_omp_task_begin_if0(enter): T#%d loc=%p task=%p "
783 "current_task=%p\n",
784 gtid, loc_ref, taskdata, current_task));
785
786 if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
787 // untied task needs to increment counter so that the task structure is not
788 // freed prematurely
789 kmp_int32 counter = 1 + KMP_ATOMIC_INC(&taskdata->td_untied_count);
790 KMP_DEBUG_USE_VAR(counter);
791 KA_TRACE(20, ("__kmpc_omp_task_begin_if0: T#%d untied_count (%d) "
792 "incremented for task %p\n",
793 gtid, counter, taskdata));
794 }
795
796 taskdata->td_flags.task_serial =
797 1; // Execute this task immediately, not deferred.
798 __kmp_task_start(gtid, task, current_task);
799
800#if OMPT_SUPPORT
801 if (ompt) {
802 if (current_task->ompt_task_info.frame.enter_frame.ptr == NULL) {
803 current_task->ompt_task_info.frame.enter_frame.ptr =
804 taskdata->ompt_task_info.frame.exit_frame.ptr = frame_address;
805 current_task->ompt_task_info.frame.enter_frame_flags =
806 taskdata->ompt_task_info.frame.exit_frame_flags =
807 ompt_frame_application | ompt_frame_framepointer;
808 }
809 if (ompt_enabled.ompt_callback_task_create) {
810 ompt_task_info_t *parent_info = &(current_task->ompt_task_info);
811 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
812 &(parent_info->task_data), &(parent_info->frame),
813 &(taskdata->ompt_task_info.task_data),
814 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(taskdata), 0,
815 return_address);
816 }
817 __ompt_task_start(task, current_task, gtid);
818 }
819#endif // OMPT_SUPPORT
820
821 KA_TRACE(10, ("__kmpc_omp_task_begin_if0(exit): T#%d loc=%p task=%p,\n", gtid,
822 loc_ref, taskdata));
823}
824
825#if OMPT_SUPPORT
826OMPT_NOINLINE
827static void __kmpc_omp_task_begin_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
828 kmp_task_t *task,
829 void *frame_address,
830 void *return_address) {
831 __kmpc_omp_task_begin_if0_template<true>(loc_ref, gtid, task, frame_address,
832 return_address);
833}
834#endif // OMPT_SUPPORT
835
836// __kmpc_omp_task_begin_if0: report that a given serialized task has started
837// execution
838//
839// loc_ref: source location information; points to beginning of task block.
840// gtid: global thread number.
841// task: task thunk for the started task.
842#ifdef __s390x__
843// This is required for OMPT_GET_FRAME_ADDRESS(1) to compile on s390x.
844// In order for it to work correctly, the caller also needs to be compiled with
845// backchain. If a caller is compiled without backchain,
846// OMPT_GET_FRAME_ADDRESS(1) will produce an incorrect value, but will not
847// crash.
848__attribute__((target("backchain")))
849#endif
850void __kmpc_omp_task_begin_if0(ident_t *loc_ref, kmp_int32 gtid,
851 kmp_task_t *task) {
852#if OMPT_SUPPORT
853 if (UNLIKELY(ompt_enabled.enabled)) {
854 OMPT_STORE_RETURN_ADDRESS(gtid);
855 __kmpc_omp_task_begin_if0_ompt(loc_ref, gtid, task,
856 OMPT_GET_FRAME_ADDRESS(1),
857 OMPT_LOAD_RETURN_ADDRESS(gtid));
858 return;
859 }
860#endif
861 __kmpc_omp_task_begin_if0_template<false>(loc_ref, gtid, task, NULL, NULL);
862}
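
//
// Illustrative call sequence (a sketch of what a compiler might emit for
// "#pragma omp task if(0)"; not copied from any particular compiler):
//
//   kmp_task_t *t = __kmpc_omp_task_alloc(loc, gtid, flags, sizeof_task,
//                                         sizeof_shareds, task_entry);
//   __kmpc_omp_task_begin_if0(loc, gtid, t); // undeferred: run inline
//   task_entry(gtid, t);                     // execute the task body directly
//   __kmpc_omp_task_complete_if0(loc, gtid, t);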
863
864#ifdef TASK_UNUSED
865// __kmpc_omp_task_begin: report that a given task has started execution
866// NEVER GENERATED BY COMPILER, DEPRECATED!!!
867void __kmpc_omp_task_begin(ident_t *loc_ref, kmp_int32 gtid, kmp_task_t *task) {
868 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
869
870 KA_TRACE(
871 10,
872 ("__kmpc_omp_task_begin(enter): T#%d loc=%p task=%p current_task=%p\n",
873 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task), current_task));
874
875 __kmp_task_start(gtid, task, current_task);
876
877 KA_TRACE(10, ("__kmpc_omp_task_begin(exit): T#%d loc=%p task=%p,\n", gtid,
878 loc_ref, KMP_TASK_TO_TASKDATA(task)));
879 return;
880}
881#endif // TASK_UNUSED
882
883// __kmp_free_task: free the current task space and the space for shareds
884//
885// gtid: Global thread ID of calling thread
886// taskdata: task to free
887// thread: thread data structure of caller
888static void __kmp_free_task(kmp_int32 gtid, kmp_taskdata_t *taskdata,
889 kmp_info_t *thread) {
890 KA_TRACE(30, ("__kmp_free_task: T#%d freeing data from task %p\n", gtid,
891 taskdata));
892
893 // Check to make sure all flags and counters have the correct values
894 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
895 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 0);
896 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 1);
897 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
898 KMP_DEBUG_ASSERT(taskdata->td_allocated_child_tasks == 0 ||
899 taskdata->td_flags.task_serial == 1);
900 KMP_DEBUG_ASSERT(taskdata->td_incomplete_child_tasks == 0);
901 kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata);
902 // Clear data to not be re-used later by mistake.
903 task->data1.destructors = NULL;
904 task->data2.priority = 0;
905
906 taskdata->td_flags.freed = 1;
907#if OMPX_TASKGRAPH
908 // do not free tasks in taskgraph
909 if (!taskdata->is_taskgraph) {
910#endif
911// deallocate the taskdata and shared variable blocks associated with this task
912#if USE_FAST_MEMORY
913 __kmp_fast_free(thread, taskdata);
914#else /* ! USE_FAST_MEMORY */
915 __kmp_thread_free(thread, taskdata);
916#endif
917#if OMPX_TASKGRAPH
918 } else {
919 taskdata->td_flags.complete = 0;
920 taskdata->td_flags.started = 0;
921 taskdata->td_flags.freed = 0;
922 taskdata->td_flags.executing = 0;
923 taskdata->td_flags.task_serial =
924 (taskdata->td_parent->td_flags.final ||
925 taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser);
926
927 // taskdata->td_allow_completion_event.pending_events_count = 1;
928 KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
929 KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
930 // start at one because counts current task and children
931 KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
932 }
933#endif
934
935 KA_TRACE(20, ("__kmp_free_task: T#%d freed task %p\n", gtid, taskdata));
936}
937
938// __kmp_free_task_and_ancestors: free the current task and ancestors without
939// children
940//
941// gtid: Global thread ID of calling thread
942// taskdata: task to free
943// thread: thread data structure of caller
944static void __kmp_free_task_and_ancestors(kmp_int32 gtid,
945 kmp_taskdata_t *taskdata,
946 kmp_info_t *thread) {
947 // Proxy tasks must always be allowed to free their parents
948 // because they can be run in background even in serial mode.
949 kmp_int32 team_serial =
950 (taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser) &&
951 !taskdata->td_flags.proxy;
952 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
953
954 kmp_int32 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
955 KMP_DEBUG_ASSERT(children >= 0);
956
957 // Now, go up the ancestor tree to see if any ancestors can now be freed.
958 while (children == 0) {
959 kmp_taskdata_t *parent_taskdata = taskdata->td_parent;
960
961 KA_TRACE(20, ("__kmp_free_task_and_ancestors(enter): T#%d task %p complete "
962 "and freeing itself\n",
963 gtid, taskdata));
964
965 // --- Deallocate my ancestor task ---
966 __kmp_free_task(gtid, taskdata, thread);
967
968 taskdata = parent_taskdata;
969
970 if (team_serial)
971 return;
972 // Stop checking ancestors at implicit task instead of walking up ancestor
973 // tree to avoid premature deallocation of ancestors.
974 if (taskdata->td_flags.tasktype == TASK_IMPLICIT) {
975 if (taskdata->td_dephash) { // do we need to cleanup dephash?
976 int children = KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks);
977 kmp_tasking_flags_t flags_old = taskdata->td_flags;
978 if (children == 0 && flags_old.complete == 1) {
979 kmp_tasking_flags_t flags_new = flags_old;
980 flags_new.complete = 0;
981 if (KMP_COMPARE_AND_STORE_ACQ32(
982 RCAST(kmp_int32 *, &taskdata->td_flags),
983 *RCAST(kmp_int32 *, &flags_old),
984 *RCAST(kmp_int32 *, &flags_new))) {
985 KA_TRACE(100, ("__kmp_free_task_and_ancestors: T#%d cleans "
986 "dephash of implicit task %p\n",
987 gtid, taskdata));
988 // cleanup dephash of finished implicit task
          __kmp_dephash_free_entries(thread, taskdata->td_dephash);
990 }
991 }
992 }
993 return;
994 }
995 // Predecrement simulated by "- 1" calculation
996 children = KMP_ATOMIC_DEC(&taskdata->td_allocated_child_tasks) - 1;
997 KMP_DEBUG_ASSERT(children >= 0);
998 }
999
1000 KA_TRACE(
1001 20, ("__kmp_free_task_and_ancestors(exit): T#%d task %p has %d children; "
1002 "not freeing it yet\n",
1003 gtid, taskdata, children));
1004}
1005
1006// Only need to keep track of child task counts if any of the following:
1007// 1. team parallel and tasking not serialized;
1008// 2. it is a proxy or detachable or hidden helper task
1009// 3. the children counter of its parent task is greater than 0.
1010// The reason for the 3rd one is for serialized team that found detached task,
1011// hidden helper task, T. In this case, the execution of T is still deferred,
1012// and it is also possible that a regular task depends on T. In this case, if we
1013// don't track the children, task synchronization will be broken.
1014static bool __kmp_track_children_task(kmp_taskdata_t *taskdata) {
1015 kmp_tasking_flags_t flags = taskdata->td_flags;
1016 bool ret = !(flags.team_serial || flags.tasking_ser);
1017 ret = ret || flags.proxy == TASK_PROXY ||
1018 flags.detachable == TASK_DETACHABLE || flags.hidden_helper;
1019 ret = ret ||
1020 KMP_ATOMIC_LD_ACQ(&taskdata->td_parent->td_incomplete_child_tasks) > 0;
1021#if OMPX_TASKGRAPH
1022 if (taskdata->td_taskgroup && taskdata->is_taskgraph)
1023 ret = ret || KMP_ATOMIC_LD_ACQ(&taskdata->td_taskgroup->count) > 0;
1024#endif
1025 return ret;
1026}
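
//
// Example for condition 3 (illustrative): in a serialized team, a detachable
// task D is created and then a sibling task R with a dependence on D. D's
// completion is deferred until its event is fulfilled, so the parent's
// td_incomplete_child_tasks stays above zero and this function keeps returning
// true for R; without that, R's dependence on D could be dropped.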
1027
1028// __kmp_task_finish: bookkeeping to do when a task finishes execution
1029//
1030// gtid: global thread ID for calling thread
1031// task: task to be finished
1032// resumed_task: task to be resumed. (may be NULL if task is serialized)
1033//
1034// template<ompt>: effectively ompt_enabled.enabled!=0
1035// the version with ompt=false is inlined, allowing to optimize away all ompt
1036// code in this case
1037template <bool ompt>
1038static void __kmp_task_finish(kmp_int32 gtid, kmp_task_t *task,
1039 kmp_taskdata_t *resumed_task) {
1040 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1041 kmp_info_t *thread = __kmp_threads[gtid];
1042 kmp_task_team_t *task_team =
1043 thread->th.th_task_team; // might be NULL for serial teams...
1044#if OMPX_TASKGRAPH
1045 // to avoid seg fault when we need to access taskdata->td_flags after free when using vanilla taskloop
1046 bool is_taskgraph;
1047#endif
1048#if KMP_DEBUG
1049 kmp_int32 children = 0;
1050#endif
1051 KA_TRACE(10, ("__kmp_task_finish(enter): T#%d finishing task %p and resuming "
1052 "task %p\n",
1053 gtid, taskdata, resumed_task));
1054
1055 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
1056
1057#if OMPX_TASKGRAPH
1058 is_taskgraph = taskdata->is_taskgraph;
1059#endif
1060
1061// Pop task from stack if tied
1062#ifdef BUILD_TIED_TASK_STACK
1063 if (taskdata->td_flags.tiedness == TASK_TIED) {
1064 __kmp_pop_task_stack(gtid, thread, taskdata);
1065 }
1066#endif /* BUILD_TIED_TASK_STACK */
1067
1068 if (UNLIKELY(taskdata->td_flags.tiedness == TASK_UNTIED)) {
1069 // untied task needs to check the counter so that the task structure is not
1070 // freed prematurely
1071 kmp_int32 counter = KMP_ATOMIC_DEC(&taskdata->td_untied_count) - 1;
1072 KA_TRACE(
1073 20,
1074 ("__kmp_task_finish: T#%d untied_count (%d) decremented for task %p\n",
1075 gtid, counter, taskdata));
1076 if (counter > 0) {
1077 // untied task is not done, to be continued possibly by other thread, do
1078 // not free it now
1079 if (resumed_task == NULL) {
1080 KMP_DEBUG_ASSERT(taskdata->td_flags.task_serial);
1081 resumed_task = taskdata->td_parent; // In a serialized task, the resumed
1082 // task is the parent
1083 }
1084 thread->th.th_current_task = resumed_task; // restore current_task
1085 resumed_task->td_flags.executing = 1; // resume previous task
1086 KA_TRACE(10, ("__kmp_task_finish(exit): T#%d partially done task %p, "
1087 "resuming task %p\n",
1088 gtid, taskdata, resumed_task));
1089 return;
1090 }
1091 }
1092
1093 // bookkeeping for resuming task:
1094 // GEH - note tasking_ser => task_serial
1095 KMP_DEBUG_ASSERT(
1096 (taskdata->td_flags.tasking_ser || taskdata->td_flags.task_serial) ==
1097 taskdata->td_flags.task_serial);
1098 if (taskdata->td_flags.task_serial) {
1099 if (resumed_task == NULL) {
1100 resumed_task = taskdata->td_parent; // In a serialized task, the resumed
1101 // task is the parent
1102 }
1103 } else {
1104 KMP_DEBUG_ASSERT(resumed_task !=
1105 NULL); // verify that resumed task is passed as argument
1106 }
1107
1108 /* If the tasks' destructor thunk flag has been set, we need to invoke the
1109 destructor thunk that has been generated by the compiler. The code is
1110 placed here, since at this point other tasks might have been released
1111 hence overlapping the destructor invocations with some other work in the
1112 released tasks. The OpenMP spec is not specific on when the destructors
1113 are invoked, so we should be free to choose. */
1114 if (UNLIKELY(taskdata->td_flags.destructors_thunk)) {
1115 kmp_routine_entry_t destr_thunk = task->data1.destructors;
1116 KMP_ASSERT(destr_thunk);
1117 destr_thunk(gtid, task);
1118 }
1119
1120 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
1121 KMP_DEBUG_ASSERT(taskdata->td_flags.started == 1);
1122 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
1123
1124 bool completed = true;
1125 if (UNLIKELY(taskdata->td_flags.detachable == TASK_DETACHABLE)) {
1126 if (taskdata->td_allow_completion_event.type ==
1127 KMP_EVENT_ALLOW_COMPLETION) {
1128 // event hasn't been fulfilled yet. Try to detach task.
      __kmp_acquire_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
1130 if (taskdata->td_allow_completion_event.type ==
1131 KMP_EVENT_ALLOW_COMPLETION) {
1132 // task finished execution
1133 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
1134 taskdata->td_flags.executing = 0; // suspend the finishing task
1135
1136#if OMPT_SUPPORT
1137 // For a detached task, which is not completed, we switch back
1138 // the omp_fulfill_event signals completion
1139 // locking is necessary to avoid a race with ompt_task_late_fulfill
1140 if (ompt)
          __ompt_task_finish(task, resumed_task, ompt_task_detach);
1142#endif
1143
1144 // no access to taskdata after this point!
1145 // __kmp_fulfill_event might free taskdata at any time from now
1146
1147 taskdata->td_flags.proxy = TASK_PROXY; // proxify!
1148 completed = false;
1149 }
      __kmp_release_tas_lock(&taskdata->td_allow_completion_event.lock, gtid);
1151 }
1152 }
1153
1154 // Tasks with valid target async handles must be re-enqueued.
1155 if (taskdata->td_target_data.async_handle != NULL) {
1156 // Note: no need to translate gtid to its shadow. If the current thread is a
1157 // hidden helper one, then the gtid is already correct. Otherwise, hidden
    // helper threads are disabled, and gtid refers to an OpenMP thread.
    __kmpc_give_task(task, __kmp_tid_from_gtid(gtid));
1160 if (KMP_HIDDEN_HELPER_THREAD(gtid))
1161 __kmp_hidden_helper_worker_thread_signal();
1162 completed = false;
1163 }
1164
1165 if (completed) {
1166 taskdata->td_flags.complete = 1; // mark the task as completed
1167#if OMPX_TASKGRAPH
1168 taskdata->td_flags.onced = 1; // mark the task as ran once already
1169#endif
1170
1171#if OMPT_SUPPORT
1172 // This is not a detached task, we are done here
1173 if (ompt)
      __ompt_task_finish(task, resumed_task, ompt_task_complete);
1175#endif
1176 // TODO: What would be the balance between the conditions in the function
1177 // and an atomic operation?
1178 if (__kmp_track_children_task(taskdata)) {
      __kmp_release_deps(gtid, taskdata);
1180 // Predecrement simulated by "- 1" calculation
1181#if KMP_DEBUG
1182 children = -1 +
1183#endif
1184 KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
1185 KMP_DEBUG_ASSERT(children >= 0);
1186#if OMPX_TASKGRAPH
1187 if (taskdata->td_taskgroup && !taskdata->is_taskgraph)
1188#else
1189 if (taskdata->td_taskgroup)
1190#endif
1191 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
1192 } else if (task_team && (task_team->tt.tt_found_proxy_tasks ||
1193 task_team->tt.tt_hidden_helper_task_encountered)) {
1194 // if we found proxy or hidden helper tasks there could exist a dependency
1195 // chain with the proxy task as origin
      __kmp_release_deps(gtid, taskdata);
1197 }
1198 // td_flags.executing must be marked as 0 after __kmp_release_deps has been
    // called. Otherwise, if a task is executed immediately from the
1200 // release_deps code, the flag will be reset to 1 again by this same
1201 // function
1202 KMP_DEBUG_ASSERT(taskdata->td_flags.executing == 1);
1203 taskdata->td_flags.executing = 0; // suspend the finishing task
1204
1205 // Decrement the counter of hidden helper tasks to be executed.
1206 if (taskdata->td_flags.hidden_helper) {
1207 // Hidden helper tasks can only be executed by hidden helper threads.
1208 KMP_ASSERT(KMP_HIDDEN_HELPER_THREAD(gtid));
1209 KMP_ATOMIC_DEC(&__kmp_unexecuted_hidden_helper_tasks);
1210 }
1211 }
1212
1213 KA_TRACE(
1214 20, ("__kmp_task_finish: T#%d finished task %p, %d incomplete children\n",
1215 gtid, taskdata, children));
1216
1217 // Free this task and then ancestor tasks if they have no children.
1218 // Restore th_current_task first as suggested by John:
1219 // johnmc: if an asynchronous inquiry peers into the runtime system
1220 // it doesn't see the freed task as the current task.
1221 thread->th.th_current_task = resumed_task;
1222 if (completed)
1223 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
1224
1225 // TODO: GEH - make sure root team implicit task is initialized properly.
1226 // KMP_DEBUG_ASSERT( resumed_task->td_flags.executing == 0 );
1227 resumed_task->td_flags.executing = 1; // resume previous task
1228
1229#if OMPX_TASKGRAPH
1230 if (is_taskgraph && __kmp_track_children_task(taskdata) &&
1231 taskdata->td_taskgroup) {
1232 // TDG: we only release taskgroup barrier here because
1233 // free_task_and_ancestors will call
1234 // __kmp_free_task, which resets all task parameters such as
1235 // taskdata->started, etc. If we release the barrier earlier, these
1236 // parameters could be read before being reset. This is not an issue for
1237 // non-TDG implementation because we never reuse a task(data) structure
1238 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
1239 }
1240#endif
1241
1242 KA_TRACE(
1243 10, ("__kmp_task_finish(exit): T#%d finished task %p, resuming task %p\n",
1244 gtid, taskdata, resumed_task));
1245
1246 return;
1247}
1248
1249template <bool ompt>
1250static void __kmpc_omp_task_complete_if0_template(ident_t *loc_ref,
1251 kmp_int32 gtid,
1252 kmp_task_t *task) {
1253 KA_TRACE(10, ("__kmpc_omp_task_complete_if0(enter): T#%d loc=%p task=%p\n",
1254 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
1255 KMP_DEBUG_ASSERT(gtid >= 0);
1256 // this routine will provide task to resume
1257 __kmp_task_finish<ompt>(gtid, task, NULL);
1258
1259 KA_TRACE(10, ("__kmpc_omp_task_complete_if0(exit): T#%d loc=%p task=%p\n",
1260 gtid, loc_ref, KMP_TASK_TO_TASKDATA(task)));
1261
1262#if OMPT_SUPPORT
1263 if (ompt) {
1264 ompt_frame_t *ompt_frame;
    __ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
1266 ompt_frame->enter_frame = ompt_data_none;
1267 ompt_frame->enter_frame_flags =
1268 ompt_frame_runtime | ompt_frame_framepointer;
1269 }
1270#endif
1271
1272 return;
1273}
1274
1275#if OMPT_SUPPORT
1276OMPT_NOINLINE
1277void __kmpc_omp_task_complete_if0_ompt(ident_t *loc_ref, kmp_int32 gtid,
1278 kmp_task_t *task) {
1279 __kmpc_omp_task_complete_if0_template<true>(loc_ref, gtid, task);
1280}
1281#endif // OMPT_SUPPORT
1282
1283// __kmpc_omp_task_complete_if0: report that a task has completed execution
1284//
1285// loc_ref: source location information; points to end of task block.
1286// gtid: global thread number.
1287// task: task thunk for the completed task.
1288void __kmpc_omp_task_complete_if0(ident_t *loc_ref, kmp_int32 gtid,
1289 kmp_task_t *task) {
1290#if OMPT_SUPPORT
1291 if (UNLIKELY(ompt_enabled.enabled)) {
1292 __kmpc_omp_task_complete_if0_ompt(loc_ref, gtid, task);
1293 return;
1294 }
1295#endif
1296 __kmpc_omp_task_complete_if0_template<false>(loc_ref, gtid, task);
1297}
1298
1299#ifdef TASK_UNUSED
1300// __kmpc_omp_task_complete: report that a task has completed execution
1301// NEVER GENERATED BY COMPILER, DEPRECATED!!!
1302void __kmpc_omp_task_complete(ident_t *loc_ref, kmp_int32 gtid,
1303 kmp_task_t *task) {
1304 KA_TRACE(10, ("__kmpc_omp_task_complete(enter): T#%d loc=%p task=%p\n", gtid,
1305 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1306
1307 __kmp_task_finish<false>(gtid, task,
1308 NULL); // Not sure how to find task to resume
1309
1310 KA_TRACE(10, ("__kmpc_omp_task_complete(exit): T#%d loc=%p task=%p\n", gtid,
1311 loc_ref, KMP_TASK_TO_TASKDATA(task)));
1312 return;
1313}
1314#endif // TASK_UNUSED
1315
1316// __kmp_init_implicit_task: Initialize the appropriate fields in the implicit
1317// task for a given thread
1318//
1319// loc_ref: reference to source location of parallel region
1320// this_thr: thread data structure corresponding to implicit task
1321// team: team for this_thr
1322// tid: thread id of given thread within team
1323// set_curr_task: TRUE if need to push current task to thread
1324// NOTE: Routine does not set up the implicit task ICVS. This is assumed to
1325// have already been done elsewhere.
1326// TODO: Get better loc_ref. Value passed in may be NULL
1327void __kmp_init_implicit_task(ident_t *loc_ref, kmp_info_t *this_thr,
1328 kmp_team_t *team, int tid, int set_curr_task) {
1329 kmp_taskdata_t *task = &team->t.t_implicit_task_taskdata[tid];
1330
1331 KF_TRACE(
1332 10,
1333 ("__kmp_init_implicit_task(enter): T#:%d team=%p task=%p, reinit=%s\n",
1334 tid, team, task, set_curr_task ? "TRUE" : "FALSE"));
1335
1336 task->td_task_id = KMP_GEN_TASK_ID();
1337 task->td_team = team;
1338 // task->td_parent = NULL; // fix for CQ230101 (broken parent task info
1339 // in debugger)
1340 task->td_ident = loc_ref;
1341 task->td_taskwait_ident = NULL;
1342 task->td_taskwait_counter = 0;
1343 task->td_taskwait_thread = 0;
1344
1345 task->td_flags.tiedness = TASK_TIED;
1346 task->td_flags.tasktype = TASK_IMPLICIT;
1347 task->td_flags.proxy = TASK_FULL;
1348
1349 // All implicit tasks are executed immediately, not deferred
1350 task->td_flags.task_serial = 1;
1351 task->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1352 task->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1353
1354 task->td_flags.started = 1;
1355 task->td_flags.executing = 1;
1356 task->td_flags.complete = 0;
1357 task->td_flags.freed = 0;
1358#if OMPX_TASKGRAPH
1359 task->td_flags.onced = 0;
1360#endif
1361
1362 task->td_depnode = NULL;
1363 task->td_last_tied = task;
1364 task->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1365
1366 if (set_curr_task) { // only do this init first time thread is created
1367 KMP_ATOMIC_ST_REL(&task->td_incomplete_child_tasks, 0);
1368 // Not used: don't need to deallocate implicit task
1369 KMP_ATOMIC_ST_REL(&task->td_allocated_child_tasks, 0);
1370 task->td_taskgroup = NULL; // An implicit task does not have taskgroup
1371 task->td_dephash = NULL;
1372 __kmp_push_current_task_to_thread(this_thr, team, tid);
1373 } else {
1374 KMP_DEBUG_ASSERT(task->td_incomplete_child_tasks == 0);
1375 KMP_DEBUG_ASSERT(task->td_allocated_child_tasks == 0);
1376 }
1377
1378#if OMPT_SUPPORT
1379 if (UNLIKELY(ompt_enabled.enabled))
1380 __ompt_task_init(task, tid);
1381#endif
1382
1383 KF_TRACE(10, ("__kmp_init_implicit_task(exit): T#:%d team=%p task=%p\n", tid,
1384 team, task));
1385}
1386
1387// __kmp_finish_implicit_task: Release resources associated to implicit tasks
1388// at the end of parallel regions. Some resources are kept for reuse in the next
1389// parallel region.
1390//
1391// thread: thread data structure corresponding to implicit task
1392void __kmp_finish_implicit_task(kmp_info_t *thread) {
1393 kmp_taskdata_t *task = thread->th.th_current_task;
1394 if (task->td_dephash) {
1395 int children;
1396 task->td_flags.complete = 1;
1397#if OMPX_TASKGRAPH
1398 task->td_flags.onced = 1;
1399#endif
1400 children = KMP_ATOMIC_LD_ACQ(&task->td_incomplete_child_tasks);
1401 kmp_tasking_flags_t flags_old = task->td_flags;
1402 if (children == 0 && flags_old.complete == 1) {
1403 kmp_tasking_flags_t flags_new = flags_old;
1404 flags_new.complete = 0;
1405 if (KMP_COMPARE_AND_STORE_ACQ32(RCAST(kmp_int32 *, &task->td_flags),
1406 *RCAST(kmp_int32 *, &flags_old),
1407 *RCAST(kmp_int32 *, &flags_new))) {
1408 KA_TRACE(100, ("__kmp_finish_implicit_task: T#%d cleans "
1409 "dephash of implicit task %p\n",
1410 thread->th.th_info.ds.ds_gtid, task));
        __kmp_dephash_free_entries(thread, task->td_dephash);
1412 }
1413 }
1414 }
1415}
1416
1417// __kmp_free_implicit_task: Release resources associated to implicit tasks
// when these are destroyed
1419//
1420// thread: thread data structure corresponding to implicit task
1421void __kmp_free_implicit_task(kmp_info_t *thread) {
1422 kmp_taskdata_t *task = thread->th.th_current_task;
1423 if (task && task->td_dephash) {
    __kmp_dephash_free(thread, task->td_dephash);
1425 task->td_dephash = NULL;
1426 }
1427}
1428
1429// Round up a size to a power of two specified by val: Used to insert padding
1430// between structures co-allocated using a single malloc() call
1431static size_t __kmp_round_up_to_val(size_t size, size_t val) {
1432 if (size & (val - 1)) {
1433 size &= ~(val - 1);
1434 if (size <= KMP_SIZE_T_MAX - val) {
1435 size += val; // Round up if there is no overflow.
1436 }
1437 }
1438 return size;
1439 } // __kmp_round_up_to_val
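// For illustration: with val == sizeof(void *) == 8, a size of 13 is rounded
// up to 16, while an already-aligned size such as 16 is returned unchanged.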
1440
1441// __kmp_task_alloc: Allocate the taskdata and task data structures for a task
1442//
1443// loc_ref: source location information
1444// gtid: global thread number.
1445// flags: include tiedness & task type (explicit vs. implicit) of the ''new''
1446// task encountered. Converted from kmp_int32 to kmp_tasking_flags_t in routine.
1447// sizeof_kmp_task_t: Size in bytes of kmp_task_t data structure including
1448// private vars accessed in task.
1449// sizeof_shareds: Size in bytes of array of pointers to shared vars accessed
1450// in task.
1451// task_entry: Pointer to task code entry point generated by compiler.
1452// returns: a pointer to the allocated kmp_task_t structure (task).
1453kmp_task_t *__kmp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1454 kmp_tasking_flags_t *flags,
1455 size_t sizeof_kmp_task_t, size_t sizeof_shareds,
1456 kmp_routine_entry_t task_entry) {
1457 kmp_task_t *task;
1458 kmp_taskdata_t *taskdata;
1459 kmp_info_t *thread = __kmp_threads[gtid];
1460 kmp_team_t *team = thread->th.th_team;
1461 kmp_taskdata_t *parent_task = thread->th.th_current_task;
1462 size_t shareds_offset;
1463
1464 if (UNLIKELY(!TCR_4(__kmp_init_middle)))
1465 __kmp_middle_initialize();
1466
1467 if (flags->hidden_helper) {
1468 if (__kmp_enable_hidden_helper) {
1469 if (!TCR_4(__kmp_init_hidden_helper))
1470 __kmp_hidden_helper_initialize();
1471 } else {
1472 // If the hidden helper task is not enabled, reset the flag to FALSE.
1473 flags->hidden_helper = FALSE;
1474 }
1475 }
1476
1477 KA_TRACE(10, ("__kmp_task_alloc(enter): T#%d loc=%p, flags=(0x%x) "
1478 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1479 gtid, loc_ref, *((kmp_int32 *)flags), sizeof_kmp_task_t,
1480 sizeof_shareds, task_entry));
1481
1482 KMP_DEBUG_ASSERT(parent_task);
1483 if (parent_task->td_flags.final) {
1484 if (flags->merged_if0) {
1485 }
1486 flags->final = 1;
1487 }
1488
1489 if (flags->tiedness == TASK_UNTIED && !team->t.t_serialized) {
1490 // An untied task encountered causes the TSC algorithm to check the entire
1491 // deque of the victim thread. If no untied task was encountered, checking
1492 // the head of the deque should be enough.
1493 KMP_CHECK_UPDATE(thread->th.th_task_team->tt.tt_untied_task_encountered, 1);
1494 }
1495
1496 // Detachable tasks are not proxy tasks yet but could be in the future. Doing
1497 // the tasking setup when that happens is too late.
1499 if (UNLIKELY(flags->proxy == TASK_PROXY ||
1500 flags->detachable == TASK_DETACHABLE || flags->hidden_helper)) {
1501 if (flags->proxy == TASK_PROXY) {
1502 flags->tiedness = TASK_UNTIED;
1503 flags->merged_if0 = 1;
1504 }
1505 /* are we running in a sequential parallel or tskm_immediate_exec... we need
1506 tasking support enabled */
1507 if ((thread->th.th_task_team) == NULL) {
1508 /* This should only happen if the team is serialized
1509 setup a task team and propagate it to the thread */
1510 KMP_DEBUG_ASSERT(team->t.t_serialized);
1511 KA_TRACE(30,
1512 ("T#%d creating task team in __kmp_task_alloc for proxy task\n",
1513 gtid));
1514 // 1 indicates setup the current team regardless of nthreads
1515 __kmp_task_team_setup(thread, team, 1);
1516 thread->th.th_task_team = team->t.t_task_team[thread->th.th_task_state];
1517 }
1518 kmp_task_team_t *task_team = thread->th.th_task_team;
1519
1520 /* tasking must be enabled now as the task might not be pushed */
1521 if (!KMP_TASKING_ENABLED(task_team)) {
1522 KA_TRACE(
1523 30,
1524 ("T#%d enabling tasking in __kmp_task_alloc for proxy task\n", gtid));
1525 __kmp_enable_tasking(task_team, thread);
1526 kmp_int32 tid = thread->th.th_info.ds.ds_tid;
1527 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
1528 // No lock needed since only owner can allocate
1529 if (thread_data->td.td_deque == NULL) {
1530 __kmp_alloc_task_deque(thread, thread_data);
1531 }
1532 }
1533
1534 if ((flags->proxy == TASK_PROXY || flags->detachable == TASK_DETACHABLE) &&
1535 task_team->tt.tt_found_proxy_tasks == FALSE)
1536 TCW_4(task_team->tt.tt_found_proxy_tasks, TRUE);
1537 if (flags->hidden_helper &&
1538 task_team->tt.tt_hidden_helper_task_encountered == FALSE)
1539 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, TRUE);
1540 }
1541
1542 // Calculate shared structure offset including padding after kmp_task_t struct
1543 // to align pointers in shared struct
1544 shareds_offset = sizeof(kmp_taskdata_t) + sizeof_kmp_task_t;
1545 shareds_offset = __kmp_round_up_to_val(shareds_offset, sizeof(void *));
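// The single allocation produced below is laid out as follows (illustrative):
//   [ kmp_taskdata_t | kmp_task_t + private data | pad | shareds ]
//   ^ taskdata        ^ task                            ^ task->shareds (at shareds_offset)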
1546
1547 // Allocate a kmp_taskdata_t block and a kmp_task_t block.
1548 KA_TRACE(30, ("__kmp_task_alloc: T#%d First malloc size: %ld\n", gtid,
1549 shareds_offset));
1550 KA_TRACE(30, ("__kmp_task_alloc: T#%d Second malloc size: %ld\n", gtid,
1551 sizeof_shareds));
1552
1553 // Avoid double allocation here by combining shareds with taskdata
1554#if USE_FAST_MEMORY
1555 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, shareds_offset +
1556 sizeof_shareds);
1557#else /* ! USE_FAST_MEMORY */
1558 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, shareds_offset +
1559 sizeof_shareds);
1560#endif /* USE_FAST_MEMORY */
1561
1562 task = KMP_TASKDATA_TO_TASK(taskdata);
1563
1564// Make sure task & taskdata are aligned appropriately
1565#if KMP_ARCH_X86 || KMP_ARCH_PPC64 || KMP_ARCH_S390X || !KMP_HAVE_QUAD
1566 KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(double) - 1)) == 0);
1567 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(double) - 1)) == 0);
1568#else
1569 KMP_DEBUG_ASSERT((((kmp_uintptr_t)taskdata) & (sizeof(_Quad) - 1)) == 0);
1570 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task) & (sizeof(_Quad) - 1)) == 0);
1571#endif
1572 if (sizeof_shareds > 0) {
1573 // Avoid double allocation here by combining shareds with taskdata
1574 task->shareds = &((char *)taskdata)[shareds_offset];
1575 // Make sure shareds struct is aligned to pointer size
1576 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
1577 0);
1578 } else {
1579 task->shareds = NULL;
1580 }
1581 task->routine = task_entry;
1582 task->part_id = 0; // AC: Always start with 0 part id
1583
1584 taskdata->td_task_id = KMP_GEN_TASK_ID();
1585 taskdata->td_team = thread->th.th_team;
1586 taskdata->td_alloc_thread = thread;
1587 taskdata->td_parent = parent_task;
1588 taskdata->td_level = parent_task->td_level + 1; // increment nesting level
1589 KMP_ATOMIC_ST_RLX(&taskdata->td_untied_count, 0);
1590 taskdata->td_ident = loc_ref;
1591 taskdata->td_taskwait_ident = NULL;
1592 taskdata->td_taskwait_counter = 0;
1593 taskdata->td_taskwait_thread = 0;
1594 KMP_DEBUG_ASSERT(taskdata->td_parent != NULL);
1595 // avoid copying icvs for proxy tasks
1596 if (flags->proxy == TASK_FULL)
1597 copy_icvs(&taskdata->td_icvs, &taskdata->td_parent->td_icvs);
1598
1599 taskdata->td_flags = *flags;
1600 taskdata->td_task_team = thread->th.th_task_team;
1601 taskdata->td_size_alloc = shareds_offset + sizeof_shareds;
1602 taskdata->td_flags.tasktype = TASK_EXPLICIT;
1603 // If it is a hidden helper task, we need to set the team and task team
1604 // accordingly.
1605 if (flags->hidden_helper) {
1606 kmp_info_t *shadow_thread = __kmp_threads[KMP_GTID_TO_SHADOW_GTID(gtid)];
1607 taskdata->td_team = shadow_thread->th.th_team;
1608 taskdata->td_task_team = shadow_thread->th.th_task_team;
1609 }
1610
1611 // GEH - TODO: fix this to copy parent task's value of tasking_ser flag
1612 taskdata->td_flags.tasking_ser = (__kmp_tasking_mode == tskm_immediate_exec);
1613
1614 // GEH - TODO: fix this to copy parent task's value of team_serial flag
1615 taskdata->td_flags.team_serial = (team->t.t_serialized) ? 1 : 0;
1616
1617 // GEH - Note we serialize the task if the team is serialized to make sure
1618 // implicit parallel region tasks are not left until program termination to
1619 // execute. Also, it helps locality to execute immediately.
1620
1621 taskdata->td_flags.task_serial =
1622 (parent_task->td_flags.final || taskdata->td_flags.team_serial ||
1623 taskdata->td_flags.tasking_ser || flags->merged_if0);
1624
1625 taskdata->td_flags.started = 0;
1626 taskdata->td_flags.executing = 0;
1627 taskdata->td_flags.complete = 0;
1628 taskdata->td_flags.freed = 0;
1629#if OMPX_TASKGRAPH
1630 taskdata->td_flags.onced = 0;
1631#endif
1632 KMP_ATOMIC_ST_RLX(&taskdata->td_incomplete_child_tasks, 0);
1633 // start at one because counts current task and children
1634 KMP_ATOMIC_ST_RLX(&taskdata->td_allocated_child_tasks, 1);
1635 taskdata->td_taskgroup =
1636 parent_task->td_taskgroup; // task inherits taskgroup from the parent task
1637 taskdata->td_dephash = NULL;
1638 taskdata->td_depnode = NULL;
1639 taskdata->td_target_data.async_handle = NULL;
1640 if (flags->tiedness == TASK_UNTIED)
1641 taskdata->td_last_tied = NULL; // will be set when the task is scheduled
1642 else
1643 taskdata->td_last_tied = taskdata;
1644 taskdata->td_allow_completion_event.type = KMP_EVENT_UNINITIALIZED;
1645#if OMPT_SUPPORT
1646 if (UNLIKELY(ompt_enabled.enabled))
1647 __ompt_task_init(taskdata, gtid);
1648#endif
1649 // TODO: What would be the balance between the conditions in the function and
1650 // an atomic operation?
1651 if (__kmp_track_children_task(taskdata)) {
1652 KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
1653 if (parent_task->td_taskgroup)
1654 KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
1655 // Only need to keep track of allocated child tasks for explicit tasks since
1656 // implicit tasks are not deallocated
1657 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT) {
1658 KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
1659 }
1660 if (flags->hidden_helper) {
1661 taskdata->td_flags.task_serial = FALSE;
1662 // Increment the number of hidden helper tasks to be executed
1663 KMP_ATOMIC_INC(&__kmp_unexecuted_hidden_helper_tasks);
1664 }
1665 }
1666
1667#if OMPX_TASKGRAPH
1668 kmp_tdg_info_t *tdg = __kmp_find_tdg(__kmp_curr_tdg_idx);
1669 if (tdg && __kmp_tdg_is_recording(tdg->tdg_status) &&
1670 (task_entry != (kmp_routine_entry_t)__kmp_taskloop_task)) {
1671 taskdata->is_taskgraph = 1;
1672 taskdata->tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
1673 taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
1674 }
1675#endif
1676 KA_TRACE(20, ("__kmp_task_alloc(exit): T#%d created task %p parent=%p\n",
1677 gtid, taskdata, taskdata->td_parent));
1678
1679 return task;
1680}
1681
1682kmp_task_t *__kmpc_omp_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1683 kmp_int32 flags, size_t sizeof_kmp_task_t,
1684 size_t sizeof_shareds,
1685 kmp_routine_entry_t task_entry) {
1686 kmp_task_t *retval;
1687 kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags;
1688 __kmp_assert_valid_gtid(gtid);
1689 input_flags->native = FALSE;
1690 // __kmp_task_alloc() sets up all other runtime flags
1691 KA_TRACE(10, ("__kmpc_omp_task_alloc(enter): T#%d loc=%p, flags=(%s %s %s) "
1692 "sizeof_task=%ld sizeof_shared=%ld entry=%p\n",
1693 gtid, loc_ref, input_flags->tiedness ? "tied " : "untied",
1694 input_flags->proxy ? "proxy" : "",
1695 input_flags->detachable ? "detachable" : "", sizeof_kmp_task_t,
1696 sizeof_shareds, task_entry));
1697
1698 retval = __kmp_task_alloc(loc_ref, gtid, input_flags, sizeof_kmp_task_t,
1699 sizeof_shareds, task_entry);
1700
1701 KA_TRACE(20, ("__kmpc_omp_task_alloc(exit): T#%d retval %p\n", gtid, retval));
1702
1703 return retval;
1704}
1705
1706kmp_task_t *__kmpc_omp_target_task_alloc(ident_t *loc_ref, kmp_int32 gtid,
1707 kmp_int32 flags,
1708 size_t sizeof_kmp_task_t,
1709 size_t sizeof_shareds,
1710 kmp_routine_entry_t task_entry,
1711 kmp_int64 device_id) {
1712 auto &input_flags = reinterpret_cast<kmp_tasking_flags_t &>(flags);
1713 // target tasks are untied, as defined in the specification
1714 input_flags.tiedness = TASK_UNTIED;
1715
1716 if (__kmp_enable_hidden_helper)
1717 input_flags.hidden_helper = TRUE;
1718
1719 return __kmpc_omp_task_alloc(loc_ref, gtid, flags, sizeof_kmp_task_t,
1720 sizeof_shareds, task_entry);
1721}
1722
1723/*!
1724@ingroup TASKING
1725@param loc_ref location of the original task directive
1726@param gtid Global Thread ID of encountering thread
1727@param new_task task thunk allocated by __kmpc_omp_task_alloc() for the ''new
1728task''
1729@param naffins Number of affinity items
1730@param affin_list List of affinity items
1731@return Returns non-zero if registering affinity information was not successful.
1732 Returns 0 if registration was successful
1733This entry registers the affinity information attached to a task with the task
1734thunk structure kmp_taskdata_t.
1735*/
1736kmp_int32
1737__kmpc_omp_reg_task_with_affinity(ident_t *loc_ref, kmp_int32 gtid,
1738 kmp_task_t *new_task, kmp_int32 naffins,
1739 kmp_task_affinity_info_t *affin_list) {
1740 return 0;
1741}
1742
1743// __kmp_invoke_task: invoke the specified task
1744//
1745// gtid: global thread ID of caller
1746// task: the task to invoke
1747// current_task: the task to resume after task invocation
1748#ifdef __s390x__
1749__attribute__((target("backchain")))
1750#endif
1751static void
1752__kmp_invoke_task(kmp_int32 gtid, kmp_task_t *task,
1753 kmp_taskdata_t *current_task) {
1754 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
1755 kmp_info_t *thread;
1756 int discard = 0 /* false */;
1757 KA_TRACE(
1758 30, ("__kmp_invoke_task(enter): T#%d invoking task %p, current_task=%p\n",
1759 gtid, taskdata, current_task));
1760 KMP_DEBUG_ASSERT(task);
1761 if (UNLIKELY(taskdata->td_flags.proxy == TASK_PROXY &&
1762 taskdata->td_flags.complete == 1)) {
1763 // This is a proxy task that was already completed but it needs to run
1764 // its bottom-half finish
1765 KA_TRACE(
1766 30,
1767 ("__kmp_invoke_task: T#%d running bottom finish for proxy task %p\n",
1768 gtid, taskdata));
1769
1770 __kmp_bottom_half_finish_proxy(gtid, task);
1771
1772 KA_TRACE(30, ("__kmp_invoke_task(exit): T#%d completed bottom finish for "
1773 "proxy task %p, resuming task %p\n",
1774 gtid, taskdata, current_task));
1775
1776 return;
1777 }
1778
1779#if OMPT_SUPPORT
1780 // For untied tasks, the first task executed only calls __kmpc_omp_task and
1781 // does not execute code.
1782 ompt_thread_info_t oldInfo;
1783 if (UNLIKELY(ompt_enabled.enabled)) {
1784 // Store the threads states and restore them after the task
1785 thread = __kmp_threads[gtid];
1786 oldInfo = thread->th.ompt_thread_info;
1787 thread->th.ompt_thread_info.wait_id = 0;
1788 thread->th.ompt_thread_info.state = (thread->th.th_team_serialized)
1789 ? ompt_state_work_serial
1790 : ompt_state_work_parallel;
1791 taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1792 }
1793#endif
1794
1795 // Proxy tasks are not handled by the runtime
1796 if (taskdata->td_flags.proxy != TASK_PROXY) {
1797 __kmp_task_start(gtid, task, current_task); // OMPT only if not discarded
1798 }
1799
1800 // TODO: cancel tasks if the parallel region has also been cancelled
1801 // TODO: check if this sequence can be hoisted above __kmp_task_start
1802 // if cancellation has been enabled for this run ...
1803 if (UNLIKELY(__kmp_omp_cancellation)) {
1804 thread = __kmp_threads[gtid];
1805 kmp_team_t *this_team = thread->th.th_team;
1806 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
1807 if ((taskgroup && taskgroup->cancel_request) ||
1808 (this_team->t.t_cancel_request == cancel_parallel)) {
1809#if OMPT_SUPPORT && OMPT_OPTIONAL
1810 ompt_data_t *task_data;
1811 if (UNLIKELY(ompt_enabled.ompt_callback_cancel)) {
1812 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, NULL, NULL);
1813 ompt_callbacks.ompt_callback(ompt_callback_cancel)(
1814 task_data,
1815 ((taskgroup && taskgroup->cancel_request) ? ompt_cancel_taskgroup
1816 : ompt_cancel_parallel) |
1817 ompt_cancel_discarded_task,
1818 NULL);
1819 }
1820#endif
1821 KMP_COUNT_BLOCK(TASK_cancelled);
1822 // this task belongs to a task group and we need to cancel it
1823 discard = 1 /* true */;
1824 }
1825 }
1826
1827 // Invoke the task routine and pass in relevant data.
1828 // Thunks generated by gcc take a different argument list.
1829 if (!discard) {
1830 if (taskdata->td_flags.tiedness == TASK_UNTIED) {
1831 taskdata->td_last_tied = current_task->td_last_tied;
1832 KMP_DEBUG_ASSERT(taskdata->td_last_tied);
1833 }
1834#if KMP_STATS_ENABLED
1835 KMP_COUNT_BLOCK(TASK_executed);
1836 switch (KMP_GET_THREAD_STATE()) {
1837 case FORK_JOIN_BARRIER:
1838 KMP_PUSH_PARTITIONED_TIMER(OMP_task_join_bar);
1839 break;
1840 case PLAIN_BARRIER:
1841 KMP_PUSH_PARTITIONED_TIMER(OMP_task_plain_bar);
1842 break;
1843 case TASKYIELD:
1844 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskyield);
1845 break;
1846 case TASKWAIT:
1847 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskwait);
1848 break;
1849 case TASKGROUP:
1850 KMP_PUSH_PARTITIONED_TIMER(OMP_task_taskgroup);
1851 break;
1852 default:
1853 KMP_PUSH_PARTITIONED_TIMER(OMP_task_immediate);
1854 break;
1855 }
1856#endif // KMP_STATS_ENABLED
1857
1858// OMPT task begin
1859#if OMPT_SUPPORT
1860 if (UNLIKELY(ompt_enabled.enabled))
1861 __ompt_task_start(task, current_task, gtid);
1862#endif
1863#if OMPT_SUPPORT && OMPT_OPTIONAL
1864 if (UNLIKELY(ompt_enabled.ompt_callback_dispatch &&
1865 taskdata->ompt_task_info.dispatch_chunk.iterations > 0)) {
1866 ompt_data_t instance = ompt_data_none;
1867 instance.ptr = &(taskdata->ompt_task_info.dispatch_chunk);
1868 ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
1869 ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
1870 &(team_info->parallel_data), &(taskdata->ompt_task_info.task_data),
1871 ompt_dispatch_taskloop_chunk, instance);
1872 taskdata->ompt_task_info.dispatch_chunk = {0, 0};
1873 }
1874#endif // OMPT_SUPPORT && OMPT_OPTIONAL
1875
1876#if OMPD_SUPPORT
1877 if (ompd_state & OMPD_ENABLE_BP)
1878 ompd_bp_task_begin();
1879#endif
1880
1881#if USE_ITT_BUILD && USE_ITT_NOTIFY
1882 kmp_uint64 cur_time;
1883 kmp_int32 kmp_itt_count_task =
1884 __kmp_forkjoin_frames_mode == 3 && !taskdata->td_flags.task_serial &&
1885 current_task->td_flags.tasktype == TASK_IMPLICIT;
1886 if (kmp_itt_count_task) {
1887 thread = __kmp_threads[gtid];
1888 // Time outer level explicit task on barrier for adjusting imbalance time
1889 if (thread->th.th_bar_arrive_time)
1890 cur_time = __itt_get_timestamp();
1891 else
1892 kmp_itt_count_task = 0; // thread is not on a barrier - skip timing
1893 }
1894 KMP_FSYNC_ACQUIRED(taskdata); // acquired self (new task)
1895#endif
1896
1897#if ENABLE_LIBOMPTARGET
1898 if (taskdata->td_target_data.async_handle != NULL) {
1899 // If we have a valid target async handle, that means that we have already
1900 // executed the task routine once. We must query for the handle completion
1901 // instead of re-executing the routine.
1902 KMP_ASSERT(tgt_target_nowait_query);
1903 tgt_target_nowait_query(&taskdata->td_target_data.async_handle);
1904 } else
1905#endif
1906 if (task->routine != NULL) {
1907#ifdef KMP_GOMP_COMPAT
1908 if (taskdata->td_flags.native) {
1909 ((void (*)(void *))(*(task->routine)))(task->shareds);
1910 } else
1911#endif /* KMP_GOMP_COMPAT */
1912 {
1913 (*(task->routine))(gtid, task);
1914 }
1915 }
1916 KMP_POP_PARTITIONED_TIMER();
1917
1918#if USE_ITT_BUILD && USE_ITT_NOTIFY
1919 if (kmp_itt_count_task) {
1920 // Barrier imbalance - adjust arrive time with the task duration
1921 thread->th.th_bar_arrive_time += (__itt_get_timestamp() - cur_time);
1922 }
1923 KMP_FSYNC_CANCEL(taskdata); // destroy self (just executed)
1924 KMP_FSYNC_RELEASING(taskdata->td_parent); // releasing parent
1925#endif
1926 }
1927
1928#if OMPD_SUPPORT
1929 if (ompd_state & OMPD_ENABLE_BP)
1930 ompd_bp_task_end();
1931#endif
1932
1933 // Proxy tasks are not handled by the runtime
1934 if (taskdata->td_flags.proxy != TASK_PROXY) {
1935#if OMPT_SUPPORT
1936 if (UNLIKELY(ompt_enabled.enabled)) {
1937 thread->th.ompt_thread_info = oldInfo;
1938 if (taskdata->td_flags.tiedness == TASK_TIED) {
1939 taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
1940 }
1941 __kmp_task_finish<true>(gtid, task, current_task);
1942 } else
1943#endif
1944 __kmp_task_finish<false>(gtid, task, current_task);
1945 }
1946
1947 KA_TRACE(
1948 30,
1949 ("__kmp_invoke_task(exit): T#%d completed task %p, resuming task %p\n",
1950 gtid, taskdata, current_task));
1951 return;
1952}
1953
1954// __kmpc_omp_task_parts: Schedule a thread-switchable task for execution
1955//
1956// loc_ref: location of original task pragma (ignored)
1957// gtid: Global Thread ID of encountering thread
1958// new_task: task thunk allocated by __kmp_omp_task_alloc() for the ''new task''
1959// Returns:
1960// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
1961// be resumed later.
1962// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
1963// resumed later.
1964kmp_int32 __kmpc_omp_task_parts(ident_t *loc_ref, kmp_int32 gtid,
1965 kmp_task_t *new_task) {
1966 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
1967
1968 KA_TRACE(10, ("__kmpc_omp_task_parts(enter): T#%d loc=%p task=%p\n", gtid,
1969 loc_ref, new_taskdata));
1970
1971#if OMPT_SUPPORT
1972 kmp_taskdata_t *parent;
1973 if (UNLIKELY(ompt_enabled.enabled)) {
1974 parent = new_taskdata->td_parent;
1975 if (ompt_enabled.ompt_callback_task_create) {
1976 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
1977 &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame),
1978 &(new_taskdata->ompt_task_info.task_data), ompt_task_explicit, 0,
1979 OMPT_GET_RETURN_ADDRESS(0));
1980 }
1981 }
1982#endif
1983
1984 /* Should we execute the new task or queue it? For now, let's just always try
1985 to queue it. If the queue fills up, then we'll execute it. */
1986
1987 if (__kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
1988 { // Execute this task immediately
1989 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
1990 new_taskdata->td_flags.task_serial = 1;
1991 __kmp_invoke_task(gtid, new_task, current_task);
1992 }
1993
1994 KA_TRACE(
1995 10,
1996 ("__kmpc_omp_task_parts(exit): T#%d returning TASK_CURRENT_NOT_QUEUED: "
1997 "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
1998 gtid, loc_ref, new_taskdata));
1999
2000#if OMPT_SUPPORT
2001 if (UNLIKELY(ompt_enabled.enabled)) {
2002 parent->ompt_task_info.frame.enter_frame = ompt_data_none;
2003 }
2004#endif
2005 return TASK_CURRENT_NOT_QUEUED;
2006}
2007
2008// __kmp_omp_task: Schedule a non-thread-switchable task for execution
2009//
2010// gtid: Global Thread ID of encountering thread
2011 // new_task: non-thread-switchable task thunk allocated by __kmp_omp_task_alloc()
2012// serialize_immediate: if TRUE then if the task is executed immediately its
2013// execution will be serialized
2014// Returns:
2015// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
2016// be resumed later.
2017// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
2018// resumed later.
2019kmp_int32 __kmp_omp_task(kmp_int32 gtid, kmp_task_t *new_task,
2020 bool serialize_immediate) {
2021 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
2022
2023#if OMPX_TASKGRAPH
2024 if (new_taskdata->is_taskgraph &&
2025 __kmp_tdg_is_recording(new_taskdata->tdg->tdg_status)) {
2026 kmp_tdg_info_t *tdg = new_taskdata->tdg;
2027 // extend the record_map if needed
2028 if (new_taskdata->td_task_id >= new_taskdata->tdg->map_size) {
2029 __kmp_acquire_bootstrap_lock(&tdg->graph_lock);
2030 // map_size could have been updated by another thread if recursive
2031 // taskloop
2032 if (new_taskdata->td_task_id >= tdg->map_size) {
2033 kmp_uint old_size = tdg->map_size;
2034 kmp_uint new_size = old_size * 2;
2035 kmp_node_info_t *old_record = tdg->record_map;
2036 kmp_node_info_t *new_record = (kmp_node_info_t *)__kmp_allocate(
2037 new_size * sizeof(kmp_node_info_t));
2038
2039 KMP_MEMCPY(new_record, old_record, old_size * sizeof(kmp_node_info_t));
2040 tdg->record_map = new_record;
2041
2042 __kmp_free(old_record);
2043
2044 for (kmp_int i = old_size; i < new_size; i++) {
2045 kmp_int32 *successorsList = (kmp_int32 *)__kmp_allocate(
2046 __kmp_successors_size * sizeof(kmp_int32));
2047 new_record[i].task = nullptr;
2048 new_record[i].successors = successorsList;
2049 new_record[i].nsuccessors = 0;
2050 new_record[i].npredecessors = 0;
2051 new_record[i].successors_size = __kmp_successors_size;
2052 KMP_ATOMIC_ST_REL(&new_record[i].npredecessors_counter, 0);
2053 }
2054 // update the size at the end, so that other threads do not use
2055 // old_record while map_size is already updated
2056 tdg->map_size = new_size;
2057 }
2058 __kmp_release_bootstrap_lock(&tdg->graph_lock);
2059 }
2060 // record a task
2061 if (tdg->record_map[new_taskdata->td_task_id].task == nullptr) {
2062 tdg->record_map[new_taskdata->td_task_id].task = new_task;
2063 tdg->record_map[new_taskdata->td_task_id].parent_task =
2064 new_taskdata->td_parent;
2065 KMP_ATOMIC_INC(&tdg->num_tasks);
2066 }
2067 }
2068#endif
2069
2070 /* Should we execute the new task or queue it? For now, let's just always try
2071 to queue it. If the queue fills up, then we'll execute it. */
2072 if (new_taskdata->td_flags.proxy == TASK_PROXY ||
2073 __kmp_push_task(gtid, new_task) == TASK_NOT_PUSHED) // if cannot defer
2074 { // Execute this task immediately
2075 kmp_taskdata_t *current_task = __kmp_threads[gtid]->th.th_current_task;
2076 if (serialize_immediate)
2077 new_taskdata->td_flags.task_serial = 1;
2078 __kmp_invoke_task(gtid, new_task, current_task);
2079 } else if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME &&
2080 __kmp_wpolicy_passive) {
2081 kmp_info_t *this_thr = __kmp_threads[gtid];
2082 kmp_team_t *team = this_thr->th.th_team;
2083 kmp_int32 nthreads = this_thr->th.th_team_nproc;
2084 for (int i = 0; i < nthreads; ++i) {
2085 kmp_info_t *thread = team->t.t_threads[i];
2086 if (thread == this_thr)
2087 continue;
2088 if (thread->th.th_sleep_loc != NULL) {
2089 __kmp_null_resume_wrapper(thread);
2090 break; // awake one thread at a time
2091 }
2092 }
2093 }
2094 return TASK_CURRENT_NOT_QUEUED;
2095}
2096
2097// __kmpc_omp_task: Wrapper around __kmp_omp_task to schedule a
2098// non-thread-switchable task from the parent thread only!
2099//
2100// loc_ref: location of original task pragma (ignored)
2101// gtid: Global Thread ID of encountering thread
2102// new_task: non-thread-switchable task thunk allocated by
2103// __kmp_omp_task_alloc()
2104// Returns:
2105// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
2106// be resumed later.
2107// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
2108// resumed later.
2109kmp_int32 __kmpc_omp_task(ident_t *loc_ref, kmp_int32 gtid,
2110 kmp_task_t *new_task) {
2111 kmp_int32 res;
2112 KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
2113
2114#if KMP_DEBUG || OMPT_SUPPORT
2115 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
2116#endif
2117 KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
2118 new_taskdata));
2119 __kmp_assert_valid_gtid(gtid);
2120
2121#if OMPT_SUPPORT
2122 kmp_taskdata_t *parent = NULL;
2123 if (UNLIKELY(ompt_enabled.enabled)) {
2124 if (!new_taskdata->td_flags.started) {
2125 OMPT_STORE_RETURN_ADDRESS(gtid);
2126 parent = new_taskdata->td_parent;
2127 if (!parent->ompt_task_info.frame.enter_frame.ptr) {
2128 parent->ompt_task_info.frame.enter_frame.ptr =
2129 OMPT_GET_FRAME_ADDRESS(0);
2130 }
2131 if (ompt_enabled.ompt_callback_task_create) {
2132 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
2133 &(parent->ompt_task_info.task_data),
2134 &(parent->ompt_task_info.frame),
2135 &(new_taskdata->ompt_task_info.task_data),
2136 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
2137 OMPT_LOAD_RETURN_ADDRESS(gtid));
2138 }
2139 } else {
2140 // We are scheduling the continuation of an UNTIED task.
2141 // Scheduling back to the parent task.
2142 __ompt_task_finish(new_task,
2143 new_taskdata->ompt_task_info.scheduling_parent,
2144 ompt_task_switch);
2145 new_taskdata->ompt_task_info.frame.exit_frame = ompt_data_none;
2146 }
2147 }
2148#endif
2149
2150 res = __kmp_omp_task(gtid, new_task, true);
2151
2152 KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
2153 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
2154 gtid, loc_ref, new_taskdata));
2155#if OMPT_SUPPORT
2156 if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
2157 parent->ompt_task_info.frame.enter_frame = ompt_data_none;
2158 }
2159#endif
2160 return res;
2161}
2162
2163// __kmp_omp_taskloop_task: Wrapper around __kmp_omp_task to schedule
2164// a taskloop task with the correct OMPT return address
2165//
2166// loc_ref: location of original task pragma (ignored)
2167// gtid: Global Thread ID of encountering thread
2168// new_task: non-thread-switchable task thunk allocated by
2169// __kmp_omp_task_alloc()
2170// codeptr_ra: return address for OMPT callback
2171// Returns:
2172// TASK_CURRENT_NOT_QUEUED (0) if did not suspend and queue current task to
2173// be resumed later.
2174// TASK_CURRENT_QUEUED (1) if suspended and queued the current task to be
2175// resumed later.
2176kmp_int32 __kmp_omp_taskloop_task(ident_t *loc_ref, kmp_int32 gtid,
2177 kmp_task_t *new_task, void *codeptr_ra) {
2178 kmp_int32 res;
2179 KMP_SET_THREAD_STATE_BLOCK(EXPLICIT_TASK);
2180
2181#if KMP_DEBUG || OMPT_SUPPORT
2182 kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
2183#endif
2184 KA_TRACE(10, ("__kmpc_omp_task(enter): T#%d loc=%p task=%p\n", gtid, loc_ref,
2185 new_taskdata));
2186
2187#if OMPT_SUPPORT
2188 kmp_taskdata_t *parent = NULL;
2189 if (UNLIKELY(ompt_enabled.enabled && !new_taskdata->td_flags.started)) {
2190 parent = new_taskdata->td_parent;
2191 if (!parent->ompt_task_info.frame.enter_frame.ptr)
2192 parent->ompt_task_info.frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
2193 if (ompt_enabled.ompt_callback_task_create) {
2194 ompt_callbacks.ompt_callback(ompt_callback_task_create)(
2195 &(parent->ompt_task_info.task_data), &(parent->ompt_task_info.frame),
2196 &(new_taskdata->ompt_task_info.task_data),
2197 ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 0,
2198 codeptr_ra);
2199 }
2200 }
2201#endif
2202
2203 res = __kmp_omp_task(gtid, new_task, true);
2204
2205 KA_TRACE(10, ("__kmpc_omp_task(exit): T#%d returning "
2206 "TASK_CURRENT_NOT_QUEUED: loc=%p task=%p\n",
2207 gtid, loc_ref, new_taskdata));
2208#if OMPT_SUPPORT
2209 if (UNLIKELY(ompt_enabled.enabled && parent != NULL)) {
2210 parent->ompt_task_info.frame.enter_frame = ompt_data_none;
2211 }
2212#endif
2213 return res;
2214}
2215
2216template <bool ompt>
2217static kmp_int32 __kmpc_omp_taskwait_template(ident_t *loc_ref, kmp_int32 gtid,
2218 void *frame_address,
2219 void *return_address) {
2220 kmp_taskdata_t *taskdata = nullptr;
2221 kmp_info_t *thread;
2222 int thread_finished = FALSE;
2223 KMP_SET_THREAD_STATE_BLOCK(TASKWAIT);
2224
2225 KA_TRACE(10, ("__kmpc_omp_taskwait(enter): T#%d loc=%p\n", gtid, loc_ref));
2226 KMP_DEBUG_ASSERT(gtid >= 0);
2227
2228 if (__kmp_tasking_mode != tskm_immediate_exec) {
2229 thread = __kmp_threads[gtid];
2230 taskdata = thread->th.th_current_task;
2231
2232#if OMPT_SUPPORT && OMPT_OPTIONAL
2233 ompt_data_t *my_task_data;
2234 ompt_data_t *my_parallel_data;
2235
2236 if (ompt) {
2237 my_task_data = &(taskdata->ompt_task_info.task_data);
2238 my_parallel_data = OMPT_CUR_TEAM_DATA(thread);
2239
2240 taskdata->ompt_task_info.frame.enter_frame.ptr = frame_address;
2241
2242 if (ompt_enabled.ompt_callback_sync_region) {
2243 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2244 ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
2245 my_task_data, return_address);
2246 }
2247
2248 if (ompt_enabled.ompt_callback_sync_region_wait) {
2249 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2250 ompt_sync_region_taskwait, ompt_scope_begin, my_parallel_data,
2251 my_task_data, return_address);
2252 }
2253 }
2254#endif // OMPT_SUPPORT && OMPT_OPTIONAL
2255
2256 // Debugger: The taskwait is active. Store the location and the thread that
2257 // encountered the taskwait.
2258#if USE_ITT_BUILD
2259// Note: These values are used by ITT events as well.
2260#endif /* USE_ITT_BUILD */
2261 taskdata->td_taskwait_counter += 1;
2262 taskdata->td_taskwait_ident = loc_ref;
2263 taskdata->td_taskwait_thread = gtid + 1;
2264
2265#if USE_ITT_BUILD
2266 void *itt_sync_obj = NULL;
2267#if USE_ITT_NOTIFY
2268 KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2269#endif /* USE_ITT_NOTIFY */
2270#endif /* USE_ITT_BUILD */
2271
2272 bool must_wait =
2273 !taskdata->td_flags.team_serial && !taskdata->td_flags.final;
2274
2275 must_wait = must_wait || (thread->th.th_task_team != NULL &&
2276 thread->th.th_task_team->tt.tt_found_proxy_tasks);
2277 // If hidden helper thread is encountered, we must enable wait here.
2278 must_wait =
2279 must_wait ||
2280 (__kmp_enable_hidden_helper && thread->th.th_task_team != NULL &&
2281 thread->th.th_task_team->tt.tt_hidden_helper_task_encountered);
2282
2283 if (must_wait) {
2284 kmp_flag_32<false, false> flag(
2285 RCAST(std::atomic<kmp_uint32> *,
2286 &(taskdata->td_incomplete_child_tasks)),
2287 0U);
2288 while (KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) != 0) {
2289 flag.execute_tasks(thread, gtid, FALSE,
2290 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2291 __kmp_task_stealing_constraint);
2292 }
2293 }
2294#if USE_ITT_BUILD
2295 KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2296 KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with children
2297#endif /* USE_ITT_BUILD */
2298
2299 // Debugger: The taskwait is completed. Location remains, but thread is
2300 // negated.
2301 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2302
2303#if OMPT_SUPPORT && OMPT_OPTIONAL
2304 if (ompt) {
2305 if (ompt_enabled.ompt_callback_sync_region_wait) {
2306 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2307 ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
2308 my_task_data, return_address);
2309 }
2310 if (ompt_enabled.ompt_callback_sync_region) {
2311 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2312 ompt_sync_region_taskwait, ompt_scope_end, my_parallel_data,
2313 my_task_data, return_address);
2314 }
2315 taskdata->ompt_task_info.frame.enter_frame = ompt_data_none;
2316 }
2317#endif // OMPT_SUPPORT && OMPT_OPTIONAL
2318 }
2319
2320 KA_TRACE(10, ("__kmpc_omp_taskwait(exit): T#%d task %p finished waiting, "
2321 "returning TASK_CURRENT_NOT_QUEUED\n",
2322 gtid, taskdata));
2323
2324 return TASK_CURRENT_NOT_QUEUED;
2325}
2326
2327#if OMPT_SUPPORT && OMPT_OPTIONAL
2328OMPT_NOINLINE
2329static kmp_int32 __kmpc_omp_taskwait_ompt(ident_t *loc_ref, kmp_int32 gtid,
2330 void *frame_address,
2331 void *return_address) {
2332 return __kmpc_omp_taskwait_template<true>(loc_ref, gtid, frame_address,
2333 return_address);
2334}
2335#endif // OMPT_SUPPORT && OMPT_OPTIONAL
2336
2337// __kmpc_omp_taskwait: Wait until all tasks generated by the current task are
2338// complete
2339kmp_int32 __kmpc_omp_taskwait(ident_t *loc_ref, kmp_int32 gtid) {
2340#if OMPT_SUPPORT && OMPT_OPTIONAL
2341 if (UNLIKELY(ompt_enabled.enabled)) {
2342 OMPT_STORE_RETURN_ADDRESS(gtid);
2343 return __kmpc_omp_taskwait_ompt(loc_ref, gtid, OMPT_GET_FRAME_ADDRESS(0),
2344 OMPT_LOAD_RETURN_ADDRESS(gtid));
2345 }
2346#endif
2347 return __kmpc_omp_taskwait_template<false>(loc_ref, gtid, NULL, NULL);
2348}
2349
2350// __kmpc_omp_taskyield: switch to a different task
2351kmp_int32 __kmpc_omp_taskyield(ident_t *loc_ref, kmp_int32 gtid, int end_part) {
2352 kmp_taskdata_t *taskdata = NULL;
2353 kmp_info_t *thread;
2354 int thread_finished = FALSE;
2355
2356 KMP_COUNT_BLOCK(OMP_TASKYIELD);
2357 KMP_SET_THREAD_STATE_BLOCK(TASKYIELD);
2358
2359 KA_TRACE(10, ("__kmpc_omp_taskyield(enter): T#%d loc=%p end_part = %d\n",
2360 gtid, loc_ref, end_part));
2361 __kmp_assert_valid_gtid(gtid);
2362
2363 if (__kmp_tasking_mode != tskm_immediate_exec && __kmp_init_parallel) {
2364 thread = __kmp_threads[gtid];
2365 taskdata = thread->th.th_current_task;
2366// Should we model this as a task wait or not?
2367 // Debugger: The taskwait is active. Store the location and the thread that
2368 // encountered the taskwait.
2369#if USE_ITT_BUILD
2370// Note: These values are used by ITT events as well.
2371#endif /* USE_ITT_BUILD */
2372 taskdata->td_taskwait_counter += 1;
2373 taskdata->td_taskwait_ident = loc_ref;
2374 taskdata->td_taskwait_thread = gtid + 1;
2375
2376#if USE_ITT_BUILD
2377 void *itt_sync_obj = NULL;
2378#if USE_ITT_NOTIFY
2379 KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2380#endif /* USE_ITT_NOTIFY */
2381#endif /* USE_ITT_BUILD */
2382 if (!taskdata->td_flags.team_serial) {
2383 kmp_task_team_t *task_team = thread->th.th_task_team;
2384 if (task_team != NULL) {
2385 if (KMP_TASKING_ENABLED(task_team)) {
2386#if OMPT_SUPPORT
2387 if (UNLIKELY(ompt_enabled.enabled))
2388 thread->th.ompt_thread_info.ompt_task_yielded = 1;
2389#endif
2390 __kmp_execute_tasks_32(
2391 thread, gtid, (kmp_flag_32<> *)NULL, FALSE,
2392 &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
2393 __kmp_task_stealing_constraint);
2394#if OMPT_SUPPORT
2395 if (UNLIKELY(ompt_enabled.enabled))
2396 thread->th.ompt_thread_info.ompt_task_yielded = 0;
2397#endif
2398 }
2399 }
2400 }
2401#if USE_ITT_BUILD
2402 KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2403#endif /* USE_ITT_BUILD */
2404
2405 // Debugger: The taskwait is completed. Location remains, but thread is
2406 // negated.
2407 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread;
2408 }
2409
2410 KA_TRACE(10, ("__kmpc_omp_taskyield(exit): T#%d task %p resuming, "
2411 "returning TASK_CURRENT_NOT_QUEUED\n",
2412 gtid, taskdata));
2413
2414 return TASK_CURRENT_NOT_QUEUED;
2415}
2416
2417// Task Reduction implementation
2418//
2419 // Note: the initial implementation did not take into account the possibility
2420 // of specifying omp_orig for the UDR (user defined reduction) initializer.
2421 // The corrected implementation takes the omp_orig object into account. The
2422 // compiler is free to use the old implementation if omp_orig is not specified.
2423
2424/*!
2425@ingroup BASIC_TYPES
2426@{
2427*/
2428
2429/*!
2430Flags for special info per task reduction item.
2431*/
2432typedef struct kmp_taskred_flags {
2433 /*! 1 - use lazy alloc/init (e.g. big objects, num tasks < num threads) */
2434 unsigned lazy_priv : 1;
2435 unsigned reserved31 : 31;
2436} kmp_taskred_flags_t;
2437
2438/*!
2439Internal struct for reduction data item related info set up by compiler.
2440*/
2441typedef struct kmp_task_red_input {
2442 void *reduce_shar; /**< shared between tasks item to reduce into */
2443 size_t reduce_size; /**< size of data item in bytes */
2444 // three compiler-generated routines (init, fini are optional):
2445 void *reduce_init; /**< data initialization routine (single parameter) */
2446 void *reduce_fini; /**< data finalization routine */
2447 void *reduce_comb; /**< data combiner routine */
2448 kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2449} kmp_task_red_input_t;
2450
2451/*!
2452Internal struct for reduction data item related info saved by the library.
2453*/
2454typedef struct kmp_taskred_data {
2455 void *reduce_shar; /**< shared between tasks item to reduce into */
2456 size_t reduce_size; /**< size of data item */
2457 kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2458 void *reduce_priv; /**< array of thread specific items */
2459 void *reduce_pend; /**< end of private data for faster comparison op */
2460 // three compiler-generated routines (init, fini are optional):
2461 void *reduce_comb; /**< data combiner routine */
2462 void *reduce_init; /**< data initialization routine (two parameters) */
2463 void *reduce_fini; /**< data finalization routine */
2464 void *reduce_orig; /**< original item (can be used in UDR initializer) */
2465} kmp_taskred_data_t;
2466
2467/*!
2468Internal struct for reduction data item related info set up by compiler.
2469
2470New interface: added reduce_orig field to provide omp_orig for UDR initializer.
2471*/
2472typedef struct kmp_taskred_input {
2473 void *reduce_shar; /**< shared between tasks item to reduce into */
2474 void *reduce_orig; /**< original reduction item used for initialization */
2475 size_t reduce_size; /**< size of data item */
2476 // three compiler-generated routines (init, fini are optional):
2477 void *reduce_init; /**< data initialization routine (two parameters) */
2478 void *reduce_fini; /**< data finalization routine */
2479 void *reduce_comb; /**< data combiner routine */
2480 kmp_taskred_flags_t flags; /**< flags for additional info from compiler */
2481} kmp_taskred_input_t;
2482/*!
2483@}
2484*/
2485
2486template <typename T> void __kmp_assign_orig(kmp_taskred_data_t &item, T &src);
2487template <>
2488void __kmp_assign_orig<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2489 kmp_task_red_input_t &src) {
2490 item.reduce_orig = NULL;
2491}
2492template <>
2493void __kmp_assign_orig<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2494 kmp_taskred_input_t &src) {
2495 if (src.reduce_orig != NULL) {
2496 item.reduce_orig = src.reduce_orig;
2497 } else {
2498 item.reduce_orig = src.reduce_shar;
2499 } // non-NULL reduce_orig means new interface used
2500}
2501
2502template <typename T> void __kmp_call_init(kmp_taskred_data_t &item, size_t j);
2503template <>
2504void __kmp_call_init<kmp_task_red_input_t>(kmp_taskred_data_t &item,
2505 size_t offset) {
2506 ((void (*)(void *))item.reduce_init)((char *)(item.reduce_priv) + offset);
2507}
2508template <>
2509void __kmp_call_init<kmp_taskred_input_t>(kmp_taskred_data_t &item,
2510 size_t offset) {
2511 ((void (*)(void *, void *))item.reduce_init)(
2512 (char *)(item.reduce_priv) + offset, item.reduce_orig);
2513}
2514
2515template <typename T>
2516void *__kmp_task_reduction_init(int gtid, int num, T *data) {
2517 __kmp_assert_valid_gtid(gtid);
2518 kmp_info_t *thread = __kmp_threads[gtid];
2519 kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup;
2520 kmp_uint32 nth = thread->th.th_team_nproc;
2521 kmp_taskred_data_t *arr;
2522
2523 // check input data just in case
2524 KMP_ASSERT(tg != NULL);
2525 KMP_ASSERT(data != NULL);
2526 KMP_ASSERT(num > 0);
2527 if (nth == 1 && !__kmp_enable_hidden_helper) {
2528 KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, tg %p, exiting nth=1\n",
2529 gtid, tg));
2530 return (void *)tg;
2531 }
2532 KA_TRACE(10, ("__kmpc_task_reduction_init: T#%d, taskgroup %p, #items %d\n",
2533 gtid, tg, num));
2534 arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2535 thread, num * sizeof(kmp_taskred_data_t));
2536 for (int i = 0; i < num; ++i) {
2537 size_t size = data[i].reduce_size - 1;
2538 // round the size up to cache line per thread-specific item
2539 size += CACHE_LINE - size % CACHE_LINE;
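// e.g., with a 64-byte cache line, reduce_size values of 24, 64, and 100 bytes
// yield per-thread copies of 64, 64, and 128 bytes respectively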
2540 KMP_ASSERT(data[i].reduce_comb != NULL); // combiner is mandatory
2541 arr[i].reduce_shar = data[i].reduce_shar;
2542 arr[i].reduce_size = size;
2543 arr[i].flags = data[i].flags;
2544 arr[i].reduce_comb = data[i].reduce_comb;
2545 arr[i].reduce_init = data[i].reduce_init;
2546 arr[i].reduce_fini = data[i].reduce_fini;
2547 __kmp_assign_orig<T>(arr[i], data[i]);
2548 if (!arr[i].flags.lazy_priv) {
2549 // allocate cache-line aligned block and fill it with zeros
2550 arr[i].reduce_priv = __kmp_allocate(nth * size);
2551 arr[i].reduce_pend = (char *)(arr[i].reduce_priv) + nth * size;
2552 if (arr[i].reduce_init != NULL) {
2553 // initialize all thread-specific items
2554 for (size_t j = 0; j < nth; ++j) {
2555 __kmp_call_init<T>(arr[i], j * size);
2556 }
2557 }
2558 } else {
2559 // only allocate space for pointers now,
2560 // objects will be lazily allocated/initialized if/when requested
2561 // note that __kmp_allocate zeroes the allocated memory
2562 arr[i].reduce_priv = __kmp_allocate(nth * sizeof(void *));
2563 }
2564 }
2565 tg->reduce_data = (void *)arr;
2566 tg->reduce_num_data = num;
2567 return (void *)tg;
2568}
2569
2570/*!
2571@ingroup TASKING
2572@param gtid Global thread ID
2573@param num Number of data items to reduce
2574@param data Array of data for reduction
2575@return The taskgroup identifier
2576
2577Initialize task reduction for the taskgroup.
2578
2579 Note: this entry assumes the optional compiler-generated initializer routine
2580 has a single parameter: a pointer to the object to be initialized. That means
2581 the reduction either does not use the omp_orig object, or omp_orig is
2582 accessible without help from the runtime library.
2583*/
2584void *__kmpc_task_reduction_init(int gtid, int num, void *data) {
2585#if OMPX_TASKGRAPH
2586 kmp_tdg_info_t *tdg = __kmp_find_tdg(__kmp_curr_tdg_idx);
2587 if (tdg && __kmp_tdg_is_recording(tdg->tdg_status)) {
2588 kmp_tdg_info_t *this_tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
2589 this_tdg->rec_taskred_data =
2590 __kmp_allocate(sizeof(kmp_task_red_input_t) * num);
2591 this_tdg->rec_num_taskred = num;
2592 KMP_MEMCPY(this_tdg->rec_taskred_data, data,
2593 sizeof(kmp_task_red_input_t) * num);
2594 }
2595#endif
2596 return __kmp_task_reduction_init(gtid, num, (kmp_task_red_input_t *)data);
2597}
2598
2599/*!
2600@ingroup TASKING
2601@param gtid Global thread ID
2602@param num Number of data items to reduce
2603@param data Array of data for reduction
2604@return The taskgroup identifier
2605
2606Initialize task reduction for the taskgroup.
2607
2608 Note: this entry assumes the optional compiler-generated initializer routine
2609 has two parameters: a pointer to the object to be initialized and a pointer to omp_orig.
2610*/
2611void *__kmpc_taskred_init(int gtid, int num, void *data) {
2612#if OMPX_TASKGRAPH
2613 kmp_tdg_info_t *tdg = __kmp_find_tdg(__kmp_curr_tdg_idx);
2614 if (tdg && __kmp_tdg_is_recording(tdg->tdg_status)) {
2615 kmp_tdg_info_t *this_tdg = __kmp_global_tdgs[__kmp_curr_tdg_idx];
2616 this_tdg->rec_taskred_data =
2617 __kmp_allocate(sizeof(kmp_task_red_input_t) * num);
2618 this_tdg->rec_num_taskred = num;
2619 KMP_MEMCPY(this_tdg->rec_taskred_data, data,
2620 sizeof(kmp_task_red_input_t) * num);
2621 }
2622#endif
2623 return __kmp_task_reduction_init(gtid, num, (kmp_taskred_input_t *)data);
2624}
2625
2626// Copy task reduction data (except for shared pointers).
2627template <typename T>
2628void __kmp_task_reduction_init_copy(kmp_info_t *thr, int num, T *data,
2629 kmp_taskgroup_t *tg, void *reduce_data) {
2630 kmp_taskred_data_t *arr;
2631 KA_TRACE(20, ("__kmp_task_reduction_init_copy: Th %p, init taskgroup %p,"
2632 " from data %p\n",
2633 thr, tg, reduce_data));
2634 arr = (kmp_taskred_data_t *)__kmp_thread_malloc(
2635 thr, num * sizeof(kmp_taskred_data_t));
2636 // threads will share private copies, thunk routines, sizes, flags, etc.:
2637 KMP_MEMCPY(arr, reduce_data, num * sizeof(kmp_taskred_data_t));
2638 for (int i = 0; i < num; ++i) {
2639 arr[i].reduce_shar = data[i].reduce_shar; // init unique shared pointers
2640 }
2641 tg->reduce_data = (void *)arr;
2642 tg->reduce_num_data = num;
2643}
2644
2645/*!
2646@ingroup TASKING
2647@param gtid Global thread ID
2648@param tskgrp The taskgroup ID (optional)
2649@param data Shared location of the item
2650@return The pointer to per-thread data
2651
2652Get thread-specific location of data item
2653*/
2654void *__kmpc_task_reduction_get_th_data(int gtid, void *tskgrp, void *data) {
2655 __kmp_assert_valid_gtid(gtid);
2656 kmp_info_t *thread = __kmp_threads[gtid];
2657 kmp_int32 nth = thread->th.th_team_nproc;
2658 if (nth == 1)
2659 return data; // nothing to do
2660
2661 kmp_taskgroup_t *tg = (kmp_taskgroup_t *)tskgrp;
2662 if (tg == NULL)
2663 tg = thread->th.th_current_task->td_taskgroup;
2664 KMP_ASSERT(tg != NULL);
2665 kmp_taskred_data_t *arr;
2666 kmp_int32 num;
2667 kmp_int32 tid = thread->th.th_info.ds.ds_tid;
2668
2669#if OMPX_TASKGRAPH
2670 if ((thread->th.th_current_task->is_taskgraph) &&
2671 (!__kmp_tdg_is_recording(
2672 __kmp_global_tdgs[__kmp_curr_tdg_idx]->tdg_status))) {
2673 tg = thread->th.th_current_task->td_taskgroup;
2674 KMP_ASSERT(tg != NULL);
2675 KMP_ASSERT(tg->reduce_data != NULL);
2676 arr = (kmp_taskred_data_t *)(tg->reduce_data);
2677 num = tg->reduce_num_data;
2678 }
2679#endif
2680
2681 KMP_ASSERT(data != NULL);
2682 while (tg != NULL) {
2683 arr = (kmp_taskred_data_t *)(tg->reduce_data);
2684 num = tg->reduce_num_data;
2685 for (int i = 0; i < num; ++i) {
2686 if (!arr[i].flags.lazy_priv) {
2687 if (data == arr[i].reduce_shar ||
2688 (data >= arr[i].reduce_priv && data < arr[i].reduce_pend))
2689 return (char *)(arr[i].reduce_priv) + tid * arr[i].reduce_size;
2690 } else {
2691 // check shared location first
2692 void **p_priv = (void **)(arr[i].reduce_priv);
2693 if (data == arr[i].reduce_shar)
2694 goto found;
2695 // check if we get some thread specific location as parameter
2696 for (int j = 0; j < nth; ++j)
2697 if (data == p_priv[j])
2698 goto found;
2699 continue; // not found, continue search
2700 found:
2701 if (p_priv[tid] == NULL) {
2702 // allocate thread specific object lazily
2703 p_priv[tid] = __kmp_allocate(arr[i].reduce_size);
2704 if (arr[i].reduce_init != NULL) {
2705 if (arr[i].reduce_orig != NULL) { // new interface
2706 ((void (*)(void *, void *))arr[i].reduce_init)(
2707 p_priv[tid], arr[i].reduce_orig);
2708 } else { // old interface (single parameter)
2709 ((void (*)(void *))arr[i].reduce_init)(p_priv[tid]);
2710 }
2711 }
2712 }
2713 return p_priv[tid];
2714 }
2715 }
2716 KMP_ASSERT(tg->parent);
2717 tg = tg->parent;
2718 }
2719 KMP_ASSERT2(0, "Unknown task reduction item");
2720 return NULL; // ERROR, this line never executed
2721}
2722
2723// Finalize task reduction.
2724// Called from __kmpc_end_taskgroup()
2725static void __kmp_task_reduction_fini(kmp_info_t *th, kmp_taskgroup_t *tg) {
2726 kmp_int32 nth = th->th.th_team_nproc;
2727 KMP_DEBUG_ASSERT(
2728 nth > 1 ||
2729 __kmp_enable_hidden_helper); // should not be called if nth == 1 unless we
2730 // are using hidden helper threads
2731 kmp_taskred_data_t *arr = (kmp_taskred_data_t *)tg->reduce_data;
2732 kmp_int32 num = tg->reduce_num_data;
2733 for (int i = 0; i < num; ++i) {
2734 void *sh_data = arr[i].reduce_shar;
2735 void (*f_fini)(void *) = (void (*)(void *))(arr[i].reduce_fini);
2736 void (*f_comb)(void *, void *) =
2737 (void (*)(void *, void *))(arr[i].reduce_comb);
2738 if (!arr[i].flags.lazy_priv) {
2739 void *pr_data = arr[i].reduce_priv;
2740 size_t size = arr[i].reduce_size;
2741 for (int j = 0; j < nth; ++j) {
2742 void *priv_data = (char *)pr_data + j * size;
2743 f_comb(sh_data, priv_data); // combine results
2744 if (f_fini)
2745 f_fini(priv_data); // finalize if needed
2746 }
2747 } else {
2748 void **pr_data = (void **)(arr[i].reduce_priv);
2749 for (int j = 0; j < nth; ++j) {
2750 if (pr_data[j] != NULL) {
2751 f_comb(sh_data, pr_data[j]); // combine results
2752 if (f_fini)
2753 f_fini(pr_data[j]); // finalize if needed
2754 __kmp_free(pr_data[j]);
2755 }
2756 }
2757 }
2758 __kmp_free(arr[i].reduce_priv);
2759 }
2760 __kmp_thread_free(th, arr);
2761 tg->reduce_data = NULL;
2762 tg->reduce_num_data = 0;
2763}
2764
2765 // Cleanup task reduction data for parallel or worksharing,
2766 // do not touch task private data that other threads are still working with.
2767// Called from __kmpc_end_taskgroup()
2768static void __kmp_task_reduction_clean(kmp_info_t *th, kmp_taskgroup_t *tg) {
2769 __kmp_thread_free(th, tg->reduce_data);
2770 tg->reduce_data = NULL;
2771 tg->reduce_num_data = 0;
2772}
2773
2774template <typename T>
2775void *__kmp_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2776 int num, T *data) {
2777 __kmp_assert_valid_gtid(gtid);
2778 kmp_info_t *thr = __kmp_threads[gtid];
2779 kmp_int32 nth = thr->th.th_team_nproc;
2780 __kmpc_taskgroup(loc, gtid); // form new taskgroup first
2781 if (nth == 1) {
2782 KA_TRACE(10,
2783 ("__kmpc_reduction_modifier_init: T#%d, tg %p, exiting nth=1\n",
2784 gtid, thr->th.th_current_task->td_taskgroup));
2785 return (void *)thr->th.th_current_task->td_taskgroup;
2786 }
2787 kmp_team_t *team = thr->th.th_team;
2788 void *reduce_data;
2789 kmp_taskgroup_t *tg;
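// Sketch of the protocol below: the first thread to change
// t_tg_reduce_data[is_ws] from NULL to (void *)1 initializes the common
// reduction data and then publishes the real pointer; the other threads spin
// until a valid pointer (> (void *)1) appears and copy from it.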
2790 reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]);
2791 if (reduce_data == NULL &&
2792 __kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
2793 (void *)1)) {
2794 // single thread enters this block to initialize common reduction data
2795 KMP_DEBUG_ASSERT(reduce_data == NULL);
2796 // first initialize own data, then make a copy other threads can use
2797 tg = (kmp_taskgroup_t *)__kmp_task_reduction_init<T>(gtid, num, data);
2798 reduce_data = __kmp_thread_malloc(thr, num * sizeof(kmp_taskred_data_t));
2799 KMP_MEMCPY(reduce_data, tg->reduce_data, num * sizeof(kmp_taskred_data_t));
2800 // fini counters should be 0 at this point
2801 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[0]) == 0);
2802 KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&team->t.t_tg_fini_counter[1]) == 0);
2803 KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], reduce_data);
2804 } else {
2805 while (
2806 (reduce_data = KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws])) ==
2807 (void *)1) { // wait for task reduction initialization
2808 KMP_CPU_PAUSE();
2809 }
2810 KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer here
2811 tg = thr->th.th_current_task->td_taskgroup;
2812 __kmp_task_reduction_init_copy<T>(thr, num, data, tg, reduce_data);
2813 }
2814 return tg;
2815}
2816
2817/*!
2818@ingroup TASKING
2819@param loc Source location info
2820@param gtid Global thread ID
2821@param is_ws Is 1 if the reduction is for worksharing, 0 otherwise
2822@param num Number of data items to reduce
2823@param data Array of data for reduction
2824@return The taskgroup identifier
2825
2826Initialize task reduction for a parallel or worksharing.
2827
2828 Note: this entry assumes the optional compiler-generated initializer routine
2829 has a single parameter: a pointer to the object to be initialized. That means
2830 the reduction either does not use the omp_orig object, or omp_orig is
2831 accessible without help from the runtime library.
2832*/
2833void *__kmpc_task_reduction_modifier_init(ident_t *loc, int gtid, int is_ws,
2834 int num, void *data) {
2835 return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2836 (kmp_task_red_input_t *)data);
2837}
2838
2839/*!
2840@ingroup TASKING
2841@param loc Source location info
2842@param gtid Global thread ID
2843@param is_ws Is 1 if the reduction is for worksharing, 0 otherwise
2844@param num Number of data items to reduce
2845@param data Array of data for reduction
2846@return The taskgroup identifier
2847
2848Initialize task reduction for a parallel or worksharing.
2849
2850 Note: this entry assumes the optional compiler-generated initializer routine
2851 has two parameters: a pointer to the object to be initialized and a pointer to omp_orig.
2852*/
2853void *__kmpc_taskred_modifier_init(ident_t *loc, int gtid, int is_ws, int num,
2854 void *data) {
2855 return __kmp_task_reduction_modifier_init(loc, gtid, is_ws, num,
2856 (kmp_taskred_input_t *)data);
2857}
2858
2859/*!
2860@ingroup TASKING
2861@param loc Source location info
2862@param gtid Global thread ID
2863@param is_ws Is 1 if the reduction is for worksharing, 0 otherwise
2864
2865Finalize task reduction for a parallel or worksharing.
2866*/
2867void __kmpc_task_reduction_modifier_fini(ident_t *loc, int gtid, int is_ws) {
2868 __kmpc_end_taskgroup(loc, gtid);
2869}
2870
2871// __kmpc_taskgroup: Start a new taskgroup
2872void __kmpc_taskgroup(ident_t *loc, int gtid) {
2873 __kmp_assert_valid_gtid(gtid);
2874 kmp_info_t *thread = __kmp_threads[gtid];
2875 kmp_taskdata_t *taskdata = thread->th.th_current_task;
2876 kmp_taskgroup_t *tg_new =
2877 (kmp_taskgroup_t *)__kmp_thread_malloc(thread, sizeof(kmp_taskgroup_t));
2878 KA_TRACE(10, ("__kmpc_taskgroup: T#%d loc=%p group=%p\n", gtid, loc, tg_new));
2879 KMP_ATOMIC_ST_RLX(&tg_new->count, 0);
2880 KMP_ATOMIC_ST_RLX(&tg_new->cancel_request, cancel_noreq);
2881 tg_new->parent = taskdata->td_taskgroup;
2882 tg_new->reduce_data = NULL;
2883 tg_new->reduce_num_data = 0;
2884 tg_new->gomp_data = NULL;
2885 taskdata->td_taskgroup = tg_new;
2886
2887#if OMPT_SUPPORT && OMPT_OPTIONAL
2888 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
2889 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2890 if (!codeptr)
2891 codeptr = OMPT_GET_RETURN_ADDRESS(0);
2892 kmp_team_t *team = thread->th.th_team;
2893 ompt_data_t my_task_data = taskdata->ompt_task_info.task_data;
2894 // FIXME: I think this is wrong for lwt!
2895 ompt_data_t my_parallel_data = team->t.ompt_team_info.parallel_data;
2896
2897 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
2898 ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2899 &(my_task_data), codeptr);
2900 }
2901#endif
2902}
2903
2904// __kmpc_end_taskgroup: Wait until all tasks generated by the current task
2905// and its descendants are complete
2906void __kmpc_end_taskgroup(ident_t *loc, int gtid) {
2907 __kmp_assert_valid_gtid(gtid);
2908 kmp_info_t *thread = __kmp_threads[gtid];
2909 kmp_taskdata_t *taskdata = thread->th.th_current_task;
2910 kmp_taskgroup_t *taskgroup = taskdata->td_taskgroup;
2911 int thread_finished = FALSE;
2912
2913#if OMPT_SUPPORT && OMPT_OPTIONAL
2914 kmp_team_t *team;
2915 ompt_data_t my_task_data;
2916 ompt_data_t my_parallel_data;
2917 void *codeptr = nullptr;
2918 if (UNLIKELY(ompt_enabled.enabled)) {
2919 team = thread->th.th_team;
2920 my_task_data = taskdata->ompt_task_info.task_data;
2921 // FIXME: I think this is wrong for lwt!
2922 my_parallel_data = team->t.ompt_team_info.parallel_data;
2923 codeptr = OMPT_LOAD_RETURN_ADDRESS(gtid);
2924 if (!codeptr)
2925 codeptr = OMPT_GET_RETURN_ADDRESS(0);
2926 }
2927#endif
2928
2929 KA_TRACE(10, ("__kmpc_end_taskgroup(enter): T#%d loc=%p\n", gtid, loc));
2930 KMP_DEBUG_ASSERT(taskgroup != NULL);
2931 KMP_SET_THREAD_STATE_BLOCK(TASKGROUP);
2932
2933 if (__kmp_tasking_mode != tskm_immediate_exec) {
2934 // mark task as waiting not on a barrier
2935 taskdata->td_taskwait_counter += 1;
2936 taskdata->td_taskwait_ident = loc;
2937 taskdata->td_taskwait_thread = gtid + 1;
2938#if USE_ITT_BUILD
2939 // For ITT the taskgroup wait is similar to taskwait until we need to
2940 // distinguish them
2941 void *itt_sync_obj = NULL;
2942#if USE_ITT_NOTIFY
2943 KMP_ITT_TASKWAIT_STARTING(itt_sync_obj);
2944#endif /* USE_ITT_NOTIFY */
2945#endif /* USE_ITT_BUILD */
2946
2947#if OMPT_SUPPORT && OMPT_OPTIONAL
2948 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2949 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2950 ompt_sync_region_taskgroup, ompt_scope_begin, &(my_parallel_data),
2951 &(my_task_data), codeptr);
2952 }
2953#endif
2954
2955 if (!taskdata->td_flags.team_serial ||
2956 (thread->th.th_task_team != NULL &&
2957 (thread->th.th_task_team->tt.tt_found_proxy_tasks ||
2958 thread->th.th_task_team->tt.tt_hidden_helper_task_encountered))) {
2959 kmp_flag_32<false, false> flag(
2960 RCAST(std::atomic<kmp_uint32> *, &(taskgroup->count)), 0U);
2961 while (KMP_ATOMIC_LD_ACQ(&taskgroup->count) != 0) {
        flag.execute_tasks(thread, gtid, FALSE,
                           &thread_finished USE_ITT_BUILD_ARG(itt_sync_obj),
                           __kmp_task_stealing_constraint);
2965 }
2966 }
2967 taskdata->td_taskwait_thread = -taskdata->td_taskwait_thread; // end waiting
2968
2969#if OMPT_SUPPORT && OMPT_OPTIONAL
2970 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region_wait)) {
2971 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
2972 ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
2973 &(my_task_data), codeptr);
2974 }
2975#endif
2976
2977#if USE_ITT_BUILD
2978 KMP_ITT_TASKWAIT_FINISHED(itt_sync_obj);
2979 KMP_FSYNC_ACQUIRED(taskdata); // acquire self - sync with descendants
2980#endif /* USE_ITT_BUILD */
2981 }
2982 KMP_DEBUG_ASSERT(taskgroup->count == 0);
2983
2984 if (taskgroup->reduce_data != NULL &&
2985 !taskgroup->gomp_data) { // need to reduce?
2986 int cnt;
2987 void *reduce_data;
2988 kmp_team_t *t = thread->th.th_team;
2989 kmp_taskred_data_t *arr = (kmp_taskred_data_t *)taskgroup->reduce_data;
2990 // check if <priv> data of the first reduction variable shared for the team
2991 void *priv0 = arr[0].reduce_priv;
2992 if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[0])) != NULL &&
2993 ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
2994 // finishing task reduction on parallel
2995 cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[0]);
2996 if (cnt == thread->th.th_team_nproc - 1) {
2997 // we are the last thread passing __kmpc_reduction_modifier_fini()
2998 // finalize task reduction:
        __kmp_task_reduction_fini(thread, taskgroup);
3000 // cleanup fields in the team structure:
3001 // TODO: is relaxed store enough here (whole barrier should follow)?
3002 __kmp_thread_free(thread, reduce_data);
3003 KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[0], NULL);
3004 KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[0], 0);
3005 } else {
3006 // we are not the last thread passing __kmpc_reduction_modifier_fini(),
3007 // so do not finalize reduction, just clean own copy of the data
        __kmp_task_reduction_clean(thread, taskgroup);
3009 }
3010 } else if ((reduce_data = KMP_ATOMIC_LD_ACQ(&t->t.t_tg_reduce_data[1])) !=
3011 NULL &&
3012 ((kmp_taskred_data_t *)reduce_data)[0].reduce_priv == priv0) {
3013 // finishing task reduction on worksharing
3014 cnt = KMP_ATOMIC_INC(&t->t.t_tg_fini_counter[1]);
3015 if (cnt == thread->th.th_team_nproc - 1) {
3016 // we are the last thread passing __kmpc_reduction_modifier_fini()
        __kmp_task_reduction_fini(thread, taskgroup);
3018 // cleanup fields in team structure:
3019 // TODO: is relaxed store enough here (whole barrier should follow)?
3020 __kmp_thread_free(thread, reduce_data);
3021 KMP_ATOMIC_ST_REL(&t->t.t_tg_reduce_data[1], NULL);
3022 KMP_ATOMIC_ST_REL(&t->t.t_tg_fini_counter[1], 0);
3023 } else {
3024 // we are not the last thread passing __kmpc_reduction_modifier_fini(),
3025 // so do not finalize reduction, just clean own copy of the data
        __kmp_task_reduction_clean(thread, taskgroup);
3027 }
3028 } else {
3029 // finishing task reduction on taskgroup
      __kmp_task_reduction_fini(thread, taskgroup);
3031 }
3032 }
3033 // Restore parent taskgroup for the current task
3034 taskdata->td_taskgroup = taskgroup->parent;
3035 __kmp_thread_free(thread, taskgroup);
3036
3037 KA_TRACE(10, ("__kmpc_end_taskgroup(exit): T#%d task %p finished waiting\n",
3038 gtid, taskdata));
3039
3040#if OMPT_SUPPORT && OMPT_OPTIONAL
3041 if (UNLIKELY(ompt_enabled.ompt_callback_sync_region)) {
3042 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
3043 ompt_sync_region_taskgroup, ompt_scope_end, &(my_parallel_data),
3044 &(my_task_data), codeptr);
3045 }
3046#endif
3047}
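
// Illustrative mapping (sketch): for user code such as
//   #pragma omp taskgroup
//   { ... code that creates tasks ... }
// the compiler emits __kmpc_taskgroup(loc, gtid) on entry and
// __kmpc_end_taskgroup(loc, gtid) on exit, so the exit call waits for all
// tasks generated inside the region and for their descendants.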
3048
3049static kmp_task_t *__kmp_get_priority_task(kmp_int32 gtid,
3050 kmp_task_team_t *task_team,
3051 kmp_int32 is_constrained) {
3052 kmp_task_t *task = NULL;
3053 kmp_taskdata_t *taskdata;
3054 kmp_taskdata_t *current;
3055 kmp_thread_data_t *thread_data;
3056 int ntasks = task_team->tt.tt_num_task_pri;
3057 if (ntasks == 0) {
3058 KA_TRACE(
3059 20, ("__kmp_get_priority_task(exit #1): T#%d No tasks to get\n", gtid));
3060 return NULL;
3061 }
3062 do {
3063 // decrement num_tasks to "reserve" one task to get for execution
    if (__kmp_atomic_compare_store(&task_team->tt.tt_num_task_pri, ntasks,
                                   ntasks - 1))
3066 break;
3067 ntasks = task_team->tt.tt_num_task_pri;
3068 } while (ntasks > 0);
3069 if (ntasks == 0) {
3070 KA_TRACE(20, ("__kmp_get_priority_task(exit #2): T#%d No tasks to get\n",
3071 __kmp_get_gtid()));
3072 return NULL;
3073 }
3074 // We got a "ticket" to get a "reserved" priority task
3075 int deque_ntasks;
3076 kmp_task_pri_t *list = task_team->tt.tt_task_pri_list;
3077 do {
3078 KMP_ASSERT(list != NULL);
3079 thread_data = &list->td;
    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
    deque_ntasks = thread_data->td.td_deque_ntasks;
    if (deque_ntasks == 0) {
      __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3084 KA_TRACE(20, ("__kmp_get_priority_task: T#%d No tasks to get from %p\n",
3085 __kmp_get_gtid(), thread_data));
3086 list = list->next;
3087 }
3088 } while (deque_ntasks == 0);
3089 KMP_DEBUG_ASSERT(deque_ntasks);
3090 int target = thread_data->td.td_deque_head;
3091 current = __kmp_threads[gtid]->th.th_current_task;
3092 taskdata = thread_data->td.td_deque[target];
  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3094 // Bump head pointer and Wrap.
3095 thread_data->td.td_deque_head =
3096 (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3097 } else {
3098 if (!task_team->tt.tt_untied_task_encountered) {
      // The TSC does not allow stealing the victim task
      __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3101 KA_TRACE(20, ("__kmp_get_priority_task(exit #3): T#%d could not get task "
3102 "from %p: task_team=%p ntasks=%d head=%u tail=%u\n",
3103 gtid, thread_data, task_team, deque_ntasks, target,
3104 thread_data->td.td_deque_tail));
3105 task_team->tt.tt_num_task_pri++; // atomic inc, restore value
3106 return NULL;
3107 }
3108 int i;
3109 // walk through the deque trying to steal any task
3110 taskdata = NULL;
3111 for (i = 1; i < deque_ntasks; ++i) {
3112 target = (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3113 taskdata = thread_data->td.td_deque[target];
      if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3115 break; // found task to execute
3116 } else {
3117 taskdata = NULL;
3118 }
3119 }
3120 if (taskdata == NULL) {
3121 // No appropriate candidate found to execute
      __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3123 KA_TRACE(
3124 10, ("__kmp_get_priority_task(exit #4): T#%d could not get task from "
3125 "%p: task_team=%p ntasks=%d head=%u tail=%u\n",
3126 gtid, thread_data, task_team, deque_ntasks,
3127 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3128 task_team->tt.tt_num_task_pri++; // atomic inc, restore value
3129 return NULL;
3130 }
3131 int prev = target;
3132 for (i = i + 1; i < deque_ntasks; ++i) {
3133 // shift remaining tasks in the deque left by 1
3134 target = (target + 1) & TASK_DEQUE_MASK(thread_data->td);
3135 thread_data->td.td_deque[prev] = thread_data->td.td_deque[target];
3136 prev = target;
3137 }
3138 KMP_DEBUG_ASSERT(
3139 thread_data->td.td_deque_tail ==
3140 (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(thread_data->td)));
    thread_data->td.td_deque_tail = target; // tail -= 1 (wrapped)
3142 }
3143 thread_data->td.td_deque_ntasks = deque_ntasks - 1;
  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3145 task = KMP_TASKDATA_TO_TASK(taskdata);
3146 return task;
3147}
3148
3149// __kmp_remove_my_task: remove a task from my own deque
3150static kmp_task_t *__kmp_remove_my_task(kmp_info_t *thread, kmp_int32 gtid,
3151 kmp_task_team_t *task_team,
3152 kmp_int32 is_constrained) {
3153 kmp_task_t *task;
3154 kmp_taskdata_t *taskdata;
3155 kmp_thread_data_t *thread_data;
3156 kmp_uint32 tail;
3157
3158 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3159 KMP_DEBUG_ASSERT(task_team->tt.tt_threads_data !=
3160 NULL); // Caller should check this condition
3161
3162 thread_data = &task_team->tt.tt_threads_data[__kmp_tid_from_gtid(gtid)];
3163
3164 KA_TRACE(10, ("__kmp_remove_my_task(enter): T#%d ntasks=%d head=%u tail=%u\n",
3165 gtid, thread_data->td.td_deque_ntasks,
3166 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3167
3168 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
3169 KA_TRACE(10,
3170 ("__kmp_remove_my_task(exit #1): T#%d No tasks to remove: "
3171 "ntasks=%d head=%u tail=%u\n",
3172 gtid, thread_data->td.td_deque_ntasks,
3173 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3174 return NULL;
3175 }
3176
  __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
3178
3179 if (TCR_4(thread_data->td.td_deque_ntasks) == 0) {
    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3181 KA_TRACE(10,
3182 ("__kmp_remove_my_task(exit #2): T#%d No tasks to remove: "
3183 "ntasks=%d head=%u tail=%u\n",
3184 gtid, thread_data->td.td_deque_ntasks,
3185 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3186 return NULL;
3187 }
3188
3189 tail = (thread_data->td.td_deque_tail - 1) &
3190 TASK_DEQUE_MASK(thread_data->td); // Wrap index.
3191 taskdata = thread_data->td.td_deque[tail];
3192
  if (!__kmp_task_is_allowed(gtid, is_constrained, taskdata,
                             thread->th.th_current_task)) {
    // The TSC does not allow stealing the victim task
    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3197 KA_TRACE(10,
3198 ("__kmp_remove_my_task(exit #3): T#%d TSC blocks tail task: "
3199 "ntasks=%d head=%u tail=%u\n",
3200 gtid, thread_data->td.td_deque_ntasks,
3201 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3202 return NULL;
3203 }
3204
3205 thread_data->td.td_deque_tail = tail;
3206 TCW_4(thread_data->td.td_deque_ntasks, thread_data->td.td_deque_ntasks - 1);
3207
  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3209
3210 KA_TRACE(10, ("__kmp_remove_my_task(exit #4): T#%d task %p removed: "
3211 "ntasks=%d head=%u tail=%u\n",
3212 gtid, taskdata, thread_data->td.td_deque_ntasks,
3213 thread_data->td.td_deque_head, thread_data->td.td_deque_tail));
3214
3215 task = KMP_TASKDATA_TO_TASK(taskdata);
3216 return task;
3217}
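
// Note on deque discipline: __kmp_remove_my_task above pops from the tail of
// the owner's deque (most recently pushed task first), while __kmp_steal_task
// below takes from the head of the victim's deque, so the owner and thieves
// normally operate on opposite ends of the same deque.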
3218
3219// __kmp_steal_task: remove a task from another thread's deque
3220// Assume that calling thread has already checked existence of
3221// task_team thread_data before calling this routine.
3222static kmp_task_t *__kmp_steal_task(kmp_int32 victim_tid, kmp_int32 gtid,
3223 kmp_task_team_t *task_team,
3224 std::atomic<kmp_int32> *unfinished_threads,
3225 int *thread_finished,
3226 kmp_int32 is_constrained) {
3227 kmp_task_t *task;
3228 kmp_taskdata_t *taskdata;
3229 kmp_taskdata_t *current;
3230 kmp_thread_data_t *victim_td, *threads_data;
3231 kmp_int32 target;
3232 kmp_info_t *victim_thr;
3233
3234 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3235
3236 threads_data = task_team->tt.tt_threads_data;
3237 KMP_DEBUG_ASSERT(threads_data != NULL); // Caller should check this condition
3238 KMP_DEBUG_ASSERT(victim_tid >= 0);
3239 KMP_DEBUG_ASSERT(victim_tid < task_team->tt.tt_nproc);
3240
3241 victim_td = &threads_data[victim_tid];
3242 victim_thr = victim_td->td.td_thr;
3243 (void)victim_thr; // Use in TRACE messages which aren't always enabled.
3244
3245 KA_TRACE(10, ("__kmp_steal_task(enter): T#%d try to steal from T#%d: "
3246 "task_team=%p ntasks=%d head=%u tail=%u\n",
3247 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
3248 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
3249 victim_td->td.td_deque_tail));
3250
3251 if (TCR_4(victim_td->td.td_deque_ntasks) == 0) {
3252 KA_TRACE(10, ("__kmp_steal_task(exit #1): T#%d could not steal from T#%d: "
3253 "task_team=%p ntasks=%d head=%u tail=%u\n",
3254 gtid, __kmp_gtid_from_thread(victim_thr), task_team,
3255 victim_td->td.td_deque_ntasks, victim_td->td.td_deque_head,
3256 victim_td->td.td_deque_tail));
3257 return NULL;
3258 }
3259
  __kmp_acquire_bootstrap_lock(&victim_td->td.td_deque_lock);
3261
3262 int ntasks = TCR_4(victim_td->td.td_deque_ntasks);
3263 // Check again after we acquire the lock
3264 if (ntasks == 0) {
    __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3266 KA_TRACE(10, ("__kmp_steal_task(exit #2): T#%d could not steal from T#%d: "
3267 "task_team=%p ntasks=%d head=%u tail=%u\n",
3268 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3269 victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3270 return NULL;
3271 }
3272
3273 KMP_DEBUG_ASSERT(victim_td->td.td_deque != NULL);
3274 current = __kmp_threads[gtid]->th.th_current_task;
3275 taskdata = victim_td->td.td_deque[victim_td->td.td_deque_head];
  if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3277 // Bump head pointer and Wrap.
3278 victim_td->td.td_deque_head =
3279 (victim_td->td.td_deque_head + 1) & TASK_DEQUE_MASK(victim_td->td);
3280 } else {
3281 if (!task_team->tt.tt_untied_task_encountered) {
      // The TSC does not allow stealing the victim task
      __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3284 KA_TRACE(10, ("__kmp_steal_task(exit #3): T#%d could not steal from "
3285 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
3286 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3287 victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3288 return NULL;
3289 }
3290 int i;
3291 // walk through victim's deque trying to steal any task
3292 target = victim_td->td.td_deque_head;
3293 taskdata = NULL;
3294 for (i = 1; i < ntasks; ++i) {
3295 target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
3296 taskdata = victim_td->td.td_deque[target];
      if (__kmp_task_is_allowed(gtid, is_constrained, taskdata, current)) {
3298 break; // found victim task
3299 } else {
3300 taskdata = NULL;
3301 }
3302 }
3303 if (taskdata == NULL) {
3304 // No appropriate candidate to steal found
      __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3306 KA_TRACE(10, ("__kmp_steal_task(exit #4): T#%d could not steal from "
3307 "T#%d: task_team=%p ntasks=%d head=%u tail=%u\n",
3308 gtid, __kmp_gtid_from_thread(victim_thr), task_team, ntasks,
3309 victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3310 return NULL;
3311 }
3312 int prev = target;
3313 for (i = i + 1; i < ntasks; ++i) {
3314 // shift remaining tasks in the deque left by 1
3315 target = (target + 1) & TASK_DEQUE_MASK(victim_td->td);
3316 victim_td->td.td_deque[prev] = victim_td->td.td_deque[target];
3317 prev = target;
3318 }
3319 KMP_DEBUG_ASSERT(
3320 victim_td->td.td_deque_tail ==
3321 (kmp_uint32)((target + 1) & TASK_DEQUE_MASK(victim_td->td)));
    victim_td->td.td_deque_tail = target; // tail -= 1 (wrapped)
3323 }
3324 if (*thread_finished) {
3325 // We need to un-mark this victim as a finished victim. This must be done
3326 // before releasing the lock, or else other threads (starting with the
3327 // primary thread victim) might be prematurely released from the barrier!!!
3328#if KMP_DEBUG
3329 kmp_int32 count =
3330#endif
3331 KMP_ATOMIC_INC(unfinished_threads);
3332 KA_TRACE(
3333 20,
3334 ("__kmp_steal_task: T#%d inc unfinished_threads to %d: task_team=%p\n",
3335 gtid, count + 1, task_team));
3336 *thread_finished = FALSE;
3337 }
3338 TCW_4(victim_td->td.td_deque_ntasks, ntasks - 1);
3339
  __kmp_release_bootstrap_lock(&victim_td->td.td_deque_lock);
3341
3342 KMP_COUNT_BLOCK(TASK_stolen);
3343 KA_TRACE(10,
3344 ("__kmp_steal_task(exit #5): T#%d stole task %p from T#%d: "
3345 "task_team=%p ntasks=%d head=%u tail=%u\n",
3346 gtid, taskdata, __kmp_gtid_from_thread(victim_thr), task_team,
3347 ntasks, victim_td->td.td_deque_head, victim_td->td.td_deque_tail));
3348
3349 task = KMP_TASKDATA_TO_TASK(taskdata);
3350 return task;
3351}
3352
3353// __kmp_execute_tasks_template: Choose and execute tasks until either the
// condition is satisfied (return true) or there are none left (return false).
3355//
3356// final_spin is TRUE if this is the spin at the release barrier.
3357// thread_finished indicates whether the thread is finished executing all
3358// the tasks it has on its deque, and is at the release barrier.
3359// spinner is the location on which to spin.
3360// spinner == NULL means only execute a single task and return.
3361// checker is the value to check to terminate the spin.
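// The search order used below is: priority task deques first, then the
// thread's own deque, and only then stealing from another thread's deque.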
3362template <class C>
3363static inline int __kmp_execute_tasks_template(
3364 kmp_info_t *thread, kmp_int32 gtid, C *flag, int final_spin,
3365 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3366 kmp_int32 is_constrained) {
3367 kmp_task_team_t *task_team = thread->th.th_task_team;
3368 kmp_thread_data_t *threads_data;
3369 kmp_task_t *task;
3370 kmp_info_t *other_thread;
3371 kmp_taskdata_t *current_task = thread->th.th_current_task;
3372 std::atomic<kmp_int32> *unfinished_threads;
3373 kmp_int32 nthreads, victim_tid = -2, use_own_tasks = 1, new_victim = 0,
3374 tid = thread->th.th_info.ds.ds_tid;
3375
3376 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
3377 KMP_DEBUG_ASSERT(thread == __kmp_threads[gtid]);
3378
3379 if (task_team == NULL || current_task == NULL)
3380 return FALSE;
3381
3382 KA_TRACE(15, ("__kmp_execute_tasks_template(enter): T#%d final_spin=%d "
3383 "*thread_finished=%d\n",
3384 gtid, final_spin, *thread_finished));
3385
3386 thread->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
3387 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3388
3389 KMP_DEBUG_ASSERT(threads_data != NULL);
3390
3391 nthreads = task_team->tt.tt_nproc;
3392 unfinished_threads = &(task_team->tt.tt_unfinished_threads);
3393 KMP_DEBUG_ASSERT(nthreads > 1 || task_team->tt.tt_found_proxy_tasks ||
3394 task_team->tt.tt_hidden_helper_task_encountered);
3395 KMP_DEBUG_ASSERT(*unfinished_threads >= 0);
3396
3397 while (1) { // Outer loop keeps trying to find tasks in case of single thread
3398 // getting tasks from target constructs
3399 while (1) { // Inner loop to find a task and execute it
3400 task = NULL;
3401 if (task_team->tt.tt_num_task_pri) { // get priority task first
3402 task = __kmp_get_priority_task(gtid, task_team, is_constrained);
3403 }
3404 if (task == NULL && use_own_tasks) { // check own queue next
3405 task = __kmp_remove_my_task(thread, gtid, task_team, is_constrained);
3406 }
3407 if ((task == NULL) && (nthreads > 1)) { // Steal a task finally
3408 int asleep = 1;
3409 use_own_tasks = 0;
3410 // Try to steal from the last place I stole from successfully.
3411 if (victim_tid == -2) { // haven't stolen anything yet
3412 victim_tid = threads_data[tid].td.td_deque_last_stolen;
3413 if (victim_tid !=
3414 -1) // if we have a last stolen from victim, get the thread
3415 other_thread = threads_data[victim_tid].td.td_thr;
3416 }
3417 if (victim_tid != -1) { // found last victim
3418 asleep = 0;
3419 } else if (!new_victim) { // no recent steals and we haven't already
3420 // used a new victim; select a random thread
3421 do { // Find a different thread to steal work from.
3422 // Pick a random thread. Initial plan was to cycle through all the
3423 // threads, and only return if we tried to steal from every thread,
3424 // and failed. Arch says that's not such a great idea.
3425 victim_tid = __kmp_get_random(thread) % (nthreads - 1);
3426 if (victim_tid >= tid) {
3427 ++victim_tid; // Adjusts random distribution to exclude self
3428 }
3429 // Found a potential victim
3430 other_thread = threads_data[victim_tid].td.td_thr;
3431 // There is a slight chance that __kmp_enable_tasking() did not wake
3432 // up all threads waiting at the barrier. If victim is sleeping,
3433 // then wake it up. Since we were going to pay the cache miss
3434 // penalty for referencing another thread's kmp_info_t struct
3435 // anyway,
3436 // the check shouldn't cost too much performance at this point. In
3437 // extra barrier mode, tasks do not sleep at the separate tasking
3438 // barrier, so this isn't a problem.
3439 asleep = 0;
3440 if ((__kmp_tasking_mode == tskm_task_teams) &&
3441 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) &&
3442 (TCR_PTR(CCAST(void *, other_thread->th.th_sleep_loc)) !=
3443 NULL)) {
3444 asleep = 1;
              __kmp_null_resume_wrapper(other_thread);
              // A sleeping thread should not have any tasks on its queue.
3447 // There is a slight possibility that it resumes, steals a task
3448 // from another thread, which spawns more tasks, all in the time
3449 // that it takes this thread to check => don't write an assertion
3450 // that the victim's queue is empty. Try stealing from a
3451 // different thread.
3452 }
3453 } while (asleep);
3454 }
3455
3456 if (!asleep) {
3457 // We have a victim to try to steal from
3458 task =
3459 __kmp_steal_task(victim_tid, gtid, task_team, unfinished_threads,
3460 thread_finished, is_constrained);
3461 }
3462 if (task != NULL) { // set last stolen to victim
3463 if (threads_data[tid].td.td_deque_last_stolen != victim_tid) {
3464 threads_data[tid].td.td_deque_last_stolen = victim_tid;
3465 // The pre-refactored code did not try more than 1 successful new
            // victim, unless the last one generated more local tasks;
3467 // new_victim keeps track of this
3468 new_victim = 1;
3469 }
3470 } else { // No tasks found; unset last_stolen
3471 KMP_CHECK_UPDATE(threads_data[tid].td.td_deque_last_stolen, -1);
3472 victim_tid = -2; // no successful victim found
3473 }
3474 }
3475
3476 if (task == NULL)
3477 break; // break out of tasking loop
3478
3479// Found a task; execute it
3480#if USE_ITT_BUILD && USE_ITT_NOTIFY
3481 if (__itt_sync_create_ptr || KMP_ITT_DEBUG) {
3482 if (itt_sync_obj == NULL) { // we are at fork barrier where we could not
3483 // get the object reliably
          itt_sync_obj = __kmp_itt_barrier_object(gtid, bs_forkjoin_barrier);
        }
        __kmp_itt_task_starting(itt_sync_obj);
3487 }
3488#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY */
3489 __kmp_invoke_task(gtid, task, current_task);
3490#if USE_ITT_BUILD
3491 if (itt_sync_obj != NULL)
        __kmp_itt_task_finished(itt_sync_obj);
3493#endif /* USE_ITT_BUILD */
3494 // If this thread is only partway through the barrier and the condition is
3495 // met, then return now, so that the barrier gather/release pattern can
3496 // proceed. If this thread is in the last spin loop in the barrier,
3497 // waiting to be released, we know that the termination condition will not
3498 // be satisfied, so don't waste any cycles checking it.
3499 if (flag == NULL || (!final_spin && flag->done_check())) {
3500 KA_TRACE(
3501 15,
3502 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3503 gtid));
3504 return TRUE;
3505 }
3506 if (thread->th.th_task_team == NULL) {
3507 break;
3508 }
3509 KMP_YIELD(__kmp_library == library_throughput); // Yield before next task
3510 // If execution of a stolen task results in more tasks being placed on our
3511 // run queue, reset use_own_tasks
3512 if (!use_own_tasks && TCR_4(threads_data[tid].td.td_deque_ntasks) != 0) {
3513 KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d stolen task spawned "
3514 "other tasks, restart\n",
3515 gtid));
3516 use_own_tasks = 1;
3517 new_victim = 0;
3518 }
3519 }
3520
3521 // The task source has been exhausted. If in final spin loop of barrier,
3522 // check if termination condition is satisfied. The work queue may be empty
3523 // but there might be proxy tasks still executing.
3524 if (final_spin &&
3525 KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks) == 0) {
3526 // First, decrement the #unfinished threads, if that has not already been
3527 // done. This decrement might be to the spin location, and result in the
3528 // termination condition being satisfied.
3529 if (!*thread_finished) {
3530#if KMP_DEBUG
3531 kmp_int32 count = -1 +
3532#endif
3533 KMP_ATOMIC_DEC(unfinished_threads);
3534 KA_TRACE(20, ("__kmp_execute_tasks_template: T#%d dec "
3535 "unfinished_threads to %d task_team=%p\n",
3536 gtid, count, task_team));
3537 *thread_finished = TRUE;
3538 }
3539
3540 // It is now unsafe to reference thread->th.th_team !!!
3541 // Decrementing task_team->tt.tt_unfinished_threads can allow the primary
3542 // thread to pass through the barrier, where it might reset each thread's
3543 // th.th_team field for the next parallel region. If we can steal more
3544 // work, we know that this has not happened yet.
3545 if (flag != NULL && flag->done_check()) {
3546 KA_TRACE(
3547 15,
3548 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3549 gtid));
3550 return TRUE;
3551 }
3552 }
3553
3554 // If this thread's task team is NULL, primary thread has recognized that
3555 // there are no more tasks; bail out
3556 if (thread->th.th_task_team == NULL) {
3557 KA_TRACE(15,
3558 ("__kmp_execute_tasks_template: T#%d no more tasks\n", gtid));
3559 return FALSE;
3560 }
3561
    // Check the flag again to see if it is already done, to avoid being
    // trapped in an infinite loop when an if0 task depends on a hidden helper
    // task outside any parallel region. Detached tasks are not impacted in
    // this case because the only thread executing this function has to
    // execute the proxy task, so it is in another code path that has the
    // same check.
3567 if (flag == NULL || (!final_spin && flag->done_check())) {
3568 KA_TRACE(15,
3569 ("__kmp_execute_tasks_template: T#%d spin condition satisfied\n",
3570 gtid));
3571 return TRUE;
3572 }
3573
3574 // We could be getting tasks from target constructs; if this is the only
3575 // thread, keep trying to execute tasks from own queue
3576 if (nthreads == 1 &&
3577 KMP_ATOMIC_LD_ACQ(&current_task->td_incomplete_child_tasks))
3578 use_own_tasks = 1;
3579 else {
3580 KA_TRACE(15,
3581 ("__kmp_execute_tasks_template: T#%d can't find work\n", gtid));
3582 return FALSE;
3583 }
3584 }
3585}
3586
3587template <bool C, bool S>
3588int __kmp_execute_tasks_32(
3589 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_32<C, S> *flag, int final_spin,
3590 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3591 kmp_int32 is_constrained) {
3592 return __kmp_execute_tasks_template(
3593 thread, gtid, flag, final_spin,
3594 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3595}
3596
3597template <bool C, bool S>
3598int __kmp_execute_tasks_64(
3599 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_64<C, S> *flag, int final_spin,
3600 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3601 kmp_int32 is_constrained) {
3602 return __kmp_execute_tasks_template(
3603 thread, gtid, flag, final_spin,
3604 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3605}
3606
3607template <bool C, bool S>
3608int __kmp_atomic_execute_tasks_64(
3609 kmp_info_t *thread, kmp_int32 gtid, kmp_atomic_flag_64<C, S> *flag,
3610 int final_spin, int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3611 kmp_int32 is_constrained) {
3612 return __kmp_execute_tasks_template(
3613 thread, gtid, flag, final_spin,
3614 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3615}
3616
3617int __kmp_execute_tasks_oncore(
3618 kmp_info_t *thread, kmp_int32 gtid, kmp_flag_oncore *flag, int final_spin,
3619 int *thread_finished USE_ITT_BUILD_ARG(void *itt_sync_obj),
3620 kmp_int32 is_constrained) {
3621 return __kmp_execute_tasks_template(
3622 thread, gtid, flag, final_spin,
3623 thread_finished USE_ITT_BUILD_ARG(itt_sync_obj), is_constrained);
3624}
3625
3626template int
3627__kmp_execute_tasks_32<false, false>(kmp_info_t *, kmp_int32,
3628 kmp_flag_32<false, false> *, int,
3629 int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3630
3631template int __kmp_execute_tasks_64<false, true>(kmp_info_t *, kmp_int32,
3632 kmp_flag_64<false, true> *,
3633 int,
3634 int *USE_ITT_BUILD_ARG(void *),
3635 kmp_int32);
3636
3637template int __kmp_execute_tasks_64<true, false>(kmp_info_t *, kmp_int32,
3638 kmp_flag_64<true, false> *,
3639 int,
3640 int *USE_ITT_BUILD_ARG(void *),
3641 kmp_int32);
3642
3643template int __kmp_atomic_execute_tasks_64<false, true>(
3644 kmp_info_t *, kmp_int32, kmp_atomic_flag_64<false, true> *, int,
3645 int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3646
3647template int __kmp_atomic_execute_tasks_64<true, false>(
3648 kmp_info_t *, kmp_int32, kmp_atomic_flag_64<true, false> *, int,
3649 int *USE_ITT_BUILD_ARG(void *), kmp_int32);
3650
3651// __kmp_enable_tasking: Allocate task team and resume threads sleeping at the
3652// next barrier so they can assist in executing enqueued tasks.
3653// First thread in allocates the task team atomically.
3654static void __kmp_enable_tasking(kmp_task_team_t *task_team,
3655 kmp_info_t *this_thr) {
3656 kmp_thread_data_t *threads_data;
3657 int nthreads, i, is_init_thread;
3658
3659 KA_TRACE(10, ("__kmp_enable_tasking(enter): T#%d\n",
3660 __kmp_gtid_from_thread(this_thr)));
3661
3662 KMP_DEBUG_ASSERT(task_team != NULL);
3663 KMP_DEBUG_ASSERT(this_thr->th.th_team != NULL);
3664
3665 nthreads = task_team->tt.tt_nproc;
3666 KMP_DEBUG_ASSERT(nthreads > 0);
3667 KMP_DEBUG_ASSERT(nthreads == this_thr->th.th_team->t.t_nproc);
3668
3669 // Allocate or increase the size of threads_data if necessary
  is_init_thread = __kmp_realloc_task_threads_data(this_thr, task_team);
3671
3672 if (!is_init_thread) {
3673 // Some other thread already set up the array.
3674 KA_TRACE(
3675 20,
3676 ("__kmp_enable_tasking(exit): T#%d: threads array already set up.\n",
3677 __kmp_gtid_from_thread(this_thr)));
3678 return;
3679 }
3680 threads_data = (kmp_thread_data_t *)TCR_PTR(task_team->tt.tt_threads_data);
3681 KMP_DEBUG_ASSERT(threads_data != NULL);
3682
3683 if (__kmp_tasking_mode == tskm_task_teams &&
3684 (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME)) {
3685 // Release any threads sleeping at the barrier, so that they can steal
3686 // tasks and execute them. In extra barrier mode, tasks do not sleep
3687 // at the separate tasking barrier, so this isn't a problem.
3688 for (i = 0; i < nthreads; i++) {
3689 void *sleep_loc;
3690 kmp_info_t *thread = threads_data[i].td.td_thr;
3691
3692 if (i == this_thr->th.th_info.ds.ds_tid) {
3693 continue;
3694 }
3695 // Since we haven't locked the thread's suspend mutex lock at this
3696 // point, there is a small window where a thread might be putting
3697 // itself to sleep, but hasn't set the th_sleep_loc field yet.
      // To work around this, __kmp_execute_tasks_template() periodically
      // checks to see if other threads are sleeping (using the same random
      // mechanism that is used for task stealing) and awakens them if they
      // are.
3701 if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
3702 NULL) {
3703 KF_TRACE(50, ("__kmp_enable_tasking: T#%d waking up thread T#%d\n",
3704 __kmp_gtid_from_thread(this_thr),
3705 __kmp_gtid_from_thread(thread)));
        __kmp_null_resume_wrapper(thread);
3707 } else {
3708 KF_TRACE(50, ("__kmp_enable_tasking: T#%d don't wake up thread T#%d\n",
3709 __kmp_gtid_from_thread(this_thr),
3710 __kmp_gtid_from_thread(thread)));
3711 }
3712 }
3713 }
3714
3715 KA_TRACE(10, ("__kmp_enable_tasking(exit): T#%d\n",
3716 __kmp_gtid_from_thread(this_thr)));
3717}
3718
3719/* // TODO: Check the comment consistency
 * Utility routines for "task teams". A task team (kmp_task_team_t) is kind of
 * like a shadow of the kmp_team_t data struct, with a different lifetime.
 * After a child thread checks into a barrier and calls __kmp_release() from
3723 * the particular variant of __kmp_<barrier_kind>_barrier_gather(), it can no
3724 * longer assume that the kmp_team_t structure is intact (at any moment, the
3725 * primary thread may exit the barrier code and free the team data structure,
3726 * and return the threads to the thread pool).
3727 *
 * This does not work with the tasking code, as the thread is still
 * expected to participate in the execution of any tasks that may have been
 * spawned by a member of the team, and the thread still needs access to
 * each thread in the team, so that it can steal work from them.
3732 *
3733 * Enter the existence of the kmp_task_team_t struct. It employs a reference
3734 * counting mechanism, and is allocated by the primary thread before calling
 * __kmp_<barrier_kind>_release, and then is released by the last thread to
3736 * exit __kmp_<barrier_kind>_release at the next barrier. I.e. the lifetimes
3737 * of the kmp_task_team_t structs for consecutive barriers can overlap
3738 * (and will, unless the primary thread is the last thread to exit the barrier
3739 * release phase, which is not typical). The existence of such a struct is
3740 * useful outside the context of tasking.
3741 *
3742 * We currently use the existence of the threads array as an indicator that
3743 * tasks were spawned since the last barrier. If the structure is to be
3744 * useful outside the context of tasking, then this will have to change, but
3745 * not setting the field minimizes the performance impact of tasking on
3746 * barriers, when no explicit tasks were spawned (pushed, actually).
3747 */
3748
3749static kmp_task_team_t *__kmp_free_task_teams =
3750 NULL; // Free list for task_team data structures
3751// Lock for task team data structures
3752kmp_bootstrap_lock_t __kmp_task_team_lock =
3753 KMP_BOOTSTRAP_LOCK_INITIALIZER(__kmp_task_team_lock);
3754
3755// __kmp_alloc_task_deque:
// Allocates a task deque for a particular thread, and initializes the necessary
3757// data structures relating to the deque. This only happens once per thread
3758// per task team since task teams are recycled. No lock is needed during
3759// allocation since each thread allocates its own deque.
3760static void __kmp_alloc_task_deque(kmp_info_t *thread,
3761 kmp_thread_data_t *thread_data) {
  __kmp_init_bootstrap_lock(&thread_data->td.td_deque_lock);
3763 KMP_DEBUG_ASSERT(thread_data->td.td_deque == NULL);
3764
3765 // Initialize last stolen task field to "none"
3766 thread_data->td.td_deque_last_stolen = -1;
3767
3768 KMP_DEBUG_ASSERT(TCR_4(thread_data->td.td_deque_ntasks) == 0);
3769 KMP_DEBUG_ASSERT(thread_data->td.td_deque_head == 0);
3770 KMP_DEBUG_ASSERT(thread_data->td.td_deque_tail == 0);
3771
3772 KE_TRACE(
3773 10,
3774 ("__kmp_alloc_task_deque: T#%d allocating deque[%d] for thread_data %p\n",
3775 __kmp_gtid_from_thread(thread), INITIAL_TASK_DEQUE_SIZE, thread_data));
3776 // Allocate space for task deque, and zero the deque
3777 // Cannot use __kmp_thread_calloc() because threads not around for
3778 // kmp_reap_task_team( ).
3779 thread_data->td.td_deque = (kmp_taskdata_t **)__kmp_allocate(
3780 INITIAL_TASK_DEQUE_SIZE * sizeof(kmp_taskdata_t *));
3781 thread_data->td.td_deque_size = INITIAL_TASK_DEQUE_SIZE;
3782}
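
// Note: the deque size stays a power of two (INITIAL_TASK_DEQUE_SIZE here,
// and presumably any later growth), so head and tail indices can be wrapped
// with TASK_DEQUE_MASK() instead of a modulo operation.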
3783
3784// __kmp_free_task_deque:
3785// Deallocates a task deque for a particular thread. Happens at library
3786// deallocation so don't need to reset all thread data fields.
3787static void __kmp_free_task_deque(kmp_thread_data_t *thread_data) {
3788 if (thread_data->td.td_deque != NULL) {
    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
    TCW_4(thread_data->td.td_deque_ntasks, 0);
    __kmp_free(thread_data->td.td_deque);
    thread_data->td.td_deque = NULL;
    __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
3794 }
3795
3796#ifdef BUILD_TIED_TASK_STACK
3797 // GEH: Figure out what to do here for td_susp_tied_tasks
3798 if (thread_data->td.td_susp_tied_tasks.ts_entries != TASK_STACK_EMPTY) {
3799 __kmp_free_task_stack(__kmp_thread_from_gtid(gtid), thread_data);
3800 }
3801#endif // BUILD_TIED_TASK_STACK
3802}
3803
3804// __kmp_realloc_task_threads_data:
3805// Allocates a threads_data array for a task team, either by allocating an
3806// initial array or enlarging an existing array. Only the first thread to get
3807// the lock allocs or enlarges the array and re-initializes the array elements.
3808// That thread returns "TRUE", the rest return "FALSE".
3809// Assumes that the new array size is given by task_team -> tt.tt_nproc.
3810// The current size is given by task_team -> tt.tt_max_threads.
3811static int __kmp_realloc_task_threads_data(kmp_info_t *thread,
3812 kmp_task_team_t *task_team) {
3813 kmp_thread_data_t **threads_data_p;
3814 kmp_int32 nthreads, maxthreads;
3815 int is_init_thread = FALSE;
3816
3817 if (TCR_4(task_team->tt.tt_found_tasks)) {
3818 // Already reallocated and initialized.
3819 return FALSE;
3820 }
3821
3822 threads_data_p = &task_team->tt.tt_threads_data;
3823 nthreads = task_team->tt.tt_nproc;
3824 maxthreads = task_team->tt.tt_max_threads;
3825
3826 // All threads must lock when they encounter the first task of the implicit
3827 // task region to make sure threads_data fields are (re)initialized before
3828 // used.
  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
3830
3831 if (!TCR_4(task_team->tt.tt_found_tasks)) {
3832 // first thread to enable tasking
3833 kmp_team_t *team = thread->th.th_team;
3834 int i;
3835
3836 is_init_thread = TRUE;
3837 if (maxthreads < nthreads) {
3838
3839 if (*threads_data_p != NULL) {
3840 kmp_thread_data_t *old_data = *threads_data_p;
3841 kmp_thread_data_t *new_data = NULL;
3842
3843 KE_TRACE(
3844 10,
3845 ("__kmp_realloc_task_threads_data: T#%d reallocating "
3846 "threads data for task_team %p, new_size = %d, old_size = %d\n",
3847 __kmp_gtid_from_thread(thread), task_team, nthreads, maxthreads));
3848 // Reallocate threads_data to have more elements than current array
3849 // Cannot use __kmp_thread_realloc() because threads not around for
3850 // kmp_reap_task_team( ). Note all new array entries are initialized
3851 // to zero by __kmp_allocate().
3852 new_data = (kmp_thread_data_t *)__kmp_allocate(
3853 nthreads * sizeof(kmp_thread_data_t));
3854 // copy old data to new data
3855 KMP_MEMCPY_S((void *)new_data, nthreads * sizeof(kmp_thread_data_t),
3856 (void *)old_data, maxthreads * sizeof(kmp_thread_data_t));
3857
3858#ifdef BUILD_TIED_TASK_STACK
3859 // GEH: Figure out if this is the right thing to do
3860 for (i = maxthreads; i < nthreads; i++) {
3861 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3862 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3863 }
3864#endif // BUILD_TIED_TASK_STACK
3865 // Install the new data and free the old data
3866 (*threads_data_p) = new_data;
3867 __kmp_free(old_data);
3868 } else {
3869 KE_TRACE(10, ("__kmp_realloc_task_threads_data: T#%d allocating "
3870 "threads data for task_team %p, size = %d\n",
3871 __kmp_gtid_from_thread(thread), task_team, nthreads));
3872 // Make the initial allocate for threads_data array, and zero entries
3873 // Cannot use __kmp_thread_calloc() because threads not around for
3874 // kmp_reap_task_team( ).
3875 *threads_data_p = (kmp_thread_data_t *)__kmp_allocate(
3876 nthreads * sizeof(kmp_thread_data_t));
3877#ifdef BUILD_TIED_TASK_STACK
3878 // GEH: Figure out if this is the right thing to do
3879 for (i = 0; i < nthreads; i++) {
3880 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3881 __kmp_init_task_stack(__kmp_gtid_from_thread(thread), thread_data);
3882 }
3883#endif // BUILD_TIED_TASK_STACK
3884 }
3885 task_team->tt.tt_max_threads = nthreads;
3886 } else {
3887 // If array has (more than) enough elements, go ahead and use it
3888 KMP_DEBUG_ASSERT(*threads_data_p != NULL);
3889 }
3890
3891 // initialize threads_data pointers back to thread_info structures
3892 for (i = 0; i < nthreads; i++) {
3893 kmp_thread_data_t *thread_data = &(*threads_data_p)[i];
3894 thread_data->td.td_thr = team->t.t_threads[i];
3895
3896 if (thread_data->td.td_deque_last_stolen >= nthreads) {
3897 // The last stolen field survives across teams / barrier, and the number
3898 // of threads may have changed. It's possible (likely?) that a new
        // parallel region will exhibit the same behavior as the previous region.
3900 thread_data->td.td_deque_last_stolen = -1;
3901 }
3902 }
3903
3904 KMP_MB();
3905 TCW_SYNC_4(task_team->tt.tt_found_tasks, TRUE);
3906 }
3907
  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3909 return is_init_thread;
3910}
3911
3912// __kmp_free_task_threads_data:
3913// Deallocates a threads_data array for a task team, including any attached
3914// tasking deques. Only occurs at library shutdown.
3915static void __kmp_free_task_threads_data(kmp_task_team_t *task_team) {
  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_threads_lock);
  if (task_team->tt.tt_threads_data != NULL) {
    int i;
    for (i = 0; i < task_team->tt.tt_max_threads; i++) {
      __kmp_free_task_deque(&task_team->tt.tt_threads_data[i]);
    }
    __kmp_free(task_team->tt.tt_threads_data);
    task_team->tt.tt_threads_data = NULL;
  }
  __kmp_release_bootstrap_lock(&task_team->tt.tt_threads_lock);
3926}
3927
3928// __kmp_free_task_pri_list:
3929// Deallocates tasking deques used for priority tasks.
3930// Only occurs at library shutdown.
3931static void __kmp_free_task_pri_list(kmp_task_team_t *task_team) {
  __kmp_acquire_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
  if (task_team->tt.tt_task_pri_list != NULL) {
    kmp_task_pri_t *list = task_team->tt.tt_task_pri_list;
    while (list != NULL) {
      kmp_task_pri_t *next = list->next;
      __kmp_free_task_deque(&list->td);
      __kmp_free(list);
      list = next;
    }
    task_team->tt.tt_task_pri_list = NULL;
  }
  __kmp_release_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3944}
3945
3946// __kmp_allocate_task_team:
3947// Allocates a task team associated with a specific team, taking it from
3948// the global task team free list if possible. Also initializes data
3949// structures.
3950static kmp_task_team_t *__kmp_allocate_task_team(kmp_info_t *thread,
3951 kmp_team_t *team) {
3952 kmp_task_team_t *task_team = NULL;
3953 int nthreads;
3954
3955 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d entering; team = %p\n",
3956 (thread ? __kmp_gtid_from_thread(thread) : -1), team));
3957
3958 if (TCR_PTR(__kmp_free_task_teams) != NULL) {
3959 // Take a task team from the task team pool
    __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
    if (__kmp_free_task_teams != NULL) {
      task_team = __kmp_free_task_teams;
      TCW_PTR(__kmp_free_task_teams, task_team->tt.tt_next);
      task_team->tt.tt_next = NULL;
    }
    __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
3967 }
3968
3969 if (task_team == NULL) {
3970 KE_TRACE(10, ("__kmp_allocate_task_team: T#%d allocating "
3971 "task team for team %p\n",
3972 __kmp_gtid_from_thread(thread), team));
3973 // Allocate a new task team if one is not available. Cannot use
3974 // __kmp_thread_malloc because threads not around for kmp_reap_task_team.
3975 task_team = (kmp_task_team_t *)__kmp_allocate(sizeof(kmp_task_team_t));
    __kmp_init_bootstrap_lock(&task_team->tt.tt_threads_lock);
    __kmp_init_bootstrap_lock(&task_team->tt.tt_task_pri_lock);
3978#if USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG
3979 // suppress race conditions detection on synchronization flags in debug mode
3980 // this helps to analyze library internals eliminating false positives
3981 __itt_suppress_mark_range(
3982 __itt_suppress_range, __itt_suppress_threading_errors,
3983 &task_team->tt.tt_found_tasks, sizeof(task_team->tt.tt_found_tasks));
3984 __itt_suppress_mark_range(__itt_suppress_range,
3985 __itt_suppress_threading_errors,
3986 CCAST(kmp_uint32 *, &task_team->tt.tt_active),
3987 sizeof(task_team->tt.tt_active));
3988#endif /* USE_ITT_BUILD && USE_ITT_NOTIFY && KMP_DEBUG */
    // Note: __kmp_allocate zeroes returned memory, otherwise we would need:
3990 // task_team->tt.tt_threads_data = NULL;
3991 // task_team->tt.tt_max_threads = 0;
3992 // task_team->tt.tt_next = NULL;
3993 }
3994
3995 TCW_4(task_team->tt.tt_found_tasks, FALSE);
3996 TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
3997 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
3998 task_team->tt.tt_nproc = nthreads = team->t.t_nproc;
3999
4000 KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads, nthreads);
4001 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
4002 TCW_4(task_team->tt.tt_active, TRUE);
4003
4004 KA_TRACE(20, ("__kmp_allocate_task_team: T#%d exiting; task_team = %p "
4005 "unfinished_threads init'd to %d\n",
4006 (thread ? __kmp_gtid_from_thread(thread) : -1), task_team,
4007 KMP_ATOMIC_LD_RLX(&task_team->tt.tt_unfinished_threads)));
4008 return task_team;
4009}
4010
4011// __kmp_free_task_team:
4012// Frees the task team associated with a specific thread, and adds it
4013// to the global task team free list.
4014void __kmp_free_task_team(kmp_info_t *thread, kmp_task_team_t *task_team) {
4015 KA_TRACE(20, ("__kmp_free_task_team: T#%d task_team = %p\n",
4016 thread ? __kmp_gtid_from_thread(thread) : -1, task_team));
4017
4018 // Put task team back on free list
  __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);

  KMP_DEBUG_ASSERT(task_team->tt.tt_next == NULL);
  task_team->tt.tt_next = __kmp_free_task_teams;
  TCW_PTR(__kmp_free_task_teams, task_team);

  __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
4026}
4027
4028// __kmp_reap_task_teams:
4029// Free all the task teams on the task team free list.
4030// Should only be done during library shutdown.
4031// Cannot do anything that needs a thread structure or gtid since they are
4032// already gone.
4033void __kmp_reap_task_teams(void) {
4034 kmp_task_team_t *task_team;
4035
4036 if (TCR_PTR(__kmp_free_task_teams) != NULL) {
4037 // Free all task_teams on the free list
    __kmp_acquire_bootstrap_lock(&__kmp_task_team_lock);
4039 while ((task_team = __kmp_free_task_teams) != NULL) {
4040 __kmp_free_task_teams = task_team->tt.tt_next;
4041 task_team->tt.tt_next = NULL;
4042
4043 // Free threads_data if necessary
4044 if (task_team->tt.tt_threads_data != NULL) {
4045 __kmp_free_task_threads_data(task_team);
4046 }
4047 if (task_team->tt.tt_task_pri_list != NULL) {
4048 __kmp_free_task_pri_list(task_team);
4049 }
4050 __kmp_free(task_team);
4051 }
    __kmp_release_bootstrap_lock(&__kmp_task_team_lock);
4053 }
4054}
4055
4056// __kmp_wait_to_unref_task_teams:
4057// Some threads could still be in the fork barrier release code, possibly
4058// trying to steal tasks. Wait for each thread to unreference its task team.
4059void __kmp_wait_to_unref_task_teams(void) {
4060 kmp_info_t *thread;
4061 kmp_uint32 spins;
4062 kmp_uint64 time;
4063 int done;
4064
4065 KMP_INIT_YIELD(spins);
4066 KMP_INIT_BACKOFF(time);
4067
4068 for (;;) {
4069 done = TRUE;
4070
    // TODO: GEH - this may be wrong because some sync would be necessary
4072 // in case threads are added to the pool during the traversal. Need to
4073 // verify that lock for thread pool is held when calling this routine.
4074 for (thread = CCAST(kmp_info_t *, __kmp_thread_pool); thread != NULL;
4075 thread = thread->th.th_next_pool) {
4076#if KMP_OS_WINDOWS
4077 DWORD exit_val;
4078#endif
4079 if (TCR_PTR(thread->th.th_task_team) == NULL) {
4080 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: T#%d task_team == NULL\n",
4081 __kmp_gtid_from_thread(thread)));
4082 continue;
4083 }
4084#if KMP_OS_WINDOWS
4085 // TODO: GEH - add this check for Linux* OS / OS X* as well?
4086 if (!__kmp_is_thread_alive(thread, &exit_val)) {
4087 thread->th.th_task_team = NULL;
4088 continue;
4089 }
4090#endif
4091
4092 done = FALSE; // Because th_task_team pointer is not NULL for this thread
4093
4094 KA_TRACE(10, ("__kmp_wait_to_unref_task_team: Waiting for T#%d to "
4095 "unreference task_team\n",
4096 __kmp_gtid_from_thread(thread)));
4097
4098 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
4099 void *sleep_loc;
4100 // If the thread is sleeping, awaken it.
4101 if ((sleep_loc = TCR_PTR(CCAST(void *, thread->th.th_sleep_loc))) !=
4102 NULL) {
4103 KA_TRACE(
4104 10,
4105 ("__kmp_wait_to_unref_task_team: T#%d waking up thread T#%d\n",
4106 __kmp_gtid_from_thread(thread), __kmp_gtid_from_thread(thread)));
          __kmp_null_resume_wrapper(thread);
4108 }
4109 }
4110 }
4111 if (done) {
4112 break;
4113 }
4114
4115 // If oversubscribed or have waited a bit, yield.
4116 KMP_YIELD_OVERSUB_ELSE_SPIN(spins, time);
4117 }
4118}
4119
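// __kmp_shift_task_state_stack: insert "value" into the thread's task state
// memo stack just above th_task_state_top, shifting the entries above that
// position up by one and first doubling the stack if there is no room.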
4120void __kmp_shift_task_state_stack(kmp_info_t *this_thr, kmp_uint8 value) {
4121 // Shift values from th_task_state_top+1 to task_state_stack_sz
4122 if (this_thr->th.th_task_state_top + 1 >=
4123 this_thr->th.th_task_state_stack_sz) { // increase size
4124 kmp_uint32 new_size = 2 * this_thr->th.th_task_state_stack_sz;
4125 kmp_uint8 *old_stack, *new_stack;
4126 kmp_uint32 i;
4127 new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
4128 for (i = 0; i <= this_thr->th.th_task_state_top; ++i) {
4129 new_stack[i] = this_thr->th.th_task_state_memo_stack[i];
4130 }
4131 // If we need to reallocate do the shift at the same time.
4132 for (; i < this_thr->th.th_task_state_stack_sz; ++i) {
4133 new_stack[i + 1] = this_thr->th.th_task_state_memo_stack[i];
4134 }
4135 for (i = this_thr->th.th_task_state_stack_sz; i < new_size;
4136 ++i) { // zero-init rest of stack
4137 new_stack[i] = 0;
4138 }
4139 old_stack = this_thr->th.th_task_state_memo_stack;
4140 this_thr->th.th_task_state_memo_stack = new_stack;
4141 this_thr->th.th_task_state_stack_sz = new_size;
4142 __kmp_free(old_stack);
4143 } else {
4144 kmp_uint8 *end;
4145 kmp_uint32 i;
4146
4147 end = &this_thr->th
4148 .th_task_state_memo_stack[this_thr->th.th_task_state_stack_sz];
4149
4150 for (i = this_thr->th.th_task_state_stack_sz - 1;
4151 i > this_thr->th.th_task_state_top; i--, end--)
4152 end[0] = end[-1];
4153 }
4154 this_thr->th.th_task_state_memo_stack[this_thr->th.th_task_state_top + 1] =
4155 value;
4156}
4157
4158// __kmp_task_team_setup: Create a task_team for the current team, but use
4159// an already created, unused one if it already exists.
4160void __kmp_task_team_setup(kmp_info_t *this_thr, kmp_team_t *team, int always) {
4161 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
4162
4163 // If this task_team hasn't been created yet, allocate it. It will be used in
4164 // the region after the next.
4165 // If it exists, it is the current task team and shouldn't be touched yet as
4166 // it may still be in use.
4167 if (team->t.t_task_team[this_thr->th.th_task_state] == NULL &&
4168 (always || team->t.t_nproc > 1)) {
    team->t.t_task_team[this_thr->th.th_task_state] =
        __kmp_allocate_task_team(this_thr, team);
4171 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created new task_team %p"
4172 " for team %d at parity=%d\n",
4173 __kmp_gtid_from_thread(this_thr),
4174 team->t.t_task_team[this_thr->th.th_task_state], team->t.t_id,
4175 this_thr->th.th_task_state));
4176 }
4177 if (this_thr->th.th_task_state == 1 && always && team->t.t_nproc == 1) {
4178 // fix task state stack to adjust for proxy and helper tasks
4179 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d needs to shift stack"
4180 " for team %d at parity=%d\n",
4181 __kmp_gtid_from_thread(this_thr), team->t.t_id,
4182 this_thr->th.th_task_state));
    __kmp_shift_task_state_stack(this_thr, this_thr->th.th_task_state);
4184 }
4185
4186 // After threads exit the release, they will call sync, and then point to this
4187 // other task_team; make sure it is allocated and properly initialized. As
4188 // threads spin in the barrier release phase, they will continue to use the
4189 // previous task_team struct(above), until they receive the signal to stop
4190 // checking for tasks (they can't safely reference the kmp_team_t struct,
4191 // which could be reallocated by the primary thread). No task teams are formed
4192 // for serialized teams.
4193 if (team->t.t_nproc > 1) {
4194 int other_team = 1 - this_thr->th.th_task_state;
4195 KMP_DEBUG_ASSERT(other_team >= 0 && other_team < 2);
4196 if (team->t.t_task_team[other_team] == NULL) { // setup other team as well
      team->t.t_task_team[other_team] =
          __kmp_allocate_task_team(this_thr, team);
4199 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d created second new "
4200 "task_team %p for team %d at parity=%d\n",
4201 __kmp_gtid_from_thread(this_thr),
4202 team->t.t_task_team[other_team], team->t.t_id, other_team));
4203 } else { // Leave the old task team struct in place for the upcoming region;
4204 // adjust as needed
4205 kmp_task_team_t *task_team = team->t.t_task_team[other_team];
4206 if (!task_team->tt.tt_active ||
4207 team->t.t_nproc != task_team->tt.tt_nproc) {
4208 TCW_4(task_team->tt.tt_nproc, team->t.t_nproc);
4209 TCW_4(task_team->tt.tt_found_tasks, FALSE);
4210 TCW_4(task_team->tt.tt_found_proxy_tasks, FALSE);
4211 TCW_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
4212 KMP_ATOMIC_ST_REL(&task_team->tt.tt_unfinished_threads,
4213 team->t.t_nproc);
4214 TCW_4(task_team->tt.tt_active, TRUE);
4215 }
4216 // if team size has changed, the first thread to enable tasking will
4217 // realloc threads_data if necessary
4218 KA_TRACE(20, ("__kmp_task_team_setup: Primary T#%d reset next task_team "
4219 "%p for team %d at parity=%d\n",
4220 __kmp_gtid_from_thread(this_thr),
4221 team->t.t_task_team[other_team], team->t.t_id, other_team));
4222 }
4223 }
4224
  // For a regular thread, task enabling should be called when the task is
  // going to be pushed to a deque. However, for the hidden helper thread, we
  // need it ahead of time so that some operations can be performed without a
  // race condition.
4229 if (this_thr == __kmp_hidden_helper_main_thread) {
4230 for (int i = 0; i < 2; ++i) {
4231 kmp_task_team_t *task_team = team->t.t_task_team[i];
4232 if (KMP_TASKING_ENABLED(task_team)) {
4233 continue;
4234 }
4235 __kmp_enable_tasking(task_team, this_thr);
4236 for (int j = 0; j < task_team->tt.tt_nproc; ++j) {
4237 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[j];
4238 if (thread_data->td.td_deque == NULL) {
4239          __kmp_alloc_task_deque(__kmp_hidden_helper_threads[j], thread_data);
4240 }
4241 }
4242 }
4243 }
4244}
4245
4246// __kmp_task_team_sync: Propagation of task team data from team to threads
4247// which happens just after the release phase of a team barrier. This may be
4248// called by any thread, but only for teams with # threads > 1.
4249void __kmp_task_team_sync(kmp_info_t *this_thr, kmp_team_t *team) {
4250 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
4251
4252 // Toggle the th_task_state field, to switch which task_team this thread
4253 // refers to
4254 this_thr->th.th_task_state = (kmp_uint8)(1 - this_thr->th.th_task_state);
4255
4256 // It is now safe to propagate the task team pointer from the team struct to
4257 // the current thread.
4258 TCW_PTR(this_thr->th.th_task_team,
4259 team->t.t_task_team[this_thr->th.th_task_state]);
4260 KA_TRACE(20,
4261 ("__kmp_task_team_sync: Thread T#%d task team switched to task_team "
4262 "%p from Team #%d (parity=%d)\n",
4263 __kmp_gtid_from_thread(this_thr), this_thr->th.th_task_team,
4264 team->t.t_id, this_thr->th.th_task_state));
4265}
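// A simplified timeline (illustrative only, not part of the runtime) of how
// the two t_task_team slots and th_task_state cooperate across barriers,
// assuming a team that runs several consecutive parallel regions:
//
//   region N       : th_task_state == 0, threads work out of t_task_team[0]
//   barrier gather : primary waits on and deactivates t_task_team[0]
//                    (see __kmp_task_team_wait below)
//   barrier release: each thread calls __kmp_task_team_sync above, flipping
//                    th_task_state to 1 and switching to t_task_team[1]
//   region N+1     : t_task_team[0] is idle and can be reinitialized by
//                    __kmp_task_team_setup for the region after this one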
4266
4267// __kmp_task_team_wait: Primary thread waits for outstanding tasks after the
4268// barrier gather phase. Only called by primary thread if #threads in team > 1
4269// or if proxy tasks were created.
4270//
4271// wait is a flag that defaults to 1 (see kmp.h), but waiting can be turned off
4272// by passing in 0 optionally as the last argument. When wait is zero, primary
4273// thread does not wait for unfinished_threads to reach 0.
4274void __kmp_task_team_wait(
4275 kmp_info_t *this_thr,
4276 kmp_team_t *team USE_ITT_BUILD_ARG(void *itt_sync_obj), int wait) {
4277 kmp_task_team_t *task_team = team->t.t_task_team[this_thr->th.th_task_state];
4278
4279 KMP_DEBUG_ASSERT(__kmp_tasking_mode != tskm_immediate_exec);
4280 KMP_DEBUG_ASSERT(task_team == this_thr->th.th_task_team);
4281
4282 if ((task_team != NULL) && KMP_TASKING_ENABLED(task_team)) {
4283 if (wait) {
4284 KA_TRACE(20, ("__kmp_task_team_wait: Primary T#%d waiting for all tasks "
4285 "(for unfinished_threads to reach 0) on task_team = %p\n",
4286 __kmp_gtid_from_thread(this_thr), task_team));
4287 // Worker threads may have dropped through to release phase, but could
4288 // still be executing tasks. Wait here for tasks to complete. To avoid
4289 // memory contention, only primary thread checks termination condition.
4290 kmp_flag_32<false, false> flag(
4291 RCAST(std::atomic<kmp_uint32> *,
4292 &task_team->tt.tt_unfinished_threads),
4293 0U);
4294 flag.wait(this_thr, TRUE USE_ITT_BUILD_ARG(itt_sync_obj));
4295 }
4296 // Deactivate the old task team, so that the worker threads will stop
4297 // referencing it while spinning.
4298 KA_TRACE(
4299 20,
4300 ("__kmp_task_team_wait: Primary T#%d deactivating task_team %p: "
4301 "setting active to false, setting local and team's pointer to NULL\n",
4302 __kmp_gtid_from_thread(this_thr), task_team));
4303 KMP_DEBUG_ASSERT(task_team->tt.tt_nproc > 1 ||
4304 task_team->tt.tt_found_proxy_tasks == TRUE ||
4305 task_team->tt.tt_hidden_helper_task_encountered == TRUE);
4306 TCW_SYNC_4(task_team->tt.tt_found_proxy_tasks, FALSE);
4307 TCW_SYNC_4(task_team->tt.tt_hidden_helper_task_encountered, FALSE);
4308 KMP_CHECK_UPDATE(task_team->tt.tt_untied_task_encountered, 0);
4309 TCW_SYNC_4(task_team->tt.tt_active, FALSE);
4310 KMP_MB();
4311
4312 TCW_PTR(this_thr->th.th_task_team, NULL);
4313 }
4314}
4315
4316// __kmp_tasking_barrier:
4317// This routine is called only when __kmp_tasking_mode == tskm_extra_barrier.
4318// Internal function to execute all tasks prior to a regular barrier or a join
4319// barrier. It is a full barrier itself, which unfortunately turns regular
4320// barriers into double barriers and join barriers into 1 1/2 barriers.
4321void __kmp_tasking_barrier(kmp_team_t *team, kmp_info_t *thread, int gtid) {
4322 std::atomic<kmp_uint32> *spin = RCAST(
4323 std::atomic<kmp_uint32> *,
4324 &team->t.t_task_team[thread->th.th_task_state]->tt.tt_unfinished_threads);
4325 int flag = FALSE;
4326 KMP_DEBUG_ASSERT(__kmp_tasking_mode == tskm_extra_barrier);
4327
4328#if USE_ITT_BUILD
4329 KMP_FSYNC_SPIN_INIT(spin, NULL);
4330#endif /* USE_ITT_BUILD */
4331 kmp_flag_32<false, false> spin_flag(spin, 0U);
4332  while (!spin_flag.execute_tasks(thread, gtid, TRUE,
4333                                  &flag USE_ITT_BUILD_ARG(NULL), 0)) {
4334#if USE_ITT_BUILD
4335 // TODO: What about itt_sync_obj??
4336 KMP_FSYNC_SPIN_PREPARE(RCAST(void *, spin));
4337#endif /* USE_ITT_BUILD */
4338
4339 if (TCR_4(__kmp_global.g.g_done)) {
4340 if (__kmp_global.g.g_abort)
4341 __kmp_abort_thread();
4342 break;
4343 }
4344 KMP_YIELD(TRUE);
4345 }
4346#if USE_ITT_BUILD
4347 KMP_FSYNC_SPIN_ACQUIRED(RCAST(void *, spin));
4348#endif /* USE_ITT_BUILD */
4349}
4350
4351// __kmp_give_task puts a task into a given thread queue if:
4352// - the queue for that thread was created
4353// - there's space in that queue
4354// Because of this, __kmp_push_task needs to check if there's space after
4355// getting the lock
4356static bool __kmp_give_task(kmp_info_t *thread, kmp_int32 tid, kmp_task_t *task,
4357 kmp_int32 pass) {
4358 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
4359 kmp_task_team_t *task_team = taskdata->td_task_team;
4360
4361 KA_TRACE(20, ("__kmp_give_task: trying to give task %p to thread %d.\n",
4362 taskdata, tid));
4363
4364 // If task_team is NULL something went really bad...
4365 KMP_DEBUG_ASSERT(task_team != NULL);
4366
4367 bool result = false;
4368 kmp_thread_data_t *thread_data = &task_team->tt.tt_threads_data[tid];
4369
4370 if (thread_data->td.td_deque == NULL) {
4371 // There's no queue in this thread, go find another one
4372 // We're guaranteed that at least one thread has a queue
4373 KA_TRACE(30,
4374 ("__kmp_give_task: thread %d has no queue while giving task %p.\n",
4375 tid, taskdata));
4376 return result;
4377 }
4378
4379 if (TCR_4(thread_data->td.td_deque_ntasks) >=
4380 TASK_DEQUE_SIZE(thread_data->td)) {
4381 KA_TRACE(
4382 30,
4383 ("__kmp_give_task: queue is full while giving task %p to thread %d.\n",
4384 taskdata, tid));
4385
4386 // if this deque is bigger than the pass ratio give a chance to another
4387 // thread
4388 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
4389 return result;
4390
4391    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
4392 if (TCR_4(thread_data->td.td_deque_ntasks) >=
4393 TASK_DEQUE_SIZE(thread_data->td)) {
4394 // expand deque to push the task which is not allowed to execute
4395 __kmp_realloc_task_deque(thread, thread_data);
4396 }
4397
4398 } else {
4399
4400    __kmp_acquire_bootstrap_lock(&thread_data->td.td_deque_lock);
4401
4402 if (TCR_4(thread_data->td.td_deque_ntasks) >=
4403 TASK_DEQUE_SIZE(thread_data->td)) {
4404 KA_TRACE(30, ("__kmp_give_task: queue is full while giving task %p to "
4405 "thread %d.\n",
4406 taskdata, tid));
4407
4408 // if this deque is bigger than the pass ratio give a chance to another
4409 // thread
4410 if (TASK_DEQUE_SIZE(thread_data->td) / INITIAL_TASK_DEQUE_SIZE >= pass)
4411 goto release_and_exit;
4412
4413 __kmp_realloc_task_deque(thread, thread_data);
4414 }
4415 }
4416
4417 // lock is held here, and there is space in the deque
4418
4419 thread_data->td.td_deque[thread_data->td.td_deque_tail] = taskdata;
4420 // Wrap index.
4421 thread_data->td.td_deque_tail =
4422 (thread_data->td.td_deque_tail + 1) & TASK_DEQUE_MASK(thread_data->td);
4423 TCW_4(thread_data->td.td_deque_ntasks,
4424 TCR_4(thread_data->td.td_deque_ntasks) + 1);
4425
4426 result = true;
4427 KA_TRACE(30, ("__kmp_give_task: successfully gave task %p to thread %d.\n",
4428 taskdata, tid));
4429
4430release_and_exit:
4431  __kmp_release_bootstrap_lock(&thread_data->td.td_deque_lock);
4432
4433 return result;
4434}
4435
4436#define PROXY_TASK_FLAG 0x40000000
4437/* The finish of a proxy task is divided into two pieces:
4438   - the top half is the one that can be done from a thread outside the team
4439   - the bottom half must be run from a thread within the team
4440
4441   In order to run the bottom half the task gets queued back into one of the
4442   threads of the team. Once the td_incomplete_child_tasks counter of the
4443   parent is decremented the threads can leave the barriers. So, the bottom
4444   half needs to be queued before the counter is decremented. The top half is
4445   therefore divided into two parts:
4446   - things that can be run before queuing the bottom half
4447   - things that must be run after queuing the bottom half
4448
4449   This creates a second race as the bottom half can free the task before the
4450   second top half is executed. To avoid this we use the
4451   td_incomplete_child_tasks counter of the proxy task itself to synchronize
4452   the top and bottom halves. */
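/* A minimal sketch (illustrative only) of the ordering described above, as it
   is carried out by __kmpc_proxy_task_completed_ooo() further below:

     __kmp_first_top_half_finish_proxy(taskdata);  // mark complete, set
                                                   // PROXY_TASK_FLAG
     __kmpc_give_task(ptask);                      // queue the bottom half on
                                                   // a thread of the team
     __kmp_second_top_half_finish_proxy(taskdata); // decrement the parent's
                                                   // counter, clear the flag

   The bottom half (__kmp_bottom_half_finish_proxy) spins until
   PROXY_TASK_FLAG is cleared before releasing dependences and freeing the
   task, which closes the race described above. */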
4453static void __kmp_first_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
4454 KMP_DEBUG_ASSERT(taskdata->td_flags.tasktype == TASK_EXPLICIT);
4455 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4456 KMP_DEBUG_ASSERT(taskdata->td_flags.complete == 0);
4457 KMP_DEBUG_ASSERT(taskdata->td_flags.freed == 0);
4458
4459 taskdata->td_flags.complete = 1; // mark the task as completed
4460#if OMPX_TASKGRAPH
4461 taskdata->td_flags.onced = 1;
4462#endif
4463
4464 if (taskdata->td_taskgroup)
4465 KMP_ATOMIC_DEC(&taskdata->td_taskgroup->count);
4466
4467  // Create an imaginary child for this task so the bottom half cannot
4468  // release the task before we have completed the second top half
4469 KMP_ATOMIC_OR(&taskdata->td_incomplete_child_tasks, PROXY_TASK_FLAG);
4470}
4471
4472static void __kmp_second_top_half_finish_proxy(kmp_taskdata_t *taskdata) {
4473#if KMP_DEBUG
4474 kmp_int32 children = 0;
4475 // Predecrement simulated by "- 1" calculation
4476 children = -1 +
4477#endif
4478 KMP_ATOMIC_DEC(&taskdata->td_parent->td_incomplete_child_tasks);
4479 KMP_DEBUG_ASSERT(children >= 0);
4480
4481  // Remove the imaginary child
4482 KMP_ATOMIC_AND(&taskdata->td_incomplete_child_tasks, ~PROXY_TASK_FLAG);
4483}
4484
4485static void __kmp_bottom_half_finish_proxy(kmp_int32 gtid, kmp_task_t *ptask) {
4486 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4487 kmp_info_t *thread = __kmp_threads[gtid];
4488
4489 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4490 KMP_DEBUG_ASSERT(taskdata->td_flags.complete ==
4491 1); // top half must run before bottom half
4492
4493 // We need to wait to make sure the top half is finished
4494 // Spinning here should be ok as this should happen quickly
4495 while ((KMP_ATOMIC_LD_ACQ(&taskdata->td_incomplete_child_tasks) &
4496 PROXY_TASK_FLAG) > 0)
4497 ;
4498
4499  __kmp_release_deps(gtid, taskdata);
4500 __kmp_free_task_and_ancestors(gtid, taskdata, thread);
4501}
4502
4503/*!
4504@ingroup TASKING
4505@param gtid Global Thread ID of encountering thread
4506@param ptask Task which execution is completed
4507
4508Execute the completion of a proxy task from a thread that is part of the
4509team. Run the top and bottom halves directly.
4510*/
4511void __kmpc_proxy_task_completed(kmp_int32 gtid, kmp_task_t *ptask) {
4512 KMP_DEBUG_ASSERT(ptask != NULL);
4513 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4514 KA_TRACE(
4515 10, ("__kmp_proxy_task_completed(enter): T#%d proxy task %p completing\n",
4516 gtid, taskdata));
4517 __kmp_assert_valid_gtid(gtid);
4518 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4519
4520 __kmp_first_top_half_finish_proxy(taskdata);
4521 __kmp_second_top_half_finish_proxy(taskdata);
4522 __kmp_bottom_half_finish_proxy(gtid, ptask);
4523
4524 KA_TRACE(10,
4525 ("__kmp_proxy_task_completed(exit): T#%d proxy task %p completing\n",
4526 gtid, taskdata));
4527}
4528
4529void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start = 0) {
4530 KMP_DEBUG_ASSERT(ptask != NULL);
4531 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4532
4533 // Enqueue task to complete bottom half completion from a thread within the
4534 // corresponding team
4535 kmp_team_t *team = taskdata->td_team;
4536 kmp_int32 nthreads = team->t.t_nproc;
4537 kmp_info_t *thread;
4538
4539 // This should be similar to start_k = __kmp_get_random( thread ) % nthreads
4540 // but we cannot use __kmp_get_random here
4541 kmp_int32 start_k = start % nthreads;
4542 kmp_int32 pass = 1;
4543 kmp_int32 k = start_k;
4544
4545 do {
4546 // For now we're just linearly trying to find a thread
4547 thread = team->t.t_threads[k];
4548 k = (k + 1) % nthreads;
4549
4550 // we did a full pass through all the threads
4551 if (k == start_k)
4552 pass = pass << 1;
4553
4554  } while (!__kmp_give_task(thread, k, ptask, pass));
4555
4556 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME && __kmp_wpolicy_passive) {
4557 // awake at least one thread to execute given task
4558 for (int i = 0; i < nthreads; ++i) {
4559 thread = team->t.t_threads[i];
4560 if (thread->th.th_sleep_loc != NULL) {
4561        __kmp_null_resume_wrapper(thread);
4562 break;
4563 }
4564 }
4565 }
4566}
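// Worked example (illustrative only) of the pass/deque-growth interplay
// between __kmpc_give_task above and __kmp_give_task: with pass == 1, a full
// deque whose size is already >= 1 * INITIAL_TASK_DEQUE_SIZE is skipped
// rather than expanded, so the first sweep never reallocates. After one full
// sweep over the team (k wraps back to start_k), pass becomes 2, so only
// deques that have already grown to 2 * INITIAL_TASK_DEQUE_SIZE or more are
// skipped; smaller full deques are now reallocated to make room. Each
// additional full sweep doubles pass again, so the task is guaranteed to be
// placed eventually while deques grow only as a last resort.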
4567
4568/*!
4569@ingroup TASKING
4570@param ptask Task which execution is completed
4571
4572Execute the completion of a proxy task from a thread that may not belong to
4573the team.
4574*/
4575void __kmpc_proxy_task_completed_ooo(kmp_task_t *ptask) {
4576 KMP_DEBUG_ASSERT(ptask != NULL);
4577 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4578
4579 KA_TRACE(
4580 10,
4581 ("__kmp_proxy_task_completed_ooo(enter): proxy task completing ooo %p\n",
4582 taskdata));
4583
4584 KMP_DEBUG_ASSERT(taskdata->td_flags.proxy == TASK_PROXY);
4585
4586 __kmp_first_top_half_finish_proxy(taskdata);
4587
4588 __kmpc_give_task(ptask);
4589
4590 __kmp_second_top_half_finish_proxy(taskdata);
4591
4592 KA_TRACE(
4593 10,
4594 ("__kmp_proxy_task_completed_ooo(exit): proxy task completing ooo %p\n",
4595 taskdata));
4596}
4597
4598kmp_event_t *__kmpc_task_allow_completion_event(ident_t *loc_ref, int gtid,
4599 kmp_task_t *task) {
4600 kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task);
4601 if (td->td_allow_completion_event.type == KMP_EVENT_UNINITIALIZED) {
4602 td->td_allow_completion_event.type = KMP_EVENT_ALLOW_COMPLETION;
4603 td->td_allow_completion_event.ed.task = task;
4604    __kmp_init_tas_lock(&td->td_allow_completion_event.lock);
4605 }
4606 return &td->td_allow_completion_event;
4607}
4608
4609void __kmp_fulfill_event(kmp_event_t *event) {
4610 if (event->type == KMP_EVENT_ALLOW_COMPLETION) {
4611 kmp_task_t *ptask = event->ed.task;
4612 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(ptask);
4613 bool detached = false;
4614 int gtid = __kmp_get_gtid();
4615
4616 // The associated task might have completed or could be completing at this
4617 // point.
4618 // We need to take the lock to avoid races
4619    __kmp_acquire_tas_lock(&event->lock, gtid);
4620 if (taskdata->td_flags.proxy == TASK_PROXY) {
4621 detached = true;
4622 } else {
4623#if OMPT_SUPPORT
4624 // The OMPT event must occur under mutual exclusion,
4625 // otherwise the tool might access ptask after free
4626 if (UNLIKELY(ompt_enabled.enabled))
4627        __ompt_task_finish(ptask, NULL, ompt_task_early_fulfill);
4628#endif
4629 }
4630 event->type = KMP_EVENT_UNINITIALIZED;
4631    __kmp_release_tas_lock(&event->lock, gtid);
4632
4633 if (detached) {
4634#if OMPT_SUPPORT
4635 // We free ptask afterwards and know the task is finished,
4636 // so locking is not necessary
4637 if (UNLIKELY(ompt_enabled.enabled))
4638        __ompt_task_finish(ptask, NULL, ompt_task_late_fulfill);
4639#endif
4640 // If the task detached complete the proxy task
4641 if (gtid >= 0) {
4642 kmp_team_t *team = taskdata->td_team;
4643 kmp_info_t *thread = __kmp_get_thread();
4644 if (thread->th.th_team == team) {
4645 __kmpc_proxy_task_completed(gtid, ptask);
4646 return;
4647 }
4648 }
4649
4650 // fallback
4651 __kmpc_proxy_task_completed_ooo(ptask);
4652 }
4653 }
4654}
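// Illustrative user-level usage (not part of the runtime) that reaches the
// paths above, assuming a compiler with OpenMP 5.0 detach() support; the
// omp_fulfill_event() call ends up in __kmp_fulfill_event() for the event
// returned by __kmpc_task_allow_completion_event():
//
//   omp_event_handle_t ev;
//   #pragma omp task detach(ev)
//   { start_async_work(&ev); }   // hypothetical helper that stores ev
//   ...
//   omp_fulfill_event(ev);       // early fulfill if the task is still running,
//                                // late fulfill if it has already detached
//   #pragma omp taskwait         // may complete only after ev is fulfilled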
4655
4656// __kmp_task_dup_alloc: Allocate the taskdata and make a copy of source task
4657// for taskloop
4658//
4659// thread: allocating thread
4660// task_src: pointer to source task to be duplicated
4661// taskloop_recur: used only when dealing with taskgraph,
4662// indicating whether we need to update task->td_task_id
4663// returns: a pointer to the allocated kmp_task_t structure (task).
4664kmp_task_t *__kmp_task_dup_alloc(kmp_info_t *thread, kmp_task_t *task_src
4665#if OMPX_TASKGRAPH
4666 , int taskloop_recur
4667#endif
4668) {
4669 kmp_task_t *task;
4670 kmp_taskdata_t *taskdata;
4671 kmp_taskdata_t *taskdata_src = KMP_TASK_TO_TASKDATA(task_src);
4672 kmp_taskdata_t *parent_task = taskdata_src->td_parent; // same parent task
4673 size_t shareds_offset;
4674 size_t task_size;
4675
4676 KA_TRACE(10, ("__kmp_task_dup_alloc(enter): Th %p, source task %p\n", thread,
4677 task_src));
4678 KMP_DEBUG_ASSERT(taskdata_src->td_flags.proxy ==
4679 TASK_FULL); // it should not be proxy task
4680 KMP_DEBUG_ASSERT(taskdata_src->td_flags.tasktype == TASK_EXPLICIT);
4681 task_size = taskdata_src->td_size_alloc;
4682
4683 // Allocate a kmp_taskdata_t block and a kmp_task_t block.
4684 KA_TRACE(30, ("__kmp_task_dup_alloc: Th %p, malloc size %ld\n", thread,
4685 task_size));
4686#if USE_FAST_MEMORY
4687 taskdata = (kmp_taskdata_t *)__kmp_fast_allocate(thread, task_size);
4688#else
4689 taskdata = (kmp_taskdata_t *)__kmp_thread_malloc(thread, task_size);
4690#endif /* USE_FAST_MEMORY */
4691  KMP_MEMCPY(taskdata, taskdata_src, task_size);
4692
4693 task = KMP_TASKDATA_TO_TASK(taskdata);
4694
4695 // Initialize new task (only specific fields not affected by memcpy)
4696#if OMPX_TASKGRAPH
4697 if (!taskdata->is_taskgraph || taskloop_recur)
4698 taskdata->td_task_id = KMP_GEN_TASK_ID();
4699 else if (taskdata->is_taskgraph &&
4700 __kmp_tdg_is_recording(taskdata_src->tdg->tdg_status))
4701 taskdata->td_task_id = KMP_ATOMIC_INC(&__kmp_tdg_task_id);
4702#else
4703 taskdata->td_task_id = KMP_GEN_TASK_ID();
4704#endif
4705 if (task->shareds != NULL) { // need setup shareds pointer
4706 shareds_offset = (char *)task_src->shareds - (char *)taskdata_src;
4707 task->shareds = &((char *)taskdata)[shareds_offset];
4708 KMP_DEBUG_ASSERT((((kmp_uintptr_t)task->shareds) & (sizeof(void *) - 1)) ==
4709 0);
4710 }
4711 taskdata->td_alloc_thread = thread;
4712 taskdata->td_parent = parent_task;
4713 // task inherits the taskgroup from the parent task
4714 taskdata->td_taskgroup = parent_task->td_taskgroup;
4715 // tied task needs to initialize the td_last_tied at creation,
4716 // untied one does this when it is scheduled for execution
4717 if (taskdata->td_flags.tiedness == TASK_TIED)
4718 taskdata->td_last_tied = taskdata;
4719
4720 // Only need to keep track of child task counts if team parallel and tasking
4721 // not serialized
4722 if (!(taskdata->td_flags.team_serial || taskdata->td_flags.tasking_ser)) {
4723 KMP_ATOMIC_INC(&parent_task->td_incomplete_child_tasks);
4724 if (parent_task->td_taskgroup)
4725 KMP_ATOMIC_INC(&parent_task->td_taskgroup->count);
4726    // Only need to keep track of allocated child tasks for explicit tasks
4727    // since implicit tasks are not deallocated
4728 if (taskdata->td_parent->td_flags.tasktype == TASK_EXPLICIT)
4729 KMP_ATOMIC_INC(&taskdata->td_parent->td_allocated_child_tasks);
4730 }
4731
4732 KA_TRACE(20,
4733 ("__kmp_task_dup_alloc(exit): Th %p, created task %p, parent=%p\n",
4734 thread, taskdata, taskdata->td_parent));
4735#if OMPT_SUPPORT
4736 if (UNLIKELY(ompt_enabled.enabled))
4737    __ompt_task_init(taskdata, thread->th.th_info.ds.ds_gtid);
4738#endif
4739 return task;
4740}
4741
4742// Routine optionally generated by the compiler for setting the lastprivate flag
4743// and calling needed constructors for private/firstprivate objects
4744// (used to form taskloop tasks from pattern task)
4745// Parameters: dest task, src task, lastprivate flag.
4746typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32);
4747
4748KMP_BUILD_ASSERT(sizeof(long) == 4 || sizeof(long) == 8);
4749
4750// class to encapsulate manipulating loop bounds in a taskloop task.
4751// this abstracts away the Intel vs GOMP taskloop interface for setting/getting
4752// the loop bound variables.
4753class kmp_taskloop_bounds_t {
4754 kmp_task_t *task;
4755 const kmp_taskdata_t *taskdata;
4756 size_t lower_offset;
4757 size_t upper_offset;
4758
4759public:
4760 kmp_taskloop_bounds_t(kmp_task_t *_task, kmp_uint64 *lb, kmp_uint64 *ub)
4761 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(task)),
4762 lower_offset((char *)lb - (char *)task),
4763 upper_offset((char *)ub - (char *)task) {
4764 KMP_DEBUG_ASSERT((char *)lb > (char *)_task);
4765 KMP_DEBUG_ASSERT((char *)ub > (char *)_task);
4766 }
4767 kmp_taskloop_bounds_t(kmp_task_t *_task, const kmp_taskloop_bounds_t &bounds)
4768 : task(_task), taskdata(KMP_TASK_TO_TASKDATA(_task)),
4769 lower_offset(bounds.lower_offset), upper_offset(bounds.upper_offset) {}
4770 size_t get_lower_offset() const { return lower_offset; }
4771 size_t get_upper_offset() const { return upper_offset; }
4772 kmp_uint64 get_lb() const {
4773 kmp_int64 retval;
4774#if defined(KMP_GOMP_COMPAT)
4775 // Intel task just returns the lower bound normally
4776 if (!taskdata->td_flags.native) {
4777 retval = *(kmp_int64 *)((char *)task + lower_offset);
4778 } else {
4779 // GOMP task has to take into account the sizeof(long)
4780 if (taskdata->td_size_loop_bounds == 4) {
4781 kmp_int32 *lb = RCAST(kmp_int32 *, task->shareds);
4782 retval = (kmp_int64)*lb;
4783 } else {
4784 kmp_int64 *lb = RCAST(kmp_int64 *, task->shareds);
4785 retval = (kmp_int64)*lb;
4786 }
4787 }
4788#else
4789 (void)taskdata;
4790 retval = *(kmp_int64 *)((char *)task + lower_offset);
4791#endif // defined(KMP_GOMP_COMPAT)
4792 return retval;
4793 }
4794 kmp_uint64 get_ub() const {
4795 kmp_int64 retval;
4796#if defined(KMP_GOMP_COMPAT)
4797 // Intel task just returns the upper bound normally
4798 if (!taskdata->td_flags.native) {
4799 retval = *(kmp_int64 *)((char *)task + upper_offset);
4800 } else {
4801 // GOMP task has to take into account the sizeof(long)
4802 if (taskdata->td_size_loop_bounds == 4) {
4803 kmp_int32 *ub = RCAST(kmp_int32 *, task->shareds) + 1;
4804 retval = (kmp_int64)*ub;
4805 } else {
4806 kmp_int64 *ub = RCAST(kmp_int64 *, task->shareds) + 1;
4807 retval = (kmp_int64)*ub;
4808 }
4809 }
4810#else
4811 retval = *(kmp_int64 *)((char *)task + upper_offset);
4812#endif // defined(KMP_GOMP_COMPAT)
4813 return retval;
4814 }
4815 void set_lb(kmp_uint64 lb) {
4816#if defined(KMP_GOMP_COMPAT)
4817 // Intel task just sets the lower bound normally
4818 if (!taskdata->td_flags.native) {
4819 *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4820 } else {
4821 // GOMP task has to take into account the sizeof(long)
4822 if (taskdata->td_size_loop_bounds == 4) {
4823 kmp_uint32 *lower = RCAST(kmp_uint32 *, task->shareds);
4824 *lower = (kmp_uint32)lb;
4825 } else {
4826 kmp_uint64 *lower = RCAST(kmp_uint64 *, task->shareds);
4827 *lower = (kmp_uint64)lb;
4828 }
4829 }
4830#else
4831 *(kmp_uint64 *)((char *)task + lower_offset) = lb;
4832#endif // defined(KMP_GOMP_COMPAT)
4833 }
4834 void set_ub(kmp_uint64 ub) {
4835#if defined(KMP_GOMP_COMPAT)
4836 // Intel task just sets the upper bound normally
4837 if (!taskdata->td_flags.native) {
4838 *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4839 } else {
4840 // GOMP task has to take into account the sizeof(long)
4841 if (taskdata->td_size_loop_bounds == 4) {
4842 kmp_uint32 *upper = RCAST(kmp_uint32 *, task->shareds) + 1;
4843 *upper = (kmp_uint32)ub;
4844 } else {
4845 kmp_uint64 *upper = RCAST(kmp_uint64 *, task->shareds) + 1;
4846 *upper = (kmp_uint64)ub;
4847 }
4848 }
4849#else
4850 *(kmp_uint64 *)((char *)task + upper_offset) = ub;
4851#endif // defined(KMP_GOMP_COMPAT)
4852 }
4853};
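// Schematic view (illustrative only) of the two layouts the accessors above
// hide:
//
//   Intel/LLVM entry points: the compiler passes lb/ub pointers that point
//   inside the kmp_task_t, so the class just remembers their byte offsets:
//     lower = *(kmp_uint64 *)((char *)task + lower_offset);
//
//   GOMP compatibility (td_flags.native set): the bounds are the first two
//   "long" fields of task->shareds, so their width follows sizeof(long)
//   (td_size_loop_bounds), e.g. for a 64-bit long:
//     kmp_int64 *b = RCAST(kmp_int64 *, task->shareds);  // b[0]=lb, b[1]=ub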
4854
4855// __kmp_taskloop_linear: Start tasks of the taskloop linearly
4856//
4857// loc Source location information
4858// gtid Global thread ID
4859// task Pattern task, exposes the loop iteration range
4860// lb Pointer to loop lower bound in task structure
4861// ub Pointer to loop upper bound in task structure
4862// st Loop stride
4863// ub_glob Global upper bound (used for lastprivate check)
4864// num_tasks Number of tasks to execute
4865// grainsize Number of loop iterations per task
4866// extras Number of chunks with grainsize+1 iterations
4867// last_chunk Reduction of grainsize for last task
4868// tc Iterations count
4869// task_dup Tasks duplication routine
4870// codeptr_ra Return address for OMPT events
4871void __kmp_taskloop_linear(ident_t *loc, int gtid, kmp_task_t *task,
4872 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
4873 kmp_uint64 ub_glob, kmp_uint64 num_tasks,
4874 kmp_uint64 grainsize, kmp_uint64 extras,
4875 kmp_int64 last_chunk, kmp_uint64 tc,
4876#if OMPT_SUPPORT
4877 void *codeptr_ra,
4878#endif
4879 void *task_dup) {
4880 KMP_COUNT_BLOCK(OMP_TASKLOOP);
4881 KMP_TIME_PARTITIONED_BLOCK(OMP_taskloop_scheduling);
4882 p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
4883 // compiler provides global bounds here
4884 kmp_taskloop_bounds_t task_bounds(task, lb, ub);
4885 kmp_uint64 lower = task_bounds.get_lb();
4886 kmp_uint64 upper = task_bounds.get_ub();
4887 kmp_uint64 i;
4888 kmp_info_t *thread = __kmp_threads[gtid];
4889 kmp_taskdata_t *current_task = thread->th.th_current_task;
4890 kmp_task_t *next_task;
4891 kmp_int32 lastpriv = 0;
4892
4893 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
4894 (last_chunk < 0 ? last_chunk : extras));
4895 KMP_DEBUG_ASSERT(num_tasks > extras);
4896 KMP_DEBUG_ASSERT(num_tasks > 0);
4897 KA_TRACE(20, ("__kmp_taskloop_linear: T#%d: %lld tasks, grainsize %lld, "
4898 "extras %lld, last_chunk %lld, i=%lld,%lld(%d)%lld, dup %p\n",
4899 gtid, num_tasks, grainsize, extras, last_chunk, lower, upper,
4900 ub_glob, st, task_dup));
4901
4902 // Launch num_tasks tasks, assign grainsize iterations each task
4903 for (i = 0; i < num_tasks; ++i) {
4904 kmp_uint64 chunk_minus_1;
4905 if (extras == 0) {
4906 chunk_minus_1 = grainsize - 1;
4907 } else {
4908 chunk_minus_1 = grainsize;
4909 --extras; // first extras iterations get bigger chunk (grainsize+1)
4910 }
4911 upper = lower + st * chunk_minus_1;
4912 if (upper > *ub) {
4913 upper = *ub;
4914 }
4915 if (i == num_tasks - 1) {
4916 // schedule the last task, set lastprivate flag if needed
4917 if (st == 1) { // most common case
4918 KMP_DEBUG_ASSERT(upper == *ub);
4919 if (upper == ub_glob)
4920 lastpriv = 1;
4921 } else if (st > 0) { // positive loop stride
4922 KMP_DEBUG_ASSERT((kmp_uint64)st > *ub - upper);
4923 if ((kmp_uint64)st > ub_glob - upper)
4924 lastpriv = 1;
4925 } else { // negative loop stride
4926 KMP_DEBUG_ASSERT(upper + st < *ub);
4927 if (upper - ub_glob < (kmp_uint64)(-st))
4928 lastpriv = 1;
4929 }
4930 }
4931
4932#if OMPX_TASKGRAPH
4933 next_task = __kmp_task_dup_alloc(thread, task, /* taskloop_recur */ 0);
4934#else
4935    next_task = __kmp_task_dup_alloc(thread, task); // allocate new task
4936#endif
4937
4938 kmp_taskdata_t *next_taskdata = KMP_TASK_TO_TASKDATA(next_task);
4939 kmp_taskloop_bounds_t next_task_bounds =
4940 kmp_taskloop_bounds_t(next_task, task_bounds);
4941
4942 // adjust task-specific bounds
4943 next_task_bounds.set_lb(lower);
4944 if (next_taskdata->td_flags.native) {
4945 next_task_bounds.set_ub(upper + (st > 0 ? 1 : -1));
4946 } else {
4947 next_task_bounds.set_ub(upper);
4948 }
4949 if (ptask_dup != NULL) // set lastprivate flag, construct firstprivates,
4950 // etc.
4951 ptask_dup(next_task, task, lastpriv);
4952 KA_TRACE(40,
4953 ("__kmp_taskloop_linear: T#%d; task #%llu: task %p: lower %lld, "
4954 "upper %lld stride %lld, (offsets %p %p)\n",
4955 gtid, i, next_task, lower, upper, st,
4956 next_task_bounds.get_lower_offset(),
4957 next_task_bounds.get_upper_offset()));
4958#if OMPT_SUPPORT
4959    __kmp_omp_taskloop_task(NULL, gtid, next_task,
4960 codeptr_ra); // schedule new task
4961#if OMPT_OPTIONAL
4962 if (ompt_enabled.ompt_callback_dispatch) {
4963 OMPT_GET_DISPATCH_CHUNK(next_taskdata->ompt_task_info.dispatch_chunk,
4964 lower, upper, st);
4965 }
4966#endif // OMPT_OPTIONAL
4967#else
4968 __kmp_omp_task(gtid, next_task, true); // schedule new task
4969#endif
4970 lower = upper + st; // adjust lower bound for the next iteration
4971 }
4972 // free the pattern task and exit
4973 __kmp_task_start(gtid, task, current_task); // make internal bookkeeping
4974 // do not execute the pattern task, just do internal bookkeeping
4975  __kmp_task_finish<false>(gtid, task, current_task);
4976}
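// Worked example (illustrative only) of the distribution above: for tc = 10,
// num_tasks = 3, grainsize = 3, extras = 1, st = 1 and lower = 0, the loop
// creates
//   task 0: iterations 0..3  (grainsize + 1, consumes the one extra)
//   task 1: iterations 4..6
//   task 2: iterations 7..9  (last task, lastprivate flag checked here)
// which accounts for all 4 + 3 + 3 = 10 iterations.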
4977
4978// Structure to keep taskloop parameters for auxiliary task
4979// kept in the shareds of the task structure.
4980typedef struct __taskloop_params {
4981 kmp_task_t *task;
4982 kmp_uint64 *lb;
4983 kmp_uint64 *ub;
4984 void *task_dup;
4985 kmp_int64 st;
4986 kmp_uint64 ub_glob;
4987 kmp_uint64 num_tasks;
4988 kmp_uint64 grainsize;
4989 kmp_uint64 extras;
4990 kmp_int64 last_chunk;
4991 kmp_uint64 tc;
4992 kmp_uint64 num_t_min;
4993#if OMPT_SUPPORT
4994 void *codeptr_ra;
4995#endif
4996} __taskloop_params_t;
4997
4998void __kmp_taskloop_recur(ident_t *, int, kmp_task_t *, kmp_uint64 *,
4999 kmp_uint64 *, kmp_int64, kmp_uint64, kmp_uint64,
5000 kmp_uint64, kmp_uint64, kmp_int64, kmp_uint64,
5001 kmp_uint64,
5002#if OMPT_SUPPORT
5003 void *,
5004#endif
5005 void *);
5006
5007// Execute part of the taskloop submitted as a task.
5008int __kmp_taskloop_task(int gtid, void *ptask) {
5009 __taskloop_params_t *p =
5010 (__taskloop_params_t *)((kmp_task_t *)ptask)->shareds;
5011 kmp_task_t *task = p->task;
5012 kmp_uint64 *lb = p->lb;
5013 kmp_uint64 *ub = p->ub;
5014 void *task_dup = p->task_dup;
5015 // p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
5016 kmp_int64 st = p->st;
5017 kmp_uint64 ub_glob = p->ub_glob;
5018 kmp_uint64 num_tasks = p->num_tasks;
5019 kmp_uint64 grainsize = p->grainsize;
5020 kmp_uint64 extras = p->extras;
5021 kmp_int64 last_chunk = p->last_chunk;
5022 kmp_uint64 tc = p->tc;
5023 kmp_uint64 num_t_min = p->num_t_min;
5024#if OMPT_SUPPORT
5025 void *codeptr_ra = p->codeptr_ra;
5026#endif
5027#if KMP_DEBUG
5028 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5029 KMP_DEBUG_ASSERT(task != NULL);
5030 KA_TRACE(20,
5031 ("__kmp_taskloop_task: T#%d, task %p: %lld tasks, grainsize"
5032 " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
5033 gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
5034 st, task_dup));
5035#endif
5036 KMP_DEBUG_ASSERT(num_tasks * 2 + 1 > num_t_min);
5037 if (num_tasks > num_t_min)
5038 __kmp_taskloop_recur(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
5039 grainsize, extras, last_chunk, tc, num_t_min,
5040#if OMPT_SUPPORT
5041 codeptr_ra,
5042#endif
5043 task_dup);
5044 else
5045 __kmp_taskloop_linear(NULL, gtid, task, lb, ub, st, ub_glob, num_tasks,
5046 grainsize, extras, last_chunk, tc,
5047#if OMPT_SUPPORT
5048 codeptr_ra,
5049#endif
5050 task_dup);
5051
5052 KA_TRACE(40, ("__kmp_taskloop_task(exit): T#%d\n", gtid));
5053 return 0;
5054}
5055
5056// Schedule part of the taskloop as a task,
5057// execute the rest of the taskloop.
5058//
5059// loc Source location information
5060// gtid Global thread ID
5061// task Pattern task, exposes the loop iteration range
5062// lb Pointer to loop lower bound in task structure
5063// ub Pointer to loop upper bound in task structure
5064// st Loop stride
5065// ub_glob Global upper bound (used for lastprivate check)
5066// num_tasks Number of tasks to execute
5067// grainsize Number of loop iterations per task
5068// extras Number of chunks with grainsize+1 iterations
5069// last_chunk Reduction of grainsize for last task
5070// tc Iterations count
5071// num_t_min Threshold to launch tasks recursively
5072// task_dup Tasks duplication routine
5073// codeptr_ra Return address for OMPT events
5074void __kmp_taskloop_recur(ident_t *loc, int gtid, kmp_task_t *task,
5075 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
5076 kmp_uint64 ub_glob, kmp_uint64 num_tasks,
5077 kmp_uint64 grainsize, kmp_uint64 extras,
5078 kmp_int64 last_chunk, kmp_uint64 tc,
5079 kmp_uint64 num_t_min,
5080#if OMPT_SUPPORT
5081 void *codeptr_ra,
5082#endif
5083 void *task_dup) {
5084 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5085 KMP_DEBUG_ASSERT(task != NULL);
5086 KMP_DEBUG_ASSERT(num_tasks > num_t_min);
5087 KA_TRACE(20,
5088 ("__kmp_taskloop_recur: T#%d, task %p: %lld tasks, grainsize"
5089 " %lld, extras %lld, last_chunk %lld, i=%lld,%lld(%d), dup %p\n",
5090 gtid, taskdata, num_tasks, grainsize, extras, last_chunk, *lb, *ub,
5091 st, task_dup));
5092 p_task_dup_t ptask_dup = (p_task_dup_t)task_dup;
5093 kmp_uint64 lower = *lb;
5094 kmp_info_t *thread = __kmp_threads[gtid];
5095 // kmp_taskdata_t *current_task = thread->th.th_current_task;
5096 kmp_task_t *next_task;
5097 size_t lower_offset =
5098 (char *)lb - (char *)task; // remember offset of lb in the task structure
5099 size_t upper_offset =
5100 (char *)ub - (char *)task; // remember offset of ub in the task structure
5101
5102 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
5103 (last_chunk < 0 ? last_chunk : extras));
5104 KMP_DEBUG_ASSERT(num_tasks > extras);
5105 KMP_DEBUG_ASSERT(num_tasks > 0);
5106
5107 // split the loop in two halves
5108 kmp_uint64 lb1, ub0, tc0, tc1, ext0, ext1;
5109 kmp_int64 last_chunk0 = 0, last_chunk1 = 0;
5110 kmp_uint64 gr_size0 = grainsize;
5111 kmp_uint64 n_tsk0 = num_tasks >> 1; // num_tasks/2 to execute
5112 kmp_uint64 n_tsk1 = num_tasks - n_tsk0; // to schedule as a task
5113 if (last_chunk < 0) {
5114 ext0 = ext1 = 0;
5115 last_chunk1 = last_chunk;
5116 tc0 = grainsize * n_tsk0;
5117 tc1 = tc - tc0;
5118 } else if (n_tsk0 <= extras) {
5119 gr_size0++; // integrate extras into grainsize
5120 ext0 = 0; // no extra iters in 1st half
5121 ext1 = extras - n_tsk0; // remaining extras
5122 tc0 = gr_size0 * n_tsk0;
5123 tc1 = tc - tc0;
5124 } else { // n_tsk0 > extras
5125 ext1 = 0; // no extra iters in 2nd half
5126 ext0 = extras;
5127 tc1 = grainsize * n_tsk1;
5128 tc0 = tc - tc1;
5129 }
5130 ub0 = lower + st * (tc0 - 1);
5131 lb1 = ub0 + st;
5132
5133 // create pattern task for 2nd half of the loop
5134#if OMPX_TASKGRAPH
5135 next_task = __kmp_task_dup_alloc(thread, task,
5136 /* taskloop_recur */ 1);
5137#else
5138  next_task = __kmp_task_dup_alloc(thread, task); // duplicate the task
5139#endif
5140 // adjust lower bound (upper bound is not changed) for the 2nd half
5141 *(kmp_uint64 *)((char *)next_task + lower_offset) = lb1;
5142 if (ptask_dup != NULL) // construct firstprivates, etc.
5143 ptask_dup(next_task, task, 0);
5144 *ub = ub0; // adjust upper bound for the 1st half
5145
5146 // create auxiliary task for 2nd half of the loop
5147 // make sure new task has same parent task as the pattern task
5148 kmp_taskdata_t *current_task = thread->th.th_current_task;
5149 thread->th.th_current_task = taskdata->td_parent;
5150 kmp_task_t *new_task =
5151      __kmpc_omp_task_alloc(loc, gtid, 1, 3 * sizeof(void *),
5152                            sizeof(__taskloop_params_t), &__kmp_taskloop_task);
5153 // restore current task
5154 thread->th.th_current_task = current_task;
5155 __taskloop_params_t *p = (__taskloop_params_t *)new_task->shareds;
5156 p->task = next_task;
5157 p->lb = (kmp_uint64 *)((char *)next_task + lower_offset);
5158 p->ub = (kmp_uint64 *)((char *)next_task + upper_offset);
5159 p->task_dup = task_dup;
5160 p->st = st;
5161 p->ub_glob = ub_glob;
5162 p->num_tasks = n_tsk1;
5163 p->grainsize = grainsize;
5164 p->extras = ext1;
5165 p->last_chunk = last_chunk1;
5166 p->tc = tc1;
5167 p->num_t_min = num_t_min;
5168#if OMPT_SUPPORT
5169 p->codeptr_ra = codeptr_ra;
5170#endif
5171
5172#if OMPX_TASKGRAPH
5173 kmp_taskdata_t *new_task_data = KMP_TASK_TO_TASKDATA(new_task);
5174 new_task_data->tdg = taskdata->tdg;
5175 new_task_data->is_taskgraph = 0;
5176#endif
5177
5178#if OMPT_SUPPORT
5179 // schedule new task with correct return address for OMPT events
5180 __kmp_omp_taskloop_task(NULL, gtid, new_task, codeptr_ra);
5181#else
5182 __kmp_omp_task(gtid, new_task, true); // schedule new task
5183#endif
5184
5185 // execute the 1st half of current subrange
5186 if (n_tsk0 > num_t_min)
5187    __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0, gr_size0,
5188                         ext0, last_chunk0, tc0, num_t_min,
5189#if OMPT_SUPPORT
5190 codeptr_ra,
5191#endif
5192 task_dup);
5193 else
5194    __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, n_tsk0,
5195                          gr_size0, ext0, last_chunk0, tc0,
5196#if OMPT_SUPPORT
5197 codeptr_ra,
5198#endif
5199 task_dup);
5200
5201 KA_TRACE(40, ("__kmp_taskloop_recur(exit): T#%d\n", gtid));
5202}
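// Worked example (illustrative only) of the split above: for num_tasks = 5,
// grainsize = 3, extras = 2, tc = 17, we get n_tsk0 = 2 and n_tsk1 = 3.
// Since n_tsk0 <= extras, the first half absorbs its extras into the
// grainsize: gr_size0 = 4, ext0 = 0, tc0 = 8, while the second half keeps
// ext1 = 0 and tc1 = 9, so 2*4 + 3*3 = 17 iterations are preserved. The
// second half is packaged into the auxiliary task; the first half is
// processed by the current thread.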
5203
5204static void __kmp_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
5205 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
5206 int nogroup, int sched, kmp_uint64 grainsize,
5207 int modifier, void *task_dup) {
5208 kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);
5209 KMP_DEBUG_ASSERT(task != NULL);
5210 if (nogroup == 0) {
5211#if OMPT_SUPPORT && OMPT_OPTIONAL
5212 OMPT_STORE_RETURN_ADDRESS(gtid);
5213#endif
5214 __kmpc_taskgroup(loc, gtid);
5215 }
5216
5217#if OMPX_TASKGRAPH
5218 KMP_ATOMIC_DEC(&__kmp_tdg_task_id);
5219#endif
5220 // =========================================================================
5221 // calculate loop parameters
5222 kmp_taskloop_bounds_t task_bounds(task, lb, ub);
5223 kmp_uint64 tc;
5224 // compiler provides global bounds here
5225 kmp_uint64 lower = task_bounds.get_lb();
5226 kmp_uint64 upper = task_bounds.get_ub();
5227 kmp_uint64 ub_glob = upper; // global upper used to calc lastprivate flag
5228 kmp_uint64 num_tasks = 0, extras = 0;
5229 kmp_int64 last_chunk =
5230 0; // reduce grainsize of last task by last_chunk in strict mode
5231 kmp_uint64 num_tasks_min = __kmp_taskloop_min_tasks;
5232 kmp_info_t *thread = __kmp_threads[gtid];
5233 kmp_taskdata_t *current_task = thread->th.th_current_task;
5234
5235 KA_TRACE(20, ("__kmp_taskloop: T#%d, task %p, lb %lld, ub %lld, st %lld, "
5236 "grain %llu(%d, %d), dup %p\n",
5237 gtid, taskdata, lower, upper, st, grainsize, sched, modifier,
5238 task_dup));
5239
5240 // compute trip count
5241 if (st == 1) { // most common case
5242 tc = upper - lower + 1;
5243 } else if (st < 0) {
5244 tc = (lower - upper) / (-st) + 1;
5245 } else { // st > 0
5246 tc = (upper - lower) / st + 1;
5247 }
5248 if (tc == 0) {
5249 KA_TRACE(20, ("__kmp_taskloop(exit): T#%d zero-trip loop\n", gtid));
5250 // free the pattern task and exit
5251 __kmp_task_start(gtid, task, current_task);
5252 // do not execute anything for zero-trip loop
5253    __kmp_task_finish<false>(gtid, task, current_task);
5254 return;
5255 }
5256
5257#if OMPT_SUPPORT && OMPT_OPTIONAL
5258  ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
5259  ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
5260 if (ompt_enabled.ompt_callback_work) {
5261 ompt_callbacks.ompt_callback(ompt_callback_work)(
5262 ompt_work_taskloop, ompt_scope_begin, &(team_info->parallel_data),
5263 &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
5264 }
5265#endif
5266
5267 if (num_tasks_min == 0)
5268 // TODO: can we choose better default heuristic?
5269 num_tasks_min =
5270 KMP_MIN(thread->th.th_team_nproc * 10, INITIAL_TASK_DEQUE_SIZE);
5271
5272 // compute num_tasks/grainsize based on the input provided
5273 switch (sched) {
5274 case 0: // no schedule clause specified, we can choose the default
5275 // let's try to schedule (team_size*10) tasks
5276 grainsize = thread->th.th_team_nproc * 10;
5277 KMP_FALLTHROUGH();
5278 case 2: // num_tasks provided
5279 if (grainsize > tc) {
5280 num_tasks = tc; // too big num_tasks requested, adjust values
5281 grainsize = 1;
5282 extras = 0;
5283 } else {
5284 num_tasks = grainsize;
5285 grainsize = tc / num_tasks;
5286 extras = tc % num_tasks;
5287 }
5288 break;
5289 case 1: // grainsize provided
5290 if (grainsize > tc) {
5291 num_tasks = 1;
5292 grainsize = tc; // too big grainsize requested, adjust values
5293 extras = 0;
5294 } else {
5295 if (modifier) {
5296 num_tasks = (tc + grainsize - 1) / grainsize;
5297 last_chunk = tc - (num_tasks * grainsize);
5298 extras = 0;
5299 } else {
5300 num_tasks = tc / grainsize;
5301 // adjust grainsize for balanced distribution of iterations
5302 grainsize = tc / num_tasks;
5303 extras = tc % num_tasks;
5304 }
5305 }
5306 break;
5307 default:
5308 KMP_ASSERT2(0, "unknown scheduling of taskloop");
5309 }
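  // Worked example (illustrative only) of the computation above for sched = 1
  // (grainsize clause), tc = 10, grainsize = 4:
  //   strict modifier: num_tasks = (10 + 3) / 4 = 3, last_chunk = 10 - 12 = -2,
  //                    extras = 0  -> chunks of 4, 4 and 2 iterations
  //   no modifier:     num_tasks = 10 / 4 = 2, grainsize = 10 / 2 = 5,
  //                    extras = 0  -> chunks of 5 and 5 iterations
  // Both satisfy the invariant checked just below:
  //   tc == num_tasks * grainsize + (last_chunk < 0 ? last_chunk : extras).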
5310
5311 KMP_DEBUG_ASSERT(tc == num_tasks * grainsize +
5312 (last_chunk < 0 ? last_chunk : extras));
5313 KMP_DEBUG_ASSERT(num_tasks > extras);
5314 KMP_DEBUG_ASSERT(num_tasks > 0);
5315 // =========================================================================
5316
5317  // check the value of the if clause first
5318 // Also require GOMP_taskloop to reduce to linear (taskdata->td_flags.native)
5319 if (if_val == 0) { // if(0) specified, mark task as serial
5320 taskdata->td_flags.task_serial = 1;
5321 taskdata->td_flags.tiedness = TASK_TIED; // AC: serial task cannot be untied
5322 // always start serial tasks linearly
5323 __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
5324 grainsize, extras, last_chunk, tc,
5325#if OMPT_SUPPORT
5326 OMPT_GET_RETURN_ADDRESS(0),
5327#endif
5328 task_dup);
5329 // !taskdata->td_flags.native => currently force linear spawning of tasks
5330 // for GOMP_taskloop
5331 } else if (num_tasks > num_tasks_min && !taskdata->td_flags.native) {
5332 KA_TRACE(20, ("__kmp_taskloop: T#%d, go recursive: tc %llu, #tasks %llu"
5333 "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
5334 gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
5335 last_chunk));
5336 __kmp_taskloop_recur(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
5337                         grainsize, extras, last_chunk, tc, num_tasks_min,
5338#if OMPT_SUPPORT
5339 OMPT_GET_RETURN_ADDRESS(0),
5340#endif
5341 task_dup);
5342 } else {
5343 KA_TRACE(20, ("__kmp_taskloop: T#%d, go linear: tc %llu, #tasks %llu"
5344 "(%lld), grain %llu, extras %llu, last_chunk %lld\n",
5345 gtid, tc, num_tasks, num_tasks_min, grainsize, extras,
5346 last_chunk));
5347 __kmp_taskloop_linear(loc, gtid, task, lb, ub, st, ub_glob, num_tasks,
5348 grainsize, extras, last_chunk, tc,
5349#if OMPT_SUPPORT
5350 OMPT_GET_RETURN_ADDRESS(0),
5351#endif
5352 task_dup);
5353 }
5354
5355#if OMPT_SUPPORT && OMPT_OPTIONAL
5356 if (ompt_enabled.ompt_callback_work) {
5357 ompt_callbacks.ompt_callback(ompt_callback_work)(
5358 ompt_work_taskloop, ompt_scope_end, &(team_info->parallel_data),
5359 &(task_info->task_data), tc, OMPT_GET_RETURN_ADDRESS(0));
5360 }
5361#endif
5362
5363 if (nogroup == 0) {
5364#if OMPT_SUPPORT && OMPT_OPTIONAL
5365 OMPT_STORE_RETURN_ADDRESS(gtid);
5366#endif
5367 __kmpc_end_taskgroup(loc, gtid);
5368 }
5369 KA_TRACE(20, ("__kmp_taskloop(exit): T#%d\n", gtid));
5370}
5371
5372/*!
5373@ingroup TASKING
5374@param loc Source location information
5375@param gtid Global thread ID
5376@param task Task structure
5377@param if_val Value of the if clause
5378@param lb Pointer to loop lower bound in task structure
5379@param ub Pointer to loop upper bound in task structure
5380@param st Loop stride
5381@param nogroup Flag, 1 if nogroup clause specified, 0 otherwise
5382@param sched Schedule specified 0/1/2 for none/grainsize/num_tasks
5383@param grainsize Schedule value if specified
5384@param task_dup Tasks duplication routine
5385
5386Execute the taskloop construct.
5387*/
5388void __kmpc_taskloop(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
5389 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st, int nogroup,
5390 int sched, kmp_uint64 grainsize, void *task_dup) {
5391 __kmp_assert_valid_gtid(gtid);
5392 KA_TRACE(20, ("__kmpc_taskloop(enter): T#%d\n", gtid));
5393 __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
5394                 0, task_dup);
5395 KA_TRACE(20, ("__kmpc_taskloop(exit): T#%d\n", gtid));
5396}
5397
5398/*!
5399@ingroup TASKING
5400@param loc Source location information
5401@param gtid Global thread ID
5402@param task Task structure
5403@param if_val Value of the if clause
5404@param lb Pointer to loop lower bound in task structure
5405@param ub Pointer to loop upper bound in task structure
5406@param st Loop stride
5407@param nogroup Flag, 1 if nogroup clause specified, 0 otherwise
5408@param sched Schedule specified 0/1/2 for none/grainsize/num_tasks
5409@param grainsize Schedule value if specified
5410@param modifier Modifier 'strict' for sched, 1 if present, 0 otherwise
5411@param task_dup Tasks duplication routine
5412
5413Execute the taskloop construct.
5414*/
5415void __kmpc_taskloop_5(ident_t *loc, int gtid, kmp_task_t *task, int if_val,
5416 kmp_uint64 *lb, kmp_uint64 *ub, kmp_int64 st,
5417 int nogroup, int sched, kmp_uint64 grainsize,
5418 int modifier, void *task_dup) {
5419 __kmp_assert_valid_gtid(gtid);
5420 KA_TRACE(20, ("__kmpc_taskloop_5(enter): T#%d\n", gtid));
5421 __kmp_taskloop(loc, gtid, task, if_val, lb, ub, st, nogroup, sched, grainsize,
5422 modifier, task_dup);
5423 KA_TRACE(20, ("__kmpc_taskloop_5(exit): T#%d\n", gtid));
5424}
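// Illustrative mapping (assuming a clang-style lowering, not normative) from
// user code to the entry points above:
//
//   #pragma omp taskloop grainsize(strict: 500)
//   for (long i = 0; i < n; ++i) body(i);
//
// is outlined into a pattern task plus a call equivalent to
//   __kmpc_taskloop_5(loc, gtid, task, if_val, &lb, &ub, st,
//                     /*nogroup=*/0, /*sched=*/1, /*grainsize=*/500,
//                     /*modifier=*/1, task_dup);
// whereas a num_tasks(8) clause would arrive with sched = 2 and grainsize = 8
// (through __kmpc_taskloop when no strict modifier is involved).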
5425
5426/*!
5427@ingroup TASKING
5428@param gtid Global Thread ID of current thread
5429@return Returns a pointer to the thread's current task async handle. If no task
5430is present or gtid is invalid, returns NULL.
5431
5432Acquires a pointer to the target async handle from the current task.
5433*/
5434void **__kmpc_omp_get_target_async_handle_ptr(kmp_int32 gtid) {
5435 if (gtid == KMP_GTID_DNE)
5436 return NULL;
5437
5438 kmp_info_t *thread = __kmp_thread_from_gtid(gtid);
5439 kmp_taskdata_t *taskdata = thread->th.th_current_task;
5440
5441 if (!taskdata)
5442 return NULL;
5443
5444 return &taskdata->td_target_data.async_handle;
5445}
5446
5447/*!
5448@ingroup TASKING
5449@param gtid Global Thread ID of current thread
5450@return Returns TRUE if the current task being executed by the given thread has
5451a task team allocated to it. Otherwise, returns FALSE.
5452
5453Checks if the current thread has a task team.
5454*/
5455bool __kmpc_omp_has_task_team(kmp_int32 gtid) {
5456 if (gtid == KMP_GTID_DNE)
5457 return FALSE;
5458
5459 kmp_info_t *thread = __kmp_thread_from_gtid(gtid);
5460 kmp_taskdata_t *taskdata = thread->th.th_current_task;
5461
5462 if (!taskdata)
5463 return FALSE;
5464
5465 return taskdata->td_task_team != NULL;
5466}
5467
5468#if OMPX_TASKGRAPH
5469// __kmp_find_tdg: identify a TDG through its ID
5470// gtid: Global Thread ID
5471// tdg_id: ID of the TDG
5472// returns: If a TDG corresponding to this ID is found and is not in
5473// its initial state, returns a pointer to it, otherwise nullptr
5474static kmp_tdg_info_t *__kmp_find_tdg(kmp_int32 tdg_id) {
5475 kmp_tdg_info_t *res = nullptr;
5476 if (__kmp_max_tdgs == 0)
5477 return res;
5478
5479 if (__kmp_global_tdgs == NULL)
5480 __kmp_global_tdgs = (kmp_tdg_info_t **)__kmp_allocate(
5481 sizeof(kmp_tdg_info_t *) * __kmp_max_tdgs);
5482
5483 if ((__kmp_global_tdgs[tdg_id]) &&
5484 (__kmp_global_tdgs[tdg_id]->tdg_status != KMP_TDG_NONE))
5485 res = __kmp_global_tdgs[tdg_id];
5486 return res;
5487}
5488
5489// __kmp_print_tdg_dot: prints the TDG to a dot file
5490// tdg: Pointer to the TDG to print
5491void __kmp_print_tdg_dot(kmp_tdg_info_t *tdg) {
5492 kmp_int32 tdg_id = tdg->tdg_id;
5493  KA_TRACE(10, ("__kmp_print_tdg_dot(enter): tdg_id=%d\n", tdg_id));
5494
5495 char file_name[20];
5496 sprintf(file_name, "tdg_%d.dot", tdg_id);
5497 kmp_safe_raii_file_t tdg_file(file_name, "w");
5498
5499 kmp_int32 num_tasks = KMP_ATOMIC_LD_RLX(&tdg->num_tasks);
5500 fprintf(tdg_file,
5501 "digraph TDG {\n"
5502 " compound=true\n"
5503 " subgraph cluster {\n"
5504 " label=TDG_%d\n",
5505 tdg_id);
5506 for (kmp_int32 i = 0; i < num_tasks; i++) {
5507 fprintf(tdg_file, " %d[style=bold]\n", i);
5508 }
5509 fprintf(tdg_file, " }\n");
5510 for (kmp_int32 i = 0; i < num_tasks; i++) {
5511 kmp_int32 nsuccessors = tdg->record_map[i].nsuccessors;
5512 kmp_int32 *successors = tdg->record_map[i].successors;
5513 if (nsuccessors > 0) {
5514 for (kmp_int32 j = 0; j < nsuccessors; j++)
5515 fprintf(tdg_file, " %d -> %d \n", i, successors[j]);
5516 }
5517 }
5518 fprintf(tdg_file, "}");
5519  KA_TRACE(10, ("__kmp_print_tdg_dot(exit): tdg_id=%d\n", tdg_id));
5520}
5521
5522// __kmp_exec_tdg: launch the execution of a previously
5523// recorded TDG
5524// gtid: Global Thread ID
5525// tdg: Pointer to the TDG to execute
5526void __kmp_exec_tdg(kmp_int32 gtid, kmp_tdg_info_t *tdg) {
5527 KMP_DEBUG_ASSERT(tdg->tdg_status == KMP_TDG_READY);
5528 KA_TRACE(10, ("__kmp_exec_tdg(enter): T#%d tdg_id=%d num_roots=%d\n", gtid,
5529 tdg->tdg_id, tdg->num_roots));
5530 kmp_node_info_t *this_record_map = tdg->record_map;
5531 kmp_int32 *this_root_tasks = tdg->root_tasks;
5532 kmp_int32 this_num_roots = tdg->num_roots;
5533 kmp_int32 this_num_tasks = KMP_ATOMIC_LD_RLX(&tdg->num_tasks);
5534
5535 kmp_info_t *thread = __kmp_threads[gtid];
5536 kmp_taskdata_t *parent_task = thread->th.th_current_task;
5537
5538 if (tdg->rec_taskred_data) {
5539 __kmpc_taskred_init(gtid, tdg->rec_num_taskred, tdg->rec_taskred_data);
5540 }
5541
5542 for (kmp_int32 j = 0; j < this_num_tasks; j++) {
5543 kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(this_record_map[j].task);
5544
5545 td->td_parent = parent_task;
5546 this_record_map[j].parent_task = parent_task;
5547
5548 kmp_taskgroup_t *parent_taskgroup =
5549 this_record_map[j].parent_task->td_taskgroup;
5550
5551 KMP_ATOMIC_ST_RLX(&this_record_map[j].npredecessors_counter,
5552 this_record_map[j].npredecessors);
5553 KMP_ATOMIC_INC(&this_record_map[j].parent_task->td_incomplete_child_tasks);
5554
5555 if (parent_taskgroup) {
5556 KMP_ATOMIC_INC(&parent_taskgroup->count);
5557 // The taskgroup is different so we must update it
5558 td->td_taskgroup = parent_taskgroup;
5559 } else if (td->td_taskgroup != nullptr) {
5560      // If the parent doesn't have a taskgroup, remove it from the task
5561 td->td_taskgroup = nullptr;
5562 }
5563 if (this_record_map[j].parent_task->td_flags.tasktype == TASK_EXPLICIT)
5564 KMP_ATOMIC_INC(&this_record_map[j].parent_task->td_allocated_child_tasks);
5565 }
5566
5567 for (kmp_int32 j = 0; j < this_num_roots; ++j) {
5568 __kmp_omp_task(gtid, this_record_map[this_root_tasks[j]].task, true);
5569 }
5570 KA_TRACE(10, ("__kmp_exec_tdg(exit): T#%d tdg_id=%d num_roots=%d\n", gtid,
5571 tdg->tdg_id, tdg->num_roots));
5572}
5573
5574// __kmp_start_record: set up a TDG structure and set the
5575// recording flag to true
5576// gtid: Global Thread ID of the encountering thread
5577// input_flags: Flags associated with the TDG
5578// tdg_id: ID of the TDG to record
5579static inline void __kmp_start_record(kmp_int32 gtid,
5580 kmp_taskgraph_flags_t *flags,
5581 kmp_int32 tdg_id) {
5582 kmp_tdg_info_t *tdg =
5583 (kmp_tdg_info_t *)__kmp_allocate(sizeof(kmp_tdg_info_t));
5584 __kmp_global_tdgs[__kmp_curr_tdg_idx] = tdg;
5585 // Initializing the TDG structure
5586 tdg->tdg_id = tdg_id;
5587 tdg->map_size = INIT_MAPSIZE;
5588 tdg->num_roots = -1;
5589 tdg->root_tasks = nullptr;
5590 tdg->tdg_status = KMP_TDG_RECORDING;
5591 tdg->rec_num_taskred = 0;
5592 tdg->rec_taskred_data = nullptr;
5593 KMP_ATOMIC_ST_RLX(&tdg->num_tasks, 0);
5594
5595 // Initializing the list of nodes in this TDG
5596 kmp_node_info_t *this_record_map =
5597 (kmp_node_info_t *)__kmp_allocate(INIT_MAPSIZE * sizeof(kmp_node_info_t));
5598 for (kmp_int32 i = 0; i < INIT_MAPSIZE; i++) {
5599 kmp_int32 *successorsList =
5600 (kmp_int32 *)__kmp_allocate(__kmp_successors_size * sizeof(kmp_int32));
5601 this_record_map[i].task = nullptr;
5602 this_record_map[i].successors = successorsList;
5603 this_record_map[i].nsuccessors = 0;
5604 this_record_map[i].npredecessors = 0;
5605 this_record_map[i].successors_size = __kmp_successors_size;
5606 KMP_ATOMIC_ST_RLX(&this_record_map[i].npredecessors_counter, 0);
5607 }
5608
5609 __kmp_global_tdgs[__kmp_curr_tdg_idx]->record_map = this_record_map;
5610}
5611
5612// __kmpc_start_record_task: Wrapper around __kmp_start_record to mark
5613// the beginning of the record process of a task region
5614// loc_ref: Location of TDG, not used yet
5615// gtid: Global Thread ID of the encountering thread
5616// input_flags: Flags associated with the TDG
5617// tdg_id: ID of the TDG to record; for now, an incremental integer
5618// returns: 1 if we record, otherwise 0
5619kmp_int32 __kmpc_start_record_task(ident_t *loc_ref, kmp_int32 gtid,
5620 kmp_int32 input_flags, kmp_int32 tdg_id) {
5621
5622 kmp_int32 res;
5623 kmp_taskgraph_flags_t *flags = (kmp_taskgraph_flags_t *)&input_flags;
5624 KA_TRACE(10,
5625 ("__kmpc_start_record_task(enter): T#%d loc=%p flags=%d tdg_id=%d\n",
5626 gtid, loc_ref, input_flags, tdg_id));
5627
5628 if (__kmp_max_tdgs == 0) {
5629 KA_TRACE(
5630 10,
5631 ("__kmpc_start_record_task(abandon): T#%d loc=%p flags=%d tdg_id = %d, "
5632 "__kmp_max_tdgs = 0\n",
5633 gtid, loc_ref, input_flags, tdg_id));
5634 return 1;
5635 }
5636
5637 __kmpc_taskgroup(loc_ref, gtid);
5638 if (kmp_tdg_info_t *tdg = __kmp_find_tdg(tdg_id)) {
5639 // TODO: use re_record flag
5640 __kmp_exec_tdg(gtid, tdg);
5641 res = 0;
5642 } else {
5643 __kmp_curr_tdg_idx = tdg_id;
5644 KMP_DEBUG_ASSERT(__kmp_curr_tdg_idx < __kmp_max_tdgs);
5645 __kmp_start_record(gtid, flags, tdg_id);
5646 __kmp_num_tdg++;
5647 res = 1;
5648 }
5649 KA_TRACE(10, ("__kmpc_start_record_task(exit): T#%d TDG %d starts to %s\n",
5650 gtid, tdg_id, res ? "record" : "execute"));
5651 return res;
5652}
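// Illustrative driver (not part of the runtime) of the record/replay protocol
// built from the entry points in this section, assuming OMPX_TASKGRAPH is
// enabled and __kmp_max_tdgs > 0:
//
//   if (__kmpc_start_record_task(loc, gtid, /*input_flags=*/0, tdg_id)) {
//     // first encounter: the tasks created here are recorded into the TDG
//     generate_tasks();   // hypothetical task-generating region body
//   }
//   // on later encounters start_record returns 0 after replaying the TDG
//   // through __kmp_exec_tdg(); in both cases the taskgroup is closed here
//   __kmpc_end_record_task(loc, gtid, /*input_flags=*/0, tdg_id);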
5653
5654// __kmp_end_record: set up a TDG after recording it
5655// gtid: Global thread ID
5656// tdg: Pointer to the TDG
5657void __kmp_end_record(kmp_int32 gtid, kmp_tdg_info_t *tdg) {
5658 // Store roots
5659 kmp_node_info_t *this_record_map = tdg->record_map;
5660 kmp_int32 this_num_tasks = KMP_ATOMIC_LD_RLX(&tdg->num_tasks);
5661 kmp_int32 *this_root_tasks =
5662 (kmp_int32 *)__kmp_allocate(this_num_tasks * sizeof(kmp_int32));
5663 kmp_int32 this_map_size = tdg->map_size;
5664 kmp_int32 this_num_roots = 0;
5665 kmp_info_t *thread = __kmp_threads[gtid];
5666
5667 for (kmp_int32 i = 0; i < this_num_tasks; i++) {
5668 if (this_record_map[i].npredecessors == 0) {
5669 this_root_tasks[this_num_roots++] = i;
5670 }
5671 }
5672
5673 // Update with roots info and mapsize
5674 tdg->map_size = this_map_size;
5675 tdg->num_roots = this_num_roots;
5676 tdg->root_tasks = this_root_tasks;
5677 KMP_DEBUG_ASSERT(tdg->tdg_status == KMP_TDG_RECORDING);
5678 tdg->tdg_status = KMP_TDG_READY;
5679
5680 if (thread->th.th_current_task->td_dephash) {
5681 __kmp_dephash_free(thread, thread->th.th_current_task->td_dephash);
5682 thread->th.th_current_task->td_dephash = NULL;
5683 }
5684
5685 // Reset predecessor counter
5686 for (kmp_int32 i = 0; i < this_num_tasks; i++) {
5687 KMP_ATOMIC_ST_RLX(&this_record_map[i].npredecessors_counter,
5688 this_record_map[i].npredecessors);
5689 }
5690 KMP_ATOMIC_ST_RLX(&__kmp_tdg_task_id, 0);
5691
5692 if (__kmp_tdg_dot)
5693 __kmp_print_tdg_dot(tdg);
5694}
5695
5696// __kmpc_end_record_task: wrapper around __kmp_end_record to mark
5697// the end of recording phase
5698//
5699// loc_ref: Source location information
5700// gtid: Global thread ID
5701// input_flags: Flags attached to the graph
5702// tdg_id: ID of the TDG just finished recording
5703void __kmpc_end_record_task(ident_t *loc_ref, kmp_int32 gtid,
5704 kmp_int32 input_flags, kmp_int32 tdg_id) {
5705 kmp_tdg_info_t *tdg = __kmp_find_tdg(tdg_id);
5706
5707 KA_TRACE(10, ("__kmpc_end_record_task(enter): T#%d loc=%p finishes recording"
5708 " tdg=%d with flags=%d\n",
5709 gtid, loc_ref, tdg_id, input_flags));
5710 if (__kmp_max_tdgs) {
5711 // TODO: use input_flags->nowait
5712 __kmpc_end_taskgroup(loc_ref, gtid);
5713 if (__kmp_tdg_is_recording(tdg->tdg_status))
5714 __kmp_end_record(gtid, tdg);
5715 }
5716 KA_TRACE(10, ("__kmpc_end_record_task(exit): T#%d loc=%p finished recording"
5717 " tdg=%d, its status is now READY\n",
5718 gtid, loc_ref, tdg_id));
5719}
5720#endif
5721
