/*
 * kmp_taskdeps.h
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef KMP_TASKDEPS_H
#define KMP_TASKDEPS_H

#include "kmp.h"

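// Helpers to take/release the per-depnode lock (dn.lock) that guards updates
// to a dependence node, e.g. clearing dn.task below once the task finishes.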
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))

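// Drop one reference to a dependence node. Reference counts are kept in
// multiples of 2 so that bit 0 stays free to flag depnodes allocated on the
// stack (see the assertions below); the node is deallocated once the count,
// ignoring that bit, reaches zero.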
static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
  if (!node)
    return;

  kmp_int32 n = KMP_ATOMIC_SUB(&node->dn.nrefs, 2) - 2;
  KMP_DEBUG_ASSERT(n >= 0);
  if ((n & ~1) == 0) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    __itt_sync_destroy(node);
#endif
    // These two assertions are somewhat redundant. The first is intended to
    // detect if we are trying to free a depnode on the stack.
    KMP_DEBUG_ASSERT((node->dn.nrefs & 1) == 0);
    KMP_ASSERT(node->dn.nrefs == 0);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, node);
#else
    __kmp_thread_free(thread, node);
#endif
  }
}

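// Free a list of depnode references: drop each element's reference to the
// depnode it points at, then release the list cell itself.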
static inline void __kmp_depnode_list_free(kmp_info_t *thread,
                                           kmp_depnode_list *list) {
  kmp_depnode_list *next;

  for (; list; list = next) {
    next = list->next;

    __kmp_node_deref(thread, list->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, list);
#else
    __kmp_thread_free(thread, list);
#endif
  }
}

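// Release all entries of a task's dependence hash: the last_set/prev_set
// depnode lists, the last_out depnode, and any mutexinoutset lock, then the
// entry itself. The bucket array is cleared but the hash object is not freed
// here.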
static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
                                              kmp_dephash_t *h) {
  for (size_t i = 0; i < h->size; i++) {
    if (h->buckets[i]) {
      kmp_dephash_entry_t *next;
      for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
        next = entry->next_in_bucket;
        __kmp_depnode_list_free(thread, entry->last_set);
        __kmp_depnode_list_free(thread, entry->prev_set);
        __kmp_node_deref(thread, entry->last_out);
        if (entry->mtx_lock) {
          __kmp_destroy_lock(entry->mtx_lock);
          __kmp_free(entry->mtx_lock);
        }
#if USE_FAST_MEMORY
        __kmp_fast_free(thread, entry);
#else
        __kmp_thread_free(thread, entry);
#endif
      }
      h->buckets[i] = 0;
    }
  }
  __kmp_node_deref(thread, h->last_all);
  h->last_all = NULL;
}

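// Free a dependence hash: release its entries, then the hash itself.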
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}

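// Used below to hand a ready successor task over to a thread of its
// encountering team when it is released from a hidden helper thread.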
extern void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start);

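// Called when a task with dependences finishes: releases any mutexinoutset
// locks still held, frees the task's dependence hash, and decrements the
// predecessor count of every successor, scheduling successors that become
// ready.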
static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {

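  // Under OMPX_TASKGRAPH, a task replayed from a recorded task graph resolves
  // its successors through the recorded map instead of through depnodes:
  // decrement each recorded successor's predecessor counter and launch the
  // successor once that counter reaches zero.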
#if OMPX_TASKGRAPH
  if (task->is_taskgraph && !(__kmp_tdg_is_recording(task->tdg->tdg_status))) {
    kmp_node_info_t *TaskInfo = &(task->tdg->record_map[task->td_tdg_task_id]);

    for (int i = 0; i < TaskInfo->nsuccessors; i++) {
      kmp_int32 successorNumber = TaskInfo->successors[i];
      kmp_node_info_t *successor = &(task->tdg->record_map[successorNumber]);
      kmp_int32 npredecessors =
          KMP_ATOMIC_DEC(&successor->npredecessors_counter) - 1;
      if (successor->task != nullptr && npredecessors == 0) {
        __kmp_omp_task(gtid, successor->task, false);
      }
    }
    return;
  }
#endif

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_depnode_t *node = task->td_depnode;

  // Check mutexinoutset dependencies, release locks
  if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) {
    // negative num_locks means all locks were acquired
    node->dn.mtx_num_locks = -node->dn.mtx_num_locks;
    for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) {
      KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL);
      __kmp_release_lock(node->dn.mtx_locks[i], gtid);
    }
  }

  if (task->td_dephash) {
    KA_TRACE(
        40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
             gtid, task));
    __kmp_dephash_free(thread, task->td_dephash);
    task->td_dephash = NULL;
  }

  if (!node)
    return;

  KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
                gtid, task));

  KMP_ACQUIRE_DEPNODE(gtid, node);
#if OMPX_TASKGRAPH
  if (!task->is_taskgraph ||
      (task->is_taskgraph && !__kmp_tdg_is_recording(task->tdg->tdg_status)))
#endif
    node->dn.task =
        NULL; // mark this task as finished, so no new dependencies are generated
  KMP_RELEASE_DEPNODE(gtid, node);

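  // Walk the successor list: decrement each successor's predecessor count and
  // schedule any successor that becomes ready and already has a task attached.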
  kmp_depnode_list_t *next;
  kmp_taskdata_t *next_taskdata;
  for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
    kmp_depnode_t *successor = p->node;
#if USE_ITT_BUILD && USE_ITT_NOTIFY
    __itt_sync_releasing(successor);
#endif
    kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;

    // successor task can be NULL for wait_depends or because deps are still
    // being processed
    if (npredecessors == 0) {
#if USE_ITT_BUILD && USE_ITT_NOTIFY
      __itt_sync_acquired(successor);
#endif
      KMP_MB();
      if (successor->dn.task) {
        KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
                      "for execution.\n",
                      gtid, successor->dn.task, task));
        // If a regular task depends on a hidden helper task, then when the
        // hidden helper task finishes, the regular task should be executed by
        // its encountering team.
        if (KMP_HIDDEN_HELPER_THREAD(gtid)) {
          // Hidden helper thread can only execute hidden helper tasks
          KMP_ASSERT(task->td_flags.hidden_helper);
          next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task);
          // If the dependent task is a regular task, we need to push to its
          // encountering thread's queue; otherwise, it can be pushed to its own
          // queue.
          if (!next_taskdata->td_flags.hidden_helper) {
            kmp_int32 encountering_gtid =
                next_taskdata->td_alloc_thread->th.th_info.ds.ds_gtid;
            kmp_int32 encountering_tid = __kmp_tid_from_gtid(encountering_gtid);
            __kmpc_give_task(successor->dn.task, encountering_tid);
          } else {
            __kmp_omp_task(gtid, successor->dn.task, false);
          }
        } else {
          __kmp_omp_task(gtid, successor->dn.task, false);
        }
      }
    }

    next = p->next;
    __kmp_node_deref(thread, p->node);
#if USE_FAST_MEMORY
    __kmp_fast_free(thread, p);
#else
    __kmp_thread_free(thread, p);
#endif
  }

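  // Drop the reference this task held on its own depnode; the node is freed
  // once every other holder has released its reference too.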
  __kmp_node_deref(thread, node);

  KA_TRACE(
      20,
      ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
       gtid, task));
}

#endif // KMP_TASKDEPS_H
