1 | /* |
2 | * kmp_taskdeps.h |
3 | */ |
4 | |
5 | //===----------------------------------------------------------------------===// |
6 | // |
7 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
8 | // See https://llvm.org/LICENSE.txt for license information. |
9 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #ifndef KMP_TASKDEPS_H |
14 | #define KMP_TASKDEPS_H |
15 | |
16 | #include "kmp.h" |
17 | |
// Lock/unlock a dependence node's internal lock; used to serialize updates to
// a node's successor list against concurrent dependence processing.
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))
20 | |
21 | static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) { |
22 | if (!node) |
23 | return; |
24 | |
25 | kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1; |
26 | KMP_DEBUG_ASSERT(n >= 0); |
27 | if (n == 0) { |
28 | #if USE_ITT_BUILD && USE_ITT_NOTIFY |
29 | __itt_sync_destroy(node); |
30 | #endif |
31 | KMP_ASSERT(node->dn.nrefs == 0); |
32 | #if USE_FAST_MEMORY |
33 | __kmp_fast_free(thread, node); |
34 | #else |
35 | __kmp_thread_free(thread, node); |
36 | #endif |
37 | } |
38 | } |
39 | |
40 | static inline void __kmp_depnode_list_free(kmp_info_t *thread, |
41 | kmp_depnode_list *list) { |
42 | kmp_depnode_list *next; |
43 | |
44 | for (; list; list = next) { |
45 | next = list->next; |
46 | |
47 | __kmp_node_deref(thread, node: list->node); |
48 | #if USE_FAST_MEMORY |
49 | __kmp_fast_free(thread, list); |
50 | #else |
51 | __kmp_thread_free(thread, list); |
52 | #endif |
53 | } |
54 | } |
55 | |
56 | static inline void __kmp_dephash_free_entries(kmp_info_t *thread, |
57 | kmp_dephash_t *h) { |
58 | for (size_t i = 0; i < h->size; i++) { |
59 | if (h->buckets[i]) { |
60 | kmp_dephash_entry_t *next; |
61 | for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) { |
62 | next = entry->next_in_bucket; |
63 | __kmp_depnode_list_free(thread, list: entry->last_set); |
64 | __kmp_depnode_list_free(thread, list: entry->prev_set); |
65 | __kmp_node_deref(thread, node: entry->last_out); |
66 | if (entry->mtx_lock) { |
67 | __kmp_destroy_lock(lck: entry->mtx_lock); |
68 | __kmp_free(entry->mtx_lock); |
69 | } |
70 | #if USE_FAST_MEMORY |
71 | __kmp_fast_free(thread, entry); |
72 | #else |
73 | __kmp_thread_free(thread, entry); |
74 | #endif |
75 | } |
76 | h->buckets[i] = 0; |
77 | } |
78 | } |
79 | __kmp_node_deref(thread, node: h->last_all); |
80 | h->last_all = NULL; |
81 | } |
82 | |
// Free a dependence hash table: release all of its entries, then free the
// table structure itself with the same allocator family used elsewhere.
static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
  __kmp_dephash_free_entries(thread, h);
#if USE_FAST_MEMORY
  __kmp_fast_free(thread, h);
#else
  __kmp_thread_free(thread, h);
#endif
}
91 | |
// Defined elsewhere in the runtime: hands a task off to another thread's
// queue (used below to route a regular task back to its encountering team).
extern void __kmpc_give_task(kmp_task_t *ptask, kmp_int32 start);
93 | |
94 | static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) { |
95 | |
96 | #if OMPX_TASKGRAPH |
97 | if (task->is_taskgraph && !(__kmp_tdg_is_recording(task->tdg->tdg_status))) { |
98 | kmp_node_info_t *TaskInfo = &(task->tdg->record_map[task->td_task_id]); |
99 | |
100 | for (int i = 0; i < TaskInfo->nsuccessors; i++) { |
101 | kmp_int32 successorNumber = TaskInfo->successors[i]; |
102 | kmp_node_info_t *successor = &(task->tdg->record_map[successorNumber]); |
103 | kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->npredecessors_counter) - 1; |
104 | if (successor->task != nullptr && npredecessors == 0) { |
105 | __kmp_omp_task(gtid, successor->task, false); |
106 | } |
107 | } |
108 | return; |
109 | } |
110 | #endif |
111 | |
112 | kmp_info_t *thread = __kmp_threads[gtid]; |
113 | kmp_depnode_t *node = task->td_depnode; |
114 | |
115 | // Check mutexinoutset dependencies, release locks |
116 | if (UNLIKELY(node && (node->dn.mtx_num_locks < 0))) { |
117 | // negative num_locks means all locks were acquired |
118 | node->dn.mtx_num_locks = -node->dn.mtx_num_locks; |
119 | for (int i = node->dn.mtx_num_locks - 1; i >= 0; --i) { |
120 | KMP_DEBUG_ASSERT(node->dn.mtx_locks[i] != NULL); |
121 | __kmp_release_lock(lck: node->dn.mtx_locks[i], gtid); |
122 | } |
123 | } |
124 | |
125 | if (task->td_dephash) { |
126 | KA_TRACE( |
127 | 40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n" , |
128 | gtid, task)); |
129 | __kmp_dephash_free(thread, h: task->td_dephash); |
130 | task->td_dephash = NULL; |
131 | } |
132 | |
133 | if (!node) |
134 | return; |
135 | |
136 | KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n" , |
137 | gtid, task)); |
138 | |
139 | KMP_ACQUIRE_DEPNODE(gtid, node); |
140 | #if OMPX_TASKGRAPH |
141 | if (!task->is_taskgraph || |
142 | (task->is_taskgraph && !__kmp_tdg_is_recording(task->tdg->tdg_status))) |
143 | #endif |
144 | node->dn.task = |
145 | NULL; // mark this task as finished, so no new dependencies are generated |
146 | KMP_RELEASE_DEPNODE(gtid, node); |
147 | |
148 | kmp_depnode_list_t *next; |
149 | kmp_taskdata_t *next_taskdata; |
150 | for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) { |
151 | kmp_depnode_t *successor = p->node; |
152 | #if USE_ITT_BUILD && USE_ITT_NOTIFY |
153 | __itt_sync_releasing(successor); |
154 | #endif |
155 | kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1; |
156 | |
157 | // successor task can be NULL for wait_depends or because deps are still |
158 | // being processed |
159 | if (npredecessors == 0) { |
160 | #if USE_ITT_BUILD && USE_ITT_NOTIFY |
161 | __itt_sync_acquired(successor); |
162 | #endif |
163 | KMP_MB(); |
164 | if (successor->dn.task) { |
165 | KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled " |
166 | "for execution.\n" , |
167 | gtid, successor->dn.task, task)); |
168 | // If a regular task depending on a hidden helper task, when the |
169 | // hidden helper task is done, the regular task should be executed by |
170 | // its encountering team. |
171 | if (KMP_HIDDEN_HELPER_THREAD(gtid)) { |
172 | // Hidden helper thread can only execute hidden helper tasks |
173 | KMP_ASSERT(task->td_flags.hidden_helper); |
174 | next_taskdata = KMP_TASK_TO_TASKDATA(successor->dn.task); |
175 | // If the dependent task is a regular task, we need to push to its |
176 | // encountering thread's queue; otherwise, it can be pushed to its own |
177 | // queue. |
178 | if (!next_taskdata->td_flags.hidden_helper) { |
179 | kmp_int32 encountering_gtid = |
180 | next_taskdata->td_alloc_thread->th.th_info.ds.ds_gtid; |
181 | kmp_int32 encountering_tid = __kmp_tid_from_gtid(gtid: encountering_gtid); |
182 | __kmpc_give_task(ptask: successor->dn.task, start: encountering_tid); |
183 | } else { |
184 | __kmp_omp_task(gtid, new_task: successor->dn.task, serialize_immediate: false); |
185 | } |
186 | } else { |
187 | __kmp_omp_task(gtid, new_task: successor->dn.task, serialize_immediate: false); |
188 | } |
189 | } |
190 | } |
191 | |
192 | next = p->next; |
193 | __kmp_node_deref(thread, node: p->node); |
194 | #if USE_FAST_MEMORY |
195 | __kmp_fast_free(thread, p); |
196 | #else |
197 | __kmp_thread_free(thread, p); |
198 | #endif |
199 | } |
200 | |
201 | __kmp_node_deref(thread, node); |
202 | |
203 | KA_TRACE( |
204 | 20, |
205 | ("__kmp_release_deps: T#%d all successors of %p notified of completion\n" , |
206 | gtid, task)); |
207 | } |
208 | |
209 | #endif // KMP_TASKDEPS_H |
210 | |