1 | /* |
2 | * kmp_taskdeps.cpp |
3 | */ |
4 | |
5 | //===----------------------------------------------------------------------===// |
6 | // |
7 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
8 | // See https://llvm.org/LICENSE.txt for license information. |
9 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | //#define KMP_SUPPORT_GRAPH_OUTPUT 1 |
14 | |
15 | #include "kmp.h" |
16 | #include "kmp_io.h" |
17 | #include "kmp_wait_release.h" |
18 | #include "kmp_taskdeps.h" |
19 | #if OMPT_SUPPORT |
20 | #include "ompt-specific.h" |
21 | #endif |
22 | |
23 | // TODO: Improve memory allocation? keep a list of pre-allocated structures? |
// allocate in blocks? re-use finished list entries?
25 | // TODO: don't use atomic ref counters for stack-allocated nodes. |
26 | // TODO: find an alternate to atomic refs for heap-allocated nodes? |
27 | // TODO: Finish graph output support |
// TODO: kmp_lock_t seems a tad too big (and heavyweight) for this. Check other
29 | // runtime locks |
30 | // TODO: Any ITT support needed? |
31 | |
32 | #ifdef KMP_SUPPORT_GRAPH_OUTPUT |
33 | static std::atomic<kmp_int32> kmp_node_id_seed = 0; |
34 | #endif |
35 | |
36 | static void __kmp_init_node(kmp_depnode_t *node, bool on_stack) { |
37 | node->dn.successors = NULL; |
38 | node->dn.task = NULL; // will point to the right task |
39 | // once dependences have been processed |
40 | for (int i = 0; i < MAX_MTX_DEPS; ++i) |
41 | node->dn.mtx_locks[i] = NULL; |
42 | node->dn.mtx_num_locks = 0; |
  __kmp_init_lock(&node->dn.lock);
44 | // Init creates the first reference. Bit 0 indicates that this node |
45 | // resides on the stack. The refcount is incremented and decremented in |
46 | // steps of two, maintaining use of even numbers for heap nodes and odd |
47 | // numbers for stack nodes. |
48 | KMP_ATOMIC_ST_RLX(&node->dn.nrefs, on_stack ? 3 : 2); |
49 | #ifdef KMP_SUPPORT_GRAPH_OUTPUT |
50 | node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed); |
51 | #endif |
52 | #if USE_ITT_BUILD && USE_ITT_NOTIFY |
  __itt_sync_create(node, "OMP task dep node", NULL, 0);
54 | #endif |
55 | } |
56 | |
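// Take an extra reference on a node. References are counted in steps of two
// so that bit 0 keeps encoding whether the node lives on the stack or heap.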
57 | static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) { |
58 | KMP_ATOMIC_ADD(&node->dn.nrefs, 2); |
59 | return node; |
60 | } |
61 | |
62 | enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 }; |
63 | |
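// Bucket counts (primes) used by successive generations of the dependence
// hash; the table stops growing once MAX_GEN generations have been used.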
64 | size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029}; |
65 | const size_t MAX_GEN = 8; |
66 | |
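// Map a dependence address to a bucket by xor-folding two shifted copies of
// the address and reducing modulo the table size.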
67 | static inline size_t __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) { |
68 | // TODO alternate to try: set = (((Addr64)(addrUsefulBits * 9.618)) % |
69 | // m_num_sets ); |
70 | return ((addr >> 6) ^ (addr >> 2)) % hsize; |
71 | } |
72 | |
73 | static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread, |
74 | kmp_dephash_t *current_dephash) { |
75 | kmp_dephash_t *h; |
76 | |
77 | size_t gen = current_dephash->generation + 1; |
78 | if (gen >= MAX_GEN) |
79 | return current_dephash; |
80 | size_t new_size = sizes[gen]; |
81 | |
82 | size_t size_to_allocate = |
83 | new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t); |
84 | |
85 | #if USE_FAST_MEMORY |
86 | h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate); |
87 | #else |
88 | h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate); |
89 | #endif |
90 | |
91 | h->size = new_size; |
92 | h->nelements = current_dephash->nelements; |
93 | h->buckets = (kmp_dephash_entry **)(h + 1); |
94 | h->generation = gen; |
95 | h->nconflicts = 0; |
96 | h->last_all = current_dephash->last_all; |
97 | |
98 | // make sure buckets are properly initialized |
99 | for (size_t i = 0; i < new_size; i++) { |
100 | h->buckets[i] = NULL; |
101 | } |
102 | |
103 | // insert existing elements in the new table |
104 | for (size_t i = 0; i < current_dephash->size; i++) { |
105 | kmp_dephash_entry_t *next, *entry; |
106 | for (entry = current_dephash->buckets[i]; entry; entry = next) { |
107 | next = entry->next_in_bucket; |
108 | // Compute the new hash using the new size, and insert the entry in |
109 | // the new bucket. |
      size_t new_bucket = __kmp_dephash_hash(entry->addr, h->size);
111 | entry->next_in_bucket = h->buckets[new_bucket]; |
112 | if (entry->next_in_bucket) { |
113 | h->nconflicts++; |
114 | } |
115 | h->buckets[new_bucket] = entry; |
116 | } |
117 | } |
118 | |
119 | // Free old hash table |
120 | #if USE_FAST_MEMORY |
121 | __kmp_fast_free(thread, current_dephash); |
122 | #else |
123 | __kmp_thread_free(thread, current_dephash); |
124 | #endif |
125 | |
126 | return h; |
127 | } |
128 | |
129 | static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread, |
130 | kmp_taskdata_t *current_task) { |
131 | kmp_dephash_t *h; |
132 | |
133 | size_t h_size; |
134 | |
135 | if (current_task->td_flags.tasktype == TASK_IMPLICIT) |
136 | h_size = KMP_DEPHASH_MASTER_SIZE; |
137 | else |
138 | h_size = KMP_DEPHASH_OTHER_SIZE; |
139 | |
140 | size_t size = h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t); |
141 | |
142 | #if USE_FAST_MEMORY |
143 | h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size); |
144 | #else |
145 | h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size); |
146 | #endif |
147 | h->size = h_size; |
148 | |
149 | h->generation = 0; |
150 | h->nelements = 0; |
151 | h->nconflicts = 0; |
152 | h->buckets = (kmp_dephash_entry **)(h + 1); |
153 | h->last_all = NULL; |
154 | |
155 | for (size_t i = 0; i < h_size; i++) |
156 | h->buckets[i] = 0; |
157 | |
158 | return h; |
159 | } |
160 | |
161 | static kmp_dephash_entry *__kmp_dephash_find(kmp_info_t *thread, |
162 | kmp_dephash_t **hash, |
163 | kmp_intptr_t addr) { |
164 | kmp_dephash_t *h = *hash; |
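  // Grow the table once there is, on average, at least one collision per
  // bucket; __kmp_dephash_extend picks the next size from the 'sizes' table.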
165 | if (h->nelements != 0 && h->nconflicts / h->size >= 1) { |
    *hash = __kmp_dephash_extend(thread, h);
167 | h = *hash; |
168 | } |
  size_t bucket = __kmp_dephash_hash(addr, h->size);
170 | |
171 | kmp_dephash_entry_t *entry; |
172 | for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket) |
173 | if (entry->addr == addr) |
174 | break; |
175 | |
176 | if (entry == NULL) { |
177 | // create entry. This is only done by one thread so no locking required |
178 | #if USE_FAST_MEMORY |
179 | entry = (kmp_dephash_entry_t *)__kmp_fast_allocate( |
180 | thread, sizeof(kmp_dephash_entry_t)); |
181 | #else |
182 | entry = (kmp_dephash_entry_t *)__kmp_thread_malloc( |
183 | thread, sizeof(kmp_dephash_entry_t)); |
184 | #endif |
185 | entry->addr = addr; |
186 | if (!h->last_all) // no predecessor task with omp_all_memory dependence |
187 | entry->last_out = NULL; |
188 | else // else link the omp_all_memory depnode to the new entry |
    entry->last_out = __kmp_node_ref(h->last_all);
190 | entry->last_set = NULL; |
191 | entry->prev_set = NULL; |
192 | entry->last_flag = 0; |
193 | entry->mtx_lock = NULL; |
194 | entry->next_in_bucket = h->buckets[bucket]; |
195 | h->buckets[bucket] = entry; |
196 | h->nelements++; |
197 | if (entry->next_in_bucket) |
198 | h->nconflicts++; |
199 | } |
200 | return entry; |
201 | } |
202 | |
203 | static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread, |
204 | kmp_depnode_list_t *list, |
205 | kmp_depnode_t *node) { |
206 | kmp_depnode_list_t *new_head; |
207 | |
208 | #if USE_FAST_MEMORY |
209 | new_head = (kmp_depnode_list_t *)__kmp_fast_allocate( |
210 | thread, sizeof(kmp_depnode_list_t)); |
211 | #else |
212 | new_head = (kmp_depnode_list_t *)__kmp_thread_malloc( |
213 | thread, sizeof(kmp_depnode_list_t)); |
214 | #endif |
215 | |
216 | new_head->node = __kmp_node_ref(node); |
217 | new_head->next = list; |
218 | |
219 | return new_head; |
220 | } |
221 | |
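// Report the dependence edge source -> sink to the interested observers:
// the taskgraph recorder (OMPX_TASKGRAPH), the debug graph output, and the
// OMPT task-dependence callback.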
222 | static inline void __kmp_track_dependence(kmp_int32 gtid, kmp_depnode_t *source, |
223 | kmp_depnode_t *sink, |
224 | kmp_task_t *sink_task) { |
225 | #if OMPX_TASKGRAPH |
226 | kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task); |
227 | kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task); |
228 | if (source->dn.task && sink_task) { |
229 | // Not supporting dependency between two tasks that one is within the TDG |
230 | // and the other is not |
231 | KMP_ASSERT(task_source->is_taskgraph == task_sink->is_taskgraph); |
232 | } |
233 | if (task_sink->is_taskgraph && |
234 | __kmp_tdg_is_recording(task_sink->tdg->tdg_status)) { |
235 | kmp_node_info_t *source_info = |
236 | &task_sink->tdg->record_map[task_source->td_tdg_task_id]; |
237 | bool exists = false; |
238 | for (int i = 0; i < source_info->nsuccessors; i++) { |
239 | if (source_info->successors[i] == task_sink->td_tdg_task_id) { |
240 | exists = true; |
241 | break; |
242 | } |
243 | } |
244 | if (!exists) { |
245 | if (source_info->nsuccessors >= source_info->successors_size) { |
246 | kmp_uint old_size = source_info->successors_size; |
247 | source_info->successors_size = 2 * source_info->successors_size; |
248 | kmp_int32 *old_succ_ids = source_info->successors; |
249 | kmp_int32 *new_succ_ids = (kmp_int32 *)__kmp_allocate( |
250 | source_info->successors_size * sizeof(kmp_int32)); |
251 | KMP_MEMCPY(new_succ_ids, old_succ_ids, old_size * sizeof(kmp_int32)); |
252 | source_info->successors = new_succ_ids; |
253 | __kmp_free(old_succ_ids); |
254 | } |
255 | |
256 | source_info->successors[source_info->nsuccessors] = |
257 | task_sink->td_tdg_task_id; |
258 | source_info->nsuccessors++; |
259 | |
260 | kmp_node_info_t *sink_info = |
261 | &(task_sink->tdg->record_map[task_sink->td_tdg_task_id]); |
262 | sink_info->npredecessors++; |
263 | } |
264 | } |
265 | #endif |
266 | #ifdef KMP_SUPPORT_GRAPH_OUTPUT |
267 | kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task); |
268 | // do not use sink->dn.task as that is only filled after the dependences |
269 | // are already processed! |
270 | kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task); |
271 | |
  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
273 | task_source->td_ident->psource, sink->dn.id, |
274 | task_sink->td_ident->psource); |
275 | #endif |
276 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which
     task a blocks the execution of b through the ompt_new_dependence_callback
  */
280 | if (ompt_enabled.ompt_callback_task_dependence) { |
281 | kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task); |
282 | ompt_data_t *sink_data; |
283 | if (sink_task) |
284 | sink_data = &(KMP_TASK_TO_TASKDATA(sink_task)->ompt_task_info.task_data); |
285 | else |
286 | sink_data = &__kmp_threads[gtid]->th.ompt_thread_info.task_data; |
287 | |
288 | ompt_callbacks.ompt_callback(ompt_callback_task_dependence)( |
289 | &(task_source->ompt_task_info.task_data), sink_data); |
290 | } |
291 | #endif /* OMPT_SUPPORT && OMPT_OPTIONAL */ |
292 | } |
293 | |
294 | kmp_base_depnode_t *__kmpc_task_get_depnode(kmp_task_t *task) { |
295 | kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); |
296 | return td->td_depnode ? &(td->td_depnode->dn) : NULL; |
297 | } |
298 | |
299 | kmp_depnode_list_t *__kmpc_task_get_successors(kmp_task_t *task) { |
300 | kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); |
301 | return td->td_depnode->dn.successors; |
302 | } |
303 | |
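// Link 'node' as a successor of every depnode in 'plist'. Returns the number
// of predecessor edges recorded, i.e. how many list entries still carry an
// unfinished task.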
304 | static inline kmp_int32 |
305 | __kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread, |
306 | kmp_task_t *task, kmp_depnode_t *node, |
307 | kmp_depnode_list_t *plist) { |
308 | if (!plist) |
309 | return 0; |
310 | kmp_int32 npredecessors = 0; |
311 | // link node as successor of list elements |
312 | for (kmp_depnode_list_t *p = plist; p; p = p->next) { |
313 | kmp_depnode_t *dep = p->node; |
314 | #if OMPX_TASKGRAPH |
315 | kmp_tdg_status tdg_status = KMP_TDG_NONE; |
316 | if (task) { |
317 | kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); |
318 | if (td->is_taskgraph) |
319 | tdg_status = KMP_TASK_TO_TASKDATA(task)->tdg->tdg_status; |
320 | if (__kmp_tdg_is_recording(tdg_status)) |
321 | __kmp_track_dependence(gtid, dep, node, task); |
322 | } |
323 | #endif |
324 | if (dep->dn.task) { |
325 | KMP_ACQUIRE_DEPNODE(gtid, dep); |
326 | if (dep->dn.task) { |
327 | if (!dep->dn.successors || dep->dn.successors->node != node) { |
328 | #if OMPX_TASKGRAPH |
329 | if (!(__kmp_tdg_is_recording(tdg_status)) && task) |
330 | #endif |
            __kmp_track_dependence(gtid, dep, node, task);
          dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
          KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                        "%p\n",
                        gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                        KMP_TASK_TO_TASKDATA(task)));
337 | npredecessors++; |
338 | } |
339 | } |
340 | KMP_RELEASE_DEPNODE(gtid, dep); |
341 | } |
342 | } |
343 | return npredecessors; |
344 | } |
345 | |
346 | // Add the edge 'sink' -> 'source' in the task dependency graph |
347 | static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid, |
348 | kmp_info_t *thread, |
349 | kmp_task_t *task, |
350 | kmp_depnode_t *source, |
351 | kmp_depnode_t *sink) { |
352 | if (!sink) |
353 | return 0; |
354 | kmp_int32 npredecessors = 0; |
355 | #if OMPX_TASKGRAPH |
356 | kmp_tdg_status tdg_status = KMP_TDG_NONE; |
357 | kmp_taskdata_t *td = KMP_TASK_TO_TASKDATA(task); |
358 | if (task) { |
359 | if (td->is_taskgraph) |
360 | tdg_status = KMP_TASK_TO_TASKDATA(task)->tdg->tdg_status; |
361 | if (__kmp_tdg_is_recording(tdg_status) && sink->dn.task) |
362 | __kmp_track_dependence(gtid, sink, source, task); |
363 | } |
364 | #endif |
365 | if (sink->dn.task) { |
    // synchronously add source to sink's list of successors
367 | KMP_ACQUIRE_DEPNODE(gtid, sink); |
368 | if (sink->dn.task) { |
369 | if (!sink->dn.successors || sink->dn.successors->node != source) { |
370 | #if OMPX_TASKGRAPH |
371 | if (!(__kmp_tdg_is_recording(tdg_status)) && task) |
372 | #endif |
          __kmp_track_dependence(gtid, sink, source, task);
        sink->dn.successors =
            __kmp_add_node(thread, sink->dn.successors, source);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
379 | #if OMPX_TASKGRAPH |
380 | if (__kmp_tdg_is_recording(tdg_status)) { |
381 | kmp_taskdata_t *tdd = KMP_TASK_TO_TASKDATA(sink->dn.task); |
382 | if (tdd->is_taskgraph) { |
383 | if (tdd->td_flags.onced) |
384 | // decrement npredecessors if sink->dn.task belongs to a taskgraph |
385 | // and |
386 | // 1) the task is reset to its initial state (by kmp_free_task) or |
387 | // 2) the task is complete but not yet reset |
388 | npredecessors--; |
389 | } |
390 | } |
391 | #endif |
392 | npredecessors++; |
393 | } |
394 | } |
395 | KMP_RELEASE_DEPNODE(gtid, sink); |
396 | } |
397 | return npredecessors; |
398 | } |
399 | |
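// Process an omp_all_memory dependence: order 'node' after the previous
// omp_all_memory node (if any) and, acting as an OUT dependence, after every
// address currently tracked in the dephash.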
400 | static inline kmp_int32 |
401 | __kmp_process_dep_all(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t *h, |
402 | bool dep_barrier, kmp_task_t *task) { |
403 | KA_TRACE(30, ("__kmp_process_dep_all: T#%d processing dep_all, " |
                "dep_barrier = %d\n",
405 | gtid, dep_barrier)); |
406 | kmp_info_t *thread = __kmp_threads[gtid]; |
407 | kmp_int32 npredecessors = 0; |
408 | |
409 | // process previous omp_all_memory node if any |
410 | npredecessors += |
      __kmp_depnode_link_successor(gtid, thread, task, node, h->last_all);
  __kmp_node_deref(thread, h->last_all);
413 | if (!dep_barrier) { |
414 | h->last_all = __kmp_node_ref(node); |
415 | } else { |
416 | // if this is a sync point in the serial sequence, then the previous |
417 | // outputs are guaranteed to be completed after the execution of this |
418 | // task so the previous output nodes can be cleared. |
419 | h->last_all = NULL; |
420 | } |
421 | |
422 | // process all regular dependences |
423 | for (size_t i = 0; i < h->size; i++) { |
424 | kmp_dephash_entry_t *info = h->buckets[i]; |
425 | if (!info) // skip empty slots in dephash |
426 | continue; |
427 | for (; info; info = info->next_in_bucket) { |
428 | // for each entry the omp_all_memory works as OUT dependence |
429 | kmp_depnode_t *last_out = info->last_out; |
430 | kmp_depnode_list_t *last_set = info->last_set; |
431 | kmp_depnode_list_t *prev_set = info->prev_set; |
432 | if (last_set) { |
433 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
437 | info->last_set = NULL; |
438 | info->prev_set = NULL; |
439 | info->last_flag = 0; // no sets in this dephash entry |
440 | } else { |
441 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
443 | } |
      __kmp_node_deref(thread, last_out);
445 | if (!dep_barrier) { |
446 | info->last_out = __kmp_node_ref(node); |
447 | } else { |
448 | info->last_out = NULL; |
449 | } |
450 | } |
451 | } |
  KA_TRACE(30, ("__kmp_process_dep_all: T#%d found %d predecessors\n", gtid,
453 | npredecessors)); |
454 | return npredecessors; |
455 | } |
456 | |
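// Process a dependence list against the dephash. The filter==true
// instantiation skips entries whose base_addr was zeroed out by
// __kmp_check_deps when duplicate addresses were merged.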
457 | template <bool filter> |
458 | static inline kmp_int32 |
459 | __kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash, |
460 | bool dep_barrier, kmp_int32 ndeps, |
461 | kmp_depend_info_t *dep_list, kmp_task_t *task) { |
462 | KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependences : " |
                "dep_barrier = %d\n",
464 | filter, gtid, ndeps, dep_barrier)); |
465 | |
466 | kmp_info_t *thread = __kmp_threads[gtid]; |
467 | kmp_int32 npredecessors = 0; |
468 | for (kmp_int32 i = 0; i < ndeps; i++) { |
469 | const kmp_depend_info_t *dep = &dep_list[i]; |
470 | |
471 | if (filter && dep->base_addr == 0) |
472 | continue; // skip filtered entries |
473 | |
474 | kmp_dephash_entry_t *info = |
        __kmp_dephash_find(thread, hash, dep->base_addr);
476 | kmp_depnode_t *last_out = info->last_out; |
477 | kmp_depnode_list_t *last_set = info->last_set; |
478 | kmp_depnode_list_t *prev_set = info->prev_set; |
479 | |
480 | if (dep->flags.out) { // out or inout --> clean lists if any |
481 | if (last_set) { |
482 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
        __kmp_depnode_list_free(thread, last_set);
        __kmp_depnode_list_free(thread, prev_set);
486 | info->last_set = NULL; |
487 | info->prev_set = NULL; |
488 | info->last_flag = 0; // no sets in this dephash entry |
489 | } else { |
490 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
492 | } |
      __kmp_node_deref(thread, last_out);
494 | if (!dep_barrier) { |
495 | info->last_out = __kmp_node_ref(node); |
496 | } else { |
497 | // if this is a sync point in the serial sequence, then the previous |
498 | // outputs are guaranteed to be completed after the execution of this |
499 | // task so the previous output nodes can be cleared. |
500 | info->last_out = NULL; |
501 | } |
502 | } else { // either IN or MTX or SET |
503 | if (info->last_flag == 0 || info->last_flag == dep->flag) { |
504 | // last_set either didn't exist or of same dep kind |
505 | // link node as successor of the last_out if any |
506 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
508 | // link node as successor of all nodes in the prev_set if any |
509 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, prev_set);
511 | if (dep_barrier) { |
512 | // clean last_out and prev_set if any; don't touch last_set |
          __kmp_node_deref(thread, last_out);
          info->last_out = NULL;
          __kmp_depnode_list_free(thread, prev_set);
516 | info->prev_set = NULL; |
517 | } |
518 | } else { // last_set is of different dep kind, make it prev_set |
519 | // link node as successor of all nodes in the last_set |
520 | npredecessors += |
            __kmp_depnode_link_successor(gtid, thread, task, node, last_set);
522 | // clean last_out if any |
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        // clean prev_set if any
        __kmp_depnode_list_free(thread, prev_set);
527 | if (!dep_barrier) { |
528 | // move last_set to prev_set, new last_set will be allocated |
529 | info->prev_set = last_set; |
530 | } else { |
531 | info->prev_set = NULL; |
532 | info->last_flag = 0; |
533 | } |
534 | info->last_set = NULL; |
535 | } |
536 | // for dep_barrier last_flag value should remain: |
537 | // 0 if last_set is empty, unchanged otherwise |
538 | if (!dep_barrier) { |
539 | info->last_flag = dep->flag; // store dep kind of the last_set |
        info->last_set = __kmp_add_node(thread, info->last_set, node);
541 | } |
542 | // check if we are processing MTX dependency |
543 | if (dep->flag == KMP_DEP_MTX) { |
544 | if (info->mtx_lock == NULL) { |
545 | info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t)); |
          __kmp_init_lock(info->mtx_lock);
547 | } |
548 | KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS); |
549 | kmp_int32 m; |
550 | // Save lock in node's array |
551 | for (m = 0; m < MAX_MTX_DEPS; ++m) { |
552 | // sort pointers in decreasing order to avoid potential livelock |
553 | if (node->dn.mtx_locks[m] < info->mtx_lock) { |
554 | KMP_DEBUG_ASSERT(!node->dn.mtx_locks[node->dn.mtx_num_locks]); |
555 | for (int n = node->dn.mtx_num_locks; n > m; --n) { |
556 | // shift right all lesser non-NULL pointers |
557 | KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL); |
558 | node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1]; |
559 | } |
560 | node->dn.mtx_locks[m] = info->mtx_lock; |
561 | break; |
562 | } |
563 | } |
564 | KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from loop |
565 | node->dn.mtx_num_locks++; |
566 | } |
567 | } |
568 | } |
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
570 | gtid, npredecessors)); |
571 | return npredecessors; |
572 | } |
573 | |
574 | #define NO_DEP_BARRIER (false) |
575 | #define DEP_BARRIER (true) |
576 | |
577 | // returns true if the task has any outstanding dependence |
578 | static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node, |
579 | kmp_task_t *task, kmp_dephash_t **hash, |
580 | bool dep_barrier, kmp_int32 ndeps, |
581 | kmp_depend_info_t *dep_list, |
582 | kmp_int32 ndeps_noalias, |
583 | kmp_depend_info_t *noalias_dep_list) { |
584 | int i, n_mtxs = 0, dep_all = 0; |
585 | #if KMP_DEBUG |
586 | kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); |
587 | #endif |
588 | KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependences for task %p : %d " |
589 | "possibly aliased dependences, %d non-aliased dependences : " |
                "dep_barrier=%d .\n",
591 | gtid, taskdata, ndeps, ndeps_noalias, dep_barrier)); |
592 | |
593 | // Filter deps in dep_list |
594 | // TODO: Different algorithm for large dep_list ( > 10 ? ) |
595 | for (i = 0; i < ndeps; i++) { |
596 | if (dep_list[i].base_addr != 0 && |
597 | dep_list[i].base_addr != (kmp_intptr_t)KMP_SIZE_T_MAX) { |
598 | KMP_DEBUG_ASSERT( |
599 | dep_list[i].flag == KMP_DEP_IN || dep_list[i].flag == KMP_DEP_OUT || |
600 | dep_list[i].flag == KMP_DEP_INOUT || |
601 | dep_list[i].flag == KMP_DEP_MTX || dep_list[i].flag == KMP_DEP_SET); |
602 | for (int j = i + 1; j < ndeps; j++) { |
603 | if (dep_list[i].base_addr == dep_list[j].base_addr) { |
604 | if (dep_list[i].flag != dep_list[j].flag) { |
605 | // two different dependences on same address work identical to OUT |
606 | dep_list[i].flag = KMP_DEP_OUT; |
607 | } |
608 | dep_list[j].base_addr = 0; // Mark j element as void |
609 | } |
610 | } |
611 | if (dep_list[i].flag == KMP_DEP_MTX) { |
612 | // limit number of mtx deps to MAX_MTX_DEPS per node |
613 | if (n_mtxs < MAX_MTX_DEPS && task != NULL) { |
614 | ++n_mtxs; |
615 | } else { |
616 | dep_list[i].flag = KMP_DEP_OUT; // downgrade mutexinoutset to inout |
617 | } |
618 | } |
619 | } else if (dep_list[i].flag == KMP_DEP_ALL || |
620 | dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX) { |
621 | // omp_all_memory dependence can be marked by compiler by either |
622 | // (addr=0 && flag=0x80) (flag KMP_DEP_ALL), or (addr=-1). |
623 | // omp_all_memory overrides all other dependences if any |
624 | dep_all = 1; |
625 | break; |
626 | } |
627 | } |
628 | |
629 | // doesn't need to be atomic as no other thread is going to be accessing this |
630 | // node just yet. |
631 | // npredecessors is set -1 to ensure that none of the releasing tasks queues |
632 | // this task before we have finished processing all the dependences |
633 | node->dn.npredecessors = -1; |
634 | |
635 | // used to pack all npredecessors additions into a single atomic operation at |
636 | // the end |
637 | int npredecessors; |
638 | |
639 | if (!dep_all) { // regular dependences |
640 | npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, |
641 | ndeps, dep_list, task); |
642 | npredecessors += __kmp_process_deps<false>( |
        gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);
644 | } else { // omp_all_memory dependence |
    npredecessors = __kmp_process_dep_all(gtid, node, *hash, dep_barrier, task);
646 | } |
647 | |
648 | node->dn.task = task; |
649 | KMP_MB(); |
650 | |
651 | // Account for our initial fake value |
652 | npredecessors++; |
653 | |
654 | // Update predecessors and obtain current value to check if there are still |
655 | // any outstanding dependences (some tasks may have finished while we |
656 | // processed the dependences) |
657 | npredecessors = |
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;
659 | |
  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
661 | gtid, npredecessors, taskdata)); |
662 | |
663 | // beyond this point the task could be queued (and executed) by a releasing |
664 | // task... |
665 | return npredecessors > 0 ? true : false; |
666 | } |
667 | |
668 | /*! |
669 | @ingroup TASKING |
670 | @param loc_ref location of the original task directive |
671 | @param gtid Global Thread ID of encountering thread |
672 | @param new_task task thunk allocated by __kmp_omp_task_alloc() for the ''new |
673 | task'' |
674 | @param ndeps Number of depend items with possible aliasing |
675 | @param dep_list List of depend items with possible aliasing |
676 | @param ndeps_noalias Number of depend items with no aliasing |
677 | @param noalias_dep_list List of depend items with no aliasing |
678 | |
679 | @return Returns either TASK_CURRENT_NOT_QUEUED if the current task was not |
680 | suspended and queued, or TASK_CURRENT_QUEUED if it was suspended and queued |
681 | |
682 | Schedule a non-thread-switchable task with dependences for execution |
683 | */ |
684 | kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid, |
685 | kmp_task_t *new_task, kmp_int32 ndeps, |
686 | kmp_depend_info_t *dep_list, |
687 | kmp_int32 ndeps_noalias, |
688 | kmp_depend_info_t *noalias_dep_list) { |
689 | |
690 | kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task); |
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
692 | loc_ref, new_taskdata)); |
693 | __kmp_assert_valid_gtid(gtid); |
694 | kmp_info_t *thread = __kmp_threads[gtid]; |
695 | kmp_taskdata_t *current_task = thread->th.th_current_task; |
696 | |
697 | #if OMPX_TASKGRAPH |
698 | // record TDG with deps |
699 | if (new_taskdata->is_taskgraph && |
700 | __kmp_tdg_is_recording(new_taskdata->tdg->tdg_status)) { |
701 | kmp_tdg_info_t *tdg = new_taskdata->tdg; |
702 | // extend record_map if needed |
703 | if (new_taskdata->td_tdg_task_id >= tdg->map_size) { |
704 | __kmp_acquire_bootstrap_lock(&tdg->graph_lock); |
705 | if (new_taskdata->td_tdg_task_id >= tdg->map_size) { |
706 | kmp_uint old_size = tdg->map_size; |
707 | kmp_uint new_size = old_size * 2; |
708 | kmp_node_info_t *old_record = tdg->record_map; |
709 | kmp_node_info_t *new_record = (kmp_node_info_t *)__kmp_allocate( |
710 | new_size * sizeof(kmp_node_info_t)); |
711 | KMP_MEMCPY(new_record, tdg->record_map, |
712 | old_size * sizeof(kmp_node_info_t)); |
713 | tdg->record_map = new_record; |
714 | |
715 | __kmp_free(old_record); |
716 | |
717 | for (kmp_int i = old_size; i < new_size; i++) { |
718 | kmp_int32 *successorsList = (kmp_int32 *)__kmp_allocate( |
719 | __kmp_successors_size * sizeof(kmp_int32)); |
720 | new_record[i].task = nullptr; |
721 | new_record[i].successors = successorsList; |
722 | new_record[i].nsuccessors = 0; |
723 | new_record[i].npredecessors = 0; |
724 | new_record[i].successors_size = __kmp_successors_size; |
725 | KMP_ATOMIC_ST_REL(&new_record[i].npredecessors_counter, 0); |
726 | } |
727 | // update the size at the end, so that we avoid other |
728 | // threads use old_record while map_size is already updated |
729 | tdg->map_size = new_size; |
730 | } |
731 | __kmp_release_bootstrap_lock(&tdg->graph_lock); |
732 | } |
733 | tdg->record_map[new_taskdata->td_tdg_task_id].task = new_task; |
734 | tdg->record_map[new_taskdata->td_tdg_task_id].parent_task = |
735 | new_taskdata->td_parent; |
736 | KMP_ATOMIC_INC(&tdg->num_tasks); |
737 | } |
738 | #endif |
739 | #if OMPT_SUPPORT |
740 | if (ompt_enabled.enabled) { |
741 | if (!current_task->ompt_task_info.frame.enter_frame.ptr) |
742 | current_task->ompt_task_info.frame.enter_frame.ptr = |
743 | OMPT_GET_FRAME_ADDRESS(0); |
744 | if (ompt_enabled.ompt_callback_task_create) { |
745 | ompt_callbacks.ompt_callback(ompt_callback_task_create)( |
746 | &(current_task->ompt_task_info.task_data), |
747 | &(current_task->ompt_task_info.frame), |
748 | &(new_taskdata->ompt_task_info.task_data), |
749 | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1, |
750 | OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid)); |
751 | } |
752 | |
753 | new_taskdata->ompt_task_info.frame.enter_frame.ptr = |
754 | OMPT_GET_FRAME_ADDRESS(0); |
755 | } |
756 | |
757 | #if OMPT_OPTIONAL |
758 | /* OMPT grab all dependences if requested by the tool */ |
759 | if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) { |
760 | kmp_int32 i; |
761 | |
762 | int ompt_ndeps = ndeps + ndeps_noalias; |
763 | ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC( |
764 | thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t)); |
765 | |
766 | KMP_ASSERT(ompt_deps != NULL); |
767 | |
768 | for (i = 0; i < ndeps; i++) { |
769 | ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr; |
770 | if (dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX) |
771 | ompt_deps[i].dependence_type = ompt_dependence_type_out_all_memory; |
772 | else if (dep_list[i].flags.in && dep_list[i].flags.out) |
773 | ompt_deps[i].dependence_type = ompt_dependence_type_inout; |
774 | else if (dep_list[i].flags.out) |
775 | ompt_deps[i].dependence_type = ompt_dependence_type_out; |
776 | else if (dep_list[i].flags.in) |
777 | ompt_deps[i].dependence_type = ompt_dependence_type_in; |
778 | else if (dep_list[i].flags.mtx) |
779 | ompt_deps[i].dependence_type = ompt_dependence_type_mutexinoutset; |
780 | else if (dep_list[i].flags.set) |
781 | ompt_deps[i].dependence_type = ompt_dependence_type_inoutset; |
782 | else if (dep_list[i].flags.all) |
783 | ompt_deps[i].dependence_type = ompt_dependence_type_out_all_memory; |
784 | } |
785 | for (i = 0; i < ndeps_noalias; i++) { |
786 | ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr; |
787 | if (noalias_dep_list[i].base_addr == (kmp_intptr_t)KMP_SIZE_T_MAX) |
788 | ompt_deps[ndeps + i].dependence_type = |
789 | ompt_dependence_type_out_all_memory; |
790 | else if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out) |
791 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout; |
792 | else if (noalias_dep_list[i].flags.out) |
793 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out; |
794 | else if (noalias_dep_list[i].flags.in) |
795 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in; |
796 | else if (noalias_dep_list[i].flags.mtx) |
797 | ompt_deps[ndeps + i].dependence_type = |
798 | ompt_dependence_type_mutexinoutset; |
799 | else if (noalias_dep_list[i].flags.set) |
800 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset; |
801 | else if (noalias_dep_list[i].flags.all) |
802 | ompt_deps[ndeps + i].dependence_type = |
803 | ompt_dependence_type_out_all_memory; |
804 | } |
805 | ompt_callbacks.ompt_callback(ompt_callback_dependences)( |
806 | &(new_taskdata->ompt_task_info.task_data), ompt_deps, ompt_ndeps); |
807 | /* We can now free the allocated memory for the dependences */ |
808 | /* For OMPD we might want to delay the free until end of this function */ |
809 | KMP_OMPT_DEPS_FREE(thread, ompt_deps); |
810 | } |
811 | #endif /* OMPT_OPTIONAL */ |
812 | #endif /* OMPT_SUPPORT */ |
813 | |
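  // Dependences are not tracked for serialized tasks unless proxy or
  // hidden-helper tasks may still complete asynchronously in this team.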
814 | bool serial = current_task->td_flags.team_serial || |
815 | current_task->td_flags.tasking_ser || |
816 | current_task->td_flags.final; |
817 | kmp_task_team_t *task_team = thread->th.th_task_team; |
818 | serial = serial && |
819 | !(task_team && (task_team->tt.tt_found_proxy_tasks || |
820 | task_team->tt.tt_hidden_helper_task_encountered)); |
821 | |
822 | if (!serial && (ndeps > 0 || ndeps_noalias > 0)) { |
823 | /* if no dependences have been tracked yet, create the dependence hash */ |
824 | if (current_task->td_dephash == NULL) |
825 | current_task->td_dephash = __kmp_dephash_create(thread, current_task); |
826 | |
827 | #if USE_FAST_MEMORY |
828 | kmp_depnode_t *node = |
829 | (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t)); |
830 | #else |
831 | kmp_depnode_t *node = |
832 | (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t)); |
833 | #endif |
834 | |
835 | __kmp_init_node(node, /*on_stack=*/false); |
836 | new_taskdata->td_depnode = node; |
837 | |
    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
839 | NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias, |
840 | noalias_dep_list)) { |
841 | KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking " |
842 | "dependences: " |
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
844 | gtid, loc_ref, new_taskdata)); |
845 | #if OMPT_SUPPORT |
846 | if (ompt_enabled.enabled) { |
847 | current_task->ompt_task_info.frame.enter_frame = ompt_data_none; |
848 | } |
849 | #endif |
850 | return TASK_CURRENT_NOT_QUEUED; |
851 | } |
852 | } else { |
853 | KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependences " |
                  "for task (serialized) loc=%p task=%p\n",
855 | gtid, loc_ref, new_taskdata)); |
856 | } |
857 | |
858 | KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking " |
859 | "dependences : " |
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
861 | gtid, loc_ref, new_taskdata)); |
862 | |
  kmp_int32 ret = __kmp_omp_task(gtid, new_task, /*serialize_immediate=*/true);
864 | #if OMPT_SUPPORT |
865 | if (ompt_enabled.enabled) { |
866 | current_task->ompt_task_info.frame.enter_frame = ompt_data_none; |
867 | } |
868 | #endif |
869 | return ret; |
870 | } |
871 | |
872 | #if OMPT_SUPPORT |
873 | void __ompt_taskwait_dep_finish(kmp_taskdata_t *current_task, |
874 | ompt_data_t *taskwait_task_data) { |
875 | if (ompt_enabled.ompt_callback_task_schedule) { |
876 | ompt_callbacks.ompt_callback(ompt_callback_task_schedule)( |
877 | taskwait_task_data, ompt_taskwait_complete, NULL); |
878 | } |
879 | current_task->ompt_task_info.frame.enter_frame.ptr = NULL; |
880 | *taskwait_task_data = ompt_data_none; |
881 | } |
882 | #endif /* OMPT_SUPPORT */ |
883 | |
884 | /*! |
885 | @ingroup TASKING |
886 | @param loc_ref location of the original task directive |
887 | @param gtid Global Thread ID of encountering thread |
888 | @param ndeps Number of depend items with possible aliasing |
889 | @param dep_list List of depend items with possible aliasing |
890 | @param ndeps_noalias Number of depend items with no aliasing |
891 | @param noalias_dep_list List of depend items with no aliasing |
892 | |
Blocks the current task until all specified dependences have been fulfilled.
894 | */ |
895 | void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps, |
896 | kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias, |
897 | kmp_depend_info_t *noalias_dep_list) { |
898 | __kmpc_omp_taskwait_deps_51(loc_ref, gtid, ndeps, dep_list, ndeps_noalias, |
                              noalias_dep_list, /*has_no_wait=*/false);
900 | } |
901 | |
902 | /* __kmpc_omp_taskwait_deps_51 : Function for OpenMP 5.1 nowait clause. |
903 | Placeholder for taskwait with nowait clause. |
904 | Earlier code of __kmpc_omp_wait_deps() is now |
905 | in this function. |
906 | */ |
907 | void __kmpc_omp_taskwait_deps_51(ident_t *loc_ref, kmp_int32 gtid, |
908 | kmp_int32 ndeps, kmp_depend_info_t *dep_list, |
909 | kmp_int32 ndeps_noalias, |
910 | kmp_depend_info_t *noalias_dep_list, |
911 | kmp_int32 has_no_wait) { |
  KA_TRACE(10, ("__kmpc_omp_taskwait_deps(enter): T#%d loc=%p nowait#%d\n",
913 | gtid, loc_ref, has_no_wait)); |
914 | if (ndeps == 0 && ndeps_noalias == 0) { |
915 | KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no dependences to " |
                  "wait upon : loc=%p\n",
917 | gtid, loc_ref)); |
918 | return; |
919 | } |
920 | __kmp_assert_valid_gtid(gtid); |
921 | kmp_info_t *thread = __kmp_threads[gtid]; |
922 | kmp_taskdata_t *current_task = thread->th.th_current_task; |
923 | |
924 | #if OMPT_SUPPORT |
925 | // this function represents a taskwait construct with depend clause |
926 | // We signal 4 events: |
927 | // - creation of the taskwait task |
928 | // - dependences of the taskwait task |
929 | // - schedule and finish of the taskwait task |
930 | ompt_data_t *taskwait_task_data = &thread->th.ompt_thread_info.task_data; |
931 | KMP_ASSERT(taskwait_task_data->ptr == NULL); |
932 | if (ompt_enabled.enabled) { |
933 | if (!current_task->ompt_task_info.frame.enter_frame.ptr) |
934 | current_task->ompt_task_info.frame.enter_frame.ptr = |
935 | OMPT_GET_FRAME_ADDRESS(0); |
936 | if (ompt_enabled.ompt_callback_task_create) { |
937 | ompt_callbacks.ompt_callback(ompt_callback_task_create)( |
938 | &(current_task->ompt_task_info.task_data), |
939 | &(current_task->ompt_task_info.frame), taskwait_task_data, |
940 | ompt_task_taskwait | ompt_task_undeferred | ompt_task_mergeable, 1, |
941 | OMPT_LOAD_OR_GET_RETURN_ADDRESS(gtid)); |
942 | } |
943 | } |
944 | |
945 | #if OMPT_OPTIONAL |
946 | /* OMPT grab all dependences if requested by the tool */ |
947 | if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) { |
948 | kmp_int32 i; |
949 | |
950 | int ompt_ndeps = ndeps + ndeps_noalias; |
951 | ompt_dependence_t *ompt_deps = (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC( |
952 | thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t)); |
953 | |
954 | KMP_ASSERT(ompt_deps != NULL); |
955 | |
956 | for (i = 0; i < ndeps; i++) { |
957 | ompt_deps[i].variable.ptr = (void *)dep_list[i].base_addr; |
958 | if (dep_list[i].flags.in && dep_list[i].flags.out) |
959 | ompt_deps[i].dependence_type = ompt_dependence_type_inout; |
960 | else if (dep_list[i].flags.out) |
961 | ompt_deps[i].dependence_type = ompt_dependence_type_out; |
962 | else if (dep_list[i].flags.in) |
963 | ompt_deps[i].dependence_type = ompt_dependence_type_in; |
964 | else if (dep_list[i].flags.mtx) |
965 | ompt_deps[ndeps + i].dependence_type = |
966 | ompt_dependence_type_mutexinoutset; |
967 | else if (dep_list[i].flags.set) |
968 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset; |
969 | } |
970 | for (i = 0; i < ndeps_noalias; i++) { |
971 | ompt_deps[ndeps + i].variable.ptr = (void *)noalias_dep_list[i].base_addr; |
972 | if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out) |
973 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inout; |
974 | else if (noalias_dep_list[i].flags.out) |
975 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_out; |
976 | else if (noalias_dep_list[i].flags.in) |
977 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_in; |
978 | else if (noalias_dep_list[i].flags.mtx) |
979 | ompt_deps[ndeps + i].dependence_type = |
980 | ompt_dependence_type_mutexinoutset; |
981 | else if (noalias_dep_list[i].flags.set) |
982 | ompt_deps[ndeps + i].dependence_type = ompt_dependence_type_inoutset; |
983 | } |
984 | ompt_callbacks.ompt_callback(ompt_callback_dependences)( |
985 | taskwait_task_data, ompt_deps, ompt_ndeps); |
986 | /* We can now free the allocated memory for the dependences */ |
987 | /* For OMPD we might want to delay the free until end of this function */ |
988 | KMP_OMPT_DEPS_FREE(thread, ompt_deps); |
989 | ompt_deps = NULL; |
990 | } |
991 | #endif /* OMPT_OPTIONAL */ |
992 | #endif /* OMPT_SUPPORT */ |
993 | |
994 | // We can return immediately as: |
995 | // - dependences are not computed in serial teams (except with proxy tasks) |
996 | // - if the dephash is not yet created it means we have nothing to wait for |
997 | bool ignore = current_task->td_flags.team_serial || |
998 | current_task->td_flags.tasking_ser || |
999 | current_task->td_flags.final; |
1000 | ignore = |
1001 | ignore && thread->th.th_task_team != NULL && |
1002 | thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE && |
1003 | thread->th.th_task_team->tt.tt_hidden_helper_task_encountered == FALSE; |
1004 | ignore = ignore || current_task->td_dephash == NULL; |
1005 | |
1006 | if (ignore) { |
1007 | KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no blocking " |
                  "dependences : loc=%p\n",
1009 | gtid, loc_ref)); |
1010 | #if OMPT_SUPPORT |
1011 | __ompt_taskwait_dep_finish(current_task, taskwait_task_data); |
1012 | #endif /* OMPT_SUPPORT */ |
1013 | return; |
1014 | } |
1015 | |
  kmp_depnode_t node = {0};
  __kmp_init_node(&node, /*on_stack=*/true);
1018 | |
  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
1020 | DEP_BARRIER, ndeps, dep_list, ndeps_noalias, |
1021 | noalias_dep_list)) { |
1022 | KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d has no blocking " |
                  "dependences : loc=%p\n",
1024 | gtid, loc_ref)); |
1025 | #if OMPT_SUPPORT |
1026 | __ompt_taskwait_dep_finish(current_task, taskwait_task_data); |
1027 | #endif /* OMPT_SUPPORT */ |
1028 | |
1029 | // There may still be references to this node here, due to task stealing. |
1030 | // Wait for them to be released. |
1031 | kmp_int32 nrefs; |
1032 | while ((nrefs = node.dn.nrefs) > 3) { |
1033 | KMP_DEBUG_ASSERT((nrefs & 1) == 1); |
1034 | KMP_YIELD(TRUE); |
1035 | } |
1036 | KMP_DEBUG_ASSERT(nrefs == 3); |
1037 | |
1038 | return; |
1039 | } |
1040 | |
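  // Help execute other tasks while waiting until every predecessor of the
  // on-stack node has completed.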
1041 | int thread_finished = FALSE; |
1042 | kmp_flag_32<false, false> flag( |
1043 | (std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U); |
1044 | while (node.dn.npredecessors > 0) { |
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
1048 | } |
1049 | |
1050 | // Wait until the last __kmp_release_deps is finished before we free the |
1051 | // current stack frame holding the "node" variable; once its nrefs count |
1052 | // reaches 3 (meaning 1, since bit zero of the refcount indicates a stack |
1053 | // rather than a heap address), we're sure nobody else can try to reference |
1054 | // it again. |
1055 | kmp_int32 nrefs; |
1056 | while ((nrefs = node.dn.nrefs) > 3) { |
1057 | KMP_DEBUG_ASSERT((nrefs & 1) == 1); |
1058 | KMP_YIELD(TRUE); |
1059 | } |
1060 | KMP_DEBUG_ASSERT(nrefs == 3); |
1061 | |
1062 | #if OMPT_SUPPORT |
1063 | __ompt_taskwait_dep_finish(current_task, taskwait_task_data); |
1064 | #endif /* OMPT_SUPPORT */ |
  KA_TRACE(10, ("__kmpc_omp_taskwait_deps(exit): T#%d finished waiting : "
                "loc=%p\n",
                gtid, loc_ref));
1068 | } |
1069 | |