1 | /* |
2 | * kmp_gsupport.cpp |
3 | */ |
4 | |
5 | //===----------------------------------------------------------------------===// |
6 | // |
7 | // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions. |
8 | // See https://llvm.org/LICENSE.txt for license information. |
9 | // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception |
10 | // |
11 | //===----------------------------------------------------------------------===// |
12 | |
13 | #include "kmp.h" |
14 | #include "kmp_atomic.h" |
15 | #include "kmp_utils.h" |
16 | |
17 | #if OMPT_SUPPORT |
18 | #include "ompt-specific.h" |
19 | #endif |
20 | |
21 | enum { |
22 | KMP_GOMP_TASK_UNTIED_FLAG = 1, |
23 | KMP_GOMP_TASK_FINAL_FLAG = 2, |
24 | KMP_GOMP_TASK_DEPENDS_FLAG = 8 |
25 | }; |
26 | |
27 | enum { |
28 | KMP_GOMP_DEPOBJ_IN = 1, |
29 | KMP_GOMP_DEPOBJ_OUT = 2, |
30 | KMP_GOMP_DEPOBJ_INOUT = 3, |
31 | KMP_GOMP_DEPOBJ_MTXINOUTSET = 4 |
32 | }; |
33 | |
34 | // This class helps convert gomp dependency info into |
35 | // kmp_depend_info_t structures |
36 | class kmp_gomp_depends_info_t { |
37 | void **depend; |
38 | kmp_int32 num_deps; |
39 | size_t num_out, num_mutexinout, num_in, num_depobj; |
40 | size_t offset; |
41 | |
42 | public: |
43 | kmp_gomp_depends_info_t(void **depend) : depend(depend) { |
44 | size_t ndeps = (kmp_intptr_t)depend[0]; |
45 | // GOMP taskdep structure: |
46 | // if depend[0] != 0: |
47 | // depend = [ ndeps | nout | &out | ... | &out | &in | ... | &in ] |
48 | // |
49 | // if depend[0] == 0: |
50 | // depend = [ 0 | ndeps | nout | nmtx | nin | &out | ... | &out | &mtx | |
51 | // ... | &mtx | &in | ... | &in | &depobj | ... | &depobj ] |
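// For example (first layout), a task with "depend(out: a) depend(in: b, c)"
// would be passed roughly as depend = { (void *)3, (void *)1, &a, &b, &c },
// i.e. ndeps == 3 and nout == 1.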
52 | if (ndeps) { |
53 | num_out = (kmp_intptr_t)depend[1]; |
54 | num_in = ndeps - num_out; |
55 | num_mutexinout = num_depobj = 0; |
56 | offset = 2; |
57 | } else { |
58 | ndeps = (kmp_intptr_t)depend[1]; |
59 | num_out = (kmp_intptr_t)depend[2]; |
60 | num_mutexinout = (kmp_intptr_t)depend[3]; |
61 | num_in = (kmp_intptr_t)depend[4]; |
62 | num_depobj = ndeps - num_out - num_mutexinout - num_in; |
63 | KMP_ASSERT(num_depobj <= ndeps); |
64 | offset = 5; |
65 | } |
66 | num_deps = static_cast<kmp_int32>(ndeps); |
67 | } |
68 | kmp_int32 get_num_deps() const { return num_deps; } |
69 | kmp_depend_info_t get_kmp_depend(size_t index) const { |
70 | kmp_depend_info_t retval; |
memset(&retval, '\0', sizeof(retval));
72 | KMP_ASSERT(index < (size_t)num_deps); |
73 | retval.len = 0; |
74 | // Because inout and out are logically equivalent, |
75 | // use inout and in dependency flags. GOMP does not provide a |
76 | // way to distinguish if user specified out vs. inout. |
77 | if (index < num_out) { |
78 | retval.flags.in = 1; |
79 | retval.flags.out = 1; |
80 | retval.base_addr = (kmp_intptr_t)depend[offset + index]; |
81 | } else if (index >= num_out && index < (num_out + num_mutexinout)) { |
82 | retval.flags.mtx = 1; |
83 | retval.base_addr = (kmp_intptr_t)depend[offset + index]; |
84 | } else if (index >= (num_out + num_mutexinout) && |
85 | index < (num_out + num_mutexinout + num_in)) { |
86 | retval.flags.in = 1; |
87 | retval.base_addr = (kmp_intptr_t)depend[offset + index]; |
88 | } else { |
89 | // depobj is a two element array (size of elements are size of pointer) |
90 | // depobj[0] = base_addr |
91 | // depobj[1] = type (in, out, inout, mutexinoutset, etc.) |
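// For example, a depobj created from "depend(inout: x)" would arrive here
// with depobj[0] == (kmp_intptr_t)&x and depobj[1] == KMP_GOMP_DEPOBJ_INOUT.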
92 | kmp_intptr_t *depobj = (kmp_intptr_t *)depend[offset + index]; |
93 | retval.base_addr = depobj[0]; |
94 | switch (depobj[1]) { |
95 | case KMP_GOMP_DEPOBJ_IN: |
96 | retval.flags.in = 1; |
97 | break; |
98 | case KMP_GOMP_DEPOBJ_OUT: |
99 | retval.flags.out = 1; |
100 | break; |
101 | case KMP_GOMP_DEPOBJ_INOUT: |
102 | retval.flags.in = 1; |
103 | retval.flags.out = 1; |
104 | break; |
105 | case KMP_GOMP_DEPOBJ_MTXINOUTSET: |
106 | retval.flags.mtx = 1; |
107 | break; |
108 | default: |
KMP_FATAL(GompFeatureNotSupported, "Unknown depobj type");
110 | } |
111 | } |
112 | return retval; |
113 | } |
114 | }; |
115 | |
116 | #ifdef __cplusplus |
117 | extern "C" { |
118 | #endif // __cplusplus |
119 | |
120 | #define MKLOC(loc, routine) \ |
121 | static ident_t loc = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"}; |
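// For example, MKLOC(loc, "GOMP_barrier") defines a static ident_t named
// "loc" with an "unknown" source-location string, which is then passed to
// the corresponding __kmpc_* / __kmp_* entry points.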
122 | |
123 | #include "kmp_ftn_os.h" |
124 | |
125 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER)(void) { |
126 | int gtid = __kmp_entry_gtid(); |
127 | MKLOC(loc, "GOMP_barrier" ); |
128 | KA_TRACE(20, ("GOMP_barrier: T#%d\n" , gtid)); |
129 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
130 | ompt_frame_t *ompt_frame; |
131 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
133 | ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
134 | } |
135 | OMPT_STORE_RETURN_ADDRESS(gtid); |
136 | #endif |
__kmpc_barrier(&loc, gtid);
138 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
139 | if (ompt_enabled.enabled) { |
140 | ompt_frame->enter_frame = ompt_data_none; |
141 | } |
142 | #endif |
143 | } |
144 | |
145 | // Mutual exclusion |
146 | |
147 | // The symbol that icc/ifort generates for unnamed critical sections |
// - .gomp_critical_user_ - is defined using .comm in any object that references it.
149 | // We can't reference it directly here in C code, as the symbol contains a ".". |
150 | // |
151 | // The RTL contains an assembly language definition of .gomp_critical_user_ |
// with another symbol __kmp_unnamed_critical_addr initialized with its
153 | // address. |
154 | extern kmp_critical_name *__kmp_unnamed_critical_addr; |
155 | |
156 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_START)(void) { |
157 | int gtid = __kmp_entry_gtid(); |
158 | MKLOC(loc, "GOMP_critical_start" ); |
159 | KA_TRACE(20, ("GOMP_critical_start: T#%d\n" , gtid)); |
160 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
161 | OMPT_STORE_RETURN_ADDRESS(gtid); |
162 | #endif |
__kmpc_critical(&loc, gtid, __kmp_unnamed_critical_addr);
164 | } |
165 | |
166 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_END)(void) { |
167 | int gtid = __kmp_get_gtid(); |
168 | MKLOC(loc, "GOMP_critical_end" ); |
169 | KA_TRACE(20, ("GOMP_critical_end: T#%d\n" , gtid)); |
170 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
171 | OMPT_STORE_RETURN_ADDRESS(gtid); |
172 | #endif |
__kmpc_end_critical(&loc, gtid, __kmp_unnamed_critical_addr);
174 | } |
175 | |
176 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_START)(void **pptr) { |
177 | int gtid = __kmp_entry_gtid(); |
178 | MKLOC(loc, "GOMP_critical_name_start" ); |
179 | KA_TRACE(20, ("GOMP_critical_name_start: T#%d\n" , gtid)); |
180 | __kmpc_critical(&loc, global_tid: gtid, (kmp_critical_name *)pptr); |
181 | } |
182 | |
183 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CRITICAL_NAME_END)(void **pptr) { |
184 | int gtid = __kmp_get_gtid(); |
185 | MKLOC(loc, "GOMP_critical_name_end" ); |
186 | KA_TRACE(20, ("GOMP_critical_name_end: T#%d\n" , gtid)); |
187 | __kmpc_end_critical(&loc, global_tid: gtid, (kmp_critical_name *)pptr); |
188 | } |
189 | |
190 | // The Gnu codegen tries to use locked operations to perform atomic updates |
191 | // inline. If it can't, then it calls GOMP_atomic_start() before performing |
192 | // the update and GOMP_atomic_end() afterward, regardless of the data type. |
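// For example, an atomic update on a type with no suitable locked
// instruction would be emitted roughly as:
//   GOMP_atomic_start(); x += y; GOMP_atomic_end();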
193 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_START)(void) { |
194 | int gtid = __kmp_entry_gtid(); |
195 | KA_TRACE(20, ("GOMP_atomic_start: T#%d\n" , gtid)); |
196 | |
197 | #if OMPT_SUPPORT |
__ompt_thread_assign_wait_id(0);
199 | #endif |
200 | |
__kmp_acquire_atomic_lock(&__kmp_atomic_lock, gtid);
202 | } |
203 | |
204 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ATOMIC_END)(void) { |
205 | int gtid = __kmp_get_gtid(); |
206 | KA_TRACE(20, ("GOMP_atomic_end: T#%d\n" , gtid)); |
207 | __kmp_release_atomic_lock(lck: &__kmp_atomic_lock, gtid); |
208 | } |
209 | |
210 | int KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_START)(void) { |
211 | int gtid = __kmp_entry_gtid(); |
212 | MKLOC(loc, "GOMP_single_start" ); |
213 | KA_TRACE(20, ("GOMP_single_start: T#%d\n" , gtid)); |
214 | |
215 | if (!TCR_4(__kmp_init_parallel)) |
216 | __kmp_parallel_initialize(); |
217 | __kmp_resume_if_soft_paused(); |
218 | |
219 | // 3rd parameter == FALSE prevents kmp_enter_single from pushing a |
220 | // workshare when USE_CHECKS is defined. We need to avoid the push, |
221 | // as there is no corresponding GOMP_single_end() call. |
kmp_int32 rc = __kmp_enter_single(gtid, &loc, FALSE);
223 | |
224 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
225 | kmp_info_t *this_thr = __kmp_threads[gtid]; |
226 | kmp_team_t *team = this_thr->th.th_team; |
227 | int tid = __kmp_tid_from_gtid(gtid); |
228 | |
229 | if (ompt_enabled.enabled) { |
230 | if (rc) { |
231 | if (ompt_enabled.ompt_callback_work) { |
232 | ompt_callbacks.ompt_callback(ompt_callback_work)( |
233 | ompt_work_single_executor, ompt_scope_begin, |
234 | &(team->t.ompt_team_info.parallel_data), |
235 | &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), |
236 | 1, OMPT_GET_RETURN_ADDRESS(0)); |
237 | } |
238 | } else { |
239 | if (ompt_enabled.ompt_callback_work) { |
240 | ompt_callbacks.ompt_callback(ompt_callback_work)( |
241 | ompt_work_single_other, ompt_scope_begin, |
242 | &(team->t.ompt_team_info.parallel_data), |
243 | &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), |
244 | 1, OMPT_GET_RETURN_ADDRESS(0)); |
245 | ompt_callbacks.ompt_callback(ompt_callback_work)( |
246 | ompt_work_single_other, ompt_scope_end, |
247 | &(team->t.ompt_team_info.parallel_data), |
248 | &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data), |
249 | 1, OMPT_GET_RETURN_ADDRESS(0)); |
250 | } |
251 | } |
252 | } |
253 | #endif |
254 | |
255 | return rc; |
256 | } |
257 | |
258 | void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_START)(void) { |
259 | void *retval; |
260 | int gtid = __kmp_entry_gtid(); |
261 | MKLOC(loc, "GOMP_single_copy_start" ); |
262 | KA_TRACE(20, ("GOMP_single_copy_start: T#%d\n" , gtid)); |
263 | |
264 | if (!TCR_4(__kmp_init_parallel)) |
265 | __kmp_parallel_initialize(); |
266 | __kmp_resume_if_soft_paused(); |
267 | |
268 | // If this is the first thread to enter, return NULL. The generated code will |
269 | // then call GOMP_single_copy_end() for this thread only, with the |
270 | // copyprivate data pointer as an argument. |
if (__kmp_enter_single(gtid, &loc, FALSE))
272 | return NULL; |
273 | |
274 | // Wait for the first thread to set the copyprivate data pointer, |
275 | // and for all other threads to reach this point. |
276 | |
277 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
278 | ompt_frame_t *ompt_frame; |
279 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
281 | ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
282 | } |
283 | OMPT_STORE_RETURN_ADDRESS(gtid); |
284 | #endif |
__kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
286 | |
// Retrieve the value of the copyprivate data pointer, and wait for all
288 | // threads to do likewise, then return. |
289 | retval = __kmp_team_from_gtid(gtid)->t.t_copypriv_data; |
290 | { |
291 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
292 | OMPT_STORE_RETURN_ADDRESS(gtid); |
293 | #endif |
__kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
295 | } |
296 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
297 | if (ompt_enabled.enabled) { |
298 | ompt_frame->enter_frame = ompt_data_none; |
299 | } |
300 | #endif |
301 | return retval; |
302 | } |
303 | |
304 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SINGLE_COPY_END)(void *data) { |
305 | int gtid = __kmp_get_gtid(); |
306 | KA_TRACE(20, ("GOMP_single_copy_end: T#%d\n" , gtid)); |
307 | |
// Set the copyprivate data pointer for the team, then hit the barrier so
// that the other threads will continue on and read it. Hit another barrier
// before continuing, so that we know the copyprivate data pointer has been
// propagated to all threads before trying to reuse the t_copypriv_data field.
312 | __kmp_team_from_gtid(gtid)->t.t_copypriv_data = data; |
313 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
314 | ompt_frame_t *ompt_frame; |
315 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
317 | ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
318 | } |
319 | OMPT_STORE_RETURN_ADDRESS(gtid); |
320 | #endif |
__kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
322 | { |
323 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
324 | OMPT_STORE_RETURN_ADDRESS(gtid); |
325 | #endif |
__kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
327 | } |
328 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
329 | if (ompt_enabled.enabled) { |
330 | ompt_frame->enter_frame = ompt_data_none; |
331 | } |
332 | #endif |
333 | } |
334 | |
335 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_START)(void) { |
336 | int gtid = __kmp_entry_gtid(); |
337 | MKLOC(loc, "GOMP_ordered_start" ); |
338 | KA_TRACE(20, ("GOMP_ordered_start: T#%d\n" , gtid)); |
339 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
340 | OMPT_STORE_RETURN_ADDRESS(gtid); |
341 | #endif |
__kmpc_ordered(&loc, gtid);
343 | } |
344 | |
345 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ORDERED_END)(void) { |
346 | int gtid = __kmp_get_gtid(); |
347 | MKLOC(loc, "GOMP_ordered_end" ); |
348 | KA_TRACE(20, ("GOMP_ordered_start: T#%d\n" , gtid)); |
349 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
350 | OMPT_STORE_RETURN_ADDRESS(gtid); |
351 | #endif |
__kmpc_end_ordered(&loc, gtid);
353 | } |
354 | |
355 | // Dispatch macro defs |
356 | // |
357 | // They come in two flavors: 64-bit unsigned, and either 32-bit signed |
358 | // (IA-32 architecture) or 64-bit signed (Intel(R) 64). |
359 | |
360 | #if KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_MIPS || KMP_ARCH_WASM || \ |
361 | KMP_ARCH_PPC || KMP_ARCH_AARCH64_32 |
362 | #define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_4 |
363 | #define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_4 |
364 | #define KMP_DISPATCH_NEXT __kmpc_dispatch_next_4 |
365 | #else |
366 | #define KMP_DISPATCH_INIT __kmp_aux_dispatch_init_8 |
367 | #define KMP_DISPATCH_FINI_CHUNK __kmp_aux_dispatch_fini_chunk_8 |
368 | #define KMP_DISPATCH_NEXT __kmpc_dispatch_next_8 |
369 | #endif /* KMP_ARCH_X86 */ |
370 | |
371 | #define KMP_DISPATCH_INIT_ULL __kmp_aux_dispatch_init_8u |
372 | #define KMP_DISPATCH_FINI_CHUNK_ULL __kmp_aux_dispatch_fini_chunk_8u |
373 | #define KMP_DISPATCH_NEXT_ULL __kmpc_dispatch_next_8u |
374 | |
375 | // The parallel construct |
376 | |
377 | #ifndef KMP_DEBUG |
378 | static |
379 | #endif /* KMP_DEBUG */ |
380 | void |
381 | __kmp_GOMP_microtask_wrapper(int *gtid, int *npr, void (*task)(void *), |
382 | void *data) { |
383 | #if OMPT_SUPPORT |
384 | kmp_info_t *thr; |
385 | ompt_frame_t *ompt_frame; |
386 | ompt_state_t enclosing_state; |
387 | |
388 | if (ompt_enabled.enabled) { |
389 | // get pointer to thread data structure |
390 | thr = __kmp_threads[*gtid]; |
391 | |
392 | // save enclosing task state; set current state for task |
393 | enclosing_state = thr->th.ompt_thread_info.state; |
394 | thr->th.ompt_thread_info.state = ompt_state_work_parallel; |
395 | |
396 | // set task frame |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
398 | ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
399 | } |
400 | #endif |
401 | |
402 | task(data); |
403 | |
404 | #if OMPT_SUPPORT |
405 | if (ompt_enabled.enabled) { |
406 | // clear task frame |
407 | ompt_frame->exit_frame = ompt_data_none; |
408 | |
409 | // restore enclosing state |
410 | thr->th.ompt_thread_info.state = enclosing_state; |
411 | } |
412 | #endif |
413 | } |
414 | |
415 | #ifndef KMP_DEBUG |
416 | static |
417 | #endif /* KMP_DEBUG */ |
418 | void |
419 | __kmp_GOMP_parallel_microtask_wrapper(int *gtid, int *npr, |
420 | void (*task)(void *), void *data, |
421 | unsigned num_threads, ident_t *loc, |
422 | enum sched_type schedule, long start, |
423 | long end, long incr, |
424 | long chunk_size) { |
425 | // Initialize the loop worksharing construct. |
426 | |
KMP_DISPATCH_INIT(loc, *gtid, schedule, start, end, incr, chunk_size,
schedule != kmp_sch_static);
429 | |
430 | #if OMPT_SUPPORT |
431 | kmp_info_t *thr; |
432 | ompt_frame_t *ompt_frame; |
433 | ompt_state_t enclosing_state; |
434 | |
435 | if (ompt_enabled.enabled) { |
436 | thr = __kmp_threads[*gtid]; |
437 | // save enclosing task state; set current state for task |
438 | enclosing_state = thr->th.ompt_thread_info.state; |
439 | thr->th.ompt_thread_info.state = ompt_state_work_parallel; |
440 | |
441 | // set task frame |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
443 | ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
444 | } |
445 | #endif |
446 | |
447 | // Now invoke the microtask. |
448 | task(data); |
449 | |
450 | #if OMPT_SUPPORT |
451 | if (ompt_enabled.enabled) { |
452 | // clear task frame |
453 | ompt_frame->exit_frame = ompt_data_none; |
454 | |
455 | // reset enclosing state |
456 | thr->th.ompt_thread_info.state = enclosing_state; |
457 | } |
458 | #endif |
459 | } |
460 | |
461 | static void __kmp_GOMP_fork_call(ident_t *loc, int gtid, unsigned num_threads, |
462 | unsigned flags, void (*unwrapped_task)(void *), |
463 | microtask_t wrapper, int argc, ...) { |
464 | int rc; |
465 | kmp_info_t *thr = __kmp_threads[gtid]; |
466 | kmp_team_t *team = thr->th.th_team; |
467 | int tid = __kmp_tid_from_gtid(gtid); |
468 | |
469 | va_list ap; |
470 | va_start(ap, argc); |
471 | |
472 | if (num_threads != 0) |
473 | __kmp_push_num_threads(loc, gtid, num_threads); |
474 | if (flags != 0) |
__kmp_push_proc_bind(loc, gtid, (kmp_proc_bind_t)flags);
rc = __kmp_fork_call(loc, gtid, fork_context_gnu, argc, wrapper,
__kmp_invoke_task_func, kmp_va_addr_of(ap));
478 | |
479 | va_end(ap); |
480 | |
481 | if (rc) { |
__kmp_run_before_invoked_task(gtid, tid, thr, team);
483 | } |
484 | |
485 | #if OMPT_SUPPORT |
486 | int ompt_team_size; |
487 | if (ompt_enabled.enabled) { |
ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
490 | |
491 | // implicit task callback |
492 | if (ompt_enabled.ompt_callback_implicit_task) { |
493 | ompt_team_size = __kmp_team_from_gtid(gtid)->t.t_nproc; |
494 | ompt_callbacks.ompt_callback(ompt_callback_implicit_task)( |
495 | ompt_scope_begin, &(team_info->parallel_data), |
496 | &(task_info->task_data), ompt_team_size, __kmp_tid_from_gtid(gtid), |
497 | ompt_task_implicit); // TODO: Can this be ompt_task_initial? |
498 | task_info->thread_num = __kmp_tid_from_gtid(gtid); |
499 | } |
500 | thr->th.ompt_thread_info.state = ompt_state_work_parallel; |
501 | } |
502 | #endif |
503 | } |
504 | |
505 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_START)(void (*task)(void *), |
506 | void *data, |
507 | unsigned num_threads) { |
508 | int gtid = __kmp_entry_gtid(); |
509 | |
510 | #if OMPT_SUPPORT |
511 | ompt_frame_t *parent_frame, *frame; |
512 | |
513 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
515 | parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
516 | } |
517 | OMPT_STORE_RETURN_ADDRESS(gtid); |
518 | #endif |
519 | |
520 | MKLOC(loc, "GOMP_parallel_start" ); |
521 | KA_TRACE(20, ("GOMP_parallel_start: T#%d\n" , gtid)); |
522 | __kmp_GOMP_fork_call(loc: &loc, gtid, num_threads, flags: 0u, unwrapped_task: task, |
523 | wrapper: (microtask_t)__kmp_GOMP_microtask_wrapper, argc: 2, task, |
524 | data); |
525 | #if OMPT_SUPPORT |
526 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &frame, NULL, NULL);
528 | frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
529 | } |
530 | #endif |
531 | #if OMPD_SUPPORT |
532 | if (ompd_state & OMPD_ENABLE_BP) |
533 | ompd_bp_parallel_begin(); |
534 | #endif |
535 | } |
536 | |
537 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(void) { |
538 | int gtid = __kmp_get_gtid(); |
539 | kmp_info_t *thr; |
540 | |
541 | thr = __kmp_threads[gtid]; |
542 | |
543 | MKLOC(loc, "GOMP_parallel_end" ); |
544 | KA_TRACE(20, ("GOMP_parallel_end: T#%d\n" , gtid)); |
545 | |
546 | if (!thr->th.th_team->t.t_serialized) { |
__kmp_run_after_invoked_task(gtid, __kmp_tid_from_gtid(gtid), thr,
thr->th.th_team);
549 | } |
550 | #if OMPT_SUPPORT |
551 | if (ompt_enabled.enabled) { |
// The implicit task is finished here; in the barrier we might schedule
// deferred tasks, and these don't see the implicit task on the stack.
555 | OMPT_CUR_TASK_INFO(thr)->frame.exit_frame = ompt_data_none; |
556 | } |
557 | #endif |
558 | |
__kmp_join_call(&loc, gtid
#if OMPT_SUPPORT
,
fork_context_gnu
563 | #endif |
564 | ); |
565 | #if OMPD_SUPPORT |
566 | if (ompd_state & OMPD_ENABLE_BP) |
567 | ompd_bp_parallel_end(); |
568 | #endif |
569 | } |
570 | |
571 | // Loop worksharing constructs |
572 | |
573 | // The Gnu codegen passes in an exclusive upper bound for the overall range, |
574 | // but the libguide dispatch code expects an inclusive upper bound, hence the |
575 | // "end - incr" 5th argument to KMP_DISPATCH_INIT (and the " ub - str" 11th |
576 | // argument to __kmp_GOMP_fork_call). |
577 | // |
// Conversely, KMP_DISPATCH_NEXT returns an inclusive upper bound in *p_ub,
579 | // but the Gnu codegen expects an exclusive upper bound, so the adjustment |
580 | // "*p_ub += stride" compensates for the discrepancy. |
581 | // |
582 | // Correction: the gnu codegen always adjusts the upper bound by +-1, not the |
583 | // stride value. We adjust the dispatch parameters accordingly (by +-1), but |
584 | // we still adjust p_ub by the actual stride value. |
585 | // |
586 | // The "runtime" versions do not take a chunk_sz parameter. |
587 | // |
588 | // The profile lib cannot support construct checking of unordered loops that |
589 | // are predetermined by the compiler to be statically scheduled, as the gcc |
590 | // codegen will not always emit calls to GOMP_loop_static_next() to get the |
// next iteration. Instead, it emits inline code to call omp_get_thread_num()
// and calculate the iteration space using the result. It doesn't do this
// with ordered static loops, so they can be checked.
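// As a concrete illustration of the bound conversion: for a loop like
// "for (i = 0; i < 10; i++)", gcc would call the GOMP_loop_*_start entry
// with lb == 0, ub == 10 (exclusive) and str == 1; we pass ub - 1 == 9
// (inclusive) to KMP_DISPATCH_INIT, and when KMP_DISPATCH_NEXT hands back
// an inclusive *p_ub (say 4), the "*p_ub += 1" adjustment returns 5
// (exclusive) to the caller.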
594 | |
595 | #if OMPT_SUPPORT |
596 | #define IF_OMPT_SUPPORT(code) code |
597 | #else |
598 | #define IF_OMPT_SUPPORT(code) |
599 | #endif |
600 | |
601 | #define LOOP_START(func, schedule) \ |
602 | int func(long lb, long ub, long str, long chunk_sz, long *p_lb, \ |
603 | long *p_ub) { \ |
604 | int status; \ |
605 | long stride; \ |
606 | int gtid = __kmp_entry_gtid(); \ |
607 | MKLOC(loc, KMP_STR(func)); \ |
608 | KA_TRACE( \ |
609 | 20, \ |
610 | (KMP_STR( \ |
611 | func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ |
612 | gtid, lb, ub, str, chunk_sz)); \ |
613 | \ |
614 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
615 | { \ |
616 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
617 | KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ |
618 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
619 | (schedule) != kmp_sch_static); \ |
620 | } \ |
621 | { \ |
622 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
623 | status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ |
624 | (kmp_int *)p_ub, (kmp_int *)&stride); \ |
625 | } \ |
626 | if (status) { \ |
627 | KMP_DEBUG_ASSERT(stride == str); \ |
628 | *p_ub += (str > 0) ? 1 : -1; \ |
629 | } \ |
630 | } else { \ |
631 | status = 0; \ |
632 | } \ |
633 | \ |
634 | KA_TRACE( \ |
635 | 20, \ |
636 | (KMP_STR( \ |
637 | func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ |
638 | gtid, *p_lb, *p_ub, status)); \ |
639 | return status; \ |
640 | } |
641 | |
642 | #define LOOP_RUNTIME_START(func, schedule) \ |
643 | int func(long lb, long ub, long str, long *p_lb, long *p_ub) { \ |
644 | int status; \ |
645 | long stride; \ |
646 | long chunk_sz = 0; \ |
647 | int gtid = __kmp_entry_gtid(); \ |
648 | MKLOC(loc, KMP_STR(func)); \ |
649 | KA_TRACE( \ |
650 | 20, \ |
651 | (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \ |
652 | gtid, lb, ub, str, chunk_sz)); \ |
653 | \ |
654 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
655 | { \ |
656 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
657 | KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ |
658 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
659 | TRUE); \ |
660 | } \ |
661 | { \ |
662 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
663 | status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ |
664 | (kmp_int *)p_ub, (kmp_int *)&stride); \ |
665 | } \ |
666 | if (status) { \ |
667 | KMP_DEBUG_ASSERT(stride == str); \ |
668 | *p_ub += (str > 0) ? 1 : -1; \ |
669 | } \ |
670 | } else { \ |
671 | status = 0; \ |
672 | } \ |
673 | \ |
674 | KA_TRACE( \ |
675 | 20, \ |
676 | (KMP_STR( \ |
677 | func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ |
678 | gtid, *p_lb, *p_ub, status)); \ |
679 | return status; \ |
680 | } |
681 | |
682 | #define KMP_DOACROSS_FINI(status, gtid) \ |
683 | if (!status && __kmp_threads[gtid]->th.th_dispatch->th_doacross_flags) { \ |
684 | __kmpc_doacross_fini(NULL, gtid); \ |
685 | } |
686 | |
687 | #define LOOP_NEXT(func, fini_code) \ |
688 | int func(long *p_lb, long *p_ub) { \ |
689 | int status; \ |
690 | long stride; \ |
691 | int gtid = __kmp_get_gtid(); \ |
692 | MKLOC(loc, KMP_STR(func)); \ |
693 | KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \ |
694 | \ |
695 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
696 | fini_code status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ |
697 | (kmp_int *)p_ub, (kmp_int *)&stride); \ |
698 | if (status) { \ |
699 | *p_ub += (stride > 0) ? 1 : -1; \ |
700 | } \ |
701 | KMP_DOACROSS_FINI(status, gtid) \ |
702 | \ |
703 | KA_TRACE( \ |
704 | 20, \ |
705 | (KMP_STR(func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, stride 0x%lx, " \ |
706 | "returning %d\n", \ |
707 | gtid, *p_lb, *p_ub, stride, status)); \ |
708 | return status; \ |
709 | } |
710 | |
711 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START), kmp_sch_static) |
712 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT), {}) |
713 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START), |
714 | kmp_sch_dynamic_chunked) |
715 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START), |
716 | kmp_sch_dynamic_chunked) |
717 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT), {}) |
718 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT), {}) |
719 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START), |
720 | kmp_sch_guided_chunked) |
721 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START), |
722 | kmp_sch_guided_chunked) |
723 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT), {}) |
724 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT), {}) |
725 | LOOP_RUNTIME_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START), |
726 | kmp_sch_runtime) |
727 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT), {}) |
728 | LOOP_RUNTIME_START( |
729 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START), |
730 | kmp_sch_runtime) |
731 | LOOP_RUNTIME_START( |
732 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START), |
733 | kmp_sch_runtime) |
734 | LOOP_NEXT( |
735 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT), {}) |
736 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT), {}) |
737 | |
738 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START), |
739 | kmp_ord_static) |
740 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT), |
741 | { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) |
742 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START), |
743 | kmp_ord_dynamic_chunked) |
744 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT), |
745 | { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) |
746 | LOOP_START(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START), |
747 | kmp_ord_guided_chunked) |
748 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT), |
749 | { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) |
750 | LOOP_RUNTIME_START( |
751 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START), |
752 | kmp_ord_runtime) |
753 | LOOP_NEXT(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT), |
754 | { KMP_DISPATCH_FINI_CHUNK(&loc, gtid); }) |
755 | |
756 | #define LOOP_DOACROSS_START(func, schedule) \ |
757 | bool func(unsigned ncounts, long *counts, long chunk_sz, long *p_lb, \ |
758 | long *p_ub) { \ |
759 | int status; \ |
760 | long stride, lb, ub, str; \ |
761 | int gtid = __kmp_entry_gtid(); \ |
762 | struct kmp_dim *dims = \ |
763 | (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ |
764 | MKLOC(loc, KMP_STR(func)); \ |
765 | for (unsigned i = 0; i < ncounts; ++i) { \ |
766 | dims[i].lo = 0; \ |
767 | dims[i].up = counts[i] - 1; \ |
768 | dims[i].st = 1; \ |
769 | } \ |
770 | __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ |
771 | lb = 0; \ |
772 | ub = counts[0]; \ |
773 | str = 1; \ |
774 | KA_TRACE(20, (KMP_STR(func) ": T#%d, ncounts %u, lb 0x%lx, ub 0x%lx, str " \ |
775 | "0x%lx, chunk_sz " \ |
776 | "0x%lx\n", \ |
777 | gtid, ncounts, lb, ub, str, chunk_sz)); \ |
778 | \ |
779 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
780 | KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ |
781 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
782 | (schedule) != kmp_sch_static); \ |
783 | status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ |
784 | (kmp_int *)p_ub, (kmp_int *)&stride); \ |
785 | if (status) { \ |
786 | KMP_DEBUG_ASSERT(stride == str); \ |
787 | *p_ub += (str > 0) ? 1 : -1; \ |
788 | } \ |
789 | } else { \ |
790 | status = 0; \ |
791 | } \ |
792 | KMP_DOACROSS_FINI(status, gtid); \ |
793 | \ |
794 | KA_TRACE( \ |
795 | 20, \ |
796 | (KMP_STR( \ |
797 | func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ |
798 | gtid, *p_lb, *p_ub, status)); \ |
799 | __kmp_free(dims); \ |
800 | return status; \ |
801 | } |
802 | |
803 | #define LOOP_DOACROSS_RUNTIME_START(func, schedule) \ |
804 | int func(unsigned ncounts, long *counts, long *p_lb, long *p_ub) { \ |
805 | int status; \ |
806 | long stride, lb, ub, str; \ |
807 | long chunk_sz = 0; \ |
808 | int gtid = __kmp_entry_gtid(); \ |
809 | struct kmp_dim *dims = \ |
810 | (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ |
811 | MKLOC(loc, KMP_STR(func)); \ |
812 | for (unsigned i = 0; i < ncounts; ++i) { \ |
813 | dims[i].lo = 0; \ |
814 | dims[i].up = counts[i] - 1; \ |
815 | dims[i].st = 1; \ |
816 | } \ |
817 | __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ |
818 | lb = 0; \ |
819 | ub = counts[0]; \ |
820 | str = 1; \ |
821 | KA_TRACE( \ |
822 | 20, \ |
823 | (KMP_STR(func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz %d\n", \ |
824 | gtid, lb, ub, str, chunk_sz)); \ |
825 | \ |
826 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
827 | KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ |
828 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, TRUE); \ |
829 | status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, (kmp_int *)p_lb, \ |
830 | (kmp_int *)p_ub, (kmp_int *)&stride); \ |
831 | if (status) { \ |
832 | KMP_DEBUG_ASSERT(stride == str); \ |
833 | *p_ub += (str > 0) ? 1 : -1; \ |
834 | } \ |
835 | } else { \ |
836 | status = 0; \ |
837 | } \ |
838 | KMP_DOACROSS_FINI(status, gtid); \ |
839 | \ |
840 | KA_TRACE( \ |
841 | 20, \ |
842 | (KMP_STR( \ |
843 | func) " exit: T#%d, *p_lb 0x%lx, *p_ub 0x%lx, returning %d\n", \ |
844 | gtid, *p_lb, *p_ub, status)); \ |
845 | __kmp_free(dims); \ |
846 | return status; \ |
847 | } |
848 | |
849 | LOOP_DOACROSS_START( |
850 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START), |
851 | kmp_sch_static) |
852 | LOOP_DOACROSS_START( |
853 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START), |
854 | kmp_sch_dynamic_chunked) |
855 | LOOP_DOACROSS_START( |
856 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START), |
857 | kmp_sch_guided_chunked) |
858 | LOOP_DOACROSS_RUNTIME_START( |
859 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START), |
860 | kmp_sch_runtime) |
861 | |
862 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END)(void) { |
863 | int gtid = __kmp_get_gtid(); |
864 | KA_TRACE(20, ("GOMP_loop_end: T#%d\n" , gtid)) |
865 | |
866 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
867 | ompt_frame_t *ompt_frame; |
868 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
870 | ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
871 | OMPT_STORE_RETURN_ADDRESS(gtid); |
872 | } |
873 | #endif |
__kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
875 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
876 | if (ompt_enabled.enabled) { |
877 | ompt_frame->enter_frame = ompt_data_none; |
878 | } |
879 | #endif |
880 | |
881 | KA_TRACE(20, ("GOMP_loop_end exit: T#%d\n" , gtid)) |
882 | } |
883 | |
884 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_NOWAIT)(void) { |
885 | KA_TRACE(20, ("GOMP_loop_end_nowait: T#%d\n" , __kmp_get_gtid())) |
886 | } |
887 | |
888 | // Unsigned long long loop worksharing constructs |
889 | // |
890 | // These are new with gcc 4.4 |
891 | |
892 | #define LOOP_START_ULL(func, schedule) \ |
893 | int func(int up, unsigned long long lb, unsigned long long ub, \ |
894 | unsigned long long str, unsigned long long chunk_sz, \ |
895 | unsigned long long *p_lb, unsigned long long *p_ub) { \ |
896 | int status; \ |
897 | long long str2 = up ? ((long long)str) : -((long long)str); \ |
898 | long long stride; \ |
899 | int gtid = __kmp_entry_gtid(); \ |
900 | MKLOC(loc, KMP_STR(func)); \ |
901 | \ |
902 | KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \ |
903 | "0x%llx, chunk_sz 0x%llx\n", \ |
904 | gtid, up, lb, ub, str, chunk_sz)); \ |
905 | \ |
906 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
907 | KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ |
908 | (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \ |
909 | (schedule) != kmp_sch_static); \ |
910 | status = \ |
911 | KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ |
912 | (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ |
913 | if (status) { \ |
914 | KMP_DEBUG_ASSERT(stride == str2); \ |
915 | *p_ub += (str > 0) ? 1 : -1; \ |
916 | } \ |
917 | } else { \ |
918 | status = 0; \ |
919 | } \ |
920 | \ |
921 | KA_TRACE( \ |
922 | 20, \ |
923 | (KMP_STR( \ |
924 | func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ |
925 | gtid, *p_lb, *p_ub, status)); \ |
926 | return status; \ |
927 | } |
928 | |
929 | #define LOOP_RUNTIME_START_ULL(func, schedule) \ |
930 | int func(int up, unsigned long long lb, unsigned long long ub, \ |
931 | unsigned long long str, unsigned long long *p_lb, \ |
932 | unsigned long long *p_ub) { \ |
933 | int status; \ |
934 | long long str2 = up ? ((long long)str) : -((long long)str); \ |
935 | unsigned long long stride; \ |
936 | unsigned long long chunk_sz = 0; \ |
937 | int gtid = __kmp_entry_gtid(); \ |
938 | MKLOC(loc, KMP_STR(func)); \ |
939 | \ |
940 | KA_TRACE(20, (KMP_STR(func) ": T#%d, up %d, lb 0x%llx, ub 0x%llx, str " \ |
941 | "0x%llx, chunk_sz 0x%llx\n", \ |
942 | gtid, up, lb, ub, str, chunk_sz)); \ |
943 | \ |
944 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
945 | KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ |
946 | (str2 > 0) ? (ub - 1) : (ub + 1), str2, chunk_sz, \ |
947 | TRUE); \ |
948 | status = \ |
949 | KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ |
950 | (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ |
951 | if (status) { \ |
952 | KMP_DEBUG_ASSERT((long long)stride == str2); \ |
953 | *p_ub += (str > 0) ? 1 : -1; \ |
954 | } \ |
955 | } else { \ |
956 | status = 0; \ |
957 | } \ |
958 | \ |
959 | KA_TRACE( \ |
960 | 20, \ |
961 | (KMP_STR( \ |
962 | func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ |
963 | gtid, *p_lb, *p_ub, status)); \ |
964 | return status; \ |
965 | } |
966 | |
967 | #define LOOP_NEXT_ULL(func, fini_code) \ |
968 | int func(unsigned long long *p_lb, unsigned long long *p_ub) { \ |
969 | int status; \ |
970 | long long stride; \ |
971 | int gtid = __kmp_get_gtid(); \ |
972 | MKLOC(loc, KMP_STR(func)); \ |
973 | KA_TRACE(20, (KMP_STR(func) ": T#%d\n", gtid)); \ |
974 | \ |
975 | fini_code status = \ |
976 | KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ |
977 | (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ |
978 | if (status) { \ |
979 | *p_ub += (stride > 0) ? 1 : -1; \ |
980 | } \ |
981 | \ |
982 | KA_TRACE( \ |
983 | 20, \ |
984 | (KMP_STR( \ |
985 | func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, stride 0x%llx, " \ |
986 | "returning %d\n", \ |
987 | gtid, *p_lb, *p_ub, stride, status)); \ |
988 | return status; \ |
989 | } |
990 | |
991 | LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START), |
992 | kmp_sch_static) |
993 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT), {}) |
994 | LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START), |
995 | kmp_sch_dynamic_chunked) |
996 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT), {}) |
997 | LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START), |
998 | kmp_sch_guided_chunked) |
999 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT), {}) |
1000 | LOOP_START_ULL( |
1001 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START), |
1002 | kmp_sch_dynamic_chunked) |
1003 | LOOP_NEXT_ULL( |
1004 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT), {}) |
1005 | LOOP_START_ULL( |
1006 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START), |
1007 | kmp_sch_guided_chunked) |
1008 | LOOP_NEXT_ULL( |
1009 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT), {}) |
1010 | LOOP_RUNTIME_START_ULL( |
1011 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START), kmp_sch_runtime) |
1012 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT), {}) |
1013 | LOOP_RUNTIME_START_ULL( |
1014 | KMP_EXPAND_NAME( |
1015 | KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START), |
1016 | kmp_sch_runtime) |
1017 | LOOP_RUNTIME_START_ULL( |
1018 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START), |
1019 | kmp_sch_runtime) |
1020 | LOOP_NEXT_ULL( |
1021 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT), |
1022 | {}) |
1023 | LOOP_NEXT_ULL( |
1024 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT), {}) |
1025 | |
1026 | LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START), |
1027 | kmp_ord_static) |
1028 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT), |
1029 | { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) |
1030 | LOOP_START_ULL( |
1031 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START), |
1032 | kmp_ord_dynamic_chunked) |
1033 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT), |
1034 | { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) |
1035 | LOOP_START_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START), |
1036 | kmp_ord_guided_chunked) |
1037 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT), |
1038 | { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) |
1039 | LOOP_RUNTIME_START_ULL( |
1040 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START), |
1041 | kmp_ord_runtime) |
1042 | LOOP_NEXT_ULL(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT), |
1043 | { KMP_DISPATCH_FINI_CHUNK_ULL(&loc, gtid); }) |
1044 | |
1045 | #define LOOP_DOACROSS_START_ULL(func, schedule) \ |
1046 | int func(unsigned ncounts, unsigned long long *counts, \ |
1047 | unsigned long long chunk_sz, unsigned long long *p_lb, \ |
1048 | unsigned long long *p_ub) { \ |
1049 | int status; \ |
1050 | long long stride, str, lb, ub; \ |
1051 | int gtid = __kmp_entry_gtid(); \ |
1052 | struct kmp_dim *dims = \ |
1053 | (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ |
1054 | MKLOC(loc, KMP_STR(func)); \ |
1055 | for (unsigned i = 0; i < ncounts; ++i) { \ |
1056 | dims[i].lo = 0; \ |
1057 | dims[i].up = counts[i] - 1; \ |
1058 | dims[i].st = 1; \ |
1059 | } \ |
1060 | __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ |
1061 | lb = 0; \ |
1062 | ub = counts[0]; \ |
1063 | str = 1; \ |
1064 | \ |
1065 | KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \ |
1066 | "0x%llx, chunk_sz 0x%llx\n", \ |
1067 | gtid, lb, ub, str, chunk_sz)); \ |
1068 | \ |
1069 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
1070 | KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ |
1071 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
1072 | (schedule) != kmp_sch_static); \ |
1073 | status = \ |
1074 | KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ |
1075 | (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ |
1076 | if (status) { \ |
1077 | KMP_DEBUG_ASSERT(stride == str); \ |
1078 | *p_ub += (str > 0) ? 1 : -1; \ |
1079 | } \ |
1080 | } else { \ |
1081 | status = 0; \ |
1082 | } \ |
1083 | KMP_DOACROSS_FINI(status, gtid); \ |
1084 | \ |
1085 | KA_TRACE( \ |
1086 | 20, \ |
1087 | (KMP_STR( \ |
1088 | func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ |
1089 | gtid, *p_lb, *p_ub, status)); \ |
1090 | __kmp_free(dims); \ |
1091 | return status; \ |
1092 | } |
1093 | |
1094 | #define LOOP_DOACROSS_RUNTIME_START_ULL(func, schedule) \ |
1095 | int func(unsigned ncounts, unsigned long long *counts, \ |
1096 | unsigned long long *p_lb, unsigned long long *p_ub) { \ |
1097 | int status; \ |
1098 | unsigned long long stride, str, lb, ub; \ |
1099 | unsigned long long chunk_sz = 0; \ |
1100 | int gtid = __kmp_entry_gtid(); \ |
1101 | struct kmp_dim *dims = \ |
1102 | (struct kmp_dim *)__kmp_allocate(sizeof(struct kmp_dim) * ncounts); \ |
1103 | MKLOC(loc, KMP_STR(func)); \ |
1104 | for (unsigned i = 0; i < ncounts; ++i) { \ |
1105 | dims[i].lo = 0; \ |
1106 | dims[i].up = counts[i] - 1; \ |
1107 | dims[i].st = 1; \ |
1108 | } \ |
1109 | __kmpc_doacross_init(&loc, gtid, (int)ncounts, dims); \ |
1110 | lb = 0; \ |
1111 | ub = counts[0]; \ |
1112 | str = 1; \ |
1113 | KA_TRACE(20, (KMP_STR(func) ": T#%d, lb 0x%llx, ub 0x%llx, str " \ |
1114 | "0x%llx, chunk_sz 0x%llx\n", \ |
1115 | gtid, lb, ub, str, chunk_sz)); \ |
1116 | \ |
1117 | if ((str > 0) ? (lb < ub) : (lb > ub)) { \ |
1118 | KMP_DISPATCH_INIT_ULL(&loc, gtid, (schedule), lb, \ |
1119 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
1120 | TRUE); \ |
1121 | status = \ |
1122 | KMP_DISPATCH_NEXT_ULL(&loc, gtid, NULL, (kmp_uint64 *)p_lb, \ |
1123 | (kmp_uint64 *)p_ub, (kmp_int64 *)&stride); \ |
1124 | if (status) { \ |
1125 | KMP_DEBUG_ASSERT(stride == str); \ |
1126 | *p_ub += (str > 0) ? 1 : -1; \ |
1127 | } \ |
1128 | } else { \ |
1129 | status = 0; \ |
1130 | } \ |
1131 | KMP_DOACROSS_FINI(status, gtid); \ |
1132 | \ |
1133 | KA_TRACE( \ |
1134 | 20, \ |
1135 | (KMP_STR( \ |
1136 | func) " exit: T#%d, *p_lb 0x%llx, *p_ub 0x%llx, returning %d\n", \ |
1137 | gtid, *p_lb, *p_ub, status)); \ |
1138 | __kmp_free(dims); \ |
1139 | return status; \ |
1140 | } |
1141 | |
1142 | LOOP_DOACROSS_START_ULL( |
1143 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START), |
1144 | kmp_sch_static) |
1145 | LOOP_DOACROSS_START_ULL( |
1146 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START), |
1147 | kmp_sch_dynamic_chunked) |
1148 | LOOP_DOACROSS_START_ULL( |
1149 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START), |
1150 | kmp_sch_guided_chunked) |
1151 | LOOP_DOACROSS_RUNTIME_START_ULL( |
1152 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START), |
1153 | kmp_sch_runtime) |
1154 | |
1155 | // Combined parallel / loop worksharing constructs |
1156 | // |
1157 | // There are no ull versions (yet). |
1158 | |
1159 | #define PARALLEL_LOOP_START(func, schedule, ompt_pre, ompt_post) \ |
1160 | void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \ |
1161 | long ub, long str, long chunk_sz) { \ |
1162 | int gtid = __kmp_entry_gtid(); \ |
1163 | MKLOC(loc, KMP_STR(func)); \ |
1164 | KA_TRACE( \ |
1165 | 20, \ |
1166 | (KMP_STR( \ |
1167 | func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ |
1168 | gtid, lb, ub, str, chunk_sz)); \ |
1169 | \ |
1170 | ompt_pre(); \ |
1171 | \ |
1172 | __kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task, \ |
1173 | (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \ |
1174 | 9, task, data, num_threads, &loc, (schedule), lb, \ |
1175 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \ |
1176 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid)); \ |
1177 | \ |
1178 | KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ |
1179 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
1180 | (schedule) != kmp_sch_static); \ |
1181 | \ |
1182 | ompt_post(); \ |
1183 | \ |
1184 | KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \ |
1185 | } |
1186 | |
1187 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
1188 | |
1189 | #define OMPT_LOOP_PRE() \ |
1190 | ompt_frame_t *parent_frame; \ |
1191 | if (ompt_enabled.enabled) { \ |
1192 | __ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL); \ |
1193 | parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); \ |
1194 | OMPT_STORE_RETURN_ADDRESS(gtid); \ |
1195 | } |
1196 | |
1197 | #define OMPT_LOOP_POST() \ |
1198 | if (ompt_enabled.enabled) { \ |
1199 | parent_frame->enter_frame = ompt_data_none; \ |
1200 | } |
1201 | |
1202 | #else |
1203 | |
1204 | #define OMPT_LOOP_PRE() |
1205 | |
1206 | #define OMPT_LOOP_POST() |
1207 | |
1208 | #endif |
1209 | |
1210 | PARALLEL_LOOP_START( |
1211 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START), |
1212 | kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1213 | PARALLEL_LOOP_START( |
1214 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START), |
1215 | kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1216 | PARALLEL_LOOP_START( |
1217 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START), |
1218 | kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1219 | PARALLEL_LOOP_START( |
1220 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START), |
1221 | kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1222 | |
1223 | // Tasking constructs |
1224 | |
1225 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK)(void (*func)(void *), void *data, |
1226 | void (*copy_func)(void *, void *), |
1227 | long arg_size, long arg_align, |
1228 | bool if_cond, unsigned gomp_flags, |
1229 | void **depend) { |
1230 | MKLOC(loc, "GOMP_task" ); |
1231 | int gtid = __kmp_entry_gtid(); |
1232 | kmp_int32 flags = 0; |
1233 | kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags; |
1234 | |
1235 | KA_TRACE(20, ("GOMP_task: T#%d\n" , gtid)); |
1236 | |
1237 | // The low-order bit is the "untied" flag |
1238 | if (!(gomp_flags & KMP_GOMP_TASK_UNTIED_FLAG)) { |
1239 | input_flags->tiedness = TASK_TIED; |
1240 | } |
1241 | // The second low-order bit is the "final" flag |
1242 | if (gomp_flags & KMP_GOMP_TASK_FINAL_FLAG) { |
1243 | input_flags->final = 1; |
1244 | } |
1245 | input_flags->native = 1; |
1246 | // __kmp_task_alloc() sets up all other flags |
1247 | |
1248 | if (!if_cond) { |
1249 | arg_size = 0; |
1250 | } |
1251 | |
kmp_task_t *task = __kmp_task_alloc(
&loc, gtid, input_flags, sizeof(kmp_task_t),
arg_size ? arg_size + arg_align - 1 : 0, (kmp_routine_entry_t)func);
1255 | |
1256 | if (arg_size > 0) { |
1257 | if (arg_align > 0) { |
1258 | task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) / |
1259 | arg_align * arg_align); |
1260 | } |
1261 | // else error?? |
1262 | |
1263 | if (copy_func) { |
1264 | (*copy_func)(task->shareds, data); |
1265 | } else { |
KMP_MEMCPY(task->shareds, data, arg_size);
1267 | } |
1268 | } |
1269 | |
1270 | #if OMPT_SUPPORT |
1271 | kmp_taskdata_t *current_task; |
1272 | if (ompt_enabled.enabled) { |
1273 | current_task = __kmp_threads[gtid]->th.th_current_task; |
1274 | current_task->ompt_task_info.frame.enter_frame.ptr = |
1275 | OMPT_GET_FRAME_ADDRESS(0); |
1276 | } |
1277 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1278 | #endif |
1279 | |
1280 | if (if_cond) { |
1281 | if (gomp_flags & KMP_GOMP_TASK_DEPENDS_FLAG) { |
1282 | KMP_ASSERT(depend); |
1283 | kmp_gomp_depends_info_t gomp_depends(depend); |
1284 | kmp_int32 ndeps = gomp_depends.get_num_deps(); |
1285 | SimpleVLA<kmp_depend_info_t> dep_list(ndeps); |
1286 | for (kmp_int32 i = 0; i < ndeps; i++) |
dep_list[i] = gomp_depends.get_kmp_depend(i);
kmp_int32 ndeps_cnv;
__kmp_type_convert(ndeps, &ndeps_cnv);
__kmpc_omp_task_with_deps(&loc, gtid, task, ndeps_cnv, dep_list, 0, NULL);
1291 | } else { |
__kmpc_omp_task(&loc, gtid, task);
1293 | } |
1294 | } else { |
1295 | #if OMPT_SUPPORT |
1296 | ompt_thread_info_t oldInfo; |
1297 | kmp_info_t *thread; |
1298 | kmp_taskdata_t *taskdata; |
1299 | if (ompt_enabled.enabled) { |
// Save the thread's OMPT state and restore it after the task
1301 | thread = __kmp_threads[gtid]; |
1302 | taskdata = KMP_TASK_TO_TASKDATA(task); |
1303 | oldInfo = thread->th.ompt_thread_info; |
1304 | thread->th.ompt_thread_info.wait_id = 0; |
1305 | thread->th.ompt_thread_info.state = ompt_state_work_parallel; |
1306 | taskdata->ompt_task_info.frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1307 | } |
1308 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1309 | #endif |
1310 | if (gomp_flags & KMP_GOMP_TASK_DEPENDS_FLAG) { |
1311 | KMP_ASSERT(depend); |
1312 | kmp_gomp_depends_info_t gomp_depends(depend); |
1313 | kmp_int32 ndeps = gomp_depends.get_num_deps(); |
1314 | SimpleVLA<kmp_depend_info_t> dep_list(ndeps); |
1315 | for (kmp_int32 i = 0; i < ndeps; i++) |
dep_list[i] = gomp_depends.get_kmp_depend(i);
__kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
1318 | } |
1319 | |
__kmpc_omp_task_begin_if0(&loc, gtid, task);
func(data);
__kmpc_omp_task_complete_if0(&loc, gtid, task);
1323 | |
1324 | #if OMPT_SUPPORT |
1325 | if (ompt_enabled.enabled) { |
1326 | thread->th.ompt_thread_info = oldInfo; |
1327 | taskdata->ompt_task_info.frame.exit_frame = ompt_data_none; |
1328 | } |
1329 | #endif |
1330 | } |
1331 | #if OMPT_SUPPORT |
1332 | if (ompt_enabled.enabled) { |
1333 | current_task->ompt_task_info.frame.enter_frame = ompt_data_none; |
1334 | } |
1335 | #endif |
1336 | |
1337 | KA_TRACE(20, ("GOMP_task exit: T#%d\n" , gtid)); |
1338 | } |
1339 | |
1340 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT)(void) { |
1341 | MKLOC(loc, "GOMP_taskwait" ); |
1342 | int gtid = __kmp_entry_gtid(); |
1343 | |
1344 | #if OMPT_SUPPORT |
1345 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1346 | #endif |
1347 | |
1348 | KA_TRACE(20, ("GOMP_taskwait: T#%d\n" , gtid)); |
1349 | |
__kmpc_omp_taskwait(&loc, gtid);
1351 | |
1352 | KA_TRACE(20, ("GOMP_taskwait exit: T#%d\n" , gtid)); |
1353 | } |
1354 | |
1355 | // Sections worksharing constructs |
1356 | // |
1357 | // For the sections construct, we initialize a dynamically scheduled loop |
// worksharing construct with lb 1 and stride 1, and use the iteration #'s
// that it returns as section ids.
1360 | // |
1361 | // There are no special entry points for ordered sections, so we always use |
1362 | // the dynamically scheduled workshare, even if the sections aren't ordered. |
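// For example, a region with three sections is treated as a dynamically
// scheduled loop over 1..3 with chunk 1: GOMP_sections_start()/_next()
// hand out the next unclaimed section id and return 0 once every section
// has been handed out.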
1363 | |
1364 | unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(unsigned count) { |
1365 | int status; |
1366 | kmp_int lb, ub, stride; |
1367 | int gtid = __kmp_entry_gtid(); |
1368 | MKLOC(loc, "GOMP_sections_start" ); |
1369 | KA_TRACE(20, ("GOMP_sections_start: T#%d\n" , gtid)); |
1370 | |
KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);

status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1374 | if (status) { |
1375 | KMP_DEBUG_ASSERT(stride == 1); |
1376 | KMP_DEBUG_ASSERT(lb > 0); |
1377 | KMP_ASSERT(lb == ub); |
1378 | } else { |
1379 | lb = 0; |
1380 | } |
1381 | |
1382 | KA_TRACE(20, ("GOMP_sections_start exit: T#%d returning %u\n" , gtid, |
1383 | (unsigned)lb)); |
1384 | return (unsigned)lb; |
1385 | } |
1386 | |
1387 | unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_NEXT)(void) { |
1388 | int status; |
1389 | kmp_int lb, ub, stride; |
1390 | int gtid = __kmp_get_gtid(); |
1391 | MKLOC(loc, "GOMP_sections_next" ); |
1392 | KA_TRACE(20, ("GOMP_sections_next: T#%d\n" , gtid)); |
1393 | |
1394 | #if OMPT_SUPPORT |
1395 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1396 | #endif |
1397 | |
status = KMP_DISPATCH_NEXT(&loc, gtid, NULL, &lb, &ub, &stride);
1399 | if (status) { |
1400 | KMP_DEBUG_ASSERT(stride == 1); |
1401 | KMP_DEBUG_ASSERT(lb > 0); |
1402 | KMP_ASSERT(lb == ub); |
1403 | } else { |
1404 | lb = 0; |
1405 | } |
1406 | |
1407 | KA_TRACE( |
1408 | 20, ("GOMP_sections_next exit: T#%d returning %u\n" , gtid, (unsigned)lb)); |
1409 | return (unsigned)lb; |
1410 | } |
1411 | |
1412 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START)( |
1413 | void (*task)(void *), void *data, unsigned num_threads, unsigned count) { |
1414 | int gtid = __kmp_entry_gtid(); |
1415 | |
1416 | #if OMPT_SUPPORT |
1417 | ompt_frame_t *parent_frame; |
1418 | |
1419 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &parent_frame, NULL, NULL);
1421 | parent_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1422 | } |
1423 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1424 | #endif |
1425 | |
1426 | MKLOC(loc, "GOMP_parallel_sections_start" ); |
1427 | KA_TRACE(20, ("GOMP_parallel_sections_start: T#%d\n" , gtid)); |
1428 | |
__kmp_GOMP_fork_call(&loc, gtid, num_threads, 0u, task,
(microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
1431 | task, data, num_threads, &loc, kmp_nm_dynamic_chunked, |
1432 | (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1); |
1433 | |
1434 | #if OMPT_SUPPORT |
1435 | if (ompt_enabled.enabled) { |
1436 | parent_frame->enter_frame = ompt_data_none; |
1437 | } |
1438 | #endif |
1439 | |
KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1441 | |
1442 | KA_TRACE(20, ("GOMP_parallel_sections_start exit: T#%d\n" , gtid)); |
1443 | } |
1444 | |
1445 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END)(void) { |
1446 | int gtid = __kmp_get_gtid(); |
1447 | KA_TRACE(20, ("GOMP_sections_end: T#%d\n" , gtid)) |
1448 | |
1449 | #if OMPT_SUPPORT |
1450 | ompt_frame_t *ompt_frame; |
1451 | if (ompt_enabled.enabled) { |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
1453 | ompt_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1454 | } |
1455 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1456 | #endif |
__kmp_barrier(bs_plain_barrier, gtid, FALSE, 0, NULL, NULL);
1458 | #if OMPT_SUPPORT |
1459 | if (ompt_enabled.enabled) { |
1460 | ompt_frame->enter_frame = ompt_data_none; |
1461 | } |
1462 | #endif |
1463 | |
1464 | KA_TRACE(20, ("GOMP_sections_end exit: T#%d\n" , gtid)) |
1465 | } |
1466 | |
1467 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT)(void) { |
1468 | KA_TRACE(20, ("GOMP_sections_end_nowait: T#%d\n" , __kmp_get_gtid())) |
1469 | } |
1470 | |
1471 | // libgomp has an empty function for GOMP_taskyield as of 2013-10-10 |
1472 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKYIELD)(void) { |
1473 | KA_TRACE(20, ("GOMP_taskyield: T#%d\n" , __kmp_get_gtid())) |
1474 | return; |
1475 | } |
1476 | |
1477 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL)(void (*task)(void *), |
1478 | void *data, |
1479 | unsigned num_threads, |
1480 | unsigned int flags) { |
1481 | int gtid = __kmp_entry_gtid(); |
1482 | MKLOC(loc, "GOMP_parallel" ); |
1483 | KA_TRACE(20, ("GOMP_parallel: T#%d\n" , gtid)); |
1484 | |
1485 | #if OMPT_SUPPORT |
1486 | ompt_task_info_t *parent_task_info, *task_info; |
1487 | if (ompt_enabled.enabled) { |
parent_task_info = __ompt_get_task_info_object(0);
1489 | parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1490 | } |
1491 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1492 | #endif |
__kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
(microtask_t)__kmp_GOMP_microtask_wrapper, 2, task,
1495 | data); |
1496 | #if OMPT_SUPPORT |
1497 | if (ompt_enabled.enabled) { |
task_info = __ompt_get_task_info_object(0);
1499 | task_info->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1500 | } |
1501 | #endif |
1502 | task(data); |
1503 | { |
1504 | #if OMPT_SUPPORT |
1505 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1506 | #endif |
1507 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); |
1508 | } |
1509 | #if OMPT_SUPPORT |
1510 | if (ompt_enabled.enabled) { |
1511 | task_info->frame.exit_frame = ompt_data_none; |
1512 | parent_task_info->frame.enter_frame = ompt_data_none; |
1513 | } |
1514 | #endif |
1515 | } |
1516 | |
1517 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_SECTIONS)(void (*task)(void *), |
1518 | void *data, |
1519 | unsigned num_threads, |
1520 | unsigned count, |
1521 | unsigned flags) { |
1522 | int gtid = __kmp_entry_gtid(); |
1523 | MKLOC(loc, "GOMP_parallel_sections" ); |
1524 | KA_TRACE(20, ("GOMP_parallel_sections: T#%d\n" , gtid)); |
1525 | |
1526 | #if OMPT_SUPPORT |
1527 | ompt_frame_t *task_frame; |
1528 | kmp_info_t *thr; |
1529 | if (ompt_enabled.enabled) { |
1530 | thr = __kmp_threads[gtid]; |
1531 | task_frame = &(thr->th.th_current_task->ompt_task_info.frame); |
1532 | task_frame->enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1533 | } |
1534 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1535 | #endif |
1536 | |
__kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
(microtask_t)__kmp_GOMP_parallel_microtask_wrapper, 9,
1539 | task, data, num_threads, &loc, kmp_nm_dynamic_chunked, |
1540 | (kmp_int)1, (kmp_int)count, (kmp_int)1, (kmp_int)1); |
1541 | |
1542 | { |
1543 | #if OMPT_SUPPORT |
1544 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1545 | #endif |
1546 | |
KMP_DISPATCH_INIT(&loc, gtid, kmp_nm_dynamic_chunked, 1, count, 1, 1, TRUE);
1548 | } |
1549 | |
1550 | #if OMPT_SUPPORT |
1551 | ompt_frame_t *child_frame; |
1552 | if (ompt_enabled.enabled) { |
1553 | child_frame = &(thr->th.th_current_task->ompt_task_info.frame); |
1554 | child_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
1555 | } |
1556 | #endif |
1557 | |
1558 | task(data); |
1559 | |
1560 | #if OMPT_SUPPORT |
1561 | if (ompt_enabled.enabled) { |
1562 | child_frame->exit_frame = ompt_data_none; |
1563 | } |
1564 | #endif |
1565 | |
1566 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); |
1567 | KA_TRACE(20, ("GOMP_parallel_sections exit: T#%d\n" , gtid)); |
1568 | |
1569 | #if OMPT_SUPPORT |
1570 | if (ompt_enabled.enabled) { |
1571 | task_frame->enter_frame = ompt_data_none; |
1572 | } |
1573 | #endif |
1574 | } |
1575 | |
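// Helper macro for the GOMP_parallel_loop_* entry points below. Each
// expansion forks a team running __kmp_GOMP_parallel_microtask_wrapper with
// the requested schedule, initializes the dispatcher on the calling thread,
// runs the loop body, then joins through GOMP_parallel_end.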
1576 | #define PARALLEL_LOOP(func, schedule, ompt_pre, ompt_post) \ |
1577 | void func(void (*task)(void *), void *data, unsigned num_threads, long lb, \ |
1578 | long ub, long str, long chunk_sz, unsigned flags) { \ |
1579 | int gtid = __kmp_entry_gtid(); \ |
1580 | MKLOC(loc, KMP_STR(func)); \ |
1581 | KA_TRACE( \ |
1582 | 20, \ |
1583 | (KMP_STR( \ |
1584 | func) ": T#%d, lb 0x%lx, ub 0x%lx, str 0x%lx, chunk_sz 0x%lx\n", \ |
1585 | gtid, lb, ub, str, chunk_sz)); \ |
1586 | \ |
1587 | ompt_pre(); \ |
1588 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
1589 | __kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task, \ |
1590 | (microtask_t)__kmp_GOMP_parallel_microtask_wrapper, \ |
1591 | 9, task, data, num_threads, &loc, (schedule), lb, \ |
1592 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz); \ |
1593 | \ |
1594 | { \ |
1595 | IF_OMPT_SUPPORT(OMPT_STORE_RETURN_ADDRESS(gtid);) \ |
1596 | KMP_DISPATCH_INIT(&loc, gtid, (schedule), lb, \ |
1597 | (str > 0) ? (ub - 1) : (ub + 1), str, chunk_sz, \ |
1598 | (schedule) != kmp_sch_static); \ |
1599 | } \ |
1600 | task(data); \ |
1601 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); \ |
1602 | ompt_post(); \ |
1603 | \ |
1604 | KA_TRACE(20, (KMP_STR(func) " exit: T#%d\n", gtid)); \ |
1605 | } |
1606 | |
1607 | PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC), |
1608 | kmp_sch_static, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1609 | PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC), |
1610 | kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1611 | PARALLEL_LOOP( |
1612 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED), |
1613 | kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1614 | PARALLEL_LOOP( |
1615 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC), |
1616 | kmp_sch_dynamic_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1617 | PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED), |
1618 | kmp_sch_guided_chunked, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1619 | PARALLEL_LOOP(KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME), |
1620 | kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1621 | PARALLEL_LOOP( |
1622 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME), |
1623 | kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1624 | PARALLEL_LOOP( |
1625 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME), |
1626 | kmp_sch_runtime, OMPT_LOOP_PRE, OMPT_LOOP_POST) |
1627 | |
1628 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_START)(void) { |
1629 | int gtid = __kmp_entry_gtid(); |
1630 | MKLOC(loc, "GOMP_taskgroup_start" ); |
1631 | KA_TRACE(20, ("GOMP_taskgroup_start: T#%d\n" , gtid)); |
1632 | |
1633 | #if OMPT_SUPPORT |
1634 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1635 | #endif |
1636 | |
__kmpc_taskgroup(&loc, gtid);
1638 | |
1639 | return; |
1640 | } |
1641 | |
1642 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_END)(void) { |
1643 | int gtid = __kmp_get_gtid(); |
1644 | MKLOC(loc, "GOMP_taskgroup_end" ); |
1645 | KA_TRACE(20, ("GOMP_taskgroup_end: T#%d\n" , gtid)); |
1646 | |
1647 | #if OMPT_SUPPORT |
1648 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1649 | #endif |
1650 | |
__kmpc_end_taskgroup(&loc, gtid);
1652 | |
1653 | return; |
1654 | } |
1655 | |
1656 | static kmp_int32 __kmp_gomp_to_omp_cancellation_kind(int gomp_kind) { |
1657 | kmp_int32 cncl_kind = 0; |
1658 | switch (gomp_kind) { |
1659 | case 1: |
1660 | cncl_kind = cancel_parallel; |
1661 | break; |
1662 | case 2: |
1663 | cncl_kind = cancel_loop; |
1664 | break; |
1665 | case 4: |
1666 | cncl_kind = cancel_sections; |
1667 | break; |
1668 | case 8: |
1669 | cncl_kind = cancel_taskgroup; |
1670 | break; |
1671 | } |
1672 | return cncl_kind; |
1673 | } |
1674 | |
1675 | // Return true if cancellation should take place, false otherwise |
1676 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCELLATION_POINT)(int which) { |
1677 | int gtid = __kmp_get_gtid(); |
1678 | MKLOC(loc, "GOMP_cancellation_point" ); |
1679 | KA_TRACE(20, ("GOMP_cancellation_point: T#%d which:%d\n" , gtid, which)); |
kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
1682 | } |
1683 | |
1684 | // Return true if cancellation should take place, false otherwise |
1685 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_CANCEL)(int which, bool do_cancel) { |
1686 | int gtid = __kmp_get_gtid(); |
1687 | MKLOC(loc, "GOMP_cancel" ); |
1688 | KA_TRACE(20, ("GOMP_cancel: T#%d which:%d do_cancel:%d\n" , gtid, which, |
1689 | (int)do_cancel)); |
kmp_int32 cncl_kind = __kmp_gomp_to_omp_cancellation_kind(which);
1691 | |
1692 | if (do_cancel == FALSE) { |
return __kmpc_cancellationpoint(&loc, gtid, cncl_kind);
} else {
return __kmpc_cancel(&loc, gtid, cncl_kind);
1696 | } |
1697 | } |
1698 | |
1699 | // Return true if cancellation should take place, false otherwise |
1700 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_BARRIER_CANCEL)(void) { |
1701 | int gtid = __kmp_get_gtid(); |
1702 | KA_TRACE(20, ("GOMP_barrier_cancel: T#%d\n" , gtid)); |
1703 | return __kmp_barrier_gomp_cancel(gtid); |
1704 | } |
1705 | |
1706 | // Return true if cancellation should take place, false otherwise |
1707 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL)(void) { |
1708 | int gtid = __kmp_get_gtid(); |
1709 | KA_TRACE(20, ("GOMP_sections_end_cancel: T#%d\n" , gtid)); |
1710 | return __kmp_barrier_gomp_cancel(gtid); |
1711 | } |
1712 | |
1713 | // Return true if cancellation should take place, false otherwise |
1714 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_END_CANCEL)(void) { |
1715 | int gtid = __kmp_get_gtid(); |
1716 | KA_TRACE(20, ("GOMP_loop_end_cancel: T#%d\n" , gtid)); |
1717 | return __kmp_barrier_gomp_cancel(gtid); |
1718 | } |
1719 | |
1720 | // All target functions are empty as of 2014-05-29 |
1721 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET)(int device, void (*fn)(void *), |
1722 | const void *openmp_target, |
1723 | size_t mapnum, void **hostaddrs, |
1724 | size_t *sizes, |
1725 | unsigned char *kinds) { |
1726 | return; |
1727 | } |
1728 | |
1729 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_DATA)( |
1730 | int device, const void *openmp_target, size_t mapnum, void **hostaddrs, |
1731 | size_t *sizes, unsigned char *kinds) { |
1732 | return; |
1733 | } |
1734 | |
1735 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_END_DATA)(void) { return; } |
1736 | |
1737 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TARGET_UPDATE)( |
1738 | int device, const void *openmp_target, size_t mapnum, void **hostaddrs, |
1739 | size_t *sizes, unsigned char *kinds) { |
1740 | return; |
1741 | } |
1742 | |
1743 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS)(unsigned int num_teams, |
1744 | unsigned int thread_limit) { |
1745 | return; |
1746 | } |
1747 | |
1748 | // Task duplication function which copies src to dest (both are |
1749 | // preallocated task structures) |
1750 | static void __kmp_gomp_task_dup(kmp_task_t *dest, kmp_task_t *src, |
1751 | kmp_int32 last_private) { |
1752 | kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(src); |
1753 | if (taskdata->td_copy_func) { |
1754 | (taskdata->td_copy_func)(dest->shareds, src->shareds); |
1755 | } |
1756 | } |
1757 | |
1758 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_REGISTER)( |
1759 | uintptr_t *); |
1760 | |
1761 | #ifdef __cplusplus |
1762 | } // extern "C" |
1763 | #endif |
1764 | |
1765 | template <typename T> |
1766 | void __GOMP_taskloop(void (*func)(void *), void *data, |
1767 | void (*copy_func)(void *, void *), long arg_size, |
1768 | long arg_align, unsigned gomp_flags, |
1769 | unsigned long num_tasks, int priority, T start, T end, |
1770 | T step) { |
1771 | typedef void (*p_task_dup_t)(kmp_task_t *, kmp_task_t *, kmp_int32); |
1772 | MKLOC(loc, "GOMP_taskloop" ); |
1773 | int sched; |
1774 | T *loop_bounds; |
1775 | int gtid = __kmp_entry_gtid(); |
1776 | kmp_int32 flags = 0; |
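// gomp_flags bit layout, as decoded in this function: bit 0 = untied,
// bit 1 = final, bit 8 = positive step ("up"), bit 9 = grainsize (vs.
// num_tasks), bit 10 = if-clause value, bit 11 = nogroup, bit 12 =
// reductions present.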
1777 | int if_val = gomp_flags & (1u << 10); |
1778 | int nogroup = gomp_flags & (1u << 11); |
1779 | int up = gomp_flags & (1u << 8); |
1780 | int reductions = gomp_flags & (1u << 12); |
1781 | p_task_dup_t task_dup = NULL; |
1782 | kmp_tasking_flags_t *input_flags = (kmp_tasking_flags_t *)&flags; |
1783 | #ifdef KMP_DEBUG |
1784 | { |
1785 | char *buff; |
1786 | buff = __kmp_str_format( |
1787 | "GOMP_taskloop: T#%%d: func:%%p data:%%p copy_func:%%p " |
1788 | "arg_size:%%ld arg_align:%%ld gomp_flags:0x%%x num_tasks:%%lu " |
1789 | "priority:%%d start:%%%s end:%%%s step:%%%s\n" , |
1790 | traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec); |
1791 | KA_TRACE(20, (buff, gtid, func, data, copy_func, arg_size, arg_align, |
1792 | gomp_flags, num_tasks, priority, start, end, step)); |
__kmp_str_free(&buff);
1794 | } |
1795 | #endif |
1796 | KMP_ASSERT((size_t)arg_size >= 2 * sizeof(T)); |
1797 | KMP_ASSERT(arg_align > 0); |
1798 | // The low-order bit is the "untied" flag |
1799 | if (!(gomp_flags & 1)) { |
1800 | input_flags->tiedness = TASK_TIED; |
1801 | } |
1802 | // The second low-order bit is the "final" flag |
1803 | if (gomp_flags & 2) { |
1804 | input_flags->final = 1; |
1805 | } |
1806 | // Negative step flag |
1807 | if (!up) { |
// If the step is flagged as negative but has not been properly sign
// extended, sign extend it manually. It could be a short, int, or char
// embedded in a long, so no particular cast can be assumed.
1811 | if (step > 0) { |
1812 | for (int i = sizeof(T) * CHAR_BIT - 1; i >= 0L; --i) { |
1813 | // break at the first 1 bit |
1814 | if (step & ((T)1 << i)) |
1815 | break; |
1816 | step |= ((T)1 << i); |
1817 | } |
1818 | } |
1819 | } |
1820 | input_flags->native = 1; |
1821 | // Figure out if none/grainsize/num_tasks clause specified |
1822 | if (num_tasks > 0) { |
1823 | if (gomp_flags & (1u << 9)) |
1824 | sched = 1; // grainsize specified |
1825 | else |
sched = 2; // num_tasks specified
} else {
// neither grainsize nor num_tasks specified
sched = 0;
1830 | } |
1831 | |
1832 | // __kmp_task_alloc() sets up all other flags |
1833 | kmp_task_t *task = |
__kmp_task_alloc(&loc, gtid, input_flags, sizeof(kmp_task_t),
arg_size + arg_align - 1, (kmp_routine_entry_t)func);
1836 | kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task); |
1837 | taskdata->td_copy_func = copy_func; |
1838 | taskdata->td_size_loop_bounds = sizeof(T); |
1839 | |
1840 | // re-align shareds if needed and setup firstprivate copy constructors |
1841 | // through the task_dup mechanism |
1842 | task->shareds = (void *)((((size_t)task->shareds) + arg_align - 1) / |
1843 | arg_align * arg_align); |
1844 | if (copy_func) { |
1845 | task_dup = __kmp_gomp_task_dup; |
1846 | } |
KMP_MEMCPY(task->shareds, data, arg_size);
1848 | |
1849 | loop_bounds = (T *)task->shareds; |
1850 | loop_bounds[0] = start; |
1851 | loop_bounds[1] = end + (up ? -1 : 1); |
1852 | |
1853 | if (!nogroup) { |
1854 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
1855 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1856 | #endif |
__kmpc_taskgroup(&loc, gtid);
1858 | if (reductions) { |
1859 | // The data pointer points to lb, ub, then reduction data |
1860 | struct data_t { |
1861 | T a, b; |
1862 | uintptr_t *d; |
1863 | }; |
1864 | uintptr_t *d = ((data_t *)data)->d; |
1865 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_REGISTER)(d); |
1866 | } |
1867 | } |
__kmpc_taskloop(&loc, gtid, task, if_val, (kmp_uint64 *)&(loop_bounds[0]),
(kmp_uint64 *)&(loop_bounds[1]), (kmp_int64)step, 1, sched,
(kmp_uint64)num_tasks, (void *)task_dup);
1871 | if (!nogroup) { |
1872 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
1873 | OMPT_STORE_RETURN_ADDRESS(gtid); |
1874 | #endif |
__kmpc_end_taskgroup(&loc, gtid);
1876 | } |
1877 | } |
1878 | |
1879 | // 4 byte version of GOMP_doacross_post |
// This version needs to create a temporary array which converts 4 byte
// integers into 8 byte integers
1882 | template <typename T, bool need_conversion = (sizeof(long) == 4)> |
1883 | void __kmp_GOMP_doacross_post(T *count); |
1884 | |
1885 | template <> void __kmp_GOMP_doacross_post<long, true>(long *count) { |
1886 | int gtid = __kmp_entry_gtid(); |
1887 | kmp_info_t *th = __kmp_threads[gtid]; |
1888 | MKLOC(loc, "GOMP_doacross_post" ); |
1889 | kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0]; |
1890 | kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc( |
1891 | th, (size_t)(sizeof(kmp_int64) * num_dims)); |
1892 | for (kmp_int64 i = 0; i < num_dims; ++i) { |
1893 | vec[i] = (kmp_int64)count[i]; |
1894 | } |
__kmpc_doacross_post(&loc, gtid, vec);
1896 | __kmp_thread_free(th, vec); |
1897 | } |
1898 | |
1899 | // 8 byte versions of GOMP_doacross_post |
1900 | // This version can just pass in the count array directly instead of creating |
1901 | // a temporary array |
1902 | template <> void __kmp_GOMP_doacross_post<long, false>(long *count) { |
1903 | int gtid = __kmp_entry_gtid(); |
1904 | MKLOC(loc, "GOMP_doacross_post" ); |
__kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
1906 | } |
1907 | |
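// Gather the leading count plus the remaining (num_dims - 1) varargs into a
// kmp_int64 vector and forward it to __kmpc_doacross_wait().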
1908 | template <typename T> void __kmp_GOMP_doacross_wait(T first, va_list args) { |
1909 | int gtid = __kmp_entry_gtid(); |
1910 | kmp_info_t *th = __kmp_threads[gtid]; |
1911 | MKLOC(loc, "GOMP_doacross_wait" ); |
1912 | kmp_int64 num_dims = th->th.th_dispatch->th_doacross_info[0]; |
1913 | kmp_int64 *vec = (kmp_int64 *)__kmp_thread_malloc( |
1914 | th, (size_t)(sizeof(kmp_int64) * num_dims)); |
1915 | vec[0] = (kmp_int64)first; |
1916 | for (kmp_int64 i = 1; i < num_dims; ++i) { |
1917 | T item = va_arg(args, T); |
1918 | vec[i] = (kmp_int64)item; |
1919 | } |
__kmpc_doacross_wait(&loc, gtid, vec);
1921 | __kmp_thread_free(th, vec); |
1922 | return; |
1923 | } |
1924 | |
1925 | #ifdef __cplusplus |
1926 | extern "C" { |
1927 | #endif // __cplusplus |
1928 | |
1929 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP)( |
1930 | void (*func)(void *), void *data, void (*copy_func)(void *, void *), |
1931 | long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks, |
1932 | int priority, long start, long end, long step) { |
1933 | __GOMP_taskloop<long>(func, data, copy_func, arg_size, arg_align, gomp_flags, |
1934 | num_tasks, priority, start, end, step); |
1935 | } |
1936 | |
1937 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKLOOP_ULL)( |
1938 | void (*func)(void *), void *data, void (*copy_func)(void *, void *), |
1939 | long arg_size, long arg_align, unsigned gomp_flags, unsigned long num_tasks, |
1940 | int priority, unsigned long long start, unsigned long long end, |
1941 | unsigned long long step) { |
1942 | __GOMP_taskloop<unsigned long long>(func, data, copy_func, arg_size, |
1943 | arg_align, gomp_flags, num_tasks, |
1944 | priority, start, end, step); |
1945 | } |
1946 | |
1947 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_POST)(long *count) { |
1948 | __kmp_GOMP_doacross_post(count); |
1949 | } |
1950 | |
1951 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_WAIT)(long first, ...) { |
1952 | va_list args; |
1953 | va_start(args, first); |
1954 | __kmp_GOMP_doacross_wait<long>(first, args); |
1955 | va_end(args); |
1956 | } |
1957 | |
1958 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_POST)( |
1959 | unsigned long long *count) { |
1960 | int gtid = __kmp_entry_gtid(); |
1961 | MKLOC(loc, "GOMP_doacross_ull_post" ); |
__kmpc_doacross_post(&loc, gtid, RCAST(kmp_int64 *, count));
1963 | } |
1964 | |
1965 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT)( |
1966 | unsigned long long first, ...) { |
1967 | va_list args; |
1968 | va_start(args, first); |
1969 | __kmp_GOMP_doacross_wait<unsigned long long>(first, args); |
1970 | va_end(args); |
1971 | } |
1972 | |
1973 | // fn: the function each primary thread of new team will call |
1974 | // data: argument to fn |
1975 | // num_teams, thread_limit: max bounds on respective ICV |
1976 | // flags: unused |
1977 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TEAMS_REG)(void (*fn)(void *), |
1978 | void *data, |
1979 | unsigned num_teams, |
1980 | unsigned thread_limit, |
1981 | unsigned flags) { |
1982 | MKLOC(loc, "GOMP_teams_reg" ); |
1983 | int gtid = __kmp_entry_gtid(); |
1984 | KA_TRACE(20, ("GOMP_teams_reg: T#%d num_teams=%u thread_limit=%u flag=%u\n" , |
1985 | gtid, num_teams, thread_limit, flags)); |
__kmpc_push_num_teams(&loc, gtid, num_teams, thread_limit);
__kmpc_fork_teams(&loc, 2, (microtask_t)__kmp_GOMP_microtask_wrapper, fn,
1988 | data); |
1989 | KA_TRACE(20, ("GOMP_teams_reg exit: T#%d\n" , gtid)); |
1990 | } |
1991 | |
1992 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKWAIT_DEPEND)(void **depend) { |
1993 | MKLOC(loc, "GOMP_taskwait_depend" ); |
1994 | int gtid = __kmp_entry_gtid(); |
1995 | KA_TRACE(20, ("GOMP_taskwait_depend: T#%d\n" , gtid)); |
1996 | kmp_gomp_depends_info_t gomp_depends(depend); |
1997 | kmp_int32 ndeps = gomp_depends.get_num_deps(); |
1998 | SimpleVLA<kmp_depend_info_t> dep_list(ndeps); |
1999 | for (kmp_int32 i = 0; i < ndeps; i++) |
dep_list[i] = gomp_depends.get_kmp_depend(i);
2001 | #if OMPT_SUPPORT |
2002 | OMPT_STORE_RETURN_ADDRESS(gtid); |
2003 | #endif |
__kmpc_omp_wait_deps(&loc, gtid, ndeps, dep_list, 0, NULL);
2005 | KA_TRACE(20, ("GOMP_taskwait_depend exit: T#%d\n" , gtid)); |
2006 | } |
2007 | |
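// Layout of the GOMP taskgroup reduction descriptor, as read and written in
// this file: data[0] = number of reduction variables, data[1] = per-thread
// size of the privatized block, data[2] = base address of the privatized
// copies, data[6] = end address of the privatized copies. Per-variable
// entries start at data[7], three words each, where entry[0] is the original
// variable address and entry[1] its offset into the per-thread block (the
// third word is not inspected here).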
2008 | static inline void |
2009 | __kmp_GOMP_taskgroup_reduction_register(uintptr_t *data, kmp_taskgroup_t *tg, |
2010 | int nthreads, |
2011 | uintptr_t *allocated = nullptr) { |
2012 | KMP_ASSERT(data); |
2013 | KMP_ASSERT(nthreads > 0); |
2014 | // Have private copy pointers point to previously allocated |
2015 | // reduction data or allocate new data here |
2016 | if (allocated) { |
2017 | data[2] = allocated[2]; |
2018 | data[6] = allocated[6]; |
2019 | } else { |
2020 | data[2] = (uintptr_t)__kmp_allocate(nthreads * data[1]); |
2021 | data[6] = data[2] + (nthreads * data[1]); |
2022 | } |
2023 | if (tg) |
2024 | tg->gomp_data = data; |
2025 | } |
2026 | |
2027 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_REGISTER)( |
2028 | uintptr_t *data) { |
2029 | int gtid = __kmp_entry_gtid(); |
2030 | KA_TRACE(20, ("GOMP_taskgroup_reduction_register: T#%d\n" , gtid)); |
2031 | kmp_info_t *thread = __kmp_threads[gtid]; |
2032 | kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup; |
2033 | int nthreads = thread->th.th_team_nproc; |
2034 | __kmp_GOMP_taskgroup_reduction_register(data, tg, nthreads); |
2035 | } |
2036 | |
2037 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_UNREGISTER)( |
2038 | uintptr_t *data) { |
2039 | KA_TRACE(20, |
2040 | ("GOMP_taskgroup_reduction_unregister: T#%d\n" , __kmp_get_gtid())); |
2041 | KMP_ASSERT(data && data[2]); |
2042 | __kmp_free((void *)data[2]); |
2043 | } |
2044 | |
2045 | // Search through reduction data and set ptrs[] elements |
2046 | // to proper privatized copy address |
2047 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASK_REDUCTION_REMAP)(size_t cnt, |
2048 | size_t cntorig, |
2049 | void **ptrs) { |
2050 | int gtid = __kmp_entry_gtid(); |
2051 | KA_TRACE(20, ("GOMP_task_reduction_remap: T#%d\n" , gtid)); |
2052 | kmp_info_t *thread = __kmp_threads[gtid]; |
2053 | kmp_int32 tid = __kmp_get_tid(); |
2054 | for (size_t i = 0; i < cnt; ++i) { |
2055 | uintptr_t address = (uintptr_t)ptrs[i]; |
2056 | void *propagated_address = NULL; |
2057 | void *mapped_address = NULL; |
2058 | // Check taskgroups reduce data |
2059 | kmp_taskgroup_t *tg = thread->th.th_current_task->td_taskgroup; |
2060 | while (tg) { |
2061 | uintptr_t *gomp_data = tg->gomp_data; |
2062 | if (!gomp_data) { |
2063 | tg = tg->parent; |
2064 | continue; |
2065 | } |
2066 | // Check the shared addresses list |
2067 | size_t num_vars = (size_t)gomp_data[0]; |
2068 | uintptr_t per_thread_size = gomp_data[1]; |
2069 | uintptr_t reduce_data = gomp_data[2]; |
2070 | uintptr_t end_reduce_data = gomp_data[6]; |
2071 | for (size_t j = 0; j < num_vars; ++j) { |
2072 | uintptr_t *entry = gomp_data + 7 + 3 * j; |
2073 | if (entry[0] == address) { |
2074 | uintptr_t offset = entry[1]; |
2075 | mapped_address = |
2076 | (void *)(reduce_data + tid * per_thread_size + offset); |
2077 | if (i < cntorig) |
2078 | propagated_address = (void *)entry[0]; |
2079 | break; |
2080 | } |
2081 | } |
2082 | if (mapped_address) |
2083 | break; |
2084 | // Check if address is within privatized copies range |
2085 | if (!mapped_address && address >= reduce_data && |
2086 | address < end_reduce_data) { |
2087 | uintptr_t offset = (address - reduce_data) % per_thread_size; |
2088 | mapped_address = (void *)(reduce_data + tid * per_thread_size + offset); |
2089 | if (i < cntorig) { |
2090 | for (size_t j = 0; j < num_vars; ++j) { |
2091 | uintptr_t *entry = gomp_data + 7 + 3 * j; |
2092 | if (entry[1] == offset) { |
2093 | propagated_address = (void *)entry[0]; |
2094 | break; |
2095 | } |
2096 | } |
2097 | } |
2098 | } |
2099 | if (mapped_address) |
2100 | break; |
2101 | tg = tg->parent; |
2102 | } |
2103 | KMP_ASSERT(mapped_address); |
2104 | ptrs[i] = mapped_address; |
2105 | if (i < cntorig) { |
2106 | KMP_ASSERT(propagated_address); |
2107 | ptrs[cnt + i] = propagated_address; |
2108 | } |
2109 | } |
2110 | } |
2111 | |
2112 | static void __kmp_GOMP_init_reductions(int gtid, uintptr_t *data, int is_ws) { |
2113 | kmp_info_t *thr = __kmp_threads[gtid]; |
2114 | kmp_team_t *team = thr->th.th_team; |
2115 | // First start a taskgroup |
2116 | __kmpc_taskgroup(NULL, gtid); |
2117 | // Then setup reduction data |
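// team->t.t_tg_reduce_data[is_ws] acts as a latch: NULL means uninitialized,
// (void *)1 means initialization is in progress, and any other value is the
// registered descriptor shared by the team.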
2118 | void *reduce_data = KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[is_ws]); |
2119 | if (reduce_data == NULL && |
__kmp_atomic_compare_store(&team->t.t_tg_reduce_data[is_ws], reduce_data,
(void *)1)) {
2122 | // Single thread enters this block to initialize common reduction data |
2123 | KMP_DEBUG_ASSERT(reduce_data == NULL); |
__kmp_GOMP_taskgroup_reduction_register(data, NULL, thr->th.th_team_nproc);
2125 | KMP_ATOMIC_ST_REL(&team->t.t_tg_fini_counter[is_ws], 0); |
2126 | KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[is_ws], (void *)data); |
2127 | } else { |
2128 | // Wait for task reduction initialization |
2129 | while ((reduce_data = KMP_ATOMIC_LD_ACQ( |
2130 | &team->t.t_tg_reduce_data[is_ws])) == (void *)1) { |
2131 | KMP_CPU_PAUSE(); |
2132 | } |
2133 | KMP_DEBUG_ASSERT(reduce_data > (void *)1); // should be valid pointer here |
2134 | } |
2135 | // For worksharing constructs, each thread has its own reduction structure. |
2136 | // Have each reduction structure point to same privatized copies of vars. |
2137 | // For parallel, each thread points to same reduction structure and privatized |
2138 | // copies of vars |
2139 | if (is_ws) { |
2140 | __kmp_GOMP_taskgroup_reduction_register( |
data, NULL, thr->th.th_team_nproc,
(uintptr_t *)KMP_ATOMIC_LD_ACQ(&team->t.t_tg_reduce_data[is_ws]));
2143 | } |
2144 | kmp_taskgroup_t *tg = thr->th.th_current_task->td_taskgroup; |
2145 | tg->gomp_data = data; |
2146 | } |
2147 | |
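// Microtask executed by each team member for GOMP_parallel_reductions:
// attach to the shared reduction descriptor, run the user task, close the
// taskgroup, and let the last thread out clear the team's cached descriptor.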
2148 | static unsigned |
2149 | __kmp_GOMP_par_reductions_microtask_wrapper(int *gtid, int *npr, |
2150 | void (*task)(void *), void *data) { |
2151 | kmp_info_t *thr = __kmp_threads[*gtid]; |
2152 | kmp_team_t *team = thr->th.th_team; |
2153 | uintptr_t *reduce_data = *(uintptr_t **)data; |
__kmp_GOMP_init_reductions(*gtid, reduce_data, 0);
2155 | |
2156 | #if OMPT_SUPPORT |
2157 | ompt_frame_t *ompt_frame; |
2158 | ompt_state_t enclosing_state; |
2159 | |
2160 | if (ompt_enabled.enabled) { |
2161 | // save enclosing task state; set current state for task |
2162 | enclosing_state = thr->th.ompt_thread_info.state; |
2163 | thr->th.ompt_thread_info.state = ompt_state_work_parallel; |
2164 | |
2165 | // set task frame |
__ompt_get_task_info_internal(0, NULL, NULL, &ompt_frame, NULL, NULL);
2167 | ompt_frame->exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0); |
2168 | } |
2169 | #endif |
2170 | |
2171 | task(data); |
2172 | |
2173 | #if OMPT_SUPPORT |
2174 | if (ompt_enabled.enabled) { |
2175 | // clear task frame |
2176 | ompt_frame->exit_frame = ompt_data_none; |
2177 | |
2178 | // restore enclosing state |
2179 | thr->th.ompt_thread_info.state = enclosing_state; |
2180 | } |
2181 | #endif |
__kmpc_end_taskgroup(NULL, *gtid);
2183 | // if last thread out, then reset the team's reduce data |
2184 | // the GOMP_taskgroup_reduction_unregister() function will deallocate |
2185 | // private copies after reduction calculations take place. |
2186 | int count = KMP_ATOMIC_INC(&team->t.t_tg_fini_counter[0]); |
2187 | if (count == thr->th.th_team_nproc - 1) { |
2188 | KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[0], NULL); |
2189 | KMP_ATOMIC_ST_REL(&team->t.t_tg_fini_counter[0], 0); |
2190 | } |
2191 | return (unsigned)thr->th.th_team_nproc; |
2192 | } |
2193 | |
2194 | unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_REDUCTIONS)( |
2195 | void (*task)(void *), void *data, unsigned num_threads, |
2196 | unsigned int flags) { |
2197 | MKLOC(loc, "GOMP_parallel_reductions" ); |
2198 | int gtid = __kmp_entry_gtid(); |
2199 | KA_TRACE(20, ("GOMP_parallel_reductions: T#%d\n" , gtid)); |
__kmp_GOMP_fork_call(&loc, gtid, num_threads, flags, task,
(microtask_t)__kmp_GOMP_par_reductions_microtask_wrapper,
2, task, data);
2203 | unsigned retval = |
__kmp_GOMP_par_reductions_microtask_wrapper(&gtid, NULL, task, data);
2205 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_PARALLEL_END)(); |
2206 | KA_TRACE(20, ("GOMP_parallel_reductions exit: T#%d\n" , gtid)); |
2207 | return retval; |
2208 | } |
2209 | |
2210 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_START)( |
2211 | long start, long end, long incr, long sched, long chunk_size, long *istart, |
2212 | long *iend, uintptr_t *reductions, void **mem) { |
2213 | int status = 0; |
2214 | int gtid = __kmp_entry_gtid(); |
2215 | KA_TRACE(20, ("GOMP_loop_start: T#%d, reductions: %p\n" , gtid, reductions)); |
2216 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2218 | if (mem) |
2219 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2220 | if (istart == NULL) |
2221 | return true; |
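// The GOMP sched argument encodes the schedule: 0 = runtime, 1 = static,
// 2 = dynamic, 3 = guided, 4 = nonmonotonic runtime; kmp_sched_monotonic is
// OR'd in as a modifier bit and is decoded below.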
2222 | const long MONOTONIC_FLAG = (long)(kmp_sched_monotonic); |
2223 | long monotonic = sched & MONOTONIC_FLAG; |
2224 | sched &= ~MONOTONIC_FLAG; |
2225 | if (sched == 0) { |
2226 | if (monotonic) |
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_RUNTIME_START)(
start, end, incr, istart, iend);
else
status = KMP_EXPAND_NAME(
KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START)(
start, end, incr, istart, iend);
} else if (sched == 1) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_STATIC_START)(
start, end, incr, chunk_size, istart, iend);
} else if (sched == 2) {
if (monotonic)
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START)(
start, end, incr, chunk_size, istart, iend);
else
status =
KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START)(
start, end, incr, chunk_size, istart, iend);
} else if (sched == 3) {
if (monotonic)
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_GUIDED_START)(
start, end, incr, chunk_size, istart, iend);
else
status =
KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START)(
start, end, incr, chunk_size, istart, iend);
} else if (sched == 4) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START)(
start, end, incr, istart, iend);
2255 | } else { |
2256 | KMP_ASSERT(0); |
2257 | } |
2258 | return status; |
2259 | } |
2260 | |
2261 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_START)( |
2262 | bool up, unsigned long long start, unsigned long long end, |
2263 | unsigned long long incr, long sched, unsigned long long chunk_size, |
2264 | unsigned long long *istart, unsigned long long *iend, uintptr_t *reductions, |
2265 | void **mem) { |
2266 | int status = 0; |
2267 | int gtid = __kmp_entry_gtid(); |
2268 | KA_TRACE(20, |
2269 | ("GOMP_loop_ull_start: T#%d, reductions: %p\n" , gtid, reductions)); |
2270 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2272 | if (mem) |
2273 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2274 | if (istart == NULL) |
2275 | return true; |
2276 | const long MONOTONIC_FLAG = (long)(kmp_sched_monotonic); |
2277 | long monotonic = sched & MONOTONIC_FLAG; |
2278 | sched &= ~MONOTONIC_FLAG; |
2279 | if (sched == 0) { |
2280 | if (monotonic) |
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START)(
up, start, end, incr, istart, iend);
else
status = KMP_EXPAND_NAME(
KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START)(
up, start, end, incr, istart, iend);
} else if (sched == 1) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START)(
up, start, end, incr, chunk_size, istart, iend);
} else if (sched == 2) {
if (monotonic)
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START)(
up, start, end, incr, chunk_size, istart, iend);
else
status = KMP_EXPAND_NAME(
KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START)(
up, start, end, incr, chunk_size, istart, iend);
} else if (sched == 3) {
if (monotonic)
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START)(
up, start, end, incr, chunk_size, istart, iend);
else
status =
KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START)(
up, start, end, incr, chunk_size, istart, iend);
} else if (sched == 4) {
status =
KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START)(
up, start, end, incr, istart, iend);
2310 | } else { |
2311 | KMP_ASSERT(0); |
2312 | } |
2313 | return status; |
2314 | } |
2315 | |
2316 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_START)( |
2317 | unsigned ncounts, long *counts, long sched, long chunk_size, long *istart, |
2318 | long *iend, uintptr_t *reductions, void **mem) { |
2319 | int status = 0; |
2320 | int gtid = __kmp_entry_gtid(); |
2321 | KA_TRACE(20, ("GOMP_loop_doacross_start: T#%d, reductions: %p\n" , gtid, |
2322 | reductions)); |
2323 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2325 | if (mem) |
2326 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2327 | if (istart == NULL) |
2328 | return true; |
2329 | // Ignore any monotonic flag |
2330 | const long MONOTONIC_FLAG = (long)(kmp_sched_monotonic); |
2331 | sched &= ~MONOTONIC_FLAG; |
2332 | if (sched == 0) { |
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START)(
ncounts, counts, istart, iend);
} else if (sched == 1) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START)(
ncounts, counts, chunk_size, istart, iend);
} else if (sched == 2) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START)(
ncounts, counts, chunk_size, istart, iend);
} else if (sched == 3) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START)(
ncounts, counts, chunk_size, istart, iend);
2344 | } else { |
2345 | KMP_ASSERT(0); |
2346 | } |
2347 | return status; |
2348 | } |
2349 | |
2350 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_START)( |
2351 | unsigned ncounts, unsigned long long *counts, long sched, |
2352 | unsigned long long chunk_size, unsigned long long *istart, |
2353 | unsigned long long *iend, uintptr_t *reductions, void **mem) { |
2354 | int status = 0; |
2355 | int gtid = __kmp_entry_gtid(); |
2356 | KA_TRACE(20, ("GOMP_loop_ull_doacross_start: T#%d, reductions: %p\n" , gtid, |
2357 | reductions)); |
2358 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2360 | if (mem) |
2361 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2362 | if (istart == NULL) |
2363 | return true; |
2364 | // Ignore any monotonic flag |
2365 | const long MONOTONIC_FLAG = (long)(kmp_sched_monotonic); |
2366 | sched &= ~MONOTONIC_FLAG; |
2367 | if (sched == 0) { |
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START)(
ncounts, counts, istart, iend);
} else if (sched == 1) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START)(
ncounts, counts, chunk_size, istart, iend);
} else if (sched == 2) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START)(
ncounts, counts, chunk_size, istart, iend);
} else if (sched == 3) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START)(
ncounts, counts, chunk_size, istart, iend);
2379 | } else { |
2380 | KMP_ASSERT(0); |
2381 | } |
2382 | return status; |
2383 | } |
2384 | |
2385 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_START)( |
2386 | long start, long end, long incr, long sched, long chunk_size, long *istart, |
2387 | long *iend, uintptr_t *reductions, void **mem) { |
2388 | int status = 0; |
2389 | int gtid = __kmp_entry_gtid(); |
2390 | KA_TRACE(20, ("GOMP_loop_ordered_start: T#%d, reductions: %p\n" , gtid, |
2391 | reductions)); |
2392 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2394 | if (mem) |
2395 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2396 | if (istart == NULL) |
2397 | return true; |
2398 | // Ignore any monotonic flag |
2399 | const long MONOTONIC_FLAG = (long)(kmp_sched_monotonic); |
2400 | sched &= ~MONOTONIC_FLAG; |
2401 | if (sched == 0) { |
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START)(
start, end, incr, istart, iend);
} else if (sched == 1) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START)(
start, end, incr, chunk_size, istart, iend);
} else if (sched == 2) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START)(
start, end, incr, chunk_size, istart, iend);
} else if (sched == 3) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START)(
start, end, incr, chunk_size, istart, iend);
2413 | } else { |
2414 | KMP_ASSERT(0); |
2415 | } |
2416 | return status; |
2417 | } |
2418 | |
2419 | bool KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_START)( |
2420 | bool up, unsigned long long start, unsigned long long end, |
2421 | unsigned long long incr, long sched, unsigned long long chunk_size, |
2422 | unsigned long long *istart, unsigned long long *iend, uintptr_t *reductions, |
2423 | void **mem) { |
2424 | int status = 0; |
2425 | int gtid = __kmp_entry_gtid(); |
2426 | KA_TRACE(20, ("GOMP_loop_ull_ordered_start: T#%d, reductions: %p\n" , gtid, |
2427 | reductions)); |
2428 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2430 | if (mem) |
2431 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2432 | if (istart == NULL) |
2433 | return true; |
2434 | // Ignore any monotonic flag |
2435 | const long MONOTONIC_FLAG = (long)(kmp_sched_monotonic); |
2436 | sched &= ~MONOTONIC_FLAG; |
2437 | if (sched == 0) { |
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START)(
up, start, end, incr, istart, iend);
} else if (sched == 1) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START)(
up, start, end, incr, chunk_size, istart, iend);
} else if (sched == 2) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START)(
up, start, end, incr, chunk_size, istart, iend);
} else if (sched == 3) {
status = KMP_EXPAND_NAME(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START)(
up, start, end, incr, chunk_size, istart, iend);
2449 | } else { |
2450 | KMP_ASSERT(0); |
2451 | } |
2452 | return status; |
2453 | } |
2454 | |
2455 | unsigned KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS2_START)( |
2456 | unsigned count, uintptr_t *reductions, void **mem) { |
2457 | int gtid = __kmp_entry_gtid(); |
2458 | KA_TRACE(20, |
2459 | ("GOMP_sections2_start: T#%d, reductions: %p\n" , gtid, reductions)); |
2460 | if (reductions) |
__kmp_GOMP_init_reductions(gtid, reductions, 1);
2462 | if (mem) |
2463 | KMP_FATAL(GompFeatureNotSupported, "scan" ); |
2464 | return KMP_EXPAND_NAME(KMP_API_NAME_GOMP_SECTIONS_START)(count); |
2465 | } |
2466 | |
2467 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER)( |
2468 | bool cancelled) { |
2469 | int gtid = __kmp_get_gtid(); |
2470 | MKLOC(loc, "GOMP_workshare_task_reduction_unregister" ); |
2471 | KA_TRACE(20, ("GOMP_workshare_task_reduction_unregister: T#%d\n" , gtid)); |
2472 | kmp_info_t *thr = __kmp_threads[gtid]; |
2473 | kmp_team_t *team = thr->th.th_team; |
2474 | __kmpc_end_taskgroup(NULL, gtid); |
2475 | // If last thread out of workshare, then reset the team's reduce data |
2476 | // the GOMP_taskgroup_reduction_unregister() function will deallocate |
2477 | // private copies after reduction calculations take place. |
2478 | int count = KMP_ATOMIC_INC(&team->t.t_tg_fini_counter[1]); |
2479 | if (count == thr->th.th_team_nproc - 1) { |
2480 | KMP_EXPAND_NAME(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_UNREGISTER) |
((uintptr_t *)KMP_ATOMIC_LD_RLX(&team->t.t_tg_reduce_data[1]));
2482 | KMP_ATOMIC_ST_REL(&team->t.t_tg_reduce_data[1], NULL); |
2483 | KMP_ATOMIC_ST_REL(&team->t.t_tg_fini_counter[1], 0); |
2484 | } |
2485 | if (!cancelled) { |
__kmpc_barrier(&loc, gtid);
2487 | } |
2488 | } |
2489 | |
2490 | // allocator construct |
2491 | void *KMP_EXPAND_NAME(KMP_API_NAME_GOMP_ALLOC)(size_t alignment, size_t size, |
2492 | uintptr_t allocator) { |
2493 | int gtid = __kmp_entry_gtid(); |
2494 | KA_TRACE(20, ("GOMP_alloc: T#%d\n" , gtid)); |
2495 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
2496 | OMPT_STORE_RETURN_ADDRESS(gtid); |
2497 | #endif |
return __kmp_alloc(gtid, alignment, size, (omp_allocator_handle_t)allocator);
2499 | } |
2500 | |
2501 | void KMP_EXPAND_NAME(KMP_API_NAME_GOMP_FREE)(void *ptr, uintptr_t allocator) { |
2502 | int gtid = __kmp_entry_gtid(); |
2503 | KA_TRACE(20, ("GOMP_free: T#%d\n" , gtid)); |
2504 | #if OMPT_SUPPORT && OMPT_OPTIONAL |
2505 | OMPT_STORE_RETURN_ADDRESS(gtid); |
2506 | #endif |
return ___kmpc_free(gtid, ptr, (omp_allocator_handle_t)allocator);
2508 | } |
2509 | |
2510 | /* The following sections of code create aliases for the GOMP_* functions, then |
2511 | create versioned symbols using the assembler directive .symver. This is only |
pertinent for an ELF .so library. The KMP_VERSION_SYMBOL macro is defined in
2513 | kmp_os.h */ |
2514 | |
2515 | #ifdef KMP_USE_VERSION_SYMBOLS |
2516 | // GOMP_1.0 versioned symbols |
2517 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_END, 10, "GOMP_1.0" ); |
2518 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ATOMIC_START, 10, "GOMP_1.0" ); |
2519 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER, 10, "GOMP_1.0" ); |
2520 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_END, 10, "GOMP_1.0" ); |
2521 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_END, 10, "GOMP_1.0" ); |
2522 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_NAME_START, 10, "GOMP_1.0" ); |
2523 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CRITICAL_START, 10, "GOMP_1.0" ); |
2524 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_NEXT, 10, "GOMP_1.0" ); |
2525 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DYNAMIC_START, 10, "GOMP_1.0" ); |
2526 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END, 10, "GOMP_1.0" ); |
2527 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_NOWAIT, 10, "GOMP_1.0" ); |
2528 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_NEXT, 10, "GOMP_1.0" ); |
2529 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_GUIDED_START, 10, "GOMP_1.0" ); |
2530 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_NEXT, 10, "GOMP_1.0" ); |
2531 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_DYNAMIC_START, 10, |
2532 | "GOMP_1.0" ); |
2533 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_NEXT, 10, "GOMP_1.0" ); |
2534 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_GUIDED_START, 10, "GOMP_1.0" ); |
2535 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_NEXT, 10, "GOMP_1.0" ); |
2536 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_RUNTIME_START, 10, |
2537 | "GOMP_1.0" ); |
2538 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_NEXT, 10, "GOMP_1.0" ); |
2539 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_STATIC_START, 10, "GOMP_1.0" ); |
2540 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_NEXT, 10, "GOMP_1.0" ); |
2541 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_RUNTIME_START, 10, "GOMP_1.0" ); |
2542 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_NEXT, 10, "GOMP_1.0" ); |
2543 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_STATIC_START, 10, "GOMP_1.0" ); |
2544 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_END, 10, "GOMP_1.0" ); |
2545 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ORDERED_START, 10, "GOMP_1.0" ); |
2546 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_END, 10, "GOMP_1.0" ); |
2547 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC_START, 10, |
2548 | "GOMP_1.0" ); |
2549 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED_START, 10, |
2550 | "GOMP_1.0" ); |
2551 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME_START, 10, |
2552 | "GOMP_1.0" ); |
2553 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC_START, 10, |
2554 | "GOMP_1.0" ); |
2555 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS_START, 10, "GOMP_1.0" ); |
2556 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_START, 10, "GOMP_1.0" ); |
2557 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END, 10, "GOMP_1.0" ); |
2558 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_NOWAIT, 10, "GOMP_1.0" ); |
2559 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_NEXT, 10, "GOMP_1.0" ); |
2560 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_START, 10, "GOMP_1.0" ); |
2561 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_END, 10, "GOMP_1.0" ); |
2562 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_COPY_START, 10, "GOMP_1.0" ); |
2563 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SINGLE_START, 10, "GOMP_1.0" ); |
2564 | |
2565 | // GOMP_2.0 versioned symbols |
2566 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK, 20, "GOMP_2.0" ); |
2567 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT, 20, "GOMP_2.0" ); |
2568 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_NEXT, 20, "GOMP_2.0" ); |
2569 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DYNAMIC_START, 20, "GOMP_2.0" ); |
2570 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_NEXT, 20, "GOMP_2.0" ); |
2571 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_GUIDED_START, 20, "GOMP_2.0" ); |
2572 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_NEXT, 20, |
2573 | "GOMP_2.0" ); |
2574 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_DYNAMIC_START, 20, |
2575 | "GOMP_2.0" ); |
2576 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_NEXT, 20, |
2577 | "GOMP_2.0" ); |
2578 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_GUIDED_START, 20, |
2579 | "GOMP_2.0" ); |
2580 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_NEXT, 20, |
2581 | "GOMP_2.0" ); |
2582 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_RUNTIME_START, 20, |
2583 | "GOMP_2.0" ); |
2584 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_NEXT, 20, |
2585 | "GOMP_2.0" ); |
2586 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_STATIC_START, 20, |
2587 | "GOMP_2.0" ); |
2588 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_NEXT, 20, "GOMP_2.0" ); |
2589 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_RUNTIME_START, 20, "GOMP_2.0" ); |
2590 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_NEXT, 20, "GOMP_2.0" ); |
2591 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_STATIC_START, 20, "GOMP_2.0" ); |
2592 | |
2593 | // GOMP_3.0 versioned symbols |
2594 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKYIELD, 30, "GOMP_3.0" ); |
2595 | |
2596 | // GOMP_4.0 versioned symbols |
2597 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL, 40, "GOMP_4.0" ); |
2598 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_SECTIONS, 40, "GOMP_4.0" ); |
2599 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_DYNAMIC, 40, "GOMP_4.0" ); |
2600 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_GUIDED, 40, "GOMP_4.0" ); |
2601 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_RUNTIME, 40, "GOMP_4.0" ); |
2602 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_STATIC, 40, "GOMP_4.0" ); |
2603 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_START, 40, "GOMP_4.0" ); |
2604 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_END, 40, "GOMP_4.0" ); |
2605 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_BARRIER_CANCEL, 40, "GOMP_4.0" ); |
2606 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCEL, 40, "GOMP_4.0" ); |
2607 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_CANCELLATION_POINT, 40, "GOMP_4.0" ); |
2608 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_END_CANCEL, 40, "GOMP_4.0" ); |
2609 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS_END_CANCEL, 40, "GOMP_4.0" ); |
2610 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET, 40, "GOMP_4.0" ); |
2611 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_DATA, 40, "GOMP_4.0" ); |
2612 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_END_DATA, 40, "GOMP_4.0" ); |
2613 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TARGET_UPDATE, 40, "GOMP_4.0" ); |
2614 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS, 40, "GOMP_4.0" ); |
2615 | |
2616 | // GOMP_4.5 versioned symbols |
2617 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP, 45, "GOMP_4.5" ); |
2618 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKLOOP_ULL, 45, "GOMP_4.5" ); |
2619 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_POST, 45, "GOMP_4.5" ); |
2620 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_WAIT, 45, "GOMP_4.5" ); |
2621 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_STATIC_START, 45, |
2622 | "GOMP_4.5" ); |
2623 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_DYNAMIC_START, 45, |
2624 | "GOMP_4.5" ); |
2625 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_GUIDED_START, 45, |
2626 | "GOMP_4.5" ); |
2627 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_RUNTIME_START, 45, |
2628 | "GOMP_4.5" ); |
2629 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_POST, 45, "GOMP_4.5" ); |
2630 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_DOACROSS_ULL_WAIT, 45, "GOMP_4.5" ); |
2631 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_STATIC_START, 45, |
2632 | "GOMP_4.5" ); |
2633 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_DYNAMIC_START, 45, |
2634 | "GOMP_4.5" ); |
2635 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_GUIDED_START, 45, |
2636 | "GOMP_4.5" ); |
2637 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_RUNTIME_START, 45, |
2638 | "GOMP_4.5" ); |
2639 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_START, 45, |
2640 | "GOMP_4.5" ); |
2641 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_DYNAMIC_NEXT, 45, |
2642 | "GOMP_4.5" ); |
2643 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_START, 45, |
2644 | "GOMP_4.5" ); |
2645 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_GUIDED_NEXT, 45, |
2646 | "GOMP_4.5" ); |
2647 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_START, 45, |
2648 | "GOMP_4.5" ); |
2649 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_DYNAMIC_NEXT, 45, |
2650 | "GOMP_4.5" ); |
2651 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_START, 45, |
2652 | "GOMP_4.5" ); |
2653 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_GUIDED_NEXT, 45, |
2654 | "GOMP_4.5" ); |
2655 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_DYNAMIC, 45, |
2656 | "GOMP_4.5" ); |
2657 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_GUIDED, 45, |
2658 | "GOMP_4.5" ); |
2659 | |
2660 | // GOMP_5.0 versioned symbols |
2661 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_NEXT, 50, |
2662 | "GOMP_5.0" ); |
2663 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_MAYBE_NONMONOTONIC_RUNTIME_START, 50, |
2664 | "GOMP_5.0" ); |
2665 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_NEXT, 50, |
2666 | "GOMP_5.0" ); |
2667 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_NONMONOTONIC_RUNTIME_START, 50, |
2668 | "GOMP_5.0" ); |
2669 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_NEXT, |
2670 | 50, "GOMP_5.0" ); |
2671 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_MAYBE_NONMONOTONIC_RUNTIME_START, |
2672 | 50, "GOMP_5.0" ); |
2673 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_NEXT, 50, |
2674 | "GOMP_5.0" ); |
2675 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_NONMONOTONIC_RUNTIME_START, 50, |
2676 | "GOMP_5.0" ); |
2677 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_NONMONOTONIC_RUNTIME, 50, |
2678 | "GOMP_5.0" ); |
2679 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_LOOP_MAYBE_NONMONOTONIC_RUNTIME, |
2680 | 50, "GOMP_5.0" ); |
2681 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TEAMS_REG, 50, "GOMP_5.0" ); |
2682 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKWAIT_DEPEND, 50, "GOMP_5.0" ); |
2683 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_REGISTER, 50, |
2684 | "GOMP_5.0" ); |
2685 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASKGROUP_REDUCTION_UNREGISTER, 50, |
2686 | "GOMP_5.0" ); |
2687 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_TASK_REDUCTION_REMAP, 50, "GOMP_5.0" ); |
2688 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_PARALLEL_REDUCTIONS, 50, "GOMP_5.0" ); |
2689 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_START, 50, "GOMP_5.0" ); |
2690 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_START, 50, "GOMP_5.0" ); |
2691 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_DOACROSS_START, 50, "GOMP_5.0" ); |
2692 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_DOACROSS_START, 50, "GOMP_5.0" ); |
2693 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ORDERED_START, 50, "GOMP_5.0" ); |
2694 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_LOOP_ULL_ORDERED_START, 50, "GOMP_5.0" ); |
2695 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_SECTIONS2_START, 50, "GOMP_5.0" ); |
2696 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_WORKSHARE_TASK_REDUCTION_UNREGISTER, 50, |
2697 | "GOMP_5.0" ); |
2698 | |
2699 | // GOMP_5.0.1 versioned symbols |
2700 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_ALLOC, 501, "GOMP_5.0.1" ); |
2701 | KMP_VERSION_SYMBOL(KMP_API_NAME_GOMP_FREE, 501, "GOMP_5.0.1" ); |
2702 | #endif // KMP_USE_VERSION_SYMBOLS |
2703 | |
2704 | #ifdef __cplusplus |
2705 | } // extern "C" |
2706 | #endif // __cplusplus |
2707 | |