/*
 * omp-icv.cpp -- OMPD Internal Control Variable handling
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// clang-format off
/* clang-format expects kmp.h before omp.h, which results in a build break
 * due to a few redeclarations.
 */
#include "omp-debug.h"
// NOLINTNEXTLINE "to avoid clang-tidy warning for the same reason as above."
#include "omp.h"
#include "ompd-private.h"
#include "TargetValue.h"
#define OMPD_SKIP_HWLOC 1
#include "kmp.h"
#undef OMPD_SKIP_HWLOC
#include <cstring>

/* The ICVs ompd-final-var and ompd-implicit-var below are kept for backward
 * compatibility with OpenMP 5.0.
 */
#define FOREACH_OMPD_ICV(macro)                                                     \
  macro(dyn_var, "dyn-var", ompd_scope_thread, 0)                                   \
  macro(run_sched_var, "run-sched-var", ompd_scope_task, 0)                         \
  macro(stacksize_var, "stacksize-var", ompd_scope_address_space, 0)                \
  macro(cancel_var, "cancel-var", ompd_scope_address_space, 0)                      \
  macro(max_task_priority_var, "max-task-priority-var", ompd_scope_address_space, 0)\
  macro(debug_var, "debug-var", ompd_scope_address_space, 0)                        \
  macro(nthreads_var, "nthreads-var", ompd_scope_thread, 0)                         \
  macro(display_affinity_var, "display-affinity-var", ompd_scope_address_space, 0)  \
  macro(affinity_format_var, "affinity-format-var", ompd_scope_address_space, 0)    \
  macro(default_device_var, "default-device-var", ompd_scope_thread, 0)             \
  macro(tool_var, "tool-var", ompd_scope_address_space, 0)                          \
  macro(tool_libraries_var, "tool-libraries-var", ompd_scope_address_space, 0)      \
  macro(tool_verbose_init_var, "tool-verbose-init-var", ompd_scope_address_space, 0)\
  macro(levels_var, "levels-var", ompd_scope_parallel, 1)                           \
  macro(active_levels_var, "active-levels-var", ompd_scope_parallel, 0)             \
  macro(thread_limit_var, "thread-limit-var", ompd_scope_task, 0)                   \
  macro(max_active_levels_var, "max-active-levels-var", ompd_scope_task, 0)         \
  macro(bind_var, "bind-var", ompd_scope_task, 0)                                   \
  macro(num_procs_var, "num-procs-var", ompd_scope_address_space, 0)                \
  macro(ompd_num_procs_var, "ompd-num-procs-var", ompd_scope_address_space, 0)      \
  macro(thread_num_var, "thread-num-var", ompd_scope_thread, 1)                     \
  macro(ompd_thread_num_var, "ompd-thread-num-var", ompd_scope_thread, 1)           \
  macro(final_var, "final-task-var", ompd_scope_task, 0)                            \
  macro(ompd_final_var, "ompd-final-var", ompd_scope_task, 0)                       \
  macro(ompd_final_task_var, "ompd-final-task-var", ompd_scope_task, 0)             \
  macro(implicit_var, "implicit-task-var", ompd_scope_task, 0)                      \
  macro(ompd_implicit_var, "ompd-implicit-var", ompd_scope_task, 0)                 \
  macro(ompd_implicit_task_var, "ompd-implicit-task-var", ompd_scope_task, 0)       \
  macro(team_size_var, "team-size-var", ompd_scope_parallel, 1)                     \
  macro(ompd_team_size_var, "ompd-team-size-var", ompd_scope_parallel, 1)

void __ompd_init_icvs(const ompd_callbacks_t *table) { callbacks = table; }

enum ompd_icv {
  ompd_icv_undefined_marker =
      0, // ompd_icv_undefined is already defined in ompd.h
#define ompd_icv_macro(v, n, s, d) ompd_icv_##v,
  FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
  ompd_icv_after_last_icv
};

static const char *ompd_icv_string_values[] = {"undefined",
#define ompd_icv_macro(v, n, s, d) n,
                                               FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};

static const ompd_scope_t ompd_icv_scope_values[] = {
    ompd_scope_global, // undefined marker
#define ompd_icv_macro(v, n, s, d) s,
    FOREACH_OMPD_ICV(ompd_icv_macro)
#undef ompd_icv_macro
};
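
/* For illustration: the X-macro above keeps the three tables in sync. Each
 * FOREACH_OMPD_ICV entry expands to one enumerator, one name string, and one
 * scope, so the first few generated entries look like this (sketch):
 *
 *   enum ompd_icv { ompd_icv_undefined_marker = 0, ompd_icv_dyn_var,
 *                   ompd_icv_run_sched_var, ... };
 *   static const char *ompd_icv_string_values[] = {"undefined", "dyn-var",
 *                                                  "run-sched-var", ...};
 *   static const ompd_scope_t ompd_icv_scope_values[] = {
 *       ompd_scope_global, ompd_scope_thread, ompd_scope_task, ...};
 */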

// clang-format on
ompd_rc_t ompd_enumerate_icvs(ompd_address_space_handle_t *handle,
                              ompd_icv_id_t current, ompd_icv_id_t *next_id,
                              const char **next_icv_name,
                              ompd_scope_t *next_scope, int *more) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (!next_id || !next_icv_name || !next_scope || !more) {
    return ompd_rc_bad_input;
  }
  if (current + 1 >= ompd_icv_after_last_icv) {
    return ompd_rc_bad_input;
  }

  *next_id = current + 1;

  char *icv_name = NULL;
  ompd_rc_t ret = callbacks->alloc_memory(
      std::strlen(ompd_icv_string_values[*next_id]) + 1, (void **)&icv_name);
  *next_icv_name = icv_name;
  if (ret != ompd_rc_ok) {
    return ret;
  }
  std::strcpy(icv_name, ompd_icv_string_values[*next_id]);

  *next_scope = ompd_icv_scope_values[*next_id];

  if ((*next_id) + 1 >= ompd_icv_after_last_icv) {
    *more = 0;
  } else {
    *more = 1;
  }

  return ompd_rc_ok;
}
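
/* A minimal usage sketch (assuming an initialized address space handle `ah`)
 * of how a tool iterates the ICVs exposed above:
 *
 *   ompd_icv_id_t id = 0; // start from the undefined marker
 *   const char *name;
 *   ompd_scope_t scope;
 *   int more = 1;
 *   while (more && ompd_enumerate_icvs(ah, id, &id, &name, &scope,
 *                                      &more) == ompd_rc_ok) {
 *     // `name` was allocated via the tool's alloc_memory callback and is
 *     // owned by the tool from here on.
 *   }
 */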

static ompd_rc_t create_empty_string(const char **empty_string_ptr) {
  char *empty_str;
  ompd_rc_t ret;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ret = callbacks->alloc_memory(1, (void **)&empty_str);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  empty_str[0] = '\0';
  *empty_string_ptr = empty_str;
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_dynamic(
    ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle */
    ompd_word_t *dyn_val /* OUT: dynamic adjustment of threads */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  int8_t dynamic;
  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1)
          .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
          .cast("kmp_internal_control_t", 0)
          .access(
              "dynamic") /*__kmp_threads[t]->th.th_current_task->td_icvs.dynamic*/
          .castBase()
          .getValue(dynamic);
  *dyn_val = dynamic;
  return ret;
}
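
/* The TValue chain above is the access pattern used throughout this file: it
 * walks a data structure in the debuggee through the tool's memory-access
 * callbacks instead of dereferencing pointers directly. A generic sketch:
 *
 *   TValue(context, symbol_or_handle)
 *       .cast("struct_type")   // interpret the target bytes as this type
 *       .access("field")       // follow a member (and pointer, if any)
 *       .castBase()            // reinterpret as a primitive base type
 *       .getValue(out);        // copy the value into tool memory
 *
 * Each step returns a TValue carrying an error state (see getError() further
 * below), so a failure anywhere in the chain surfaces in the final
 * ompd_rc_t. */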

static ompd_rc_t
ompd_get_stacksize(ompd_address_space_handle_t
                       *addr_handle, /* IN: handle for the address space */
                   ompd_word_t *stacksize_val /* OUT: per thread stack size */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  ompd_rc_t ret;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  size_t stacksize;
  ret = TValue(context, "__kmp_stksize")
            .castBase("__kmp_stksize")
            .getValue(stacksize);
  *stacksize_val = stacksize;
  return ret;
}

static ompd_rc_t ompd_get_cancellation(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *cancellation_val /* OUT: cancellation value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  int omp_cancellation;
  ret = TValue(context, "__kmp_omp_cancellation")
            .castBase("__kmp_omp_cancellation")
            .getValue(omp_cancellation);
  *cancellation_val = omp_cancellation;
  return ret;
}

static ompd_rc_t ompd_get_max_task_priority(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *max_task_priority_val /* OUT: max task priority value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  int max_task_priority;
  ret = TValue(context, "__kmp_max_task_priority")
            .castBase("__kmp_max_task_priority")
            .getValue(max_task_priority);
  *max_task_priority_val = max_task_priority;
  return ret;
}

static ompd_rc_t
ompd_get_debug(ompd_address_space_handle_t
                   *addr_handle, /* IN: handle for the address space */
               ompd_word_t *debug_val /* OUT: debug value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  uint64_t ompd_state_val;
  ret = TValue(context, "ompd_state")
            .castBase("ompd_state")
            .getValue(ompd_state_val);
  if (ompd_state_val > 0) {
    *debug_val = 1;
  } else {
    *debug_val = 0;
  }
  return ret;
}

/* Helper routine for the ompd_get_nthreads routines */
static ompd_rc_t ompd_get_nthreads_aux(ompd_thread_handle_t *thread_handle,
                                       uint32_t *used,
                                       uint32_t *current_nesting_level,
                                       uint32_t *nproc) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_nth")
                      .cast("kmp_nested_nthreads_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  TValue taskdata =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1);

  ret = taskdata
            .access("td_team") /*__kmp_threads[t]->th.th_current_task->td_team*/
            .cast("kmp_team_p", 1)
            .access("t") /*__kmp_threads[t]->th.th_current_task->td_team->t*/
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level") /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata.cast("kmp_taskdata_t", 1)
            .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
            .cast("kmp_internal_control_t", 0)
            .access(
                "nproc") /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
            .castBase(ompd_type_int)
            .getValue(*nproc);
  if (ret != ompd_rc_ok)
    return ret;

  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_nthreads(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    ompd_word_t *nthreads_var_val /* OUT: nthreads-var (of integer type)
                                     value */
) {
  uint32_t used;
  uint32_t nproc;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
                              &nproc);
  if (ret != ompd_rc_ok)
    return ret;

  /*__kmp_threads[t]->th.th_current_task->td_icvs.nproc*/
  *nthreads_var_val = nproc;
  /* If nthreads-var is a list with more than one element, then the value of
     this ICV cannot be represented by an integer type. In this case,
     ompd_rc_incomplete is returned. The tool can check the return value and
     can choose to invoke ompd_get_icv_string_from_scope() if needed.
     The used > 0 check guards against unsigned wraparound of used - 1 when
     the nested-nthreads list is empty. */
  if (used > 0 && current_nesting_level < used - 1) {
    return ompd_rc_incomplete;
  }
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_nthreads(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    const char **nthreads_list_string /* OUT: string list of comma separated
                                         nthreads values */
) {
  uint32_t used;
  uint32_t nproc;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_nthreads_aux(thread_handle, &used, &current_nesting_level,
                              &nproc);
  if (ret != ompd_rc_ok)
    return ret;

  uint32_t num_list_elems;
  if (used == 0 || current_nesting_level >= used) {
    num_list_elems = 1;
  } else {
    num_list_elems = used - current_nesting_level;
  }
  size_t buffer_size =
      16 /* digits per element including the comma separator */
          * num_list_elems +
      1; /* terminating NUL */
  char *nthreads_list_str;
  ret = callbacks->alloc_memory(buffer_size, (void **)&nthreads_list_str);
  if (ret != ompd_rc_ok)
    return ret;

  /* The nthreads-var list would be:
     [__kmp_threads[t]->th.th_current_task->td_icvs.nproc,
      __kmp_nested_nth.nth[current_nesting_level + 1],
      __kmp_nested_nth.nth[current_nesting_level + 2],
      ...,
      __kmp_nested_nth.nth[used - 1]]*/

  sprintf(nthreads_list_str, "%d", nproc);
  *nthreads_list_string = nthreads_list_str;
  if (num_list_elems == 1) {
    return ompd_rc_ok;
  }

  char temp_value[16];
  uint32_t nth_value;

  for (current_nesting_level++; /* the list element for this nesting
                                 * level has already been accounted for
                                 * by nproc */
       current_nesting_level < used; current_nesting_level++) {

    ret = TValue(thread_handle->ah->context, "__kmp_nested_nth")
              .cast("kmp_nested_nthreads_t")
              .access("nth")
              .cast("int", 1)
              .getArrayElement(current_nesting_level)
              .castBase(ompd_type_int)
              .getValue(nth_value);

    if (ret != ompd_rc_ok)
      return ret;

    sprintf(temp_value, ",%d", nth_value);
    strcat(nthreads_list_str, temp_value);
  }

  return ompd_rc_ok;
}
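
/* For example, with nproc == 4 and the remaining __kmp_nested_nth.nth
 * entries holding {8, 2}, the routine above returns the string "4,8,2"
 * (values purely illustrative). */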

static ompd_rc_t ompd_get_display_affinity(
    ompd_address_space_handle_t
        *addr_handle, /* IN: handle for the address space */
    ompd_word_t *display_affinity_val /* OUT: display affinity value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  ompd_rc_t ret;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ret = TValue(context, "__kmp_display_affinity")
            .castBase("__kmp_display_affinity")
            .getValue(*display_affinity_val);
  return ret;
}

static ompd_rc_t ompd_get_affinity_format(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle */
    const char **affinity_format_string /* OUT: affinity format string */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_affinity_format")
            .cast("char", 1)
            .getString(affinity_format_string);
  return ret;
}

static ompd_rc_t ompd_get_tool_libraries(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle */
    const char **tool_libraries_string /* OUT: tool libraries string */
) {
  if (!tool_libraries_string)
    return ompd_rc_bad_input;

  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_tool_libraries")
            .cast("char", 1)
            .getString(tool_libraries_string);
  if (ret == ompd_rc_unsupported) {
    ret = create_empty_string(tool_libraries_string);
  }
  return ret;
}

static ompd_rc_t ompd_get_default_device(
    ompd_thread_handle_t *thread_handle, /* IN: handle for the thread */
    ompd_word_t *default_device_val /* OUT: default device value */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks)
    return ompd_rc_callback_error;

  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_current_task") /*__kmp_threads[t]->th.th_current_task*/
          .cast("kmp_taskdata_t", 1)
          .access("td_icvs") /*__kmp_threads[t]->th.th_current_task->td_icvs*/
          .cast("kmp_internal_control_t", 0)
          /*__kmp_threads[t]->th.th_current_task->td_icvs.default_device*/
          .access("default_device")
          .castBase()
          .getValue(*default_device_val);
  return ret;
}

static ompd_rc_t
ompd_get_tool(ompd_address_space_handle_t
                  *addr_handle, /* IN: handle for the address space */
              ompd_word_t *tool_val /* OUT: tool value */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;

  ret =
      TValue(context, "__kmp_tool").castBase("__kmp_tool").getValue(*tool_val);
  return ret;
}

static ompd_rc_t ompd_get_tool_verbose_init(
    ompd_address_space_handle_t *addr_handle, /* IN: address space handle */
    const char **tool_verbose_init_string /* OUT: tool verbose init string */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }
  ompd_rc_t ret;
  ret = TValue(context, "__kmp_tool_verbose_init")
            .cast("char", 1)
            .getString(tool_verbose_init_string);
  if (ret == ompd_rc_unsupported) {
    ret = create_empty_string(tool_verbose_init_string);
  }
  return ret;
}

static ompd_rc_t ompd_get_level(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: nesting level */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;

  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  uint32_t res;

  ompd_rc_t ret = TValue(context, parallel_handle->th)
                      .cast("kmp_base_team_t", 0) /*t*/
                      .access("t_level") /*t.t_level*/
                      .castBase()
                      .getValue(res);
  *val = res;
  return ret;
}

static ompd_rc_t ompd_get_active_level(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: active nesting level */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  uint32_t res;

  ompd_rc_t ret = TValue(context, parallel_handle->th)
                      .cast("kmp_base_team_t", 0) /*t*/
                      .access("t_active_level") /*t.t_active_level*/
                      .castBase()
                      .getValue(res);
  *val = res;
  return ret;
}

static ompd_rc_t
ompd_get_num_procs(ompd_address_space_handle_t
                       *addr_handle, /* IN: handle for the address space */
                   ompd_word_t *val /* OUT: number of available processors */
) {
  ompd_address_space_context_t *context = addr_handle->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  if (!val)
    return ompd_rc_bad_input;
  ompd_rc_t ret;

  int nth;
  ret = TValue(context, "__kmp_avail_proc")
            .castBase("__kmp_avail_proc")
            .getValue(nth);
  *val = nth;
  return ret;
}

static ompd_rc_t ompd_get_thread_limit(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
    ompd_word_t *val /* OUT: max number of threads */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_icvs") // td->td_icvs
                      .cast("kmp_internal_control_t", 0)
                      .access("thread_limit") // td->td_icvs.thread_limit
                      .castBase()
                      .getValue(*val);

  return ret;
}

static ompd_rc_t ompd_get_thread_num(
    ompd_thread_handle_t *thread_handle, /* IN: OpenMP thread handle */
    ompd_word_t *val /* OUT: number of the thread within the team */
) {
  if (!thread_handle)
    return ompd_rc_stale_handle;
  if (!thread_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret =
      TValue(context, thread_handle->th) /*__kmp_threads[t]->th*/
          .cast("kmp_base_info_t")
          .access("th_info") /*__kmp_threads[t]->th.th_info*/
          .cast("kmp_desc_t")
          .access("ds") /*__kmp_threads[t]->th.th_info.ds*/
          .cast("kmp_desc_base_t")
          .access("ds_tid") /*__kmp_threads[t]->th.th_info.ds.ds_tid*/
          .castBase()
          .getValue(*val);
  return ret;
}

static ompd_rc_t
ompd_in_final(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
              ompd_word_t *val /* OUT: 1 if the task is final, 0 otherwise */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_flags") // td->td_flags
                      .cast("kmp_tasking_flags_t")
                      .check("final", val); // td->td_flags.final

  return ret;
}

static ompd_rc_t ompd_get_max_active_levels(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
    ompd_word_t *val /* OUT: max number of active parallel levels */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret =
      TValue(context, task_handle->th)
          .cast("kmp_taskdata_t") // td
          .access("td_icvs") // td->td_icvs
          .cast("kmp_internal_control_t", 0)
          .access("max_active_levels") // td->td_icvs.max_active_levels
          .castBase()
          .getValue(*val);

  return ret;
}

static ompd_rc_t ompd_get_run_schedule(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
    const char **run_sched_string /* OUT: run schedule string composed of
                                     an optional monotonicity modifier, the
                                     kind, and the chunk size */
) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  int kind;

  TValue sched = TValue(context, task_handle->th)
                     .cast("kmp_taskdata_t") // td
                     .access("td_icvs") // td->td_icvs
                     .cast("kmp_internal_control_t", 0)
                     .access("sched") // td->td_icvs.sched
                     .cast("kmp_r_sched_t", 0);

  ompd_rc_t ret = sched
                      .access("r_sched_type") // td->td_icvs.sched.r_sched_type
                      .castBase()
                      .getValue(kind);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  int chunk = 0;
  ret = sched
            .access("chunk") // td->td_icvs.sched.chunk
            .castBase()
            .getValue(chunk);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  char *run_sched_var_string;
  ret = callbacks->alloc_memory(100, (void **)&run_sched_var_string);
  if (ret != ompd_rc_ok) {
    return ret;
  }
  run_sched_var_string[0] = '\0';
  if (SCHEDULE_HAS_MONOTONIC(kind)) {
    strcpy(run_sched_var_string, "monotonic:");
  } else if (SCHEDULE_HAS_NONMONOTONIC(kind)) {
    strcpy(run_sched_var_string, "nonmonotonic:");
  }

  bool static_unchunked = false;
  switch (SCHEDULE_WITHOUT_MODIFIERS(kind)) {
  case kmp_sch_static:
  case kmp_sch_static_greedy:
  case kmp_sch_static_balanced:
    static_unchunked = true;
    strcat(run_sched_var_string, "static");
    break;
  case kmp_sch_static_chunked:
    strcat(run_sched_var_string, "static");
    break;
  case kmp_sch_dynamic_chunked:
    strcat(run_sched_var_string, "dynamic");
    break;
  case kmp_sch_guided_chunked:
  case kmp_sch_guided_iterative_chunked:
  case kmp_sch_guided_analytical_chunked:
    strcat(run_sched_var_string, "guided");
    break;
  case kmp_sch_auto:
    strcat(run_sched_var_string, "auto");
    break;
  case kmp_sch_trapezoidal:
    strcat(run_sched_var_string, "trapezoidal");
    break;
  case kmp_sch_static_steal:
    strcat(run_sched_var_string, "static_steal");
    break;
  default:
    ret = callbacks->free_memory((void *)(run_sched_var_string));
    if (ret != ompd_rc_ok) {
      return ret;
    }
    ret = create_empty_string(run_sched_string);
    return ret;
  }

  if (static_unchunked == true) {
    // To be in sync with what OMPT returns.
    // Chunk was not set. Shown with a zero value.
    chunk = 0;
  }

  char temp_str[16];
  sprintf(temp_str, ",%d", chunk);
  strcat(run_sched_var_string, temp_str);
  *run_sched_string = run_sched_var_string;
  return ret;
}
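
/* For example, a dynamic schedule with chunk size 4 yields the string
 * "dynamic,4", and an unchunked static schedule with a monotonic modifier
 * yields "monotonic:static,0" (examples illustrative, derived from the
 * switch above). */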

/* Helper routine for the ompd_get_proc_bind routines */
static ompd_rc_t ompd_get_proc_bind_aux(ompd_task_handle_t *task_handle,
                                        uint32_t *used,
                                        uint32_t *current_nesting_level,
                                        uint32_t *proc_bind) {
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, "__kmp_nested_proc_bind")
                      .cast("kmp_nested_proc_bind_t")
                      .access("used")
                      .castBase(ompd_type_int)
                      .getValue(*used);
  if (ret != ompd_rc_ok)
    return ret;

  TValue taskdata = TValue(context, task_handle->th) /* td */
                        .cast("kmp_taskdata_t");

  ret = taskdata
            .access("td_team") /* td->td_team */
            .cast("kmp_team_p", 1)
            .access("t") /* td->td_team->t */
            .cast("kmp_base_team_t", 0) /*t*/
            .access("t_level") /*t.t_level*/
            .castBase(ompd_type_int)
            .getValue(*current_nesting_level);
  if (ret != ompd_rc_ok)
    return ret;

  ret = taskdata
            .access("td_icvs") /* td->td_icvs */
            .cast("kmp_internal_control_t", 0)
            .access("proc_bind") /* td->td_icvs.proc_bind */
            .castBase()
            .getValue(*proc_bind);
  return ret;
}

static ompd_rc_t
ompd_get_proc_bind(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
                   ompd_word_t *bind /* OUT: kind of proc-binding */
) {
  uint32_t used;
  uint32_t proc_bind;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
                               &proc_bind);
  if (ret != ompd_rc_ok)
    return ret;

  *bind = proc_bind;
  /* If bind-var is a list with more than one element, then the value of
     this ICV cannot be represented by an integer type. In this case,
     ompd_rc_incomplete is returned. The tool can check the return value and
     can choose to invoke ompd_get_icv_string_from_scope() if needed.
     The used > 0 check guards against unsigned wraparound of used - 1 when
     the nested proc-bind list is empty. */
  if (used > 0 && current_nesting_level < used - 1) {
    return ompd_rc_incomplete;
  }
  return ompd_rc_ok;
}

static ompd_rc_t ompd_get_proc_bind(
    ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
    const char **proc_bind_list_string /* OUT: string list of comma separated
                                          bind-var values */
) {
  uint32_t used;
  uint32_t proc_bind;
  uint32_t current_nesting_level;

  ompd_rc_t ret;
  ret = ompd_get_proc_bind_aux(task_handle, &used, &current_nesting_level,
                               &proc_bind);
  if (ret != ompd_rc_ok)
    return ret;

  uint32_t num_list_elems;
  if (used == 0 || current_nesting_level >= used) {
    num_list_elems = 1;
  } else {
    num_list_elems = used - current_nesting_level;
  }
  size_t buffer_size =
      16 /* digits per element including the comma separator */
          * num_list_elems +
      1; /* terminating NUL */
  char *proc_bind_list_str;
  ret = callbacks->alloc_memory(buffer_size, (void **)&proc_bind_list_str);
  if (ret != ompd_rc_ok)
    return ret;

  /* The bind-var list would be:
     [td->td_icvs.proc_bind,
      __kmp_nested_proc_bind.bind_types[current_nesting_level + 1],
      __kmp_nested_proc_bind.bind_types[current_nesting_level + 2],
      ...,
      __kmp_nested_proc_bind.bind_types[used - 1]]*/

  sprintf(proc_bind_list_str, "%d", proc_bind);
  *proc_bind_list_string = proc_bind_list_str;
  if (num_list_elems == 1) {
    return ompd_rc_ok;
  }

  char temp_value[16];
  uint32_t bind_types_value;

  for (current_nesting_level++; /* the list element for this nesting
                                 * level has already been accounted for
                                 * by proc_bind */
       current_nesting_level < used; current_nesting_level++) {

    ret = TValue(task_handle->ah->context, "__kmp_nested_proc_bind")
              .cast("kmp_nested_proc_bind_t")
              .access("bind_types")
              .cast("int", 1)
              .getArrayElement(current_nesting_level)
              .castBase(ompd_type_int)
              .getValue(bind_types_value);

    if (ret != ompd_rc_ok)
      return ret;

    sprintf(temp_value, ",%d", bind_types_value);
    strcat(proc_bind_list_str, temp_value);
  }

  return ompd_rc_ok;
}
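
/* For example, if td->td_icvs.proc_bind is 2 and the remaining
 * __kmp_nested_proc_bind.bind_types entries are {3, 1}, the routine above
 * returns "2,3,1" (values purely illustrative; each number is the raw
 * numeric value of the runtime's proc-bind enumeration). */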

static ompd_rc_t
ompd_is_implicit(ompd_task_handle_t *task_handle, /* IN: OpenMP task handle */
                 ompd_word_t *val /* OUT: 1 if implicit task, 0 otherwise */
) {
  if (!task_handle)
    return ompd_rc_stale_handle;
  if (!task_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = TValue(context, task_handle->th)
                      .cast("kmp_taskdata_t") // td
                      .access("td_flags") // td->td_flags
                      .cast("kmp_tasking_flags_t")
                      .check("tasktype", val); // td->td_flags.tasktype
  *val ^= 1; // tasktype: explicit = 1, implicit = 0 => invert the value
  return ret;
}

ompd_rc_t ompd_get_num_threads(
    ompd_parallel_handle_t *parallel_handle, /* IN: OpenMP parallel handle */
    ompd_word_t *val /* OUT: number of threads */
) {
  if (!parallel_handle->ah)
    return ompd_rc_stale_handle;
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  ompd_rc_t ret = ompd_rc_ok;
  if (parallel_handle->lwt.address != 0) {
    *val = 1; // a lightweight (serialized) team has exactly one thread
  } else {
    uint32_t res;
    ret = TValue(context, parallel_handle->th)
              .cast("kmp_base_team_t", 0) /*t*/
              .access("t_nproc") /*t.t_nproc*/
              .castBase()
              .getValue(res);
    *val = res;
  }
  return ret;
}

ompd_rc_t ompd_get_icv_from_scope(void *handle, ompd_scope_t scope,
                                  ompd_icv_id_t icv_id,
                                  ompd_word_t *icv_value) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_dyn_var:
      return ompd_get_dynamic((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_run_sched_var:
      return ompd_rc_incompatible;
    case ompd_icv_stacksize_var:
      return ompd_get_stacksize((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_cancel_var:
      return ompd_get_cancellation((ompd_address_space_handle_t *)handle,
                                   icv_value);
    case ompd_icv_max_task_priority_var:
      return ompd_get_max_task_priority((ompd_address_space_handle_t *)handle,
                                        icv_value);
    case ompd_icv_debug_var:
      return ompd_get_debug((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_display_affinity_var:
      return ompd_get_display_affinity((ompd_address_space_handle_t *)handle,
                                       icv_value);
    case ompd_icv_affinity_format_var:
      return ompd_rc_incompatible;
    case ompd_icv_tool_libraries_var:
      return ompd_rc_incompatible;
    case ompd_icv_default_device_var:
      return ompd_get_default_device((ompd_thread_handle_t *)handle,
                                     icv_value);
    case ompd_icv_tool_var:
      return ompd_get_tool((ompd_address_space_handle_t *)handle, icv_value);
    case ompd_icv_tool_verbose_init_var:
      return ompd_rc_incompatible;
    case ompd_icv_levels_var:
      return ompd_get_level((ompd_parallel_handle_t *)handle, icv_value);
    case ompd_icv_active_levels_var:
      return ompd_get_active_level((ompd_parallel_handle_t *)handle,
                                   icv_value);
    case ompd_icv_thread_limit_var:
      return ompd_get_thread_limit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_max_active_levels_var:
      return ompd_get_max_active_levels((ompd_task_handle_t *)handle,
                                        icv_value);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_num_procs_var:
    case ompd_icv_ompd_num_procs_var:
      return ompd_get_num_procs((ompd_address_space_handle_t *)handle,
                                icv_value);
    case ompd_icv_thread_num_var:
    case ompd_icv_ompd_thread_num_var:
      return ompd_get_thread_num((ompd_thread_handle_t *)handle, icv_value);
    case ompd_icv_final_var:
    case ompd_icv_ompd_final_var:
    case ompd_icv_ompd_final_task_var:
      return ompd_in_final((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_implicit_var:
    case ompd_icv_ompd_implicit_var:
    case ompd_icv_ompd_implicit_task_var:
      return ompd_is_implicit((ompd_task_handle_t *)handle, icv_value);
    case ompd_icv_team_size_var:
    case ompd_icv_ompd_team_size_var:
      return ompd_get_num_threads((ompd_parallel_handle_t *)handle, icv_value);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}
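
/* A minimal usage sketch, assuming a task handle `task` already obtained
 * through the tool's handle-acquisition calls:
 *
 *   ompd_word_t bind;
 *   ompd_rc_t rc = ompd_get_icv_from_scope(task, ompd_scope_task,
 *                                          ompd_icv_bind_var, &bind);
 *   if (rc == ompd_rc_incomplete) {
 *     // bind-var is a list; fall back to the string form below
 *     const char *bind_list;
 *     rc = ompd_get_icv_string_from_scope(task, ompd_scope_task,
 *                                         ompd_icv_bind_var, &bind_list);
 *   }
 */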

ompd_rc_t ompd_get_icv_string_from_scope(void *handle, ompd_scope_t scope,
                                         ompd_icv_id_t icv_id,
                                         const char **icv_string) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }
  if (icv_id >= ompd_icv_after_last_icv || icv_id == 0) {
    return ompd_rc_bad_input;
  }
  if (scope != ompd_icv_scope_values[icv_id]) {
    return ompd_rc_bad_input;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_address_space:
    device_kind = ((ompd_address_space_handle_t *)handle)->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (icv_id) {
    case ompd_icv_run_sched_var:
      return ompd_get_run_schedule((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_nthreads_var:
      return ompd_get_nthreads((ompd_thread_handle_t *)handle, icv_string);
    case ompd_icv_bind_var:
      return ompd_get_proc_bind((ompd_task_handle_t *)handle, icv_string);
    case ompd_icv_affinity_format_var:
      return ompd_get_affinity_format((ompd_address_space_handle_t *)handle,
                                      icv_string);
    case ompd_icv_tool_libraries_var:
      return ompd_get_tool_libraries((ompd_address_space_handle_t *)handle,
                                     icv_string);
    case ompd_icv_tool_verbose_init_var:
      return ompd_get_tool_verbose_init((ompd_address_space_handle_t *)handle,
                                        icv_string);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}

static ompd_rc_t __ompd_get_tool_data(TValue &dataValue, ompd_word_t *value,
                                      ompd_address_t *ptr) {
  ompd_rc_t ret = dataValue.getError();
  if (ret != ompd_rc_ok)
    return ret;
  ret = dataValue.access("value").castBase().getValue(*value);
  if (ret != ompd_rc_ok)
    return ret;
  ptr->segment = OMPD_SEGMENT_UNSPECIFIED;
  ret = dataValue.access("ptr").castBase().getValue(ptr->address);
  return ret;
}

ompd_rc_t ompd_get_task_data(ompd_task_handle_t *task_handle,
                             ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = task_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue;
  if (task_handle->lwt.address) {
    dataValue = TValue(context, task_handle->lwt)
                    .cast("ompt_lw_taskteam_t") /*lwt*/
                    .access("ompt_task_info") // lwt->ompt_task_info
                    .cast("ompt_task_info_t")
                    .access("task_data") // lwt->ompt_task_info.task_data
                    .cast("ompt_data_t");
  } else {
    dataValue = TValue(context, task_handle->th)
                    .cast("kmp_taskdata_t") /*td*/
                    .access("ompt_task_info") // td->ompt_task_info
                    .cast("ompt_task_info_t")
                    .access("task_data") // td->ompt_task_info.task_data
                    .cast("ompt_data_t");
  }
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_parallel_data(ompd_parallel_handle_t *parallel_handle,
                                 ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = parallel_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue;
  if (parallel_handle->lwt.address) {
    dataValue =
        TValue(context, parallel_handle->lwt)
            .cast("ompt_lw_taskteam_t") /*lwt*/
            .access("ompt_team_info") // lwt->ompt_team_info
            .cast("ompt_team_info_t")
            .access("parallel_data") // lwt->ompt_team_info.parallel_data
            .cast("ompt_data_t");
  } else {
    dataValue = TValue(context, parallel_handle->th)
                    .cast("kmp_base_team_t") /*t*/
                    .access("ompt_team_info") // t->ompt_team_info
                    .cast("ompt_team_info_t")
                    .access("parallel_data") // t->ompt_team_info.parallel_data
                    .cast("ompt_data_t");
  }
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_thread_data(ompd_thread_handle_t *thread_handle,
                               ompd_word_t *value, ompd_address_t *ptr) {
  ompd_address_space_context_t *context = thread_handle->ah->context;
  if (!context)
    return ompd_rc_stale_handle;
  if (!callbacks) {
    return ompd_rc_callback_error;
  }

  TValue dataValue =
      TValue(context, thread_handle->th)
          .cast("kmp_base_info_t") /*th*/
          .access("ompt_thread_info") // th->ompt_thread_info
          .cast("ompt_thread_info_t")
          .access("thread_data") // th->ompt_thread_info.thread_data
          .cast("ompt_data_t");
  return __ompd_get_tool_data(dataValue, value, ptr);
}

ompd_rc_t ompd_get_tool_data(void *handle, ompd_scope_t scope,
                             ompd_word_t *value, ompd_address_t *ptr) {
  if (!handle) {
    return ompd_rc_stale_handle;
  }

  ompd_device_t device_kind;

  switch (scope) {
  case ompd_scope_thread:
    device_kind = ((ompd_thread_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_parallel:
    device_kind = ((ompd_parallel_handle_t *)handle)->ah->kind;
    break;
  case ompd_scope_task:
    device_kind = ((ompd_task_handle_t *)handle)->ah->kind;
    break;
  default:
    return ompd_rc_bad_input;
  }

  if (device_kind == OMPD_DEVICE_KIND_HOST) {
    switch (scope) {
    case ompd_scope_thread:
      return ompd_get_thread_data((ompd_thread_handle_t *)handle, value, ptr);
    case ompd_scope_parallel:
      return ompd_get_parallel_data((ompd_parallel_handle_t *)handle, value,
                                    ptr);
    case ompd_scope_task:
      return ompd_get_task_data((ompd_task_handle_t *)handle, value, ptr);
    default:
      return ompd_rc_unsupported;
    }
  }
  return ompd_rc_unsupported;
}

