/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
#include <ctype.h>

// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

#if KMP_AFFINITY_SUPPORTED
// Helper class to see if place lists further restrict the fullMask
class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

public:
  kmp_full_mask_modifier_t() {
    KMP_CPU_ALLOC(mask);
    KMP_CPU_ZERO(mask);
  }
  ~kmp_full_mask_modifier_t() {
    KMP_CPU_FREE(mask);
    mask = nullptr;
  }
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  // If the new full mask is different from the current full mask,
  // then switch them. Returns true if full mask was affected, false otherwise.
  bool restrict_to_mask() {
    // See if the new mask further restricts or changes the full mask
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
      return false;
    return __kmp_topology->restrict_to_mask(mask);
  }
};
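// Typical usage (a sketch; place_mask stands for any place-list mask):
//   kmp_full_mask_modifier_t full_mask;
//   /* for each place: */ full_mask.include(place_mask);
//   bool changed = full_mask.restrict_to_mask();
// restrict_to_mask() only narrows the topology when the accumulated mask is
// non-empty and differs from __kmp_affin_fullMask.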

static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
    if (for_binding)
      return "OMP_PROC_BIND";
    return "OMP_PLACES";
  }
  return affinity.env_var;
}
#endif // KMP_AFFINITY_SUPPORTED

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case num threads exceeds original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return KMP_I18N_STR(Unknown);
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}

const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return ((plural) ? "unknowns" : "unknown");
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}

const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
  case KMP_HW_MAX_NUM_CORE_TYPES:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}

#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing warning
#define KMP_AFF_WARNING(s, ...)                                                \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) {    \
    KMP_WARNING(__VA_ARGS__);                                                  \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif

////////////////////////////////////////////////////////////////////////////////
// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}
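// Example: with types {socket, core, thread}, compare_ids orders hardware
// threads lexicographically by their id tuples, e.g.,
// {0,0,0} < {0,0,1} < {0,1,0} < {1,0,0}; os_id breaks ties between equal
// tuples.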

#if KMP_AFFINITY_SUPPORTED
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
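// Example: with depth == 3 and compact == 1, the deepest sub_id (thread) is
// compared first, then sub_ids[0] and sub_ids[1] (socket, core); the sorted
// order therefore walks all sockets and cores for thread context 0 before
// moving on to thread context 1 of each core.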
#endif

void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d ", ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  if (leader)
    printf(" (leader)");
  printf("\n");
}

////////////////////////////////////////////////////////////////////////////////
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent).
void kmp_topology_t::_insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find target layer
  // If new layer is equal to another layer then put the new layer above
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer. And put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}
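// Example: given a socket/thread topology and one new id per hardware thread
// that changes less often than the socket id, the new layer is inserted above
// sockets; types[] and every hw thread's ids[] array are shifted down one
// slot to make room at the target layer.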

#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  _insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);

  // sort topology after adding proc groups
  __kmp_topology->sort_ids();
}
#endif

// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the indexes for the second (deeper) layer are the same,
      // e.g., all are zero, then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove radix one type by setting the equivalence, removing the id from
      // the hw threads and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}
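// Example: if every core has exactly one L1 cache, the L1 and core layers are
// radix-1 with respect to each other; core (preference 95) outranks L1
// (preference 60), so the L1 layer is dropped and the equivalence map records
// L1 -> core.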

void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}

// Gather the count of each topology layer and the ratio
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l)
          count[l]++;
        // Keep track of topology layer ratio statistics
        max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}
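// Example: on a uniform 2-socket x 4-core x 2-thread machine, count becomes
// {2, 8, 16} (total objects at each layer) and ratio becomes {2, 4, 2}
// (maximum objects per parent object; the top layer has no parent, so its
// ratio equals its count).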

int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}

// Find out if the topology is uniform
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}
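// Example: with ratio = {2, 4, 2} the product is 16; if count[depth - 1] is
// also 16 the topology is uniform, but if one socket only had 3 cores the
// hardware thread count would drop to 14 and the topology would be marked
// non-uniform.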

// Set all the sub_ids for each hardware thread
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}
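// Example: if the OS reports socket ids {0, 2}, their sub_ids become {0, 1}:
// sub_ids are dense, zero-based positions relative to the parent object,
// independent of the raw id values the OS assigned.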

void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}

kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}
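// Usage sketch (hypothetical caller): detection code typically builds a
// 3-level topology with
//   kmp_hw_t types[3] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD};
//   kmp_topology_t *top = kmp_topology_t::allocate(nproc, 3, types);
// then fills in top->hw_threads and calls canonicalize(). The single
// allocation keeps hw_threads and the types/ratio/count arrays contiguous.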

void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}

bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}

void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}

void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/socket
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at very end of quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}

#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  // If the user requested hybrid CPU attributes for granularity (via either
  // OMP_PLACES or KMP_AFFINITY), but none exist, then reset the granularity
  // and have the code below select a granularity and warn the user.
  if (!__kmp_is_hybrid_cpu()) {
    if (affinity.core_attr_gran.valid) {
      // OMP_PLACES with cores:<attribute> but non-hybrid arch, use cores
      // instead
      KMP_AFF_WARNING(
          affinity, AffIgnoringNonHybrid, env_var,
          __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    } else if (affinity.flags.core_types_gran ||
               affinity.flags.core_effs_gran) {
      // OMP_PLACES=core_types|core_effs but non-hybrid, use cores instead
      if (affinity.flags.omp_places) {
        KMP_AFF_WARNING(
            affinity, AffIgnoringNonHybrid, env_var,
            __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      } else {
        // KMP_AFFINITY=granularity=core_type|core_eff,...
        KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                        "Intel(R) Hybrid Technology core attribute",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE));
      }
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    }
  }
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif

void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}

// Canonicalize an explicit packages X cores/pkg X threads/core topology
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}

#if KMP_AFFINITY_SUPPORTED
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}
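// Example outputs: "Intel Atom(R) processor cores" for a core-type attribute,
// or "cores eff=0" for an efficiency attribute (the exact core wording comes
// from the message catalog).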

bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  // Apply the filter
  bool affected;
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  if (affected) {
    _gather_enumeration_information();
    _discover_uniformity();
    _set_globals();
    _set_last_level_cache();
#if KMP_OS_WINDOWS
    // Copy filtered full mask if topology has single processor group
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  return affected;
}

// Apply the KMP_HW_SUBSET environment variable to the topology.
// Returns true if KMP_HW_SUBSET filtered any processors;
// otherwise, returns false.
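// Example: KMP_HW_SUBSET=2s,4c,2t requests 2 sockets, 4 cores per socket, and
// 2 threads per core; an offset form such as 4c@2 skips the first two cores.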
bool kmp_topology_t::filter_hw_subset() {
  // If KMP_HW_SUBSET wasn't requested, then do nothing.
  if (!__kmp_hw_subset)
    return false;

  // First, sort the KMP_HW_SUBSET items by the machine topology
  __kmp_hw_subset->sort();

  __kmp_hw_subset->canonicalize(__kmp_topology);

  // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology
  bool using_core_types = false;
  bool using_core_effs = false;
  bool is_absolute = __kmp_hw_subset->is_absolute();
  int hw_subset_depth = __kmp_hw_subset->get_depth();
  kmp_hw_t specified[KMP_HW_LAST];
  int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth);
  KMP_ASSERT(hw_subset_depth > 0);
  KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < hw_subset_depth; ++i) {
    int max_count;
    const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(i);
    int num = item.num[0];
    int offset = item.offset[0];
    kmp_hw_t type = item.type;
    kmp_hw_t equivalent_type = equivalent[type];
    int level = get_level(type);
    topology_levels[i] = level;

    // Check to see if current layer is in detected machine topology
    if (equivalent_type != KMP_HW_UNKNOWN) {
      __kmp_hw_subset->at(i).type = equivalent_type;
    } else {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric,
                      __kmp_hw_get_catalog_string(type));
      return false;
    }

    // Check to see if current layer has already been
    // specified either directly or through an equivalent type
    if (specified[equivalent_type] != KMP_HW_UNKNOWN) {
      KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers,
                      __kmp_hw_get_catalog_string(type),
                      __kmp_hw_get_catalog_string(specified[equivalent_type]));
      return false;
    }
    specified[equivalent_type] = type;

    // Check to see if each layer's num & offset parameters are valid
    max_count = get_ratio(level);
    if (!is_absolute) {
      if (max_count < 0 ||
          (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
        bool plural = (num > 1);
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric,
                        __kmp_hw_get_catalog_string(type, plural));
        return false;
      }
    }

    // Check to see if core attributes are consistent
    if (core_level == level) {
      // Determine which core attributes are specified
      for (int j = 0; j < item.num_attrs; ++j) {
        if (item.attr[j].is_core_type_valid())
          using_core_types = true;
        if (item.attr[j].is_core_eff_valid())
          using_core_effs = true;
      }

      // Check if using a single core attribute on non-hybrid arch.
      // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute.
      //
      // Check if using multiple core attributes on non-hybrid arch.
      // Ignore all of KMP_HW_SUBSET if this is the case.
      if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) {
        if (item.num_attrs == 1) {
          if (using_core_effs) {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "efficiency");
          } else {
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr,
                            "core_type");
          }
          using_core_effs = false;
          using_core_types = false;
        } else {
          KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid);
          return false;
        }
      }

      // Check if using both core types and core efficiencies together
      if (using_core_types && using_core_effs) {
        KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type",
                        "efficiency");
        return false;
      }

      // Check that core efficiency values are valid
      if (using_core_effs) {
        for (int j = 0; j < item.num_attrs; ++j) {
          if (item.attr[j].is_core_eff_valid()) {
            int core_eff = item.attr[j].get_core_eff();
            if (core_eff < 0 || core_eff >= num_core_efficiencies) {
              kmp_str_buf_t buf;
              __kmp_str_buf_init(&buf);
              __kmp_str_buf_print(&buf, "%d", item.attr[j].get_core_eff());
              __kmp_msg(kmp_ms_warning,
                        KMP_MSG(AffHWSubsetAttrInvalid, "efficiency", buf.str),
                        KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1),
                        __kmp_msg_null);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      // Check that the number of requested cores with attributes is valid
      if ((using_core_types || using_core_effs) && !is_absolute) {
        for (int j = 0; j < item.num_attrs; ++j) {
          int num = item.num[j];
          int offset = item.offset[j];
          int level_above = core_level - 1;
          if (level_above >= 0) {
            max_count = get_ncores_with_attr_per(item.attr[j], level_above);
            if (max_count <= 0 ||
                (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf, num > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }

      if ((using_core_types || using_core_effs) && item.num_attrs > 1) {
        for (int j = 0; j < item.num_attrs; ++j) {
          // Ambiguous use of specific core attribute + generic core
          // e.g., 4c & 3c:intel_core or 4c & 3c:eff1
          if (!item.attr[j]) {
            kmp_hw_attr_t other_attr;
            for (int k = 0; k < item.num_attrs; ++k) {
              if (item.attr[k] != item.attr[j]) {
                other_attr = item.attr[k];
                break;
              }
            }
            kmp_str_buf_t buf;
            __kmp_hw_get_catalog_core_string(other_attr, &buf,
                                             item.num[j] > 0);
            KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat,
                            __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str);
            __kmp_str_buf_free(&buf);
            return false;
          }
          // Allow specifying a specific core type or core eff exactly once
          for (int k = 0; k < j; ++k) {
            if (!item.attr[j] || !item.attr[k])
              continue;
            if (item.attr[k] == item.attr[j]) {
              kmp_str_buf_t buf;
              __kmp_hw_get_catalog_core_string(item.attr[j], &buf,
                                               item.num[j] > 0);
              KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str);
              __kmp_str_buf_free(&buf);
              return false;
            }
          }
        }
      }
    }
  }

  // For keeping track of sub_ids for an absolute KMP_HW_SUBSET
  // or core attributes (core type or efficiency)
  int prev_sub_ids[KMP_HW_LAST];
  int abs_sub_ids[KMP_HW_LAST];
  int core_eff_sub_ids[KMP_HW_MAX_NUM_CORE_EFFS];
  int core_type_sub_ids[KMP_HW_MAX_NUM_CORE_TYPES];
  for (size_t i = 0; i < KMP_HW_LAST; ++i) {
    abs_sub_ids[i] = -1;
    prev_sub_ids[i] = -1;
  }
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_EFFS; ++i)
    core_eff_sub_ids[i] = -1;
  for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    core_type_sub_ids[i] = -1;

  // Determine which hardware threads should be filtered.

  // Helpful to determine if a topology layer is targeted by an absolute subset
  auto is_targeted = [&](int level) {
    if (is_absolute) {
      for (int i = 0; i < hw_subset_depth; ++i)
        if (topology_levels[i] == level)
          return true;
      return false;
    }
    // If not absolute KMP_HW_SUBSET, then every layer is seen as targeted
    return true;
  };

  // Helpful to index into core type sub Ids array
  auto get_core_type_index = [](const kmp_hw_thread_t &t) {
    switch (t.attrs.get_core_type()) {
    case KMP_HW_CORE_TYPE_UNKNOWN:
    case KMP_HW_MAX_NUM_CORE_TYPES:
      return 0;
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
    case KMP_HW_CORE_TYPE_ATOM:
      return 1;
    case KMP_HW_CORE_TYPE_CORE:
      return 2;
#endif
    }
    KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
    KMP_BUILTIN_UNREACHABLE;
  };

  // Helpful to index into core efficiencies sub Ids array
  auto get_core_eff_index = [](const kmp_hw_thread_t &t) {
    return t.attrs.get_core_eff();
  };

  int num_filtered = 0;
  kmp_affin_mask_t *filtered_mask;
  KMP_CPU_ALLOC(filtered_mask);
  KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];

    // Figure out the absolute sub ids and core eff/type sub ids
    if (is_absolute || using_core_effs || using_core_types) {
      for (int level = 0; level < get_depth(); ++level) {
        if (hw_thread.sub_ids[level] != prev_sub_ids[level]) {
          bool found_targeted = false;
          for (int j = level; j < get_depth(); ++j) {
            bool targeted = is_targeted(j);
            if (!found_targeted && targeted) {
              found_targeted = true;
              abs_sub_ids[j]++;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)]++;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)]++;
            } else if (targeted) {
              abs_sub_ids[j] = 0;
              if (j == core_level && using_core_effs)
                core_eff_sub_ids[get_core_eff_index(hw_thread)] = 0;
              if (j == core_level && using_core_types)
                core_type_sub_ids[get_core_type_index(hw_thread)] = 0;
            }
          }
          break;
        }
      }
      for (int level = 0; level < get_depth(); ++level)
        prev_sub_ids[level] = hw_thread.sub_ids[level];
    }

    // Check to see if this hardware thread should be filtered
    bool should_be_filtered = false;
    for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth;
         ++hw_subset_index) {
      const auto &hw_subset_item = __kmp_hw_subset->at(hw_subset_index);
      int level = topology_levels[hw_subset_index];
      if (level == -1)
        continue;
      if ((using_core_effs || using_core_types) && level == core_level) {
        // Look for the core attribute in KMP_HW_SUBSET which corresponds
        // to this hardware thread's core attribute. Use this num,offset plus
        // the running sub_id for the particular core attribute of this
        // hardware thread to determine if the hardware thread should be
        // filtered or not.
        int attr_idx;
        kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type();
        int core_eff = hw_thread.attrs.get_core_eff();
        for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) {
          if (using_core_types &&
              hw_subset_item.attr[attr_idx].get_core_type() == core_type)
            break;
          if (using_core_effs &&
              hw_subset_item.attr[attr_idx].get_core_eff() == core_eff)
            break;
        }
        // This core attribute isn't in the KMP_HW_SUBSET so always filter it.
        if (attr_idx == hw_subset_item.num_attrs) {
          should_be_filtered = true;
          break;
        }
        int sub_id;
        int num = hw_subset_item.num[attr_idx];
        int offset = hw_subset_item.offset[attr_idx];
        if (using_core_types)
          sub_id = core_type_sub_ids[get_core_type_index(hw_thread)];
        else
          sub_id = core_eff_sub_ids[get_core_eff_index(hw_thread)];
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      } else {
        int sub_id;
        int num = hw_subset_item.num[0];
        int offset = hw_subset_item.offset[0];
        if (is_absolute)
          sub_id = abs_sub_ids[level];
        else
          sub_id = hw_thread.sub_ids[level];
        if (sub_id < offset ||
            (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) {
          should_be_filtered = true;
          break;
        }
      }
    }
    // Collect filtering information
    if (should_be_filtered) {
      KMP_CPU_CLR(hw_thread.os_id, filtered_mask);
      num_filtered++;
    }
  }

  // One last check that we shouldn't allow filtering entire machine
  if (num_filtered == num_hw_threads) {
    KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
    KMP_CPU_FREE(filtered_mask); // avoid leaking the temporary mask
    return false;
  }

  // Apply the filter
  restrict_to_mask(filtered_mask);
  KMP_CPU_FREE(filtered_mask); // restrict_to_mask() only reads the mask
  return true;
}

bool kmp_topology_t::is_close(int hwt1, int hwt2,
                              const kmp_affinity_t &stgs) const {
  int hw_level = stgs.gran_levels;
  if (hw_level >= depth)
    return true;
  bool retval = true;
  const kmp_hw_thread_t &t1 = hw_threads[hwt1];
  const kmp_hw_thread_t &t2 = hw_threads[hwt2];
  if (stgs.flags.core_types_gran)
    return t1.attrs.get_core_type() == t2.attrs.get_core_type();
  if (stgs.flags.core_effs_gran)
    return t1.attrs.get_core_eff() == t2.attrs.get_core_eff();
  for (int i = 0; i < (depth - hw_level); ++i) {
    if (t1.ids[i] != t2.ids[i])
      return false;
  }
  return retval;
}
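// Example: with granularity=core on a socket/core/thread topology,
// gran_levels == 1, so two hardware threads are "close" exactly when their
// socket and core ids match (every level above the thread level).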

////////////////////////////////////////////////////////////////////////////////

bool KMPAffinity::picked_api = false;

void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); }
void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); }
void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); }
void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); }
void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); }
void KMPAffinity::operator delete(void *p) { __kmp_free(p); }

void KMPAffinity::pick_api() {
  KMPAffinity *affinity_dispatch;
  if (picked_api)
    return;
#if KMP_USE_HWLOC
  // Only use Hwloc if affinity isn't explicitly disabled and
  // user requests Hwloc topology method
  if (__kmp_affinity_top_method == affinity_top_method_hwloc &&
      __kmp_affinity.type != affinity_disabled) {
    affinity_dispatch = new KMPHwlocAffinity();
  } else
#endif
  {
    affinity_dispatch = new KMPNativeAffinity();
  }
  __kmp_affinity_dispatch = affinity_dispatch;
  picked_api = true;
}

void KMPAffinity::destroy_api() {
  if (__kmp_affinity_dispatch != NULL) {
    delete __kmp_affinity_dispatch;
    __kmp_affinity_dispatch = NULL;
    picked_api = false;
  }
}
1438 | |
1439 | #define KMP_ADVANCE_SCAN(scan) \ |
1440 | while (*scan != '\0') { \ |
1441 | scan++; \ |
1442 | } |
1443 | |
1444 | // Print the affinity mask to the character array in a pretty format. |
1445 | // The format is a comma separated list of non-negative integers or integer |
1446 | // ranges: e.g., 1,2,3-5,7,9-15 |
1447 | // The format can also be the string "{<empty>}" if no bits are set in mask |
1448 | char *__kmp_affinity_print_mask(char *buf, int buf_len, |
1449 | kmp_affin_mask_t *mask) { |
1450 | int start = 0, finish = 0, previous = 0; |
1451 | bool first_range; |
1452 | KMP_ASSERT(buf); |
1453 | KMP_ASSERT(buf_len >= 40); |
1454 | KMP_ASSERT(mask); |
1455 | char *scan = buf; |
1456 | char *end = buf + buf_len - 1; |
1457 | |
1458 | // Check for empty set. |
1459 | if (mask->begin() == mask->end()) { |
1460 | KMP_SNPRINTF(s: scan, maxlen: end - scan + 1, format: "{<empty>}" ); |
1461 | KMP_ADVANCE_SCAN(scan); |
1462 | KMP_ASSERT(scan <= end); |
1463 | return buf; |
1464 | } |
1465 | |
1466 | first_range = true; |
1467 | start = mask->begin(); |
1468 | while (1) { |
1469 | // Find next range |
1470 | // [start, previous] is inclusive range of contiguous bits in mask |
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
1474 | previous = finish; |
1475 | } |
1476 | |
1477 | // The first range does not need a comma printed before it, but the rest |
1478 | // of the ranges do need a comma beforehand |
1479 | if (!first_range) { |
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
1481 | KMP_ADVANCE_SCAN(scan); |
1482 | } else { |
1483 | first_range = false; |
1484 | } |
1485 | // Range with three or more contiguous bits in the affinity mask |
1486 | if (previous - start > 1) { |
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
1494 | } |
1495 | } |
1496 | KMP_ADVANCE_SCAN(scan); |
1497 | // Start over with new start point |
1498 | start = finish; |
1499 | if (start == mask->end()) |
1500 | break; |
1501 | // Check for overflow |
1502 | if (end - scan < 2) |
1503 | break; |
1504 | } |
1505 | |
1506 | // Check for overflow |
1507 | KMP_ASSERT(scan <= end); |
1508 | return buf; |
1509 | } |
1510 | #undef KMP_ADVANCE_SCAN |
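
// A minimal usage sketch of the printer above (illustrative; the
// KMP_AFFIN_MASK_PRINT_LEN buffer length is an assumption):
//   char buf[KMP_AFFIN_MASK_PRINT_LEN];
//   __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask);
//   // for a mask with bits {0,1,2,5} set, buf now holds "0-2,5"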
1511 | |
1512 | // Print the affinity mask to the string buffer object in a pretty format |
1513 | // The format is a comma separated list of non-negative integers or integer |
1514 | // ranges: e.g., 1,2,3-5,7,9-15 |
1515 | // The format can also be the string "{<empty>}" if no bits are set in mask |
1516 | kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf, |
1517 | kmp_affin_mask_t *mask) { |
1518 | int start = 0, finish = 0, previous = 0; |
1519 | bool first_range; |
1520 | KMP_ASSERT(buf); |
1521 | KMP_ASSERT(mask); |
1522 | |
  __kmp_str_buf_clear(buf);
1524 | |
1525 | // Check for empty set. |
1526 | if (mask->begin() == mask->end()) { |
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
1528 | return buf; |
1529 | } |
1530 | |
1531 | first_range = true; |
1532 | start = mask->begin(); |
1533 | while (1) { |
1534 | // Find next range |
1535 | // [start, previous] is inclusive range of contiguous bits in mask |
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
1539 | previous = finish; |
1540 | } |
1541 | |
1542 | // The first range does not need a comma printed before it, but the rest |
1543 | // of the ranges do need a comma beforehand |
1544 | if (!first_range) { |
      __kmp_str_buf_print(buf, "%s", ",");
1546 | } else { |
1547 | first_range = false; |
1548 | } |
1549 | // Range with three or more contiguous bits in the affinity mask |
1550 | if (previous - start > 1) { |
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
1557 | } |
1558 | } |
1559 | // Start over with new start point |
1560 | start = finish; |
1561 | if (start == mask->end()) |
1562 | break; |
1563 | } |
1564 | return buf; |
1565 | } |
1566 | |
1567 | // Return (possibly empty) affinity mask representing the offline CPUs |
1568 | // Caller must free the mask |
1569 | kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() { |
1570 | kmp_affin_mask_t *offline; |
1571 | KMP_CPU_ALLOC(offline); |
1572 | KMP_CPU_ZERO(offline); |
1573 | #if KMP_OS_LINUX |
1574 | int n, begin_cpu, end_cpu; |
1575 | kmp_safe_raii_file_t offline_file; |
1576 | auto skip_ws = [](FILE *f) { |
1577 | int c; |
1578 | do { |
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
1583 | }; |
1584 | // File contains CSV of integer ranges representing the offline CPUs |
1585 | // e.g., 1,2,4-7,9,11-15 |
  int status = offline_file.try_open("/sys/devices/system/cpu/offline", "r");
  if (status != 0)
    return offline;
  while (!feof(offline_file)) {
    skip_ws(offline_file);
    n = fscanf(offline_file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(offline_file);
    int c = fgetc(offline_file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(offline_file);
      n = fscanf(offline_file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(offline_file);
      c = fgetc(offline_file); // skip ','
1607 | } else { |
1608 | // Syntax problem |
1609 | break; |
1610 | } |
1611 | // Ensure a valid range of CPUs |
1612 | if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 || |
1613 | end_cpu >= __kmp_xproc || begin_cpu > end_cpu) { |
1614 | continue; |
1615 | } |
1616 | // Insert [begin_cpu, end_cpu] into offline mask |
1617 | for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) { |
1618 | KMP_CPU_SET(cpu, offline); |
1619 | } |
1620 | } |
1621 | #endif |
1622 | return offline; |
1623 | } |
1624 | |
1625 | // Return the number of available procs |
1626 | int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) { |
1627 | int avail_proc = 0; |
1628 | KMP_CPU_ZERO(mask); |
1629 | |
1630 | #if KMP_GROUP_AFFINITY |
1631 | |
1632 | if (__kmp_num_proc_groups > 1) { |
1633 | int group; |
1634 | KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL); |
1635 | for (group = 0; group < __kmp_num_proc_groups; group++) { |
1636 | int i; |
1637 | int num = __kmp_GetActiveProcessorCount(group); |
1638 | for (i = 0; i < num; i++) { |
1639 | KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask); |
1640 | avail_proc++; |
1641 | } |
1642 | } |
1643 | } else |
1644 | |
1645 | #endif /* KMP_GROUP_AFFINITY */ |
1646 | |
1647 | { |
1648 | int proc; |
1649 | kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus(); |
1650 | for (proc = 0; proc < __kmp_xproc; proc++) { |
1651 | // Skip offline CPUs |
1652 | if (KMP_CPU_ISSET(proc, offline_cpus)) |
1653 | continue; |
1654 | KMP_CPU_SET(proc, mask); |
1655 | avail_proc++; |
1656 | } |
1657 | KMP_CPU_FREE(offline_cpus); |
1658 | } |
1659 | |
1660 | return avail_proc; |
1661 | } |
1662 | |
1663 | // All of the __kmp_affinity_create_*_map() routines should allocate the |
1664 | // internal topology object and set the layer ids for it. Each routine |
1665 | // returns a boolean on whether it was successful at doing so. |
1666 | kmp_affin_mask_t *__kmp_affin_fullMask = NULL; |
1667 | // Original mask is a subset of full mask in multiple processor groups topology |
1668 | kmp_affin_mask_t *__kmp_affin_origMask = NULL; |
1669 | |
1670 | #if KMP_USE_HWLOC |
1671 | static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) { |
1672 | #if HWLOC_API_VERSION >= 0x00020000 |
1673 | return hwloc_obj_type_is_cache(obj->type); |
1674 | #else |
1675 | return obj->type == HWLOC_OBJ_CACHE; |
1676 | #endif |
1677 | } |
1678 | |
1679 | // Returns KMP_HW_* type derived from HWLOC_* type |
1680 | static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) { |
1681 | |
1682 | if (__kmp_hwloc_is_cache_type(obj)) { |
1683 | if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION) |
1684 | return KMP_HW_UNKNOWN; |
1685 | switch (obj->attr->cache.depth) { |
1686 | case 1: |
1687 | return KMP_HW_L1; |
1688 | case 2: |
1689 | #if KMP_MIC_SUPPORTED |
1690 | if (__kmp_mic_type == mic3) { |
1691 | return KMP_HW_TILE; |
1692 | } |
1693 | #endif |
1694 | return KMP_HW_L2; |
1695 | case 3: |
1696 | return KMP_HW_L3; |
1697 | } |
1698 | return KMP_HW_UNKNOWN; |
1699 | } |
1700 | |
1701 | switch (obj->type) { |
1702 | case HWLOC_OBJ_PACKAGE: |
1703 | return KMP_HW_SOCKET; |
1704 | case HWLOC_OBJ_NUMANODE: |
1705 | return KMP_HW_NUMA; |
1706 | case HWLOC_OBJ_CORE: |
1707 | return KMP_HW_CORE; |
1708 | case HWLOC_OBJ_PU: |
1709 | return KMP_HW_THREAD; |
1710 | case HWLOC_OBJ_GROUP: |
1711 | #if HWLOC_API_VERSION >= 0x00020000 |
1712 | if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE) |
1713 | return KMP_HW_DIE; |
1714 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE) |
1715 | return KMP_HW_TILE; |
1716 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE) |
1717 | return KMP_HW_MODULE; |
1718 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP) |
1719 | return KMP_HW_PROC_GROUP; |
1720 | #endif |
1721 | return KMP_HW_UNKNOWN; |
1722 | #if HWLOC_API_VERSION >= 0x00020100 |
1723 | case HWLOC_OBJ_DIE: |
1724 | return KMP_HW_DIE; |
1725 | #endif |
1726 | } |
1727 | return KMP_HW_UNKNOWN; |
1728 | } |
1729 | |
1730 | // Returns the number of objects of type 'type' below 'obj' within the topology |
1731 | // tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is |
1732 | // HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET |
1733 | // object. |
1734 | static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj, |
1735 | hwloc_obj_type_t type) { |
1736 | int retval = 0; |
1737 | hwloc_obj_t first; |
1738 | for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type, |
1739 | obj->logical_index, type, 0); |
1740 | first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, |
1741 | obj->type, first) == obj; |
1742 | first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type, |
1743 | first)) { |
1744 | ++retval; |
1745 | } |
1746 | return retval; |
1747 | } |
1748 | |
1749 | // This gets the sub_id for a lower object under a higher object in the |
1750 | // topology tree |
1751 | static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher, |
1752 | hwloc_obj_t lower) { |
1753 | hwloc_obj_t obj; |
1754 | hwloc_obj_type_t ltype = lower->type; |
1755 | int lindex = lower->logical_index - 1; |
1756 | int sub_id = 0; |
1757 | // Get the previous lower object |
1758 | obj = hwloc_get_obj_by_type(t, ltype, lindex); |
1759 | while (obj && lindex >= 0 && |
1760 | hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) { |
1761 | if (obj->userdata) { |
1762 | sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata)); |
1763 | break; |
1764 | } |
1765 | sub_id++; |
1766 | lindex--; |
1767 | obj = hwloc_get_obj_by_type(t, ltype, lindex); |
1768 | } |
  // Store sub_id + 1 so that 0 is distinguished from NULL
1770 | lower->userdata = RCAST(void *, sub_id + 1); |
1771 | return sub_id; |
1772 | } |
1773 | |
1774 | static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) { |
1775 | kmp_hw_t type; |
1776 | int hw_thread_index, sub_id; |
1777 | int depth; |
1778 | hwloc_obj_t pu, obj, root, prev; |
1779 | kmp_hw_t types[KMP_HW_LAST]; |
1780 | hwloc_obj_type_t hwloc_types[KMP_HW_LAST]; |
1781 | |
1782 | hwloc_topology_t tp = __kmp_hwloc_topology; |
1783 | *msg_id = kmp_i18n_null; |
1784 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
1786 | } |
1787 | |
1788 | if (!KMP_AFFINITY_CAPABLE()) { |
1789 | // Hack to try and infer the machine topology using only the data |
1790 | // available from hwloc on the current thread, and __kmp_xproc. |
1791 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
    // hwloc only guarantees existence of PU object, so check PACKAGE and CORE
1793 | hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0); |
1794 | if (o != NULL) |
1795 | nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE); |
1796 | else |
1797 | nCoresPerPkg = 1; // no PACKAGE found |
1798 | o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0); |
1799 | if (o != NULL) |
1800 | __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU); |
1801 | else |
1802 | __kmp_nThreadsPerCore = 1; // no CORE found |
1803 | if (__kmp_nThreadsPerCore == 0) |
1804 | __kmp_nThreadsPerCore = 1; |
1805 | __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore; |
1806 | if (nCoresPerPkg == 0) |
1807 | nCoresPerPkg = 1; // to prevent possible division by 0 |
1808 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; |
1809 | return true; |
1810 | } |
1811 | |
1812 | #if HWLOC_API_VERSION >= 0x00020400 |
1813 | // Handle multiple types of cores if they exist on the system |
1814 | int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0); |
1815 | |
1816 | typedef struct kmp_hwloc_cpukinds_info_t { |
1817 | int efficiency; |
1818 | kmp_hw_core_type_t core_type; |
1819 | hwloc_bitmap_t mask; |
1820 | } kmp_hwloc_cpukinds_info_t; |
1821 | kmp_hwloc_cpukinds_info_t *cpukinds = nullptr; |
1822 | |
1823 | if (nr_cpu_kinds > 0) { |
1824 | unsigned nr_infos; |
1825 | struct hwloc_info_s *infos; |
1826 | cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate( |
1827 | sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds); |
1828 | for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) { |
1829 | cpukinds[idx].efficiency = -1; |
1830 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN; |
1831 | cpukinds[idx].mask = hwloc_bitmap_alloc(); |
1832 | if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask, |
1833 | &cpukinds[idx].efficiency, &nr_infos, &infos, |
1834 | 0) == 0) { |
1835 | for (unsigned i = 0; i < nr_infos; ++i) { |
1836 | if (__kmp_str_match("CoreType" , 8, infos[i].name)) { |
1837 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
1838 | if (__kmp_str_match("IntelAtom" , 9, infos[i].value)) { |
1839 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM; |
1840 | break; |
1841 | } else if (__kmp_str_match("IntelCore" , 9, infos[i].value)) { |
1842 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE; |
1843 | break; |
1844 | } |
1845 | #endif |
1846 | } |
1847 | } |
1848 | } |
1849 | } |
1850 | } |
1851 | #endif |
1852 | |
1853 | root = hwloc_get_root_obj(tp); |
1854 | |
1855 | // Figure out the depth and types in the topology |
1856 | depth = 0; |
1857 | obj = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin()); |
1858 | while (obj && obj != root) { |
1859 | #if HWLOC_API_VERSION >= 0x00020000 |
1860 | if (obj->memory_arity) { |
1861 | hwloc_obj_t memory; |
1862 | for (memory = obj->memory_first_child; memory; |
1863 | memory = hwloc_get_next_child(tp, obj, memory)) { |
1864 | if (memory->type == HWLOC_OBJ_NUMANODE) |
1865 | break; |
1866 | } |
1867 | if (memory && memory->type == HWLOC_OBJ_NUMANODE) { |
1868 | types[depth] = KMP_HW_NUMA; |
1869 | hwloc_types[depth] = memory->type; |
1870 | depth++; |
1871 | } |
1872 | } |
1873 | #endif |
1874 | type = __kmp_hwloc_type_2_topology_type(obj); |
1875 | if (type != KMP_HW_UNKNOWN) { |
1876 | types[depth] = type; |
1877 | hwloc_types[depth] = obj->type; |
1878 | depth++; |
1879 | } |
1880 | obj = obj->parent; |
1881 | } |
1882 | KMP_ASSERT(depth > 0); |
1883 | |
1884 | // Get the order for the types correct |
1885 | for (int i = 0, j = depth - 1; i < j; ++i, --j) { |
1886 | hwloc_obj_type_t hwloc_temp = hwloc_types[i]; |
1887 | kmp_hw_t temp = types[i]; |
1888 | types[i] = types[j]; |
1889 | types[j] = temp; |
1890 | hwloc_types[i] = hwloc_types[j]; |
1891 | hwloc_types[j] = hwloc_temp; |
1892 | } |
1893 | |
1894 | // Allocate the data structure to be returned. |
1895 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); |
1896 | |
1897 | hw_thread_index = 0; |
1898 | pu = NULL; |
1899 | while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) { |
1900 | int index = depth - 1; |
1901 | bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask); |
1902 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index); |
1903 | if (included) { |
1904 | hw_thread.clear(); |
1905 | hw_thread.ids[index] = pu->logical_index; |
1906 | hw_thread.os_id = pu->os_index; |
1907 | // If multiple core types, then set that attribute for the hardware thread |
1908 | #if HWLOC_API_VERSION >= 0x00020400 |
1909 | if (cpukinds) { |
1910 | int cpukind_index = -1; |
1911 | for (int i = 0; i < nr_cpu_kinds; ++i) { |
1912 | if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) { |
1913 | cpukind_index = i; |
1914 | break; |
1915 | } |
1916 | } |
1917 | if (cpukind_index >= 0) { |
1918 | hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type); |
1919 | hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency); |
1920 | } |
1921 | } |
1922 | #endif |
1923 | index--; |
1924 | } |
1925 | obj = pu; |
1926 | prev = obj; |
1927 | while (obj != root && obj != NULL) { |
1928 | obj = obj->parent; |
1929 | #if HWLOC_API_VERSION >= 0x00020000 |
1930 | // NUMA Nodes are handled differently since they are not within the |
1931 | // parent/child structure anymore. They are separate children |
1932 | // of obj (memory_first_child points to first memory child) |
1933 | if (obj->memory_arity) { |
1934 | hwloc_obj_t memory; |
1935 | for (memory = obj->memory_first_child; memory; |
1936 | memory = hwloc_get_next_child(tp, obj, memory)) { |
1937 | if (memory->type == HWLOC_OBJ_NUMANODE) |
1938 | break; |
1939 | } |
1940 | if (memory && memory->type == HWLOC_OBJ_NUMANODE) { |
1941 | sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev); |
1942 | if (included) { |
1943 | hw_thread.ids[index] = memory->logical_index; |
1944 | hw_thread.ids[index + 1] = sub_id; |
1945 | index--; |
1946 | } |
1947 | prev = memory; |
1948 | } |
1949 | prev = obj; |
1950 | } |
1951 | #endif |
1952 | type = __kmp_hwloc_type_2_topology_type(obj); |
1953 | if (type != KMP_HW_UNKNOWN) { |
1954 | sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev); |
1955 | if (included) { |
1956 | hw_thread.ids[index] = obj->logical_index; |
1957 | hw_thread.ids[index + 1] = sub_id; |
1958 | index--; |
1959 | } |
1960 | prev = obj; |
1961 | } |
1962 | } |
1963 | if (included) |
1964 | hw_thread_index++; |
1965 | } |
1966 | |
1967 | #if HWLOC_API_VERSION >= 0x00020400 |
1968 | // Free the core types information |
1969 | if (cpukinds) { |
1970 | for (int idx = 0; idx < nr_cpu_kinds; ++idx) |
1971 | hwloc_bitmap_free(cpukinds[idx].mask); |
1972 | __kmp_free(cpukinds); |
1973 | } |
1974 | #endif |
1975 | __kmp_topology->sort_ids(); |
1976 | return true; |
1977 | } |
1978 | #endif // KMP_USE_HWLOC |
1979 | |
1980 | // If we don't know how to retrieve the machine's processor topology, or |
1981 | // encounter an error in doing so, this routine is called to form a "flat" |
1982 | // mapping of os thread id's <-> processor id's. |
1983 | static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) { |
1984 | *msg_id = kmp_i18n_null; |
1985 | int depth = 3; |
1986 | kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD}; |
1987 | |
1988 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
1990 | } |
1991 | |
1992 | // Even if __kmp_affinity.type == affinity_none, this routine might still |
1993 | // be called to set __kmp_ncores, as well as |
1994 | // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
1995 | if (!KMP_AFFINITY_CAPABLE()) { |
1996 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
1997 | __kmp_ncores = nPackages = __kmp_xproc; |
1998 | __kmp_nThreadsPerCore = nCoresPerPkg = 1; |
1999 | return true; |
2000 | } |
2001 | |
2002 | // When affinity is off, this routine will still be called to set |
2003 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
2004 | // Make sure all these vars are set correctly, and return now if affinity is |
2005 | // not enabled. |
2006 | __kmp_ncores = nPackages = __kmp_avail_proc; |
2007 | __kmp_nThreadsPerCore = nCoresPerPkg = 1; |
2008 | |
2009 | // Construct the data structure to be returned. |
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
2011 | int avail_ct = 0; |
2012 | int i; |
2013 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
2014 | // Skip this proc if it is not included in the machine model. |
2015 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
2016 | continue; |
2017 | } |
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
2019 | hw_thread.clear(); |
2020 | hw_thread.os_id = i; |
2021 | hw_thread.ids[0] = i; |
2022 | hw_thread.ids[1] = 0; |
2023 | hw_thread.ids[2] = 0; |
2024 | avail_ct++; |
2025 | } |
2026 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
2028 | } |
2029 | return true; |
2030 | } |
2031 | |
2032 | #if KMP_GROUP_AFFINITY |
2033 | // If multiple Windows* OS processor groups exist, we can create a 2-level |
2034 | // topology map with the groups at level 0 and the individual procs at level 1. |
2035 | // This facilitates letting the threads float among all procs in a group, |
2036 | // if granularity=group (the default when there are multiple groups). |
2037 | static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) { |
2038 | *msg_id = kmp_i18n_null; |
2039 | int depth = 3; |
2040 | kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD}; |
2041 | const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR); |
2042 | |
2043 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
2045 | } |
2046 | |
2047 | // If we aren't affinity capable, then use flat topology |
2048 | if (!KMP_AFFINITY_CAPABLE()) { |
2049 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2050 | nPackages = __kmp_num_proc_groups; |
2051 | __kmp_nThreadsPerCore = 1; |
2052 | __kmp_ncores = __kmp_xproc; |
    nCoresPerPkg = __kmp_ncores / nPackages;
2054 | return true; |
2055 | } |
2056 | |
2057 | // Construct the data structure to be returned. |
2058 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); |
2059 | int avail_ct = 0; |
2060 | int i; |
2061 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
2062 | // Skip this proc if it is not included in the machine model. |
2063 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
2064 | continue; |
2065 | } |
2066 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct++); |
2067 | hw_thread.clear(); |
2068 | hw_thread.os_id = i; |
2069 | hw_thread.ids[0] = i / BITS_PER_GROUP; |
2070 | hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP; |
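    // Illustrative (hypothetical numbers): with 64 bits per group, os proc 70
    // maps to ids {1, 6, 6}: group 1, sixth processor within that group.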
2071 | } |
2072 | return true; |
2073 | } |
2074 | #endif /* KMP_GROUP_AFFINITY */ |
2075 | |
2076 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
2077 | |
2078 | template <kmp_uint32 LSB, kmp_uint32 MSB> |
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
2080 | const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB; |
2081 | const kmp_uint32 SHIFT_RIGHT = LSB; |
2082 | kmp_uint32 retval = v; |
2083 | retval <<= SHIFT_LEFT; |
2084 | retval >>= (SHIFT_LEFT + SHIFT_RIGHT); |
2085 | return retval; |
2086 | } |
2087 | |
2088 | static int __kmp_cpuid_mask_width(int count) { |
2089 | int r = 0; |
2090 | |
2091 | while ((1 << r) < count) |
2092 | ++r; |
2093 | return r; |
2094 | } |
2095 | |
2096 | class apicThreadInfo { |
2097 | public: |
2098 | unsigned osId; // param to __kmp_affinity_bind_thread |
2099 | unsigned apicId; // from cpuid after binding |
2100 | unsigned maxCoresPerPkg; // "" |
2101 | unsigned maxThreadsPerPkg; // "" |
2102 | unsigned pkgId; // inferred from above values |
2103 | unsigned coreId; // "" |
2104 | unsigned threadId; // "" |
2105 | }; |
2106 | |
2107 | static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a, |
2108 | const void *b) { |
2109 | const apicThreadInfo *aa = (const apicThreadInfo *)a; |
2110 | const apicThreadInfo *bb = (const apicThreadInfo *)b; |
2111 | if (aa->pkgId < bb->pkgId) |
2112 | return -1; |
2113 | if (aa->pkgId > bb->pkgId) |
2114 | return 1; |
2115 | if (aa->coreId < bb->coreId) |
2116 | return -1; |
2117 | if (aa->coreId > bb->coreId) |
2118 | return 1; |
2119 | if (aa->threadId < bb->threadId) |
2120 | return -1; |
2121 | if (aa->threadId > bb->threadId) |
2122 | return 1; |
2123 | return 0; |
2124 | } |
2125 | |
2126 | class kmp_cache_info_t { |
2127 | public: |
2128 | struct info_t { |
2129 | unsigned level, mask; |
2130 | }; |
2131 | kmp_cache_info_t() : depth(0) { get_leaf4_levels(); } |
2132 | size_t get_depth() const { return depth; } |
2133 | info_t &operator[](size_t index) { return table[index]; } |
2134 | const info_t &operator[](size_t index) const { return table[index]; } |
2135 | |
2136 | static kmp_hw_t get_topology_type(unsigned level) { |
2137 | KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL); |
2138 | switch (level) { |
2139 | case 1: |
2140 | return KMP_HW_L1; |
2141 | case 2: |
2142 | return KMP_HW_L2; |
2143 | case 3: |
2144 | return KMP_HW_L3; |
2145 | } |
2146 | return KMP_HW_UNKNOWN; |
2147 | } |
2148 | |
2149 | private: |
2150 | static const int MAX_CACHE_LEVEL = 3; |
2151 | |
2152 | size_t depth; |
2153 | info_t table[MAX_CACHE_LEVEL]; |
2154 | |
2155 | void get_leaf4_levels() { |
2156 | unsigned level = 0; |
2157 | while (depth < MAX_CACHE_LEVEL) { |
2158 | unsigned cache_type, max_threads_sharing; |
2159 | unsigned cache_level, cache_mask_width; |
2160 | kmp_cpuid buf2; |
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
2163 | if (!cache_type) |
2164 | break; |
2165 | // Skip instruction caches |
2166 | if (cache_type == 2) { |
2167 | level++; |
2168 | continue; |
2169 | } |
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
2173 | table[depth].level = cache_level; |
2174 | table[depth].mask = ((-1) << cache_mask_width); |
2175 | depth++; |
2176 | level++; |
2177 | } |
2178 | } |
2179 | }; |
2180 | |
2181 | // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use |
2182 | // an algorithm which cycles through the available os threads, setting |
2183 | // the current thread's affinity mask to that thread, and then retrieves |
2184 | // the Apic Id for each thread context using the cpuid instruction. |
2185 | static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) { |
2186 | kmp_cpuid buf; |
2187 | *msg_id = kmp_i18n_null; |
2188 | |
2189 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
2191 | } |
2192 | |
2193 | // Check if cpuid leaf 4 is supported. |
  __kmp_x86_cpuid(0, 0, &buf);
2195 | if (buf.eax < 4) { |
2196 | *msg_id = kmp_i18n_str_NoLeaf4Support; |
2197 | return false; |
2198 | } |
2199 | |
  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable
  // of calling __kmp_get_system_affinity() and __kmp_set_system_affinity(),
  // then we need to do something else - use the defaults that we calculated
  // from issuing cpuid without binding to each proc.
2205 | if (!KMP_AFFINITY_CAPABLE()) { |
2206 | // Hack to try and infer the machine topology using only the data |
2207 | // available from cpuid on the current thread, and __kmp_xproc. |
2208 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2209 | |
2210 | // Get an upper bound on the number of threads per package using cpuid(1). |
    // On some OS/chip combinations where HT is supported by the chip but is
2212 | // disabled, this value will be 2 on a single core chip. Usually, it will be |
2213 | // 2 if HT is enabled and 1 if HT is disabled. |
    __kmp_x86_cpuid(1, 0, &buf);
2215 | int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff; |
2216 | if (maxThreadsPerPkg == 0) { |
2217 | maxThreadsPerPkg = 1; |
2218 | } |
2219 | |
2220 | // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded |
2221 | // value. |
2222 | // |
    // The author of cpu_count.cpp treated this as only an upper bound on the
2224 | // number of cores, but I haven't seen any cases where it was greater than |
2225 | // the actual number of cores, so we will treat it as exact in this block of |
2226 | // code. |
2227 | // |
2228 | // First, we need to check if cpuid(4) is supported on this chip. To see if |
2229 | // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or |
2230 | // greater. |
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
2234 | nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1; |
2235 | } else { |
2236 | nCoresPerPkg = 1; |
2237 | } |
2238 | |
2239 | // There is no way to reliably tell if HT is enabled without issuing the |
    // cpuid instruction from every thread and correlating the cpuid info, so
2241 | // if the machine is not affinity capable, we assume that HT is off. We have |
2242 | // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine |
2243 | // does not support HT. |
2244 | // |
2245 | // - Older OSes are usually found on machines with older chips, which do not |
2246 | // support HT. |
2247 | // - The performance penalty for mistakenly identifying a machine as HT when |
2248 | // it isn't (which results in blocktime being incorrectly set to 0) is |
    //   greater than the penalty for mistakenly identifying a machine as
2250 | // being 1 thread/core when it is really HT enabled (which results in |
2251 | // blocktime being incorrectly set to a positive value). |
2252 | __kmp_ncores = __kmp_xproc; |
2253 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; |
2254 | __kmp_nThreadsPerCore = 1; |
2255 | return true; |
2256 | } |
2257 | |
2258 | // From here on, we can assume that it is safe to call |
2259 | // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if |
2260 | // __kmp_affinity.type = affinity_none. |
2261 | |
2262 | // Save the affinity mask for the current thread. |
2263 | kmp_affinity_raii_t previous_affinity; |
2264 | |
2265 | // Run through each of the available contexts, binding the current thread |
2266 | // to it, and obtaining the pertinent information using the cpuid instr. |
2267 | // |
2268 | // The relevant information is: |
2269 | // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context |
  //   has a unique Apic Id, which is of the form pkg# : core# : thread#.
2271 | // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value |
2272 | // of this field determines the width of the core# + thread# fields in the |
2273 | // Apic Id. It is also an upper bound on the number of threads per |
  //   package, but it has been verified that situations happen where it is not
2275 | // exact. In particular, on certain OS/chip combinations where Intel(R) |
2276 | // Hyper-Threading Technology is supported by the chip but has been |
2277 | // disabled, the value of this field will be 2 (for a single core chip). |
2278 | // On other OS/chip combinations supporting Intel(R) Hyper-Threading |
2279 | // Technology, the value of this field will be 1 when Intel(R) |
2280 | // Hyper-Threading Technology is disabled and 2 when it is enabled. |
2281 | // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value |
2282 | // of this field (+1) determines the width of the core# field in the Apic |
2283 | // Id. The comments in "cpucount.cpp" say that this value is an upper |
2284 | // bound, but the IA-32 architecture manual says that it is exactly the |
2285 | // number of cores per package, and I haven't seen any case where it |
2286 | // wasn't. |
2287 | // |
2288 | // From this information, deduce the package Id, core Id, and thread Id, |
2289 | // and set the corresponding fields in the apicThreadInfo struct. |
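  // Illustrative decoding with hypothetical values: apicId = 0x1D,
  // maxThreadsPerPkg = 8 (widthCT = 3) and maxCoresPerPkg = 4 (widthC = 2,
  // so widthT = 1) give:
  //   pkgId    = 0x1D >> 3       = 3
  //   coreId   = (0x1D >> 1) & 3 = 2
  //   threadId = 0x1D & 1        = 1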
2290 | unsigned i; |
2291 | apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate( |
2292 | __kmp_avail_proc * sizeof(apicThreadInfo)); |
2293 | unsigned nApics = 0; |
2294 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
2295 | // Skip this proc if it is not included in the machine model. |
2296 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
2297 | continue; |
2298 | } |
2299 | KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc); |
2300 | |
    __kmp_affinity_dispatch->bind_thread(i);
2302 | threadInfo[nApics].osId = i; |
2303 | |
2304 | // The apic id and max threads per pkg come from cpuid(1). |
    __kmp_x86_cpuid(1, 0, &buf);
2306 | if (((buf.edx >> 9) & 1) == 0) { |
2307 | __kmp_free(threadInfo); |
2308 | *msg_id = kmp_i18n_str_ApicNotPresent; |
2309 | return false; |
2310 | } |
2311 | threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff; |
2312 | threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff; |
2313 | if (threadInfo[nApics].maxThreadsPerPkg == 0) { |
2314 | threadInfo[nApics].maxThreadsPerPkg = 1; |
2315 | } |
2316 | |
2317 | // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded |
2318 | // value. |
2319 | // |
2320 | // First, we need to check if cpuid(4) is supported on this chip. To see if |
2321 | // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n |
2322 | // or greater. |
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
2326 | threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1; |
2327 | } else { |
2328 | threadInfo[nApics].maxCoresPerPkg = 1; |
2329 | } |
2330 | |
2331 | // Infer the pkgId / coreId / threadId using only the info obtained locally. |
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
2336 | int widthT = widthCT - widthC; |
2337 | if (widthT < 0) { |
2338 | // I've never seen this one happen, but I suppose it could, if the cpuid |
2339 | // instruction on a chip was really screwed up. Make sure to restore the |
2340 | // affinity mask before the tail call. |
2341 | __kmp_free(threadInfo); |
2342 | *msg_id = kmp_i18n_str_InvalidCpuidInfo; |
2343 | return false; |
2344 | } |
2345 | |
2346 | int maskC = (1 << widthC) - 1; |
2347 | threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC; |
2348 | |
2349 | int maskT = (1 << widthT) - 1; |
2350 | threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT; |
2351 | |
2352 | nApics++; |
2353 | } |
2354 | |
2355 | // We've collected all the info we need. |
2356 | // Restore the old affinity mask for this thread. |
2357 | previous_affinity.restore(); |
2358 | |
2359 | // Sort the threadInfo table by physical Id. |
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);
2362 | |
2363 | // The table is now sorted by pkgId / coreId / threadId, but we really don't |
2364 | // know the radix of any of the fields. pkgId's may be sparsely assigned among |
2365 | // the chips on a system. Although coreId's are usually assigned |
2366 | // [0 .. coresPerPkg-1] and threadId's are usually assigned |
2367 | // [0..threadsPerCore-1], we don't want to make any such assumptions. |
2368 | // |
2369 | // For that matter, we don't know what coresPerPkg and threadsPerCore (or the |
2370 | // total # packages) are at this point - we want to determine that now. We |
2371 | // only have an upper bound on the first two figures. |
2372 | // |
2373 | // We also perform a consistency check at this point: the values returned by |
2374 | // the cpuid instruction for any thread bound to a given package had better |
2375 | // return the same info for maxThreadsPerPkg and maxCoresPerPkg. |
2376 | nPackages = 1; |
2377 | nCoresPerPkg = 1; |
2378 | __kmp_nThreadsPerCore = 1; |
2379 | unsigned nCores = 1; |
2380 | |
2381 | unsigned pkgCt = 1; // to determine radii |
2382 | unsigned lastPkgId = threadInfo[0].pkgId; |
2383 | unsigned coreCt = 1; |
2384 | unsigned lastCoreId = threadInfo[0].coreId; |
2385 | unsigned threadCt = 1; |
2386 | unsigned lastThreadId = threadInfo[0].threadId; |
2387 | |
  // intra-pkg consistency checks
2389 | unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg; |
2390 | unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg; |
2391 | |
2392 | for (i = 1; i < nApics; i++) { |
2393 | if (threadInfo[i].pkgId != lastPkgId) { |
2394 | nCores++; |
2395 | pkgCt++; |
2396 | lastPkgId = threadInfo[i].pkgId; |
2397 | if ((int)coreCt > nCoresPerPkg) |
2398 | nCoresPerPkg = coreCt; |
2399 | coreCt = 1; |
2400 | lastCoreId = threadInfo[i].coreId; |
2401 | if ((int)threadCt > __kmp_nThreadsPerCore) |
2402 | __kmp_nThreadsPerCore = threadCt; |
2403 | threadCt = 1; |
2404 | lastThreadId = threadInfo[i].threadId; |
2405 | |
2406 | // This is a different package, so go on to the next iteration without |
2407 | // doing any consistency checks. Reset the consistency check vars, though. |
2408 | prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg; |
2409 | prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg; |
2410 | continue; |
2411 | } |
2412 | |
2413 | if (threadInfo[i].coreId != lastCoreId) { |
2414 | nCores++; |
2415 | coreCt++; |
2416 | lastCoreId = threadInfo[i].coreId; |
2417 | if ((int)threadCt > __kmp_nThreadsPerCore) |
2418 | __kmp_nThreadsPerCore = threadCt; |
2419 | threadCt = 1; |
2420 | lastThreadId = threadInfo[i].threadId; |
2421 | } else if (threadInfo[i].threadId != lastThreadId) { |
2422 | threadCt++; |
2423 | lastThreadId = threadInfo[i].threadId; |
2424 | } else { |
2425 | __kmp_free(threadInfo); |
2426 | *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique; |
2427 | return false; |
2428 | } |
2429 | |
    // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg
    // fields agree between all the threads bound to a given package.
2432 | if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) || |
2433 | (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) { |
2434 | __kmp_free(threadInfo); |
2435 | *msg_id = kmp_i18n_str_InconsistentCpuidInfo; |
2436 | return false; |
2437 | } |
2438 | } |
2439 | // When affinity is off, this routine will still be called to set |
2440 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
2441 | // Make sure all these vars are set correctly |
2442 | nPackages = pkgCt; |
2443 | if ((int)coreCt > nCoresPerPkg) |
2444 | nCoresPerPkg = coreCt; |
2445 | if ((int)threadCt > __kmp_nThreadsPerCore) |
2446 | __kmp_nThreadsPerCore = threadCt; |
2447 | __kmp_ncores = nCores; |
2448 | KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc); |
2449 | |
2450 | // Now that we've determined the number of packages, the number of cores per |
2451 | // package, and the number of threads per core, we can construct the data |
2452 | // structure that is to be returned. |
2453 | int idx = 0; |
2454 | int pkgLevel = 0; |
2455 | int coreLevel = 1; |
2456 | int threadLevel = 2; |
2457 | //(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1); |
2458 | int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0); |
2459 | kmp_hw_t types[3]; |
2460 | if (pkgLevel >= 0) |
2461 | types[idx++] = KMP_HW_SOCKET; |
2462 | if (coreLevel >= 0) |
2463 | types[idx++] = KMP_HW_CORE; |
2464 | if (threadLevel >= 0) |
2465 | types[idx++] = KMP_HW_THREAD; |
2466 | |
2467 | KMP_ASSERT(depth > 0); |
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);
2469 | |
2470 | for (i = 0; i < nApics; ++i) { |
2471 | idx = 0; |
2472 | unsigned os = threadInfo[i].osId; |
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2474 | hw_thread.clear(); |
2475 | |
2476 | if (pkgLevel >= 0) { |
2477 | hw_thread.ids[idx++] = threadInfo[i].pkgId; |
2478 | } |
2479 | if (coreLevel >= 0) { |
2480 | hw_thread.ids[idx++] = threadInfo[i].coreId; |
2481 | } |
2482 | if (threadLevel >= 0) { |
2483 | hw_thread.ids[idx++] = threadInfo[i].threadId; |
2484 | } |
2485 | hw_thread.os_id = os; |
2486 | } |
2487 | |
2488 | __kmp_free(threadInfo); |
2489 | __kmp_topology->sort_ids(); |
2490 | if (!__kmp_topology->check_ids()) { |
    kmp_topology_t::deallocate(__kmp_topology);
2492 | __kmp_topology = nullptr; |
2493 | *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique; |
2494 | return false; |
2495 | } |
2496 | return true; |
2497 | } |
2498 | |
2499 | // Hybrid cpu detection using CPUID.1A |
2500 | // Thread should be pinned to processor already |
2501 | static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency, |
2502 | unsigned *native_model_id) { |
2503 | kmp_cpuid buf; |
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
2506 | switch (*type) { |
2507 | case KMP_HW_CORE_TYPE_ATOM: |
2508 | *efficiency = 0; |
2509 | break; |
2510 | case KMP_HW_CORE_TYPE_CORE: |
2511 | *efficiency = 1; |
2512 | break; |
2513 | default: |
2514 | *efficiency = 0; |
2515 | } |
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
2517 | } |
2518 | |
2519 | // Intel(R) microarchitecture code name Nehalem, Dunnington and later |
2520 | // architectures support a newer interface for specifying the x2APIC Ids, |
2521 | // based on CPUID.B or CPUID.1F |
2522 | /* |
2523 | * CPUID.B or 1F, Input ECX (sub leaf # aka level number) |
        Bits          Bits           Bits          Bits
       31-16          15-8           7-5           4-0
2526 | ---+-----------+--------------+-------------+-----------------+ |
2527 | EAX| reserved | reserved | reserved | Bits to Shift | |
2528 | ---+-----------|--------------+-------------+-----------------| |
2529 | EBX| reserved | Num logical processors at level (16 bits) | |
2530 | ---+-----------|--------------+-------------------------------| |
2531 | ECX| reserved | Level Type | Level Number (8 bits) | |
2532 | ---+-----------+--------------+-------------------------------| |
2533 | EDX| X2APIC ID (32 bits) | |
2534 | ---+----------------------------------------------------------+ |
2535 | */ |
2536 | |
2537 | enum { |
2538 | INTEL_LEVEL_TYPE_INVALID = 0, // Package level |
2539 | INTEL_LEVEL_TYPE_SMT = 1, |
2540 | INTEL_LEVEL_TYPE_CORE = 2, |
2541 | INTEL_LEVEL_TYPE_MODULE = 3, |
2542 | INTEL_LEVEL_TYPE_TILE = 4, |
2543 | INTEL_LEVEL_TYPE_DIE = 5, |
2544 | INTEL_LEVEL_TYPE_LAST = 6, |
2545 | }; |
2546 | |
2547 | struct cpuid_level_info_t { |
2548 | unsigned level_type, mask, mask_width, nitems, cache_mask; |
2549 | }; |
2550 | |
2551 | static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) { |
2552 | switch (intel_type) { |
2553 | case INTEL_LEVEL_TYPE_INVALID: |
2554 | return KMP_HW_SOCKET; |
2555 | case INTEL_LEVEL_TYPE_SMT: |
2556 | return KMP_HW_THREAD; |
2557 | case INTEL_LEVEL_TYPE_CORE: |
2558 | return KMP_HW_CORE; |
2559 | case INTEL_LEVEL_TYPE_TILE: |
2560 | return KMP_HW_TILE; |
2561 | case INTEL_LEVEL_TYPE_MODULE: |
2562 | return KMP_HW_MODULE; |
2563 | case INTEL_LEVEL_TYPE_DIE: |
2564 | return KMP_HW_DIE; |
2565 | } |
2566 | return KMP_HW_UNKNOWN; |
2567 | } |
2568 | |
2569 | // This function takes the topology leaf, a levels array to store the levels |
2570 | // detected and a bitmap of the known levels. |
2571 | // Returns the number of levels in the topology |
2572 | static unsigned |
2573 | __kmp_x2apicid_get_levels(int leaf, |
2574 | cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST], |
2575 | kmp_uint64 known_levels) { |
2576 | unsigned level, levels_index; |
2577 | unsigned level_type, mask_width, nitems; |
2578 | kmp_cpuid buf; |
2579 | |
  // When unknown topology layers exist, each known topology layer absorbs the
  // characteristics of the unknown layers directly above it.
  // e.g., Suppose the layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y>
  // <Z> are unknown topology layers. Then SMT will take the characteristics of
  // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>).
  // This eliminates unknown portions of the topology while still keeping the
  // correct structure.
2587 | level = levels_index = 0; |
2588 | do { |
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
2593 | if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0) |
2594 | return 0; |
2595 | |
2596 | if (known_levels & (1ull << level_type)) { |
2597 | // Add a new level to the topology |
2598 | KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST); |
2599 | levels[levels_index].level_type = level_type; |
2600 | levels[levels_index].mask_width = mask_width; |
2601 | levels[levels_index].nitems = nitems; |
2602 | levels_index++; |
2603 | } else { |
2604 | // If it is an unknown level, then logically move the previous layer up |
2605 | if (levels_index > 0) { |
2606 | levels[levels_index - 1].mask_width = mask_width; |
2607 | levels[levels_index - 1].nitems = nitems; |
2608 | } |
2609 | } |
2610 | level++; |
2611 | } while (level_type != INTEL_LEVEL_TYPE_INVALID); |
2612 | |
2613 | // Ensure the INTEL_LEVEL_TYPE_INVALID (Socket) layer isn't first |
2614 | if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID) |
2615 | return 0; |
2616 | |
2617 | // Set the masks to & with apicid |
2618 | for (unsigned i = 0; i < levels_index; ++i) { |
2619 | if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) { |
2620 | levels[i].mask = ~((-1) << levels[i].mask_width); |
2621 | levels[i].cache_mask = (-1) << levels[i].mask_width; |
2622 | for (unsigned j = 0; j < i; ++j) |
2623 | levels[i].mask ^= levels[j].mask; |
2624 | } else { |
2625 | KMP_DEBUG_ASSERT(i > 0); |
2626 | levels[i].mask = (-1) << levels[i - 1].mask_width; |
2627 | levels[i].cache_mask = 0; |
2628 | } |
2629 | } |
2630 | return levels_index; |
2631 | } |
2632 | |
2633 | static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) { |
2634 | |
2635 | cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST]; |
2636 | kmp_hw_t types[INTEL_LEVEL_TYPE_LAST]; |
2637 | unsigned levels_index; |
2638 | kmp_cpuid buf; |
2639 | kmp_uint64 known_levels; |
2640 | int topology_leaf, highest_leaf, apic_id; |
2641 | int num_leaves; |
2642 | static int leaves[] = {0, 0}; |
2643 | |
2644 | kmp_i18n_id_t leaf_message_id; |
2645 | |
2646 | KMP_BUILD_ASSERT(sizeof(known_levels) * CHAR_BIT > KMP_HW_LAST); |
2647 | |
2648 | *msg_id = kmp_i18n_null; |
2649 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
2651 | } |
2652 | |
2653 | // Figure out the known topology levels |
2654 | known_levels = 0ull; |
2655 | for (int i = 0; i < INTEL_LEVEL_TYPE_LAST; ++i) { |
    if (__kmp_intel_type_2_topology_type(i) != KMP_HW_UNKNOWN) {
2657 | known_levels |= (1ull << i); |
2658 | } |
2659 | } |
2660 | |
2661 | // Get the highest cpuid leaf supported |
  __kmp_x86_cpuid(0, 0, &buf);
2663 | highest_leaf = buf.eax; |
2664 | |
2665 | // If a specific topology method was requested, only allow that specific leaf |
2666 | // otherwise, try both leaves 31 and 11 in that order |
2667 | num_leaves = 0; |
2668 | if (__kmp_affinity_top_method == affinity_top_method_x2apicid) { |
2669 | num_leaves = 1; |
2670 | leaves[0] = 11; |
2671 | leaf_message_id = kmp_i18n_str_NoLeaf11Support; |
2672 | } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) { |
2673 | num_leaves = 1; |
2674 | leaves[0] = 31; |
2675 | leaf_message_id = kmp_i18n_str_NoLeaf31Support; |
2676 | } else { |
2677 | num_leaves = 2; |
2678 | leaves[0] = 31; |
2679 | leaves[1] = 11; |
2680 | leaf_message_id = kmp_i18n_str_NoLeaf11Support; |
2681 | } |
2682 | |
2683 | // Check to see if cpuid leaf 31 or 11 is supported. |
2684 | __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1; |
2685 | topology_leaf = -1; |
2686 | for (int i = 0; i < num_leaves; ++i) { |
2687 | int leaf = leaves[i]; |
2688 | if (highest_leaf < leaf) |
2689 | continue; |
    __kmp_x86_cpuid(leaf, 0, &buf);
2691 | if (buf.ebx == 0) |
2692 | continue; |
2693 | topology_leaf = leaf; |
2694 | levels_index = __kmp_x2apicid_get_levels(leaf, levels, known_levels); |
2695 | if (levels_index == 0) |
2696 | continue; |
2697 | break; |
2698 | } |
2699 | if (topology_leaf == -1 || levels_index == 0) { |
2700 | *msg_id = leaf_message_id; |
2701 | return false; |
2702 | } |
2703 | KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST); |
2704 | |
  // The algorithm used starts by setting the affinity to each available thread
  // and retrieving info from the cpuid instruction, so if we are not capable
  // of calling __kmp_get_system_affinity() and __kmp_set_system_affinity(),
  // then we need to do something else - use the defaults that we calculated
  // from issuing cpuid without binding to each proc.
2710 | if (!KMP_AFFINITY_CAPABLE()) { |
2711 | // Hack to try and infer the machine topology using only the data |
2712 | // available from cpuid on the current thread, and __kmp_xproc. |
2713 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2714 | for (unsigned i = 0; i < levels_index; ++i) { |
2715 | if (levels[i].level_type == INTEL_LEVEL_TYPE_SMT) { |
2716 | __kmp_nThreadsPerCore = levels[i].nitems; |
2717 | } else if (levels[i].level_type == INTEL_LEVEL_TYPE_CORE) { |
2718 | nCoresPerPkg = levels[i].nitems; |
2719 | } |
2720 | } |
2721 | __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore; |
2722 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; |
2723 | return true; |
2724 | } |
2725 | |
2726 | // Allocate the data structure to be returned. |
2727 | int depth = levels_index; |
2728 | for (int i = depth - 1, j = 0; i >= 0; --i, ++j) |
    types[j] = __kmp_intel_type_2_topology_type(levels[i].level_type);
  __kmp_topology =
      kmp_topology_t::allocate(__kmp_avail_proc, levels_index, types);
2732 | |
2733 | // Insert equivalent cache types if they exist |
2734 | kmp_cache_info_t cache_info; |
2735 | for (size_t i = 0; i < cache_info.get_depth(); ++i) { |
2736 | const kmp_cache_info_t::info_t &info = cache_info[i]; |
2737 | unsigned cache_mask = info.mask; |
2738 | unsigned cache_level = info.level; |
2739 | for (unsigned j = 0; j < levels_index; ++j) { |
2740 | unsigned hw_cache_mask = levels[j].cache_mask; |
      kmp_hw_t cache_type = kmp_cache_info_t::get_topology_type(cache_level);
      if (hw_cache_mask == cache_mask && j < levels_index - 1) {
        kmp_hw_t type =
            __kmp_intel_type_2_topology_type(levels[j + 1].level_type);
        __kmp_topology->set_equivalent_type(cache_type, type);
2746 | } |
2747 | } |
2748 | } |
2749 | |
2750 | // From here on, we can assume that it is safe to call |
2751 | // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if |
2752 | // __kmp_affinity.type = affinity_none. |
2753 | |
2754 | // Save the affinity mask for the current thread. |
2755 | kmp_affinity_raii_t previous_affinity; |
2756 | |
2757 | // Run through each of the available contexts, binding the current thread |
2758 | // to it, and obtaining the pertinent information using the cpuid instr. |
2759 | unsigned int proc; |
2760 | int hw_thread_index = 0; |
2761 | KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) { |
2762 | cpuid_level_info_t my_levels[INTEL_LEVEL_TYPE_LAST]; |
2763 | unsigned my_levels_index; |
2764 | |
2765 | // Skip this proc if it is not included in the machine model. |
2766 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
2767 | continue; |
2768 | } |
2769 | KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc); |
2770 | |
2771 | __kmp_affinity_dispatch->bind_thread(proc); |
2772 | |
2773 | // New algorithm |
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
    apic_id = buf.edx;
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index);
    my_levels_index =
        __kmp_x2apicid_get_levels(topology_leaf, my_levels, known_levels);
2779 | if (my_levels_index == 0 || my_levels_index != levels_index) { |
2780 | *msg_id = kmp_i18n_str_InvalidCpuidInfo; |
2781 | return false; |
2782 | } |
2783 | hw_thread.clear(); |
2784 | hw_thread.os_id = proc; |
2785 | // Put in topology information |
2786 | for (unsigned j = 0, idx = depth - 1; j < my_levels_index; ++j, --idx) { |
2787 | hw_thread.ids[idx] = apic_id & my_levels[j].mask; |
2788 | if (j > 0) { |
2789 | hw_thread.ids[idx] >>= my_levels[j - 1].mask_width; |
2790 | } |
2791 | } |
2792 | // Hybrid information |
2793 | if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) { |
2794 | kmp_hw_core_type_t type; |
2795 | unsigned native_model_id; |
2796 | int efficiency; |
      __kmp_get_hybrid_info(&type, &efficiency, &native_model_id);
2798 | hw_thread.attrs.set_core_type(type); |
2799 | hw_thread.attrs.set_core_eff(efficiency); |
2800 | } |
2801 | hw_thread_index++; |
2802 | } |
2803 | KMP_ASSERT(hw_thread_index > 0); |
2804 | __kmp_topology->sort_ids(); |
2805 | if (!__kmp_topology->check_ids()) { |
    kmp_topology_t::deallocate(__kmp_topology);
2807 | __kmp_topology = nullptr; |
2808 | *msg_id = kmp_i18n_str_x2ApicIDsNotUnique; |
2809 | return false; |
2810 | } |
2811 | return true; |
2812 | } |
2813 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
2814 | |
2815 | #define osIdIndex 0 |
2816 | #define threadIdIndex 1 |
2817 | #define coreIdIndex 2 |
2818 | #define pkgIdIndex 3 |
2819 | #define nodeIdIndex 4 |
2820 | |
2821 | typedef unsigned *ProcCpuInfo; |
2822 | static unsigned maxIndex = pkgIdIndex; |
2823 | |
2824 | static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a, |
2825 | const void *b) { |
2826 | unsigned i; |
2827 | const unsigned *aa = *(unsigned *const *)a; |
2828 | const unsigned *bb = *(unsigned *const *)b; |
2829 | for (i = maxIndex;; i--) { |
2830 | if (aa[i] < bb[i]) |
2831 | return -1; |
2832 | if (aa[i] > bb[i]) |
2833 | return 1; |
2834 | if (i == osIdIndex) |
2835 | break; |
2836 | } |
2837 | return 0; |
2838 | } |
2839 | |
2840 | #if KMP_USE_HIER_SCHED |
2841 | // Set the array sizes for the hierarchy layers |
2842 | static void __kmp_dispatch_set_hierarchy_values() { |
2843 | // Set the maximum number of L1's to number of cores |
2844 | // Set the maximum number of L2's to either number of cores / 2 for |
  // Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing
2846 | // Or the number of cores for Intel(R) Xeon(R) processors |
2847 | // Set the maximum number of NUMA nodes and L3's to number of packages |
2848 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] = |
2849 | nPackages * nCoresPerPkg * __kmp_nThreadsPerCore; |
2850 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores; |
2851 | #if KMP_ARCH_X86_64 && \ |
2852 | (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \ |
2853 | KMP_OS_WINDOWS) && \ |
2854 | KMP_MIC_SUPPORTED |
2855 | if (__kmp_mic_type >= mic3) |
2856 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2; |
2857 | else |
2858 | #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) |
2859 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores; |
2860 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages; |
2861 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages; |
2862 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1; |
2863 | // Set the number of threads per unit |
2864 | // Number of hardware threads per L1/L2/L3/NUMA/LOOP |
2865 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1; |
2866 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] = |
2867 | __kmp_nThreadsPerCore; |
2868 | #if KMP_ARCH_X86_64 && \ |
2869 | (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \ |
2870 | KMP_OS_WINDOWS) && \ |
2871 | KMP_MIC_SUPPORTED |
2872 | if (__kmp_mic_type >= mic3) |
2873 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] = |
2874 | 2 * __kmp_nThreadsPerCore; |
2875 | else |
2876 | #endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || KMP_OS_WINDOWS) |
2877 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] = |
2878 | __kmp_nThreadsPerCore; |
2879 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] = |
2880 | nCoresPerPkg * __kmp_nThreadsPerCore; |
2881 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] = |
2882 | nCoresPerPkg * __kmp_nThreadsPerCore; |
2883 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] = |
2884 | nPackages * nCoresPerPkg * __kmp_nThreadsPerCore; |
2885 | } |
2886 | |
2887 | // Return the index into the hierarchy for this tid and layer type (L1, L2, etc) |
2888 | // i.e., this thread's L1 or this thread's L2, etc. |
2889 | int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) { |
2890 | int index = type + 1; |
2891 | int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1]; |
2892 | KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST); |
2893 | if (type == kmp_hier_layer_e::LAYER_THREAD) |
2894 | return tid; |
2895 | else if (type == kmp_hier_layer_e::LAYER_LOOP) |
2896 | return 0; |
2897 | KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0); |
2898 | if (tid >= num_hw_threads) |
2899 | tid = tid % num_hw_threads; |
2900 | return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index]; |
2901 | } |
2902 | |
2903 | // Return the number of t1's per t2 |
2904 | int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) { |
2905 | int i1 = t1 + 1; |
2906 | int i2 = t2 + 1; |
2907 | KMP_DEBUG_ASSERT(i1 <= i2); |
2908 | KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST); |
2909 | KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST); |
2910 | KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0); |
2911 | // (nthreads/t2) / (nthreads/t1) = t1 / t2 |
2912 | return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1]; |
2913 | } |
2914 | #endif // KMP_USE_HIER_SCHED |
2915 | |
2916 | static inline const char *__kmp_cpuinfo_get_filename() { |
2917 | const char *filename; |
2918 | if (__kmp_cpuinfo_file != nullptr) |
2919 | filename = __kmp_cpuinfo_file; |
2920 | else |
2921 | filename = "/proc/cpuinfo" ; |
2922 | return filename; |
2923 | } |
2924 | |
2925 | static inline const char *__kmp_cpuinfo_get_envvar() { |
2926 | const char *envvar = nullptr; |
2927 | if (__kmp_cpuinfo_file != nullptr) |
2928 | envvar = "KMP_CPUINFO_FILE" ; |
2929 | return envvar; |
2930 | } |
2931 | |
2932 | // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the |
2933 | // affinity map. On AIX, the map is obtained through system SRAD (Scheduler |
2934 | // Resource Allocation Domain). |
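// A typical record looks like (field names vary by architecture and kernel):
//   processor       : 5
//   physical id     : 1
//   core id         : 2
// with a blank line terminating each record.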
2935 | static bool __kmp_affinity_create_cpuinfo_map(int *line, |
2936 | kmp_i18n_id_t *const msg_id) { |
2937 | *msg_id = kmp_i18n_null; |
2938 | |
2939 | #if KMP_OS_AIX |
2940 | unsigned num_records = __kmp_xproc; |
2941 | #else |
2942 | const char *filename = __kmp_cpuinfo_get_filename(); |
2943 | const char *envvar = __kmp_cpuinfo_get_envvar(); |
2944 | |
2945 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
2947 | } |
2948 | |
  kmp_safe_raii_file_t f(filename, "r", envvar);
2950 | |
  // Scan the file once, counting the number of "processor" (osId) fields and
  // finding the highest value of <n> for any node_<n> field.
2953 | char buf[256]; |
2954 | unsigned num_records = 0; |
  while (!feof(f)) {
    buf[sizeof(buf) - 1] = 1;
    if (!fgets(buf, sizeof(buf), f)) {
      // Read errors, presumably because of EOF
2959 | break; |
2960 | } |
2961 | |
2962 | char s1[] = "processor" ; |
2963 | if (strncmp(s1: buf, s2: s1, n: sizeof(s1) - 1) == 0) { |
2964 | num_records++; |
2965 | continue; |
2966 | } |
2967 | |
2968 | // FIXME - this will match "node_<n> <garbage>" |
2969 | unsigned level; |
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
2972 | if (level > (unsigned)__kmp_xproc) { // level is too big |
2973 | level = __kmp_xproc; |
2974 | } |
2975 | if (nodeIdIndex + level >= maxIndex) { |
2976 | maxIndex = nodeIdIndex + level; |
2977 | } |
2978 | continue; |
2979 | } |
2980 | } |
2981 | |
2982 | // Check for empty file / no valid processor records, or too many. The number |
2983 | // of records can't exceed the number of valid bits in the affinity mask. |
2984 | if (num_records == 0) { |
2985 | *msg_id = kmp_i18n_str_NoProcRecords; |
2986 | return false; |
2987 | } |
2988 | if (num_records > (unsigned)__kmp_xproc) { |
2989 | *msg_id = kmp_i18n_str_TooManyProcRecords; |
2990 | return false; |
2991 | } |
2992 | |
2993 | // Set the file pointer back to the beginning, so that we can scan the file |
2994 | // again, this time performing a full parse of the data. Allocate a vector of |
2995 | // ProcCpuInfo object, where we will place the data. Adding an extra element |
2996 | // at the end allows us to remove a lot of extra checks for termination |
2997 | // conditions. |
  if (fseek(f, 0, SEEK_SET) != 0) {
2999 | *msg_id = kmp_i18n_str_CantRewindCpuinfo; |
3000 | return false; |
3001 | } |
3002 | #endif // KMP_OS_AIX |
3003 | |
3004 | // Allocate the array of records to store the proc info in. The dummy |
3005 | // element at the end makes the logic in filling them out easier to code. |
3006 | unsigned **threadInfo = |
3007 | (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *)); |
3008 | unsigned i; |
3009 | for (i = 0; i <= num_records; i++) { |
3010 | threadInfo[i] = |
3011 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3012 | } |
3013 | |
3014 | #define CLEANUP_THREAD_INFO \ |
3015 | for (i = 0; i <= num_records; i++) { \ |
3016 | __kmp_free(threadInfo[i]); \ |
3017 | } \ |
3018 | __kmp_free(threadInfo); |
3019 | |
3020 | // A value of UINT_MAX means that we didn't find the field |
3021 | unsigned __index; |
3022 | |
3023 | #define INIT_PROC_INFO(p) \ |
3024 | for (__index = 0; __index <= maxIndex; __index++) { \ |
3025 | (p)[__index] = UINT_MAX; \ |
3026 | } |
3027 | |
3028 | for (i = 0; i <= num_records; i++) { |
3029 | INIT_PROC_INFO(threadInfo[i]); |
3030 | } |
3031 | |
3032 | #if KMP_OS_AIX |
3033 | int smt_threads; |
3034 | lpar_info_format1_t cpuinfo; |
3035 | unsigned num_avail = __kmp_xproc; |
3036 | |
3037 | if (__kmp_affinity.flags.verbose) |
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");
3039 | |
3040 | // Get the number of SMT threads per core. |
3041 | int retval = |
3042 | lpar_get_info(LPAR_INFO_FORMAT1, &cpuinfo, sizeof(lpar_info_format1_t)); |
3043 | if (!retval) |
3044 | smt_threads = cpuinfo.smt_threads; |
3045 | else { |
3046 | CLEANUP_THREAD_INFO; |
3047 | *msg_id = kmp_i18n_str_UnknownTopology; |
3048 | return false; |
3049 | } |
3050 | |
  // Allocate a resource set containing available system resources.
3052 | rsethandle_t sys_rset = rs_alloc(RS_SYSTEM); |
3053 | if (sys_rset == NULL) { |
3054 | CLEANUP_THREAD_INFO; |
3055 | *msg_id = kmp_i18n_str_UnknownTopology; |
3056 | return false; |
3057 | } |
3058 | // Allocate a resource set for the SRAD info. |
3059 | rsethandle_t srad = rs_alloc(RS_EMPTY); |
3060 | if (srad == NULL) { |
3061 | rs_free(sys_rset); |
3062 | CLEANUP_THREAD_INFO; |
3063 | *msg_id = kmp_i18n_str_UnknownTopology; |
3064 | return false; |
3065 | } |
3066 | |
3067 | // Get the SRAD system detail level. |
3068 | int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0); |
3069 | if (sradsdl < 0) { |
3070 | rs_free(sys_rset); |
3071 | rs_free(srad); |
3072 | CLEANUP_THREAD_INFO; |
3073 | *msg_id = kmp_i18n_str_UnknownTopology; |
3074 | return false; |
3075 | } |
3076 | // Get the number of RADs at that SRAD SDL. |
3077 | int num_rads = rs_numrads(sys_rset, sradsdl, 0); |
3078 | if (num_rads < 0) { |
3079 | rs_free(sys_rset); |
3080 | rs_free(srad); |
3081 | CLEANUP_THREAD_INFO; |
3082 | *msg_id = kmp_i18n_str_UnknownTopology; |
3083 | return false; |
3084 | } |
3085 | |
3086 | // Get the maximum number of procs that may be contained in a resource set. |
3087 | int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0); |
3088 | if (max_procs < 0) { |
3089 | rs_free(sys_rset); |
3090 | rs_free(srad); |
3091 | CLEANUP_THREAD_INFO; |
3092 | *msg_id = kmp_i18n_str_UnknownTopology; |
3093 | return false; |
3094 | } |
3095 | |
3096 | int cur_rad = 0; |
3097 | int num_set = 0; |
3098 | for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS; |
3099 | ++srad_idx) { |
3100 | // Check if the SRAD is available in the RSET. |
3101 | if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0) |
3102 | continue; |
3103 | |
3104 | for (int cpu = 0; cpu < max_procs; cpu++) { |
3105 | // Set the info for the cpu if it is in the SRAD. |
3106 | if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) { |
3107 | threadInfo[cpu][osIdIndex] = cpu; |
3108 | threadInfo[cpu][pkgIdIndex] = cur_rad; |
3109 | threadInfo[cpu][coreIdIndex] = cpu / smt_threads; |
3110 | ++num_set; |
3111 | if (num_set >= num_avail) { |
3112 | // Done if all available CPUs have been set. |
3113 | break; |
3114 | } |
3115 | } |
3116 | } |
3117 | ++cur_rad; |
3118 | } |
3119 | rs_free(sys_rset); |
3120 | rs_free(srad); |
3121 | |
3122 | // The topology is already sorted. |
3123 | |
3124 | #else // !KMP_OS_AIX |
3125 | unsigned num_avail = 0; |
3126 | *line = 0; |
3127 | #if KMP_ARCH_S390X |
3128 | bool reading_s390x_sys_info = true; |
3129 | #endif |
  while (!feof(f)) {
3131 | // Create an inner scoping level, so that all the goto targets at the end of |
3132 | // the loop appear in an outer scoping level. This avoids warnings about |
3133 | // jumping past an initialization to a target in the same block. |
3134 | { |
3135 | buf[sizeof(buf) - 1] = 1; |
3136 | bool long_line = false; |
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors, presumably because of EOF
        // If there is valid data in threadInfo[num_avail], then fake a
        // blank line to ensure that the last address gets parsed.
3141 | bool valid = false; |
3142 | for (i = 0; i <= maxIndex; i++) { |
3143 | if (threadInfo[num_avail][i] != UINT_MAX) { |
3144 | valid = true; |
3145 | } |
3146 | } |
3147 | if (!valid) { |
3148 | break; |
3149 | } |
3150 | buf[0] = 0; |
3151 | } else if (!buf[sizeof(buf) - 1]) { |
3152 | // The line is longer than the buffer. Set a flag and don't |
3153 | // emit an error if we were going to ignore the line, anyway. |
3154 | long_line = true; |
3155 | |
3156 | #define CHECK_LINE \ |
3157 | if (long_line) { \ |
3158 | CLEANUP_THREAD_INFO; \ |
3159 | *msg_id = kmp_i18n_str_LongLineCpuinfo; \ |
3160 | return false; \ |
3161 | } |
3162 | } |
3163 | (*line)++; |
3164 | |
3165 | #if KMP_ARCH_LOONGARCH64 |
3166 | // The parsing logic of /proc/cpuinfo in this function highly depends on |
3167 | // the blank lines between each processor info block. But on LoongArch a |
3168 | // blank line exists before the first processor info block (i.e. after the |
3169 | // "system type" line). This blank line was added because the "system |
3170 | // type" line is unrelated to any of the CPUs. We must skip this line so |
3171 | // that the original logic works on LoongArch. |
3172 | if (*buf == '\n' && *line == 2) |
3173 | continue; |
3174 | #endif |
3175 | #if KMP_ARCH_S390X |
3176 | // s390x /proc/cpuinfo starts with a variable number of lines containing |
3177 | // the overall system information. Skip them. |
3178 | if (reading_s390x_sys_info) { |
3179 | if (*buf == '\n') |
3180 | reading_s390x_sys_info = false; |
3181 | continue; |
3182 | } |
3183 | #endif |
3184 | |
3185 | #if KMP_ARCH_S390X |
3186 | char s1[] = "cpu number" ; |
3187 | #else |
3188 | char s1[] = "processor" ; |
3189 | #endif |
3190 | if (strncmp(s1: buf, s2: s1, n: sizeof(s1) - 1) == 0) { |
3191 | CHECK_LINE; |
3192 | char *p = strchr(s: buf + sizeof(s1) - 1, c: ':'); |
3193 | unsigned val; |
3194 | if ((p == NULL) || (KMP_SSCANF(s: p + 1, format: "%u\n" , &val) != 1)) |
3195 | goto no_val; |
3196 | if (threadInfo[num_avail][osIdIndex] != UINT_MAX) |
3197 | #if KMP_ARCH_AARCH64 |
3198 | // Handle the old AArch64 /proc/cpuinfo layout differently, |
3199 | // it contains all of the 'processor' entries listed in a |
3200 | // single 'Processor' section, therefore the normal looking |
3201 | // for duplicates in that section will always fail. |
3202 | num_avail++; |
3203 | #else |
3204 | goto dup_field; |
3205 | #endif |
3206 | threadInfo[num_avail][osIdIndex] = val; |
3207 | #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64) |
3208 | char path[256]; |
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
3214 | |
3215 | #if KMP_ARCH_S390X |
3216 | // Disambiguate physical_package_id. |
3217 | unsigned book_id; |
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/book_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &book_id);
        threadInfo[num_avail][pkgIdIndex] |= (book_id << 8);

        unsigned drawer_id;
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &drawer_id);
        threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16);
3230 | #endif |
3231 | |
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
3236 | continue; |
3237 | #else |
3238 | } |
3239 | char s2[] = "physical id" ; |
3240 | if (strncmp(s1: buf, s2: s2, n: sizeof(s2) - 1) == 0) { |
3241 | CHECK_LINE; |
3242 | char *p = strchr(s: buf + sizeof(s2) - 1, c: ':'); |
3243 | unsigned val; |
3244 | if ((p == NULL) || (KMP_SSCANF(s: p + 1, format: "%u\n" , &val) != 1)) |
3245 | goto no_val; |
3246 | if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX) |
3247 | goto dup_field; |
3248 | threadInfo[num_avail][pkgIdIndex] = val; |
3249 | continue; |
3250 | } |
3251 | char s3[] = "core id" ; |
3252 | if (strncmp(s1: buf, s2: s3, n: sizeof(s3) - 1) == 0) { |
3253 | CHECK_LINE; |
3254 | char *p = strchr(s: buf + sizeof(s3) - 1, c: ':'); |
3255 | unsigned val; |
3256 | if ((p == NULL) || (KMP_SSCANF(s: p + 1, format: "%u\n" , &val) != 1)) |
3257 | goto no_val; |
3258 | if (threadInfo[num_avail][coreIdIndex] != UINT_MAX) |
3259 | goto dup_field; |
3260 | threadInfo[num_avail][coreIdIndex] = val; |
3261 | continue; |
#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
3263 | } |
3264 | char s4[] = "thread id" ; |
3265 | if (strncmp(s1: buf, s2: s4, n: sizeof(s4) - 1) == 0) { |
3266 | CHECK_LINE; |
3267 | char *p = strchr(s: buf + sizeof(s4) - 1, c: ':'); |
3268 | unsigned val; |
3269 | if ((p == NULL) || (KMP_SSCANF(s: p + 1, format: "%u\n" , &val) != 1)) |
3270 | goto no_val; |
3271 | if (threadInfo[num_avail][threadIdIndex] != UINT_MAX) |
3272 | goto dup_field; |
3273 | threadInfo[num_avail][threadIdIndex] = val; |
3274 | continue; |
3275 | } |
3276 | unsigned level; |
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3282 | goto no_val; |
3283 | // validate the input before using level: |
3284 | if (level > (unsigned)__kmp_xproc) { // level is too big |
3285 | level = __kmp_xproc; |
3286 | } |
3287 | if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX) |
3288 | goto dup_field; |
3289 | threadInfo[num_avail][nodeIdIndex + level] = val; |
3290 | continue; |
3291 | } |
3292 | |
3293 | // We didn't recognize the leading token on the line. There are lots of |
3294 | // leading tokens that we don't recognize - if the line isn't empty, go on |
3295 | // to the next line. |
3296 | if ((*buf != 0) && (*buf != '\n')) { |
3297 | // If the line is longer than the buffer, read characters |
3298 | // until we find a newline. |
3299 | if (long_line) { |
3300 | int ch; |
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
3302 | ; |
3303 | } |
3304 | continue; |
3305 | } |
3306 | |
3307 | // A newline has signalled the end of the processor record. |
3308 | // Check that there aren't too many procs specified. |
3309 | if ((int)num_avail == __kmp_xproc) { |
3310 | CLEANUP_THREAD_INFO; |
3311 | *msg_id = kmp_i18n_str_TooManyEntries; |
3312 | return false; |
3313 | } |
3314 | |
3315 | // Check for missing fields. The osId field must be there, and we |
3316 | // currently require that the physical id field is specified, also. |
3317 | if (threadInfo[num_avail][osIdIndex] == UINT_MAX) { |
3318 | CLEANUP_THREAD_INFO; |
3319 | *msg_id = kmp_i18n_str_MissingProcField; |
3320 | return false; |
3321 | } |
3322 | if (threadInfo[0][pkgIdIndex] == UINT_MAX) { |
3323 | CLEANUP_THREAD_INFO; |
3324 | *msg_id = kmp_i18n_str_MissingPhysicalIDField; |
3325 | return false; |
3326 | } |
3327 | |
3328 | // Skip this proc if it is not included in the machine model. |
3329 | if (KMP_AFFINITY_CAPABLE() && |
3330 | !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex], |
3331 | __kmp_affin_fullMask)) { |
3332 | INIT_PROC_INFO(threadInfo[num_avail]); |
3333 | continue; |
3334 | } |
3335 | |
3336 | // We have a successful parse of this proc's info. |
3337 | // Increment the counter, and prepare for the next proc. |
3338 | num_avail++; |
3339 | KMP_ASSERT(num_avail <= num_records); |
3340 | INIT_PROC_INFO(threadInfo[num_avail]); |
3341 | } |
3342 | continue; |
3343 | |
3344 | no_val: |
3345 | CLEANUP_THREAD_INFO; |
3346 | *msg_id = kmp_i18n_str_MissingValCpuinfo; |
3347 | return false; |
3348 | |
3349 | dup_field: |
3350 | CLEANUP_THREAD_INFO; |
3351 | *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo; |
3352 | return false; |
3353 | } |
3354 | *line = 0; |
3355 | |
3356 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3357 | unsigned teamSize = 0; |
3358 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3359 | |
3360 | // check for num_records == __kmp_xproc ??? |
3361 | |
3362 | // If it is configured to omit the package level when there is only a single |
3363 | // package, the logic at the end of this routine won't work if there is only a |
3364 | // single thread |
3365 | KMP_ASSERT(num_avail > 0); |
3366 | KMP_ASSERT(num_avail <= num_records); |
3367 | |
3368 | // Sort the threadInfo table by physical Id. |
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);
3371 | |
3372 | #endif // KMP_OS_AIX |
3373 | |
3374 | // The table is now sorted by pkgId / coreId / threadId, but we really don't |
3375 | // know the radix of any of the fields. pkgId's may be sparsely assigned among |
3376 | // the chips on a system. Although coreId's are usually assigned |
3377 | // [0 .. coresPerPkg-1] and threadId's are usually assigned |
3378 | // [0..threadsPerCore-1], we don't want to make any such assumptions. |
3379 | // |
3380 | // For that matter, we don't know what coresPerPkg and threadsPerCore (or the |
3381 | // total # packages) are at this point - we want to determine that now. We |
3382 | // only have an upper bound on the first two figures. |
3383 | unsigned *counts = |
3384 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3385 | unsigned *maxCt = |
3386 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3387 | unsigned *totals = |
3388 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3389 | unsigned *lastId = |
3390 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3391 | |
3392 | bool assign_thread_ids = false; |
3393 | unsigned threadIdCt; |
3394 | unsigned index; |
3395 | |
3396 | restart_radix_check: |
3397 | threadIdCt = 0; |
3398 | |
3399 | // Initialize the counter arrays with data from threadInfo[0]. |
3400 | if (assign_thread_ids) { |
3401 | if (threadInfo[0][threadIdIndex] == UINT_MAX) { |
3402 | threadInfo[0][threadIdIndex] = threadIdCt++; |
3403 | } else if (threadIdCt <= threadInfo[0][threadIdIndex]) { |
3404 | threadIdCt = threadInfo[0][threadIdIndex] + 1; |
3405 | } |
3406 | } |
3407 | for (index = 0; index <= maxIndex; index++) { |
3408 | counts[index] = 1; |
3409 | maxCt[index] = 1; |
3410 | totals[index] = 1; |
    lastId[index] = threadInfo[0][index];
3413 | } |
3414 | |
3415 | // Run through the rest of the OS procs. |
3416 | for (i = 1; i < num_avail; i++) { |
3417 | // Find the most significant index whose id differs from the id for the |
3418 | // previous OS proc. |
3419 | for (index = maxIndex; index >= threadIdIndex; index--) { |
3420 | if (assign_thread_ids && (index == threadIdIndex)) { |
3421 | // Auto-assign the thread id field if it wasn't specified. |
3422 | if (threadInfo[i][threadIdIndex] == UINT_MAX) { |
3423 | threadInfo[i][threadIdIndex] = threadIdCt++; |
3424 | } |
3425 | // Apparently the thread id field was specified for some entries and not |
3426 | // others. Start the thread id counter off at the next higher thread id. |
3427 | else if (threadIdCt <= threadInfo[i][threadIdIndex]) { |
3428 | threadIdCt = threadInfo[i][threadIdIndex] + 1; |
3429 | } |
3430 | } |
3431 | if (threadInfo[i][index] != lastId[index]) { |
3432 | // Run through all indices which are less significant, and reset the |
3433 | // counts to 1. At all levels up to and including index, we need to |
3434 | // increment the totals and record the last id. |
3435 | unsigned index2; |
3436 | for (index2 = threadIdIndex; index2 < index; index2++) { |
3437 | totals[index2]++; |
3438 | if (counts[index2] > maxCt[index2]) { |
3439 | maxCt[index2] = counts[index2]; |
3440 | } |
3441 | counts[index2] = 1; |
3442 | lastId[index2] = threadInfo[i][index2]; |
3443 | } |
3444 | counts[index]++; |
3445 | totals[index]++; |
3446 | lastId[index] = threadInfo[i][index]; |
3447 | |
3448 | if (assign_thread_ids && (index > threadIdIndex)) { |
3449 | |
3450 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3451 | // The default team size is the total #threads in the machine |
3452 | // minus 1 thread for every core that has 3 or more threads. |
3453 | teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1); |
3454 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3455 | |
3456 | // Restart the thread counter, as we are on a new core. |
3457 | threadIdCt = 0; |
3458 | |
3459 | // Auto-assign the thread id field if it wasn't specified. |
3460 | if (threadInfo[i][threadIdIndex] == UINT_MAX) { |
3461 | threadInfo[i][threadIdIndex] = threadIdCt++; |
3462 | } |
3463 | |
3464 | // Apparently the thread id field was specified for some entries and |
3465 | // not others. Start the thread id counter off at the next higher |
3466 | // thread id. |
3467 | else if (threadIdCt <= threadInfo[i][threadIdIndex]) { |
3468 | threadIdCt = threadInfo[i][threadIdIndex] + 1; |
3469 | } |
3470 | } |
3471 | break; |
3472 | } |
3473 | } |
3474 | if (index < threadIdIndex) { |
3475 | // If thread ids were specified, it is an error if they are not unique. |
      // Also, check that we haven't already restarted the loop (to be safe -
3477 | // shouldn't need to). |
3478 | if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) { |
3479 | __kmp_free(lastId); |
3480 | __kmp_free(totals); |
3481 | __kmp_free(maxCt); |
3482 | __kmp_free(counts); |
3483 | CLEANUP_THREAD_INFO; |
3484 | *msg_id = kmp_i18n_str_PhysicalIDsNotUnique; |
3485 | return false; |
3486 | } |
3487 | |
3488 | // If the thread ids were not specified and we see entries that |
3489 | // are duplicates, start the loop over and assign the thread ids manually. |
3490 | assign_thread_ids = true; |
3491 | goto restart_radix_check; |
3492 | } |
3493 | } |
3494 | |
3495 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3496 | // The default team size is the total #threads in the machine |
3497 | // minus 1 thread for every core that has 3 or more threads. |
3498 | teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1); |
3499 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3500 | |
3501 | for (index = threadIdIndex; index <= maxIndex; index++) { |
3502 | if (counts[index] > maxCt[index]) { |
3503 | maxCt[index] = counts[index]; |
3504 | } |
3505 | } |
3506 | |
3507 | __kmp_nThreadsPerCore = maxCt[threadIdIndex]; |
3508 | nCoresPerPkg = maxCt[coreIdIndex]; |
3509 | nPackages = totals[pkgIdIndex]; |
3510 | |
3511 | // When affinity is off, this routine will still be called to set |
3512 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
3513 | // Make sure all these vars are set correctly, and return now if affinity is |
3514 | // not enabled. |
3515 | __kmp_ncores = totals[coreIdIndex]; |
3516 | if (!KMP_AFFINITY_CAPABLE()) { |
3517 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
3518 | return true; |
3519 | } |
3520 | |
3521 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3522 | // Set the default team size. |
3523 | if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) { |
3524 | __kmp_dflt_team_nth = teamSize; |
3525 | KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting " |
3526 | "__kmp_dflt_team_nth = %d\n" , |
3527 | __kmp_dflt_team_nth)); |
3528 | } |
3529 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3530 | |
3531 | KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc); |
3532 | |
3533 | // Count the number of levels which have more nodes at that level than at the |
3534 | // parent's level (with there being an implicit root node of the top level). |
3535 | // This is equivalent to saying that there is at least one node at this level |
3536 | // which has a sibling. These levels are in the map, and the package level is |
3537 | // always in the map. |
3538 | bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool)); |
3539 | for (index = threadIdIndex; index < maxIndex; index++) { |
3540 | KMP_ASSERT(totals[index] >= totals[index + 1]); |
3541 | inMap[index] = (totals[index] > totals[index + 1]); |
3542 | } |
3543 | inMap[maxIndex] = (totals[maxIndex] > 1); |
3544 | inMap[pkgIdIndex] = true; |
3545 | inMap[coreIdIndex] = true; |
3546 | inMap[threadIdIndex] = true; |
3547 | |
3548 | int depth = 0; |
3549 | int idx = 0; |
3550 | kmp_hw_t types[KMP_HW_LAST]; |
3551 | int pkgLevel = -1; |
3552 | int coreLevel = -1; |
3553 | int threadLevel = -1; |
3554 | for (index = threadIdIndex; index <= maxIndex; index++) { |
3555 | if (inMap[index]) { |
3556 | depth++; |
3557 | } |
3558 | } |
3559 | if (inMap[pkgIdIndex]) { |
3560 | pkgLevel = idx; |
3561 | types[idx++] = KMP_HW_SOCKET; |
3562 | } |
3563 | if (inMap[coreIdIndex]) { |
3564 | coreLevel = idx; |
3565 | types[idx++] = KMP_HW_CORE; |
3566 | } |
3567 | if (inMap[threadIdIndex]) { |
3568 | threadLevel = idx; |
3569 | types[idx++] = KMP_HW_THREAD; |
3570 | } |
3571 | KMP_ASSERT(depth > 0); |
3572 | |
3573 | // Construct the data structure that is to be returned. |
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
3575 | |
3576 | for (i = 0; i < num_avail; ++i) { |
3577 | unsigned os = threadInfo[i][osIdIndex]; |
3578 | int src_index; |
3579 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(index: i); |
3580 | hw_thread.clear(); |
3581 | hw_thread.os_id = os; |
3582 | |
3583 | idx = 0; |
3584 | for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) { |
3585 | if (!inMap[src_index]) { |
3586 | continue; |
3587 | } |
3588 | if (src_index == pkgIdIndex) { |
3589 | hw_thread.ids[pkgLevel] = threadInfo[i][src_index]; |
3590 | } else if (src_index == coreIdIndex) { |
3591 | hw_thread.ids[coreLevel] = threadInfo[i][src_index]; |
3592 | } else if (src_index == threadIdIndex) { |
3593 | hw_thread.ids[threadLevel] = threadInfo[i][src_index]; |
3594 | } |
3595 | } |
3596 | } |
3597 | |
3598 | __kmp_free(inMap); |
3599 | __kmp_free(lastId); |
3600 | __kmp_free(totals); |
3601 | __kmp_free(maxCt); |
3602 | __kmp_free(counts); |
3603 | CLEANUP_THREAD_INFO; |
3604 | __kmp_topology->sort_ids(); |
3605 | if (!__kmp_topology->check_ids()) { |
    kmp_topology_t::deallocate(__kmp_topology);
3607 | __kmp_topology = nullptr; |
3608 | *msg_id = kmp_i18n_str_PhysicalIDsNotUnique; |
3609 | return false; |
3610 | } |
3611 | return true; |
3612 | } |
3613 | |
3614 | // Create and return a table of affinity masks, indexed by OS thread ID. |
3615 | // This routine handles OR'ing together all the affinity masks of threads |
3616 | // that are sufficiently close, if granularity > fine. |
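// For example, with granularity=core on a machine that has two HW threads
// per core, sibling OS procs 0 and 1 both map to the mask {0,1}, so a thread
// bound through either entry may float between the two hyperthreads.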
3617 | template <typename FindNextFunctionType> |
3618 | static void __kmp_create_os_id_masks(unsigned *numUnique, |
3619 | kmp_affinity_t &affinity, |
3620 | FindNextFunctionType find_next) { |
3621 | // First form a table of affinity masks in order of OS thread id. |
3622 | int maxOsId; |
3623 | int i; |
3624 | int numAddrs = __kmp_topology->get_num_hw_threads(); |
3625 | int depth = __kmp_topology->get_depth(); |
3626 | const char *env_var = __kmp_get_affinity_env_var(affinity); |
3627 | KMP_ASSERT(numAddrs); |
3628 | KMP_ASSERT(depth); |
3629 | |
3630 | i = find_next(-1); |
3631 | // If could not find HW thread location with attributes, then return and |
3632 | // fallback to increment find_next and disregard core attributes. |
3633 | if (i >= numAddrs) |
3634 | return; |
3635 | |
3636 | maxOsId = 0; |
3637 | for (i = numAddrs - 1;; --i) { |
    int osId = __kmp_topology->at(i).os_id;
3639 | if (osId > maxOsId) { |
3640 | maxOsId = osId; |
3641 | } |
3642 | if (i == 0) |
3643 | break; |
3644 | } |
3645 | affinity.num_os_id_masks = maxOsId + 1; |
3646 | KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks); |
3647 | KMP_ASSERT(affinity.gran_levels >= 0); |
3648 | if (affinity.flags.verbose && (affinity.gran_levels > 0)) { |
3649 | KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels); |
3650 | } |
3651 | if (affinity.gran_levels >= (int)depth) { |
3652 | KMP_AFF_WARNING(affinity, AffThreadsMayMigrate); |
3653 | } |
3654 | |
3655 | // Run through the table, forming the masks for all threads on each core. |
3656 | // Threads on the same core will have identical kmp_hw_thread_t objects, not |
3657 | // considering the last level, which must be the thread id. All threads on a |
3658 | // core will appear consecutively. |
3659 | int unique = 0; |
3660 | int j = 0; // index of 1st thread on core |
3661 | int leader = 0; |
3662 | kmp_affin_mask_t *sum; |
3663 | KMP_CPU_ALLOC_ON_STACK(sum); |
3664 | KMP_CPU_ZERO(sum); |
3665 | |
3666 | i = j = leader = find_next(-1); |
3667 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum); |
3668 | kmp_full_mask_modifier_t full_mask; |
3669 | for (i = find_next(i); i < numAddrs; i = find_next(i)) { |
3670 | // If this thread is sufficiently close to the leader (within the |
3671 | // granularity setting), then set the bit for this os thread in the |
3672 | // affinity mask for this group, and go on to the next thread. |
    if (__kmp_topology->is_close(leader, i, affinity)) {
3674 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum); |
3675 | continue; |
3676 | } |
3677 | |
3678 | // For every thread in this group, copy the mask to the thread's entry in |
3679 | // the OS Id mask table. Mark the first address as a leader. |
3680 | for (; j < i; j = find_next(j)) { |
      int osId = __kmp_topology->at(j).os_id;
3682 | KMP_DEBUG_ASSERT(osId <= maxOsId); |
3683 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId); |
3684 | KMP_CPU_COPY(mask, sum); |
      __kmp_topology->at(j).leader = (j == leader);
3686 | } |
3687 | unique++; |
3688 | |
3689 | // Start a new mask. |
3690 | leader = i; |
    full_mask.include(sum);
3692 | KMP_CPU_ZERO(sum); |
3693 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum); |
3694 | } |
3695 | |
3696 | // For every thread in last group, copy the mask to the thread's |
3697 | // entry in the OS Id mask table. |
3698 | for (; j < i; j = find_next(j)) { |
    int osId = __kmp_topology->at(j).os_id;
3700 | KMP_DEBUG_ASSERT(osId <= maxOsId); |
3701 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId); |
3702 | KMP_CPU_COPY(mask, sum); |
    __kmp_topology->at(j).leader = (j == leader);
3704 | } |
  full_mask.include(sum);
3706 | unique++; |
3707 | KMP_CPU_FREE_FROM_STACK(sum); |
3708 | |
3709 | // See if the OS Id mask table further restricts or changes the full mask |
3710 | if (full_mask.restrict_to_mask() && affinity.flags.verbose) { |
3711 | __kmp_topology->print(env_var); |
3712 | } |
3713 | |
3714 | *numUnique = unique; |
3715 | } |
3716 | |
3717 | // Stuff for the affinity proclist parsers. It's easier to declare these vars |
3718 | // as file-static than to try and pass them through the calling sequence of |
3719 | // the recursive-descent OMP_PLACES parser. |
3720 | static kmp_affin_mask_t *newMasks; |
3721 | static int numNewMasks; |
3722 | static int nextNewMask; |
3723 | |
3724 | #define ADD_MASK(_mask) \ |
3725 | { \ |
3726 | if (nextNewMask >= numNewMasks) { \ |
3727 | int i; \ |
3728 | numNewMasks *= 2; \ |
3729 | kmp_affin_mask_t *temp; \ |
3730 | KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \ |
3731 | for (i = 0; i < numNewMasks / 2; i++) { \ |
3732 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \ |
3733 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \ |
3734 | KMP_CPU_COPY(dest, src); \ |
3735 | } \ |
3736 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \ |
3737 | newMasks = temp; \ |
3738 | } \ |
3739 | KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \ |
3740 | nextNewMask++; \ |
3741 | } |
3742 | |
3743 | #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \ |
3744 | { \ |
3745 | if (((_osId) > _maxOsId) || \ |
3746 | (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \ |
3747 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId); \ |
3748 | } else { \ |
3749 | ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \ |
3750 | } \ |
3751 | } |
3752 | |
3753 | // Re-parse the proclist (for the explicit affinity type), and form the list |
3754 | // of affinity newMasks indexed by gtid. |
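// For example (illustrative), proclist=[3,0,{1,2},4-7:2] produces the masks
// {3}, {0}, {1,2}, {4} and {6}: a braced set forms a single union mask,
// while a start-end:stride range adds one mask per matching OS proc.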
3755 | static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) { |
3756 | int i; |
3757 | kmp_affin_mask_t **out_masks = &affinity.masks; |
3758 | unsigned *out_numMasks = &affinity.num_masks; |
3759 | const char *proclist = affinity.proclist; |
3760 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
3761 | int maxOsId = affinity.num_os_id_masks - 1; |
3762 | const char *scan = proclist; |
3763 | const char *next = proclist; |
3764 | |
  // Use a growable temporary vector of masks while parsing (ADD_MASK doubles
  // it on demand); the result is copied into a right-sized array at the end.
3767 | numNewMasks = 2; |
3768 | KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks); |
3769 | nextNewMask = 0; |
3770 | kmp_affin_mask_t *sumMask; |
3771 | KMP_CPU_ALLOC(sumMask); |
3772 | int setSize = 0; |
3773 | |
3774 | for (;;) { |
3775 | int start, end, stride; |
3776 | |
3777 | SKIP_WS(scan); |
3778 | next = scan; |
3779 | if (*next == '\0') { |
3780 | break; |
3781 | } |
3782 | |
3783 | if (*next == '{') { |
3784 | int num; |
3785 | setSize = 0; |
3786 | next++; // skip '{' |
3787 | SKIP_WS(next); |
3788 | scan = next; |
3789 | |
3790 | // Read the first integer in the set. |
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
      SKIP_DIGITS(next);
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");
3795 | |
3796 | // Copy the mask for that osId to the sum (union) mask. |
3797 | if ((num > maxOsId) || |
3798 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) { |
3799 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num); |
3800 | KMP_CPU_ZERO(sumMask); |
3801 | } else { |
3802 | KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num)); |
3803 | setSize = 1; |
3804 | } |
3805 | |
3806 | for (;;) { |
3807 | // Check for end of set. |
3808 | SKIP_WS(next); |
3809 | if (*next == '}') { |
3810 | next++; // skip '}' |
3811 | break; |
3812 | } |
3813 | |
3814 | // Skip optional comma. |
3815 | if (*next == ',') { |
3816 | next++; |
3817 | } |
3818 | SKIP_WS(next); |
3819 | |
3820 | // Read the next integer in the set. |
3821 | scan = next; |
        KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");

        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");
3827 | |
3828 | // Add the mask for that osId to the sum mask. |
3829 | if ((num > maxOsId) || |
3830 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) { |
3831 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num); |
3832 | } else { |
3833 | KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num)); |
3834 | setSize++; |
3835 | } |
3836 | } |
3837 | if (setSize > 0) { |
3838 | ADD_MASK(sumMask); |
3839 | } |
3840 | |
3841 | SKIP_WS(next); |
3842 | if (*next == ',') { |
3843 | next++; |
3844 | } |
3845 | scan = next; |
3846 | continue; |
3847 | } |
3848 | |
3849 | // Read the first integer. |
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
3854 | SKIP_WS(next); |
3855 | |
3856 | // If this isn't a range, then add a mask to the list and go on. |
3857 | if (*next != '-') { |
3858 | ADD_MASK_OSID(start, osId2Mask, maxOsId); |
3859 | |
3860 | // Skip optional comma. |
3861 | if (*next == ',') { |
3862 | next++; |
3863 | } |
3864 | scan = next; |
3865 | continue; |
3866 | } |
3867 | |
3868 | // This is a range. Skip over the '-' and read in the 2nd int. |
3869 | next++; // skip '-' |
3870 | SKIP_WS(next); |
3871 | scan = next; |
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");
3876 | |
3877 | // Check for a stride parameter |
3878 | stride = 1; |
3879 | SKIP_WS(next); |
3880 | if (*next == ':') { |
      // A stride is specified. Skip over the ':' and read the 3rd int.
3882 | int sign = +1; |
3883 | next++; // skip ':' |
3884 | SKIP_WS(next); |
3885 | scan = next; |
3886 | if (*next == '-') { |
3887 | sign = -1; |
3888 | next++; |
3889 | SKIP_WS(next); |
3890 | scan = next; |
3891 | } |
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
3896 | stride *= sign; |
3897 | } |
3898 | |
3899 | // Do some range checks. |
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
3907 | |
3908 | // Add the mask for each OS proc # to the list. |
3909 | if (stride > 0) { |
3910 | do { |
3911 | ADD_MASK_OSID(start, osId2Mask, maxOsId); |
3912 | start += stride; |
3913 | } while (start <= end); |
3914 | } else { |
3915 | do { |
3916 | ADD_MASK_OSID(start, osId2Mask, maxOsId); |
3917 | start += stride; |
3918 | } while (start >= end); |
3919 | } |
3920 | |
3921 | // Skip optional comma. |
3922 | SKIP_WS(next); |
3923 | if (*next == ',') { |
3924 | next++; |
3925 | } |
3926 | scan = next; |
3927 | } |
3928 | |
3929 | *out_numMasks = nextNewMask; |
3930 | if (nextNewMask == 0) { |
3931 | *out_masks = NULL; |
3932 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
3933 | return; |
3934 | } |
3935 | KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask); |
3936 | for (i = 0; i < nextNewMask; i++) { |
3937 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); |
3938 | kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i); |
3939 | KMP_CPU_COPY(dest, src); |
3940 | } |
3941 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
3942 | KMP_CPU_FREE(sumMask); |
3943 | } |
3944 | |
3945 | /*----------------------------------------------------------------------------- |
3946 | Re-parse the OMP_PLACES proc id list, forming the newMasks for the different |
places. Again, here is the grammar:
3948 | |
3949 | place_list := place |
3950 | place_list := place , place_list |
3951 | place := num |
3952 | place := place : num |
3953 | place := place : num : signed |
3954 | place := { subplacelist } |
3955 | place := ! place // (lowest priority) |
3956 | subplace_list := subplace |
3957 | subplace_list := subplace , subplace_list |
3958 | subplace := num |
3959 | subplace := num : num |
3960 | subplace := num : num : signed |
3961 | signed := num |
3962 | signed := + signed |
3963 | signed := - signed |
3964 | -----------------------------------------------------------------------------*/ |
3965 | static void __kmp_process_subplace_list(const char **scan, |
3966 | kmp_affinity_t &affinity, int maxOsId, |
3967 | kmp_affin_mask_t *tempMask, |
3968 | int *setSize) { |
3969 | const char *next; |
3970 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
3971 | |
3972 | for (;;) { |
3973 | int start, count, stride, i; |
3974 | |
3975 | // Read in the starting proc id |
3976 | SKIP_WS(*scan); |
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
3978 | next = *scan; |
3979 | SKIP_DIGITS(next); |
    start = __kmp_str_to_int(*scan, *next);
3981 | KMP_ASSERT(start >= 0); |
3982 | *scan = next; |
3983 | |
3984 | // valid follow sets are ',' ':' and '}' |
3985 | SKIP_WS(*scan); |
3986 | if (**scan == '}' || **scan == ',') { |
3987 | if ((start > maxOsId) || |
3988 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) { |
3989 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start); |
3990 | } else { |
3991 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start)); |
3992 | (*setSize)++; |
3993 | } |
3994 | if (**scan == '}') { |
3995 | break; |
3996 | } |
3997 | (*scan)++; // skip ',' |
3998 | continue; |
3999 | } |
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
4001 | (*scan)++; // skip ':' |
4002 | |
4003 | // Read count parameter |
4004 | SKIP_WS(*scan); |
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
4009 | KMP_ASSERT(count >= 0); |
4010 | *scan = next; |
4011 | |
4012 | // valid follow sets are ',' ':' and '}' |
4013 | SKIP_WS(*scan); |
4014 | if (**scan == '}' || **scan == ',') { |
4015 | for (i = 0; i < count; i++) { |
4016 | if ((start > maxOsId) || |
4017 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) { |
4018 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start); |
4019 | break; // don't proliferate warnings for large count |
4020 | } else { |
4021 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start)); |
4022 | start++; |
4023 | (*setSize)++; |
4024 | } |
4025 | } |
4026 | if (**scan == '}') { |
4027 | break; |
4028 | } |
4029 | (*scan)++; // skip ',' |
4030 | continue; |
4031 | } |
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
4033 | (*scan)++; // skip ':' |
4034 | |
4035 | // Read stride parameter |
4036 | int sign = +1; |
4037 | for (;;) { |
4038 | SKIP_WS(*scan); |
4039 | if (**scan == '+') { |
4040 | (*scan)++; // skip '+' |
4041 | continue; |
4042 | } |
4043 | if (**scan == '-') { |
4044 | sign *= -1; |
4045 | (*scan)++; // skip '-' |
4046 | continue; |
4047 | } |
4048 | break; |
4049 | } |
4050 | SKIP_WS(*scan); |
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    stride = __kmp_str_to_int(*scan, *next);
4055 | KMP_ASSERT(stride >= 0); |
4056 | *scan = next; |
4057 | stride *= sign; |
4058 | |
4059 | // valid follow sets are ',' and '}' |
4060 | SKIP_WS(*scan); |
4061 | if (**scan == '}' || **scan == ',') { |
4062 | for (i = 0; i < count; i++) { |
4063 | if ((start > maxOsId) || |
4064 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) { |
4065 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start); |
4066 | break; // don't proliferate warnings for large count |
4067 | } else { |
4068 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start)); |
4069 | start += stride; |
4070 | (*setSize)++; |
4071 | } |
4072 | } |
4073 | if (**scan == '}') { |
4074 | break; |
4075 | } |
4076 | (*scan)++; // skip ',' |
4077 | continue; |
4078 | } |
4079 | |
    KMP_ASSERT2(0, "bad explicit places list");
4081 | } |
4082 | } |
4083 | |
4084 | static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity, |
4085 | int maxOsId, kmp_affin_mask_t *tempMask, |
4086 | int *setSize) { |
4087 | const char *next; |
4088 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
4089 | |
4090 | // valid follow sets are '{' '!' and num |
4091 | SKIP_WS(*scan); |
4092 | if (**scan == '{') { |
4093 | (*scan)++; // skip '{' |
4094 | __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize); |
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
4096 | (*scan)++; // skip '}' |
4097 | } else if (**scan == '!') { |
4098 | (*scan)++; // skip '!' |
4099 | __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize); |
4100 | KMP_CPU_COMPLEMENT(maxOsId, tempMask); |
4101 | } else if ((**scan >= '0') && (**scan <= '9')) { |
4102 | next = *scan; |
4103 | SKIP_DIGITS(next); |
    int num = __kmp_str_to_int(*scan, *next);
4105 | KMP_ASSERT(num >= 0); |
4106 | if ((num > maxOsId) || |
4107 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) { |
4108 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num); |
4109 | } else { |
4110 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num)); |
4111 | (*setSize)++; |
4112 | } |
4113 | *scan = next; // skip num |
4114 | } else { |
    KMP_ASSERT2(0, "bad explicit places list");
4116 | } |
4117 | } |
4118 | |
4120 | void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) { |
4121 | int i, j, count, stride, sign; |
4122 | kmp_affin_mask_t **out_masks = &affinity.masks; |
4123 | unsigned *out_numMasks = &affinity.num_masks; |
4124 | const char *placelist = affinity.proclist; |
4125 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
4126 | int maxOsId = affinity.num_os_id_masks - 1; |
4127 | const char *scan = placelist; |
4128 | const char *next = placelist; |
4129 | |
4130 | numNewMasks = 2; |
4131 | KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks); |
4132 | nextNewMask = 0; |
4133 | |
4134 | // tempMask is modified based on the previous or initial |
4135 | // place to form the current place |
4136 | // previousMask contains the previous place |
4137 | kmp_affin_mask_t *tempMask; |
4138 | kmp_affin_mask_t *previousMask; |
4139 | KMP_CPU_ALLOC(tempMask); |
4140 | KMP_CPU_ZERO(tempMask); |
4141 | KMP_CPU_ALLOC(previousMask); |
4142 | KMP_CPU_ZERO(previousMask); |
4143 | int setSize = 0; |
4144 | |
4145 | for (;;) { |
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);
4147 | |
4148 | // valid follow sets are ',' ':' and EOL |
4149 | SKIP_WS(scan); |
4150 | if (*scan == '\0' || *scan == ',') { |
4151 | if (setSize > 0) { |
4152 | ADD_MASK(tempMask); |
4153 | } |
4154 | KMP_CPU_ZERO(tempMask); |
4155 | setSize = 0; |
4156 | if (*scan == '\0') { |
4157 | break; |
4158 | } |
4159 | scan++; // skip ',' |
4160 | continue; |
4161 | } |
4162 | |
    KMP_ASSERT2(*scan == ':', "bad explicit places list");
4164 | scan++; // skip ':' |
4165 | |
4166 | // Read count parameter |
4167 | SKIP_WS(scan); |
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
    next = scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(scan, *next);
4172 | KMP_ASSERT(count >= 0); |
4173 | scan = next; |
4174 | |
4175 | // valid follow sets are ',' ':' and EOL |
4176 | SKIP_WS(scan); |
4177 | if (*scan == '\0' || *scan == ',') { |
4178 | stride = +1; |
4179 | } else { |
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
4181 | scan++; // skip ':' |
4182 | |
4183 | // Read stride parameter |
4184 | sign = +1; |
4185 | for (;;) { |
4186 | SKIP_WS(scan); |
4187 | if (*scan == '+') { |
4188 | scan++; // skip '+' |
4189 | continue; |
4190 | } |
4191 | if (*scan == '-') { |
4192 | sign *= -1; |
4193 | scan++; // skip '-' |
4194 | continue; |
4195 | } |
4196 | break; |
4197 | } |
4198 | SKIP_WS(scan); |
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
      next = scan;
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
4203 | KMP_DEBUG_ASSERT(stride >= 0); |
4204 | scan = next; |
4205 | stride *= sign; |
4206 | } |
4207 | |
4208 | // Add places determined by initial_place : count : stride |
4209 | for (i = 0; i < count; i++) { |
4210 | if (setSize == 0) { |
4211 | break; |
4212 | } |
4213 | // Add the current place, then build the next place (tempMask) from that |
4214 | KMP_CPU_COPY(previousMask, tempMask); |
4215 | ADD_MASK(previousMask); |
4216 | KMP_CPU_ZERO(tempMask); |
4217 | setSize = 0; |
4218 | KMP_CPU_SET_ITERATE(j, previousMask) { |
4219 | if (!KMP_CPU_ISSET(j, previousMask)) { |
4220 | continue; |
4221 | } |
4222 | if ((j + stride > maxOsId) || (j + stride < 0) || |
4223 | (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) || |
4224 | (!KMP_CPU_ISSET(j + stride, |
4225 | KMP_CPU_INDEX(osId2Mask, j + stride)))) { |
4226 | if (i < count - 1) { |
4227 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride); |
4228 | } |
4229 | continue; |
4230 | } |
4231 | KMP_CPU_SET(j + stride, tempMask); |
4232 | setSize++; |
4233 | } |
4234 | } |
4235 | KMP_CPU_ZERO(tempMask); |
4236 | setSize = 0; |
4237 | |
4238 | // valid follow sets are ',' and EOL |
4239 | SKIP_WS(scan); |
4240 | if (*scan == '\0') { |
4241 | break; |
4242 | } |
4243 | if (*scan == ',') { |
4244 | scan++; // skip ',' |
4245 | continue; |
4246 | } |
4247 | |
    KMP_ASSERT2(0, "bad explicit places list");
4249 | } |
4250 | |
4251 | *out_numMasks = nextNewMask; |
4252 | if (nextNewMask == 0) { |
4253 | *out_masks = NULL; |
4254 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
4255 | return; |
4256 | } |
4257 | KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask); |
4258 | KMP_CPU_FREE(tempMask); |
4259 | KMP_CPU_FREE(previousMask); |
4260 | for (i = 0; i < nextNewMask; i++) { |
4261 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); |
4262 | kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i); |
4263 | KMP_CPU_COPY(dest, src); |
4264 | } |
4265 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
4266 | } |
4267 | |
4268 | #undef ADD_MASK |
4269 | #undef ADD_MASK_OSID |
4270 | |
4271 | // This function figures out the deepest level at which there is at least one |
4272 | // cluster/core with more than one processing unit bound to it. |
4273 | static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) { |
4274 | int core_level = 0; |
4275 | |
4276 | for (int i = 0; i < nprocs; i++) { |
4277 | const kmp_hw_thread_t &hw_thread = __kmp_topology->at(index: i); |
4278 | for (int j = bottom_level; j > 0; j--) { |
4279 | if (hw_thread.ids[j] > 0) { |
4280 | if (core_level < (j - 1)) { |
4281 | core_level = j - 1; |
4282 | } |
4283 | } |
4284 | } |
4285 | } |
4286 | return core_level; |
4287 | } |
4288 | |
4289 | // This function counts number of clusters/cores at given level. |
4290 | static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level, |
4291 | int core_level) { |
  return __kmp_topology->get_count(core_level);
4293 | } |
4294 | // This function finds to which cluster/core given processing unit is bound. |
4295 | static int __kmp_affinity_find_core(int proc, int bottom_level, |
4296 | int core_level) { |
4297 | int core = 0; |
4298 | KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads()); |
4299 | for (int i = 0; i <= proc; ++i) { |
4300 | if (i + 1 <= proc) { |
4301 | for (int j = 0; j <= core_level; ++j) { |
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
4304 | core++; |
4305 | break; |
4306 | } |
4307 | } |
4308 | } |
4309 | } |
4310 | return core; |
4311 | } |
4312 | |
4313 | // This function finds maximal number of processing units bound to a |
4314 | // cluster/core at given level. |
4315 | static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level, |
4316 | int core_level) { |
4317 | if (core_level >= bottom_level) |
4318 | return 1; |
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
4321 | } |
4322 | |
4323 | static int *procarr = NULL; |
4324 | static int __kmp_aff_depth = 0; |
4325 | static int *__kmp_osid_to_hwthread_map = NULL; |
4326 | |
4327 | static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask, |
4328 | kmp_affinity_ids_t &ids, |
4329 | kmp_affinity_attrs_t &attrs) { |
4330 | if (!KMP_AFFINITY_CAPABLE()) |
4331 | return; |
4332 | |
  // Initialize ids and attrs thread data
4334 | for (int i = 0; i < KMP_HW_LAST; ++i) |
4335 | ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID; |
4336 | attrs = KMP_AFFINITY_ATTRS_UNKNOWN; |
4337 | |
4338 | // Iterate through each os id within the mask and determine |
4339 | // the topology id and attribute information |
4340 | int cpu; |
4341 | int depth = __kmp_topology->get_depth(); |
4342 | KMP_CPU_SET_ITERATE(cpu, mask) { |
4343 | int osid_idx = __kmp_osid_to_hwthread_map[cpu]; |
4344 | ids.os_id = cpu; |
4345 | const kmp_hw_thread_t &hw_thread = __kmp_topology->at(index: osid_idx); |
4346 | for (int level = 0; level < depth; ++level) { |
4347 | kmp_hw_t type = __kmp_topology->get_type(level); |
4348 | int id = hw_thread.sub_ids[level]; |
4349 | if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids.ids[type] == id) { |
4350 | ids.ids[type] = id; |
4351 | } else { |
4352 | // This mask spans across multiple topology units, set it as such |
4353 | // and mark every level below as such as well. |
4354 | ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID; |
4355 | for (; level < depth; ++level) { |
4356 | kmp_hw_t type = __kmp_topology->get_type(level); |
4357 | ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID; |
4358 | } |
4359 | } |
4360 | } |
4361 | if (!attrs.valid) { |
4362 | attrs.core_type = hw_thread.attrs.get_core_type(); |
4363 | attrs.core_eff = hw_thread.attrs.get_core_eff(); |
4364 | attrs.valid = 1; |
4365 | } else { |
4366 | // This mask spans across multiple attributes, set it as such |
4367 | if (attrs.core_type != hw_thread.attrs.get_core_type()) |
4368 | attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN; |
4369 | if (attrs.core_eff != hw_thread.attrs.get_core_eff()) |
4370 | attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF; |
4371 | } |
4372 | } |
4373 | } |
4374 | |
4375 | static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) { |
4376 | if (!KMP_AFFINITY_CAPABLE()) |
4377 | return; |
4378 | const kmp_affin_mask_t *mask = th->th.th_affin_mask; |
4379 | kmp_affinity_ids_t &ids = th->th.th_topology_ids; |
4380 | kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs; |
4381 | __kmp_affinity_get_mask_topology_info(mask, ids, attrs); |
4382 | } |
4383 | |
4384 | // Assign the topology information to each place in the place list |
4385 | // A thread can then grab not only its affinity mask, but the topology |
4386 | // information associated with that mask. e.g., Which socket is a thread on |
4387 | static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) { |
4388 | if (!KMP_AFFINITY_CAPABLE()) |
4389 | return; |
4390 | if (affinity.type != affinity_none) { |
4391 | KMP_ASSERT(affinity.num_os_id_masks); |
4392 | KMP_ASSERT(affinity.os_id_masks); |
4393 | } |
4394 | KMP_ASSERT(affinity.num_masks); |
4395 | KMP_ASSERT(affinity.masks); |
4396 | KMP_ASSERT(__kmp_affin_fullMask); |
4397 | |
4398 | int max_cpu = __kmp_affin_fullMask->get_max_cpu(); |
4399 | int num_hw_threads = __kmp_topology->get_num_hw_threads(); |
4400 | |
4401 | // Allocate thread topology information |
4402 | if (!affinity.ids) { |
4403 | affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate( |
4404 | sizeof(kmp_affinity_ids_t) * affinity.num_masks); |
4405 | } |
4406 | if (!affinity.attrs) { |
4407 | affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate( |
4408 | sizeof(kmp_affinity_attrs_t) * affinity.num_masks); |
4409 | } |
4410 | if (!__kmp_osid_to_hwthread_map) { |
    // Want the +1 because max_cpu should be a valid index into the map
4412 | __kmp_osid_to_hwthread_map = |
4413 | (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1)); |
4414 | } |
4415 | |
4416 | // Create the OS proc to hardware thread map |
4417 | for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) { |
    int os_id = __kmp_topology->at(hw_thread).os_id;
4419 | if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask)) |
4420 | __kmp_osid_to_hwthread_map[os_id] = hw_thread; |
4421 | } |
4422 | |
4423 | for (unsigned i = 0; i < affinity.num_masks; ++i) { |
4424 | kmp_affinity_ids_t &ids = affinity.ids[i]; |
4425 | kmp_affinity_attrs_t &attrs = affinity.attrs[i]; |
4426 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i); |
4427 | __kmp_affinity_get_mask_topology_info(mask, ids, attrs); |
4428 | } |
4429 | } |
4430 | |
4431 | // Called when __kmp_topology is ready |
4432 | static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) { |
4433 | // Initialize other data structures which depend on the topology |
4434 | if (__kmp_topology && __kmp_topology->get_num_hw_threads()) { |
    machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
4436 | __kmp_affinity_get_topology_info(affinity); |
4437 | #if KMP_WEIGHTED_ITERATIONS_SUPPORTED |
4438 | __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore(); |
4439 | #endif |
4440 | } |
4441 | } |
4442 | |
// Create a one-element mask array (set of places) which only contains the
// initial process's affinity mask
4445 | static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) { |
4446 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
4447 | KMP_ASSERT(affinity.type == affinity_none); |
4448 | KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads()); |
4449 | affinity.num_masks = 1; |
4450 | KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks); |
4451 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0); |
4452 | KMP_CPU_COPY(dest, __kmp_affin_fullMask); |
4453 | __kmp_aux_affinity_initialize_other_data(affinity); |
4454 | } |
4455 | |
4456 | static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) { |
4457 | // Create the "full" mask - this defines all of the processors that we |
4458 | // consider to be in the machine model. If respect is set, then it is the |
4459 | // initialization thread's affinity mask. Otherwise, it is all processors that |
4460 | // we know about on the machine. |
4461 | int verbose = affinity.flags.verbose; |
4462 | const char *env_var = affinity.env_var; |
4463 | |
4464 | // Already initialized |
4465 | if (__kmp_affin_fullMask && __kmp_affin_origMask) |
4466 | return; |
4467 | |
4468 | if (__kmp_affin_fullMask == NULL) { |
4469 | KMP_CPU_ALLOC(__kmp_affin_fullMask); |
4470 | } |
4471 | if (__kmp_affin_origMask == NULL) { |
4472 | KMP_CPU_ALLOC(__kmp_affin_origMask); |
4473 | } |
4474 | if (KMP_AFFINITY_CAPABLE()) { |
4475 | __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE); |
4476 | // Make a copy before possible expanding to the entire machine mask |
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4478 | if (affinity.flags.respect) { |
4479 | // Count the number of available processors. |
4480 | unsigned i; |
4481 | __kmp_avail_proc = 0; |
4482 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
4483 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
4484 | continue; |
4485 | } |
4486 | __kmp_avail_proc++; |
4487 | } |
4488 | if (__kmp_avail_proc > __kmp_xproc) { |
4489 | KMP_AFF_WARNING(affinity, ErrorInitializeAffinity); |
4490 | affinity.type = affinity_none; |
4491 | KMP_AFFINITY_DISABLE(); |
4492 | return; |
4493 | } |
4494 | |
4495 | if (verbose) { |
4496 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
4497 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                                __kmp_affin_fullMask);
4499 | KMP_INFORM(InitOSProcSetRespect, env_var, buf); |
4500 | } |
4501 | } else { |
4502 | if (verbose) { |
4503 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
4504 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                                __kmp_affin_fullMask);
4506 | KMP_INFORM(InitOSProcSetNotRespect, env_var, buf); |
4507 | } |
4508 | __kmp_avail_proc = |
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4510 | #if KMP_OS_WINDOWS |
4511 | if (__kmp_num_proc_groups <= 1) { |
4512 | // Copy expanded full mask if topology has single processor group |
4513 | __kmp_affin_origMask->copy(__kmp_affin_fullMask); |
4514 | } |
4515 | // Set the process affinity mask since threads' affinity |
4516 | // masks must be subset of process mask in Windows* OS |
4517 | __kmp_affin_fullMask->set_process_affinity(true); |
4518 | #endif |
4519 | } |
4520 | } |
4521 | } |
4522 | |
4523 | static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) { |
4524 | bool success = false; |
4525 | const char *env_var = affinity.env_var; |
4526 | kmp_i18n_id_t msg_id = kmp_i18n_null; |
4527 | int verbose = affinity.flags.verbose; |
4528 | |
4529 | // For backward compatibility, setting KMP_CPUINFO_FILE => |
4530 | // KMP_TOPOLOGY_METHOD=cpuinfo |
4531 | if ((__kmp_cpuinfo_file != NULL) && |
4532 | (__kmp_affinity_top_method == affinity_top_method_all)) { |
4533 | __kmp_affinity_top_method = affinity_top_method_cpuinfo; |
4534 | } |
4535 | |
4536 | if (__kmp_affinity_top_method == affinity_top_method_all) { |
    // In the default code path, errors are not fatal - we just try using
    // another method. We only emit a warning message if affinity is on, or the
    // verbose flag is set, and the nowarnings flag was not set.
4540 | #if KMP_USE_HWLOC |
4541 | if (!success && |
4542 | __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) { |
4543 | if (!__kmp_hwloc_error) { |
4544 | success = __kmp_affinity_create_hwloc_map(&msg_id); |
4545 | if (!success && verbose) { |
4546 | KMP_INFORM(AffIgnoringHwloc, env_var); |
4547 | } |
4548 | } else if (verbose) { |
4549 | KMP_INFORM(AffIgnoringHwloc, env_var); |
4550 | } |
4551 | } |
4552 | #endif |
4553 | |
4554 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
4555 | if (!success) { |
4556 | success = __kmp_affinity_create_x2apicid_map(&msg_id); |
4557 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4558 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4559 | } |
4560 | } |
4561 | if (!success) { |
4562 | success = __kmp_affinity_create_apicid_map(&msg_id); |
4563 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4564 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4565 | } |
4566 | } |
4567 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
4568 | |
4569 | #if KMP_OS_LINUX || KMP_OS_AIX |
4570 | if (!success) { |
4571 | int line = 0; |
4572 | success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id); |
4573 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4574 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4575 | } |
4576 | } |
#endif /* KMP_OS_LINUX || KMP_OS_AIX */
4578 | |
4579 | #if KMP_GROUP_AFFINITY |
4580 | if (!success && (__kmp_num_proc_groups > 1)) { |
4581 | success = __kmp_affinity_create_proc_group_map(&msg_id); |
4582 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4583 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4584 | } |
4585 | } |
4586 | #endif /* KMP_GROUP_AFFINITY */ |
4587 | |
4588 | if (!success) { |
4589 | success = __kmp_affinity_create_flat_map(&msg_id); |
4590 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4591 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4592 | } |
4593 | KMP_ASSERT(success); |
4594 | } |
4595 | } |
4596 | |
  // If the user has specified that a particular topology discovery method is
  // to be used, then we abort if that method fails. The exception is group
  // affinity, which might have been implicitly set.
4600 | #if KMP_USE_HWLOC |
4601 | else if (__kmp_affinity_top_method == affinity_top_method_hwloc) { |
4602 | KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC); |
4603 | success = __kmp_affinity_create_hwloc_map(&msg_id); |
4604 | if (!success) { |
4605 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4606 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4607 | } |
4608 | } |
4609 | #endif // KMP_USE_HWLOC |
4610 | |
4611 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
4612 | else if (__kmp_affinity_top_method == affinity_top_method_x2apicid || |
4613 | __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) { |
4614 | success = __kmp_affinity_create_x2apicid_map(&msg_id); |
4615 | if (!success) { |
4616 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4617 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4618 | } |
4619 | } else if (__kmp_affinity_top_method == affinity_top_method_apicid) { |
4620 | success = __kmp_affinity_create_apicid_map(&msg_id); |
4621 | if (!success) { |
4622 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4623 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4624 | } |
4625 | } |
4626 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
4627 | |
4628 | else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) { |
4629 | int line = 0; |
4630 | success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id); |
4631 | if (!success) { |
4632 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4633 | const char *filename = __kmp_cpuinfo_get_filename(); |
4634 | if (line > 0) { |
4635 | KMP_FATAL(FileLineMsgExiting, filename, line, |
4636 | __kmp_i18n_catgets(msg_id)); |
4637 | } else { |
4638 | KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id)); |
4639 | } |
4640 | } |
4641 | } |
4642 | |
4643 | #if KMP_GROUP_AFFINITY |
4644 | else if (__kmp_affinity_top_method == affinity_top_method_group) { |
4645 | success = __kmp_affinity_create_proc_group_map(&msg_id); |
4647 | if (!success) { |
4648 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4649 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4650 | } |
4651 | } |
4652 | #endif /* KMP_GROUP_AFFINITY */ |
4653 | |
4654 | else if (__kmp_affinity_top_method == affinity_top_method_flat) { |
4655 | success = __kmp_affinity_create_flat_map(&msg_id); |
4656 | // should not fail |
4657 | KMP_ASSERT(success); |
4658 | } |
4659 | |
4660 | // Early exit if topology could not be created |
4661 | if (!__kmp_topology) { |
4662 | if (KMP_AFFINITY_CAPABLE()) { |
4663 | KMP_AFF_WARNING(affinity, ErrorInitializeAffinity); |
4664 | } |
4665 | if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 && |
4666 | __kmp_ncores > 0) { |
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
4670 | if (verbose) { |
4671 | __kmp_topology->print(env_var); |
4672 | } |
4673 | } |
4674 | return false; |
4675 | } |
4676 | |
4677 | // Canonicalize, print (if requested), apply KMP_HW_SUBSET |
4678 | __kmp_topology->canonicalize(); |
4679 | if (verbose) |
4680 | __kmp_topology->print(env_var); |
4681 | bool filtered = __kmp_topology->filter_hw_subset(); |
4682 | if (filtered && verbose) |
    __kmp_topology->print("KMP_HW_SUBSET");
4684 | return success; |
4685 | } |
4686 | |
4687 | static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) { |
4688 | bool is_regular_affinity = (&affinity == &__kmp_affinity); |
4689 | bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity); |
4690 | const char *env_var = __kmp_get_affinity_env_var(affinity); |
4691 | |
4692 | if (affinity.flags.initialized) { |
4693 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
4694 | return; |
4695 | } |
4696 | |
4697 | if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask)) |
4698 | __kmp_aux_affinity_initialize_masks(affinity); |
4699 | |
4700 | if (is_regular_affinity && !__kmp_topology) { |
4701 | bool success = __kmp_aux_affinity_initialize_topology(affinity); |
4702 | if (success) { |
4703 | KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads()); |
4704 | } else { |
4705 | affinity.type = affinity_none; |
4706 | KMP_AFFINITY_DISABLE(); |
4707 | } |
4708 | } |
4709 | |
  // If KMP_AFFINITY=none, then only create the single "none" place
  // which is the process's initial affinity mask or the number of
  // hardware threads, depending on the respect/norespect setting.
4713 | if (affinity.type == affinity_none) { |
4714 | __kmp_create_affinity_none_places(affinity); |
4715 | #if KMP_USE_HIER_SCHED |
4716 | __kmp_dispatch_set_hierarchy_values(); |
4717 | #endif |
4718 | affinity.flags.initialized = TRUE; |
4719 | return; |
4720 | } |
4721 | |
4722 | __kmp_topology->set_granularity(affinity); |
4723 | int depth = __kmp_topology->get_depth(); |
4724 | |
4725 | // Create the table of masks, indexed by thread Id. |
4726 | unsigned numUnique; |
4727 | int numAddrs = __kmp_topology->get_num_hw_threads(); |
  // If OMP_PLACES=cores:<attribute> is specified, then attempt
  // to make the OS Id mask table using those attributes
4730 | if (affinity.core_attr_gran.valid) { |
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
4732 | KMP_ASSERT(idx >= -1); |
4733 | for (int i = idx + 1; i < numAddrs; ++i) |
        if (__kmp_topology->at(i).attrs.contains(affinity.core_attr_gran))
4735 | return i; |
4736 | return numAddrs; |
4737 | }); |
4738 | if (!affinity.os_id_masks) { |
4739 | const char *core_attribute; |
4740 | if (affinity.core_attr_gran.core_eff != kmp_hw_attr_t::UNKNOWN_CORE_EFF) |
4741 | core_attribute = "core_efficiency" ; |
4742 | else |
4743 | core_attribute = "core_type" ; |
4744 | KMP_AFF_WARNING(affinity, AffIgnoringNotAvailable, env_var, |
4745 | core_attribute, |
                      __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
4747 | } |
4748 | } |
  // If core attributes did not work, or none were specified,
  // then make the OS Id mask table in the typical incremental way.
4751 | if (!affinity.os_id_masks) { |
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
4753 | KMP_ASSERT(idx >= -1); |
4754 | return idx + 1; |
4755 | }); |
4756 | } |
4757 | if (affinity.gran_levels == 0) { |
4758 | KMP_DEBUG_ASSERT((int)numUnique == __kmp_avail_proc); |
4759 | } |
4760 | |
4761 | switch (affinity.type) { |
4762 | |
4763 | case affinity_explicit: |
4764 | KMP_DEBUG_ASSERT(affinity.proclist != NULL); |
4765 | if (is_hidden_helper_affinity || |
4766 | __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) { |
4767 | __kmp_affinity_process_proclist(affinity); |
4768 | } else { |
4769 | __kmp_affinity_process_placelist(affinity); |
4770 | } |
4771 | if (affinity.num_masks == 0) { |
4772 | KMP_AFF_WARNING(affinity, AffNoValidProcID); |
4773 | affinity.type = affinity_none; |
4774 | __kmp_create_affinity_none_places(affinity); |
4775 | affinity.flags.initialized = TRUE; |
4776 | return; |
4777 | } |
4778 | break; |
4779 | |
4780 | // The other affinity types rely on sorting the hardware threads according to |
4781 | // some permutation of the machine topology tree. Set affinity.compact |
4782 | // and affinity.offset appropriately, then jump to a common code |
4783 | // fragment to do the sort and create the array of affinity masks. |
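  // For example (illustrative): with KMP_AFFINITY=compact the permutation
  // keeps consecutive threads on neighboring hardware threads, while
  // KMP_AFFINITY=scatter inverts the sort key (compact = depth - 1 - compact
  // below) so consecutive threads land as far apart in the topology tree as
  // possible, e.g. on different sockets first.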
4784 | case affinity_logical: |
4785 | affinity.compact = 0; |
4786 | if (affinity.offset) { |
4787 | affinity.offset = |
4788 | __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc; |
4789 | } |
4790 | goto sortTopology; |
4791 | |
4792 | case affinity_physical: |
4793 | if (__kmp_nThreadsPerCore > 1) { |
4794 | affinity.compact = 1; |
4795 | if (affinity.compact >= depth) { |
4796 | affinity.compact = 0; |
4797 | } |
4798 | } else { |
4799 | affinity.compact = 0; |
4800 | } |
4801 | if (affinity.offset) { |
4802 | affinity.offset = |
4803 | __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc; |
4804 | } |
4805 | goto sortTopology; |
4806 | |
4807 | case affinity_scatter: |
4808 | if (affinity.compact >= depth) { |
4809 | affinity.compact = 0; |
4810 | } else { |
4811 | affinity.compact = depth - 1 - affinity.compact; |
4812 | } |
4813 | goto sortTopology; |
4814 | |
4815 | case affinity_compact: |
4816 | if (affinity.compact >= depth) { |
4817 | affinity.compact = depth - 1; |
4818 | } |
4819 | goto sortTopology; |
4820 | |
4821 | case affinity_balanced: |
4822 | if (depth <= 1 || is_hidden_helper_affinity) { |
4823 | KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var); |
4824 | affinity.type = affinity_none; |
4825 | __kmp_create_affinity_none_places(affinity); |
4826 | affinity.flags.initialized = TRUE; |
4827 | return; |
4828 | } else if (!__kmp_topology->is_uniform()) { |
4829 | // Save the depth for further usage |
4830 | __kmp_aff_depth = depth; |
4831 | |
      int core_level =
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
                                                 core_level);
      int maxprocpercore = __kmp_affinity_max_proc_per_core(
          __kmp_avail_proc, depth - 1, core_level);
4838 | |
4839 | int nproc = ncores * maxprocpercore; |
4840 | if ((nproc < 2) || (nproc < __kmp_avail_proc)) { |
4841 | KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var); |
4842 | affinity.type = affinity_none; |
4843 | __kmp_create_affinity_none_places(affinity); |
4844 | affinity.flags.initialized = TRUE; |
4845 | return; |
4846 | } |
4847 | |
4848 | procarr = (int *)__kmp_allocate(sizeof(int) * nproc); |
4849 | for (int i = 0; i < nproc; i++) { |
4850 | procarr[i] = -1; |
4851 | } |
4852 | |
4853 | int lastcore = -1; |
4854 | int inlastcore = 0; |
4855 | for (int i = 0; i < __kmp_avail_proc; i++) { |
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);
4858 | |
4859 | if (core == lastcore) { |
4860 | inlastcore++; |
4861 | } else { |
4862 | inlastcore = 0; |
4863 | } |
4864 | lastcore = core; |
4865 | |
4866 | procarr[core * maxprocpercore + inlastcore] = proc; |
4867 | } |
4868 | } |
4869 | if (affinity.compact >= depth) { |
4870 | affinity.compact = depth - 1; |
4871 | } |
4872 | |
4873 | sortTopology: |
4874 | // Allocate the gtid->affinity mask table. |
4875 | if (affinity.flags.dups) { |
4876 | affinity.num_masks = __kmp_avail_proc; |
4877 | } else { |
4878 | affinity.num_masks = numUnique; |
4879 | } |
4880 | |
4881 | if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) && |
4882 | (__kmp_affinity_num_places > 0) && |
4883 | ((unsigned)__kmp_affinity_num_places < affinity.num_masks) && |
4884 | !is_hidden_helper_affinity) { |
4885 | affinity.num_masks = __kmp_affinity_num_places; |
4886 | } |
4887 | |
4888 | KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks); |
4889 | |
4890 | // Sort the topology table according to the current setting of |
4891 | // affinity.compact, then fill out affinity.masks. |
4892 | __kmp_topology->sort_compact(affinity); |
4893 | { |
4894 | int i; |
4895 | unsigned j; |
4896 | int num_hw_threads = __kmp_topology->get_num_hw_threads(); |
4897 | kmp_full_mask_modifier_t full_mask; |
4898 | for (i = 0, j = 0; i < num_hw_threads; i++) { |
      if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
4900 | continue; |
4901 | } |
      int osId = __kmp_topology->at(i).os_id;
4903 | |
4904 | kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId); |
4905 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j); |
4906 | KMP_ASSERT(KMP_CPU_ISSET(osId, src)); |
4907 | KMP_CPU_COPY(dest, src); |
      full_mask.include(src);
4909 | if (++j >= affinity.num_masks) { |
4910 | break; |
4911 | } |
4912 | } |
4913 | KMP_DEBUG_ASSERT(j == affinity.num_masks); |
4914 | // See if the places list further restricts or changes the full mask |
4915 | if (full_mask.restrict_to_mask() && affinity.flags.verbose) { |
4916 | __kmp_topology->print(env_var); |
4917 | } |
4918 | } |
4919 | // Sort the topology back using ids |
4920 | __kmp_topology->sort_ids(); |
4921 | break; |
4922 | |
4923 | default: |
    KMP_ASSERT2(0, "Unexpected affinity setting");
4925 | } |
4926 | __kmp_aux_affinity_initialize_other_data(affinity); |
4927 | affinity.flags.initialized = TRUE; |
4928 | } |
4929 | |
4930 | void __kmp_affinity_initialize(kmp_affinity_t &affinity) { |
4931 | // Much of the code above was written assuming that if a machine was not |
4932 | // affinity capable, then affinity type == affinity_none. |
4933 | // We now explicitly represent this as affinity type == affinity_disabled. |
4934 | // There are too many checks for affinity type == affinity_none in this code. |
4935 | // Instead of trying to change them all, check if |
4936 | // affinity type == affinity_disabled, and if so, slam it with affinity_none, |
4937 | // call the real initialization routine, then restore affinity type to |
4938 | // affinity_disabled. |
4939 | int disabled = (affinity.type == affinity_disabled); |
4940 | if (!KMP_AFFINITY_CAPABLE()) |
4941 | KMP_ASSERT(disabled); |
4942 | if (disabled) |
4943 | affinity.type = affinity_none; |
4944 | __kmp_aux_affinity_initialize(affinity); |
4945 | if (disabled) |
4946 | affinity.type = affinity_disabled; |
4947 | } |
4948 | |
4949 | void __kmp_affinity_uninitialize(void) { |
4950 | for (kmp_affinity_t *affinity : __kmp_affinities) { |
4951 | if (affinity->masks != NULL) |
4952 | KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks); |
4953 | if (affinity->os_id_masks != NULL) |
4954 | KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks); |
4955 | if (affinity->proclist != NULL) |
4956 | __kmp_free(affinity->proclist); |
4957 | if (affinity->ids != NULL) |
4958 | __kmp_free(affinity->ids); |
4959 | if (affinity->attrs != NULL) |
4960 | __kmp_free(affinity->attrs); |
4961 | *affinity = KMP_AFFINITY_INIT(affinity->env_var); |
4962 | } |
4963 | if (__kmp_affin_origMask != NULL) { |
4964 | if (KMP_AFFINITY_CAPABLE()) { |
4965 | #if KMP_OS_AIX |
4966 | // Uninitialize by unbinding the thread. |
4967 | bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY); |
4968 | #else |
4969 | __kmp_set_system_affinity(__kmp_affin_origMask, FALSE); |
4970 | #endif |
4971 | } |
4972 | KMP_CPU_FREE(__kmp_affin_origMask); |
4973 | __kmp_affin_origMask = NULL; |
4974 | } |
4975 | __kmp_affinity_num_places = 0; |
4976 | if (procarr != NULL) { |
4977 | __kmp_free(procarr); |
4978 | procarr = NULL; |
4979 | } |
4980 | if (__kmp_osid_to_hwthread_map) { |
4981 | __kmp_free(__kmp_osid_to_hwthread_map); |
4982 | __kmp_osid_to_hwthread_map = NULL; |
4983 | } |
4984 | #if KMP_USE_HWLOC |
4985 | if (__kmp_hwloc_topology != NULL) { |
4986 | hwloc_topology_destroy(__kmp_hwloc_topology); |
4987 | __kmp_hwloc_topology = NULL; |
4988 | } |
4989 | #endif |
4990 | if (__kmp_hw_subset) { |
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
4992 | __kmp_hw_subset = nullptr; |
4993 | } |
4994 | if (__kmp_topology) { |
    kmp_topology_t::deallocate(__kmp_topology);
4996 | __kmp_topology = nullptr; |
4997 | } |
4998 | KMPAffinity::destroy_api(); |
4999 | } |
5000 | |
5001 | static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity, |
5002 | int *place, kmp_affin_mask_t **mask) { |
5003 | int mask_idx; |
5004 | bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid); |
5005 | if (is_hidden_helper) |
    // The first gtid is the regular primary thread, the second gtid is the
    // main thread of the hidden team, which does not participate in task
    // execution.
5008 | mask_idx = gtid - 2; |
5009 | else |
5010 | mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid); |
5011 | KMP_DEBUG_ASSERT(affinity->num_masks > 0); |
5012 | *place = (mask_idx + affinity->offset) % affinity->num_masks; |
5013 | *mask = KMP_CPU_INDEX(affinity->masks, *place); |
5014 | } |
5015 | |
5016 | // This function initializes the per-thread data concerning affinity including |
5017 | // the mask and topology information |
5018 | void __kmp_affinity_set_init_mask(int gtid, int isa_root) { |
5019 | |
5020 | kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]); |
5021 | |
5022 | // Set the thread topology information to default of unknown |
5023 | for (int id = 0; id < KMP_HW_LAST; ++id) |
5024 | th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID; |
5025 | th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN; |
5026 | |
5027 | if (!KMP_AFFINITY_CAPABLE()) { |
5028 | return; |
5029 | } |
5030 | |
5031 | if (th->th.th_affin_mask == NULL) { |
5032 | KMP_CPU_ALLOC(th->th.th_affin_mask); |
5033 | } else { |
5034 | KMP_CPU_ZERO(th->th.th_affin_mask); |
5035 | } |
5036 | |
5037 | // Copy the thread mask to the kmp_info_t structure. If |
5038 | // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e. |
5039 | // one that has all of the OS proc ids set, or if |
5040 | // __kmp_affinity.flags.respect is set, then the full mask is the |
5041 | // same as the mask of the initialization thread. |
5042 | kmp_affin_mask_t *mask; |
5043 | int i; |
5044 | const kmp_affinity_t *affinity; |
5045 | bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid); |
5046 | |
5047 | if (is_hidden_helper) |
5048 | affinity = &__kmp_hh_affinity; |
5049 | else |
5050 | affinity = &__kmp_affinity; |
5051 | |
5052 | if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) { |
5053 | if ((affinity->type == affinity_none) || |
5054 | (affinity->type == affinity_balanced) || |
5055 | KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) { |
5056 | #if KMP_GROUP_AFFINITY |
5057 | if (__kmp_num_proc_groups > 1) { |
5058 | return; |
5059 | } |
5060 | #endif |
5061 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
5062 | i = 0; |
5063 | mask = __kmp_affin_fullMask; |
5064 | } else { |
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
5066 | } |
5067 | } else { |
5068 | if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) { |
5069 | #if KMP_GROUP_AFFINITY |
5070 | if (__kmp_num_proc_groups > 1) { |
5071 | return; |
5072 | } |
5073 | #endif |
5074 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
5075 | i = KMP_PLACE_ALL; |
5076 | mask = __kmp_affin_fullMask; |
5077 | } else { |
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
5079 | } |
5080 | } |
5081 | |
5082 | th->th.th_current_place = i; |
5083 | if (isa_root && !is_hidden_helper) { |
5084 | th->th.th_new_place = i; |
5085 | th->th.th_first_place = 0; |
5086 | th->th.th_last_place = affinity->num_masks - 1; |
5087 | } else if (KMP_AFFINITY_NON_PROC_BIND) { |
5088 | // When using a Non-OMP_PROC_BIND affinity method, |
5089 | // set all threads' place-partition-var to the entire place list |
5090 | th->th.th_first_place = 0; |
5091 | th->th.th_last_place = affinity->num_masks - 1; |
5092 | } |
5093 | // Copy topology information associated with the place |
5094 | if (i >= 0) { |
5095 | th->th.th_topology_ids = __kmp_affinity.ids[i]; |
5096 | th->th.th_topology_attrs = __kmp_affinity.attrs[i]; |
5097 | } |
5098 | |
5099 | if (i == KMP_PLACE_ALL) { |
5100 | KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to all places\n" , |
5101 | gtid)); |
5102 | } else { |
5103 | KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n" , |
5104 | gtid, i)); |
5105 | } |
5106 | |
5107 | KMP_CPU_COPY(th->th.th_affin_mask, mask); |
5108 | } |
5109 | |
5110 | void __kmp_affinity_bind_init_mask(int gtid) { |
5111 | if (!KMP_AFFINITY_CAPABLE()) { |
5112 | return; |
5113 | } |
5114 | kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]); |
5115 | const kmp_affinity_t *affinity; |
5116 | const char *env_var; |
5117 | bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid); |
5118 | |
5119 | if (is_hidden_helper) |
5120 | affinity = &__kmp_hh_affinity; |
5121 | else |
5122 | affinity = &__kmp_affinity; |
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);
5124 | /* to avoid duplicate printing (will be correctly printed on barrier) */ |
5125 | if (affinity->flags.verbose && (affinity->type == affinity_none || |
5126 | (th->th.th_current_place != KMP_PLACE_ALL && |
5127 | affinity->type != affinity_balanced)) && |
5128 | !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) { |
5129 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5130 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                              th->th.th_affin_mask);
5132 | KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(), |
5133 | gtid, buf); |
5134 | } |
5135 | |
5136 | #if KMP_OS_WINDOWS |
5137 | // On Windows* OS, the process affinity mask might have changed. If the user |
5138 | // didn't request affinity and this call fails, just continue silently. |
5139 | // See CQ171393. |
5140 | if (affinity->type == affinity_none) { |
5141 | __kmp_set_system_affinity(th->th.th_affin_mask, FALSE); |
5142 | } else |
5143 | #endif |
#if !KMP_OS_AIX
5145 | // Do not set the full mask as the init mask on AIX. |
5146 | __kmp_set_system_affinity(th->th.th_affin_mask, TRUE); |
5147 | #endif |
5148 | } |
5149 | |
5150 | void __kmp_affinity_bind_place(int gtid) { |
5151 | // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND |
5152 | if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) { |
5153 | return; |
5154 | } |
5155 | |
5156 | kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]); |
5157 | |
5158 | KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current " |
5159 | "place = %d)\n" , |
5160 | gtid, th->th.th_new_place, th->th.th_current_place)); |
5161 | |
5162 | // Check that the new place is within this thread's partition. |
5163 | KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL); |
5164 | KMP_ASSERT(th->th.th_new_place >= 0); |
5165 | KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks); |
5166 | if (th->th.th_first_place <= th->th.th_last_place) { |
5167 | KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) && |
5168 | (th->th.th_new_place <= th->th.th_last_place)); |
5169 | } else { |
5170 | KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) || |
5171 | (th->th.th_new_place >= th->th.th_last_place)); |
5172 | } |
5173 | |
5174 | // Copy the thread mask to the kmp_info_t structure, |
5175 | // and set this thread's affinity. |
5176 | kmp_affin_mask_t *mask = |
5177 | KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place); |
5178 | KMP_CPU_COPY(th->th.th_affin_mask, mask); |
5179 | th->th.th_current_place = th->th.th_new_place; |
5180 | |
5181 | if (__kmp_affinity.flags.verbose) { |
5182 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5183 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                              th->th.th_affin_mask);
5185 | KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND" , (kmp_int32)getpid(), |
5186 | __kmp_gettid(), gtid, buf); |
5187 | } |
5188 | __kmp_set_system_affinity(th->th.th_affin_mask, TRUE); |
5189 | } |
5190 | |
5191 | int __kmp_aux_set_affinity(void **mask) { |
5192 | int gtid; |
5193 | kmp_info_t *th; |
5194 | int retval; |
5195 | |
5196 | if (!KMP_AFFINITY_CAPABLE()) { |
5197 | return -1; |
5198 | } |
5199 | |
5200 | gtid = __kmp_entry_gtid(); |
5201 | KA_TRACE( |
5202 | 1000, ("" ); { |
5203 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5204 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5205 | (kmp_affin_mask_t *)(*mask)); |
5206 | __kmp_debug_printf( |
5207 | "kmp_set_affinity: setting affinity mask for thread %d = %s\n" , |
5208 | gtid, buf); |
5209 | }); |
5210 | |
5211 | if (__kmp_env_consistency_check) { |
5212 | if ((mask == NULL) || (*mask == NULL)) { |
5213 | KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity" ); |
5214 | } else { |
5215 | unsigned proc; |
5216 | int num_procs = 0; |
5217 | |
5218 | KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) { |
5219 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5220 | KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity" ); |
5221 | } |
5222 | if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) { |
5223 | continue; |
5224 | } |
5225 | num_procs++; |
5226 | } |
5227 | if (num_procs == 0) { |
5228 | KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity" ); |
5229 | } |
5230 | |
5231 | #if KMP_GROUP_AFFINITY |
5232 | if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) { |
5233 | KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity" ); |
5234 | } |
5235 | #endif /* KMP_GROUP_AFFINITY */ |
5236 | } |
5237 | } |
5238 | |
5239 | th = __kmp_threads[gtid]; |
5240 | KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL); |
5241 | retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE); |
5242 | if (retval == 0) { |
5243 | KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask)); |
5244 | } |
5245 | |
5246 | th->th.th_current_place = KMP_PLACE_UNDEFINED; |
5247 | th->th.th_new_place = KMP_PLACE_UNDEFINED; |
5248 | th->th.th_first_place = 0; |
5249 | th->th.th_last_place = __kmp_affinity.num_masks - 1; |
5250 | |
  // Turn off 4.0 affinity for the current thread at this parallel level.
5252 | th->th.th_current_task->td_icvs.proc_bind = proc_bind_false; |
5253 | |
5254 | return retval; |
5255 | } |
5256 | |
5257 | int __kmp_aux_get_affinity(void **mask) { |
5258 | int gtid; |
5259 | int retval; |
5260 | #if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG |
5261 | kmp_info_t *th; |
5262 | #endif |
5263 | if (!KMP_AFFINITY_CAPABLE()) { |
5264 | return -1; |
5265 | } |
5266 | |
5267 | gtid = __kmp_entry_gtid(); |
5268 | #if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG |
5269 | th = __kmp_threads[gtid]; |
5270 | #else |
5271 | (void)gtid; // unused variable |
5272 | #endif |
5273 | KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL); |
5274 | |
5275 | KA_TRACE( |
5276 | 1000, ("" ); { |
5277 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5278 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5279 | th->th.th_affin_mask); |
5280 | __kmp_printf( |
5281 | "kmp_get_affinity: stored affinity mask for thread %d = %s\n" , gtid, |
5282 | buf); |
5283 | }); |
5284 | |
5285 | if (__kmp_env_consistency_check) { |
5286 | if ((mask == NULL) || (*mask == NULL)) { |
5287 | KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity" ); |
5288 | } |
5289 | } |
5290 | |
5291 | #if !KMP_OS_WINDOWS && !KMP_OS_AIX |
5292 | |
5293 | retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE); |
5294 | KA_TRACE( |
5295 | 1000, ("" ); { |
5296 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5297 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5298 | (kmp_affin_mask_t *)(*mask)); |
5299 | __kmp_printf( |
5300 | "kmp_get_affinity: system affinity mask for thread %d = %s\n" , gtid, |
5301 | buf); |
5302 | }); |
5303 | return retval; |
5304 | |
5305 | #else |
5306 | (void)retval; |
5307 | |
5308 | KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask); |
5309 | return 0; |
5310 | |
5311 | #endif /* !KMP_OS_WINDOWS && !KMP_OS_AIX */ |
5312 | } |
5313 | |
5314 | int __kmp_aux_get_affinity_max_proc() { |
5315 | if (!KMP_AFFINITY_CAPABLE()) { |
5316 | return 0; |
5317 | } |
5318 | #if KMP_GROUP_AFFINITY |
5319 | if (__kmp_num_proc_groups > 1) { |
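    // e.g. (illustrative): 2 processor groups on a 64-bit build gives
    // 2 * sizeof(DWORD_PTR) * CHAR_BIT = 2 * 8 * 8 = 128 procs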
5320 | return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT); |
5321 | } |
5322 | #endif |
5323 | return __kmp_xproc; |
5324 | } |
5325 | |
5326 | int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) { |
5327 | if (!KMP_AFFINITY_CAPABLE()) { |
5328 | return -1; |
5329 | } |
5330 | |
5331 | KA_TRACE( |
5332 | 1000, ("" ); { |
5333 | int gtid = __kmp_entry_gtid(); |
5334 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5335 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5336 | (kmp_affin_mask_t *)(*mask)); |
5337 | __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in " |
5338 | "affinity mask for thread %d = %s\n" , |
5339 | proc, gtid, buf); |
5340 | }); |
5341 | |
5342 | if (__kmp_env_consistency_check) { |
5343 | if ((mask == NULL) || (*mask == NULL)) { |
5344 | KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc" ); |
5345 | } |
5346 | } |
5347 | |
5348 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { |
5349 | return -1; |
5350 | } |
5351 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5352 | return -2; |
5353 | } |
5354 | |
5355 | KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask)); |
5356 | return 0; |
5357 | } |
5358 | |
5359 | int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) { |
5360 | if (!KMP_AFFINITY_CAPABLE()) { |
5361 | return -1; |
5362 | } |
5363 | |
5364 | KA_TRACE( |
5365 | 1000, ("" ); { |
5366 | int gtid = __kmp_entry_gtid(); |
5367 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5368 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5369 | (kmp_affin_mask_t *)(*mask)); |
5370 | __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in " |
5371 | "affinity mask for thread %d = %s\n" , |
5372 | proc, gtid, buf); |
5373 | }); |
5374 | |
5375 | if (__kmp_env_consistency_check) { |
5376 | if ((mask == NULL) || (*mask == NULL)) { |
5377 | KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc" ); |
5378 | } |
5379 | } |
5380 | |
5381 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { |
5382 | return -1; |
5383 | } |
5384 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5385 | return -2; |
5386 | } |
5387 | |
5388 | KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask)); |
5389 | return 0; |
5390 | } |
5391 | |
5392 | int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) { |
5393 | if (!KMP_AFFINITY_CAPABLE()) { |
5394 | return -1; |
5395 | } |
5396 | |
5397 | KA_TRACE( |
5398 | 1000, ("" ); { |
5399 | int gtid = __kmp_entry_gtid(); |
5400 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5401 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5402 | (kmp_affin_mask_t *)(*mask)); |
5403 | __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in " |
5404 | "affinity mask for thread %d = %s\n" , |
5405 | proc, gtid, buf); |
5406 | }); |
5407 | |
5408 | if (__kmp_env_consistency_check) { |
5409 | if ((mask == NULL) || (*mask == NULL)) { |
5410 | KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc" ); |
5411 | } |
5412 | } |
5413 | |
5414 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { |
5415 | return -1; |
5416 | } |
5417 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5418 | return 0; |
5419 | } |
5420 | |
5421 | return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask)); |
5422 | } |
5423 | |
5424 | #if KMP_WEIGHTED_ITERATIONS_SUPPORTED |
5425 | // Returns first os proc id with ATOM core |
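// (the binary search below assumes the topology table is sorted so that all
// KMP_HW_CORE_TYPE_CORE hw threads precede the KMP_HW_CORE_TYPE_ATOM ones)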
5426 | int __kmp_get_first_osid_with_ecore(void) { |
5427 | int low = 0; |
5428 | int high = __kmp_topology->get_num_hw_threads() - 1; |
5429 | int mid = 0; |
5430 | while (high - low > 1) { |
5431 | mid = (high + low) / 2; |
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
5433 | KMP_HW_CORE_TYPE_CORE) { |
5434 | low = mid + 1; |
5435 | } else { |
5436 | high = mid; |
5437 | } |
5438 | } |
  if (__kmp_topology->at(mid).attrs.get_core_type() == KMP_HW_CORE_TYPE_ATOM) {
5440 | return mid; |
5441 | } |
5442 | return -1; |
5443 | } |
5444 | #endif |
5445 | |
5446 | // Dynamic affinity settings - Affinity balanced |
5447 | void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) { |
5448 | KMP_DEBUG_ASSERT(th); |
5449 | bool fine_gran = true; |
5450 | int tid = th->th.th_info.ds.ds_tid; |
5451 | const char *env_var = "KMP_AFFINITY" ; |
5452 | |
5453 | // Do not perform balanced affinity for the hidden helper threads |
5454 | if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th))) |
5455 | return; |
5456 | |
5457 | switch (__kmp_affinity.gran) { |
5458 | case KMP_HW_THREAD: |
5459 | break; |
5460 | case KMP_HW_CORE: |
5461 | if (__kmp_nThreadsPerCore > 1) { |
5462 | fine_gran = false; |
5463 | } |
5464 | break; |
5465 | case KMP_HW_SOCKET: |
5466 | if (nCoresPerPkg > 1) { |
5467 | fine_gran = false; |
5468 | } |
5469 | break; |
5470 | default: |
5471 | fine_gran = false; |
5472 | } |
5473 | |
5474 | if (__kmp_topology->is_uniform()) { |
5475 | int coreID; |
5476 | int threadID; |
5477 | // Number of hyper threads per core in HT machine |
5478 | int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores; |
5479 | // Number of cores |
5480 | int ncores = __kmp_ncores; |
5481 | if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) { |
5482 | __kmp_nth_per_core = __kmp_avail_proc / nPackages; |
5483 | ncores = nPackages; |
5484 | } |
5485 | // How many threads will be bound to each core |
5486 | int chunk = nthreads / ncores; |
5487 | // How many cores will have an additional thread bound to it - "big cores" |
5488 | int big_cores = nthreads % ncores; |
5489 | // Number of threads on the big cores |
5490 | int big_nth = (chunk + 1) * big_cores; |
5491 | if (tid < big_nth) { |
5492 | coreID = tid / (chunk + 1); |
5493 | threadID = (tid % (chunk + 1)) % __kmp_nth_per_core; |
5494 | } else { // tid >= big_nth |
5495 | coreID = (tid - big_cores) / chunk; |
5496 | threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core; |
5497 | } |
5498 | KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(), |
5499 | "Illegal set affinity operation when not capable" ); |
5500 | |
5501 | kmp_affin_mask_t *mask = th->th.th_affin_mask; |
5502 | KMP_CPU_ZERO(mask); |
5503 | |
5504 | if (fine_gran) { |
      int osID =
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
5507 | KMP_CPU_SET(osID, mask); |
5508 | } else { |
5509 | for (int i = 0; i < __kmp_nth_per_core; i++) { |
5510 | int osID; |
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
5512 | KMP_CPU_SET(osID, mask); |
5513 | } |
5514 | } |
5515 | if (__kmp_affinity.flags.verbose) { |
5516 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5517 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask); |
5518 | KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(), |
5519 | tid, buf); |
5520 | } |
5521 | __kmp_affinity_get_thread_topology_info(th); |
5522 | __kmp_set_system_affinity(mask, TRUE); |
5523 | } else { // Non-uniform topology |
5524 | |
5525 | kmp_affin_mask_t *mask = th->th.th_affin_mask; |
5526 | KMP_CPU_ZERO(mask); |
5527 | |
    int core_level =
        __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
    int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                               __kmp_aff_depth - 1, core_level);
    int nth_per_core = __kmp_affinity_max_proc_per_core(
        __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
5534 | |
5535 | // For performance gain consider the special case nthreads == |
5536 | // __kmp_avail_proc |
5537 | if (nthreads == __kmp_avail_proc) { |
5538 | if (fine_gran) { |
        int osID = __kmp_topology->at(tid).os_id;
5540 | KMP_CPU_SET(osID, mask); |
5541 | } else { |
        int core =
            __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
5544 | for (int i = 0; i < __kmp_avail_proc; i++) { |
          int osID = __kmp_topology->at(i).os_id;
          if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
              core) {
5548 | KMP_CPU_SET(osID, mask); |
5549 | } |
5550 | } |
5551 | } |
5552 | } else if (nthreads <= ncores) { |
5553 | |
5554 | int core = 0; |
5555 | for (int i = 0; i < ncores; i++) { |
5556 | // Check if this core from procarr[] is in the mask |
5557 | int in_mask = 0; |
5558 | for (int j = 0; j < nth_per_core; j++) { |
5559 | if (procarr[i * nth_per_core + j] != -1) { |
5560 | in_mask = 1; |
5561 | break; |
5562 | } |
5563 | } |
5564 | if (in_mask) { |
5565 | if (tid == core) { |
5566 | for (int j = 0; j < nth_per_core; j++) { |
5567 | int osID = procarr[i * nth_per_core + j]; |
5568 | if (osID != -1) { |
5569 | KMP_CPU_SET(osID, mask); |
5570 | // For fine granularity it is enough to set the first available |
5571 | // osID for this core |
5572 | if (fine_gran) { |
5573 | break; |
5574 | } |
5575 | } |
5576 | } |
5577 | break; |
5578 | } else { |
5579 | core++; |
5580 | } |
5581 | } |
5582 | } |
5583 | } else { // nthreads > ncores |
5584 | // Array to save the number of processors at each core |
5585 | int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores); |
5586 | // Array to save the number of cores with "x" available processors; |
5587 | int *ncores_with_x_procs = |
5588 | (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1)); |
5589 | // Array to save the number of cores with # procs from x to nth_per_core |
5590 | int *ncores_with_x_to_max_procs = |
5591 | (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1)); |
5592 | |
5593 | for (int i = 0; i <= nth_per_core; i++) { |
5594 | ncores_with_x_procs[i] = 0; |
5595 | ncores_with_x_to_max_procs[i] = 0; |
5596 | } |
5597 | |
5598 | for (int i = 0; i < ncores; i++) { |
5599 | int cnt = 0; |
5600 | for (int j = 0; j < nth_per_core; j++) { |
5601 | if (procarr[i * nth_per_core + j] != -1) { |
5602 | cnt++; |
5603 | } |
5604 | } |
5605 | nproc_at_core[i] = cnt; |
5606 | ncores_with_x_procs[cnt]++; |
5607 | } |
5608 | |
5609 | for (int i = 0; i <= nth_per_core; i++) { |
5610 | for (int j = i; j <= nth_per_core; j++) { |
5611 | ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j]; |
5612 | } |
5613 | } |
5614 | |
5615 | // Max number of processors |
5616 | int nproc = nth_per_core * ncores; |
5617 | // An array to keep number of threads per each context |
5618 | int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc); |
5619 | for (int i = 0; i < nproc; i++) { |
5620 | newarr[i] = 0; |
5621 | } |
5622 | |
5623 | int nth = nthreads; |
5624 | int flag = 0; |
5625 | while (nth > 0) { |
5626 | for (int j = 1; j <= nth_per_core; j++) { |
5627 | int cnt = ncores_with_x_to_max_procs[j]; |
5628 | for (int i = 0; i < ncores; i++) { |
5629 | // Skip the core with 0 processors |
5630 | if (nproc_at_core[i] == 0) { |
5631 | continue; |
5632 | } |
5633 | for (int k = 0; k < nth_per_core; k++) { |
5634 | if (procarr[i * nth_per_core + k] != -1) { |
5635 | if (newarr[i * nth_per_core + k] == 0) { |
5636 | newarr[i * nth_per_core + k] = 1; |
5637 | cnt--; |
5638 | nth--; |
5639 | break; |
5640 | } else { |
5641 | if (flag != 0) { |
5642 | newarr[i * nth_per_core + k]++; |
5643 | cnt--; |
5644 | nth--; |
5645 | break; |
5646 | } |
5647 | } |
5648 | } |
5649 | } |
5650 | if (cnt == 0 || nth == 0) { |
5651 | break; |
5652 | } |
5653 | } |
5654 | if (nth == 0) { |
5655 | break; |
5656 | } |
5657 | } |
5658 | flag = 1; |
5659 | } |
5660 | int sum = 0; |
5661 | for (int i = 0; i < nproc; i++) { |
5662 | sum += newarr[i]; |
5663 | if (sum > tid) { |
5664 | if (fine_gran) { |
5665 | int osID = procarr[i]; |
5666 | KMP_CPU_SET(osID, mask); |
5667 | } else { |
5668 | int coreID = i / nth_per_core; |
5669 | for (int ii = 0; ii < nth_per_core; ii++) { |
5670 | int osID = procarr[coreID * nth_per_core + ii]; |
5671 | if (osID != -1) { |
5672 | KMP_CPU_SET(osID, mask); |
5673 | } |
5674 | } |
5675 | } |
5676 | break; |
5677 | } |
5678 | } |
5679 | __kmp_free(newarr); |
5680 | } |
5681 | |
5682 | if (__kmp_affinity.flags.verbose) { |
5683 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5684 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask); |
5685 | KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(), |
5686 | tid, buf); |
5687 | } |
5688 | __kmp_affinity_get_thread_topology_info(th); |
5689 | __kmp_set_system_affinity(mask, TRUE); |
5690 | } |
5691 | } |
5692 | |
5693 | #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \ |
5694 | KMP_OS_AIX |
// We don't need this entry for Windows because
// there is a GetProcessAffinityMask() API.
5697 | // |
5698 | // The intended usage is indicated by these steps: |
5699 | // 1) The user gets the current affinity mask |
5700 | // 2) Then sets the affinity by calling this function |
5701 | // 3) Error check the return value |
5702 | // 4) Use non-OpenMP parallelization |
5703 | // 5) Reset the affinity to what was stored in step 1) |
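//
// A minimal caller-side sketch of those steps (illustrative only;
// run_non_openmp_work is a hypothetical helper):
//
//   cpu_set_t saved;
//   pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved); // step 1
//   if (kmp_set_thread_affinity_mask_initial() == 0) {             // steps 2-3
//     run_non_openmp_work();                                       // step 4
//     pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved); // step 5
//   }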
5704 | #ifdef __cplusplus |
5705 | extern "C" |
5706 | #endif |
5707 | int |
5708 | kmp_set_thread_affinity_mask_initial() |
5709 | // the function returns 0 on success, |
5710 | // -1 if we cannot bind thread |
5711 | // >0 (errno) if an error happened during binding |
5712 | { |
5713 | int gtid = __kmp_get_gtid(); |
5714 | if (gtid < 0) { |
5715 | // Do not touch non-omp threads |
5716 | KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: " |
5717 | "non-omp thread, returning\n" )); |
5718 | return -1; |
5719 | } |
5720 | if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) { |
5721 | KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: " |
5722 | "affinity not initialized, returning\n" )); |
5723 | return -1; |
5724 | } |
5725 | KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: " |
5726 | "set full mask for thread %d\n" , |
5727 | gtid)); |
5728 | KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL); |
5729 | #if KMP_OS_AIX |
5730 | return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY); |
5731 | #else |
5732 | return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE); |
5733 | #endif |
5734 | } |
5735 | #endif |
5736 | |
5737 | #endif // KMP_AFFINITY_SUPPORTED |
5738 | |