/*
 * kmp_affinity.cpp -- affinity management
 */

//===----------------------------------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#include "kmp.h"
#include "kmp_affinity.h"
#include "kmp_i18n.h"
#include "kmp_io.h"
#include "kmp_str.h"
#include "kmp_wrapper_getpid.h"
#if KMP_USE_HIER_SCHED
#include "kmp_dispatch_hier.h"
#endif
#if KMP_USE_HWLOC
// Copied from hwloc
#define HWLOC_GROUP_KIND_INTEL_MODULE 102
#define HWLOC_GROUP_KIND_INTEL_TILE 103
#define HWLOC_GROUP_KIND_INTEL_DIE 104
#define HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP 220
#endif
#include <ctype.h>

// The machine topology
kmp_topology_t *__kmp_topology = nullptr;
// KMP_HW_SUBSET environment variable
kmp_hw_subset_t *__kmp_hw_subset = nullptr;

// Store the real or imagined machine hierarchy here
static hierarchy_info machine_hierarchy;

void __kmp_cleanup_hierarchy() { machine_hierarchy.fini(); }

#if KMP_AFFINITY_SUPPORTED
// Helper class to see if place lists further restrict the fullMask
class kmp_full_mask_modifier_t {
  kmp_affin_mask_t *mask;

public:
  kmp_full_mask_modifier_t() {
    KMP_CPU_ALLOC(mask);
    KMP_CPU_ZERO(mask);
  }
  ~kmp_full_mask_modifier_t() {
    KMP_CPU_FREE(mask);
    mask = nullptr;
  }
  void include(const kmp_affin_mask_t *other) { KMP_CPU_UNION(mask, other); }
  // If the new full mask is different from the current full mask,
  // then switch them. Returns true if full mask was affected, false otherwise.
  bool restrict_to_mask() {
    // See if the new mask further restricts or changes the full mask
    if (KMP_CPU_EQUAL(__kmp_affin_fullMask, mask) || KMP_CPU_ISEMPTY(mask))
      return false;
    return __kmp_topology->restrict_to_mask(mask);
  }
};
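// Usage sketch (inferred from the class interface above, not a prescribed
// pattern): accumulate the union of every mask in a place list with
// include(), then call restrict_to_mask() once to narrow the full mask:
//   kmp_full_mask_modifier_t full_mask;
//   // for each place mask m: full_mask.include(m);
//   // bool changed = full_mask.restrict_to_mask();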

static inline const char *
__kmp_get_affinity_env_var(const kmp_affinity_t &affinity,
                           bool for_binding = false) {
  if (affinity.flags.omp_places) {
    if (for_binding)
      return "OMP_PROC_BIND";
    return "OMP_PLACES";
  }
  return affinity.env_var;
}
#endif // KMP_AFFINITY_SUPPORTED

void __kmp_get_hierarchy(kmp_uint32 nproc, kmp_bstate_t *thr_bar) {
  kmp_uint32 depth;
  // The test below is true if affinity is available, but set to "none". Need to
  // init on first use of hierarchical barrier.
  if (TCR_1(machine_hierarchy.uninitialized))
    machine_hierarchy.init(nproc);

  // Adjust the hierarchy in case the number of threads exceeds the original
  if (nproc > machine_hierarchy.base_num_threads)
    machine_hierarchy.resize(nproc);

  depth = machine_hierarchy.depth;
  KMP_DEBUG_ASSERT(depth > 0);

  thr_bar->depth = depth;
  __kmp_type_convert(machine_hierarchy.numPerLevel[0] - 1,
                     &(thr_bar->base_leaf_kids));
  thr_bar->skip_per_level = machine_hierarchy.skipPerLevel;
}

static int nCoresPerPkg, nPackages;
static int __kmp_nThreadsPerCore;
#ifndef KMP_DFLT_NTH_CORES
static int __kmp_ncores;
#endif

const char *__kmp_hw_get_catalog_string(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? KMP_I18N_STR(Sockets) : KMP_I18N_STR(Socket));
  case KMP_HW_DIE:
    return ((plural) ? KMP_I18N_STR(Dice) : KMP_I18N_STR(Die));
  case KMP_HW_MODULE:
    return ((plural) ? KMP_I18N_STR(Modules) : KMP_I18N_STR(Module));
  case KMP_HW_TILE:
    return ((plural) ? KMP_I18N_STR(Tiles) : KMP_I18N_STR(Tile));
  case KMP_HW_NUMA:
    return ((plural) ? KMP_I18N_STR(NumaDomains) : KMP_I18N_STR(NumaDomain));
  case KMP_HW_L3:
    return ((plural) ? KMP_I18N_STR(L3Caches) : KMP_I18N_STR(L3Cache));
  case KMP_HW_L2:
    return ((plural) ? KMP_I18N_STR(L2Caches) : KMP_I18N_STR(L2Cache));
  case KMP_HW_L1:
    return ((plural) ? KMP_I18N_STR(L1Caches) : KMP_I18N_STR(L1Cache));
  case KMP_HW_LLC:
    return ((plural) ? KMP_I18N_STR(LLCaches) : KMP_I18N_STR(LLCache));
  case KMP_HW_CORE:
    return ((plural) ? KMP_I18N_STR(Cores) : KMP_I18N_STR(Core));
  case KMP_HW_THREAD:
    return ((plural) ? KMP_I18N_STR(Threads) : KMP_I18N_STR(Thread));
  case KMP_HW_PROC_GROUP:
    return ((plural) ? KMP_I18N_STR(ProcGroups) : KMP_I18N_STR(ProcGroup));
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return KMP_I18N_STR(Unknown);
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}

const char *__kmp_hw_get_keyword(kmp_hw_t type, bool plural) {
  switch (type) {
  case KMP_HW_SOCKET:
    return ((plural) ? "sockets" : "socket");
  case KMP_HW_DIE:
    return ((plural) ? "dice" : "die");
  case KMP_HW_MODULE:
    return ((plural) ? "modules" : "module");
  case KMP_HW_TILE:
    return ((plural) ? "tiles" : "tile");
  case KMP_HW_NUMA:
    return ((plural) ? "numa_domains" : "numa_domain");
  case KMP_HW_L3:
    return ((plural) ? "l3_caches" : "l3_cache");
  case KMP_HW_L2:
    return ((plural) ? "l2_caches" : "l2_cache");
  case KMP_HW_L1:
    return ((plural) ? "l1_caches" : "l1_cache");
  case KMP_HW_LLC:
    return ((plural) ? "ll_caches" : "ll_cache");
  case KMP_HW_CORE:
    return ((plural) ? "cores" : "core");
  case KMP_HW_THREAD:
    return ((plural) ? "threads" : "thread");
  case KMP_HW_PROC_GROUP:
    return ((plural) ? "proc_groups" : "proc_group");
  case KMP_HW_UNKNOWN:
  case KMP_HW_LAST:
    return ((plural) ? "unknowns" : "unknown");
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}

const char *__kmp_hw_get_core_type_string(kmp_hw_core_type_t type) {
  switch (type) {
  case KMP_HW_CORE_TYPE_UNKNOWN:
  case KMP_HW_MAX_NUM_CORE_TYPES:
    return "unknown";
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  case KMP_HW_CORE_TYPE_ATOM:
    return "Intel Atom(R) processor";
  case KMP_HW_CORE_TYPE_CORE:
    return "Intel(R) Core(TM) processor";
#endif
  }
  KMP_ASSERT2(false, "Unhandled kmp_hw_core_type_t enumeration");
  KMP_BUILTIN_UNREACHABLE;
}

#if KMP_AFFINITY_SUPPORTED
// If affinity is supported, check the affinity
// verbose and warning flags before printing a warning
#define KMP_AFF_WARNING(s, ...) \
  if (s.flags.verbose || (s.flags.warnings && (s.type != affinity_none))) { \
    KMP_WARNING(__VA_ARGS__); \
  }
#else
#define KMP_AFF_WARNING(s, ...) KMP_WARNING(__VA_ARGS__)
#endif
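// Usage sketch (call sites later in this file follow this shape), e.g.:
//   KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered);
// emits the warning only when the settings are verbose, or when warnings are
// enabled and the affinity type is not "none".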

////////////////////////////////////////////////////////////////////////////////
// kmp_hw_thread_t methods
int kmp_hw_thread_t::compare_ids(const void *a, const void *b) {
  const kmp_hw_thread_t *ahwthread = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bhwthread = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  for (int level = 0; level < depth; ++level) {
    // Reverse-sort cores by core efficiency if available
    // (higher efficiencies appear earlier in the list).
    if (__kmp_is_hybrid_cpu() &&
        __kmp_topology->get_type(level) == KMP_HW_CORE &&
        ahwthread->attrs.is_core_eff_valid() &&
        bhwthread->attrs.is_core_eff_valid()) {
      if (ahwthread->attrs.get_core_eff() < bhwthread->attrs.get_core_eff())
        return 1;
      if (ahwthread->attrs.get_core_eff() > bhwthread->attrs.get_core_eff())
        return -1;
    }
    if (ahwthread->ids[level] == bhwthread->ids[level])
      continue;
    // If the hardware id is unknown for this level, place the hardware thread
    // further down in the sorted list, since it should take last priority
    if (ahwthread->ids[level] == UNKNOWN_ID)
      return 1;
    else if (bhwthread->ids[level] == UNKNOWN_ID)
      return -1;
    else if (ahwthread->ids[level] < bhwthread->ids[level])
      return -1;
    else if (ahwthread->ids[level] > bhwthread->ids[level])
      return 1;
  }
  if (ahwthread->os_id < bhwthread->os_id)
    return -1;
  else if (ahwthread->os_id > bhwthread->os_id)
    return 1;
  return 0;
}

#if KMP_AFFINITY_SUPPORTED
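// compare_compact sorts hardware threads so that the innermost `compact`
// topology levels take priority. A sketch of the resulting order, derived
// from the loop structure below (not from external documentation): with a
// (socket, core, thread) topology of depth 3, compact == 0 orders threads
// purely outermost-to-innermost (socket, core, thread), while compact == 1
// orders primarily by thread sub_id and then by socket and core, so
// consecutive entries land on distinct cores first.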
int kmp_hw_thread_t::compare_compact(const void *a, const void *b) {
  int i;
  const kmp_hw_thread_t *aa = (const kmp_hw_thread_t *)a;
  const kmp_hw_thread_t *bb = (const kmp_hw_thread_t *)b;
  int depth = __kmp_topology->get_depth();
  int compact = __kmp_topology->compact;
  KMP_DEBUG_ASSERT(compact >= 0);
  KMP_DEBUG_ASSERT(compact <= depth);
  for (i = 0; i < compact; i++) {
    int j = depth - i - 1;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  for (; i < depth; i++) {
    int j = i - compact;
    if (aa->sub_ids[j] < bb->sub_ids[j])
      return -1;
    if (aa->sub_ids[j] > bb->sub_ids[j])
      return 1;
  }
  return 0;
}
#endif

void kmp_hw_thread_t::print() const {
  int depth = __kmp_topology->get_depth();
  printf("%4d ", os_id);
  for (int i = 0; i < depth; ++i) {
    printf("%4d (%d) ", ids[i], sub_ids[i]);
  }
  if (attrs) {
    if (attrs.is_core_type_valid())
      printf(" (%s)", __kmp_hw_get_core_type_string(attrs.get_core_type()));
    if (attrs.is_core_eff_valid())
      printf(" (eff=%d)", attrs.get_core_eff());
  }
  if (leader)
    printf(" (leader)");
  printf("\n");
}

////////////////////////////////////////////////////////////////////////////////
// kmp_topology_t methods

// Add a layer to the topology based on the ids. Assume the topology
// is perfectly nested (i.e., no object has more than one parent).
void kmp_topology_t::insert_layer(kmp_hw_t type, const int *ids) {
  // Figure out where the layer should go by comparing the ids of the current
  // layers with the new ids
  int target_layer;
  int previous_id = kmp_hw_thread_t::UNKNOWN_ID;
  int previous_new_id = kmp_hw_thread_t::UNKNOWN_ID;

  // Start from the highest layer and work down to find the target layer.
  // If the new layer is equal to another layer, then put the new layer above.
  for (target_layer = 0; target_layer < depth; ++target_layer) {
    bool layers_equal = true;
    bool strictly_above_target_layer = false;
    for (int i = 0; i < num_hw_threads; ++i) {
      int id = hw_threads[i].ids[target_layer];
      int new_id = ids[i];
      if (id != previous_id && new_id == previous_new_id) {
        // Found the layer we are strictly above
        strictly_above_target_layer = true;
        layers_equal = false;
        break;
      } else if (id == previous_id && new_id != previous_new_id) {
        // Found a layer we are below. Move to the next layer and check.
        layers_equal = false;
        break;
      }
      previous_id = id;
      previous_new_id = new_id;
    }
    if (strictly_above_target_layer || layers_equal)
      break;
  }

  // Found the layer we are above. Now move everything to accommodate the new
  // layer, and put the new ids and type into the topology.
  for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
    types[j] = types[i];
  types[target_layer] = type;
  for (int k = 0; k < num_hw_threads; ++k) {
    for (int i = depth - 1, j = depth; i >= target_layer; --i, --j)
      hw_threads[k].ids[j] = hw_threads[k].ids[i];
    hw_threads[k].ids[target_layer] = ids[k];
  }
  equivalent[type] = type;
  depth++;
}

#if KMP_GROUP_AFFINITY
// Insert the Windows Processor Group structure into the topology
void kmp_topology_t::_insert_windows_proc_groups() {
  // Do not insert the processor group structure for a single group
  if (__kmp_num_proc_groups == 1)
    return;
  kmp_affin_mask_t *mask;
  int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads);
  KMP_CPU_ALLOC(mask);
  for (int i = 0; i < num_hw_threads; ++i) {
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(hw_threads[i].os_id, mask);
    ids[i] = __kmp_get_proc_group(mask);
  }
  KMP_CPU_FREE(mask);
  insert_layer(KMP_HW_PROC_GROUP, ids);
  __kmp_free(ids);

  // Sort topology after adding proc groups
  __kmp_topology->sort_ids();
}
#endif

// Remove layers that don't add information to the topology.
// This is done by having the layer take on the id = UNKNOWN_ID (-1)
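// For example (an illustration of the mechanism below, not a statement about
// any particular machine): if every socket contains exactly one NUMA domain,
// the socket and NUMA layers carry the same information; the preference table
// below keeps the socket layer and records NUMA as equivalent to it.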
void kmp_topology_t::_remove_radix1_layers() {
  int preference[KMP_HW_LAST];
  int top_index1, top_index2;
  // Set up preference associative array
  preference[KMP_HW_SOCKET] = 110;
  preference[KMP_HW_PROC_GROUP] = 100;
  preference[KMP_HW_CORE] = 95;
  preference[KMP_HW_THREAD] = 90;
  preference[KMP_HW_NUMA] = 85;
  preference[KMP_HW_DIE] = 80;
  preference[KMP_HW_TILE] = 75;
  preference[KMP_HW_MODULE] = 73;
  preference[KMP_HW_L3] = 70;
  preference[KMP_HW_L2] = 65;
  preference[KMP_HW_L1] = 60;
  preference[KMP_HW_LLC] = 5;
  top_index1 = 0;
  top_index2 = 1;
  while (top_index1 < depth - 1 && top_index2 < depth) {
    kmp_hw_t type1 = types[top_index1];
    kmp_hw_t type2 = types[top_index2];
    KMP_ASSERT_VALID_HW_TYPE(type1);
    KMP_ASSERT_VALID_HW_TYPE(type2);
    // Do not allow the three main topology levels (sockets, cores, threads) to
    // be compacted down
    if ((type1 == KMP_HW_THREAD || type1 == KMP_HW_CORE ||
         type1 == KMP_HW_SOCKET) &&
        (type2 == KMP_HW_THREAD || type2 == KMP_HW_CORE ||
         type2 == KMP_HW_SOCKET)) {
      top_index1 = top_index2++;
      continue;
    }
    bool radix1 = true;
    bool all_same = true;
    int id1 = hw_threads[0].ids[top_index1];
    int id2 = hw_threads[0].ids[top_index2];
    int pref1 = preference[type1];
    int pref2 = preference[type2];
    for (int hwidx = 1; hwidx < num_hw_threads; ++hwidx) {
      if (hw_threads[hwidx].ids[top_index1] == id1 &&
          hw_threads[hwidx].ids[top_index2] != id2) {
        radix1 = false;
        break;
      }
      if (hw_threads[hwidx].ids[top_index2] != id2)
        all_same = false;
      id1 = hw_threads[hwidx].ids[top_index1];
      id2 = hw_threads[hwidx].ids[top_index2];
    }
    if (radix1) {
      // Select the layer to remove based on preference
      kmp_hw_t remove_type, keep_type;
      int remove_layer, remove_layer_ids;
      if (pref1 > pref2) {
        remove_type = type2;
        remove_layer = remove_layer_ids = top_index2;
        keep_type = type1;
      } else {
        remove_type = type1;
        remove_layer = remove_layer_ids = top_index1;
        keep_type = type2;
      }
      // If all the indexes for the second (deeper) layer are the same,
      // e.g., all are zero, then make sure to keep the first layer's ids
      if (all_same)
        remove_layer_ids = top_index2;
      // Remove the radix-one type by setting the equivalence, removing the id
      // from the hw threads, and removing the layer from types and depth
      set_equivalent_type(remove_type, keep_type);
      for (int idx = 0; idx < num_hw_threads; ++idx) {
        kmp_hw_thread_t &hw_thread = hw_threads[idx];
        for (int d = remove_layer_ids; d < depth - 1; ++d)
          hw_thread.ids[d] = hw_thread.ids[d + 1];
      }
      for (int idx = remove_layer; idx < depth - 1; ++idx)
        types[idx] = types[idx + 1];
      depth--;
    } else {
      top_index1 = top_index2++;
    }
  }
  KMP_ASSERT(depth > 0);
}

void kmp_topology_t::_set_last_level_cache() {
  if (get_equivalent_type(KMP_HW_L3) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L3);
  else if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
#if KMP_MIC_SUPPORTED
  else if (__kmp_mic_type == mic3) {
    if (get_equivalent_type(KMP_HW_L2) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L2);
    else if (get_equivalent_type(KMP_HW_TILE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_TILE);
    // L2/Tile wasn't detected so just say L1
    else
      set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  }
#endif
  else if (get_equivalent_type(KMP_HW_L1) != KMP_HW_UNKNOWN)
    set_equivalent_type(KMP_HW_LLC, KMP_HW_L1);
  // Fallback is to set the last level cache to socket or core
  if (get_equivalent_type(KMP_HW_LLC) == KMP_HW_UNKNOWN) {
    if (get_equivalent_type(KMP_HW_SOCKET) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_SOCKET);
    else if (get_equivalent_type(KMP_HW_CORE) != KMP_HW_UNKNOWN)
      set_equivalent_type(KMP_HW_LLC, KMP_HW_CORE);
  }
  KMP_ASSERT(get_equivalent_type(KMP_HW_LLC) != KMP_HW_UNKNOWN);
}

// Gather the count of each topology layer and the ratio
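// For example, on a uniform machine with 2 sockets, 4 cores per socket, and 2
// threads per core (topology types {socket, core, thread}), this yields
// count = {2, 8, 16} and ratio = {2, 4, 2}: count[l] is the total number of
// units at level l, while ratio[l] is the maximum number of level-l units
// under a single unit of level l-1.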
void kmp_topology_t::_gather_enumeration_information() {
  int previous_id[KMP_HW_LAST];
  int max[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
    max[i] = 0;
    count[i] = 0;
    ratio[i] = 0;
  }
  int core_level = get_level(KMP_HW_CORE);
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    for (int layer = 0; layer < depth; ++layer) {
      int id = hw_thread.ids[layer];
      if (id != previous_id[layer]) {
        // Add an additional increment to each count
        for (int l = layer; l < depth; ++l) {
          if (hw_thread.ids[l] != kmp_hw_thread_t::UNKNOWN_ID)
            count[l]++;
        }
        // Keep track of topology layer ratio statistics
        if (hw_thread.ids[layer] != kmp_hw_thread_t::UNKNOWN_ID)
          max[layer]++;
        for (int l = layer + 1; l < depth; ++l) {
          if (max[l] > ratio[l])
            ratio[l] = max[l];
          max[l] = 1;
        }
        // Figure out the number of different core types
        // and efficiencies for hybrid CPUs
        if (__kmp_is_hybrid_cpu() && core_level >= 0 && layer <= core_level) {
          if (hw_thread.attrs.is_core_eff_valid() &&
              hw_thread.attrs.core_eff >= num_core_efficiencies) {
            // Because efficiencies can range from 0 to max efficiency - 1,
            // the number of efficiencies is max efficiency + 1
            num_core_efficiencies = hw_thread.attrs.core_eff + 1;
          }
          if (hw_thread.attrs.is_core_type_valid()) {
            bool found = false;
            for (int j = 0; j < num_core_types; ++j) {
              if (hw_thread.attrs.get_core_type() == core_types[j]) {
                found = true;
                break;
              }
            }
            if (!found) {
              KMP_ASSERT(num_core_types < KMP_HW_MAX_NUM_CORE_TYPES);
              core_types[num_core_types++] = hw_thread.attrs.get_core_type();
            }
          }
        }
        break;
      }
    }
    for (int layer = 0; layer < depth; ++layer) {
      previous_id[layer] = hw_thread.ids[layer];
    }
  }
  for (int layer = 0; layer < depth; ++layer) {
    if (max[layer] > ratio[layer])
      ratio[layer] = max[layer];
  }
}

int kmp_topology_t::_get_ncores_with_attr(const kmp_hw_attr_t &attr,
                                          int above_level,
                                          bool find_all) const {
  int current, current_max;
  int previous_id[KMP_HW_LAST];
  for (int i = 0; i < depth; ++i)
    previous_id[i] = kmp_hw_thread_t::UNKNOWN_ID;
  int core_level = get_level(KMP_HW_CORE);
  if (find_all)
    above_level = -1;
  KMP_ASSERT(above_level < core_level);
  current_max = 0;
  current = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    if (!find_all && hw_thread.ids[above_level] != previous_id[above_level]) {
      if (current > current_max)
        current_max = current;
      current = hw_thread.attrs.contains(attr);
    } else {
      for (int level = above_level + 1; level <= core_level; ++level) {
        if (hw_thread.ids[level] != previous_id[level]) {
          if (hw_thread.attrs.contains(attr))
            current++;
          break;
        }
      }
    }
    for (int level = 0; level < depth; ++level)
      previous_id[level] = hw_thread.ids[level];
  }
  if (current > current_max)
    current_max = current;
  return current_max;
}

// Find out if the topology is uniform
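// A topology is uniform when every branch of the topology tree is identical,
// i.e., the product of the per-level ratios equals the total hardware thread
// count. In the 2s x 4c x 2t example above, 2 * 4 * 2 == 16 == count[2], so
// the topology is uniform; a machine with one socket having fewer cores
// enabled would fail this test.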
void kmp_topology_t::_discover_uniformity() {
  int num = 1;
  for (int level = 0; level < depth; ++level)
    num *= ratio[level];
  flags.uniform = (num == count[depth - 1]);
}

// Set all the sub_ids for each hardware thread
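// sub_ids are sibling-relative indices derived from the sorted ids: e.g., a
// hardware thread with ids {3, 9, 1} (socket 3, core 9, thread 1) gets
// sub_ids {0, 1, 1} if socket 3 is the first socket enumerated and core 9 is
// the second core seen within that socket.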
void kmp_topology_t::_set_sub_ids() {
  int previous_id[KMP_HW_LAST];
  int sub_id[KMP_HW_LAST];

  for (int i = 0; i < depth; ++i) {
    previous_id[i] = -1;
    sub_id[i] = -1;
  }
  for (int i = 0; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &hw_thread = hw_threads[i];
    // Setup the sub_id
    for (int j = 0; j < depth; ++j) {
      if (hw_thread.ids[j] != previous_id[j]) {
        sub_id[j]++;
        for (int k = j + 1; k < depth; ++k) {
          sub_id[k] = 0;
        }
        break;
      }
    }
    // Set previous_id
    for (int j = 0; j < depth; ++j) {
      previous_id[j] = hw_thread.ids[j];
    }
    // Set the sub_ids field
    for (int j = 0; j < depth; ++j) {
      hw_thread.sub_ids[j] = sub_id[j];
    }
  }
}

void kmp_topology_t::_set_globals() {
  // Set nCoresPerPkg, nPackages, __kmp_nThreadsPerCore, __kmp_ncores
  int core_level, thread_level, package_level;
  package_level = get_level(KMP_HW_SOCKET);
#if KMP_GROUP_AFFINITY
  if (package_level == -1)
    package_level = get_level(KMP_HW_PROC_GROUP);
#endif
  core_level = get_level(KMP_HW_CORE);
  thread_level = get_level(KMP_HW_THREAD);

  KMP_ASSERT(core_level != -1);
  KMP_ASSERT(thread_level != -1);

  __kmp_nThreadsPerCore = calculate_ratio(thread_level, core_level);
  if (package_level != -1) {
    nCoresPerPkg = calculate_ratio(core_level, package_level);
    nPackages = get_count(package_level);
  } else {
    // assume one socket
    nCoresPerPkg = get_count(core_level);
    nPackages = 1;
  }
#ifndef KMP_DFLT_NTH_CORES
  __kmp_ncores = get_count(core_level);
#endif
}

kmp_topology_t *kmp_topology_t::allocate(int nproc, int ndepth,
                                         const kmp_hw_t *types) {
  kmp_topology_t *retval;
  // Allocate all data in one large allocation
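  // Memory layout of the single allocation (from the pointer arithmetic
  // below): [kmp_topology_t header][nproc kmp_hw_thread_t entries]
  // [KMP_HW_LAST ints for types][KMP_HW_LAST ints for ratio]
  // [KMP_HW_LAST ints for count]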
  size_t size = sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc +
                sizeof(int) * (size_t)KMP_HW_LAST * 3;
  char *bytes = (char *)__kmp_allocate(size);
  retval = (kmp_topology_t *)bytes;
  if (nproc > 0) {
    retval->hw_threads = (kmp_hw_thread_t *)(bytes + sizeof(kmp_topology_t));
  } else {
    retval->hw_threads = nullptr;
  }
  retval->num_hw_threads = nproc;
  retval->depth = ndepth;
  int *arr =
      (int *)(bytes + sizeof(kmp_topology_t) + sizeof(kmp_hw_thread_t) * nproc);
  retval->types = (kmp_hw_t *)arr;
  retval->ratio = arr + (size_t)KMP_HW_LAST;
  retval->count = arr + 2 * (size_t)KMP_HW_LAST;
  retval->num_core_efficiencies = 0;
  retval->num_core_types = 0;
  retval->compact = 0;
  for (int i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i)
    retval->core_types[i] = KMP_HW_CORE_TYPE_UNKNOWN;
  KMP_FOREACH_HW_TYPE(type) { retval->equivalent[type] = KMP_HW_UNKNOWN; }
  for (int i = 0; i < ndepth; ++i) {
    retval->types[i] = types[i];
    retval->equivalent[types[i]] = types[i];
  }
  return retval;
}

void kmp_topology_t::deallocate(kmp_topology_t *topology) {
  if (topology)
    __kmp_free(topology);
}

bool kmp_topology_t::check_ids() const {
  // Assume ids have been sorted
  if (num_hw_threads == 0)
    return true;
  for (int i = 1; i < num_hw_threads; ++i) {
    kmp_hw_thread_t &current_thread = hw_threads[i];
    kmp_hw_thread_t &previous_thread = hw_threads[i - 1];
    bool unique = false;
    for (int j = 0; j < depth; ++j) {
      if (previous_thread.ids[j] != current_thread.ids[j]) {
        unique = true;
        break;
      }
    }
    if (unique)
      continue;
    return false;
  }
  return true;
}

void kmp_topology_t::dump() const {
  printf("***********************\n");
  printf("*** __kmp_topology: ***\n");
  printf("***********************\n");
  printf("* depth: %d\n", depth);

  printf("* types: ");
  for (int i = 0; i < depth; ++i)
    printf("%15s ", __kmp_hw_get_keyword(types[i]));
  printf("\n");

  printf("* ratio: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", ratio[i]);
  }
  printf("\n");

  printf("* count: ");
  for (int i = 0; i < depth; ++i) {
    printf("%15d ", count[i]);
  }
  printf("\n");

  printf("* num_core_eff: %d\n", num_core_efficiencies);
  printf("* num_core_types: %d\n", num_core_types);
  printf("* core_types: ");
  for (int i = 0; i < num_core_types; ++i)
    printf("%3d ", core_types[i]);
  printf("\n");

  printf("* equivalent map:\n");
  KMP_FOREACH_HW_TYPE(i) {
    const char *key = __kmp_hw_get_keyword(i);
    const char *value = __kmp_hw_get_keyword(equivalent[i]);
    printf("%-15s -> %-15s\n", key, value);
  }

  printf("* uniform: %s\n", (is_uniform() ? "Yes" : "No"));

  printf("* num_hw_threads: %d\n", num_hw_threads);
  printf("* hw_threads:\n");
  for (int i = 0; i < num_hw_threads; ++i) {
    hw_threads[i].print();
  }
  printf("***********************\n");
}

void kmp_topology_t::print(const char *env_var) const {
  kmp_str_buf_t buf;
  int print_types_depth;
  __kmp_str_buf_init(&buf);
  kmp_hw_t print_types[KMP_HW_LAST + 2];

  // Num Available Threads
  if (num_hw_threads) {
    KMP_INFORM(AvailableOSProc, env_var, num_hw_threads);
  } else {
    KMP_INFORM(AvailableOSProc, env_var, __kmp_xproc);
  }

  // Uniform or not
  if (is_uniform()) {
    KMP_INFORM(Uniform, env_var);
  } else {
    KMP_INFORM(NonUniform, env_var);
  }

  // Equivalent types
  KMP_FOREACH_HW_TYPE(type) {
    kmp_hw_t eq_type = equivalent[type];
    if (eq_type != KMP_HW_UNKNOWN && eq_type != type) {
      KMP_INFORM(AffEqualTopologyTypes, env_var,
                 __kmp_hw_get_catalog_string(type),
                 __kmp_hw_get_catalog_string(eq_type));
    }
  }

  // Quick topology
  KMP_ASSERT(depth > 0 && depth <= (int)KMP_HW_LAST);
  // Create a print types array that always guarantees printing
  // the core and thread level
  print_types_depth = 0;
  for (int level = 0; level < depth; ++level)
    print_types[print_types_depth++] = types[level];
  if (equivalent[KMP_HW_CORE] != KMP_HW_CORE) {
    // Force in the core level for quick topology
    if (print_types[print_types_depth - 1] == KMP_HW_THREAD) {
      // Force core before thread, e.g., 1 socket X 2 threads/socket
      // becomes 1 socket X 1 core/socket X 2 threads/core
      print_types[print_types_depth - 1] = KMP_HW_CORE;
      print_types[print_types_depth++] = KMP_HW_THREAD;
    } else {
      print_types[print_types_depth++] = KMP_HW_CORE;
    }
  }
  // Always put threads at the very end of the quick topology
  if (equivalent[KMP_HW_THREAD] != KMP_HW_THREAD)
    print_types[print_types_depth++] = KMP_HW_THREAD;

  __kmp_str_buf_clear(&buf);
  kmp_hw_t numerator_type;
  kmp_hw_t denominator_type = KMP_HW_UNKNOWN;
  int core_level = get_level(KMP_HW_CORE);
  int ncores = get_count(core_level);

  for (int plevel = 0, level = 0; plevel < print_types_depth; ++plevel) {
    int c;
    bool plural;
    numerator_type = print_types[plevel];
    KMP_ASSERT_VALID_HW_TYPE(numerator_type);
    if (equivalent[numerator_type] != numerator_type)
      c = 1;
    else
      c = get_ratio(level++);
    plural = (c > 1);
    if (plevel == 0) {
      __kmp_str_buf_print(&buf, "%d %s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural));
    } else {
      __kmp_str_buf_print(&buf, " x %d %s/%s", c,
                          __kmp_hw_get_catalog_string(numerator_type, plural),
                          __kmp_hw_get_catalog_string(denominator_type));
    }
    denominator_type = numerator_type;
  }
  KMP_INFORM(TopologyGeneric, env_var, buf.str, ncores);

  // Hybrid topology information
  if (__kmp_is_hybrid_cpu()) {
    for (int i = 0; i < num_core_types; ++i) {
      kmp_hw_core_type_t core_type = core_types[i];
      kmp_hw_attr_t attr;
      attr.clear();
      attr.set_core_type(core_type);
      int ncores = get_ncores_with_attr(attr);
      if (ncores > 0) {
        KMP_INFORM(TopologyHybrid, env_var, ncores,
                   __kmp_hw_get_core_type_string(core_type));
        KMP_ASSERT(num_core_efficiencies <= KMP_HW_MAX_NUM_CORE_EFFS);
        for (int eff = 0; eff < num_core_efficiencies; ++eff) {
          attr.set_core_eff(eff);
          int ncores_with_eff = get_ncores_with_attr(attr);
          if (ncores_with_eff > 0) {
            KMP_INFORM(TopologyHybridCoreEff, env_var, ncores_with_eff, eff);
          }
        }
      }
    }
  }

  if (num_hw_threads <= 0) {
    __kmp_str_buf_free(&buf);
    return;
  }

  // Full OS proc to hardware thread map
  KMP_INFORM(OSProcToPhysicalThreadMap, env_var);
  for (int i = 0; i < num_hw_threads; i++) {
    __kmp_str_buf_clear(&buf);
    for (int level = 0; level < depth; ++level) {
      if (hw_threads[i].ids[level] == kmp_hw_thread_t::UNKNOWN_ID)
        continue;
      kmp_hw_t type = types[level];
      __kmp_str_buf_print(&buf, "%s ", __kmp_hw_get_catalog_string(type));
      __kmp_str_buf_print(&buf, "%d ", hw_threads[i].ids[level]);
    }
    if (__kmp_is_hybrid_cpu())
      __kmp_str_buf_print(
          &buf, "(%s)",
          __kmp_hw_get_core_type_string(hw_threads[i].attrs.get_core_type()));
    KMP_INFORM(OSProcMapToPack, env_var, hw_threads[i].os_id, buf.str);
  }

  __kmp_str_buf_free(&buf);
}

#if KMP_AFFINITY_SUPPORTED
void kmp_topology_t::set_granularity(kmp_affinity_t &affinity) const {
  const char *env_var = __kmp_get_affinity_env_var(affinity);
  // If the user requested hybrid CPU attributes for granularity (via either
  // OMP_PLACES or KMP_AFFINITY) but none exist, then reset the granularity and
  // let the code below select a granularity and warn the user.
  if (!__kmp_is_hybrid_cpu()) {
    if (affinity.core_attr_gran.valid) {
      // OMP_PLACES with cores:<attribute> but non-hybrid arch, use cores
      // instead
      KMP_AFF_WARNING(
          affinity, AffIgnoringNonHybrid, env_var,
          __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    } else if (affinity.flags.core_types_gran ||
               affinity.flags.core_effs_gran) {
      // OMP_PLACES=core_types|core_effs but non-hybrid, use cores instead
      if (affinity.flags.omp_places) {
        KMP_AFF_WARNING(
            affinity, AffIgnoringNonHybrid, env_var,
            __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true));
      } else {
        // KMP_AFFINITY=granularity=core_type|core_eff,...
        KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                        "Intel(R) Hybrid Technology core attribute",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE));
      }
      affinity.gran = KMP_HW_CORE;
      affinity.gran_levels = -1;
      affinity.core_attr_gran = KMP_AFFINITY_ATTRS_UNKNOWN;
      affinity.flags.core_types_gran = affinity.flags.core_effs_gran = 0;
    }
  }
  // Set the number of affinity granularity levels
  if (affinity.gran_levels < 0) {
    kmp_hw_t gran_type = get_equivalent_type(affinity.gran);
    // Check if the user's granularity request is valid
    if (gran_type == KMP_HW_UNKNOWN) {
      // First try core, then thread, then package
      kmp_hw_t gran_types[3] = {KMP_HW_CORE, KMP_HW_THREAD, KMP_HW_SOCKET};
      for (auto g : gran_types) {
        if (get_equivalent_type(g) != KMP_HW_UNKNOWN) {
          gran_type = g;
          break;
        }
      }
      KMP_ASSERT(gran_type != KMP_HW_UNKNOWN);
      // Warn the user what granularity setting will be used instead
      KMP_AFF_WARNING(affinity, AffGranularityBad, env_var,
                      __kmp_hw_get_catalog_string(affinity.gran),
                      __kmp_hw_get_catalog_string(gran_type));
      affinity.gran = gran_type;
    }
#if KMP_GROUP_AFFINITY
    // If more than one processor group exists, and the level of
    // granularity specified by the user is too coarse, then the
    // granularity must be adjusted "down" to processor group affinity
    // because threads can only exist within one processor group.
    // For example, if a user sets granularity=socket and there are two
    // processor groups that cover a socket, then the runtime must
    // restrict the granularity down to the processor group level.
    if (__kmp_num_proc_groups > 1) {
      int gran_depth = get_level(gran_type);
      int proc_group_depth = get_level(KMP_HW_PROC_GROUP);
      if (gran_depth >= 0 && proc_group_depth >= 0 &&
          gran_depth < proc_group_depth) {
        KMP_AFF_WARNING(affinity, AffGranTooCoarseProcGroup, env_var,
                        __kmp_hw_get_catalog_string(affinity.gran));
        affinity.gran = gran_type = KMP_HW_PROC_GROUP;
      }
    }
#endif
    affinity.gran_levels = 0;
    for (int i = depth - 1; i >= 0 && get_type(i) != gran_type; --i)
      affinity.gran_levels++;
  }
}
#endif

void kmp_topology_t::canonicalize() {
#if KMP_GROUP_AFFINITY
  _insert_windows_proc_groups();
#endif
  _remove_radix1_layers();
  _gather_enumeration_information();
  _discover_uniformity();
  _set_sub_ids();
  _set_globals();
  _set_last_level_cache();

#if KMP_MIC_SUPPORTED
  // Manually add L2 = Tile equivalence
  if (__kmp_mic_type == mic3) {
    if (get_level(KMP_HW_L2) != -1)
      set_equivalent_type(KMP_HW_TILE, KMP_HW_L2);
    else if (get_level(KMP_HW_TILE) != -1)
      set_equivalent_type(KMP_HW_L2, KMP_HW_TILE);
  }
#endif

  // Perform post-canonicalization checking
  KMP_ASSERT(depth > 0);
  for (int level = 0; level < depth; ++level) {
    // All counts, ratios, and types must be valid
    KMP_ASSERT(count[level] > 0 && ratio[level] > 0);
    KMP_ASSERT_VALID_HW_TYPE(types[level]);
    // Detected types must point to themselves
    KMP_ASSERT(equivalent[types[level]] == types[level]);
  }
}

// Canonicalize an explicit packages X cores/pkg X threads/core topology
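// For example (hypothetical numbers): canonicalize(2, 4, 2, 8) describes a
// machine with 2 packages, 4 cores per package, and 2 threads per core, for
// 8 cores total; __kmp_xproc supplies the total hardware thread count.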
void kmp_topology_t::canonicalize(int npackages, int ncores_per_pkg,
                                  int nthreads_per_core, int ncores) {
  int ndepth = 3;
  depth = ndepth;
  KMP_FOREACH_HW_TYPE(i) { equivalent[i] = KMP_HW_UNKNOWN; }
  for (int level = 0; level < depth; ++level) {
    count[level] = 0;
    ratio[level] = 0;
  }
  count[0] = npackages;
  count[1] = ncores;
  count[2] = __kmp_xproc;
  ratio[0] = npackages;
  ratio[1] = ncores_per_pkg;
  ratio[2] = nthreads_per_core;
  equivalent[KMP_HW_SOCKET] = KMP_HW_SOCKET;
  equivalent[KMP_HW_CORE] = KMP_HW_CORE;
  equivalent[KMP_HW_THREAD] = KMP_HW_THREAD;
  types[0] = KMP_HW_SOCKET;
  types[1] = KMP_HW_CORE;
  types[2] = KMP_HW_THREAD;
  //__kmp_avail_proc = __kmp_xproc;
  _discover_uniformity();
}

#if KMP_AFFINITY_SUPPORTED
static kmp_str_buf_t *
__kmp_hw_get_catalog_core_string(const kmp_hw_attr_t &attr, kmp_str_buf_t *buf,
                                 bool plural) {
  __kmp_str_buf_init(buf);
  if (attr.is_core_type_valid())
    __kmp_str_buf_print(buf, "%s %s",
                        __kmp_hw_get_core_type_string(attr.get_core_type()),
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural));
  else
    __kmp_str_buf_print(buf, "%s eff=%d",
                        __kmp_hw_get_catalog_string(KMP_HW_CORE, plural),
                        attr.get_core_eff());
  return buf;
}

bool kmp_topology_t::restrict_to_mask(const kmp_affin_mask_t *mask) {
  // Apply the filter
  bool affected;
  int new_index = 0;
  for (int i = 0; i < num_hw_threads; ++i) {
    int os_id = hw_threads[i].os_id;
    if (KMP_CPU_ISSET(os_id, mask)) {
      if (i != new_index)
        hw_threads[new_index] = hw_threads[i];
      new_index++;
    } else {
      KMP_CPU_CLR(os_id, __kmp_affin_fullMask);
      __kmp_avail_proc--;
    }
  }

  KMP_DEBUG_ASSERT(new_index <= num_hw_threads);
  affected = (num_hw_threads != new_index);
  num_hw_threads = new_index;

  // Post hardware subset canonicalization
  if (affected) {
    _gather_enumeration_information();
    _discover_uniformity();
    _set_globals();
    _set_last_level_cache();
#if KMP_OS_WINDOWS
    // Copy filtered full mask if topology has single processor group
    if (__kmp_num_proc_groups <= 1)
#endif
      __kmp_affin_origMask->copy(__kmp_affin_fullMask);
  }
  return affected;
}

// Apply the KMP_HW_SUBSET environment variable to the topology.
// Returns true if KMP_HW_SUBSET filtered any processors;
// otherwise, returns false.
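// For reference, KMP_HW_SUBSET values look like "2s,4c,2t" (2 sockets, 4
// cores per socket, 2 threads per core), optionally with offsets ("2c@4"
// skips the first 4 cores) and, on hybrid CPUs, core attributes such as
// "4c:intel_atom" or "2c:eff0"; see the KMP_HW_SUBSET documentation for the
// full grammar.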
1068 | bool kmp_topology_t::filter_hw_subset() { |
1069 | // If KMP_HW_SUBSET wasn't requested, then do nothing. |
1070 | if (!__kmp_hw_subset) |
1071 | return false; |
1072 | |
1073 | // First, sort the KMP_HW_SUBSET items by the machine topology |
1074 | __kmp_hw_subset->sort(); |
1075 | |
1076 | __kmp_hw_subset->canonicalize(top: __kmp_topology); |
1077 | |
1078 | // Check to see if KMP_HW_SUBSET is a valid subset of the detected topology |
1079 | bool using_core_types = false; |
1080 | bool using_core_effs = false; |
1081 | bool is_absolute = __kmp_hw_subset->is_absolute(); |
1082 | int hw_subset_depth = __kmp_hw_subset->get_depth(); |
1083 | kmp_hw_t specified[KMP_HW_LAST]; |
1084 | int *topology_levels = (int *)KMP_ALLOCA(sizeof(int) * hw_subset_depth); |
1085 | KMP_ASSERT(hw_subset_depth > 0); |
1086 | KMP_FOREACH_HW_TYPE(i) { specified[i] = KMP_HW_UNKNOWN; } |
1087 | int core_level = get_level(type: KMP_HW_CORE); |
1088 | for (int i = 0; i < hw_subset_depth; ++i) { |
1089 | int max_count; |
1090 | const kmp_hw_subset_t::item_t &item = __kmp_hw_subset->at(index: i); |
1091 | int num = item.num[0]; |
1092 | int offset = item.offset[0]; |
1093 | kmp_hw_t type = item.type; |
1094 | kmp_hw_t equivalent_type = equivalent[type]; |
1095 | int level = get_level(type); |
1096 | topology_levels[i] = level; |
1097 | |
1098 | // Check to see if current layer is in detected machine topology |
1099 | if (equivalent_type != KMP_HW_UNKNOWN) { |
1100 | __kmp_hw_subset->at(index: i).type = equivalent_type; |
1101 | } else { |
1102 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetNotExistGeneric, |
1103 | __kmp_hw_get_catalog_string(type)); |
1104 | return false; |
1105 | } |
1106 | |
1107 | // Check to see if current layer has already been |
1108 | // specified either directly or through an equivalent type |
1109 | if (specified[equivalent_type] != KMP_HW_UNKNOWN) { |
1110 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetEqvLayers, |
1111 | __kmp_hw_get_catalog_string(type), |
1112 | __kmp_hw_get_catalog_string(specified[equivalent_type])); |
1113 | return false; |
1114 | } |
1115 | specified[equivalent_type] = type; |
1116 | |
1117 | // Check to see if each layer's num & offset parameters are valid |
1118 | max_count = get_ratio(level); |
1119 | if (!is_absolute) { |
1120 | if (max_count < 0 || |
1121 | (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) { |
1122 | bool plural = (num > 1); |
1123 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, |
1124 | __kmp_hw_get_catalog_string(type, plural)); |
1125 | return false; |
1126 | } |
1127 | } |
1128 | |
1129 | // Check to see if core attributes are consistent |
1130 | if (core_level == level) { |
1131 | // Determine which core attributes are specified |
1132 | for (int j = 0; j < item.num_attrs; ++j) { |
1133 | if (item.attr[j].is_core_type_valid()) |
1134 | using_core_types = true; |
1135 | if (item.attr[j].is_core_eff_valid()) |
1136 | using_core_effs = true; |
1137 | } |
1138 | |
1139 | // Check if using a single core attribute on non-hybrid arch. |
1140 | // Do not ignore all of KMP_HW_SUBSET, just ignore the attribute. |
1141 | // |
1142 | // Check if using multiple core attributes on non-hyrbid arch. |
1143 | // Ignore all of KMP_HW_SUBSET if this is the case. |
1144 | if ((using_core_effs || using_core_types) && !__kmp_is_hybrid_cpu()) { |
1145 | if (item.num_attrs == 1) { |
1146 | if (using_core_effs) { |
1147 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr, |
1148 | "efficiency" ); |
1149 | } else { |
1150 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIgnoringAttr, |
1151 | "core_type" ); |
1152 | } |
1153 | using_core_effs = false; |
1154 | using_core_types = false; |
1155 | } else { |
1156 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrsNonHybrid); |
1157 | return false; |
1158 | } |
1159 | } |
1160 | |
1161 | // Check if using both core types and core efficiencies together |
1162 | if (using_core_types && using_core_effs) { |
1163 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, "core_type" , |
1164 | "efficiency" ); |
1165 | return false; |
1166 | } |
1167 | |
1168 | // Check that core efficiency values are valid |
1169 | if (using_core_effs) { |
1170 | for (int j = 0; j < item.num_attrs; ++j) { |
1171 | if (item.attr[j].is_core_eff_valid()) { |
1172 | int core_eff = item.attr[j].get_core_eff(); |
1173 | if (core_eff < 0 || core_eff >= num_core_efficiencies) { |
1174 | kmp_str_buf_t buf; |
1175 | __kmp_str_buf_init(&buf); |
1176 | __kmp_str_buf_print(buffer: &buf, format: "%d" , item.attr[j].get_core_eff()); |
1177 | __kmp_msg(kmp_ms_warning, |
1178 | KMP_MSG(AffHWSubsetAttrInvalid, "efficiency" , buf.str), |
1179 | KMP_HNT(ValidValuesRange, 0, num_core_efficiencies - 1), |
1180 | __kmp_msg_null); |
1181 | __kmp_str_buf_free(buffer: &buf); |
1182 | return false; |
1183 | } |
1184 | } |
1185 | } |
1186 | } |
1187 | |
1188 | // Check that the number of requested cores with attributes is valid |
1189 | if ((using_core_types || using_core_effs) && !is_absolute) { |
1190 | for (int j = 0; j < item.num_attrs; ++j) { |
1191 | int num = item.num[j]; |
1192 | int offset = item.offset[j]; |
1193 | int level_above = core_level - 1; |
1194 | if (level_above >= 0) { |
1195 | max_count = get_ncores_with_attr_per(attr: item.attr[j], above: level_above); |
1196 | if (max_count <= 0 || |
1197 | (num != kmp_hw_subset_t::USE_ALL && num + offset > max_count)) { |
1198 | kmp_str_buf_t buf; |
1199 | __kmp_hw_get_catalog_core_string(attr: item.attr[j], buf: &buf, plural: num > 0); |
1200 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetManyGeneric, buf.str); |
1201 | __kmp_str_buf_free(buffer: &buf); |
1202 | return false; |
1203 | } |
1204 | } |
1205 | } |
1206 | } |
1207 | |
1208 | if ((using_core_types || using_core_effs) && item.num_attrs > 1) { |
1209 | for (int j = 0; j < item.num_attrs; ++j) { |
1210 | // Ambiguous use of specific core attribute + generic core |
1211 | // e.g., 4c & 3c:intel_core or 4c & 3c:eff1 |
1212 | if (!item.attr[j]) { |
1213 | kmp_hw_attr_t other_attr; |
1214 | for (int k = 0; k < item.num_attrs; ++k) { |
1215 | if (item.attr[k] != item.attr[j]) { |
1216 | other_attr = item.attr[k]; |
1217 | break; |
1218 | } |
1219 | } |
1220 | kmp_str_buf_t buf; |
1221 | __kmp_hw_get_catalog_core_string(attr: other_attr, buf: &buf, plural: item.num[j] > 0); |
1222 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetIncompat, |
1223 | __kmp_hw_get_catalog_string(KMP_HW_CORE), buf.str); |
1224 | __kmp_str_buf_free(buffer: &buf); |
1225 | return false; |
1226 | } |
1227 | // Allow specifying a specific core type or core eff exactly once |
1228 | for (int k = 0; k < j; ++k) { |
1229 | if (!item.attr[j] || !item.attr[k]) |
1230 | continue; |
1231 | if (item.attr[k] == item.attr[j]) { |
1232 | kmp_str_buf_t buf; |
1233 | __kmp_hw_get_catalog_core_string(attr: item.attr[j], buf: &buf, |
1234 | plural: item.num[j] > 0); |
1235 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAttrRepeat, buf.str); |
1236 | __kmp_str_buf_free(buffer: &buf); |
1237 | return false; |
1238 | } |
1239 | } |
1240 | } |
1241 | } |
1242 | } |
1243 | } |
1244 | |
1245 | // For keeping track of sub_ids for an absolute KMP_HW_SUBSET |
1246 | // or core attributes (core type or efficiency) |
1247 | int prev_sub_ids[KMP_HW_LAST]; |
1248 | int abs_sub_ids[KMP_HW_LAST]; |
1249 | int core_eff_sub_ids[KMP_HW_MAX_NUM_CORE_EFFS]; |
1250 | int core_type_sub_ids[KMP_HW_MAX_NUM_CORE_TYPES]; |
1251 | for (size_t i = 0; i < KMP_HW_LAST; ++i) { |
1252 | abs_sub_ids[i] = -1; |
1253 | prev_sub_ids[i] = -1; |
1254 | } |
1255 | for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_EFFS; ++i) |
1256 | core_eff_sub_ids[i] = -1; |
1257 | for (size_t i = 0; i < KMP_HW_MAX_NUM_CORE_TYPES; ++i) |
1258 | core_type_sub_ids[i] = -1; |
1259 | |
1260 | // Determine which hardware threads should be filtered. |
1261 | |
1262 | // Helpful to determine if a topology layer is targeted by an absolute subset |
1263 | auto is_targeted = [&](int level) { |
1264 | if (is_absolute) { |
1265 | for (int i = 0; i < hw_subset_depth; ++i) |
1266 | if (topology_levels[i] == level) |
1267 | return true; |
1268 | return false; |
1269 | } |
1270 | // If not absolute KMP_HW_SUBSET, then every layer is seen as targeted |
1271 | return true; |
1272 | }; |
1273 | |
1274 | // Helpful to index into core type sub Ids array |
1275 | auto get_core_type_index = [](const kmp_hw_thread_t &t) { |
1276 | switch (t.attrs.get_core_type()) { |
1277 | case KMP_HW_CORE_TYPE_UNKNOWN: |
1278 | case KMP_HW_MAX_NUM_CORE_TYPES: |
1279 | return 0; |
1280 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
1281 | case KMP_HW_CORE_TYPE_ATOM: |
1282 | return 1; |
1283 | case KMP_HW_CORE_TYPE_CORE: |
1284 | return 2; |
1285 | #endif |
1286 | } |
1287 | KMP_ASSERT2(false, "Unhandled kmp_hw_thread_t enumeration" ); |
1288 | KMP_BUILTIN_UNREACHABLE; |
1289 | }; |
1290 | |
1291 | // Helpful to index into core efficiencies sub Ids array |
1292 | auto get_core_eff_index = [](const kmp_hw_thread_t &t) { |
1293 | return t.attrs.get_core_eff(); |
1294 | }; |
1295 | |
1296 | int num_filtered = 0; |
1297 | kmp_affin_mask_t *filtered_mask; |
1298 | KMP_CPU_ALLOC(filtered_mask); |
1299 | KMP_CPU_COPY(filtered_mask, __kmp_affin_fullMask); |
1300 | for (int i = 0; i < num_hw_threads; ++i) { |
1301 | kmp_hw_thread_t &hw_thread = hw_threads[i]; |
1302 | |
1303 | // Figure out the absolute sub ids and core eff/type sub ids |
1304 | if (is_absolute || using_core_effs || using_core_types) { |
1305 | for (int level = 0; level < get_depth(); ++level) { |
1306 | if (hw_thread.sub_ids[level] != prev_sub_ids[level]) { |
1307 | bool found_targeted = false; |
1308 | for (int j = level; j < get_depth(); ++j) { |
1309 | bool targeted = is_targeted(j); |
1310 | if (!found_targeted && targeted) { |
1311 | found_targeted = true; |
1312 | abs_sub_ids[j]++; |
1313 | if (j == core_level && using_core_effs) |
1314 | core_eff_sub_ids[get_core_eff_index(hw_thread)]++; |
1315 | if (j == core_level && using_core_types) |
1316 | core_type_sub_ids[get_core_type_index(hw_thread)]++; |
1317 | } else if (targeted) { |
1318 | abs_sub_ids[j] = 0; |
1319 | if (j == core_level && using_core_effs) |
1320 | core_eff_sub_ids[get_core_eff_index(hw_thread)] = 0; |
1321 | if (j == core_level && using_core_types) |
1322 | core_type_sub_ids[get_core_type_index(hw_thread)] = 0; |
1323 | } |
1324 | } |
1325 | break; |
1326 | } |
1327 | } |
1328 | for (int level = 0; level < get_depth(); ++level) |
1329 | prev_sub_ids[level] = hw_thread.sub_ids[level]; |
1330 | } |
1331 | |
1332 | // Check to see if this hardware thread should be filtered |
1333 | bool should_be_filtered = false; |
1334 | for (int hw_subset_index = 0; hw_subset_index < hw_subset_depth; |
1335 | ++hw_subset_index) { |
1336 | const auto &hw_subset_item = __kmp_hw_subset->at(index: hw_subset_index); |
1337 | int level = topology_levels[hw_subset_index]; |
1338 | if (level == -1) |
1339 | continue; |
1340 | if ((using_core_effs || using_core_types) && level == core_level) { |
1341 | // Look for the core attribute in KMP_HW_SUBSET which corresponds |
1342 | // to this hardware thread's core attribute. Use this num,offset plus |
1343 | // the running sub_id for the particular core attribute of this hardware |
1344 | // thread to determine if the hardware thread should be filtered or not. |
1345 | int attr_idx; |
1346 | kmp_hw_core_type_t core_type = hw_thread.attrs.get_core_type(); |
1347 | int core_eff = hw_thread.attrs.get_core_eff(); |
1348 | for (attr_idx = 0; attr_idx < hw_subset_item.num_attrs; ++attr_idx) { |
1349 | if (using_core_types && |
1350 | hw_subset_item.attr[attr_idx].get_core_type() == core_type) |
1351 | break; |
1352 | if (using_core_effs && |
1353 | hw_subset_item.attr[attr_idx].get_core_eff() == core_eff) |
1354 | break; |
1355 | } |
1356 | // This core attribute isn't in the KMP_HW_SUBSET so always filter it. |
1357 | if (attr_idx == hw_subset_item.num_attrs) { |
1358 | should_be_filtered = true; |
1359 | break; |
1360 | } |
1361 | int sub_id; |
1362 | int num = hw_subset_item.num[attr_idx]; |
1363 | int offset = hw_subset_item.offset[attr_idx]; |
1364 | if (using_core_types) |
1365 | sub_id = core_type_sub_ids[get_core_type_index(hw_thread)]; |
1366 | else |
1367 | sub_id = core_eff_sub_ids[get_core_eff_index(hw_thread)]; |
1368 | if (sub_id < offset || |
1369 | (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) { |
1370 | should_be_filtered = true; |
1371 | break; |
1372 | } |
1373 | } else { |
1374 | int sub_id; |
1375 | int num = hw_subset_item.num[0]; |
1376 | int offset = hw_subset_item.offset[0]; |
1377 | if (is_absolute) |
1378 | sub_id = abs_sub_ids[level]; |
1379 | else |
1380 | sub_id = hw_thread.sub_ids[level]; |
1381 | if (hw_thread.ids[level] == kmp_hw_thread_t::UNKNOWN_ID || |
1382 | sub_id < offset || |
1383 | (num != kmp_hw_subset_t::USE_ALL && sub_id >= offset + num)) { |
1384 | should_be_filtered = true; |
1385 | break; |
1386 | } |
1387 | } |
1388 | } |
1389 | // Collect filtering information |
1390 | if (should_be_filtered) { |
1391 | KMP_CPU_CLR(hw_thread.os_id, filtered_mask); |
1392 | num_filtered++; |
1393 | } |
1394 | } |
1395 | |
1396 | // One last check that we shouldn't allow filtering entire machine |
1397 | if (num_filtered == num_hw_threads) { |
1398 | KMP_AFF_WARNING(__kmp_affinity, AffHWSubsetAllFiltered); |
1399 | return false; |
1400 | } |
1401 | |
1402 | // Apply the filter |
1403 | restrict_to_mask(mask: filtered_mask); |
1404 | return true; |
1405 | } |
1406 | |
1407 | bool kmp_topology_t::is_close(int hwt1, int hwt2, |
1408 | const kmp_affinity_t &stgs) const { |
1409 | int hw_level = stgs.gran_levels; |
1410 | if (hw_level >= depth) |
1411 | return true; |
1412 | bool retval = true; |
1413 | const kmp_hw_thread_t &t1 = hw_threads[hwt1]; |
1414 | const kmp_hw_thread_t &t2 = hw_threads[hwt2]; |
1415 | if (stgs.flags.core_types_gran) |
1416 | return t1.attrs.get_core_type() == t2.attrs.get_core_type(); |
1417 | if (stgs.flags.core_effs_gran) |
1418 | return t1.attrs.get_core_eff() == t2.attrs.get_core_eff(); |
1419 | for (int i = 0; i < (depth - hw_level); ++i) { |
1420 | if (t1.ids[i] != t2.ids[i]) |
1421 | return false; |
1422 | } |
1423 | return retval; |
1424 | } |
1425 | |
1426 | //////////////////////////////////////////////////////////////////////////////// |
1427 | |
1428 | bool KMPAffinity::picked_api = false; |
1429 | |
1430 | void *KMPAffinity::Mask::operator new(size_t n) { return __kmp_allocate(n); } |
1431 | void *KMPAffinity::Mask::operator new[](size_t n) { return __kmp_allocate(n); } |
1432 | void KMPAffinity::Mask::operator delete(void *p) { __kmp_free(p); } |
1433 | void KMPAffinity::Mask::operator delete[](void *p) { __kmp_free(p); } |
1434 | void *KMPAffinity::operator new(size_t n) { return __kmp_allocate(n); } |
1435 | void KMPAffinity::operator delete(void *p) { __kmp_free(p); } |
1436 | |
1437 | void KMPAffinity::pick_api() { |
1438 | KMPAffinity *affinity_dispatch; |
1439 | if (picked_api) |
1440 | return; |
1441 | #if KMP_USE_HWLOC |
1442 | // Only use Hwloc if affinity isn't explicitly disabled and |
1443 | // user requests Hwloc topology method |
1444 | if (__kmp_affinity_top_method == affinity_top_method_hwloc && |
1445 | __kmp_affinity.type != affinity_disabled) { |
1446 | affinity_dispatch = new KMPHwlocAffinity(); |
1447 | __kmp_hwloc_available = true; |
1448 | } else |
1449 | #endif |
1450 | { |
1451 | affinity_dispatch = new KMPNativeAffinity(); |
1452 | } |
1453 | __kmp_affinity_dispatch = affinity_dispatch; |
1454 | picked_api = true; |
1455 | } |
1456 | |
1457 | void KMPAffinity::destroy_api() { |
1458 | if (__kmp_affinity_dispatch != NULL) { |
1459 | delete __kmp_affinity_dispatch; |
1460 | __kmp_affinity_dispatch = NULL; |
1461 | picked_api = false; |
1462 | } |
1463 | } |
1464 | |
1465 | #define KMP_ADVANCE_SCAN(scan) \ |
1466 | while (*scan != '\0') { \ |
1467 | scan++; \ |
1468 | } |
1469 | |
1470 | // Print the affinity mask to the character array in a pretty format. |
1471 | // The format is a comma separated list of non-negative integers or integer |
1472 | // ranges: e.g., 1,2,3-5,7,9-15 |
1473 | // The format can also be the string "{<empty>}" if no bits are set in mask |
1474 | char *__kmp_affinity_print_mask(char *buf, int buf_len, |
1475 | kmp_affin_mask_t *mask) { |
1476 | int start = 0, finish = 0, previous = 0; |
1477 | bool first_range; |
1478 | KMP_ASSERT(buf); |
1479 | KMP_ASSERT(buf_len >= 40); |
1480 | KMP_ASSERT(mask); |
1481 | char *scan = buf; |
1482 | char *end = buf + buf_len - 1; |
1483 | |
1484 | // Check for empty set. |
1485 | if (mask->begin() == mask->end()) { |
    KMP_SNPRINTF(scan, end - scan + 1, "{<empty>}");
1487 | KMP_ADVANCE_SCAN(scan); |
1488 | KMP_ASSERT(scan <= end); |
1489 | return buf; |
1490 | } |
1491 | |
1492 | first_range = true; |
1493 | start = mask->begin(); |
1494 | while (1) { |
1495 | // Find next range |
1496 | // [start, previous] is inclusive range of contiguous bits in mask |
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
1500 | previous = finish; |
1501 | } |
1502 | |
1503 | // The first range does not need a comma printed before it, but the rest |
1504 | // of the ranges do need a comma beforehand |
1505 | if (!first_range) { |
      KMP_SNPRINTF(scan, end - scan + 1, "%s", ",");
      KMP_ADVANCE_SCAN(scan);
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      KMP_SNPRINTF(scan, end - scan + 1, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      KMP_SNPRINTF(scan, end - scan + 1, "%u", start);
      KMP_ADVANCE_SCAN(scan);
      if (previous - start > 0) {
        KMP_SNPRINTF(scan, end - scan + 1, ",%u", previous);
      }
1520 | } |
1521 | } |
1522 | KMP_ADVANCE_SCAN(scan); |
1523 | // Start over with new start point |
1524 | start = finish; |
1525 | if (start == mask->end()) |
1526 | break; |
1527 | // Check for overflow |
1528 | if (end - scan < 2) |
1529 | break; |
1530 | } |
1531 | |
1532 | // Check for overflow |
1533 | KMP_ASSERT(scan <= end); |
1534 | return buf; |
1535 | } |
1536 | #undef KMP_ADVANCE_SCAN |
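
// Usage sketch for __kmp_affinity_print_mask() (illustrative only; the
// buffer size here is an arbitrary hypothetical choice):
//   char buf[1024];
//   __kmp_affinity_print_mask(buf, sizeof(buf), mask);
// A mask with bits {0, 1, 2, 5, 9, 10} set prints as "0-2,5,9,10". Note a
// two-bit run prints as "9,10" rather than "9-10"; only runs of three or
// more bits are collapsed into a dash range.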
1537 | |
1538 | // Print the affinity mask to the string buffer object in a pretty format |
1539 | // The format is a comma separated list of non-negative integers or integer |
1540 | // ranges: e.g., 1,2,3-5,7,9-15 |
1541 | // The format can also be the string "{<empty>}" if no bits are set in mask |
1542 | kmp_str_buf_t *__kmp_affinity_str_buf_mask(kmp_str_buf_t *buf, |
1543 | kmp_affin_mask_t *mask) { |
1544 | int start = 0, finish = 0, previous = 0; |
1545 | bool first_range; |
1546 | KMP_ASSERT(buf); |
1547 | KMP_ASSERT(mask); |
1548 | |
  __kmp_str_buf_clear(buf);
1550 | |
1551 | // Check for empty set. |
1552 | if (mask->begin() == mask->end()) { |
    __kmp_str_buf_print(buf, "%s", "{<empty>}");
1554 | return buf; |
1555 | } |
1556 | |
1557 | first_range = true; |
1558 | start = mask->begin(); |
1559 | while (1) { |
1560 | // Find next range |
1561 | // [start, previous] is inclusive range of contiguous bits in mask |
    for (finish = mask->next(start), previous = start;
         finish == previous + 1 && finish != mask->end();
         finish = mask->next(finish)) {
1565 | previous = finish; |
1566 | } |
1567 | |
1568 | // The first range does not need a comma printed before it, but the rest |
1569 | // of the ranges do need a comma beforehand |
1570 | if (!first_range) { |
      __kmp_str_buf_print(buf, "%s", ",");
    } else {
      first_range = false;
    }
    // Range with three or more contiguous bits in the affinity mask
    if (previous - start > 1) {
      __kmp_str_buf_print(buf, "%u-%u", start, previous);
    } else {
      // Range with one or two contiguous bits in the affinity mask
      __kmp_str_buf_print(buf, "%u", start);
      if (previous - start > 0) {
        __kmp_str_buf_print(buf, ",%u", previous);
1583 | } |
1584 | } |
1585 | // Start over with new start point |
1586 | start = finish; |
1587 | if (start == mask->end()) |
1588 | break; |
1589 | } |
1590 | return buf; |
1591 | } |
1592 | |
1593 | static kmp_affin_mask_t *__kmp_parse_cpu_list(const char *path) { |
1594 | kmp_affin_mask_t *mask; |
1595 | KMP_CPU_ALLOC(mask); |
1596 | KMP_CPU_ZERO(mask); |
1597 | #if KMP_OS_LINUX |
1598 | int n, begin_cpu, end_cpu; |
1599 | kmp_safe_raii_file_t file; |
  auto skip_ws = [](FILE *f) {
    int c;
    do {
      c = fgetc(f);
    } while (isspace(c));
    if (c != EOF)
      ungetc(c, f);
  };
1608 | // File contains CSV of integer ranges representing the CPUs |
1609 | // e.g., 1,2,4-7,9,11-15 |
  int status = file.try_open(path, "r");
  if (status != 0)
    return mask;
  while (!feof(file)) {
    skip_ws(file);
    n = fscanf(file, "%d", &begin_cpu);
    if (n != 1)
      break;
    skip_ws(file);
    int c = fgetc(file);
    if (c == EOF || c == ',') {
      // Just single CPU
      end_cpu = begin_cpu;
    } else if (c == '-') {
      // Range of CPUs
      skip_ws(file);
      n = fscanf(file, "%d", &end_cpu);
      if (n != 1)
        break;
      skip_ws(file);
      c = fgetc(file); // skip ','
1631 | } else { |
1632 | // Syntax problem |
1633 | break; |
1634 | } |
1635 | // Ensure a valid range of CPUs |
1636 | if (begin_cpu < 0 || begin_cpu >= __kmp_xproc || end_cpu < 0 || |
1637 | end_cpu >= __kmp_xproc || begin_cpu > end_cpu) { |
1638 | continue; |
1639 | } |
1640 | // Insert [begin_cpu, end_cpu] into mask |
1641 | for (int cpu = begin_cpu; cpu <= end_cpu; ++cpu) { |
1642 | KMP_CPU_SET(cpu, mask); |
1643 | } |
1644 | } |
1645 | #endif |
1646 | return mask; |
1647 | } |
1648 | |
1649 | // Return (possibly empty) affinity mask representing the offline CPUs |
1650 | // Caller must free the mask |
1651 | kmp_affin_mask_t *__kmp_affinity_get_offline_cpus() { |
  return __kmp_parse_cpu_list("/sys/devices/system/cpu/offline");
1653 | } |
1654 | |
1655 | // Return the number of available procs |
1656 | int __kmp_affinity_entire_machine_mask(kmp_affin_mask_t *mask) { |
1657 | int avail_proc = 0; |
1658 | KMP_CPU_ZERO(mask); |
1659 | |
1660 | #if KMP_GROUP_AFFINITY |
1661 | |
1662 | if (__kmp_num_proc_groups > 1) { |
1663 | int group; |
1664 | KMP_DEBUG_ASSERT(__kmp_GetActiveProcessorCount != NULL); |
1665 | for (group = 0; group < __kmp_num_proc_groups; group++) { |
1666 | int i; |
1667 | int num = __kmp_GetActiveProcessorCount(group); |
1668 | for (i = 0; i < num; i++) { |
1669 | KMP_CPU_SET(i + group * (CHAR_BIT * sizeof(DWORD_PTR)), mask); |
1670 | avail_proc++; |
1671 | } |
1672 | } |
1673 | } else |
1674 | |
1675 | #endif /* KMP_GROUP_AFFINITY */ |
1676 | |
1677 | { |
1678 | int proc; |
1679 | kmp_affin_mask_t *offline_cpus = __kmp_affinity_get_offline_cpus(); |
1680 | for (proc = 0; proc < __kmp_xproc; proc++) { |
1681 | // Skip offline CPUs |
1682 | if (KMP_CPU_ISSET(proc, offline_cpus)) |
1683 | continue; |
1684 | KMP_CPU_SET(proc, mask); |
1685 | avail_proc++; |
1686 | } |
1687 | KMP_CPU_FREE(offline_cpus); |
1688 | } |
1689 | |
1690 | return avail_proc; |
1691 | } |
1692 | |
1693 | // All of the __kmp_affinity_create_*_map() routines should allocate the |
1694 | // internal topology object and set the layer ids for it. Each routine |
1695 | // returns a boolean on whether it was successful at doing so. |
1696 | kmp_affin_mask_t *__kmp_affin_fullMask = NULL; |
1697 | // Original mask is a subset of full mask in multiple processor groups topology |
1698 | kmp_affin_mask_t *__kmp_affin_origMask = NULL; |
1699 | |
1700 | #if KMP_USE_HWLOC |
1701 | static inline bool __kmp_hwloc_is_cache_type(hwloc_obj_t obj) { |
1702 | #if HWLOC_API_VERSION >= 0x00020000 |
1703 | return hwloc_obj_type_is_cache(obj->type); |
1704 | #else |
1705 | return obj->type == HWLOC_OBJ_CACHE; |
1706 | #endif |
1707 | } |
1708 | |
1709 | // Returns KMP_HW_* type derived from HWLOC_* type |
1710 | static inline kmp_hw_t __kmp_hwloc_type_2_topology_type(hwloc_obj_t obj) { |
1711 | |
1712 | if (__kmp_hwloc_is_cache_type(obj)) { |
1713 | if (obj->attr->cache.type == HWLOC_OBJ_CACHE_INSTRUCTION) |
1714 | return KMP_HW_UNKNOWN; |
1715 | switch (obj->attr->cache.depth) { |
1716 | case 1: |
1717 | return KMP_HW_L1; |
1718 | case 2: |
1719 | #if KMP_MIC_SUPPORTED |
1720 | if (__kmp_mic_type == mic3) { |
1721 | return KMP_HW_TILE; |
1722 | } |
1723 | #endif |
1724 | return KMP_HW_L2; |
1725 | case 3: |
1726 | return KMP_HW_L3; |
1727 | } |
1728 | return KMP_HW_UNKNOWN; |
1729 | } |
1730 | |
1731 | switch (obj->type) { |
1732 | case HWLOC_OBJ_PACKAGE: |
1733 | return KMP_HW_SOCKET; |
1734 | case HWLOC_OBJ_NUMANODE: |
1735 | return KMP_HW_NUMA; |
1736 | case HWLOC_OBJ_CORE: |
1737 | return KMP_HW_CORE; |
1738 | case HWLOC_OBJ_PU: |
1739 | return KMP_HW_THREAD; |
1740 | case HWLOC_OBJ_GROUP: |
1741 | #if HWLOC_API_VERSION >= 0x00020000 |
1742 | if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_DIE) |
1743 | return KMP_HW_DIE; |
1744 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_TILE) |
1745 | return KMP_HW_TILE; |
1746 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_INTEL_MODULE) |
1747 | return KMP_HW_MODULE; |
1748 | else if (obj->attr->group.kind == HWLOC_GROUP_KIND_WINDOWS_PROCESSOR_GROUP) |
1749 | return KMP_HW_PROC_GROUP; |
1750 | #endif |
1751 | return KMP_HW_UNKNOWN; |
1752 | #if HWLOC_API_VERSION >= 0x00020100 |
1753 | case HWLOC_OBJ_DIE: |
1754 | return KMP_HW_DIE; |
1755 | #endif |
1756 | } |
1757 | return KMP_HW_UNKNOWN; |
1758 | } |
1759 | |
1760 | // Returns the number of objects of type 'type' below 'obj' within the topology |
1761 | // tree structure. e.g., if obj is a HWLOC_OBJ_PACKAGE object, and type is |
1762 | // HWLOC_OBJ_PU, then this will return the number of PU's under the SOCKET |
1763 | // object. |
1764 | static int __kmp_hwloc_get_nobjs_under_obj(hwloc_obj_t obj, |
1765 | hwloc_obj_type_t type) { |
1766 | int retval = 0; |
1767 | hwloc_obj_t first; |
1768 | for (first = hwloc_get_obj_below_by_type(__kmp_hwloc_topology, obj->type, |
1769 | obj->logical_index, type, 0); |
1770 | first != NULL && hwloc_get_ancestor_obj_by_type(__kmp_hwloc_topology, |
1771 | obj->type, first) == obj; |
1772 | first = hwloc_get_next_obj_by_type(__kmp_hwloc_topology, first->type, |
1773 | first)) { |
1774 | ++retval; |
1775 | } |
1776 | return retval; |
1777 | } |
1778 | |
1779 | // This gets the sub_id for a lower object under a higher object in the |
1780 | // topology tree |
1781 | static int __kmp_hwloc_get_sub_id(hwloc_topology_t t, hwloc_obj_t higher, |
1782 | hwloc_obj_t lower) { |
1783 | hwloc_obj_t obj; |
1784 | hwloc_obj_type_t ltype = lower->type; |
1785 | int lindex = lower->logical_index - 1; |
1786 | int sub_id = 0; |
1787 | // Get the previous lower object |
1788 | obj = hwloc_get_obj_by_type(t, ltype, lindex); |
1789 | while (obj && lindex >= 0 && |
1790 | hwloc_bitmap_isincluded(obj->cpuset, higher->cpuset)) { |
1791 | if (obj->userdata) { |
1792 | sub_id = (int)(RCAST(kmp_intptr_t, obj->userdata)); |
1793 | break; |
1794 | } |
1795 | sub_id++; |
1796 | lindex--; |
1797 | obj = hwloc_get_obj_by_type(t, ltype, lindex); |
1798 | } |
  // store sub_id + 1 so that 0 is distinguishable from NULL
1800 | lower->userdata = RCAST(void *, sub_id + 1); |
1801 | return sub_id; |
1802 | } |
1803 | |
1804 | static bool __kmp_affinity_create_hwloc_map(kmp_i18n_id_t *const msg_id) { |
1805 | kmp_hw_t type; |
1806 | int hw_thread_index, sub_id; |
1807 | int depth; |
1808 | hwloc_obj_t pu, obj, root, prev; |
1809 | kmp_hw_t types[KMP_HW_LAST]; |
1810 | hwloc_obj_type_t hwloc_types[KMP_HW_LAST]; |
1811 | |
1812 | hwloc_topology_t tp = __kmp_hwloc_topology; |
1813 | *msg_id = kmp_i18n_null; |
1814 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffUsingHwloc, "KMP_AFFINITY");
1816 | } |
1817 | |
1818 | if (!KMP_AFFINITY_CAPABLE()) { |
1819 | // Hack to try and infer the machine topology using only the data |
1820 | // available from hwloc on the current thread, and __kmp_xproc. |
1821 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
    // hwloc only guarantees existence of PU object, so check PACKAGE and CORE
1823 | hwloc_obj_t o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_PACKAGE, 0); |
1824 | if (o != NULL) |
1825 | nCoresPerPkg = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_CORE); |
1826 | else |
1827 | nCoresPerPkg = 1; // no PACKAGE found |
1828 | o = hwloc_get_obj_by_type(tp, HWLOC_OBJ_CORE, 0); |
1829 | if (o != NULL) |
1830 | __kmp_nThreadsPerCore = __kmp_hwloc_get_nobjs_under_obj(o, HWLOC_OBJ_PU); |
1831 | else |
1832 | __kmp_nThreadsPerCore = 1; // no CORE found |
1833 | if (__kmp_nThreadsPerCore == 0) |
1834 | __kmp_nThreadsPerCore = 1; |
1835 | __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore; |
1836 | if (nCoresPerPkg == 0) |
1837 | nCoresPerPkg = 1; // to prevent possible division by 0 |
1838 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; |
1839 | return true; |
1840 | } |
1841 | |
1842 | #if HWLOC_API_VERSION >= 0x00020400 |
1843 | // Handle multiple types of cores if they exist on the system |
1844 | int nr_cpu_kinds = hwloc_cpukinds_get_nr(tp, 0); |
1845 | |
1846 | typedef struct kmp_hwloc_cpukinds_info_t { |
1847 | int efficiency; |
1848 | kmp_hw_core_type_t core_type; |
1849 | hwloc_bitmap_t mask; |
1850 | } kmp_hwloc_cpukinds_info_t; |
1851 | kmp_hwloc_cpukinds_info_t *cpukinds = nullptr; |
1852 | |
1853 | if (nr_cpu_kinds > 0) { |
1854 | unsigned nr_infos; |
1855 | struct hwloc_info_s *infos; |
1856 | cpukinds = (kmp_hwloc_cpukinds_info_t *)__kmp_allocate( |
1857 | sizeof(kmp_hwloc_cpukinds_info_t) * nr_cpu_kinds); |
1858 | for (unsigned idx = 0; idx < (unsigned)nr_cpu_kinds; ++idx) { |
1859 | cpukinds[idx].efficiency = -1; |
1860 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_UNKNOWN; |
1861 | cpukinds[idx].mask = hwloc_bitmap_alloc(); |
1862 | if (hwloc_cpukinds_get_info(tp, idx, cpukinds[idx].mask, |
1863 | &cpukinds[idx].efficiency, &nr_infos, &infos, |
1864 | 0) == 0) { |
1865 | for (unsigned i = 0; i < nr_infos; ++i) { |
1866 | if (__kmp_str_match("CoreType" , 8, infos[i].name)) { |
1867 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
1868 | if (__kmp_str_match("IntelAtom" , 9, infos[i].value)) { |
1869 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_ATOM; |
1870 | break; |
1871 | } else if (__kmp_str_match("IntelCore" , 9, infos[i].value)) { |
1872 | cpukinds[idx].core_type = KMP_HW_CORE_TYPE_CORE; |
1873 | break; |
1874 | } |
1875 | #endif |
1876 | } |
1877 | } |
1878 | } |
1879 | } |
1880 | } |
1881 | #endif |
1882 | |
1883 | root = hwloc_get_root_obj(tp); |
1884 | |
1885 | // Figure out the depth and types in the topology |
1886 | depth = 0; |
1887 | obj = hwloc_get_pu_obj_by_os_index(tp, __kmp_affin_fullMask->begin()); |
1888 | while (obj && obj != root) { |
1889 | #if HWLOC_API_VERSION >= 0x00020000 |
1890 | if (obj->memory_arity) { |
1891 | hwloc_obj_t memory; |
1892 | for (memory = obj->memory_first_child; memory; |
1893 | memory = hwloc_get_next_child(tp, obj, memory)) { |
1894 | if (memory->type == HWLOC_OBJ_NUMANODE) |
1895 | break; |
1896 | } |
1897 | if (memory && memory->type == HWLOC_OBJ_NUMANODE) { |
1898 | types[depth] = KMP_HW_NUMA; |
1899 | hwloc_types[depth] = memory->type; |
1900 | depth++; |
1901 | } |
1902 | } |
1903 | #endif |
1904 | type = __kmp_hwloc_type_2_topology_type(obj); |
1905 | if (type != KMP_HW_UNKNOWN) { |
1906 | types[depth] = type; |
1907 | hwloc_types[depth] = obj->type; |
1908 | depth++; |
1909 | } |
1910 | obj = obj->parent; |
1911 | } |
1912 | KMP_ASSERT(depth > 0); |
1913 | |
1914 | // Get the order for the types correct |
1915 | for (int i = 0, j = depth - 1; i < j; ++i, --j) { |
1916 | hwloc_obj_type_t hwloc_temp = hwloc_types[i]; |
1917 | kmp_hw_t temp = types[i]; |
1918 | types[i] = types[j]; |
1919 | types[j] = temp; |
1920 | hwloc_types[i] = hwloc_types[j]; |
1921 | hwloc_types[j] = hwloc_temp; |
1922 | } |
1923 | |
1924 | // Allocate the data structure to be returned. |
1925 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); |
1926 | |
1927 | hw_thread_index = 0; |
1928 | pu = NULL; |
1929 | while ((pu = hwloc_get_next_obj_by_type(tp, HWLOC_OBJ_PU, pu))) { |
1930 | int index = depth - 1; |
1931 | bool included = KMP_CPU_ISSET(pu->os_index, __kmp_affin_fullMask); |
1932 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(hw_thread_index); |
1933 | if (included) { |
1934 | hw_thread.clear(); |
1935 | hw_thread.ids[index] = pu->logical_index; |
1936 | hw_thread.os_id = pu->os_index; |
1937 | hw_thread.original_idx = hw_thread_index; |
1938 | // If multiple core types, then set that attribute for the hardware thread |
1939 | #if HWLOC_API_VERSION >= 0x00020400 |
1940 | if (cpukinds) { |
1941 | int cpukind_index = -1; |
1942 | for (int i = 0; i < nr_cpu_kinds; ++i) { |
1943 | if (hwloc_bitmap_isset(cpukinds[i].mask, hw_thread.os_id)) { |
1944 | cpukind_index = i; |
1945 | break; |
1946 | } |
1947 | } |
1948 | if (cpukind_index >= 0) { |
1949 | hw_thread.attrs.set_core_type(cpukinds[cpukind_index].core_type); |
1950 | hw_thread.attrs.set_core_eff(cpukinds[cpukind_index].efficiency); |
1951 | } |
1952 | } |
1953 | #endif |
1954 | index--; |
1955 | } |
1956 | obj = pu; |
1957 | prev = obj; |
1958 | while (obj != root && obj != NULL) { |
1959 | obj = obj->parent; |
1960 | #if HWLOC_API_VERSION >= 0x00020000 |
1961 | // NUMA Nodes are handled differently since they are not within the |
1962 | // parent/child structure anymore. They are separate children |
1963 | // of obj (memory_first_child points to first memory child) |
1964 | if (obj->memory_arity) { |
1965 | hwloc_obj_t memory; |
1966 | for (memory = obj->memory_first_child; memory; |
1967 | memory = hwloc_get_next_child(tp, obj, memory)) { |
1968 | if (memory->type == HWLOC_OBJ_NUMANODE) |
1969 | break; |
1970 | } |
1971 | if (memory && memory->type == HWLOC_OBJ_NUMANODE) { |
1972 | sub_id = __kmp_hwloc_get_sub_id(tp, memory, prev); |
1973 | if (included) { |
1974 | hw_thread.ids[index] = memory->logical_index; |
1975 | hw_thread.ids[index + 1] = sub_id; |
1976 | index--; |
1977 | } |
1978 | } |
1979 | prev = obj; |
1980 | } |
1981 | #endif |
1982 | type = __kmp_hwloc_type_2_topology_type(obj); |
1983 | if (type != KMP_HW_UNKNOWN) { |
1984 | sub_id = __kmp_hwloc_get_sub_id(tp, obj, prev); |
1985 | if (included) { |
1986 | hw_thread.ids[index] = obj->logical_index; |
1987 | hw_thread.ids[index + 1] = sub_id; |
1988 | index--; |
1989 | } |
1990 | prev = obj; |
1991 | } |
1992 | } |
1993 | if (included) |
1994 | hw_thread_index++; |
1995 | } |
1996 | |
1997 | #if HWLOC_API_VERSION >= 0x00020400 |
1998 | // Free the core types information |
1999 | if (cpukinds) { |
2000 | for (int idx = 0; idx < nr_cpu_kinds; ++idx) |
2001 | hwloc_bitmap_free(cpukinds[idx].mask); |
2002 | __kmp_free(cpukinds); |
2003 | } |
2004 | #endif |
2005 | __kmp_topology->sort_ids(); |
2006 | return true; |
2007 | } |
2008 | #endif // KMP_USE_HWLOC |
2009 | |
2010 | // If we don't know how to retrieve the machine's processor topology, or |
2011 | // encounter an error in doing so, this routine is called to form a "flat" |
2012 | // mapping of os thread id's <-> processor id's. |
2013 | static bool __kmp_affinity_create_flat_map(kmp_i18n_id_t *const msg_id) { |
2014 | *msg_id = kmp_i18n_null; |
2015 | int depth = 3; |
2016 | kmp_hw_t types[] = {KMP_HW_SOCKET, KMP_HW_CORE, KMP_HW_THREAD}; |
2017 | |
2018 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(UsingFlatOS, "KMP_AFFINITY");
2020 | } |
2021 | |
2022 | // Even if __kmp_affinity.type == affinity_none, this routine might still |
2023 | // be called to set __kmp_ncores, as well as |
2024 | // __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
2025 | if (!KMP_AFFINITY_CAPABLE()) { |
2026 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2027 | __kmp_ncores = nPackages = __kmp_xproc; |
2028 | __kmp_nThreadsPerCore = nCoresPerPkg = 1; |
2029 | return true; |
2030 | } |
2031 | |
2032 | // When affinity is off, this routine will still be called to set |
2033 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
2034 | // Make sure all these vars are set correctly, and return now if affinity is |
2035 | // not enabled. |
2036 | __kmp_ncores = nPackages = __kmp_avail_proc; |
2037 | __kmp_nThreadsPerCore = nCoresPerPkg = 1; |
2038 | |
2039 | // Construct the data structure to be returned. |
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
2041 | int avail_ct = 0; |
2042 | int i; |
2043 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
2044 | // Skip this proc if it is not included in the machine model. |
2045 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
2046 | continue; |
2047 | } |
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct);
2049 | hw_thread.clear(); |
2050 | hw_thread.os_id = i; |
2051 | hw_thread.original_idx = avail_ct; |
2052 | hw_thread.ids[0] = i; |
2053 | hw_thread.ids[1] = 0; |
2054 | hw_thread.ids[2] = 0; |
2055 | avail_ct++; |
2056 | } |
2057 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(OSProcToPackage, "KMP_AFFINITY");
2059 | } |
2060 | return true; |
2061 | } |
2062 | |
2063 | #if KMP_GROUP_AFFINITY |
2064 | // If multiple Windows* OS processor groups exist, we can create a 2-level |
2065 | // topology map with the groups at level 0 and the individual procs at level 1. |
2066 | // This facilitates letting the threads float among all procs in a group, |
2067 | // if granularity=group (the default when there are multiple groups). |
2068 | static bool __kmp_affinity_create_proc_group_map(kmp_i18n_id_t *const msg_id) { |
2069 | *msg_id = kmp_i18n_null; |
2070 | int depth = 3; |
2071 | kmp_hw_t types[] = {KMP_HW_PROC_GROUP, KMP_HW_CORE, KMP_HW_THREAD}; |
2072 | const static size_t BITS_PER_GROUP = CHAR_BIT * sizeof(DWORD_PTR); |
2073 | |
2074 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffWindowsProcGroupMap, "KMP_AFFINITY");
2076 | } |
2077 | |
2078 | // If we aren't affinity capable, then use flat topology |
2079 | if (!KMP_AFFINITY_CAPABLE()) { |
2080 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2081 | nPackages = __kmp_num_proc_groups; |
2082 | __kmp_nThreadsPerCore = 1; |
2083 | __kmp_ncores = __kmp_xproc; |
    nCoresPerPkg = __kmp_ncores / nPackages;
2085 | return true; |
2086 | } |
2087 | |
2088 | // Construct the data structure to be returned. |
2089 | __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types); |
2090 | int avail_ct = 0; |
2091 | int i; |
2092 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
2093 | // Skip this proc if it is not included in the machine model. |
2094 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
2095 | continue; |
2096 | } |
2097 | kmp_hw_thread_t &hw_thread = __kmp_topology->at(avail_ct); |
2098 | hw_thread.clear(); |
2099 | hw_thread.os_id = i; |
2100 | hw_thread.original_idx = avail_ct; |
2101 | hw_thread.ids[0] = i / BITS_PER_GROUP; |
2102 | hw_thread.ids[1] = hw_thread.ids[2] = i % BITS_PER_GROUP; |
2103 | avail_ct++; |
2104 | } |
2105 | return true; |
2106 | } |
2107 | #endif /* KMP_GROUP_AFFINITY */ |
2108 | |
2109 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
2110 | |
2111 | template <kmp_uint32 LSB, kmp_uint32 MSB> |
static inline unsigned __kmp_extract_bits(kmp_uint32 v) {
2113 | const kmp_uint32 SHIFT_LEFT = sizeof(kmp_uint32) * 8 - 1 - MSB; |
2114 | const kmp_uint32 SHIFT_RIGHT = LSB; |
2115 | kmp_uint32 retval = v; |
2116 | retval <<= SHIFT_LEFT; |
2117 | retval >>= (SHIFT_LEFT + SHIFT_RIGHT); |
2118 | return retval; |
2119 | } |
2120 | |
2121 | static int __kmp_cpuid_mask_width(int count) { |
2122 | int r = 0; |
2123 | |
2124 | while ((1 << r) < count) |
2125 | ++r; |
2126 | return r; |
2127 | } |
2128 | |
2129 | class apicThreadInfo { |
2130 | public: |
2131 | unsigned osId; // param to __kmp_affinity_bind_thread |
2132 | unsigned apicId; // from cpuid after binding |
2133 | unsigned maxCoresPerPkg; // "" |
2134 | unsigned maxThreadsPerPkg; // "" |
2135 | unsigned pkgId; // inferred from above values |
2136 | unsigned coreId; // "" |
2137 | unsigned threadId; // "" |
2138 | }; |
2139 | |
2140 | static int __kmp_affinity_cmp_apicThreadInfo_phys_id(const void *a, |
2141 | const void *b) { |
2142 | const apicThreadInfo *aa = (const apicThreadInfo *)a; |
2143 | const apicThreadInfo *bb = (const apicThreadInfo *)b; |
2144 | if (aa->pkgId < bb->pkgId) |
2145 | return -1; |
2146 | if (aa->pkgId > bb->pkgId) |
2147 | return 1; |
2148 | if (aa->coreId < bb->coreId) |
2149 | return -1; |
2150 | if (aa->coreId > bb->coreId) |
2151 | return 1; |
2152 | if (aa->threadId < bb->threadId) |
2153 | return -1; |
2154 | if (aa->threadId > bb->threadId) |
2155 | return 1; |
2156 | return 0; |
2157 | } |
2158 | |
2159 | class cpuid_cache_info_t { |
2160 | public: |
2161 | struct info_t { |
2162 | unsigned level = 0; |
2163 | unsigned mask = 0; |
2164 | bool operator==(const info_t &rhs) const { |
2165 | return level == rhs.level && mask == rhs.mask; |
2166 | } |
2167 | bool operator!=(const info_t &rhs) const { return !operator==(rhs); } |
2168 | }; |
2169 | cpuid_cache_info_t() : depth(0) { |
2170 | table[MAX_CACHE_LEVEL].level = 0; |
2171 | table[MAX_CACHE_LEVEL].mask = 0; |
2172 | } |
2173 | size_t get_depth() const { return depth; } |
2174 | info_t &operator[](size_t index) { return table[index]; } |
2175 | const info_t &operator[](size_t index) const { return table[index]; } |
2176 | bool operator==(const cpuid_cache_info_t &rhs) const { |
2177 | if (rhs.depth != depth) |
2178 | return false; |
2179 | for (size_t i = 0; i < depth; ++i) |
2180 | if (table[i] != rhs.table[i]) |
2181 | return false; |
2182 | return true; |
2183 | } |
2184 | bool operator!=(const cpuid_cache_info_t &rhs) const { |
2185 | return !operator==(rhs); |
2186 | } |
  // Get cache information associated with L1, L2, L3 cache, etc.
2188 | // If level does not exist, then return the "NULL" level (level 0) |
2189 | const info_t &get_level(unsigned level) const { |
2190 | for (size_t i = 0; i < depth; ++i) { |
2191 | if (table[i].level == level) |
2192 | return table[i]; |
2193 | } |
2194 | return table[MAX_CACHE_LEVEL]; |
2195 | } |
2196 | |
2197 | static kmp_hw_t get_topology_type(unsigned level) { |
2198 | KMP_DEBUG_ASSERT(level >= 1 && level <= MAX_CACHE_LEVEL); |
2199 | switch (level) { |
2200 | case 1: |
2201 | return KMP_HW_L1; |
2202 | case 2: |
2203 | return KMP_HW_L2; |
2204 | case 3: |
2205 | return KMP_HW_L3; |
2206 | } |
2207 | return KMP_HW_UNKNOWN; |
2208 | } |
2209 | void get_leaf4_levels() { |
2210 | unsigned level = 0; |
2211 | while (depth < MAX_CACHE_LEVEL) { |
2212 | unsigned cache_type, max_threads_sharing; |
2213 | unsigned cache_level, cache_mask_width; |
2214 | kmp_cpuid buf2; |
      __kmp_x86_cpuid(4, level, &buf2);
      cache_type = __kmp_extract_bits<0, 4>(buf2.eax);
      if (!cache_type)
        break;
      // Skip instruction caches
      if (cache_type == 2) {
        level++;
        continue;
      }
      max_threads_sharing = __kmp_extract_bits<14, 25>(buf2.eax) + 1;
      cache_mask_width = __kmp_cpuid_mask_width(max_threads_sharing);
      cache_level = __kmp_extract_bits<5, 7>(buf2.eax);
2227 | table[depth].level = cache_level; |
2228 | table[depth].mask = ((-1) << cache_mask_width); |
2229 | depth++; |
2230 | level++; |
2231 | } |
2232 | } |
2233 | static const int MAX_CACHE_LEVEL = 3; |
2234 | |
2235 | private: |
2236 | size_t depth; |
2237 | info_t table[MAX_CACHE_LEVEL + 1]; |
2238 | }; |
2239 | |
2240 | // On IA-32 architecture and Intel(R) 64 architecture, we attempt to use |
2241 | // an algorithm which cycles through the available os threads, setting |
2242 | // the current thread's affinity mask to that thread, and then retrieves |
2243 | // the Apic Id for each thread context using the cpuid instruction. |
2244 | static bool __kmp_affinity_create_apicid_map(kmp_i18n_id_t *const msg_id) { |
2245 | kmp_cpuid buf; |
2246 | *msg_id = kmp_i18n_null; |
2247 | |
2248 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(DecodingLegacyAPIC));
2250 | } |
2251 | |
2252 | // Check if cpuid leaf 4 is supported. |
  __kmp_x86_cpuid(0, 0, &buf);
2254 | if (buf.eax < 4) { |
2255 | *msg_id = kmp_i18n_str_NoLeaf4Support; |
2256 | return false; |
2257 | } |
2258 | |
2259 | // The algorithm used starts by setting the affinity to each available thread |
2260 | // and retrieving info from the cpuid instruction, so if we are not capable of |
  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then we
2262 | // need to do something else - use the defaults that we calculated from |
2263 | // issuing cpuid without binding to each proc. |
2264 | if (!KMP_AFFINITY_CAPABLE()) { |
2265 | // Hack to try and infer the machine topology using only the data |
2266 | // available from cpuid on the current thread, and __kmp_xproc. |
2267 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2268 | |
2269 | // Get an upper bound on the number of threads per package using cpuid(1). |
    // On some OS/chip combinations where HT is supported by the chip but is
2271 | // disabled, this value will be 2 on a single core chip. Usually, it will be |
2272 | // 2 if HT is enabled and 1 if HT is disabled. |
    __kmp_x86_cpuid(1, 0, &buf);
2274 | int maxThreadsPerPkg = (buf.ebx >> 16) & 0xff; |
2275 | if (maxThreadsPerPkg == 0) { |
2276 | maxThreadsPerPkg = 1; |
2277 | } |
2278 | |
2279 | // The num cores per pkg comes from cpuid(4). 1 must be added to the encoded |
2280 | // value. |
2281 | // |
    // The author of cpu_count.cpp treated this as only an upper bound on the
2283 | // number of cores, but I haven't seen any cases where it was greater than |
2284 | // the actual number of cores, so we will treat it as exact in this block of |
2285 | // code. |
2286 | // |
2287 | // First, we need to check if cpuid(4) is supported on this chip. To see if |
2288 | // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n or |
2289 | // greater. |
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
2293 | nCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1; |
2294 | } else { |
2295 | nCoresPerPkg = 1; |
2296 | } |
2297 | |
2298 | // There is no way to reliably tell if HT is enabled without issuing the |
    // cpuid instruction from every thread and correlating the cpuid info, so
2300 | // if the machine is not affinity capable, we assume that HT is off. We have |
2301 | // seen quite a few machines where maxThreadsPerPkg is 2, yet the machine |
2302 | // does not support HT. |
2303 | // |
2304 | // - Older OSes are usually found on machines with older chips, which do not |
2305 | // support HT. |
2306 | // - The performance penalty for mistakenly identifying a machine as HT when |
2307 | // it isn't (which results in blocktime being incorrectly set to 0) is |
    //   greater than the penalty for mistakenly identifying a machine as
2309 | // being 1 thread/core when it is really HT enabled (which results in |
2310 | // blocktime being incorrectly set to a positive value). |
2311 | __kmp_ncores = __kmp_xproc; |
2312 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; |
2313 | __kmp_nThreadsPerCore = 1; |
2314 | return true; |
2315 | } |
2316 | |
2317 | // From here on, we can assume that it is safe to call |
2318 | // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if |
2319 | // __kmp_affinity.type = affinity_none. |
2320 | |
2321 | // Save the affinity mask for the current thread. |
2322 | kmp_affinity_raii_t previous_affinity; |
2323 | |
2324 | // Run through each of the available contexts, binding the current thread |
2325 | // to it, and obtaining the pertinent information using the cpuid instr. |
2326 | // |
2327 | // The relevant information is: |
2328 | // - Apic Id: Bits 24:31 of ebx after issuing cpuid(1) - each thread context |
  //   has a unique Apic Id, which is of the form pkg# : core# : thread#.
2330 | // - Max Threads Per Pkg: Bits 16:23 of ebx after issuing cpuid(1). The value |
2331 | // of this field determines the width of the core# + thread# fields in the |
2332 | // Apic Id. It is also an upper bound on the number of threads per |
  //   package, but it has been verified that situations happen where it is not
2334 | // exact. In particular, on certain OS/chip combinations where Intel(R) |
2335 | // Hyper-Threading Technology is supported by the chip but has been |
2336 | // disabled, the value of this field will be 2 (for a single core chip). |
2337 | // On other OS/chip combinations supporting Intel(R) Hyper-Threading |
2338 | // Technology, the value of this field will be 1 when Intel(R) |
2339 | // Hyper-Threading Technology is disabled and 2 when it is enabled. |
2340 | // - Max Cores Per Pkg: Bits 26:31 of eax after issuing cpuid(4). The value |
2341 | // of this field (+1) determines the width of the core# field in the Apic |
  //   Id. The comments in "cpu_count.cpp" say that this value is an upper
2343 | // bound, but the IA-32 architecture manual says that it is exactly the |
2344 | // number of cores per package, and I haven't seen any case where it |
2345 | // wasn't. |
2346 | // |
2347 | // From this information, deduce the package Id, core Id, and thread Id, |
2348 | // and set the corresponding fields in the apicThreadInfo struct. |
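
  // Worked example (hypothetical values): apicId = 0b101101 with
  // maxThreadsPerPkg = 4 gives widthCT = 2, so pkgId = apicId >> 2 = 0b1011.
  // With maxCoresPerPkg = 2, widthC = 1 and widthT = widthCT - widthC = 1,
  // so coreId = (apicId >> 1) & 0x1 = 0 and threadId = apicId & 0x1 = 1.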
2349 | unsigned i; |
2350 | apicThreadInfo *threadInfo = (apicThreadInfo *)__kmp_allocate( |
2351 | __kmp_avail_proc * sizeof(apicThreadInfo)); |
2352 | unsigned nApics = 0; |
2353 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
2354 | // Skip this proc if it is not included in the machine model. |
2355 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
2356 | continue; |
2357 | } |
2358 | KMP_DEBUG_ASSERT((int)nApics < __kmp_avail_proc); |
2359 | |
    __kmp_affinity_dispatch->bind_thread(i);
2361 | threadInfo[nApics].osId = i; |
2362 | |
2363 | // The apic id and max threads per pkg come from cpuid(1). |
    __kmp_x86_cpuid(1, 0, &buf);
2365 | if (((buf.edx >> 9) & 1) == 0) { |
2366 | __kmp_free(threadInfo); |
2367 | *msg_id = kmp_i18n_str_ApicNotPresent; |
2368 | return false; |
2369 | } |
2370 | threadInfo[nApics].apicId = (buf.ebx >> 24) & 0xff; |
2371 | threadInfo[nApics].maxThreadsPerPkg = (buf.ebx >> 16) & 0xff; |
2372 | if (threadInfo[nApics].maxThreadsPerPkg == 0) { |
2373 | threadInfo[nApics].maxThreadsPerPkg = 1; |
2374 | } |
2375 | |
2376 | // Max cores per pkg comes from cpuid(4). 1 must be added to the encoded |
2377 | // value. |
2378 | // |
2379 | // First, we need to check if cpuid(4) is supported on this chip. To see if |
2380 | // cpuid(n) is supported, issue cpuid(0) and check if eax has the value n |
2381 | // or greater. |
    __kmp_x86_cpuid(0, 0, &buf);
    if (buf.eax >= 4) {
      __kmp_x86_cpuid(4, 0, &buf);
2385 | threadInfo[nApics].maxCoresPerPkg = ((buf.eax >> 26) & 0x3f) + 1; |
2386 | } else { |
2387 | threadInfo[nApics].maxCoresPerPkg = 1; |
2388 | } |
2389 | |
2390 | // Infer the pkgId / coreId / threadId using only the info obtained locally. |
    int widthCT = __kmp_cpuid_mask_width(threadInfo[nApics].maxThreadsPerPkg);
    threadInfo[nApics].pkgId = threadInfo[nApics].apicId >> widthCT;

    int widthC = __kmp_cpuid_mask_width(threadInfo[nApics].maxCoresPerPkg);
2395 | int widthT = widthCT - widthC; |
2396 | if (widthT < 0) { |
2397 | // I've never seen this one happen, but I suppose it could, if the cpuid |
2398 | // instruction on a chip was really screwed up. Make sure to restore the |
2399 | // affinity mask before the tail call. |
2400 | __kmp_free(threadInfo); |
2401 | *msg_id = kmp_i18n_str_InvalidCpuidInfo; |
2402 | return false; |
2403 | } |
2404 | |
2405 | int maskC = (1 << widthC) - 1; |
2406 | threadInfo[nApics].coreId = (threadInfo[nApics].apicId >> widthT) & maskC; |
2407 | |
2408 | int maskT = (1 << widthT) - 1; |
2409 | threadInfo[nApics].threadId = threadInfo[nApics].apicId & maskT; |
2410 | |
2411 | nApics++; |
2412 | } |
2413 | |
2414 | // We've collected all the info we need. |
2415 | // Restore the old affinity mask for this thread. |
2416 | previous_affinity.restore(); |
2417 | |
2418 | // Sort the threadInfo table by physical Id. |
  qsort(threadInfo, nApics, sizeof(*threadInfo),
        __kmp_affinity_cmp_apicThreadInfo_phys_id);
2421 | |
2422 | // The table is now sorted by pkgId / coreId / threadId, but we really don't |
2423 | // know the radix of any of the fields. pkgId's may be sparsely assigned among |
2424 | // the chips on a system. Although coreId's are usually assigned |
2425 | // [0 .. coresPerPkg-1] and threadId's are usually assigned |
2426 | // [0..threadsPerCore-1], we don't want to make any such assumptions. |
2427 | // |
2428 | // For that matter, we don't know what coresPerPkg and threadsPerCore (or the |
2429 | // total # packages) are at this point - we want to determine that now. We |
2430 | // only have an upper bound on the first two figures. |
2431 | // |
2432 | // We also perform a consistency check at this point: the values returned by |
2433 | // the cpuid instruction for any thread bound to a given package had better |
2434 | // return the same info for maxThreadsPerPkg and maxCoresPerPkg. |
2435 | nPackages = 1; |
2436 | nCoresPerPkg = 1; |
2437 | __kmp_nThreadsPerCore = 1; |
2438 | unsigned nCores = 1; |
2439 | |
2440 | unsigned pkgCt = 1; // to determine radii |
2441 | unsigned lastPkgId = threadInfo[0].pkgId; |
2442 | unsigned coreCt = 1; |
2443 | unsigned lastCoreId = threadInfo[0].coreId; |
2444 | unsigned threadCt = 1; |
2445 | unsigned lastThreadId = threadInfo[0].threadId; |
2446 | |
  // intra-pkg consistency checks
2448 | unsigned prevMaxCoresPerPkg = threadInfo[0].maxCoresPerPkg; |
2449 | unsigned prevMaxThreadsPerPkg = threadInfo[0].maxThreadsPerPkg; |
2450 | |
2451 | for (i = 1; i < nApics; i++) { |
2452 | if (threadInfo[i].pkgId != lastPkgId) { |
2453 | nCores++; |
2454 | pkgCt++; |
2455 | lastPkgId = threadInfo[i].pkgId; |
2456 | if ((int)coreCt > nCoresPerPkg) |
2457 | nCoresPerPkg = coreCt; |
2458 | coreCt = 1; |
2459 | lastCoreId = threadInfo[i].coreId; |
2460 | if ((int)threadCt > __kmp_nThreadsPerCore) |
2461 | __kmp_nThreadsPerCore = threadCt; |
2462 | threadCt = 1; |
2463 | lastThreadId = threadInfo[i].threadId; |
2464 | |
2465 | // This is a different package, so go on to the next iteration without |
2466 | // doing any consistency checks. Reset the consistency check vars, though. |
2467 | prevMaxCoresPerPkg = threadInfo[i].maxCoresPerPkg; |
2468 | prevMaxThreadsPerPkg = threadInfo[i].maxThreadsPerPkg; |
2469 | continue; |
2470 | } |
2471 | |
2472 | if (threadInfo[i].coreId != lastCoreId) { |
2473 | nCores++; |
2474 | coreCt++; |
2475 | lastCoreId = threadInfo[i].coreId; |
2476 | if ((int)threadCt > __kmp_nThreadsPerCore) |
2477 | __kmp_nThreadsPerCore = threadCt; |
2478 | threadCt = 1; |
2479 | lastThreadId = threadInfo[i].threadId; |
2480 | } else if (threadInfo[i].threadId != lastThreadId) { |
2481 | threadCt++; |
2482 | lastThreadId = threadInfo[i].threadId; |
2483 | } else { |
2484 | __kmp_free(threadInfo); |
2485 | *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique; |
2486 | return false; |
2487 | } |
2488 | |
2489 | // Check to make certain that the maxCoresPerPkg and maxThreadsPerPkg |
    // fields agree between all the threads bound to a given package.
2491 | if ((prevMaxCoresPerPkg != threadInfo[i].maxCoresPerPkg) || |
2492 | (prevMaxThreadsPerPkg != threadInfo[i].maxThreadsPerPkg)) { |
2493 | __kmp_free(threadInfo); |
2494 | *msg_id = kmp_i18n_str_InconsistentCpuidInfo; |
2495 | return false; |
2496 | } |
2497 | } |
2498 | // When affinity is off, this routine will still be called to set |
2499 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
2500 | // Make sure all these vars are set correctly |
2501 | nPackages = pkgCt; |
2502 | if ((int)coreCt > nCoresPerPkg) |
2503 | nCoresPerPkg = coreCt; |
2504 | if ((int)threadCt > __kmp_nThreadsPerCore) |
2505 | __kmp_nThreadsPerCore = threadCt; |
2506 | __kmp_ncores = nCores; |
2507 | KMP_DEBUG_ASSERT(nApics == (unsigned)__kmp_avail_proc); |
2508 | |
2509 | // Now that we've determined the number of packages, the number of cores per |
2510 | // package, and the number of threads per core, we can construct the data |
2511 | // structure that is to be returned. |
2512 | int idx = 0; |
2513 | int pkgLevel = 0; |
2514 | int coreLevel = 1; |
2515 | int threadLevel = 2; |
2516 | //(__kmp_nThreadsPerCore <= 1) ? -1 : ((coreLevel >= 0) ? 2 : 1); |
2517 | int depth = (pkgLevel >= 0) + (coreLevel >= 0) + (threadLevel >= 0); |
2518 | kmp_hw_t types[3]; |
2519 | if (pkgLevel >= 0) |
2520 | types[idx++] = KMP_HW_SOCKET; |
2521 | if (coreLevel >= 0) |
2522 | types[idx++] = KMP_HW_CORE; |
2523 | if (threadLevel >= 0) |
2524 | types[idx++] = KMP_HW_THREAD; |
2525 | |
2526 | KMP_ASSERT(depth > 0); |
  __kmp_topology = kmp_topology_t::allocate(nApics, depth, types);
2528 | |
2529 | for (i = 0; i < nApics; ++i) { |
2530 | idx = 0; |
2531 | unsigned os = threadInfo[i].osId; |
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2533 | hw_thread.clear(); |
2534 | |
2535 | if (pkgLevel >= 0) { |
2536 | hw_thread.ids[idx++] = threadInfo[i].pkgId; |
2537 | } |
2538 | if (coreLevel >= 0) { |
2539 | hw_thread.ids[idx++] = threadInfo[i].coreId; |
2540 | } |
2541 | if (threadLevel >= 0) { |
2542 | hw_thread.ids[idx++] = threadInfo[i].threadId; |
2543 | } |
2544 | hw_thread.os_id = os; |
2545 | hw_thread.original_idx = i; |
2546 | } |
2547 | |
2548 | __kmp_free(threadInfo); |
2549 | __kmp_topology->sort_ids(); |
2550 | if (!__kmp_topology->check_ids()) { |
    kmp_topology_t::deallocate(__kmp_topology);
2552 | __kmp_topology = nullptr; |
2553 | *msg_id = kmp_i18n_str_LegacyApicIDsNotUnique; |
2554 | return false; |
2555 | } |
2556 | return true; |
2557 | } |
2558 | |
2559 | // Hybrid cpu detection using CPUID.1A |
2560 | // Thread should be pinned to processor already |
2561 | static void __kmp_get_hybrid_info(kmp_hw_core_type_t *type, int *efficiency, |
2562 | unsigned *native_model_id) { |
2563 | kmp_cpuid buf; |
  __kmp_x86_cpuid(0x1a, 0, &buf);
  *type = (kmp_hw_core_type_t)__kmp_extract_bits<24, 31>(buf.eax);
2566 | switch (*type) { |
2567 | case KMP_HW_CORE_TYPE_ATOM: |
2568 | *efficiency = 0; |
2569 | break; |
2570 | case KMP_HW_CORE_TYPE_CORE: |
2571 | *efficiency = 1; |
2572 | break; |
2573 | default: |
2574 | *efficiency = 0; |
2575 | } |
  *native_model_id = __kmp_extract_bits<0, 23>(buf.eax);
2577 | } |
2578 | |
2579 | // Intel(R) microarchitecture code name Nehalem, Dunnington and later |
2580 | // architectures support a newer interface for specifying the x2APIC Ids, |
2581 | // based on CPUID.B or CPUID.1F |
2582 | /* |
2583 | * CPUID.B or 1F, Input ECX (sub leaf # aka level number) |
2584 | Bits Bits Bits Bits |
2585 | 31-16 15-8 7-4 4-0 |
2586 | ---+-----------+--------------+-------------+-----------------+ |
2587 | EAX| reserved | reserved | reserved | Bits to Shift | |
2588 | ---+-----------|--------------+-------------+-----------------| |
2589 | EBX| reserved | Num logical processors at level (16 bits) | |
2590 | ---+-----------|--------------+-------------------------------| |
2591 | ECX| reserved | Level Type | Level Number (8 bits) | |
2592 | ---+-----------+--------------+-------------------------------| |
2593 | EDX| X2APIC ID (32 bits) | |
2594 | ---+----------------------------------------------------------+ |
2595 | */ |
2596 | |
2597 | enum { |
2598 | INTEL_LEVEL_TYPE_INVALID = 0, // Package level |
2599 | INTEL_LEVEL_TYPE_SMT = 1, |
2600 | INTEL_LEVEL_TYPE_CORE = 2, |
2601 | INTEL_LEVEL_TYPE_MODULE = 3, |
2602 | INTEL_LEVEL_TYPE_TILE = 4, |
2603 | INTEL_LEVEL_TYPE_DIE = 5, |
2604 | INTEL_LEVEL_TYPE_LAST = 6, |
2605 | }; |
2606 | KMP_BUILD_ASSERT(INTEL_LEVEL_TYPE_LAST < sizeof(unsigned) * CHAR_BIT); |
2607 | #define KMP_LEAF_1F_KNOWN_LEVELS ((1u << INTEL_LEVEL_TYPE_LAST) - 1u) |
2608 | |
2609 | static kmp_hw_t __kmp_intel_type_2_topology_type(int intel_type) { |
2610 | switch (intel_type) { |
2611 | case INTEL_LEVEL_TYPE_INVALID: |
2612 | return KMP_HW_SOCKET; |
2613 | case INTEL_LEVEL_TYPE_SMT: |
2614 | return KMP_HW_THREAD; |
2615 | case INTEL_LEVEL_TYPE_CORE: |
2616 | return KMP_HW_CORE; |
2617 | case INTEL_LEVEL_TYPE_TILE: |
2618 | return KMP_HW_TILE; |
2619 | case INTEL_LEVEL_TYPE_MODULE: |
2620 | return KMP_HW_MODULE; |
2621 | case INTEL_LEVEL_TYPE_DIE: |
2622 | return KMP_HW_DIE; |
2623 | } |
2624 | return KMP_HW_UNKNOWN; |
2625 | } |
2626 | |
2627 | static int __kmp_topology_type_2_intel_type(kmp_hw_t type) { |
2628 | switch (type) { |
2629 | case KMP_HW_SOCKET: |
2630 | return INTEL_LEVEL_TYPE_INVALID; |
2631 | case KMP_HW_THREAD: |
2632 | return INTEL_LEVEL_TYPE_SMT; |
2633 | case KMP_HW_CORE: |
2634 | return INTEL_LEVEL_TYPE_CORE; |
2635 | case KMP_HW_TILE: |
2636 | return INTEL_LEVEL_TYPE_TILE; |
2637 | case KMP_HW_MODULE: |
2638 | return INTEL_LEVEL_TYPE_MODULE; |
2639 | case KMP_HW_DIE: |
2640 | return INTEL_LEVEL_TYPE_DIE; |
2641 | default: |
2642 | return INTEL_LEVEL_TYPE_INVALID; |
2643 | } |
2644 | } |
2645 | |
2646 | struct cpuid_level_info_t { |
2647 | unsigned level_type, mask, mask_width, nitems, cache_mask; |
2648 | }; |
2649 | |
2650 | class cpuid_topo_desc_t { |
2651 | unsigned desc = 0; |
2652 | |
2653 | public: |
2654 | void clear() { desc = 0; } |
2655 | bool contains(int intel_type) const { |
2656 | KMP_DEBUG_ASSERT(intel_type >= 0 && intel_type < INTEL_LEVEL_TYPE_LAST); |
2657 | if ((1u << intel_type) & desc) |
2658 | return true; |
2659 | return false; |
2660 | } |
2661 | bool contains_topology_type(kmp_hw_t type) const { |
2662 | KMP_DEBUG_ASSERT(type >= 0 && type < KMP_HW_LAST); |
2663 | int intel_type = __kmp_topology_type_2_intel_type(type); |
2664 | return contains(intel_type); |
2665 | } |
2666 | bool contains(cpuid_topo_desc_t rhs) const { |
2667 | return ((desc | rhs.desc) == desc); |
2668 | } |
2669 | void add(int intel_type) { desc |= (1u << intel_type); } |
2670 | void add(cpuid_topo_desc_t rhs) { desc |= rhs.desc; } |
2671 | }; |
2672 | |
2673 | struct cpuid_proc_info_t { |
2674 | // Topology info |
2675 | int os_id; |
2676 | unsigned apic_id; |
2677 | unsigned depth; |
2678 | // Hybrid info |
2679 | unsigned native_model_id; |
2680 | int efficiency; |
2681 | kmp_hw_core_type_t type; |
2682 | cpuid_topo_desc_t description; |
2683 | |
2684 | cpuid_level_info_t levels[INTEL_LEVEL_TYPE_LAST]; |
2685 | }; |
2686 | |
2687 | // This function takes the topology leaf, an info pointer to store the levels |
2688 | // detected, and writable descriptors for the total topology. |
2689 | // Returns whether total types, depth, or description were modified. |
2690 | static bool __kmp_x2apicid_get_levels(int leaf, cpuid_proc_info_t *info, |
2691 | kmp_hw_t total_types[KMP_HW_LAST], |
2692 | int *total_depth, |
2693 | cpuid_topo_desc_t *total_description) { |
2694 | unsigned level, levels_index; |
2695 | unsigned level_type, mask_width, nitems; |
2696 | kmp_cpuid buf; |
2697 | cpuid_level_info_t(&levels)[INTEL_LEVEL_TYPE_LAST] = info->levels; |
2698 | bool retval = false; |
2699 | |
2700 | // New algorithm has known topology layers act as highest unknown topology |
2701 | // layers when unknown topology layers exist. |
2702 | // e.g., Suppose layers were SMT <X> CORE <Y> <Z> PACKAGE, where <X> <Y> <Z> |
2703 | // are unknown topology layers, Then SMT will take the characteristics of |
2704 | // (SMT x <X>) and CORE will take the characteristics of (CORE x <Y> x <Z>). |
2705 | // This eliminates unknown portions of the topology while still keeping the |
2706 | // correct structure. |
2707 | level = levels_index = 0; |
2708 | do { |
    __kmp_x86_cpuid(leaf, level, &buf);
    level_type = __kmp_extract_bits<8, 15>(buf.ecx);
    mask_width = __kmp_extract_bits<0, 4>(buf.eax);
    nitems = __kmp_extract_bits<0, 15>(buf.ebx);
2713 | if (level_type != INTEL_LEVEL_TYPE_INVALID && nitems == 0) { |
2714 | info->depth = 0; |
2715 | return retval; |
2716 | } |
2717 | |
2718 | if (KMP_LEAF_1F_KNOWN_LEVELS & (1u << level_type)) { |
2719 | // Add a new level to the topology |
2720 | KMP_ASSERT(levels_index < INTEL_LEVEL_TYPE_LAST); |
2721 | levels[levels_index].level_type = level_type; |
2722 | levels[levels_index].mask_width = mask_width; |
2723 | levels[levels_index].nitems = nitems; |
2724 | levels_index++; |
2725 | } else { |
2726 | // If it is an unknown level, then logically move the previous layer up |
2727 | if (levels_index > 0) { |
2728 | levels[levels_index - 1].mask_width = mask_width; |
2729 | levels[levels_index - 1].nitems = nitems; |
2730 | } |
2731 | } |
2732 | level++; |
2733 | } while (level_type != INTEL_LEVEL_TYPE_INVALID); |
2734 | KMP_ASSERT(levels_index <= INTEL_LEVEL_TYPE_LAST); |
2735 | info->description.clear(); |
2736 | info->depth = levels_index; |
2737 | |
2738 | // If types, depth, and total_description are uninitialized, |
2739 | // then initialize them now |
2740 | if (*total_depth == 0) { |
2741 | *total_depth = info->depth; |
2742 | total_description->clear(); |
2743 | for (int i = *total_depth - 1, j = 0; i >= 0; --i, ++j) { |
2744 | total_types[j] = |
          __kmp_intel_type_2_topology_type(info->levels[i].level_type);
      total_description->add(info->levels[i].level_type);
2747 | } |
2748 | retval = true; |
2749 | } |
2750 | |
2751 | // Ensure the INTEL_LEVEL_TYPE_INVALID (Socket) layer isn't first |
2752 | if (levels_index == 0 || levels[0].level_type == INTEL_LEVEL_TYPE_INVALID) |
2753 | return 0; |
2754 | |
2755 | // Set the masks to & with apicid |
2756 | for (unsigned i = 0; i < levels_index; ++i) { |
2757 | if (levels[i].level_type != INTEL_LEVEL_TYPE_INVALID) { |
2758 | levels[i].mask = ~((-1) << levels[i].mask_width); |
2759 | levels[i].cache_mask = (-1) << levels[i].mask_width; |
2760 | for (unsigned j = 0; j < i; ++j) |
2761 | levels[i].mask ^= levels[j].mask; |
2762 | } else { |
2763 | KMP_DEBUG_ASSERT(i > 0); |
2764 | levels[i].mask = (-1) << levels[i - 1].mask_width; |
2765 | levels[i].cache_mask = 0; |
2766 | } |
    info->description.add(info->levels[i].level_type);
2768 | } |
2769 | |
2770 | // If this processor has level type not on other processors, then make |
2771 | // sure to include it in total types, depth, and description. |
2772 | // One assumption here is that the first type, i.e. socket, is known. |
2773 | // Another assumption is that types array is always large enough to fit any |
2774 | // new layers since its length is KMP_HW_LAST. |
  if (!total_description->contains(info->description)) {
2776 | for (int i = info->depth - 1, j = 0; i >= 0; --i, ++j) { |
2777 | // If this level is known already, then skip it. |
      if (total_description->contains(levels[i].level_type))
2779 | continue; |
2780 | // Unknown level, insert before last known level |
2781 | kmp_hw_t curr_type = |
          __kmp_intel_type_2_topology_type(levels[i].level_type);
      KMP_ASSERT(j != 0 && "Bad APIC Id information");
2784 | // Move over all known levels to make room for new level |
2785 | for (int k = info->depth - 1; k >= j; --k) { |
2786 | KMP_DEBUG_ASSERT(k + 1 < KMP_HW_LAST); |
2787 | total_types[k + 1] = total_types[k]; |
2788 | } |
2789 | // Insert new level |
2790 | total_types[j] = curr_type; |
2791 | (*total_depth)++; |
2792 | } |
    total_description->add(info->description);
2794 | retval = true; |
2795 | } |
2796 | return retval; |
2797 | } |
2798 | |
2799 | static bool __kmp_affinity_create_x2apicid_map(kmp_i18n_id_t *const msg_id) { |
2800 | |
2801 | kmp_hw_t types[INTEL_LEVEL_TYPE_LAST]; |
2802 | kmp_cpuid buf; |
2803 | int topology_leaf, highest_leaf; |
2804 | int num_leaves; |
2805 | int depth = 0; |
2806 | cpuid_topo_desc_t total_description; |
2807 | static int leaves[] = {0, 0}; |
2808 | |
2809 | // If affinity is disabled, __kmp_avail_proc may be zero |
2810 | int ninfos = (__kmp_avail_proc > 0 ? __kmp_avail_proc : 1); |
2811 | cpuid_proc_info_t *proc_info = (cpuid_proc_info_t *)__kmp_allocate( |
2812 | (sizeof(cpuid_proc_info_t) + sizeof(cpuid_cache_info_t)) * ninfos); |
2813 | cpuid_cache_info_t *cache_info = (cpuid_cache_info_t *)(proc_info + ninfos); |
2814 | |
2815 | kmp_i18n_id_t leaf_message_id; |
2816 | |
2817 | *msg_id = kmp_i18n_null; |
2818 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffInfoStr, "KMP_AFFINITY", KMP_I18N_STR(Decodingx2APIC));
2820 | } |
2821 | |
2822 | // Get the highest cpuid leaf supported |
  __kmp_x86_cpuid(0, 0, &buf);
2824 | highest_leaf = buf.eax; |
2825 | |
  // If a specific topology method was requested, only allow that specific leaf;
2827 | // otherwise, try both leaves 31 and 11 in that order |
2828 | num_leaves = 0; |
2829 | if (__kmp_affinity_top_method == affinity_top_method_x2apicid) { |
2830 | num_leaves = 1; |
2831 | leaves[0] = 11; |
2832 | leaf_message_id = kmp_i18n_str_NoLeaf11Support; |
2833 | } else if (__kmp_affinity_top_method == affinity_top_method_x2apicid_1f) { |
2834 | num_leaves = 1; |
2835 | leaves[0] = 31; |
2836 | leaf_message_id = kmp_i18n_str_NoLeaf31Support; |
2837 | } else { |
2838 | num_leaves = 2; |
2839 | leaves[0] = 31; |
2840 | leaves[1] = 11; |
2841 | leaf_message_id = kmp_i18n_str_NoLeaf11Support; |
2842 | } |
2843 | |
2844 | // Check to see if cpuid leaf 31 or 11 is supported. |
2845 | __kmp_nThreadsPerCore = nCoresPerPkg = nPackages = 1; |
2846 | topology_leaf = -1; |
2847 | for (int i = 0; i < num_leaves; ++i) { |
2848 | int leaf = leaves[i]; |
2849 | if (highest_leaf < leaf) |
2850 | continue; |
    __kmp_x86_cpuid(leaf, 0, &buf);
2852 | if (buf.ebx == 0) |
2853 | continue; |
2854 | topology_leaf = leaf; |
    __kmp_x2apicid_get_levels(leaf, &proc_info[0], types, &depth,
                              &total_description);
2857 | if (depth == 0) |
2858 | continue; |
2859 | break; |
2860 | } |
2861 | if (topology_leaf == -1 || depth == 0) { |
2862 | *msg_id = leaf_message_id; |
2863 | __kmp_free(proc_info); |
2864 | return false; |
2865 | } |
2866 | KMP_ASSERT(depth <= INTEL_LEVEL_TYPE_LAST); |
2867 | |
2868 | // The algorithm used starts by setting the affinity to each available thread |
2869 | // and retrieving info from the cpuid instruction, so if we are not capable of |
  // calling __kmp_get_system_affinity() and __kmp_set_system_affinity(), then
2871 | // we need to do something else - use the defaults that we calculated from |
2872 | // issuing cpuid without binding to each proc. |
2873 | if (!KMP_AFFINITY_CAPABLE()) { |
2874 | // Hack to try and infer the machine topology using only the data |
2875 | // available from cpuid on the current thread, and __kmp_xproc. |
2876 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
2877 | for (int i = 0; i < depth; ++i) { |
2878 | if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_SMT) { |
2879 | __kmp_nThreadsPerCore = proc_info[0].levels[i].nitems; |
2880 | } else if (proc_info[0].levels[i].level_type == INTEL_LEVEL_TYPE_CORE) { |
2881 | nCoresPerPkg = proc_info[0].levels[i].nitems; |
2882 | } |
2883 | } |
2884 | __kmp_ncores = __kmp_xproc / __kmp_nThreadsPerCore; |
2885 | nPackages = (__kmp_xproc + nCoresPerPkg - 1) / nCoresPerPkg; |
2886 | __kmp_free(proc_info); |
2887 | return true; |
2888 | } |
2889 | |
2890 | // From here on, we can assume that it is safe to call |
2891 | // __kmp_get_system_affinity() and __kmp_set_system_affinity(), even if |
2892 | // __kmp_affinity.type = affinity_none. |
2893 | |
2894 | // Save the affinity mask for the current thread. |
2895 | kmp_affinity_raii_t previous_affinity; |
2896 | |
2897 | // Run through each of the available contexts, binding the current thread |
2898 | // to it, and obtaining the pertinent information using the cpuid instr. |
2899 | unsigned int proc; |
2900 | int hw_thread_index = 0; |
2901 | bool uniform_caches = true; |
2902 | |
2903 | KMP_CPU_SET_ITERATE(proc, __kmp_affin_fullMask) { |
2904 | // Skip this proc if it is not included in the machine model. |
2905 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
2906 | continue; |
2907 | } |
2908 | KMP_DEBUG_ASSERT(hw_thread_index < __kmp_avail_proc); |
2909 | |
2910 | // Gather topology information |
2911 | __kmp_affinity_dispatch->bind_thread(proc); |
    __kmp_x86_cpuid(topology_leaf, 0, &buf);
2913 | proc_info[hw_thread_index].os_id = proc; |
2914 | proc_info[hw_thread_index].apic_id = buf.edx; |
    __kmp_x2apicid_get_levels(topology_leaf, &proc_info[hw_thread_index],
                              types, &depth, &total_description);
2917 | if (proc_info[hw_thread_index].depth == 0) { |
2918 | *msg_id = kmp_i18n_str_InvalidCpuidInfo; |
2919 | __kmp_free(proc_info); |
2920 | return false; |
2921 | } |
2922 | // Gather cache information and insert afterwards |
2923 | cache_info[hw_thread_index].get_leaf4_levels(); |
2924 | if (uniform_caches && hw_thread_index > 0) |
2925 | if (cache_info[0] != cache_info[hw_thread_index]) |
2926 | uniform_caches = false; |
2927 | // Hybrid information |
2928 | if (__kmp_is_hybrid_cpu() && highest_leaf >= 0x1a) { |
      __kmp_get_hybrid_info(&proc_info[hw_thread_index].type,
                            &proc_info[hw_thread_index].efficiency,
                            &proc_info[hw_thread_index].native_model_id);
2932 | } |
2933 | hw_thread_index++; |
2934 | } |
2935 | KMP_ASSERT(hw_thread_index > 0); |
2936 | previous_affinity.restore(); |
2937 | |
2938 | // Allocate the data structure to be returned. |
  __kmp_topology = kmp_topology_t::allocate(__kmp_avail_proc, depth, types);
2940 | |
2941 | // Create topology Ids and hybrid types in __kmp_topology |
2942 | for (int i = 0; i < __kmp_topology->get_num_hw_threads(); ++i) { |
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2944 | hw_thread.clear(); |
2945 | hw_thread.os_id = proc_info[i].os_id; |
2946 | hw_thread.original_idx = i; |
2947 | unsigned apic_id = proc_info[i].apic_id; |
2948 | // Put in topology information |
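    // For illustration (hypothetical masks): if the SMT level has mask 0x1
    // (mask_width 1) and the core level has mask 0x3f, then for
    // apic_id = 0x2d the thread id is 0x2d & 0x1 = 1 and the core id is
    // (0x2d & 0x3f) >> 1 = 0x16: each level's id is the masked APIC id
    // shifted right by the combined width of the levels below it.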
2949 | for (int j = 0, idx = depth - 1; j < depth; ++j, --idx) { |
      if (!(proc_info[i].description.contains_topology_type(
              __kmp_topology->get_type(j)))) {
2952 | hw_thread.ids[idx] = kmp_hw_thread_t::UNKNOWN_ID; |
2953 | } else { |
2954 | hw_thread.ids[idx] = apic_id & proc_info[i].levels[j].mask; |
2955 | if (j > 0) { |
2956 | hw_thread.ids[idx] >>= proc_info[i].levels[j - 1].mask_width; |
2957 | } |
2958 | } |
2959 | } |
2960 | hw_thread.attrs.set_core_type(proc_info[i].type); |
2961 | hw_thread.attrs.set_core_eff(proc_info[i].efficiency); |
2962 | } |
2963 | |
2964 | __kmp_topology->sort_ids(); |
2965 | |
2966 | // Change Ids to logical Ids |
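  // For example (hypothetical raw ids): (socket, core) id pairs
  // (4,10) (4,10) (4,12) (7,10) (7,10) become core ids 0 0 1 2 2; the
  // counter advances whenever the core id or the socket id changes, so equal
  // logical ids imply the threads share a core.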
2967 | for (int j = 0; j < depth - 1; ++j) { |
2968 | int new_id = 0; |
    int prev_id = __kmp_topology->at(0).ids[j];
    int curr_id = __kmp_topology->at(0).ids[j + 1];
    __kmp_topology->at(0).ids[j + 1] = new_id;
    for (int i = 1; i < __kmp_topology->get_num_hw_threads(); ++i) {
      kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
2974 | if (hw_thread.ids[j] == prev_id && hw_thread.ids[j + 1] == curr_id) { |
2975 | hw_thread.ids[j + 1] = new_id; |
2976 | } else if (hw_thread.ids[j] == prev_id && |
2977 | hw_thread.ids[j + 1] != curr_id) { |
2978 | curr_id = hw_thread.ids[j + 1]; |
2979 | hw_thread.ids[j + 1] = ++new_id; |
2980 | } else { |
2981 | prev_id = hw_thread.ids[j]; |
2982 | curr_id = hw_thread.ids[j + 1]; |
2983 | hw_thread.ids[j + 1] = ++new_id; |
2984 | } |
2985 | } |
2986 | } |
2987 | |
2988 | // First check for easy cache placement. This occurs when caches are |
2989 | // equivalent to a layer in the CPUID leaf 0xb or 0x1f topology. |
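  // For example (hypothetical): if the L2 cache mask from leaf 4 matches the
  // cache mask recorded at the SMT level, then each L2 is shared by exactly
  // the threads of one core, and KMP_HW_L2 is recorded as equivalent to
  // KMP_HW_CORE instead of being inserted as a separate layer.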
2990 | if (uniform_caches) { |
2991 | for (size_t i = 0; i < cache_info[0].get_depth(); ++i) { |
2992 | unsigned cache_mask = cache_info[0][i].mask; |
2993 | unsigned cache_level = cache_info[0][i].level; |
2994 | KMP_ASSERT(cache_level <= cpuid_cache_info_t::MAX_CACHE_LEVEL); |
      kmp_hw_t cache_type =
          cpuid_cache_info_t::get_topology_type(cache_level);
      __kmp_topology->set_equivalent_type(cache_type, cache_type);
2997 | for (int j = 0; j < depth; ++j) { |
2998 | unsigned hw_cache_mask = proc_info[0].levels[j].cache_mask; |
2999 | if (hw_cache_mask == cache_mask && j < depth - 1) { |
          kmp_hw_t type = __kmp_intel_type_2_topology_type(
              proc_info[0].levels[j + 1].level_type);
          __kmp_topology->set_equivalent_type(cache_type, type);
3003 | } |
3004 | } |
3005 | } |
3006 | } else { |
3007 | // If caches are non-uniform, then record which caches exist. |
3008 | for (int i = 0; i < __kmp_topology->get_num_hw_threads(); ++i) { |
3009 | for (size_t j = 0; j < cache_info[i].get_depth(); ++j) { |
3010 | unsigned cache_level = cache_info[i][j].level; |
        kmp_hw_t cache_type =
            cpuid_cache_info_t::get_topology_type(cache_level);
        if (__kmp_topology->get_equivalent_type(cache_type) == KMP_HW_UNKNOWN)
          __kmp_topology->set_equivalent_type(cache_type, cache_type);
3015 | } |
3016 | } |
3017 | } |
3018 | |
3019 | // See if any cache level needs to be added manually through cache Ids |
3020 | bool unresolved_cache_levels = false; |
3021 | for (unsigned level = 1; level <= cpuid_cache_info_t::MAX_CACHE_LEVEL; |
3022 | ++level) { |
3023 | kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(level); |
3024 | // This also filters out caches which may not be in the topology |
3025 | // since the equivalent type might be KMP_HW_UNKNOWN. |
    if (__kmp_topology->get_equivalent_type(cache_type) == cache_type) {
3027 | unresolved_cache_levels = true; |
3028 | break; |
3029 | } |
3030 | } |
3031 | |
3032 | // Insert unresolved cache layers into machine topology using cache Ids |
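  // A hw thread's cache id is its APIC id under the cache's mask; e.g.
  // (hypothetically) a mask of 0xffffffc0 clears the low 6 bits, giving all
  // hardware threads that share one such cache the same id.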
3033 | if (unresolved_cache_levels) { |
3034 | int num_hw_threads = __kmp_topology->get_num_hw_threads(); |
3035 | int *ids = (int *)__kmp_allocate(sizeof(int) * num_hw_threads); |
3036 | for (unsigned l = 1; l <= cpuid_cache_info_t::MAX_CACHE_LEVEL; ++l) { |
      kmp_hw_t cache_type = cpuid_cache_info_t::get_topology_type(l);
      if (__kmp_topology->get_equivalent_type(cache_type) != cache_type)
3039 | continue; |
3040 | for (int i = 0; i < num_hw_threads; ++i) { |
        int original_idx = __kmp_topology->at(i).original_idx;
3042 | ids[i] = kmp_hw_thread_t::UNKNOWN_ID; |
3043 | const cpuid_cache_info_t::info_t &info = |
            cache_info[original_idx].get_level(l);
3045 | // if cache level not in topology for this processor, then skip |
3046 | if (info.level == 0) |
3047 | continue; |
3048 | ids[i] = info.mask & proc_info[original_idx].apic_id; |
3049 | } |
      __kmp_topology->insert_layer(cache_type, ids);
3051 | } |
3052 | } |
3053 | |
3054 | if (!__kmp_topology->check_ids()) { |
    kmp_topology_t::deallocate(__kmp_topology);
3056 | __kmp_topology = nullptr; |
3057 | *msg_id = kmp_i18n_str_x2ApicIDsNotUnique; |
3058 | __kmp_free(proc_info); |
3059 | return false; |
3060 | } |
3061 | __kmp_free(proc_info); |
3062 | return true; |
3063 | } |
3064 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
3065 | |
3066 | #define osIdIndex 0 |
3067 | #define threadIdIndex 1 |
3068 | #define coreIdIndex 2 |
3069 | #define pkgIdIndex 3 |
3070 | #define nodeIdIndex 4 |
3071 | |
3072 | typedef unsigned *ProcCpuInfo; |
3073 | static unsigned maxIndex = pkgIdIndex; |
3074 | |
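// Compare two cpuinfo records field by field, most significant index first
// (node_<n> ids, then pkgId, then coreId, then threadId, finally osId): two
// records with equal pkgId are ordered by coreId, and so on, so the qsort
// below places the threads of a core, and the cores of a package,
// consecutively.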
3075 | static int __kmp_affinity_cmp_ProcCpuInfo_phys_id(const void *a, |
3076 | const void *b) { |
3077 | unsigned i; |
3078 | const unsigned *aa = *(unsigned *const *)a; |
3079 | const unsigned *bb = *(unsigned *const *)b; |
3080 | for (i = maxIndex;; i--) { |
3081 | if (aa[i] < bb[i]) |
3082 | return -1; |
3083 | if (aa[i] > bb[i]) |
3084 | return 1; |
3085 | if (i == osIdIndex) |
3086 | break; |
3087 | } |
3088 | return 0; |
3089 | } |
3090 | |
3091 | #if KMP_USE_HIER_SCHED |
3092 | // Set the array sizes for the hierarchy layers |
3093 | static void __kmp_dispatch_set_hierarchy_values() { |
3094 | // Set the maximum number of L1's to number of cores |
3095 | // Set the maximum number of L2's to either number of cores / 2 for |
  // Intel(R) Xeon Phi(TM) coprocessor formerly codenamed Knights Landing
3097 | // Or the number of cores for Intel(R) Xeon(R) processors |
3098 | // Set the maximum number of NUMA nodes and L3's to number of packages |
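  // E.g. (hypothetical 2-socket box, 18 cores/socket, 2-way SMT): the THREAD
  // layer gets 2 * 18 * 2 = 72 units and the L3/NUMA layers get 2 each.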
3099 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1] = |
3100 | nPackages * nCoresPerPkg * __kmp_nThreadsPerCore; |
3101 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L1 + 1] = __kmp_ncores; |
3102 | #if KMP_ARCH_X86_64 && \ |
3103 | (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \ |
3104 | KMP_OS_WINDOWS) && \ |
3105 | KMP_MIC_SUPPORTED |
3106 | if (__kmp_mic_type >= mic3) |
3107 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores / 2; |
3108 | else |
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || ...) && KMP_MIC_SUPPORTED
3110 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L2 + 1] = __kmp_ncores; |
3111 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_L3 + 1] = nPackages; |
3112 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_NUMA + 1] = nPackages; |
3113 | __kmp_hier_max_units[kmp_hier_layer_e::LAYER_LOOP + 1] = 1; |
3114 | // Set the number of threads per unit |
3115 | // Number of hardware threads per L1/L2/L3/NUMA/LOOP |
3116 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_THREAD + 1] = 1; |
3117 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L1 + 1] = |
3118 | __kmp_nThreadsPerCore; |
3119 | #if KMP_ARCH_X86_64 && \ |
3120 | (KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \ |
3121 | KMP_OS_WINDOWS) && \ |
3122 | KMP_MIC_SUPPORTED |
3123 | if (__kmp_mic_type >= mic3) |
3124 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] = |
3125 | 2 * __kmp_nThreadsPerCore; |
3126 | else |
#endif // KMP_ARCH_X86_64 && (KMP_OS_LINUX || ...) && KMP_MIC_SUPPORTED
3128 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L2 + 1] = |
3129 | __kmp_nThreadsPerCore; |
3130 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_L3 + 1] = |
3131 | nCoresPerPkg * __kmp_nThreadsPerCore; |
3132 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_NUMA + 1] = |
3133 | nCoresPerPkg * __kmp_nThreadsPerCore; |
3134 | __kmp_hier_threads_per[kmp_hier_layer_e::LAYER_LOOP + 1] = |
3135 | nPackages * nCoresPerPkg * __kmp_nThreadsPerCore; |
3136 | } |
3137 | |
3138 | // Return the index into the hierarchy for this tid and layer type (L1, L2, etc) |
3139 | // i.e., this thread's L1 or this thread's L2, etc. |
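// For example (hypothetical): with 2 threads per L1 (__kmp_hier_threads_per)
// and 8 L1 units (__kmp_hier_max_units), tid 5 maps to L1 index
// (5 / 2) % 8 = 2.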
3140 | int __kmp_dispatch_get_index(int tid, kmp_hier_layer_e type) { |
3141 | int index = type + 1; |
3142 | int num_hw_threads = __kmp_hier_max_units[kmp_hier_layer_e::LAYER_THREAD + 1]; |
3143 | KMP_DEBUG_ASSERT(type != kmp_hier_layer_e::LAYER_LAST); |
3144 | if (type == kmp_hier_layer_e::LAYER_THREAD) |
3145 | return tid; |
3146 | else if (type == kmp_hier_layer_e::LAYER_LOOP) |
3147 | return 0; |
3148 | KMP_DEBUG_ASSERT(__kmp_hier_max_units[index] != 0); |
3149 | if (tid >= num_hw_threads) |
3150 | tid = tid % num_hw_threads; |
3151 | return (tid / __kmp_hier_threads_per[index]) % __kmp_hier_max_units[index]; |
3152 | } |
3153 | |
3154 | // Return the number of t1's per t2 |
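// e.g. (hypothetical): with 2 threads per L1 and 16 threads per L3, there
// are 16 / 2 = 8 L1's per L3.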
3155 | int __kmp_dispatch_get_t1_per_t2(kmp_hier_layer_e t1, kmp_hier_layer_e t2) { |
3156 | int i1 = t1 + 1; |
3157 | int i2 = t2 + 1; |
3158 | KMP_DEBUG_ASSERT(i1 <= i2); |
3159 | KMP_DEBUG_ASSERT(t1 != kmp_hier_layer_e::LAYER_LAST); |
3160 | KMP_DEBUG_ASSERT(t2 != kmp_hier_layer_e::LAYER_LAST); |
3161 | KMP_DEBUG_ASSERT(__kmp_hier_threads_per[i1] != 0); |
  // threads_per[i2] / threads_per[i1] == number of t1 units per t2 unit
3163 | return __kmp_hier_threads_per[i2] / __kmp_hier_threads_per[i1]; |
3164 | } |
3165 | #endif // KMP_USE_HIER_SCHED |
3166 | |
3167 | static inline const char *__kmp_cpuinfo_get_filename() { |
3168 | const char *filename; |
3169 | if (__kmp_cpuinfo_file != nullptr) |
3170 | filename = __kmp_cpuinfo_file; |
3171 | else |
    filename = "/proc/cpuinfo";
3173 | return filename; |
3174 | } |
3175 | |
3176 | static inline const char *__kmp_cpuinfo_get_envvar() { |
3177 | const char *envvar = nullptr; |
3178 | if (__kmp_cpuinfo_file != nullptr) |
    envvar = "KMP_CPUINFO_FILE";
3180 | return envvar; |
3181 | } |
3182 | |
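// Derive a package id for threadInfo[idx] from
// /sys/devices/system/cpu/cpu<n>/topology/core_siblings_list, a cpu list
// such as "0-3,8-11" naming every cpu in the same package. All cpus in the
// list receive the same synthetic package id; contradictory sibling lists
// cause a failure return.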
3183 | static bool __kmp_package_id_from_core_siblings_list(unsigned **threadInfo, |
3184 | unsigned num_avail, |
3185 | unsigned idx) { |
3186 | if (!KMP_AFFINITY_CAPABLE()) |
3187 | return false; |
3188 | |
3189 | char path[256]; |
  KMP_SNPRINTF(path, sizeof(path),
               "/sys/devices/system/cpu/cpu%u/topology/core_siblings_list",
               threadInfo[idx][osIdIndex]);
3193 | kmp_affin_mask_t *siblings = __kmp_parse_cpu_list(path); |
3194 | for (unsigned i = 0; i < num_avail; ++i) { |
3195 | unsigned cpu_id = threadInfo[i][osIdIndex]; |
3196 | KMP_ASSERT(cpu_id < __kmp_affin_mask_size * CHAR_BIT); |
3197 | if (!KMP_CPU_ISSET(cpu_id, siblings)) |
3198 | continue; |
3199 | if (threadInfo[i][pkgIdIndex] == UINT_MAX) { |
      // Arbitrarily pick the first index we encounter; it only matters that
      // the value is the same for all siblings.
3202 | threadInfo[i][pkgIdIndex] = idx; |
3203 | } else if (threadInfo[i][pkgIdIndex] != idx) { |
3204 | // Contradictory sibling lists. |
3205 | KMP_CPU_FREE(siblings); |
3206 | return false; |
3207 | } |
3208 | } |
3209 | KMP_ASSERT(threadInfo[idx][pkgIdIndex] != UINT_MAX); |
3210 | KMP_CPU_FREE(siblings); |
3211 | return true; |
3212 | } |
3213 | |
3214 | // Parse /proc/cpuinfo (or an alternate file in the same format) to obtain the |
3215 | // affinity map. On AIX, the map is obtained through system SRAD (Scheduler |
3216 | // Resource Allocation Domain). |
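// The fields recognized below are "processor", "physical id", "core id",
// "thread id", and "node_<n> id", with records separated by blank lines;
// a minimal record might look like (illustrative only):
//   processor   : 0
//   physical id : 0
//   core id     : 0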
3217 | static bool __kmp_affinity_create_cpuinfo_map(int *line, |
3218 | kmp_i18n_id_t *const msg_id) { |
3219 | *msg_id = kmp_i18n_null; |
3220 | |
3221 | #if KMP_OS_AIX |
3222 | unsigned num_records = __kmp_xproc; |
3223 | #else |
3224 | const char *filename = __kmp_cpuinfo_get_filename(); |
3225 | const char *envvar = __kmp_cpuinfo_get_envvar(); |
3226 | |
3227 | if (__kmp_affinity.flags.verbose) { |
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", filename);
3229 | } |
3230 | |
  kmp_safe_raii_file_t f(filename, "r", envvar);
3232 | |
  // Scan the file once, counting the number of "processor" (osId) fields and
  // finding the highest value of <n> for any node_<n> field.
3235 | char buf[256]; |
3236 | unsigned num_records = 0; |
  while (!feof(f)) {
3238 | buf[sizeof(buf) - 1] = 1; |
    if (!fgets(buf, sizeof(buf), f)) {
3240 | // Read errors presumably because of EOF |
3241 | break; |
3242 | } |
3243 | |
    char s1[] = "processor";
    if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
3246 | num_records++; |
3247 | continue; |
3248 | } |
3249 | |
3250 | // FIXME - this will match "node_<n> <garbage>" |
3251 | unsigned level; |
    if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
      // validate the input first:
3254 | if (level > (unsigned)__kmp_xproc) { // level is too big |
3255 | level = __kmp_xproc; |
3256 | } |
3257 | if (nodeIdIndex + level >= maxIndex) { |
3258 | maxIndex = nodeIdIndex + level; |
3259 | } |
3260 | continue; |
3261 | } |
3262 | } |
3263 | |
3264 | // Check for empty file / no valid processor records, or too many. The number |
3265 | // of records can't exceed the number of valid bits in the affinity mask. |
3266 | if (num_records == 0) { |
3267 | *msg_id = kmp_i18n_str_NoProcRecords; |
3268 | return false; |
3269 | } |
3270 | if (num_records > (unsigned)__kmp_xproc) { |
3271 | *msg_id = kmp_i18n_str_TooManyProcRecords; |
3272 | return false; |
3273 | } |
3274 | |
3275 | // Set the file pointer back to the beginning, so that we can scan the file |
3276 | // again, this time performing a full parse of the data. Allocate a vector of |
3277 | // ProcCpuInfo object, where we will place the data. Adding an extra element |
3278 | // at the end allows us to remove a lot of extra checks for termination |
3279 | // conditions. |
  if (fseek(f, 0, SEEK_SET) != 0) {
3281 | *msg_id = kmp_i18n_str_CantRewindCpuinfo; |
3282 | return false; |
3283 | } |
3284 | #endif // KMP_OS_AIX |
3285 | |
3286 | // Allocate the array of records to store the proc info in. The dummy |
3287 | // element at the end makes the logic in filling them out easier to code. |
3288 | unsigned **threadInfo = |
3289 | (unsigned **)__kmp_allocate((num_records + 1) * sizeof(unsigned *)); |
3290 | unsigned i; |
3291 | for (i = 0; i <= num_records; i++) { |
3292 | threadInfo[i] = |
3293 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3294 | } |
3295 | |
3296 | #define CLEANUP_THREAD_INFO \ |
3297 | for (i = 0; i <= num_records; i++) { \ |
3298 | __kmp_free(threadInfo[i]); \ |
3299 | } \ |
3300 | __kmp_free(threadInfo); |
3301 | |
3302 | // A value of UINT_MAX means that we didn't find the field |
3303 | unsigned __index; |
3304 | |
3305 | #define INIT_PROC_INFO(p) \ |
3306 | for (__index = 0; __index <= maxIndex; __index++) { \ |
3307 | (p)[__index] = UINT_MAX; \ |
3308 | } |
3309 | |
3310 | for (i = 0; i <= num_records; i++) { |
3311 | INIT_PROC_INFO(threadInfo[i]); |
3312 | } |
3313 | |
3314 | #if KMP_OS_AIX |
3315 | int smt_threads; |
3316 | lpar_info_format1_t cpuinfo; |
3317 | unsigned num_avail = __kmp_xproc; |
3318 | |
3319 | if (__kmp_affinity.flags.verbose) |
    KMP_INFORM(AffParseFilename, "KMP_AFFINITY", "system info for topology");
3321 | |
3322 | // Get the number of SMT threads per core. |
3323 | smt_threads = syssmt(GET_NUMBER_SMT_SETS, 0, 0, NULL); |
3324 | |
  // Allocate a resource set containing available system resources.
3326 | rsethandle_t sys_rset = rs_alloc(RS_SYSTEM); |
3327 | if (sys_rset == NULL) { |
3328 | CLEANUP_THREAD_INFO; |
3329 | *msg_id = kmp_i18n_str_UnknownTopology; |
3330 | return false; |
3331 | } |
3332 | // Allocate a resource set for the SRAD info. |
3333 | rsethandle_t srad = rs_alloc(RS_EMPTY); |
3334 | if (srad == NULL) { |
3335 | rs_free(sys_rset); |
3336 | CLEANUP_THREAD_INFO; |
3337 | *msg_id = kmp_i18n_str_UnknownTopology; |
3338 | return false; |
3339 | } |
3340 | |
3341 | // Get the SRAD system detail level. |
3342 | int sradsdl = rs_getinfo(NULL, R_SRADSDL, 0); |
3343 | if (sradsdl < 0) { |
3344 | rs_free(sys_rset); |
3345 | rs_free(srad); |
3346 | CLEANUP_THREAD_INFO; |
3347 | *msg_id = kmp_i18n_str_UnknownTopology; |
3348 | return false; |
3349 | } |
3350 | // Get the number of RADs at that SRAD SDL. |
3351 | int num_rads = rs_numrads(sys_rset, sradsdl, 0); |
3352 | if (num_rads < 0) { |
3353 | rs_free(sys_rset); |
3354 | rs_free(srad); |
3355 | CLEANUP_THREAD_INFO; |
3356 | *msg_id = kmp_i18n_str_UnknownTopology; |
3357 | return false; |
3358 | } |
3359 | |
3360 | // Get the maximum number of procs that may be contained in a resource set. |
3361 | int max_procs = rs_getinfo(NULL, R_MAXPROCS, 0); |
3362 | if (max_procs < 0) { |
3363 | rs_free(sys_rset); |
3364 | rs_free(srad); |
3365 | CLEANUP_THREAD_INFO; |
3366 | *msg_id = kmp_i18n_str_UnknownTopology; |
3367 | return false; |
3368 | } |
3369 | |
3370 | int cur_rad = 0; |
3371 | int num_set = 0; |
3372 | for (int srad_idx = 0; cur_rad < num_rads && srad_idx < VMI_MAXRADS; |
3373 | ++srad_idx) { |
3374 | // Check if the SRAD is available in the RSET. |
3375 | if (rs_getrad(sys_rset, srad, sradsdl, srad_idx, 0) < 0) |
3376 | continue; |
3377 | |
3378 | for (int cpu = 0; cpu < max_procs; cpu++) { |
3379 | // Set the info for the cpu if it is in the SRAD. |
3380 | if (rs_op(RS_TESTRESOURCE, srad, NULL, R_PROCS, cpu)) { |
3381 | threadInfo[cpu][osIdIndex] = cpu; |
3382 | threadInfo[cpu][pkgIdIndex] = cur_rad; |
3383 | threadInfo[cpu][coreIdIndex] = cpu / smt_threads; |
3384 | ++num_set; |
3385 | if (num_set >= num_avail) { |
3386 | // Done if all available CPUs have been set. |
3387 | break; |
3388 | } |
3389 | } |
3390 | } |
3391 | ++cur_rad; |
3392 | } |
3393 | rs_free(sys_rset); |
3394 | rs_free(srad); |
3395 | |
3396 | // The topology is already sorted. |
3397 | |
3398 | #else // !KMP_OS_AIX |
3399 | unsigned num_avail = 0; |
3400 | *line = 0; |
3401 | #if KMP_ARCH_S390X |
3402 | bool reading_s390x_sys_info = true; |
3403 | #endif |
  while (!feof(f)) {
3405 | // Create an inner scoping level, so that all the goto targets at the end of |
3406 | // the loop appear in an outer scoping level. This avoids warnings about |
3407 | // jumping past an initialization to a target in the same block. |
3408 | { |
3409 | buf[sizeof(buf) - 1] = 1; |
3410 | bool long_line = false; |
      if (!fgets(buf, sizeof(buf), f)) {
        // Read errors presumably because of EOF
        // If there is valid data in threadInfo[num_avail], then fake
        // a blank line to ensure that the last address gets parsed.
3415 | bool valid = false; |
3416 | for (i = 0; i <= maxIndex; i++) { |
3417 | if (threadInfo[num_avail][i] != UINT_MAX) { |
3418 | valid = true; |
3419 | } |
3420 | } |
3421 | if (!valid) { |
3422 | break; |
3423 | } |
3424 | buf[0] = 0; |
3425 | } else if (!buf[sizeof(buf) - 1]) { |
3426 | // The line is longer than the buffer. Set a flag and don't |
3427 | // emit an error if we were going to ignore the line, anyway. |
3428 | long_line = true; |
3429 | |
3430 | #define CHECK_LINE \ |
3431 | if (long_line) { \ |
3432 | CLEANUP_THREAD_INFO; \ |
3433 | *msg_id = kmp_i18n_str_LongLineCpuinfo; \ |
3434 | return false; \ |
3435 | } |
3436 | } |
3437 | (*line)++; |
3438 | |
3439 | #if KMP_ARCH_LOONGARCH64 |
3440 | // The parsing logic of /proc/cpuinfo in this function highly depends on |
3441 | // the blank lines between each processor info block. But on LoongArch a |
3442 | // blank line exists before the first processor info block (i.e. after the |
3443 | // "system type" line). This blank line was added because the "system |
3444 | // type" line is unrelated to any of the CPUs. We must skip this line so |
3445 | // that the original logic works on LoongArch. |
3446 | if (*buf == '\n' && *line == 2) |
3447 | continue; |
3448 | #endif |
3449 | #if KMP_ARCH_S390X |
3450 | // s390x /proc/cpuinfo starts with a variable number of lines containing |
3451 | // the overall system information. Skip them. |
3452 | if (reading_s390x_sys_info) { |
3453 | if (*buf == '\n') |
3454 | reading_s390x_sys_info = false; |
3455 | continue; |
3456 | } |
3457 | #endif |
3458 | |
3459 | #if KMP_ARCH_S390X |
      char s1[] = "cpu number";
#else
      char s1[] = "processor";
3463 | #endif |
      if (strncmp(buf, s1, sizeof(s1) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s1) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3469 | goto no_val; |
3470 | if (threadInfo[num_avail][osIdIndex] != UINT_MAX) |
3471 | #if KMP_ARCH_AARCH64 |
        // Handle the old AArch64 /proc/cpuinfo layout differently: it lists
        // all of the 'processor' entries in a single 'Processor' section, so
        // the normal duplicate check in that section would always fail.
3476 | num_avail++; |
3477 | #else |
3478 | goto dup_field; |
3479 | #endif |
3480 | threadInfo[num_avail][osIdIndex] = val; |
3481 | #if KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64) |
3482 | char path[256]; |
        KMP_SNPRINTF(
            path, sizeof(path),
            "/sys/devices/system/cpu/cpu%u/topology/physical_package_id",
            threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][pkgIdIndex]);
3488 | |
3489 | #if KMP_ARCH_S390X |
3490 | // Disambiguate physical_package_id. |
3491 | unsigned book_id; |
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/book_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &book_id);
3496 | threadInfo[num_avail][pkgIdIndex] |= (book_id << 8); |
3497 | |
3498 | unsigned drawer_id; |
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/drawer_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &drawer_id);
3503 | threadInfo[num_avail][pkgIdIndex] |= (drawer_id << 16); |
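        // The composite id is thus drawer_id << 16 | book_id << 8 |
        // physical_package_id, assuming each field fits in 8 bits.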
3504 | #endif |
3505 | |
        KMP_SNPRINTF(path, sizeof(path),
                     "/sys/devices/system/cpu/cpu%u/topology/core_id",
                     threadInfo[num_avail][osIdIndex]);
        __kmp_read_from_file(path, "%u", &threadInfo[num_avail][coreIdIndex]);
3510 | continue; |
3511 | #else |
3512 | } |
      char s2[] = "physical id";
      if (strncmp(buf, s2, sizeof(s2) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s2) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3519 | goto no_val; |
3520 | if (threadInfo[num_avail][pkgIdIndex] != UINT_MAX) |
3521 | goto dup_field; |
3522 | threadInfo[num_avail][pkgIdIndex] = val; |
3523 | continue; |
3524 | } |
      char s3[] = "core id";
      if (strncmp(buf, s3, sizeof(s3) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s3) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3531 | goto no_val; |
3532 | if (threadInfo[num_avail][coreIdIndex] != UINT_MAX) |
3533 | goto dup_field; |
3534 | threadInfo[num_avail][coreIdIndex] = val; |
3535 | continue; |
#endif // KMP_OS_LINUX && !(KMP_ARCH_X86 || KMP_ARCH_X86_64)
3537 | } |
      char s4[] = "thread id";
      if (strncmp(buf, s4, sizeof(s4) - 1) == 0) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3544 | goto no_val; |
3545 | if (threadInfo[num_avail][threadIdIndex] != UINT_MAX) |
3546 | goto dup_field; |
3547 | threadInfo[num_avail][threadIdIndex] = val; |
3548 | continue; |
3549 | } |
3550 | unsigned level; |
      if (KMP_SSCANF(buf, "node_%u id", &level) == 1) {
        CHECK_LINE;
        char *p = strchr(buf + sizeof(s4) - 1, ':');
        unsigned val;
        if ((p == NULL) || (KMP_SSCANF(p + 1, "%u\n", &val) != 1))
3556 | goto no_val; |
3557 | // validate the input before using level: |
3558 | if (level > (unsigned)__kmp_xproc) { // level is too big |
3559 | level = __kmp_xproc; |
3560 | } |
3561 | if (threadInfo[num_avail][nodeIdIndex + level] != UINT_MAX) |
3562 | goto dup_field; |
3563 | threadInfo[num_avail][nodeIdIndex + level] = val; |
3564 | continue; |
3565 | } |
3566 | |
3567 | // We didn't recognize the leading token on the line. There are lots of |
3568 | // leading tokens that we don't recognize - if the line isn't empty, go on |
3569 | // to the next line. |
3570 | if ((*buf != 0) && (*buf != '\n')) { |
3571 | // If the line is longer than the buffer, read characters |
3572 | // until we find a newline. |
3573 | if (long_line) { |
3574 | int ch; |
          while (((ch = fgetc(f)) != EOF) && (ch != '\n'))
3576 | ; |
3577 | } |
3578 | continue; |
3579 | } |
3580 | |
3581 | // A newline has signalled the end of the processor record. |
3582 | // Check that there aren't too many procs specified. |
3583 | if ((int)num_avail == __kmp_xproc) { |
3584 | CLEANUP_THREAD_INFO; |
3585 | *msg_id = kmp_i18n_str_TooManyEntries; |
3586 | return false; |
3587 | } |
3588 | |
3589 | // Check for missing fields. The osId field must be there. The physical |
3590 | // id field will be checked later. |
3591 | if (threadInfo[num_avail][osIdIndex] == UINT_MAX) { |
3592 | CLEANUP_THREAD_INFO; |
3593 | *msg_id = kmp_i18n_str_MissingProcField; |
3594 | return false; |
3595 | } |
3596 | |
3597 | // Skip this proc if it is not included in the machine model. |
3598 | if (KMP_AFFINITY_CAPABLE() && |
3599 | !KMP_CPU_ISSET(threadInfo[num_avail][osIdIndex], |
3600 | __kmp_affin_fullMask)) { |
3601 | INIT_PROC_INFO(threadInfo[num_avail]); |
3602 | continue; |
3603 | } |
3604 | |
3605 | // We have a successful parse of this proc's info. |
3606 | // Increment the counter, and prepare for the next proc. |
3607 | num_avail++; |
3608 | KMP_ASSERT(num_avail <= num_records); |
3609 | INIT_PROC_INFO(threadInfo[num_avail]); |
3610 | } |
3611 | continue; |
3612 | |
3613 | no_val: |
3614 | CLEANUP_THREAD_INFO; |
3615 | *msg_id = kmp_i18n_str_MissingValCpuinfo; |
3616 | return false; |
3617 | |
3618 | dup_field: |
3619 | CLEANUP_THREAD_INFO; |
3620 | *msg_id = kmp_i18n_str_DuplicateFieldCpuinfo; |
3621 | return false; |
3622 | } |
3623 | *line = 0; |
3624 | |
3625 | // At least on powerpc, Linux may return -1 for physical_package_id. Try |
3626 | // to reconstruct topology from core_siblings_list in that case. |
3627 | for (i = 0; i < num_avail; ++i) { |
3628 | if (threadInfo[i][pkgIdIndex] == UINT_MAX) { |
      if (!__kmp_package_id_from_core_siblings_list(threadInfo, num_avail,
                                                    i)) {
3630 | CLEANUP_THREAD_INFO; |
3631 | *msg_id = kmp_i18n_str_MissingPhysicalIDField; |
3632 | return false; |
3633 | } |
3634 | } |
3635 | } |
3636 | |
3637 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3638 | unsigned teamSize = 0; |
3639 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3640 | |
3641 | // check for num_records == __kmp_xproc ??? |
3642 | |
3643 | // If it is configured to omit the package level when there is only a single |
3644 | // package, the logic at the end of this routine won't work if there is only a |
3645 | // single thread |
3646 | KMP_ASSERT(num_avail > 0); |
3647 | KMP_ASSERT(num_avail <= num_records); |
3648 | |
3649 | // Sort the threadInfo table by physical Id. |
  qsort(threadInfo, num_avail, sizeof(*threadInfo),
        __kmp_affinity_cmp_ProcCpuInfo_phys_id);
3652 | |
3653 | #endif // KMP_OS_AIX |
3654 | |
3655 | // The table is now sorted by pkgId / coreId / threadId, but we really don't |
3656 | // know the radix of any of the fields. pkgId's may be sparsely assigned among |
3657 | // the chips on a system. Although coreId's are usually assigned |
3658 | // [0 .. coresPerPkg-1] and threadId's are usually assigned |
3659 | // [0..threadsPerCore-1], we don't want to make any such assumptions. |
3660 | // |
3661 | // For that matter, we don't know what coresPerPkg and threadsPerCore (or the |
3662 | // total # packages) are at this point - we want to determine that now. We |
3663 | // only have an upper bound on the first two figures. |
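  // counts[i] is the number of level-i ids seen under the current parent,
  // maxCt[i] the largest such count, and totals[i] the number of distinct
  // level-i nodes overall. E.g. (hypothetical) 2 packages x 2 cores x 2
  // threads yields totals of 8/4/2 and maxCt of 2/2 for the
  // thread/core(/package) indices.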
3664 | unsigned *counts = |
3665 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3666 | unsigned *maxCt = |
3667 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3668 | unsigned *totals = |
3669 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3670 | unsigned *lastId = |
3671 | (unsigned *)__kmp_allocate((maxIndex + 1) * sizeof(unsigned)); |
3672 | |
3673 | bool assign_thread_ids = false; |
3674 | unsigned threadIdCt; |
3675 | unsigned index; |
3676 | |
3677 | restart_radix_check: |
3678 | threadIdCt = 0; |
3679 | |
3680 | // Initialize the counter arrays with data from threadInfo[0]. |
3681 | if (assign_thread_ids) { |
3682 | if (threadInfo[0][threadIdIndex] == UINT_MAX) { |
3683 | threadInfo[0][threadIdIndex] = threadIdCt++; |
3684 | } else if (threadIdCt <= threadInfo[0][threadIdIndex]) { |
3685 | threadIdCt = threadInfo[0][threadIdIndex] + 1; |
3686 | } |
3687 | } |
3688 | for (index = 0; index <= maxIndex; index++) { |
3689 | counts[index] = 1; |
3690 | maxCt[index] = 1; |
3691 | totals[index] = 1; |
    lastId[index] = threadInfo[0][index];
3694 | } |
3695 | |
3696 | // Run through the rest of the OS procs. |
3697 | for (i = 1; i < num_avail; i++) { |
3698 | // Find the most significant index whose id differs from the id for the |
3699 | // previous OS proc. |
3700 | for (index = maxIndex; index >= threadIdIndex; index--) { |
3701 | if (assign_thread_ids && (index == threadIdIndex)) { |
3702 | // Auto-assign the thread id field if it wasn't specified. |
3703 | if (threadInfo[i][threadIdIndex] == UINT_MAX) { |
3704 | threadInfo[i][threadIdIndex] = threadIdCt++; |
3705 | } |
3706 | // Apparently the thread id field was specified for some entries and not |
3707 | // others. Start the thread id counter off at the next higher thread id. |
3708 | else if (threadIdCt <= threadInfo[i][threadIdIndex]) { |
3709 | threadIdCt = threadInfo[i][threadIdIndex] + 1; |
3710 | } |
3711 | } |
3712 | if (threadInfo[i][index] != lastId[index]) { |
3713 | // Run through all indices which are less significant, and reset the |
3714 | // counts to 1. At all levels up to and including index, we need to |
3715 | // increment the totals and record the last id. |
3716 | unsigned index2; |
3717 | for (index2 = threadIdIndex; index2 < index; index2++) { |
3718 | totals[index2]++; |
3719 | if (counts[index2] > maxCt[index2]) { |
3720 | maxCt[index2] = counts[index2]; |
3721 | } |
3722 | counts[index2] = 1; |
3723 | lastId[index2] = threadInfo[i][index2]; |
3724 | } |
3725 | counts[index]++; |
3726 | totals[index]++; |
3727 | lastId[index] = threadInfo[i][index]; |
3728 | |
3729 | if (assign_thread_ids && (index > threadIdIndex)) { |
3730 | |
3731 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3732 | // The default team size is the total #threads in the machine |
3733 | // minus 1 thread for every core that has 3 or more threads. |
3734 | teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1); |
3735 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3736 | |
3737 | // Restart the thread counter, as we are on a new core. |
3738 | threadIdCt = 0; |
3739 | |
3740 | // Auto-assign the thread id field if it wasn't specified. |
3741 | if (threadInfo[i][threadIdIndex] == UINT_MAX) { |
3742 | threadInfo[i][threadIdIndex] = threadIdCt++; |
3743 | } |
3744 | |
3745 | // Apparently the thread id field was specified for some entries and |
3746 | // not others. Start the thread id counter off at the next higher |
3747 | // thread id. |
3748 | else if (threadIdCt <= threadInfo[i][threadIdIndex]) { |
3749 | threadIdCt = threadInfo[i][threadIdIndex] + 1; |
3750 | } |
3751 | } |
3752 | break; |
3753 | } |
3754 | } |
3755 | if (index < threadIdIndex) { |
3756 | // If thread ids were specified, it is an error if they are not unique. |
    // Also, check that we haven't already restarted the loop (to be safe -
3758 | // shouldn't need to). |
3759 | if ((threadInfo[i][threadIdIndex] != UINT_MAX) || assign_thread_ids) { |
3760 | __kmp_free(lastId); |
3761 | __kmp_free(totals); |
3762 | __kmp_free(maxCt); |
3763 | __kmp_free(counts); |
3764 | CLEANUP_THREAD_INFO; |
3765 | *msg_id = kmp_i18n_str_PhysicalIDsNotUnique; |
3766 | return false; |
3767 | } |
3768 | |
3769 | // If the thread ids were not specified and we see entries that |
3770 | // are duplicates, start the loop over and assign the thread ids manually. |
3771 | assign_thread_ids = true; |
3772 | goto restart_radix_check; |
3773 | } |
3774 | } |
3775 | |
3776 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3777 | // The default team size is the total #threads in the machine |
3778 | // minus 1 thread for every core that has 3 or more threads. |
3779 | teamSize += (threadIdCt <= 2) ? (threadIdCt) : (threadIdCt - 1); |
3780 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3781 | |
3782 | for (index = threadIdIndex; index <= maxIndex; index++) { |
3783 | if (counts[index] > maxCt[index]) { |
3784 | maxCt[index] = counts[index]; |
3785 | } |
3786 | } |
3787 | |
3788 | __kmp_nThreadsPerCore = maxCt[threadIdIndex]; |
3789 | nCoresPerPkg = maxCt[coreIdIndex]; |
3790 | nPackages = totals[pkgIdIndex]; |
3791 | |
3792 | // When affinity is off, this routine will still be called to set |
3793 | // __kmp_ncores, as well as __kmp_nThreadsPerCore, nCoresPerPkg, & nPackages. |
3794 | // Make sure all these vars are set correctly, and return now if affinity is |
3795 | // not enabled. |
3796 | __kmp_ncores = totals[coreIdIndex]; |
3797 | if (!KMP_AFFINITY_CAPABLE()) { |
3798 | KMP_ASSERT(__kmp_affinity.type == affinity_none); |
3799 | return true; |
3800 | } |
3801 | |
3802 | #if KMP_MIC && REDUCE_TEAM_SIZE |
3803 | // Set the default team size. |
3804 | if ((__kmp_dflt_team_nth == 0) && (teamSize > 0)) { |
3805 | __kmp_dflt_team_nth = teamSize; |
3806 | KA_TRACE(20, ("__kmp_affinity_create_cpuinfo_map: setting " |
3807 | "__kmp_dflt_team_nth = %d\n" , |
3808 | __kmp_dflt_team_nth)); |
3809 | } |
3810 | #endif // KMP_MIC && REDUCE_TEAM_SIZE |
3811 | |
3812 | KMP_DEBUG_ASSERT(num_avail == (unsigned)__kmp_avail_proc); |
3813 | |
3814 | // Count the number of levels which have more nodes at that level than at the |
3815 | // parent's level (with there being an implicit root node of the top level). |
3816 | // This is equivalent to saying that there is at least one node at this level |
3817 | // which has a sibling. These levels are in the map, and the package level is |
3818 | // always in the map. |
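  // E.g. (hypothetical): if the file had node_<n> fields but only one node,
  // totals for that level would be 1 and the node level would be dropped;
  // the package, core, and thread levels are forced into the map regardless.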
3819 | bool *inMap = (bool *)__kmp_allocate((maxIndex + 1) * sizeof(bool)); |
3820 | for (index = threadIdIndex; index < maxIndex; index++) { |
3821 | KMP_ASSERT(totals[index] >= totals[index + 1]); |
3822 | inMap[index] = (totals[index] > totals[index + 1]); |
3823 | } |
3824 | inMap[maxIndex] = (totals[maxIndex] > 1); |
3825 | inMap[pkgIdIndex] = true; |
3826 | inMap[coreIdIndex] = true; |
3827 | inMap[threadIdIndex] = true; |
3828 | |
3829 | int depth = 0; |
3830 | int idx = 0; |
3831 | kmp_hw_t types[KMP_HW_LAST]; |
3832 | int pkgLevel = -1; |
3833 | int coreLevel = -1; |
3834 | int threadLevel = -1; |
3835 | for (index = threadIdIndex; index <= maxIndex; index++) { |
3836 | if (inMap[index]) { |
3837 | depth++; |
3838 | } |
3839 | } |
3840 | if (inMap[pkgIdIndex]) { |
3841 | pkgLevel = idx; |
3842 | types[idx++] = KMP_HW_SOCKET; |
3843 | } |
3844 | if (inMap[coreIdIndex]) { |
3845 | coreLevel = idx; |
3846 | types[idx++] = KMP_HW_CORE; |
3847 | } |
3848 | if (inMap[threadIdIndex]) { |
3849 | threadLevel = idx; |
3850 | types[idx++] = KMP_HW_THREAD; |
3851 | } |
3852 | KMP_ASSERT(depth > 0); |
3853 | |
3854 | // Construct the data structure that is to be returned. |
  __kmp_topology = kmp_topology_t::allocate(num_avail, depth, types);
3856 | |
3857 | for (i = 0; i < num_avail; ++i) { |
3858 | unsigned os = threadInfo[i][osIdIndex]; |
3859 | int src_index; |
    kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
3861 | hw_thread.clear(); |
3862 | hw_thread.os_id = os; |
3863 | hw_thread.original_idx = i; |
3864 | |
3865 | idx = 0; |
3866 | for (src_index = maxIndex; src_index >= threadIdIndex; src_index--) { |
3867 | if (!inMap[src_index]) { |
3868 | continue; |
3869 | } |
3870 | if (src_index == pkgIdIndex) { |
3871 | hw_thread.ids[pkgLevel] = threadInfo[i][src_index]; |
3872 | } else if (src_index == coreIdIndex) { |
3873 | hw_thread.ids[coreLevel] = threadInfo[i][src_index]; |
3874 | } else if (src_index == threadIdIndex) { |
3875 | hw_thread.ids[threadLevel] = threadInfo[i][src_index]; |
3876 | } |
3877 | } |
3878 | } |
3879 | |
3880 | __kmp_free(inMap); |
3881 | __kmp_free(lastId); |
3882 | __kmp_free(totals); |
3883 | __kmp_free(maxCt); |
3884 | __kmp_free(counts); |
3885 | CLEANUP_THREAD_INFO; |
3886 | __kmp_topology->sort_ids(); |
3887 | |
  int tlevel = __kmp_topology->get_level(KMP_HW_THREAD);
3889 | if (tlevel > 0) { |
3890 | // If the thread level does not have ids, then put them in. |
    if (__kmp_topology->at(0).ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID) {
      __kmp_topology->at(0).ids[tlevel] = 0;
    }
    for (int i = 1; i < __kmp_topology->get_num_hw_threads(); ++i) {
      kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
      if (hw_thread.ids[tlevel] != kmp_hw_thread_t::UNKNOWN_ID)
        continue;
      kmp_hw_thread_t &prev_hw_thread = __kmp_topology->at(i - 1);
3899 | // Check if socket, core, anything above thread level changed. |
3900 | // If the ids did change, then restart thread id at 0 |
3901 | // Otherwise, set thread id to prev thread's id + 1 |
3902 | for (int j = 0; j < tlevel; ++j) { |
3903 | if (hw_thread.ids[j] != prev_hw_thread.ids[j]) { |
3904 | hw_thread.ids[tlevel] = 0; |
3905 | break; |
3906 | } |
3907 | } |
3908 | if (hw_thread.ids[tlevel] == kmp_hw_thread_t::UNKNOWN_ID) |
3909 | hw_thread.ids[tlevel] = prev_hw_thread.ids[tlevel] + 1; |
3910 | } |
3911 | } |
3912 | |
3913 | if (!__kmp_topology->check_ids()) { |
    kmp_topology_t::deallocate(__kmp_topology);
3915 | __kmp_topology = nullptr; |
3916 | *msg_id = kmp_i18n_str_PhysicalIDsNotUnique; |
3917 | return false; |
3918 | } |
3919 | return true; |
3920 | } |
3921 | |
3922 | // Create and return a table of affinity masks, indexed by OS thread ID. |
3923 | // This routine handles OR'ing together all the affinity masks of threads |
3924 | // that are sufficiently close, if granularity > fine. |
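// E.g. (hypothetical): with granularity=core on a machine with 2 SMT threads
// per core, os ids 0 and 1 (one core) both map to the mask {0,1}, so a
// thread bound through either entry may float between the core's two
// hardware threads.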
3925 | template <typename FindNextFunctionType> |
3926 | static void __kmp_create_os_id_masks(unsigned *numUnique, |
3927 | kmp_affinity_t &affinity, |
3928 | FindNextFunctionType find_next) { |
3929 | // First form a table of affinity masks in order of OS thread id. |
3930 | int maxOsId; |
3931 | int i; |
3932 | int numAddrs = __kmp_topology->get_num_hw_threads(); |
3933 | int depth = __kmp_topology->get_depth(); |
3934 | const char *env_var = __kmp_get_affinity_env_var(affinity); |
3935 | KMP_ASSERT(numAddrs); |
3936 | KMP_ASSERT(depth); |
3937 | |
3938 | i = find_next(-1); |
  // If we could not find a HW thread location satisfying the find_next
  // conditions, then return and fall back to the increment-based find_next.
3941 | if (i >= numAddrs) |
3942 | return; |
3943 | |
3944 | maxOsId = 0; |
3945 | for (i = numAddrs - 1;; --i) { |
    int osId = __kmp_topology->at(i).os_id;
3947 | if (osId > maxOsId) { |
3948 | maxOsId = osId; |
3949 | } |
3950 | if (i == 0) |
3951 | break; |
3952 | } |
3953 | affinity.num_os_id_masks = maxOsId + 1; |
3954 | KMP_CPU_ALLOC_ARRAY(affinity.os_id_masks, affinity.num_os_id_masks); |
3955 | KMP_ASSERT(affinity.gran_levels >= 0); |
3956 | if (affinity.flags.verbose && (affinity.gran_levels > 0)) { |
3957 | KMP_INFORM(ThreadsMigrate, env_var, affinity.gran_levels); |
3958 | } |
3959 | if (affinity.gran_levels >= (int)depth) { |
3960 | KMP_AFF_WARNING(affinity, AffThreadsMayMigrate); |
3961 | } |
3962 | |
3963 | // Run through the table, forming the masks for all threads on each core. |
3964 | // Threads on the same core will have identical kmp_hw_thread_t objects, not |
3965 | // considering the last level, which must be the thread id. All threads on a |
3966 | // core will appear consecutively. |
3967 | int unique = 0; |
3968 | int j = 0; // index of 1st thread on core |
3969 | int leader = 0; |
3970 | kmp_affin_mask_t *sum; |
3971 | KMP_CPU_ALLOC_ON_STACK(sum); |
3972 | KMP_CPU_ZERO(sum); |
3973 | |
3974 | i = j = leader = find_next(-1); |
3975 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum); |
3976 | kmp_full_mask_modifier_t full_mask; |
3977 | for (i = find_next(i); i < numAddrs; i = find_next(i)) { |
3978 | // If this thread is sufficiently close to the leader (within the |
3979 | // granularity setting), then set the bit for this os thread in the |
3980 | // affinity mask for this group, and go on to the next thread. |
    if (__kmp_topology->is_close(leader, i, affinity)) {
3982 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum); |
3983 | continue; |
3984 | } |
3985 | |
3986 | // For every thread in this group, copy the mask to the thread's entry in |
3987 | // the OS Id mask table. Mark the first address as a leader. |
3988 | for (; j < i; j = find_next(j)) { |
      int osId = __kmp_topology->at(j).os_id;
      KMP_DEBUG_ASSERT(osId <= maxOsId);
      kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
      KMP_CPU_COPY(mask, sum);
      __kmp_topology->at(j).leader = (j == leader);
3994 | } |
3995 | unique++; |
3996 | |
3997 | // Start a new mask. |
3998 | leader = i; |
    full_mask.include(sum);
4000 | KMP_CPU_ZERO(sum); |
4001 | KMP_CPU_SET(__kmp_topology->at(i).os_id, sum); |
4002 | } |
4003 | |
4004 | // For every thread in last group, copy the mask to the thread's |
4005 | // entry in the OS Id mask table. |
4006 | for (; j < i; j = find_next(j)) { |
    int osId = __kmp_topology->at(j).os_id;
    KMP_DEBUG_ASSERT(osId <= maxOsId);
    kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.os_id_masks, osId);
    KMP_CPU_COPY(mask, sum);
    __kmp_topology->at(j).leader = (j == leader);
4012 | } |
  full_mask.include(sum);
4014 | unique++; |
4015 | KMP_CPU_FREE_FROM_STACK(sum); |
4016 | |
4017 | // See if the OS Id mask table further restricts or changes the full mask |
4018 | if (full_mask.restrict_to_mask() && affinity.flags.verbose) { |
4019 | __kmp_topology->print(env_var); |
4020 | } |
4021 | |
4022 | *numUnique = unique; |
4023 | } |
4024 | |
4025 | // Stuff for the affinity proclist parsers. It's easier to declare these vars |
4026 | // as file-static than to try and pass them through the calling sequence of |
4027 | // the recursive-descent OMP_PLACES parser. |
4028 | static kmp_affin_mask_t *newMasks; |
4029 | static int numNewMasks; |
4030 | static int nextNewMask; |
4031 | |
4032 | #define ADD_MASK(_mask) \ |
4033 | { \ |
4034 | if (nextNewMask >= numNewMasks) { \ |
4035 | int i; \ |
4036 | numNewMasks *= 2; \ |
4037 | kmp_affin_mask_t *temp; \ |
4038 | KMP_CPU_INTERNAL_ALLOC_ARRAY(temp, numNewMasks); \ |
4039 | for (i = 0; i < numNewMasks / 2; i++) { \ |
4040 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); \ |
4041 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(temp, i); \ |
4042 | KMP_CPU_COPY(dest, src); \ |
4043 | } \ |
4044 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks / 2); \ |
4045 | newMasks = temp; \ |
4046 | } \ |
4047 | KMP_CPU_COPY(KMP_CPU_INDEX(newMasks, nextNewMask), (_mask)); \ |
4048 | nextNewMask++; \ |
4049 | } |
4050 | |
4051 | #define ADD_MASK_OSID(_osId, _osId2Mask, _maxOsId) \ |
4052 | { \ |
4053 | if (((_osId) > _maxOsId) || \ |
4054 | (!KMP_CPU_ISSET((_osId), KMP_CPU_INDEX((_osId2Mask), (_osId))))) { \ |
4055 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, _osId); \ |
4056 | } else { \ |
4057 | ADD_MASK(KMP_CPU_INDEX(_osId2Mask, (_osId))); \ |
4058 | } \ |
4059 | } |
4060 | |
4061 | // Re-parse the proclist (for the explicit affinity type), and form the list |
4062 | // of affinity newMasks indexed by gtid. |
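// E.g. (illustrative): the proclist "0-3:2,{4,5}" yields three masks:
// {0}, {2} (the range 0-3 with stride 2), and the union set {4,5}.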
4063 | static void __kmp_affinity_process_proclist(kmp_affinity_t &affinity) { |
4064 | int i; |
4065 | kmp_affin_mask_t **out_masks = &affinity.masks; |
4066 | unsigned *out_numMasks = &affinity.num_masks; |
4067 | const char *proclist = affinity.proclist; |
4068 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
4069 | int maxOsId = affinity.num_os_id_masks - 1; |
4070 | const char *scan = proclist; |
4071 | const char *next = proclist; |
4072 | |
  // Use the internal allocator for the temporary mask vector; ADD_MASK
  // doubles its size whenever it fills up.
4075 | numNewMasks = 2; |
4076 | KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks); |
4077 | nextNewMask = 0; |
4078 | kmp_affin_mask_t *sumMask; |
4079 | KMP_CPU_ALLOC(sumMask); |
4080 | int setSize = 0; |
4081 | |
4082 | for (;;) { |
4083 | int start, end, stride; |
4084 | |
4085 | SKIP_WS(scan); |
4086 | next = scan; |
4087 | if (*next == '\0') { |
4088 | break; |
4089 | } |
4090 | |
4091 | if (*next == '{') { |
4092 | int num; |
4093 | setSize = 0; |
4094 | next++; // skip '{' |
4095 | SKIP_WS(next); |
4096 | scan = next; |
4097 | |
4098 | // Read the first integer in the set. |
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad proclist");
4100 | SKIP_DIGITS(next); |
      num = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(num >= 0, "bad explicit proc list");
4103 | |
4104 | // Copy the mask for that osId to the sum (union) mask. |
4105 | if ((num > maxOsId) || |
4106 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) { |
4107 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num); |
4108 | KMP_CPU_ZERO(sumMask); |
4109 | } else { |
4110 | KMP_CPU_COPY(sumMask, KMP_CPU_INDEX(osId2Mask, num)); |
4111 | setSize = 1; |
4112 | } |
4113 | |
4114 | for (;;) { |
4115 | // Check for end of set. |
4116 | SKIP_WS(next); |
4117 | if (*next == '}') { |
4118 | next++; // skip '}' |
4119 | break; |
4120 | } |
4121 | |
4122 | // Skip optional comma. |
4123 | if (*next == ',') { |
4124 | next++; |
4125 | } |
4126 | SKIP_WS(next); |
4127 | |
4128 | // Read the next integer in the set. |
4129 | scan = next; |
        KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");

        SKIP_DIGITS(next);
        num = __kmp_str_to_int(scan, *next);
        KMP_ASSERT2(num >= 0, "bad explicit proc list");
4135 | |
4136 | // Add the mask for that osId to the sum mask. |
4137 | if ((num > maxOsId) || |
4138 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) { |
4139 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num); |
4140 | } else { |
4141 | KMP_CPU_UNION(sumMask, KMP_CPU_INDEX(osId2Mask, num)); |
4142 | setSize++; |
4143 | } |
4144 | } |
4145 | if (setSize > 0) { |
4146 | ADD_MASK(sumMask); |
4147 | } |
4148 | |
4149 | SKIP_WS(next); |
4150 | if (*next == ',') { |
4151 | next++; |
4152 | } |
4153 | scan = next; |
4154 | continue; |
4155 | } |
4156 | |
4157 | // Read the first integer. |
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(start >= 0, "bad explicit proc list");
4162 | SKIP_WS(next); |
4163 | |
4164 | // If this isn't a range, then add a mask to the list and go on. |
4165 | if (*next != '-') { |
4166 | ADD_MASK_OSID(start, osId2Mask, maxOsId); |
4167 | |
4168 | // Skip optional comma. |
4169 | if (*next == ',') { |
4170 | next++; |
4171 | } |
4172 | scan = next; |
4173 | continue; |
4174 | } |
4175 | |
4176 | // This is a range. Skip over the '-' and read in the 2nd int. |
4177 | next++; // skip '-' |
4178 | SKIP_WS(next); |
4179 | scan = next; |
    KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
    SKIP_DIGITS(next);
    end = __kmp_str_to_int(scan, *next);
    KMP_ASSERT2(end >= 0, "bad explicit proc list");
4184 | |
4185 | // Check for a stride parameter |
4186 | stride = 1; |
4187 | SKIP_WS(next); |
4188 | if (*next == ':') { |
      // A stride is specified. Skip over the ':' and read the 3rd int.
4190 | int sign = +1; |
4191 | next++; // skip ':' |
4192 | SKIP_WS(next); |
4193 | scan = next; |
4194 | if (*next == '-') { |
4195 | sign = -1; |
4196 | next++; |
4197 | SKIP_WS(next); |
4198 | scan = next; |
4199 | } |
      KMP_ASSERT2((*next >= '0') && (*next <= '9'), "bad explicit proc list");
      SKIP_DIGITS(next);
      stride = __kmp_str_to_int(scan, *next);
      KMP_ASSERT2(stride >= 0, "bad explicit proc list");
4204 | stride *= sign; |
4205 | } |
4206 | |
4207 | // Do some range checks. |
    KMP_ASSERT2(stride != 0, "bad explicit proc list");
    if (stride > 0) {
      KMP_ASSERT2(start <= end, "bad explicit proc list");
    } else {
      KMP_ASSERT2(start >= end, "bad explicit proc list");
    }
    KMP_ASSERT2((end - start) / stride <= 65536, "bad explicit proc list");
4215 | |
4216 | // Add the mask for each OS proc # to the list. |
4217 | if (stride > 0) { |
4218 | do { |
4219 | ADD_MASK_OSID(start, osId2Mask, maxOsId); |
4220 | start += stride; |
4221 | } while (start <= end); |
4222 | } else { |
4223 | do { |
4224 | ADD_MASK_OSID(start, osId2Mask, maxOsId); |
4225 | start += stride; |
4226 | } while (start >= end); |
4227 | } |
4228 | |
4229 | // Skip optional comma. |
4230 | SKIP_WS(next); |
4231 | if (*next == ',') { |
4232 | next++; |
4233 | } |
4234 | scan = next; |
4235 | } |
4236 | |
4237 | *out_numMasks = nextNewMask; |
4238 | if (nextNewMask == 0) { |
4239 | *out_masks = NULL; |
4240 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
4241 | return; |
4242 | } |
4243 | KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask); |
4244 | for (i = 0; i < nextNewMask; i++) { |
4245 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); |
4246 | kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i); |
4247 | KMP_CPU_COPY(dest, src); |
4248 | } |
4249 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
4250 | KMP_CPU_FREE(sumMask); |
4251 | } |
4252 | |
4253 | /*----------------------------------------------------------------------------- |
4254 | Re-parse the OMP_PLACES proc id list, forming the newMasks for the different |
places. Again, here is the grammar:
4256 | |
4257 | place_list := place |
4258 | place_list := place , place_list |
4259 | place := num |
4260 | place := place : num |
4261 | place := place : num : signed |
4262 | place := { subplacelist } |
4263 | place := ! place // (lowest priority) |
4264 | subplace_list := subplace |
4265 | subplace_list := subplace , subplace_list |
4266 | subplace := num |
4267 | subplace := num : num |
4268 | subplace := num : num : signed |
4269 | signed := num |
4270 | signed := + signed |
4271 | signed := - signed |
4272 | -----------------------------------------------------------------------------*/ |
4273 | static void __kmp_process_subplace_list(const char **scan, |
4274 | kmp_affinity_t &affinity, int maxOsId, |
4275 | kmp_affin_mask_t *tempMask, |
4276 | int *setSize) { |
4277 | const char *next; |
4278 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
4279 | |
4280 | for (;;) { |
4281 | int start, count, stride, i; |
4282 | |
4283 | // Read in the starting proc id |
4284 | SKIP_WS(*scan); |
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    start = __kmp_str_to_int(*scan, *next);
4289 | KMP_ASSERT(start >= 0); |
4290 | *scan = next; |
4291 | |
4292 | // valid follow sets are ',' ':' and '}' |
4293 | SKIP_WS(*scan); |
4294 | if (**scan == '}' || **scan == ',') { |
4295 | if ((start > maxOsId) || |
4296 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) { |
4297 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start); |
4298 | } else { |
4299 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start)); |
4300 | (*setSize)++; |
4301 | } |
4302 | if (**scan == '}') { |
4303 | break; |
4304 | } |
4305 | (*scan)++; // skip ',' |
4306 | continue; |
4307 | } |
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
4309 | (*scan)++; // skip ':' |
4310 | |
4311 | // Read count parameter |
4312 | SKIP_WS(*scan); |
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
    next = *scan;
    SKIP_DIGITS(next);
    count = __kmp_str_to_int(*scan, *next);
4317 | KMP_ASSERT(count >= 0); |
4318 | *scan = next; |
4319 | |
4320 | // valid follow sets are ',' ':' and '}' |
4321 | SKIP_WS(*scan); |
4322 | if (**scan == '}' || **scan == ',') { |
4323 | for (i = 0; i < count; i++) { |
4324 | if ((start > maxOsId) || |
4325 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) { |
4326 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start); |
4327 | break; // don't proliferate warnings for large count |
4328 | } else { |
4329 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start)); |
4330 | start++; |
4331 | (*setSize)++; |
4332 | } |
4333 | } |
4334 | if (**scan == '}') { |
4335 | break; |
4336 | } |
4337 | (*scan)++; // skip ',' |
4338 | continue; |
4339 | } |
    KMP_ASSERT2(**scan == ':', "bad explicit places list");
4341 | (*scan)++; // skip ':' |
4342 | |
4343 | // Read stride parameter |
4344 | int sign = +1; |
4345 | for (;;) { |
4346 | SKIP_WS(*scan); |
4347 | if (**scan == '+') { |
4348 | (*scan)++; // skip '+' |
4349 | continue; |
4350 | } |
4351 | if (**scan == '-') { |
4352 | sign *= -1; |
4353 | (*scan)++; // skip '-' |
4354 | continue; |
4355 | } |
4356 | break; |
4357 | } |
4358 | SKIP_WS(*scan); |
    KMP_ASSERT2((**scan >= '0') && (**scan <= '9'), "bad explicit places list");
4360 | next = *scan; |
4361 | SKIP_DIGITS(next); |
    stride = __kmp_str_to_int(*scan, *next);
4363 | KMP_ASSERT(stride >= 0); |
4364 | *scan = next; |
4365 | stride *= sign; |
4366 | |
4367 | // valid follow sets are ',' and '}' |
4368 | SKIP_WS(*scan); |
4369 | if (**scan == '}' || **scan == ',') { |
4370 | for (i = 0; i < count; i++) { |
4371 | if ((start > maxOsId) || |
4372 | (!KMP_CPU_ISSET(start, KMP_CPU_INDEX(osId2Mask, start)))) { |
4373 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, start); |
4374 | break; // don't proliferate warnings for large count |
4375 | } else { |
4376 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, start)); |
4377 | start += stride; |
4378 | (*setSize)++; |
4379 | } |
4380 | } |
4381 | if (**scan == '}') { |
4382 | break; |
4383 | } |
4384 | (*scan)++; // skip ',' |
4385 | continue; |
4386 | } |
4387 | |
    KMP_ASSERT2(0, "bad explicit places list");
4389 | } |
4390 | } |
4391 | |
4392 | static void __kmp_process_place(const char **scan, kmp_affinity_t &affinity, |
4393 | int maxOsId, kmp_affin_mask_t *tempMask, |
4394 | int *setSize) { |
4395 | const char *next; |
4396 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
4397 | |
4398 | // valid follow sets are '{' '!' and num |
4399 | SKIP_WS(*scan); |
4400 | if (**scan == '{') { |
4401 | (*scan)++; // skip '{' |
4402 | __kmp_process_subplace_list(scan, affinity, maxOsId, tempMask, setSize); |
    KMP_ASSERT2(**scan == '}', "bad explicit places list");
4404 | (*scan)++; // skip '}' |
4405 | } else if (**scan == '!') { |
4406 | (*scan)++; // skip '!' |
4407 | __kmp_process_place(scan, affinity, maxOsId, tempMask, setSize); |
4408 | KMP_CPU_COMPLEMENT(maxOsId, tempMask); |
4409 | } else if ((**scan >= '0') && (**scan <= '9')) { |
4410 | next = *scan; |
4411 | SKIP_DIGITS(next); |
    int num = __kmp_str_to_int(*scan, *next);
4413 | KMP_ASSERT(num >= 0); |
4414 | if ((num > maxOsId) || |
4415 | (!KMP_CPU_ISSET(num, KMP_CPU_INDEX(osId2Mask, num)))) { |
4416 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, num); |
4417 | } else { |
4418 | KMP_CPU_UNION(tempMask, KMP_CPU_INDEX(osId2Mask, num)); |
4419 | (*setSize)++; |
4420 | } |
4421 | *scan = next; // skip num |
4422 | } else { |
    KMP_ASSERT2(0, "bad explicit places list");
4424 | } |
4425 | } |
4426 | |
4428 | void __kmp_affinity_process_placelist(kmp_affinity_t &affinity) { |
4429 | int i, j, count, stride, sign; |
4430 | kmp_affin_mask_t **out_masks = &affinity.masks; |
4431 | unsigned *out_numMasks = &affinity.num_masks; |
4432 | const char *placelist = affinity.proclist; |
4433 | kmp_affin_mask_t *osId2Mask = affinity.os_id_masks; |
4434 | int maxOsId = affinity.num_os_id_masks - 1; |
4435 | const char *scan = placelist; |
4436 | const char *next = placelist; |
4437 | |
4438 | numNewMasks = 2; |
4439 | KMP_CPU_INTERNAL_ALLOC_ARRAY(newMasks, numNewMasks); |
4440 | nextNewMask = 0; |
4441 | |
4442 | // tempMask is modified based on the previous or initial |
4443 | // place to form the current place |
4444 | // previousMask contains the previous place |
4445 | kmp_affin_mask_t *tempMask; |
4446 | kmp_affin_mask_t *previousMask; |
4447 | KMP_CPU_ALLOC(tempMask); |
4448 | KMP_CPU_ZERO(tempMask); |
4449 | KMP_CPU_ALLOC(previousMask); |
4450 | KMP_CPU_ZERO(previousMask); |
4451 | int setSize = 0; |
4452 | |
4453 | for (;;) { |
    __kmp_process_place(&scan, affinity, maxOsId, tempMask, &setSize);
4455 | |
4456 | // valid follow sets are ',' ':' and EOL |
4457 | SKIP_WS(scan); |
4458 | if (*scan == '\0' || *scan == ',') { |
4459 | if (setSize > 0) { |
4460 | ADD_MASK(tempMask); |
4461 | } |
4462 | KMP_CPU_ZERO(tempMask); |
4463 | setSize = 0; |
4464 | if (*scan == '\0') { |
4465 | break; |
4466 | } |
4467 | scan++; // skip ',' |
4468 | continue; |
4469 | } |
4470 | |
    KMP_ASSERT2(*scan == ':', "bad explicit places list");
4472 | scan++; // skip ':' |
4473 | |
4474 | // Read count parameter |
4475 | SKIP_WS(scan); |
    KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
4477 | next = scan; |
4478 | SKIP_DIGITS(next); |
    count = __kmp_str_to_int(scan, *next);
4480 | KMP_ASSERT(count >= 0); |
4481 | scan = next; |
4482 | |
4483 | // valid follow sets are ',' ':' and EOL |
4484 | SKIP_WS(scan); |
4485 | if (*scan == '\0' || *scan == ',') { |
4486 | stride = +1; |
4487 | } else { |
      KMP_ASSERT2(*scan == ':', "bad explicit places list");
4489 | scan++; // skip ':' |
4490 | |
4491 | // Read stride parameter |
4492 | sign = +1; |
4493 | for (;;) { |
4494 | SKIP_WS(scan); |
4495 | if (*scan == '+') { |
4496 | scan++; // skip '+' |
4497 | continue; |
4498 | } |
4499 | if (*scan == '-') { |
4500 | sign *= -1; |
4501 | scan++; // skip '-' |
4502 | continue; |
4503 | } |
4504 | break; |
4505 | } |
4506 | SKIP_WS(scan); |
      KMP_ASSERT2((*scan >= '0') && (*scan <= '9'), "bad explicit places list");
4508 | next = scan; |
4509 | SKIP_DIGITS(next); |
      stride = __kmp_str_to_int(scan, *next);
4511 | KMP_DEBUG_ASSERT(stride >= 0); |
4512 | scan = next; |
4513 | stride *= sign; |
4514 | } |
4515 | |
4516 | // Add places determined by initial_place : count : stride |
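    // For example (illustrative only): "{0,1}:3:2" starts from the place
    // {0,1} and yields three places in total: {0,1}, {2,3}, and {4,5}.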
4517 | for (i = 0; i < count; i++) { |
4518 | if (setSize == 0) { |
4519 | break; |
4520 | } |
4521 | // Add the current place, then build the next place (tempMask) from that |
4522 | KMP_CPU_COPY(previousMask, tempMask); |
4523 | ADD_MASK(previousMask); |
4524 | KMP_CPU_ZERO(tempMask); |
4525 | setSize = 0; |
4526 | KMP_CPU_SET_ITERATE(j, previousMask) { |
4527 | if (!KMP_CPU_ISSET(j, previousMask)) { |
4528 | continue; |
4529 | } |
4530 | if ((j + stride > maxOsId) || (j + stride < 0) || |
4531 | (!KMP_CPU_ISSET(j, __kmp_affin_fullMask)) || |
4532 | (!KMP_CPU_ISSET(j + stride, |
4533 | KMP_CPU_INDEX(osId2Mask, j + stride)))) { |
4534 | if (i < count - 1) { |
4535 | KMP_AFF_WARNING(affinity, AffIgnoreInvalidProcID, j + stride); |
4536 | } |
4537 | continue; |
4538 | } |
4539 | KMP_CPU_SET(j + stride, tempMask); |
4540 | setSize++; |
4541 | } |
4542 | } |
4543 | KMP_CPU_ZERO(tempMask); |
4544 | setSize = 0; |
4545 | |
4546 | // valid follow sets are ',' and EOL |
4547 | SKIP_WS(scan); |
4548 | if (*scan == '\0') { |
4549 | break; |
4550 | } |
4551 | if (*scan == ',') { |
4552 | scan++; // skip ',' |
4553 | continue; |
4554 | } |
4555 | |
    KMP_ASSERT2(0, "bad explicit places list");
4557 | } |
4558 | |
  *out_numMasks = nextNewMask;
  if (nextNewMask == 0) {
    *out_masks = NULL;
    KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks);
    KMP_CPU_FREE(tempMask);
    KMP_CPU_FREE(previousMask);
    return;
  }
  KMP_CPU_ALLOC_ARRAY((*out_masks), nextNewMask);
  KMP_CPU_FREE(tempMask);
  KMP_CPU_FREE(previousMask);
4568 | for (i = 0; i < nextNewMask; i++) { |
4569 | kmp_affin_mask_t *src = KMP_CPU_INDEX(newMasks, i); |
4570 | kmp_affin_mask_t *dest = KMP_CPU_INDEX((*out_masks), i); |
4571 | KMP_CPU_COPY(dest, src); |
4572 | } |
4573 | KMP_CPU_INTERNAL_FREE_ARRAY(newMasks, numNewMasks); |
4574 | } |
4575 | |
4576 | #undef ADD_MASK |
4577 | #undef ADD_MASK_OSID |
4578 | |
4579 | // This function figures out the deepest level at which there is at least one |
4580 | // cluster/core with more than one processing unit bound to it. |
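// For example, with a (socket, core, thread) topology and bottom_level == 2,
// a hardware thread whose thread-level id (ids[2]) is nonzero shows that its
// core has more than one processing unit, so core_level becomes 1.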
4581 | static int __kmp_affinity_find_core_level(int nprocs, int bottom_level) { |
4582 | int core_level = 0; |
4583 | |
4584 | for (int i = 0; i < nprocs; i++) { |
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(i);
4586 | for (int j = bottom_level; j > 0; j--) { |
4587 | if (hw_thread.ids[j] > 0) { |
4588 | if (core_level < (j - 1)) { |
4589 | core_level = j - 1; |
4590 | } |
4591 | } |
4592 | } |
4593 | } |
4594 | return core_level; |
4595 | } |
4596 | |
// This function counts the number of clusters/cores at the given level.
static int __kmp_affinity_compute_ncores(int nprocs, int bottom_level,
                                         int core_level) {
  return __kmp_topology->get_count(core_level);
4601 | } |
// This function finds the cluster/core to which the given processing unit is
// bound.
4603 | static int __kmp_affinity_find_core(int proc, int bottom_level, |
4604 | int core_level) { |
4605 | int core = 0; |
4606 | KMP_DEBUG_ASSERT(proc >= 0 && proc < __kmp_topology->get_num_hw_threads()); |
4607 | for (int i = 0; i <= proc; ++i) { |
4608 | if (i + 1 <= proc) { |
4609 | for (int j = 0; j <= core_level; ++j) { |
        if (__kmp_topology->at(i + 1).sub_ids[j] !=
            __kmp_topology->at(i).sub_ids[j]) {
4612 | core++; |
4613 | break; |
4614 | } |
4615 | } |
4616 | } |
4617 | } |
4618 | return core; |
4619 | } |
4620 | |
// This function finds the maximal number of processing units bound to a
// cluster/core at the given level.
4623 | static int __kmp_affinity_max_proc_per_core(int nprocs, int bottom_level, |
4624 | int core_level) { |
4625 | if (core_level >= bottom_level) |
4626 | return 1; |
  int thread_level = __kmp_topology->get_level(KMP_HW_THREAD);
  return __kmp_topology->calculate_ratio(thread_level, core_level);
4629 | } |
4630 | |
4631 | static int *procarr = NULL; |
4632 | static int __kmp_aff_depth = 0; |
4633 | static int *__kmp_osid_to_hwthread_map = NULL; |
4634 | |
4635 | static void __kmp_affinity_get_mask_topology_info(const kmp_affin_mask_t *mask, |
4636 | kmp_affinity_ids_t &ids, |
4637 | kmp_affinity_attrs_t &attrs) { |
4638 | if (!KMP_AFFINITY_CAPABLE()) |
4639 | return; |
4640 | |
  // Initialize ids and attrs thread data
4642 | for (int i = 0; i < KMP_HW_LAST; ++i) |
4643 | ids.ids[i] = kmp_hw_thread_t::UNKNOWN_ID; |
4644 | attrs = KMP_AFFINITY_ATTRS_UNKNOWN; |
4645 | |
4646 | // Iterate through each os id within the mask and determine |
4647 | // the topology id and attribute information |
4648 | int cpu; |
4649 | int depth = __kmp_topology->get_depth(); |
4650 | KMP_CPU_SET_ITERATE(cpu, mask) { |
4651 | int osid_idx = __kmp_osid_to_hwthread_map[cpu]; |
4652 | ids.os_id = cpu; |
    const kmp_hw_thread_t &hw_thread = __kmp_topology->at(osid_idx);
4654 | for (int level = 0; level < depth; ++level) { |
4655 | kmp_hw_t type = __kmp_topology->get_type(level); |
4656 | int id = hw_thread.sub_ids[level]; |
4657 | if (ids.ids[type] == kmp_hw_thread_t::UNKNOWN_ID || ids.ids[type] == id) { |
4658 | ids.ids[type] = id; |
4659 | } else { |
4660 | // This mask spans across multiple topology units, set it as such |
4661 | // and mark every level below as such as well. |
4662 | ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID; |
4663 | for (; level < depth; ++level) { |
4664 | kmp_hw_t type = __kmp_topology->get_type(level); |
4665 | ids.ids[type] = kmp_hw_thread_t::MULTIPLE_ID; |
4666 | } |
4667 | } |
4668 | } |
4669 | if (!attrs.valid) { |
4670 | attrs.core_type = hw_thread.attrs.get_core_type(); |
4671 | attrs.core_eff = hw_thread.attrs.get_core_eff(); |
4672 | attrs.valid = 1; |
4673 | } else { |
4674 | // This mask spans across multiple attributes, set it as such |
4675 | if (attrs.core_type != hw_thread.attrs.get_core_type()) |
4676 | attrs.core_type = KMP_HW_CORE_TYPE_UNKNOWN; |
4677 | if (attrs.core_eff != hw_thread.attrs.get_core_eff()) |
4678 | attrs.core_eff = kmp_hw_attr_t::UNKNOWN_CORE_EFF; |
4679 | } |
4680 | } |
4681 | } |
4682 | |
4683 | static void __kmp_affinity_get_thread_topology_info(kmp_info_t *th) { |
4684 | if (!KMP_AFFINITY_CAPABLE()) |
4685 | return; |
4686 | const kmp_affin_mask_t *mask = th->th.th_affin_mask; |
4687 | kmp_affinity_ids_t &ids = th->th.th_topology_ids; |
4688 | kmp_affinity_attrs_t &attrs = th->th.th_topology_attrs; |
4689 | __kmp_affinity_get_mask_topology_info(mask, ids, attrs); |
4690 | } |
4691 | |
4692 | // Assign the topology information to each place in the place list |
4693 | // A thread can then grab not only its affinity mask, but the topology |
4694 | // information associated with that mask. e.g., Which socket is a thread on |
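// For example (illustrative only): with OMP_PLACES=cores on a machine with
// two hardware threads per core, each place's ids carry the socket and core
// ids, while the thread level is reported as MULTIPLE_ID because the place's
// mask spans both hardware threads of the core.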
4695 | static void __kmp_affinity_get_topology_info(kmp_affinity_t &affinity) { |
4696 | if (!KMP_AFFINITY_CAPABLE()) |
4697 | return; |
4698 | if (affinity.type != affinity_none) { |
4699 | KMP_ASSERT(affinity.num_os_id_masks); |
4700 | KMP_ASSERT(affinity.os_id_masks); |
4701 | } |
4702 | KMP_ASSERT(affinity.num_masks); |
4703 | KMP_ASSERT(affinity.masks); |
4704 | KMP_ASSERT(__kmp_affin_fullMask); |
4705 | |
4706 | int max_cpu = __kmp_affin_fullMask->get_max_cpu(); |
4707 | int num_hw_threads = __kmp_topology->get_num_hw_threads(); |
4708 | |
4709 | // Allocate thread topology information |
4710 | if (!affinity.ids) { |
4711 | affinity.ids = (kmp_affinity_ids_t *)__kmp_allocate( |
4712 | sizeof(kmp_affinity_ids_t) * affinity.num_masks); |
4713 | } |
4714 | if (!affinity.attrs) { |
4715 | affinity.attrs = (kmp_affinity_attrs_t *)__kmp_allocate( |
4716 | sizeof(kmp_affinity_attrs_t) * affinity.num_masks); |
4717 | } |
4718 | if (!__kmp_osid_to_hwthread_map) { |
    // Want the +1 because max_cpu should be a valid index into the map
4720 | __kmp_osid_to_hwthread_map = |
4721 | (int *)__kmp_allocate(sizeof(int) * (max_cpu + 1)); |
4722 | } |
4723 | |
4724 | // Create the OS proc to hardware thread map |
4725 | for (int hw_thread = 0; hw_thread < num_hw_threads; ++hw_thread) { |
    int os_id = __kmp_topology->at(hw_thread).os_id;
4727 | if (KMP_CPU_ISSET(os_id, __kmp_affin_fullMask)) |
4728 | __kmp_osid_to_hwthread_map[os_id] = hw_thread; |
4729 | } |
4730 | |
4731 | for (unsigned i = 0; i < affinity.num_masks; ++i) { |
4732 | kmp_affinity_ids_t &ids = affinity.ids[i]; |
4733 | kmp_affinity_attrs_t &attrs = affinity.attrs[i]; |
4734 | kmp_affin_mask_t *mask = KMP_CPU_INDEX(affinity.masks, i); |
4735 | __kmp_affinity_get_mask_topology_info(mask, ids, attrs); |
4736 | } |
4737 | } |
4738 | |
4739 | // Called when __kmp_topology is ready |
4740 | static void __kmp_aux_affinity_initialize_other_data(kmp_affinity_t &affinity) { |
4741 | // Initialize other data structures which depend on the topology |
4742 | if (__kmp_topology && __kmp_topology->get_num_hw_threads()) { |
    machine_hierarchy.init(__kmp_topology->get_num_hw_threads());
4744 | __kmp_affinity_get_topology_info(affinity); |
4745 | #if KMP_WEIGHTED_ITERATIONS_SUPPORTED |
4746 | __kmp_first_osid_with_ecore = __kmp_get_first_osid_with_ecore(); |
4747 | #endif |
4748 | } |
4749 | } |
4750 | |
4751 | // Create a one element mask array (set of places) which only contains the |
4752 | // initial process's affinity mask |
4753 | static void __kmp_create_affinity_none_places(kmp_affinity_t &affinity) { |
4754 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
4755 | KMP_ASSERT(affinity.type == affinity_none); |
4756 | KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads()); |
4757 | affinity.num_masks = 1; |
4758 | KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks); |
4759 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, 0); |
4760 | KMP_CPU_COPY(dest, __kmp_affin_fullMask); |
4761 | __kmp_aux_affinity_initialize_other_data(affinity); |
4762 | } |
4763 | |
4764 | static void __kmp_aux_affinity_initialize_masks(kmp_affinity_t &affinity) { |
4765 | // Create the "full" mask - this defines all of the processors that we |
4766 | // consider to be in the machine model. If respect is set, then it is the |
4767 | // initialization thread's affinity mask. Otherwise, it is all processors that |
4768 | // we know about on the machine. |
4769 | int verbose = affinity.flags.verbose; |
4770 | const char *env_var = affinity.env_var; |
4771 | |
4772 | // Already initialized |
4773 | if (__kmp_affin_fullMask && __kmp_affin_origMask) |
4774 | return; |
4775 | |
4776 | if (__kmp_affin_fullMask == NULL) { |
4777 | KMP_CPU_ALLOC(__kmp_affin_fullMask); |
4778 | } |
4779 | if (__kmp_affin_origMask == NULL) { |
4780 | KMP_CPU_ALLOC(__kmp_affin_origMask); |
4781 | } |
4782 | if (KMP_AFFINITY_CAPABLE()) { |
4783 | __kmp_get_system_affinity(__kmp_affin_fullMask, TRUE); |
    // Make a copy before possibly expanding to the entire machine mask
    __kmp_affin_origMask->copy(__kmp_affin_fullMask);
4786 | if (affinity.flags.respect) { |
4787 | // Count the number of available processors. |
4788 | unsigned i; |
4789 | __kmp_avail_proc = 0; |
4790 | KMP_CPU_SET_ITERATE(i, __kmp_affin_fullMask) { |
4791 | if (!KMP_CPU_ISSET(i, __kmp_affin_fullMask)) { |
4792 | continue; |
4793 | } |
4794 | __kmp_avail_proc++; |
4795 | } |
4796 | if (__kmp_avail_proc > __kmp_xproc) { |
4797 | KMP_AFF_WARNING(affinity, ErrorInitializeAffinity); |
4798 | affinity.type = affinity_none; |
4799 | KMP_AFFINITY_DISABLE(); |
4800 | return; |
4801 | } |
4802 | |
4803 | if (verbose) { |
4804 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
4805 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                                  __kmp_affin_fullMask);
4807 | KMP_INFORM(InitOSProcSetRespect, env_var, buf); |
4808 | } |
4809 | } else { |
4810 | if (verbose) { |
4811 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
4812 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                                  __kmp_affin_fullMask);
4814 | KMP_INFORM(InitOSProcSetNotRespect, env_var, buf); |
4815 | } |
4816 | __kmp_avail_proc = |
          __kmp_affinity_entire_machine_mask(__kmp_affin_fullMask);
4818 | #if KMP_OS_WINDOWS |
4819 | if (__kmp_num_proc_groups <= 1) { |
4820 | // Copy expanded full mask if topology has single processor group |
4821 | __kmp_affin_origMask->copy(__kmp_affin_fullMask); |
4822 | } |
    // Set the process affinity mask since threads' affinity
    // masks must be a subset of the process mask in Windows* OS
4825 | __kmp_affin_fullMask->set_process_affinity(true); |
4826 | #endif |
4827 | } |
4828 | } |
4829 | } |
4830 | |
4831 | static bool __kmp_aux_affinity_initialize_topology(kmp_affinity_t &affinity) { |
4832 | bool success = false; |
4833 | const char *env_var = affinity.env_var; |
4834 | kmp_i18n_id_t msg_id = kmp_i18n_null; |
4835 | int verbose = affinity.flags.verbose; |
4836 | |
4837 | // For backward compatibility, setting KMP_CPUINFO_FILE => |
4838 | // KMP_TOPOLOGY_METHOD=cpuinfo |
4839 | if ((__kmp_cpuinfo_file != NULL) && |
4840 | (__kmp_affinity_top_method == affinity_top_method_all)) { |
4841 | __kmp_affinity_top_method = affinity_top_method_cpuinfo; |
4842 | } |
4843 | |
4844 | if (__kmp_affinity_top_method == affinity_top_method_all) { |
4845 | // In the default code path, errors are not fatal - we just try using |
4846 | // another method. We only emit a warning message if affinity is on, or the |
// verbose flag is set, and the nowarnings flag was not set.
4848 | #if KMP_USE_HWLOC |
4849 | if (!success && |
4850 | __kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC) { |
4851 | if (!__kmp_hwloc_error) { |
4852 | success = __kmp_affinity_create_hwloc_map(&msg_id); |
4853 | if (!success && verbose) { |
4854 | KMP_INFORM(AffIgnoringHwloc, env_var); |
4855 | } |
4856 | } else if (verbose) { |
4857 | KMP_INFORM(AffIgnoringHwloc, env_var); |
4858 | } |
4859 | } |
4860 | #endif |
4861 | |
4862 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
4863 | if (!success) { |
4864 | success = __kmp_affinity_create_x2apicid_map(&msg_id); |
4865 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4866 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4867 | } |
4868 | } |
4869 | if (!success) { |
4870 | success = __kmp_affinity_create_apicid_map(&msg_id); |
4871 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4872 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4873 | } |
4874 | } |
4875 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
4876 | |
4877 | #if KMP_OS_LINUX || KMP_OS_AIX |
4878 | if (!success) { |
4879 | int line = 0; |
4880 | success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id); |
4881 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4882 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4883 | } |
4884 | } |
4885 | #endif /* KMP_OS_LINUX */ |
4886 | |
4887 | #if KMP_GROUP_AFFINITY |
4888 | if (!success && (__kmp_num_proc_groups > 1)) { |
4889 | success = __kmp_affinity_create_proc_group_map(&msg_id); |
4890 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4891 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4892 | } |
4893 | } |
4894 | #endif /* KMP_GROUP_AFFINITY */ |
4895 | |
4896 | if (!success) { |
4897 | success = __kmp_affinity_create_flat_map(&msg_id); |
4898 | if (!success && verbose && msg_id != kmp_i18n_null) { |
4899 | KMP_INFORM(AffInfoStr, env_var, __kmp_i18n_catgets(msg_id)); |
4900 | } |
4901 | KMP_ASSERT(success); |
4902 | } |
4903 | } |
4904 | |
  // If the user has specified that a particular topology discovery method is to be
4906 | // used, then we abort if that method fails. The exception is group affinity, |
4907 | // which might have been implicitly set. |
4908 | #if KMP_USE_HWLOC |
4909 | else if (__kmp_affinity_top_method == affinity_top_method_hwloc) { |
4910 | KMP_ASSERT(__kmp_affinity_dispatch->get_api_type() == KMPAffinity::HWLOC); |
4911 | success = __kmp_affinity_create_hwloc_map(&msg_id); |
4912 | if (!success) { |
4913 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4914 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4915 | } |
4916 | } |
4917 | #endif // KMP_USE_HWLOC |
4918 | |
4919 | #if KMP_ARCH_X86 || KMP_ARCH_X86_64 |
4920 | else if (__kmp_affinity_top_method == affinity_top_method_x2apicid || |
4921 | __kmp_affinity_top_method == affinity_top_method_x2apicid_1f) { |
4922 | success = __kmp_affinity_create_x2apicid_map(&msg_id); |
4923 | if (!success) { |
4924 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4925 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4926 | } |
4927 | } else if (__kmp_affinity_top_method == affinity_top_method_apicid) { |
4928 | success = __kmp_affinity_create_apicid_map(&msg_id); |
4929 | if (!success) { |
4930 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4931 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4932 | } |
4933 | } |
4934 | #endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */ |
4935 | |
4936 | else if (__kmp_affinity_top_method == affinity_top_method_cpuinfo) { |
4937 | int line = 0; |
4938 | success = __kmp_affinity_create_cpuinfo_map(&line, &msg_id); |
4939 | if (!success) { |
4940 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4941 | const char *filename = __kmp_cpuinfo_get_filename(); |
4942 | if (line > 0) { |
4943 | KMP_FATAL(FileLineMsgExiting, filename, line, |
4944 | __kmp_i18n_catgets(msg_id)); |
4945 | } else { |
4946 | KMP_FATAL(FileMsgExiting, filename, __kmp_i18n_catgets(msg_id)); |
4947 | } |
4948 | } |
4949 | } |
4950 | |
4951 | #if KMP_GROUP_AFFINITY |
4952 | else if (__kmp_affinity_top_method == affinity_top_method_group) { |
4953 | success = __kmp_affinity_create_proc_group_map(&msg_id); |
4954 | KMP_ASSERT(success); |
4955 | if (!success) { |
4956 | KMP_ASSERT(msg_id != kmp_i18n_null); |
4957 | KMP_FATAL(MsgExiting, __kmp_i18n_catgets(msg_id)); |
4958 | } |
4959 | } |
4960 | #endif /* KMP_GROUP_AFFINITY */ |
4961 | |
4962 | else if (__kmp_affinity_top_method == affinity_top_method_flat) { |
4963 | success = __kmp_affinity_create_flat_map(&msg_id); |
4964 | // should not fail |
4965 | KMP_ASSERT(success); |
4966 | } |
4967 | |
4968 | // Early exit if topology could not be created |
4969 | if (!__kmp_topology) { |
4970 | if (KMP_AFFINITY_CAPABLE()) { |
4971 | KMP_AFF_WARNING(affinity, ErrorInitializeAffinity); |
4972 | } |
4973 | if (nPackages > 0 && nCoresPerPkg > 0 && __kmp_nThreadsPerCore > 0 && |
4974 | __kmp_ncores > 0) { |
      __kmp_topology = kmp_topology_t::allocate(0, 0, NULL);
      __kmp_topology->canonicalize(nPackages, nCoresPerPkg,
                                   __kmp_nThreadsPerCore, __kmp_ncores);
4978 | if (verbose) { |
4979 | __kmp_topology->print(env_var); |
4980 | } |
4981 | } |
4982 | return false; |
4983 | } |
4984 | |
4985 | // Canonicalize, print (if requested), apply KMP_HW_SUBSET |
4986 | __kmp_topology->canonicalize(); |
4987 | if (verbose) |
4988 | __kmp_topology->print(env_var); |
4989 | bool filtered = __kmp_topology->filter_hw_subset(); |
4990 | if (filtered && verbose) |
    __kmp_topology->print("KMP_HW_SUBSET");
4992 | return success; |
4993 | } |
4994 | |
4995 | static void __kmp_aux_affinity_initialize(kmp_affinity_t &affinity) { |
4996 | bool is_regular_affinity = (&affinity == &__kmp_affinity); |
4997 | bool is_hidden_helper_affinity = (&affinity == &__kmp_hh_affinity); |
4998 | const char *env_var = __kmp_get_affinity_env_var(affinity); |
4999 | |
5000 | if (affinity.flags.initialized) { |
5001 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
5002 | return; |
5003 | } |
5004 | |
5005 | if (is_regular_affinity && (!__kmp_affin_fullMask || !__kmp_affin_origMask)) |
5006 | __kmp_aux_affinity_initialize_masks(affinity); |
5007 | |
5008 | if (is_regular_affinity && !__kmp_topology) { |
5009 | bool success = __kmp_aux_affinity_initialize_topology(affinity); |
5010 | if (success) { |
5011 | KMP_ASSERT(__kmp_avail_proc == __kmp_topology->get_num_hw_threads()); |
5012 | } else { |
5013 | affinity.type = affinity_none; |
5014 | KMP_AFFINITY_DISABLE(); |
5015 | } |
5016 | } |
5017 | |
  // If KMP_AFFINITY=none, then only create the single "none" place, which is
  // either the process's initial affinity mask or the set of all hardware
  // threads, depending on the respect/norespect setting
5021 | if (affinity.type == affinity_none) { |
5022 | __kmp_create_affinity_none_places(affinity); |
5023 | #if KMP_USE_HIER_SCHED |
5024 | __kmp_dispatch_set_hierarchy_values(); |
5025 | #endif |
5026 | affinity.flags.initialized = TRUE; |
5027 | return; |
5028 | } |
5029 | |
5030 | __kmp_topology->set_granularity(affinity); |
5031 | int depth = __kmp_topology->get_depth(); |
5032 | |
5033 | // Create the table of masks, indexed by thread Id. |
5034 | unsigned numUnique = 0; |
5035 | int numAddrs = __kmp_topology->get_num_hw_threads(); |
  // If OMP_PLACES=cores:<attribute> is specified, then attempt to build the
  // OS Id mask table using those attributes
5038 | if (affinity.core_attr_gran.valid) { |
    __kmp_create_os_id_masks(&numUnique, affinity, [&](int idx) {
5040 | KMP_ASSERT(idx >= -1); |
5041 | for (int i = idx + 1; i < numAddrs; ++i) |
        if (__kmp_topology->at(i).attrs.contains(affinity.core_attr_gran))
5043 | return i; |
5044 | return numAddrs; |
5045 | }); |
5046 | if (!affinity.os_id_masks) { |
5047 | const char *core_attribute; |
5048 | if (affinity.core_attr_gran.core_eff != kmp_hw_attr_t::UNKNOWN_CORE_EFF) |
        core_attribute = "core_efficiency";
5050 | else |
        core_attribute = "core_type";
5052 | KMP_AFF_WARNING(affinity, AffIgnoringNotAvailable, env_var, |
5053 | core_attribute, |
5054 | __kmp_hw_get_catalog_string(KMP_HW_CORE, /*plural=*/true)) |
5055 | } |
5056 | } |
  // If core attributes did not work, or none were specified, then build the
  // OS Id mask table the typical incremental way, checking each id for
  // validity at the specified granularity level.
5060 | if (!affinity.os_id_masks) { |
5061 | int gran = affinity.gran_levels; |
5062 | int gran_level = depth - 1 - affinity.gran_levels; |
5063 | if (gran >= 0 && gran_level >= 0 && gran_level < depth) { |
5064 | __kmp_create_os_id_masks( |
          &numUnique, affinity, [depth, numAddrs, &affinity](int idx) {
5066 | KMP_ASSERT(idx >= -1); |
5067 | int gran = affinity.gran_levels; |
5068 | int gran_level = depth - 1 - affinity.gran_levels; |
5069 | for (int i = idx + 1; i < numAddrs; ++i) |
5070 | if ((gran >= depth) || |
                (gran < depth && __kmp_topology->at(i).ids[gran_level] !=
5072 | kmp_hw_thread_t::UNKNOWN_ID)) |
5073 | return i; |
5074 | return numAddrs; |
5075 | }); |
5076 | } |
5077 | } |
  // Final attempt to build the OS Id mask table the typical incremental way.
5079 | if (!affinity.os_id_masks) { |
    __kmp_create_os_id_masks(&numUnique, affinity, [](int idx) {
5081 | KMP_ASSERT(idx >= -1); |
5082 | return idx + 1; |
5083 | }); |
5084 | } |
5085 | |
5086 | switch (affinity.type) { |
5087 | |
5088 | case affinity_explicit: |
5089 | KMP_DEBUG_ASSERT(affinity.proclist != NULL); |
5090 | if (is_hidden_helper_affinity || |
5091 | __kmp_nested_proc_bind.bind_types[0] == proc_bind_intel) { |
5092 | __kmp_affinity_process_proclist(affinity); |
5093 | } else { |
5094 | __kmp_affinity_process_placelist(affinity); |
5095 | } |
5096 | if (affinity.num_masks == 0) { |
5097 | KMP_AFF_WARNING(affinity, AffNoValidProcID); |
5098 | affinity.type = affinity_none; |
5099 | __kmp_create_affinity_none_places(affinity); |
5100 | affinity.flags.initialized = TRUE; |
5101 | return; |
5102 | } |
5103 | break; |
5104 | |
5105 | // The other affinity types rely on sorting the hardware threads according to |
5106 | // some permutation of the machine topology tree. Set affinity.compact |
5107 | // and affinity.offset appropriately, then jump to a common code |
5108 | // fragment to do the sort and create the array of affinity masks. |
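  // For example (illustrative only): with a depth-3 (socket, core, thread)
  // topology, affinity_scatter remaps compact to depth - 1 - compact, so the
  // sort varies the outermost (socket) level fastest and consecutive masks
  // land on different sockets, whereas affinity_compact keeps neighboring
  // hardware threads in consecutive masks.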
5109 | case affinity_logical: |
5110 | affinity.compact = 0; |
5111 | if (affinity.offset) { |
5112 | affinity.offset = |
5113 | __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc; |
5114 | } |
5115 | goto sortTopology; |
5116 | |
5117 | case affinity_physical: |
5118 | if (__kmp_nThreadsPerCore > 1) { |
5119 | affinity.compact = 1; |
5120 | if (affinity.compact >= depth) { |
5121 | affinity.compact = 0; |
5122 | } |
5123 | } else { |
5124 | affinity.compact = 0; |
5125 | } |
5126 | if (affinity.offset) { |
5127 | affinity.offset = |
5128 | __kmp_nThreadsPerCore * affinity.offset % __kmp_avail_proc; |
5129 | } |
5130 | goto sortTopology; |
5131 | |
5132 | case affinity_scatter: |
5133 | if (affinity.compact >= depth) { |
5134 | affinity.compact = 0; |
5135 | } else { |
5136 | affinity.compact = depth - 1 - affinity.compact; |
5137 | } |
5138 | goto sortTopology; |
5139 | |
5140 | case affinity_compact: |
5141 | if (affinity.compact >= depth) { |
5142 | affinity.compact = depth - 1; |
5143 | } |
5144 | goto sortTopology; |
5145 | |
5146 | case affinity_balanced: |
5147 | if (depth <= 1 || is_hidden_helper_affinity) { |
5148 | KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var); |
5149 | affinity.type = affinity_none; |
5150 | __kmp_create_affinity_none_places(affinity); |
5151 | affinity.flags.initialized = TRUE; |
5152 | return; |
5153 | } else if (!__kmp_topology->is_uniform()) { |
5154 | // Save the depth for further usage |
5155 | __kmp_aff_depth = depth; |
5156 | |
5157 | int core_level = |
          __kmp_affinity_find_core_level(__kmp_avail_proc, depth - 1);
      int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc, depth - 1,
5160 | core_level); |
5161 | int maxprocpercore = __kmp_affinity_max_proc_per_core( |
          __kmp_avail_proc, depth - 1, core_level);
5163 | |
5164 | int nproc = ncores * maxprocpercore; |
5165 | if ((nproc < 2) || (nproc < __kmp_avail_proc)) { |
5166 | KMP_AFF_WARNING(affinity, AffBalancedNotAvail, env_var); |
5167 | affinity.type = affinity_none; |
5168 | __kmp_create_affinity_none_places(affinity); |
5169 | affinity.flags.initialized = TRUE; |
5170 | return; |
5171 | } |
5172 | |
5173 | procarr = (int *)__kmp_allocate(sizeof(int) * nproc); |
5174 | for (int i = 0; i < nproc; i++) { |
5175 | procarr[i] = -1; |
5176 | } |
5177 | |
5178 | int lastcore = -1; |
5179 | int inlastcore = 0; |
5180 | for (int i = 0; i < __kmp_avail_proc; i++) { |
        int proc = __kmp_topology->at(i).os_id;
        int core = __kmp_affinity_find_core(i, depth - 1, core_level);
5183 | |
5184 | if (core == lastcore) { |
5185 | inlastcore++; |
5186 | } else { |
5187 | inlastcore = 0; |
5188 | } |
5189 | lastcore = core; |
5190 | |
5191 | procarr[core * maxprocpercore + inlastcore] = proc; |
5192 | } |
5193 | } |
5194 | if (affinity.compact >= depth) { |
5195 | affinity.compact = depth - 1; |
5196 | } |
5197 | |
5198 | sortTopology: |
5199 | // Allocate the gtid->affinity mask table. |
5200 | if (affinity.flags.dups) { |
5201 | affinity.num_masks = __kmp_avail_proc; |
5202 | } else { |
5203 | affinity.num_masks = numUnique; |
5204 | } |
5205 | |
5206 | if ((__kmp_nested_proc_bind.bind_types[0] != proc_bind_intel) && |
5207 | (__kmp_affinity_num_places > 0) && |
5208 | ((unsigned)__kmp_affinity_num_places < affinity.num_masks) && |
5209 | !is_hidden_helper_affinity) { |
5210 | affinity.num_masks = __kmp_affinity_num_places; |
5211 | } |
5212 | |
5213 | KMP_CPU_ALLOC_ARRAY(affinity.masks, affinity.num_masks); |
5214 | |
5215 | // Sort the topology table according to the current setting of |
5216 | // affinity.compact, then fill out affinity.masks. |
5217 | __kmp_topology->sort_compact(affinity); |
5218 | { |
5219 | int i; |
5220 | unsigned j; |
5221 | int num_hw_threads = __kmp_topology->get_num_hw_threads(); |
5222 | kmp_full_mask_modifier_t full_mask; |
5223 | for (i = 0, j = 0; i < num_hw_threads; i++) { |
      if ((!affinity.flags.dups) && (!__kmp_topology->at(i).leader)) {
5225 | continue; |
5226 | } |
      int osId = __kmp_topology->at(i).os_id;
5228 | |
5229 | kmp_affin_mask_t *src = KMP_CPU_INDEX(affinity.os_id_masks, osId); |
5230 | if (KMP_CPU_ISEMPTY(src)) |
5231 | continue; |
5232 | kmp_affin_mask_t *dest = KMP_CPU_INDEX(affinity.masks, j); |
5233 | KMP_ASSERT(KMP_CPU_ISSET(osId, src)); |
5234 | KMP_CPU_COPY(dest, src); |
      full_mask.include(src);
5236 | if (++j >= affinity.num_masks) { |
5237 | break; |
5238 | } |
5239 | } |
5240 | KMP_DEBUG_ASSERT(j == affinity.num_masks); |
5241 | // See if the places list further restricts or changes the full mask |
5242 | if (full_mask.restrict_to_mask() && affinity.flags.verbose) { |
5243 | __kmp_topology->print(env_var); |
5244 | } |
5245 | } |
5246 | // Sort the topology back using ids |
5247 | __kmp_topology->sort_ids(); |
5248 | break; |
5249 | |
5250 | default: |
    KMP_ASSERT2(0, "Unexpected affinity setting");
5252 | } |
5253 | __kmp_aux_affinity_initialize_other_data(affinity); |
5254 | affinity.flags.initialized = TRUE; |
5255 | } |
5256 | |
5257 | void __kmp_affinity_initialize(kmp_affinity_t &affinity) { |
5258 | // Much of the code above was written assuming that if a machine was not |
5259 | // affinity capable, then affinity type == affinity_none. |
5260 | // We now explicitly represent this as affinity type == affinity_disabled. |
5261 | // There are too many checks for affinity type == affinity_none in this code. |
5262 | // Instead of trying to change them all, check if |
5263 | // affinity type == affinity_disabled, and if so, slam it with affinity_none, |
5264 | // call the real initialization routine, then restore affinity type to |
5265 | // affinity_disabled. |
5266 | int disabled = (affinity.type == affinity_disabled); |
5267 | if (!KMP_AFFINITY_CAPABLE()) |
5268 | KMP_ASSERT(disabled); |
5269 | if (disabled) |
5270 | affinity.type = affinity_none; |
5271 | __kmp_aux_affinity_initialize(affinity); |
5272 | if (disabled) |
5273 | affinity.type = affinity_disabled; |
5274 | } |
5275 | |
5276 | void __kmp_affinity_uninitialize(void) { |
5277 | for (kmp_affinity_t *affinity : __kmp_affinities) { |
5278 | if (affinity->masks != NULL) |
5279 | KMP_CPU_FREE_ARRAY(affinity->masks, affinity->num_masks); |
5280 | if (affinity->os_id_masks != NULL) |
5281 | KMP_CPU_FREE_ARRAY(affinity->os_id_masks, affinity->num_os_id_masks); |
5282 | if (affinity->proclist != NULL) |
5283 | __kmp_free(affinity->proclist); |
5284 | if (affinity->ids != NULL) |
5285 | __kmp_free(affinity->ids); |
5286 | if (affinity->attrs != NULL) |
5287 | __kmp_free(affinity->attrs); |
5288 | *affinity = KMP_AFFINITY_INIT(affinity->env_var); |
5289 | } |
5290 | if (__kmp_affin_origMask != NULL) { |
5291 | if (KMP_AFFINITY_CAPABLE()) { |
5292 | #if KMP_OS_AIX |
5293 | // Uninitialize by unbinding the thread. |
5294 | bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY); |
5295 | #else |
5296 | __kmp_set_system_affinity(__kmp_affin_origMask, FALSE); |
5297 | #endif |
5298 | } |
5299 | KMP_CPU_FREE(__kmp_affin_origMask); |
5300 | __kmp_affin_origMask = NULL; |
5301 | } |
5302 | __kmp_affinity_num_places = 0; |
5303 | if (procarr != NULL) { |
5304 | __kmp_free(procarr); |
5305 | procarr = NULL; |
5306 | } |
5307 | if (__kmp_osid_to_hwthread_map) { |
5308 | __kmp_free(__kmp_osid_to_hwthread_map); |
5309 | __kmp_osid_to_hwthread_map = NULL; |
5310 | } |
5311 | #if KMP_USE_HWLOC |
5312 | if (__kmp_hwloc_topology != NULL) { |
5313 | hwloc_topology_destroy(__kmp_hwloc_topology); |
5314 | __kmp_hwloc_topology = NULL; |
5315 | } |
5316 | #endif |
5317 | if (__kmp_hw_subset) { |
    kmp_hw_subset_t::deallocate(__kmp_hw_subset);
5319 | __kmp_hw_subset = nullptr; |
5320 | } |
5321 | if (__kmp_topology) { |
    kmp_topology_t::deallocate(__kmp_topology);
5323 | __kmp_topology = nullptr; |
5324 | } |
5325 | KMPAffinity::destroy_api(); |
5326 | } |
5327 | |
5328 | static void __kmp_select_mask_by_gtid(int gtid, const kmp_affinity_t *affinity, |
5329 | int *place, kmp_affin_mask_t **mask) { |
5330 | int mask_idx; |
5331 | bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid); |
5332 | if (is_hidden_helper) |
    // The first gtid is the regular primary thread, the second gtid is the
    // main thread of the hidden team, which does not participate in task
    // execution; so the first hidden helper worker (gtid 2) maps to index 0.
5335 | mask_idx = gtid - 2; |
5336 | else |
5337 | mask_idx = __kmp_adjust_gtid_for_hidden_helpers(gtid); |
5338 | KMP_DEBUG_ASSERT(affinity->num_masks > 0); |
5339 | *place = (mask_idx + affinity->offset) % affinity->num_masks; |
5340 | *mask = KMP_CPU_INDEX(affinity->masks, *place); |
5341 | } |
5342 | |
5343 | // This function initializes the per-thread data concerning affinity including |
5344 | // the mask and topology information |
5345 | void __kmp_affinity_set_init_mask(int gtid, int isa_root) { |
5346 | |
5347 | kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]); |
5348 | |
5349 | // Set the thread topology information to default of unknown |
5350 | for (int id = 0; id < KMP_HW_LAST; ++id) |
5351 | th->th.th_topology_ids.ids[id] = kmp_hw_thread_t::UNKNOWN_ID; |
5352 | th->th.th_topology_attrs = KMP_AFFINITY_ATTRS_UNKNOWN; |
5353 | |
5354 | if (!KMP_AFFINITY_CAPABLE()) { |
5355 | return; |
5356 | } |
5357 | |
5358 | if (th->th.th_affin_mask == NULL) { |
5359 | KMP_CPU_ALLOC(th->th.th_affin_mask); |
5360 | } else { |
5361 | KMP_CPU_ZERO(th->th.th_affin_mask); |
5362 | } |
5363 | |
5364 | // Copy the thread mask to the kmp_info_t structure. If |
5365 | // __kmp_affinity.type == affinity_none, copy the "full" mask, i.e. |
5366 | // one that has all of the OS proc ids set, or if |
5367 | // __kmp_affinity.flags.respect is set, then the full mask is the |
5368 | // same as the mask of the initialization thread. |
5369 | kmp_affin_mask_t *mask; |
5370 | int i; |
5371 | const kmp_affinity_t *affinity; |
5372 | bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid); |
5373 | |
5374 | if (is_hidden_helper) |
5375 | affinity = &__kmp_hh_affinity; |
5376 | else |
5377 | affinity = &__kmp_affinity; |
5378 | |
5379 | if (KMP_AFFINITY_NON_PROC_BIND || is_hidden_helper) { |
5380 | if ((affinity->type == affinity_none) || |
5381 | (affinity->type == affinity_balanced) || |
5382 | KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) { |
5383 | #if KMP_GROUP_AFFINITY |
5384 | if (__kmp_num_proc_groups > 1) { |
5385 | return; |
5386 | } |
5387 | #endif |
5388 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
5389 | i = 0; |
5390 | mask = __kmp_affin_fullMask; |
5391 | } else { |
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
5393 | } |
5394 | } else { |
5395 | if (!isa_root || __kmp_nested_proc_bind.bind_types[0] == proc_bind_false) { |
5396 | #if KMP_GROUP_AFFINITY |
5397 | if (__kmp_num_proc_groups > 1) { |
5398 | return; |
5399 | } |
5400 | #endif |
5401 | KMP_ASSERT(__kmp_affin_fullMask != NULL); |
5402 | i = KMP_PLACE_ALL; |
5403 | mask = __kmp_affin_fullMask; |
5404 | } else { |
      __kmp_select_mask_by_gtid(gtid, affinity, &i, &mask);
5406 | } |
5407 | } |
5408 | |
5409 | th->th.th_current_place = i; |
5410 | if (isa_root && !is_hidden_helper) { |
5411 | th->th.th_new_place = i; |
5412 | th->th.th_first_place = 0; |
5413 | th->th.th_last_place = affinity->num_masks - 1; |
5414 | } else if (KMP_AFFINITY_NON_PROC_BIND) { |
5415 | // When using a Non-OMP_PROC_BIND affinity method, |
5416 | // set all threads' place-partition-var to the entire place list |
5417 | th->th.th_first_place = 0; |
5418 | th->th.th_last_place = affinity->num_masks - 1; |
5419 | } |
5420 | // Copy topology information associated with the place |
5421 | if (i >= 0) { |
5422 | th->th.th_topology_ids = __kmp_affinity.ids[i]; |
5423 | th->th.th_topology_attrs = __kmp_affinity.attrs[i]; |
5424 | } |
5425 | |
5426 | if (i == KMP_PLACE_ALL) { |
5427 | KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to all places\n" , |
5428 | gtid)); |
5429 | } else { |
5430 | KA_TRACE(100, ("__kmp_affinity_set_init_mask: setting T#%d to place %d\n" , |
5431 | gtid, i)); |
5432 | } |
5433 | |
5434 | KMP_CPU_COPY(th->th.th_affin_mask, mask); |
5435 | } |
5436 | |
5437 | void __kmp_affinity_bind_init_mask(int gtid) { |
5438 | if (!KMP_AFFINITY_CAPABLE()) { |
5439 | return; |
5440 | } |
5441 | kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]); |
5442 | const kmp_affinity_t *affinity; |
5443 | const char *env_var; |
5444 | bool is_hidden_helper = KMP_HIDDEN_HELPER_THREAD(gtid); |
5445 | |
5446 | if (is_hidden_helper) |
5447 | affinity = &__kmp_hh_affinity; |
5448 | else |
5449 | affinity = &__kmp_affinity; |
  env_var = __kmp_get_affinity_env_var(*affinity, /*for_binding=*/true);
5451 | /* to avoid duplicate printing (will be correctly printed on barrier) */ |
5452 | if (affinity->flags.verbose && (affinity->type == affinity_none || |
5453 | (th->th.th_current_place != KMP_PLACE_ALL && |
5454 | affinity->type != affinity_balanced)) && |
5455 | !KMP_HIDDEN_HELPER_MAIN_THREAD(gtid)) { |
5456 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5457 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                              th->th.th_affin_mask);
5459 | KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(), |
5460 | gtid, buf); |
5461 | } |
5462 | |
5463 | #if KMP_OS_WINDOWS |
5464 | // On Windows* OS, the process affinity mask might have changed. If the user |
5465 | // didn't request affinity and this call fails, just continue silently. |
5466 | // See CQ171393. |
5467 | if (affinity->type == affinity_none) { |
5468 | __kmp_set_system_affinity(th->th.th_affin_mask, FALSE); |
5469 | } else |
5470 | #endif |
5471 | #if !KMP_OS_AIX |
5472 | // Do not set the full mask as the init mask on AIX. |
5473 | __kmp_set_system_affinity(th->th.th_affin_mask, TRUE); |
5474 | #endif |
5475 | } |
5476 | |
5477 | void __kmp_affinity_bind_place(int gtid) { |
5478 | // Hidden helper threads should not be affected by OMP_PLACES/OMP_PROC_BIND |
5479 | if (!KMP_AFFINITY_CAPABLE() || KMP_HIDDEN_HELPER_THREAD(gtid)) { |
5480 | return; |
5481 | } |
5482 | |
5483 | kmp_info_t *th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[gtid]); |
5484 | |
5485 | KA_TRACE(100, ("__kmp_affinity_bind_place: binding T#%d to place %d (current " |
5486 | "place = %d)\n" , |
5487 | gtid, th->th.th_new_place, th->th.th_current_place)); |
5488 | |
5489 | // Check that the new place is within this thread's partition. |
5490 | KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL); |
5491 | KMP_ASSERT(th->th.th_new_place >= 0); |
5492 | KMP_ASSERT((unsigned)th->th.th_new_place <= __kmp_affinity.num_masks); |
5493 | if (th->th.th_first_place <= th->th.th_last_place) { |
5494 | KMP_ASSERT((th->th.th_new_place >= th->th.th_first_place) && |
5495 | (th->th.th_new_place <= th->th.th_last_place)); |
5496 | } else { |
5497 | KMP_ASSERT((th->th.th_new_place <= th->th.th_first_place) || |
5498 | (th->th.th_new_place >= th->th.th_last_place)); |
5499 | } |
5500 | |
5501 | // Copy the thread mask to the kmp_info_t structure, |
5502 | // and set this thread's affinity. |
5503 | kmp_affin_mask_t *mask = |
5504 | KMP_CPU_INDEX(__kmp_affinity.masks, th->th.th_new_place); |
5505 | KMP_CPU_COPY(th->th.th_affin_mask, mask); |
5506 | th->th.th_current_place = th->th.th_new_place; |
5507 | |
5508 | if (__kmp_affinity.flags.verbose) { |
5509 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5510 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
                              th->th.th_affin_mask);
    KMP_INFORM(BoundToOSProcSet, "OMP_PROC_BIND", (kmp_int32)getpid(),
5513 | __kmp_gettid(), gtid, buf); |
5514 | } |
5515 | __kmp_set_system_affinity(th->th.th_affin_mask, TRUE); |
5516 | } |
5517 | |
5518 | int __kmp_aux_set_affinity(void **mask) { |
5519 | int gtid; |
5520 | kmp_info_t *th; |
5521 | int retval; |
5522 | |
5523 | if (!KMP_AFFINITY_CAPABLE()) { |
5524 | return -1; |
5525 | } |
5526 | |
5527 | gtid = __kmp_entry_gtid(); |
5528 | KA_TRACE( |
5529 | 1000, ("" ); { |
5530 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5531 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5532 | (kmp_affin_mask_t *)(*mask)); |
5533 | __kmp_debug_printf( |
5534 | "kmp_set_affinity: setting affinity mask for thread %d = %s\n" , |
5535 | gtid, buf); |
5536 | }); |
5537 | |
5538 | if (__kmp_env_consistency_check) { |
5539 | if ((mask == NULL) || (*mask == NULL)) { |
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
5541 | } else { |
5542 | unsigned proc; |
5543 | int num_procs = 0; |
5544 | |
5545 | KMP_CPU_SET_ITERATE(proc, ((kmp_affin_mask_t *)(*mask))) { |
5546 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
          KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
5548 | } |
5549 | if (!KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask))) { |
5550 | continue; |
5551 | } |
5552 | num_procs++; |
5553 | } |
5554 | if (num_procs == 0) { |
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
5556 | } |
5557 | |
5558 | #if KMP_GROUP_AFFINITY |
5559 | if (__kmp_get_proc_group((kmp_affin_mask_t *)(*mask)) < 0) { |
        KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
5561 | } |
5562 | #endif /* KMP_GROUP_AFFINITY */ |
5563 | } |
5564 | } |
5565 | |
5566 | th = __kmp_threads[gtid]; |
5567 | KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL); |
5568 | retval = __kmp_set_system_affinity((kmp_affin_mask_t *)(*mask), FALSE); |
5569 | if (retval == 0) { |
5570 | KMP_CPU_COPY(th->th.th_affin_mask, (kmp_affin_mask_t *)(*mask)); |
5571 | } |
5572 | |
5573 | th->th.th_current_place = KMP_PLACE_UNDEFINED; |
5574 | th->th.th_new_place = KMP_PLACE_UNDEFINED; |
5575 | th->th.th_first_place = 0; |
5576 | th->th.th_last_place = __kmp_affinity.num_masks - 1; |
5577 | |
  // Turn off 4.0 affinity for the current thread at this parallel level.
5579 | th->th.th_current_task->td_icvs.proc_bind = proc_bind_false; |
5580 | |
5581 | return retval; |
5582 | } |
5583 | |
5584 | int __kmp_aux_get_affinity(void **mask) { |
5585 | int gtid; |
5586 | int retval; |
5587 | #if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG |
5588 | kmp_info_t *th; |
5589 | #endif |
5590 | if (!KMP_AFFINITY_CAPABLE()) { |
5591 | return -1; |
5592 | } |
5593 | |
5594 | gtid = __kmp_entry_gtid(); |
5595 | #if KMP_OS_WINDOWS || KMP_OS_AIX || KMP_DEBUG |
5596 | th = __kmp_threads[gtid]; |
5597 | #else |
5598 | (void)gtid; // unused variable |
5599 | #endif |
5600 | KMP_DEBUG_ASSERT(th->th.th_affin_mask != NULL); |
5601 | |
5602 | KA_TRACE( |
5603 | 1000, ("" ); { |
5604 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5605 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5606 | th->th.th_affin_mask); |
5607 | __kmp_printf( |
5608 | "kmp_get_affinity: stored affinity mask for thread %d = %s\n" , gtid, |
5609 | buf); |
5610 | }); |
5611 | |
5612 | if (__kmp_env_consistency_check) { |
5613 | if ((mask == NULL) || (*mask == NULL)) { |
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity");
5615 | } |
5616 | } |
5617 | |
5618 | #if !KMP_OS_WINDOWS && !KMP_OS_AIX |
5619 | |
5620 | retval = __kmp_get_system_affinity((kmp_affin_mask_t *)(*mask), FALSE); |
5621 | KA_TRACE( |
5622 | 1000, ("" ); { |
5623 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5624 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5625 | (kmp_affin_mask_t *)(*mask)); |
5626 | __kmp_printf( |
5627 | "kmp_get_affinity: system affinity mask for thread %d = %s\n" , gtid, |
5628 | buf); |
5629 | }); |
5630 | return retval; |
5631 | |
5632 | #else |
5633 | (void)retval; |
5634 | |
5635 | KMP_CPU_COPY((kmp_affin_mask_t *)(*mask), th->th.th_affin_mask); |
5636 | return 0; |
5637 | |
5638 | #endif /* !KMP_OS_WINDOWS && !KMP_OS_AIX */ |
5639 | } |
5640 | |
5641 | int __kmp_aux_get_affinity_max_proc() { |
5642 | if (!KMP_AFFINITY_CAPABLE()) { |
5643 | return 0; |
5644 | } |
5645 | #if KMP_GROUP_AFFINITY |
5646 | if (__kmp_num_proc_groups > 1) { |
5647 | return (int)(__kmp_num_proc_groups * sizeof(DWORD_PTR) * CHAR_BIT); |
5648 | } |
5649 | #endif |
5650 | return __kmp_xproc; |
5651 | } |
5652 | |
5653 | int __kmp_aux_set_affinity_mask_proc(int proc, void **mask) { |
5654 | if (!KMP_AFFINITY_CAPABLE()) { |
5655 | return -1; |
5656 | } |
5657 | |
5658 | KA_TRACE( |
5659 | 1000, ("" ); { |
5660 | int gtid = __kmp_entry_gtid(); |
5661 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5662 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5663 | (kmp_affin_mask_t *)(*mask)); |
5664 | __kmp_debug_printf("kmp_set_affinity_mask_proc: setting proc %d in " |
5665 | "affinity mask for thread %d = %s\n" , |
5666 | proc, gtid, buf); |
5667 | }); |
5668 | |
5669 | if (__kmp_env_consistency_check) { |
5670 | if ((mask == NULL) || (*mask == NULL)) { |
      KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity_mask_proc");
5672 | } |
5673 | } |
5674 | |
5675 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { |
5676 | return -1; |
5677 | } |
5678 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5679 | return -2; |
5680 | } |
5681 | |
5682 | KMP_CPU_SET(proc, (kmp_affin_mask_t *)(*mask)); |
5683 | return 0; |
5684 | } |
5685 | |
5686 | int __kmp_aux_unset_affinity_mask_proc(int proc, void **mask) { |
5687 | if (!KMP_AFFINITY_CAPABLE()) { |
5688 | return -1; |
5689 | } |
5690 | |
5691 | KA_TRACE( |
5692 | 1000, ("" ); { |
5693 | int gtid = __kmp_entry_gtid(); |
5694 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5695 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5696 | (kmp_affin_mask_t *)(*mask)); |
5697 | __kmp_debug_printf("kmp_unset_affinity_mask_proc: unsetting proc %d in " |
5698 | "affinity mask for thread %d = %s\n" , |
5699 | proc, gtid, buf); |
5700 | }); |
5701 | |
5702 | if (__kmp_env_consistency_check) { |
5703 | if ((mask == NULL) || (*mask == NULL)) { |
      KMP_FATAL(AffinityInvalidMask, "kmp_unset_affinity_mask_proc");
5705 | } |
5706 | } |
5707 | |
5708 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { |
5709 | return -1; |
5710 | } |
5711 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5712 | return -2; |
5713 | } |
5714 | |
5715 | KMP_CPU_CLR(proc, (kmp_affin_mask_t *)(*mask)); |
5716 | return 0; |
5717 | } |
5718 | |
5719 | int __kmp_aux_get_affinity_mask_proc(int proc, void **mask) { |
5720 | if (!KMP_AFFINITY_CAPABLE()) { |
5721 | return -1; |
5722 | } |
5723 | |
5724 | KA_TRACE( |
5725 | 1000, ("" ); { |
5726 | int gtid = __kmp_entry_gtid(); |
5727 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5728 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, |
5729 | (kmp_affin_mask_t *)(*mask)); |
5730 | __kmp_debug_printf("kmp_get_affinity_mask_proc: getting proc %d in " |
5731 | "affinity mask for thread %d = %s\n" , |
5732 | proc, gtid, buf); |
5733 | }); |
5734 | |
5735 | if (__kmp_env_consistency_check) { |
5736 | if ((mask == NULL) || (*mask == NULL)) { |
      KMP_FATAL(AffinityInvalidMask, "kmp_get_affinity_mask_proc");
5738 | } |
5739 | } |
5740 | |
5741 | if ((proc < 0) || (proc >= __kmp_aux_get_affinity_max_proc())) { |
5742 | return -1; |
5743 | } |
5744 | if (!KMP_CPU_ISSET(proc, __kmp_affin_fullMask)) { |
5745 | return 0; |
5746 | } |
5747 | |
5748 | return KMP_CPU_ISSET(proc, (kmp_affin_mask_t *)(*mask)); |
5749 | } |
5750 | |
5751 | #if KMP_WEIGHTED_ITERATIONS_SUPPORTED |
// Returns the first os proc id with an ATOM (efficiency) core
5753 | int __kmp_get_first_osid_with_ecore(void) { |
5754 | int low = 0; |
5755 | int high = __kmp_topology->get_num_hw_threads() - 1; |
5756 | int mid = 0; |
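  // Binary search over the hardware threads; this assumes the topology is
  // sorted so that all performance (CORE) threads precede the efficiency
  // (ATOM) threads.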
5757 | while (high - low > 1) { |
5758 | mid = (high + low) / 2; |
    if (__kmp_topology->at(mid).attrs.get_core_type() ==
5760 | KMP_HW_CORE_TYPE_CORE) { |
5761 | low = mid + 1; |
5762 | } else { |
5763 | high = mid; |
5764 | } |
5765 | } |
  if (__kmp_topology->at(mid).attrs.get_core_type() == KMP_HW_CORE_TYPE_ATOM) {
5767 | return mid; |
5768 | } |
5769 | return -1; |
5770 | } |
5771 | #endif |
5772 | |
5773 | // Dynamic affinity settings - Affinity balanced |
5774 | void __kmp_balanced_affinity(kmp_info_t *th, int nthreads) { |
5775 | KMP_DEBUG_ASSERT(th); |
5776 | bool fine_gran = true; |
5777 | int tid = th->th.th_info.ds.ds_tid; |
  const char *env_var = "KMP_AFFINITY";
5779 | |
5780 | // Do not perform balanced affinity for the hidden helper threads |
5781 | if (KMP_HIDDEN_HELPER_THREAD(__kmp_gtid_from_thread(th))) |
5782 | return; |
5783 | |
5784 | switch (__kmp_affinity.gran) { |
5785 | case KMP_HW_THREAD: |
5786 | break; |
5787 | case KMP_HW_CORE: |
5788 | if (__kmp_nThreadsPerCore > 1) { |
5789 | fine_gran = false; |
5790 | } |
5791 | break; |
5792 | case KMP_HW_SOCKET: |
5793 | if (nCoresPerPkg > 1) { |
5794 | fine_gran = false; |
5795 | } |
5796 | break; |
5797 | default: |
5798 | fine_gran = false; |
5799 | } |
5800 | |
5801 | if (__kmp_topology->is_uniform()) { |
5802 | int coreID; |
5803 | int threadID; |
5804 | // Number of hyper threads per core in HT machine |
5805 | int __kmp_nth_per_core = __kmp_avail_proc / __kmp_ncores; |
5806 | // Number of cores |
5807 | int ncores = __kmp_ncores; |
5808 | if ((nPackages > 1) && (__kmp_nth_per_core <= 1)) { |
5809 | __kmp_nth_per_core = __kmp_avail_proc / nPackages; |
5810 | ncores = nPackages; |
5811 | } |
5812 | // How many threads will be bound to each core |
5813 | int chunk = nthreads / ncores; |
    // How many cores will have an additional thread bound to them - "big cores"
5815 | int big_cores = nthreads % ncores; |
5816 | // Number of threads on the big cores |
5817 | int big_nth = (chunk + 1) * big_cores; |
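    // For example (illustrative only): nthreads = 10 on ncores = 4 gives
    // chunk = 2, big_cores = 2, big_nth = 6, so tids 0-5 land three per core
    // on cores 0 and 1, and tids 6-9 land two per core on cores 2 and 3.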
5818 | if (tid < big_nth) { |
5819 | coreID = tid / (chunk + 1); |
5820 | threadID = (tid % (chunk + 1)) % __kmp_nth_per_core; |
5821 | } else { // tid >= big_nth |
5822 | coreID = (tid - big_cores) / chunk; |
5823 | threadID = ((tid - big_cores) % chunk) % __kmp_nth_per_core; |
5824 | } |
5825 | KMP_DEBUG_ASSERT2(KMP_AFFINITY_CAPABLE(), |
5826 | "Illegal set affinity operation when not capable" ); |
5827 | |
5828 | kmp_affin_mask_t *mask = th->th.th_affin_mask; |
5829 | KMP_CPU_ZERO(mask); |
5830 | |
5831 | if (fine_gran) { |
5832 | int osID = |
          __kmp_topology->at(coreID * __kmp_nth_per_core + threadID).os_id;
5834 | KMP_CPU_SET(osID, mask); |
5835 | } else { |
5836 | for (int i = 0; i < __kmp_nth_per_core; i++) { |
5837 | int osID; |
        osID = __kmp_topology->at(coreID * __kmp_nth_per_core + i).os_id;
5839 | KMP_CPU_SET(osID, mask); |
5840 | } |
5841 | } |
5842 | if (__kmp_affinity.flags.verbose) { |
5843 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
5844 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask); |
5845 | KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(), |
5846 | tid, buf); |
5847 | } |
5848 | __kmp_affinity_get_thread_topology_info(th); |
5849 | __kmp_set_system_affinity(mask, TRUE); |
5850 | } else { // Non-uniform topology |
5851 | |
5852 | kmp_affin_mask_t *mask = th->th.th_affin_mask; |
5853 | KMP_CPU_ZERO(mask); |
5854 | |
int core_level =
    __kmp_affinity_find_core_level(__kmp_avail_proc, __kmp_aff_depth - 1);
int ncores = __kmp_affinity_compute_ncores(__kmp_avail_proc,
                                           __kmp_aff_depth - 1, core_level);
int nth_per_core = __kmp_affinity_max_proc_per_core(
    __kmp_avail_proc, __kmp_aff_depth - 1, core_level);
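// procarr[] (built during affinity initialization) is laid out as an
// ncores x nth_per_core grid: entry [i * nth_per_core + j] holds the OS
// proc id of context j on core i, or -1 if that context is unavailable.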
5861 | |
// For performance, handle the special case nthreads == __kmp_avail_proc
// separately: every thread gets its own hw thread (or whole core)
5864 | if (nthreads == __kmp_avail_proc) { |
5865 | if (fine_gran) { |
int osID = __kmp_topology->at(tid).os_id;
5867 | KMP_CPU_SET(osID, mask); |
5868 | } else { |
int core =
    __kmp_affinity_find_core(tid, __kmp_aff_depth - 1, core_level);
for (int i = 0; i < __kmp_avail_proc; i++) {
  int osID = __kmp_topology->at(i).os_id;
  if (__kmp_affinity_find_core(i, __kmp_aff_depth - 1, core_level) ==
      core) {
5875 | KMP_CPU_SET(osID, mask); |
5876 | } |
5877 | } |
5878 | } |
5879 | } else if (nthreads <= ncores) { |
5880 | |
5881 | int core = 0; |
5882 | for (int i = 0; i < ncores; i++) { |
// Check whether this core has any available processors in procarr[]
5884 | int in_mask = 0; |
5885 | for (int j = 0; j < nth_per_core; j++) { |
5886 | if (procarr[i * nth_per_core + j] != -1) { |
5887 | in_mask = 1; |
5888 | break; |
5889 | } |
5890 | } |
5891 | if (in_mask) { |
5892 | if (tid == core) { |
5893 | for (int j = 0; j < nth_per_core; j++) { |
5894 | int osID = procarr[i * nth_per_core + j]; |
5895 | if (osID != -1) { |
5896 | KMP_CPU_SET(osID, mask); |
5897 | // For fine granularity it is enough to set the first available |
5898 | // osID for this core |
5899 | if (fine_gran) { |
5900 | break; |
5901 | } |
5902 | } |
5903 | } |
5904 | break; |
5905 | } else { |
5906 | core++; |
5907 | } |
5908 | } |
5909 | } |
5910 | } else { // nthreads > ncores |
// Array holding the number of available processors on each core
5912 | int *nproc_at_core = (int *)KMP_ALLOCA(sizeof(int) * ncores); |
// Array holding the number of cores with exactly "x" available processors
5914 | int *ncores_with_x_procs = |
5915 | (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1)); |
// Array holding the number of cores with between x and nth_per_core
// available processors (suffix sums over ncores_with_x_procs)
5917 | int *ncores_with_x_to_max_procs = |
5918 | (int *)KMP_ALLOCA(sizeof(int) * (nth_per_core + 1)); |
5919 | |
5920 | for (int i = 0; i <= nth_per_core; i++) { |
5921 | ncores_with_x_procs[i] = 0; |
5922 | ncores_with_x_to_max_procs[i] = 0; |
5923 | } |
5924 | |
5925 | for (int i = 0; i < ncores; i++) { |
5926 | int cnt = 0; |
5927 | for (int j = 0; j < nth_per_core; j++) { |
5928 | if (procarr[i * nth_per_core + j] != -1) { |
5929 | cnt++; |
5930 | } |
5931 | } |
5932 | nproc_at_core[i] = cnt; |
5933 | ncores_with_x_procs[cnt]++; |
5934 | } |
5935 | |
5936 | for (int i = 0; i <= nth_per_core; i++) { |
5937 | for (int j = i; j <= nth_per_core; j++) { |
5938 | ncores_with_x_to_max_procs[i] += ncores_with_x_procs[j]; |
5939 | } |
5940 | } |
5941 | |
// Maximum number of processors (the total number of thread contexts)
5943 | int nproc = nth_per_core * ncores; |
// Array keeping the number of threads assigned to each thread context
5945 | int *newarr = (int *)__kmp_allocate(sizeof(int) * nproc); |
5946 | for (int i = 0; i < nproc; i++) { |
5947 | newarr[i] = 0; |
5948 | } |
5949 | |
5950 | int nth = nthreads; |
5951 | int flag = 0; |
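// Distribute the threads over the available contexts in passes: the
// first pass (flag == 0) places at most one thread on each context;
// later passes (flag == 1) stack additional threads onto contexts that
// are already occupied.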
5952 | while (nth > 0) { |
5953 | for (int j = 1; j <= nth_per_core; j++) { |
5954 | int cnt = ncores_with_x_to_max_procs[j]; |
5955 | for (int i = 0; i < ncores; i++) { |
// Skip cores with no available processors
5957 | if (nproc_at_core[i] == 0) { |
5958 | continue; |
5959 | } |
5960 | for (int k = 0; k < nth_per_core; k++) { |
5961 | if (procarr[i * nth_per_core + k] != -1) { |
5962 | if (newarr[i * nth_per_core + k] == 0) { |
5963 | newarr[i * nth_per_core + k] = 1; |
5964 | cnt--; |
5965 | nth--; |
5966 | break; |
5967 | } else { |
5968 | if (flag != 0) { |
5969 | newarr[i * nth_per_core + k]++; |
5970 | cnt--; |
5971 | nth--; |
5972 | break; |
5973 | } |
5974 | } |
5975 | } |
5976 | } |
5977 | if (cnt == 0 || nth == 0) { |
5978 | break; |
5979 | } |
5980 | } |
5981 | if (nth == 0) { |
5982 | break; |
5983 | } |
5984 | } |
5985 | flag = 1; |
5986 | } |
5987 | int sum = 0; |
5988 | for (int i = 0; i < nproc; i++) { |
5989 | sum += newarr[i]; |
5990 | if (sum > tid) { |
5991 | if (fine_gran) { |
5992 | int osID = procarr[i]; |
5993 | KMP_CPU_SET(osID, mask); |
5994 | } else { |
5995 | int coreID = i / nth_per_core; |
5996 | for (int ii = 0; ii < nth_per_core; ii++) { |
5997 | int osID = procarr[coreID * nth_per_core + ii]; |
5998 | if (osID != -1) { |
5999 | KMP_CPU_SET(osID, mask); |
6000 | } |
6001 | } |
6002 | } |
6003 | break; |
6004 | } |
6005 | } |
6006 | __kmp_free(newarr); |
6007 | } |
6008 | |
6009 | if (__kmp_affinity.flags.verbose) { |
6010 | char buf[KMP_AFFIN_MASK_PRINT_LEN]; |
6011 | __kmp_affinity_print_mask(buf, KMP_AFFIN_MASK_PRINT_LEN, mask); |
6012 | KMP_INFORM(BoundToOSProcSet, env_var, (kmp_int32)getpid(), __kmp_gettid(), |
6013 | tid, buf); |
6014 | } |
6015 | __kmp_affinity_get_thread_topology_info(th); |
6016 | __kmp_set_system_affinity(mask, TRUE); |
6017 | } |
6018 | } |
6019 | |
6020 | #if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_NETBSD || KMP_OS_DRAGONFLY || \ |
6021 | KMP_OS_AIX |
// We do not need this entry point on Windows because the
// GetProcessAffinityMask() API is available there.
//
// The intended usage is indicated by these steps:
// 1) The user saves the current affinity mask
// 2) Then sets the affinity by calling this function
// 3) Error checks the return value
// 4) Uses non-OpenMP parallelization
// 5) Restores the affinity to the mask saved in step 1)
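//
// For example (hypothetical user code; a Linux-specific sketch, not part
// of this library):
//
//   cpu_set_t saved;
//   pthread_getaffinity_np(pthread_self(), sizeof(saved), &saved); // 1)
//   int err = kmp_set_thread_affinity_mask_initial();              // 2)
//   if (err == 0) {                                                // 3)
//     run_non_openmp_work(); // hypothetical user function            4)
//   }
//   pthread_setaffinity_np(pthread_self(), sizeof(saved), &saved); // 5)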
6031 | #ifdef __cplusplus |
6032 | extern "C" |
6033 | #endif |
6034 | int |
6035 | kmp_set_thread_affinity_mask_initial() |
// The function returns 0 on success,
// -1 if the thread cannot be bound,
// >0 (errno) if an error occurred during binding
6039 | { |
6040 | int gtid = __kmp_get_gtid(); |
6041 | if (gtid < 0) { |
6042 | // Do not touch non-omp threads |
6043 | KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: " |
6044 | "non-omp thread, returning\n" )); |
6045 | return -1; |
6046 | } |
6047 | if (!KMP_AFFINITY_CAPABLE() || !__kmp_init_middle) { |
6048 | KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: " |
6049 | "affinity not initialized, returning\n" )); |
6050 | return -1; |
6051 | } |
6052 | KA_TRACE(30, ("kmp_set_thread_affinity_mask_initial: " |
6053 | "set full mask for thread %d\n" , |
6054 | gtid)); |
6055 | KMP_DEBUG_ASSERT(__kmp_affin_fullMask != NULL); |
6056 | #if KMP_OS_AIX |
6057 | return bindprocessor(BINDTHREAD, thread_self(), PROCESSOR_CLASS_ANY); |
6058 | #else |
6059 | return __kmp_set_system_affinity(__kmp_affin_fullMask, FALSE); |
6060 | #endif |
6061 | } |
6062 | #endif |
6063 | |
6064 | #endif // KMP_AFFINITY_SUPPORTED |
6065 | |