1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * x86 SMP booting functions |
4 | * |
5 | * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> |
6 | * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com> |
7 | * Copyright 2001 Andi Kleen, SuSE Labs. |
8 | * |
9 | * Much of the core SMP work is based on previous work by Thomas Radke, to |
10 | * whom a great many thanks are extended. |
11 | * |
12 | * Thanks to Intel for making available several different Pentium, |
13 | * Pentium Pro and Pentium-II/Xeon MP machines. |
14 | * Original development of Linux SMP code supported by Caldera. |
15 | * |
16 | * Fixes |
17 | * Felix Koop : NR_CPUS used properly |
18 | * Jose Renau : Handle single CPU case. |
19 | * Alan Cox : By repeated request 8) - Total BogoMIPS report. |
20 | * Greg Wright : Fix for kernel stacks panic. |
21 | * Erich Boleyn : MP v1.4 and additional changes. |
22 | * Matthias Sattler : Changes for 2.1 kernel map. |
23 | * Michel Lespinasse : Changes for 2.1 kernel map. |
24 | * Michael Chastain : Change trampoline.S to gnu as. |
25 | * Alan Cox : Dumb bug: 'B' step PPro's are fine |
26 | * Ingo Molnar : Added APIC timers, based on code |
27 | * from Jose Renau |
28 | * Ingo Molnar : various cleanups and rewrites |
29 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. |
30 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs |
31 | * Andi Kleen : Changed for SMP boot into long mode. |
32 | * Martin J. Bligh : Added support for multi-quad systems |
33 | * Dave Jones : Report invalid combinations of Athlon CPUs. |
34 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. |
35 | * Andi Kleen : Converted to new state machine. |
36 | * Ashok Raj : CPU hotplug support |
37 | * Glauber Costa : i386 and x86_64 integration |
38 | */ |
39 | |
40 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
41 | |
42 | #include <linux/init.h> |
43 | #include <linux/smp.h> |
44 | #include <linux/export.h> |
45 | #include <linux/sched.h> |
46 | #include <linux/sched/topology.h> |
47 | #include <linux/sched/hotplug.h> |
48 | #include <linux/sched/task_stack.h> |
49 | #include <linux/percpu.h> |
50 | #include <linux/memblock.h> |
51 | #include <linux/err.h> |
52 | #include <linux/nmi.h> |
53 | #include <linux/tboot.h> |
54 | #include <linux/gfp.h> |
55 | #include <linux/cpuidle.h> |
56 | #include <linux/kexec.h> |
57 | #include <linux/numa.h> |
58 | #include <linux/pgtable.h> |
59 | #include <linux/overflow.h> |
60 | #include <linux/stackprotector.h> |
61 | #include <linux/cpuhotplug.h> |
62 | #include <linux/mc146818rtc.h> |
63 | #include <linux/acpi.h> |
64 | |
65 | #include <asm/acpi.h> |
66 | #include <asm/cacheinfo.h> |
67 | #include <asm/cpuid/api.h> |
68 | #include <asm/desc.h> |
69 | #include <asm/nmi.h> |
70 | #include <asm/irq.h> |
71 | #include <asm/realmode.h> |
72 | #include <asm/cpu.h> |
73 | #include <asm/numa.h> |
74 | #include <asm/tlbflush.h> |
75 | #include <asm/mtrr.h> |
76 | #include <asm/mwait.h> |
77 | #include <asm/apic.h> |
78 | #include <asm/io_apic.h> |
79 | #include <asm/fpu/api.h> |
80 | #include <asm/setup.h> |
81 | #include <asm/uv/uv.h> |
82 | #include <asm/microcode.h> |
83 | #include <asm/i8259.h> |
84 | #include <asm/misc.h> |
85 | #include <asm/qspinlock.h> |
86 | #include <asm/intel-family.h> |
87 | #include <asm/cpu_device_id.h> |
88 | #include <asm/spec-ctrl.h> |
89 | #include <asm/hw_irq.h> |
90 | #include <asm/stackprotector.h> |
91 | #include <asm/sev.h> |
93 | |
94 | /* representing HT siblings of each logical CPU */ |
95 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
96 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
97 | |
98 | /* representing HT and core siblings of each logical CPU */ |
99 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); |
100 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
101 | |
102 | /* representing HT, core, and die siblings of each logical CPU */ |
103 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map); |
104 | EXPORT_PER_CPU_SYMBOL(cpu_die_map); |
105 | |
106 | /* CPUs which are the primary SMT threads */ |
107 | struct cpumask __cpu_primary_thread_mask __read_mostly; |
108 | |
109 | /* Representing CPUs for which sibling maps can be computed */ |
110 | static cpumask_var_t cpu_sibling_setup_mask; |
111 | |
112 | struct mwait_cpu_dead { |
113 | unsigned int control; |
114 | unsigned int status; |
115 | }; |
116 | |
117 | #define CPUDEAD_MWAIT_WAIT 0xDEADBEEF |
118 | #define CPUDEAD_MWAIT_KEXEC_HLT 0x4A17DEAD |
119 | |
120 | /* |
121 | * Cache line aligned data for mwait_play_dead(). Separate on purpose so |
122 | * that it's unlikely to be touched by other CPUs. |
123 | */ |
124 | static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead); |
125 | |
126 | /* Maximum number of SMT threads on any online core */ |
127 | int __read_mostly __max_smt_threads = 1; |
128 | |
129 | /* Flag to indicate if a complete sched domain rebuild is required */ |
130 | bool x86_topology_update; |
131 | |
132 | int arch_update_cpu_topology(void) |
133 | { |
134 | int retval = x86_topology_update; |
135 | |
136 | x86_topology_update = false; |
137 | return retval; |
138 | } |
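/*
 * Illustrative sketch only (not part of this file): a caller that wants the
 * scheduler to notice a changed topology sets the flag above and then asks
 * for a sched domain rebuild; arch_update_cpu_topology() reports the pending
 * update exactly once. The helper name below is hypothetical.
 */
static inline void example_request_topology_rebuild(void)
{
	x86_topology_update = true;	/* consumed by arch_update_cpu_topology() */
	/* A real caller would follow this with a sched domain rebuild request. */
}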
139 | |
140 | static unsigned int smpboot_warm_reset_vector_count; |
141 | |
142 | static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) |
143 | { |
144 | unsigned long flags; |
145 | |
146 | spin_lock_irqsave(&rtc_lock, flags); |
147 | if (!smpboot_warm_reset_vector_count++) { |
148 | CMOS_WRITE(0xa, 0xf); |
149 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4; |
150 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf; |
151 | } |
152 | spin_unlock_irqrestore(&rtc_lock, flags); |
153 | } |
154 | |
155 | static inline void smpboot_restore_warm_reset_vector(void) |
156 | { |
157 | unsigned long flags; |
158 | |
159 | /* |
160 | * Paranoid: Set warm reset code and vector here back |
161 | * to default values. |
162 | */ |
163 | spin_lock_irqsave(&rtc_lock, flags); |
164 | if (!--smpboot_warm_reset_vector_count) { |
165 | CMOS_WRITE(0, 0xf); |
166 | *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; |
167 | } |
168 | spin_unlock_irqrestore(&rtc_lock, flags); |
169 | |
170 | } |
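/*
 * Worked example for the two helpers above (illustrative only): with the
 * trampoline at physical address 0x0009A000, the warm reset vector is written
 * as segment 0x9A000 >> 4 = 0x9A00 and offset 0x9A000 & 0xf = 0, so after the
 * CMOS shutdown status byte (offset 0xf) is set to 0xA the BIOS resumes the
 * AP at 9A00:0000, i.e. right at the trampoline.
 */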
171 | |
172 | /* Run the next set of setup steps for the upcoming CPU */ |
173 | static void ap_starting(void) |
174 | { |
175 | int cpuid = smp_processor_id(); |
176 | |
177 | /* Mop up eventual mwait_play_dead() wreckage */ |
178 | this_cpu_write(mwait_cpu_dead.status, 0); |
179 | this_cpu_write(mwait_cpu_dead.control, 0); |
180 | |
181 | /* |
182 | * If woken up by an INIT in an 82489DX configuration the alive |
183 | * synchronization guarantees that the CPU does not reach this |
184 | * point before an INIT_deassert IPI reaches the local APIC, so it |
185 | * is now safe to touch the local APIC. |
186 | * |
187 | * Set up this CPU, first the APIC, which is probably redundant on |
188 | * most boards. |
189 | */ |
190 | apic_ap_setup(); |
191 | |
192 | /* Save the processor parameters. */ |
193 | identify_secondary_cpu(cpuid); |
194 | |
195 | /* |
196 | * The topology information must be up to date before |
197 | * notify_cpu_starting(). |
198 | */ |
199 | set_cpu_sibling_map(cpuid); |
200 | |
201 | ap_init_aperfmperf(); |
202 | |
203 | pr_debug("Stack at about %p\n", &cpuid); |
204 | |
205 | wmb(); |
206 | |
207 | /* |
208 | * This runs the AP through all the cpuhp states to its target |
209 | * state CPUHP_ONLINE. |
210 | */ |
211 | notify_cpu_starting(cpuid); |
212 | } |
213 | |
214 | static void ap_calibrate_delay(void) |
215 | { |
216 | /* |
217 | * Calibrate the delay loop and update loops_per_jiffy in cpu_data. |
218 | * identify_secondary_cpu() stored a value that is close but not as |
219 | * accurate as the value just calculated. |
220 | * |
221 | * As this is invoked after the TSC synchronization check, |
222 | * calibrate_delay_is_known() will skip the calibration routine |
223 | * when TSC is synchronized across sockets. |
224 | */ |
225 | calibrate_delay(); |
226 | cpu_data(smp_processor_id()).loops_per_jiffy = loops_per_jiffy; |
227 | } |
228 | |
229 | /* |
230 | * Activate a secondary processor. |
231 | */ |
232 | static void notrace __noendbr start_secondary(void *unused) |
233 | { |
234 | /* |
235 | * Don't put *anything* except direct CPU state initialization |
236 | * before cpu_init(). SMP booting is so fragile that we want to |
237 | * limit the things done here to the most necessary things. |
238 | */ |
239 | cr4_init(); |
240 | |
241 | /* |
242 | * 32-bit specific. 64-bit reaches this code with the correct page |
243 | * table established. Yet another historical divergence. |
244 | */ |
245 | if (IS_ENABLED(CONFIG_X86_32)) { |
246 | /* switch away from the initial page table */ |
247 | load_cr3(swapper_pg_dir); |
248 | __flush_tlb_all(); |
249 | } |
250 | |
251 | cpu_init_exception_handling(false); |
252 | |
253 | /* |
254 | * Load the microcode before reaching the AP alive synchronization |
255 | * point below so it is not part of the full per CPU serialized |
256 | * bringup part when "parallel" bringup is enabled. |
257 | * |
258 | * That's even safe when hyperthreading is enabled in the CPU as |
259 | * the core code starts the primary threads first and leaves the |
260 | * secondary threads waiting for SIPI. Loading microcode on |
261 | * physical cores concurrently is a safe operation. |
262 | * |
263 | * This covers both the Intel specific issue that concurrent |
264 | * microcode loading on SMT siblings must be prohibited and the |
265 | * vendor independent issue that microcode loading which changes |
266 | * CPUID, MSRs etc. must be strictly serialized to maintain |
267 | * software state correctness. |
268 | */ |
269 | load_ucode_ap(); |
270 | |
271 | /* |
272 | * Synchronization point with the hotplug core. Sets this CPUs |
273 | * synchronization state to ALIVE and spin-waits for the control CPU to |
274 | * release this CPU for further bringup. |
275 | */ |
276 | cpuhp_ap_sync_alive(); |
277 | |
278 | cpu_init(); |
279 | fpu__init_cpu(); |
280 | rcutree_report_cpu_starting(raw_smp_processor_id()); |
281 | x86_cpuinit.early_percpu_clock_init(); |
282 | |
283 | ap_starting(); |
284 | |
285 | /* Check TSC synchronization with the control CPU. */ |
286 | check_tsc_sync_target(); |
287 | |
288 | /* |
289 | * Calibrate the delay loop after the TSC synchronization check. |
290 | * This allows to skip the calibration when TSC is synchronized |
291 | * across sockets. |
292 | */ |
293 | ap_calibrate_delay(); |
294 | |
295 | speculative_store_bypass_ht_init(); |
296 | |
297 | /* |
298 | * Lock vector_lock, set CPU online and bring the vector |
299 | * allocator online. Online must be set with vector_lock held |
300 | * to prevent a concurrent irq setup/teardown from seeing a |
301 | * half valid vector space. |
302 | */ |
303 | lock_vector_lock(); |
304 | set_cpu_online(smp_processor_id(), true); |
305 | lapic_online(); |
306 | unlock_vector_lock(); |
307 | x86_platform.nmi_init(); |
308 | |
309 | /* enable local interrupts */ |
310 | local_irq_enable(); |
311 | |
312 | x86_cpuinit.setup_percpu_clockev(); |
313 | |
314 | wmb(); |
315 | cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); |
316 | } |
317 | ANNOTATE_NOENDBR_SYM(start_secondary); |
318 | |
319 | static bool |
320 | topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
321 | { |
322 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
323 | |
324 | return (cpu_to_node(cpu1) == cpu_to_node(cpu2)); |
325 | } |
326 | |
327 | static bool |
328 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) |
329 | { |
330 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
331 | |
332 | return !WARN_ONCE(!topology_same_node(c, o), |
333 | "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! " |
334 | "[node: %d != %d]. Ignoring dependency.\n", |
335 | cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); |
336 | } |
337 | |
338 | #define link_mask(mfunc, c1, c2) \ |
339 | do { \ |
340 | cpumask_set_cpu((c1), mfunc(c2)); \ |
341 | cpumask_set_cpu((c2), mfunc(c1)); \ |
342 | } while (0) |
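/*
 * Illustrative use of link_mask() (sketch, not called anywhere): linking
 * CPUs 2 and 3 as SMT siblings marks each CPU in the other's mask, which
 * keeps the sibling relation symmetric by construction.
 */
static inline void example_link_smt_pair(void)
{
	link_mask(topology_sibling_cpumask, 2, 3);
	/* Both cpumask_test_cpu(3, topology_sibling_cpumask(2)) and the
	 * mirrored test now succeed. */
}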
343 | |
344 | static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
345 | { |
346 | if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { |
347 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
348 | |
349 | if (c->topo.pkg_id == o->topo.pkg_id && |
350 | c->topo.die_id == o->topo.die_id && |
351 | c->topo.amd_node_id == o->topo.amd_node_id && |
352 | per_cpu_llc_id(cpu1) == per_cpu_llc_id(cpu2)) { |
353 | if (c->topo.core_id == o->topo.core_id) |
354 | return topology_sane(c, o, "smt"); |
355 | |
356 | if ((c->topo.cu_id != 0xff) && |
357 | (o->topo.cu_id != 0xff) && |
358 | (c->topo.cu_id == o->topo.cu_id)) |
359 | return topology_sane(c, o, "smt"); |
360 | } |
361 | |
362 | } else if (c->topo.pkg_id == o->topo.pkg_id && |
363 | c->topo.die_id == o->topo.die_id && |
364 | c->topo.core_id == o->topo.core_id) { |
365 | return topology_sane(c, o, "smt"); |
366 | } |
367 | |
368 | return false; |
369 | } |
370 | |
371 | static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
372 | { |
373 | if (c->topo.pkg_id != o->topo.pkg_id || c->topo.die_id != o->topo.die_id) |
374 | return false; |
375 | |
376 | if (cpu_feature_enabled(X86_FEATURE_TOPOEXT) && topology_amd_nodes_per_pkg() > 1) |
377 | return c->topo.amd_node_id == o->topo.amd_node_id; |
378 | |
379 | return true; |
380 | } |
381 | |
382 | static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
383 | { |
384 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
385 | |
386 | /* If the arch didn't set up l2c_id, fall back to SMT */ |
387 | if (per_cpu_l2c_id(cpu1) == BAD_APICID) |
388 | return match_smt(c, o); |
389 | |
390 | /* Do not match if L2 cache id does not match: */ |
391 | if (per_cpu_l2c_id(cpu1) != per_cpu_l2c_id(cpu2)) |
392 | return false; |
393 | |
394 | return topology_sane(c, o, "l2c"); |
395 | } |
396 | |
397 | /* |
398 | * Unlike the other levels, we do not enforce keeping a |
399 | * multicore group inside a NUMA node. If this happens, we will |
400 | * discard the MC level of the topology later. |
401 | */ |
402 | static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
403 | { |
404 | if (c->topo.pkg_id == o->topo.pkg_id) |
405 | return true; |
406 | return false; |
407 | } |
408 | |
409 | /* |
410 | * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs. |
411 | * |
412 | * Any Intel CPU that has multiple nodes per package and does not |
413 | * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology. |
414 | * |
415 | * When in SNC mode, these CPUs enumerate an LLC that is shared |
416 | * by multiple NUMA nodes. The LLC is shared for off-package data |
417 | * access but private to the NUMA node (half of the package) for |
418 | * on-package access. CPUID (the source of the information about |
419 | * the LLC) can only enumerate the cache as shared or unshared, |
420 | * but not this particular configuration. |
421 | */ |
422 | |
423 | static const struct x86_cpu_id intel_cod_cpu[] = { |
424 | X86_MATCH_VFM(INTEL_HASWELL_X, 0), /* COD */ |
425 | X86_MATCH_VFM(INTEL_BROADWELL_X, 0), /* COD */ |
426 | X86_MATCH_VFM(INTEL_ANY, 1), /* SNC */ |
427 | {} |
428 | }; |
429 | |
430 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
431 | { |
432 | const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu); |
433 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
434 | bool intel_snc = id && id->driver_data; |
435 | |
436 | /* Do not match if we do not have a valid APICID for cpu: */ |
437 | if (per_cpu_llc_id(cpu1) == BAD_APICID) |
438 | return false; |
439 | |
440 | /* Do not match if LLC id does not match: */ |
441 | if (per_cpu_llc_id(cpu1) != per_cpu_llc_id(cpu2)) |
442 | return false; |
443 | |
444 | /* |
445 | * Allow the SNC topology without warning. Return of false |
446 | * means 'c' does not share the LLC of 'o'. This will be |
447 | * reflected to userspace. |
448 | */ |
449 | if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc) |
450 | return false; |
451 | |
452 | return topology_sane(c, o, "llc"); |
453 | } |
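/*
 * Example of the SNC case handled above (illustrative): on a two-node
 * Sub-NUMA Cluster package, a CPU in node 0 and a CPU in node 1 report the
 * same LLC id via CPUID, yet match_llc() returns false for the pair, so
 * cpu_llc_shared_mask() stays per node and no topology warning is emitted
 * for the split.
 */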
454 | |
455 | |
456 | static inline int x86_sched_itmt_flags(void) |
457 | { |
458 | return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0; |
459 | } |
460 | |
461 | #ifdef CONFIG_SCHED_MC |
462 | static int x86_core_flags(void) |
463 | { |
464 | return cpu_core_flags() | x86_sched_itmt_flags(); |
465 | } |
466 | #endif |
467 | #ifdef CONFIG_SCHED_CLUSTER |
468 | static int x86_cluster_flags(void) |
469 | { |
470 | return cpu_cluster_flags() | x86_sched_itmt_flags(); |
471 | } |
472 | #endif |
473 | |
474 | /* |
475 | * Set if a package/die has multiple NUMA nodes inside. |
476 | * AMD Magny-Cours, Intel Cluster-on-Die, and Intel |
477 | * Sub-NUMA Clustering have this. |
478 | */ |
479 | static bool x86_has_numa_in_package; |
480 | |
481 | static struct sched_domain_topology_level x86_topology[6]; |
482 | |
483 | static void __init build_sched_topology(void) |
484 | { |
485 | int i = 0; |
486 | |
487 | #ifdef CONFIG_SCHED_SMT |
488 | x86_topology[i++] = (struct sched_domain_topology_level){ |
489 | cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) |
490 | }; |
491 | #endif |
492 | #ifdef CONFIG_SCHED_CLUSTER |
493 | x86_topology[i++] = (struct sched_domain_topology_level){ |
494 | cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS) |
495 | }; |
496 | #endif |
497 | #ifdef CONFIG_SCHED_MC |
498 | x86_topology[i++] = (struct sched_domain_topology_level){ |
499 | cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) |
500 | }; |
501 | #endif |
502 | /* |
503 | * When there is NUMA topology inside the package skip the PKG domain |
504 | * since the NUMA domains will auto-magically create the right spanning |
505 | * domains based on the SLIT. |
506 | */ |
507 | if (!x86_has_numa_in_package) { |
508 | x86_topology[i++] = (struct sched_domain_topology_level){ |
509 | cpu_cpu_mask, x86_sched_itmt_flags, SD_INIT_NAME(PKG) |
510 | }; |
511 | } |
512 | |
513 | /* |
514 | * There must be one trailing NULL entry left. |
515 | */ |
516 | BUG_ON(i >= ARRAY_SIZE(x86_topology)-1); |
517 | |
518 | set_sched_topology(x86_topology); |
519 | } |
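/*
 * Example outcome of build_sched_topology() (illustrative): with
 * CONFIG_SCHED_SMT, CONFIG_SCHED_CLUSTER and CONFIG_SCHED_MC all enabled and
 * no NUMA nodes inside the package, x86_topology[] ends up as
 * { SMT, CLS, MC, PKG } plus the required empty terminator - four levels out
 * of the six slots, which keeps the BUG_ON() above satisfied.
 */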
520 | |
521 | void set_cpu_sibling_map(int cpu) |
522 | { |
523 | bool has_smt = __max_threads_per_core > 1; |
524 | bool has_mp = has_smt || topology_num_cores_per_package() > 1; |
525 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
526 | struct cpuinfo_x86 *o; |
527 | int i, threads; |
528 | |
529 | cpumask_set_cpu(cpu, cpu_sibling_setup_mask); |
530 | |
531 | if (!has_mp) { |
532 | cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu)); |
533 | cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu)); |
534 | cpumask_set_cpu(cpu, cpu_l2c_shared_mask(cpu)); |
535 | cpumask_set_cpu(cpu, topology_core_cpumask(cpu)); |
536 | cpumask_set_cpu(cpu, topology_die_cpumask(cpu)); |
537 | c->booted_cores = 1; |
538 | return; |
539 | } |
540 | |
541 | for_each_cpu(i, cpu_sibling_setup_mask) { |
542 | o = &cpu_data(i); |
543 | |
544 | if (match_pkg(c, o) && !topology_same_node(c, o)) |
545 | x86_has_numa_in_package = true; |
546 | |
547 | if ((i == cpu) || (has_smt && match_smt(c, o))) |
548 | link_mask(topology_sibling_cpumask, cpu, i); |
549 | |
550 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
551 | link_mask(cpu_llc_shared_mask, cpu, i); |
552 | |
553 | if ((i == cpu) || (has_mp && match_l2c(c, o))) |
554 | link_mask(cpu_l2c_shared_mask, cpu, i); |
555 | |
556 | if ((i == cpu) || (has_mp && match_die(c, o))) |
557 | link_mask(topology_die_cpumask, cpu, i); |
558 | } |
559 | |
560 | threads = cpumask_weight(topology_sibling_cpumask(cpu)); |
561 | if (threads > __max_smt_threads) |
562 | __max_smt_threads = threads; |
563 | |
564 | for_each_cpu(i, topology_sibling_cpumask(cpu)) |
565 | cpu_data(i).smt_active = threads > 1; |
566 | |
567 | /* |
568 | * This needs a separate iteration over the cpus because we rely on all |
569 | * topology_sibling_cpumask links to be set-up. |
570 | */ |
571 | for_each_cpu(i, cpu_sibling_setup_mask) { |
572 | o = &cpu_data(i); |
573 | |
574 | if ((i == cpu) || (has_mp && match_pkg(c, o))) { |
575 | link_mask(topology_core_cpumask, cpu, i); |
576 | |
577 | /* |
578 | * Does this new cpu bringup a new core? |
579 | */ |
580 | if (threads == 1) { |
581 | /* |
582 | * for each core in package, increment |
583 | * the booted_cores for this new cpu |
584 | */ |
585 | if (cpumask_first( |
586 | topology_sibling_cpumask(i)) == i) |
587 | c->booted_cores++; |
588 | /* |
589 | * increment the core count for all |
590 | * the other cpus in this package |
591 | */ |
592 | if (i != cpu) |
593 | cpu_data(i).booted_cores++; |
594 | } else if (i != cpu && !c->booted_cores) |
595 | c->booted_cores = cpu_data(i).booted_cores; |
596 | } |
597 | } |
598 | } |
599 | |
600 | /* maps the cpu to the sched domain representing multi-core */ |
601 | const struct cpumask *cpu_coregroup_mask(int cpu) |
602 | { |
603 | return cpu_llc_shared_mask(cpu); |
604 | } |
605 | |
606 | const struct cpumask *cpu_clustergroup_mask(int cpu) |
607 | { |
608 | return cpu_l2c_shared_mask(cpu); |
609 | } |
610 | EXPORT_SYMBOL_GPL(cpu_clustergroup_mask); |
611 | |
612 | static void impress_friends(void) |
613 | { |
614 | int cpu; |
615 | unsigned long bogosum = 0; |
616 | /* |
617 | * Allow the user to impress friends. |
618 | */ |
619 | pr_debug("Before bogomips\n"); |
620 | for_each_online_cpu(cpu) |
621 | bogosum += cpu_data(cpu).loops_per_jiffy; |
622 | |
623 | pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n", |
624 | num_online_cpus(), |
625 | bogosum/(500000/HZ), |
626 | (bogosum/(5000/HZ))%100); |
627 | |
628 | pr_debug("Before bogocount - setting activated=1\n"); |
629 | } |
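/*
 * Worked example for the summary line above (illustrative): with HZ=250 and
 * four CPUs each reporting loops_per_jiffy = 2000000, bogosum is 8000000;
 * 8000000 / (500000/250) = 4000 and (8000000 / (5000/250)) % 100 = 0, so the
 * kernel prints "Total of 4 processors activated (4000.00 BogoMIPS)".
 */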
630 | |
631 | /* |
632 | * The Multiprocessor Specification 1.4 (1997) example code suggests |
633 | * that there should be a 10ms delay between the BSP asserting INIT |
634 | * and de-asserting INIT, when starting a remote processor. |
635 | * But that slows boot and resume on modern processors, which include |
636 | * many cores and don't require that delay. |
637 | * |
638 | * Cmdline "cpu_init_udelay=" is available to override this delay. |
639 | */ |
640 | #define UDELAY_10MS_LEGACY 10000 |
641 | |
642 | static unsigned int init_udelay = UINT_MAX; |
643 | |
644 | static int __init cpu_init_udelay(char *str) |
645 | { |
646 | get_option(&str, &init_udelay); |
647 | |
648 | return 0; |
649 | } |
650 | early_param("cpu_init_udelay", cpu_init_udelay); |
651 | |
652 | static void __init smp_set_init_udelay(void) |
653 | { |
654 | /* if cmdline changed it from default, leave it alone */ |
655 | if (init_udelay != UINT_MAX) |
656 | return; |
657 | |
658 | /* if modern processor, use no delay */ |
659 | if ((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && boot_cpu_data.x86_vfm >= INTEL_PENTIUM_PRO) || |
660 | (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON && boot_cpu_data.x86 >= 0x18) || |
661 | (boot_cpu_data.x86_vendor == X86_VENDOR_AMD && boot_cpu_data.x86 >= 0xF)) { |
662 | init_udelay = 0; |
663 | return; |
664 | } |
665 | /* else, use legacy delay */ |
666 | init_udelay = UDELAY_10MS_LEGACY; |
667 | } |
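/*
 * Illustrative command line usage for the above: booting with
 * "cpu_init_udelay=10000" forces the legacy 10ms INIT deassert delay even on
 * modern parts, while "cpu_init_udelay=0" removes it on hardware that would
 * otherwise fall back to UDELAY_10MS_LEGACY.
 */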
668 | |
669 | /* |
670 | * Wake up AP by INIT, INIT, STARTUP sequence. |
671 | */ |
672 | static void send_init_sequence(u32 phys_apicid) |
673 | { |
674 | int maxlvt = lapic_get_maxlvt(); |
675 | |
676 | /* Be paranoid about clearing APIC errors. */ |
677 | if (APIC_INTEGRATED(boot_cpu_apic_version)) { |
678 | /* Due to the Pentium erratum 3AP. */ |
679 | if (maxlvt > 3) |
680 | apic_write(APIC_ESR, 0); |
681 | apic_read(APIC_ESR); |
682 | } |
683 | |
684 | /* Assert INIT on the target CPU */ |
685 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, phys_apicid); |
686 | safe_apic_wait_icr_idle(); |
687 | |
688 | udelay(init_udelay); |
689 | |
690 | /* Deassert INIT on the target CPU */ |
691 | apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid); |
692 | safe_apic_wait_icr_idle(); |
693 | } |
694 | |
695 | /* |
696 | * Wake up AP by INIT, INIT, STARTUP sequence. |
697 | */ |
698 | static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip, unsigned int cpu) |
699 | { |
700 | unsigned long send_status = 0, accept_status = 0; |
701 | int num_starts, j, maxlvt; |
702 | |
703 | preempt_disable(); |
704 | maxlvt = lapic_get_maxlvt(); |
705 | send_init_sequence(phys_apicid); |
706 | |
707 | mb(); |
708 | |
709 | /* |
710 | * Should we send STARTUP IPIs ? |
711 | * |
712 | * Determine this based on the APIC version. |
713 | * If we don't have an integrated APIC, don't send the STARTUP IPIs. |
714 | */ |
715 | if (APIC_INTEGRATED(boot_cpu_apic_version)) |
716 | num_starts = 2; |
717 | else |
718 | num_starts = 0; |
719 | |
720 | /* |
721 | * Run STARTUP IPI loop. |
722 | */ |
723 | pr_debug("#startup loops: %d\n", num_starts); |
724 | |
725 | for (j = 1; j <= num_starts; j++) { |
726 | pr_debug("Sending STARTUP #%d\n", j); |
727 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
728 | apic_write(APIC_ESR, 0); |
729 | apic_read(APIC_ESR); |
730 | pr_debug("After apic_write\n"); |
731 | |
732 | /* |
733 | * STARTUP IPI |
734 | */ |
735 | |
736 | /* Target chip */ |
737 | /* Boot on the stack */ |
738 | /* Kick the second */ |
739 | apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12), |
740 | phys_apicid); |
741 | |
742 | /* |
743 | * Give the other CPU some time to accept the IPI. |
744 | */ |
745 | if (init_udelay == 0) |
746 | udelay(10); |
747 | else |
748 | udelay(300); |
749 | |
750 | pr_debug("Startup point 1\n"); |
751 | |
752 | pr_debug("Waiting for send to finish...\n"); |
753 | send_status = safe_apic_wait_icr_idle(); |
754 | |
755 | /* |
756 | * Give the other CPU some time to accept the IPI. |
757 | */ |
758 | if (init_udelay == 0) |
759 | udelay(10); |
760 | else |
761 | udelay(200); |
762 | |
763 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
764 | apic_write(APIC_ESR, 0); |
765 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
766 | if (send_status || accept_status) |
767 | break; |
768 | } |
769 | pr_debug("After Startup\n"); |
770 | |
771 | if (send_status) |
772 | pr_err("APIC never delivered???\n"); |
773 | if (accept_status) |
774 | pr_err("APIC delivery error (%lx)\n", accept_status); |
775 | |
776 | preempt_enable(); |
777 | return (send_status | accept_status); |
778 | } |
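/*
 * Worked example for the STARTUP IPI above (illustrative): with the
 * trampoline at physical 0x0009A000, start_eip >> 12 = 0x9A is placed in the
 * ICR vector field and the woken AP begins fetching real mode code at
 * vector * 4KiB = 0x9A000, which is why the trampoline must be 4KiB aligned
 * and located below 1MiB.
 */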
779 | |
780 | /* reduce the number of lines printed when booting a large cpu count system */ |
781 | static void announce_cpu(int cpu, int apicid) |
782 | { |
783 | static int width, node_width, first = 1; |
784 | static int current_node = NUMA_NO_NODE; |
785 | int node = early_cpu_to_node(cpu); |
786 | |
787 | if (!width) |
788 | width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */ |
789 | |
790 | if (!node_width) |
791 | node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */ |
792 | |
793 | if (system_state < SYSTEM_RUNNING) { |
794 | if (first) |
795 | pr_info("x86: Booting SMP configuration:\n"); |
796 | |
797 | if (node != current_node) { |
798 | if (current_node > (-1)) |
799 | pr_cont("\n"); |
800 | current_node = node; |
801 | |
802 | printk(KERN_INFO ".... node %*s#%d, CPUs: ", |
803 | node_width - num_digits(node), " ", node); |
804 | } |
805 | |
806 | /* Add padding for the BSP */ |
807 | if (first) |
808 | pr_cont("%*s", width + 1, " "); |
809 | first = 0; |
810 | |
811 | pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu); |
812 | } else |
813 | pr_info("Booting Node %d Processor %d APIC 0x%x\n", |
814 | node, cpu, apicid); |
815 | } |
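/*
 * Example of the boot-time output produced above (illustrative, exact
 * spacing depends on the possible CPU and node counts):
 *
 *   x86: Booting SMP configuration:
 *   .... node  #0, CPUs:        #1  #2  #3
 *   .... node  #1, CPUs:    #4  #5  #6  #7
 *
 * Once system_state reaches SYSTEM_RUNNING, hotplugged CPUs get the one-line
 * "Booting Node %d Processor %d APIC 0x%x" form instead.
 */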
816 | |
817 | int common_cpu_up(unsigned int cpu, struct task_struct *idle) |
818 | { |
819 | int ret; |
820 | |
821 | /* Just in case we booted with a single CPU. */ |
822 | alternatives_enable_smp(); |
823 | |
824 | per_cpu(current_task, cpu) = idle; |
825 | cpu_init_stack_canary(cpu, idle); |
826 | |
827 | /* Initialize the interrupt stack(s) */ |
828 | ret = irq_init_percpu_irqstack(cpu); |
829 | if (ret) |
830 | return ret; |
831 | |
832 | #ifdef CONFIG_X86_32 |
833 | /* Stack for startup_32 can be just as for start_secondary onwards */ |
834 | per_cpu(cpu_current_top_of_stack, cpu) = task_top_of_stack(idle); |
835 | #endif |
836 | return 0; |
837 | } |
838 | |
839 | /* |
840 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad |
841 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. |
842 | * Returns zero if startup was successfully sent, else error code from |
843 | * ->wakeup_secondary_cpu. |
844 | */ |
845 | static int do_boot_cpu(u32 apicid, unsigned int cpu, struct task_struct *idle) |
846 | { |
847 | unsigned long start_ip = real_mode_header->trampoline_start; |
848 | int ret; |
849 | |
850 | #ifdef CONFIG_X86_64 |
851 | /* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */ |
852 | if (apic->wakeup_secondary_cpu_64) |
853 | start_ip = real_mode_header->trampoline_start64; |
854 | #endif |
855 | idle->thread.sp = (unsigned long)task_pt_regs(idle); |
856 | initial_code = (unsigned long)start_secondary; |
857 | |
858 | if (IS_ENABLED(CONFIG_X86_32)) { |
859 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu); |
860 | initial_stack = idle->thread.sp; |
861 | } else if (!(smpboot_control & STARTUP_PARALLEL_MASK)) { |
862 | smpboot_control = cpu; |
863 | } |
864 | |
865 | /* Enable the espfix hack for this CPU */ |
866 | init_espfix_ap(cpu); |
867 | |
868 | /* So we see what's up */ |
869 | announce_cpu(cpu, apicid); |
870 | |
871 | /* |
872 | * This grunge runs the startup process for |
873 | * the targeted processor. |
874 | */ |
875 | if (x86_platform.legacy.warm_reset) { |
876 | |
877 | pr_debug("Setting warm reset code and vector.\n"); |
878 | |
879 | smpboot_setup_warm_reset_vector(start_ip); |
880 | /* |
881 | * Be paranoid about clearing APIC errors. |
882 | */ |
883 | if (APIC_INTEGRATED(boot_cpu_apic_version)) { |
884 | apic_write(APIC_ESR, 0); |
885 | apic_read(APIC_ESR); |
886 | } |
887 | } |
888 | |
889 | smp_mb(); |
890 | |
891 | /* |
892 | * Wake up a CPU in different cases: |
893 | * - Use a method from the APIC driver if one defined, with wakeup |
894 | * straight to 64-bit mode preferred over wakeup to RM. |
895 | * Otherwise, |
896 | * - Use an INIT boot APIC message |
897 | */ |
898 | if (apic->wakeup_secondary_cpu_64) |
899 | ret = apic->wakeup_secondary_cpu_64(apicid, start_ip, cpu); |
900 | else if (apic->wakeup_secondary_cpu) |
901 | ret = apic->wakeup_secondary_cpu(apicid, start_ip, cpu); |
902 | else |
903 | ret = wakeup_secondary_cpu_via_init(apicid, start_ip, cpu); |
904 | |
905 | /* If the wakeup mechanism failed, cleanup the warm reset vector */ |
906 | if (ret) |
907 | arch_cpuhp_cleanup_kick_cpu(cpu); |
908 | return ret; |
909 | } |
910 | |
911 | int native_kick_ap(unsigned int cpu, struct task_struct *tidle) |
912 | { |
913 | u32 apicid = apic->cpu_present_to_apicid(cpu); |
914 | int err; |
915 | |
916 | lockdep_assert_irqs_enabled(); |
917 | |
918 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu); |
919 | |
920 | if (apicid == BAD_APICID || !apic_id_valid(apicid)) { |
921 | pr_err("CPU %u has invalid APIC ID %x. Aborting bringup\n", cpu, apicid); |
922 | return -EINVAL; |
923 | } |
924 | |
925 | if (!test_bit(apicid, phys_cpu_present_map)) { |
926 | pr_err("CPU %u APIC ID %x is not present. Aborting bringup\n", cpu, apicid); |
927 | return -EINVAL; |
928 | } |
929 | |
930 | /* |
931 | * Save current MTRR state in case it was changed since early boot |
932 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: |
933 | */ |
934 | mtrr_save_state(); |
935 | |
936 | /* the FPU context is blank, nobody can own it */ |
937 | per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL; |
938 | |
939 | err = common_cpu_up(cpu, tidle); |
940 | if (err) |
941 | return err; |
942 | |
943 | err = do_boot_cpu(apicid, cpu, tidle); |
944 | if (err) |
945 | pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu); |
946 | |
947 | return err; |
948 | } |
949 | |
950 | int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle) |
951 | { |
952 | return smp_ops.kick_ap_alive(cpu, tidle); |
953 | } |
954 | |
955 | void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) |
956 | { |
957 | /* Cleanup possible dangling ends... */ |
958 | if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset) |
959 | smpboot_restore_warm_reset_vector(); |
960 | } |
961 | |
962 | void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) |
963 | { |
964 | if (smp_ops.cleanup_dead_cpu) |
965 | smp_ops.cleanup_dead_cpu(cpu); |
966 | |
967 | if (system_state == SYSTEM_RUNNING) |
968 | pr_info("CPU %u is now offline\n", cpu); |
969 | } |
970 | |
971 | void arch_cpuhp_sync_state_poll(void) |
972 | { |
973 | if (smp_ops.poll_sync_state) |
974 | smp_ops.poll_sync_state(); |
975 | } |
976 | |
977 | /** |
978 | * arch_disable_smp_support() - Disables SMP support for x86 at boottime |
979 | */ |
980 | void __init arch_disable_smp_support(void) |
981 | { |
982 | disable_ioapic_support(); |
983 | } |
984 | |
985 | /* |
986 | * Fall back to non SMP mode after errors. |
987 | * |
988 | * RED-PEN audit/test this more. I bet there is more state messed up here. |
989 | */ |
990 | static __init void disable_smp(void) |
991 | { |
992 | pr_info("SMP disabled\n"); |
993 | |
994 | disable_ioapic_support(); |
995 | topology_reset_possible_cpus_up(); |
996 | |
997 | cpumask_set_cpu(0, topology_sibling_cpumask(0)); |
998 | cpumask_set_cpu(0, topology_core_cpumask(0)); |
999 | cpumask_set_cpu(0, topology_die_cpumask(0)); |
1000 | } |
1001 | |
1002 | void __init smp_prepare_cpus_common(void) |
1003 | { |
1004 | unsigned int cpu, node; |
1005 | |
1006 | /* Mark all except the boot CPU as hotpluggable */ |
1007 | for_each_possible_cpu(cpu) { |
1008 | if (cpu) |
1009 | per_cpu(cpu_info.cpu_index, cpu) = nr_cpu_ids; |
1010 | } |
1011 | |
1012 | for_each_possible_cpu(cpu) { |
1013 | node = cpu_to_node(cpu); |
1014 | |
1015 | zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu), GFP_KERNEL, node); |
1016 | zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu), GFP_KERNEL, node); |
1017 | zalloc_cpumask_var_node(&per_cpu(cpu_die_map, cpu), GFP_KERNEL, node); |
1018 | zalloc_cpumask_var_node(&per_cpu(cpu_llc_shared_map, cpu), GFP_KERNEL, node); |
1019 | zalloc_cpumask_var_node(&per_cpu(cpu_l2c_shared_map, cpu), GFP_KERNEL, node); |
1020 | } |
1021 | |
1022 | set_cpu_sibling_map(0); |
1023 | } |
1024 | |
1025 | void __init smp_prepare_boot_cpu(void) |
1026 | { |
1027 | smp_ops.smp_prepare_boot_cpu(); |
1028 | } |
1029 | |
1030 | #ifdef CONFIG_X86_64 |
1031 | /* Establish whether parallel bringup can be supported. */ |
1032 | bool __init arch_cpuhp_init_parallel_bringup(void) |
1033 | { |
1034 | if (!x86_cpuinit.parallel_bringup) { |
1035 | pr_info("Parallel CPU startup disabled by the platform\n"); |
1036 | return false; |
1037 | } |
1038 | |
1039 | smpboot_control = STARTUP_READ_APICID; |
1040 | pr_debug("Parallel CPU startup enabled: 0x%08x\n", smpboot_control); |
1041 | return true; |
1042 | } |
1043 | #endif |
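/*
 * Note on the control word above (illustrative summary): in the serial case
 * do_boot_cpu() stores the plain CPU number in smpboot_control for one AP at
 * a time, whereas STARTUP_READ_APICID lets every AP derive its own identity
 * from its APIC ID during early startup, which is what makes kicking several
 * APs in parallel safe.
 */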
1044 | |
1045 | /* |
1046 | * Prepare for SMP bootup. |
1047 | * @max_cpus: configured maximum number of CPUs. It is a legacy parameter |
1048 | * for common interface support. |
1049 | */ |
1050 | void __init native_smp_prepare_cpus(unsigned int max_cpus) |
1051 | { |
1052 | smp_prepare_cpus_common(); |
1053 | |
1054 | switch (apic_intr_mode) { |
1055 | case APIC_PIC: |
1056 | case APIC_VIRTUAL_WIRE_NO_CONFIG: |
1057 | disable_smp(); |
1058 | return; |
1059 | case APIC_SYMMETRIC_IO_NO_ROUTING: |
1060 | disable_smp(); |
1061 | /* Setup local timer */ |
1062 | x86_init.timers.setup_percpu_clockev(); |
1063 | return; |
1064 | case APIC_VIRTUAL_WIRE: |
1065 | case APIC_SYMMETRIC_IO: |
1066 | break; |
1067 | } |
1068 | |
1069 | /* Setup local timer */ |
1070 | x86_init.timers.setup_percpu_clockev(); |
1071 | |
1072 | pr_info("CPU0: "); |
1073 | print_cpu_info(&cpu_data(0)); |
1074 | |
1075 | uv_system_init(); |
1076 | |
1077 | smp_set_init_udelay(); |
1078 | |
1079 | speculative_store_bypass_ht_init(); |
1080 | |
1081 | snp_set_wakeup_secondary_cpu(); |
1082 | } |
1083 | |
1084 | void arch_thaw_secondary_cpus_begin(void) |
1085 | { |
1086 | set_cache_aps_delayed_init(true); |
1087 | } |
1088 | |
1089 | void arch_thaw_secondary_cpus_end(void) |
1090 | { |
1091 | cache_aps_init(); |
1092 | } |
1093 | |
1094 | /* |
1095 | * Early setup to make printk work. |
1096 | */ |
1097 | void __init native_smp_prepare_boot_cpu(void) |
1098 | { |
1099 | int me = smp_processor_id(); |
1100 | |
1101 | /* SMP handles this from setup_per_cpu_areas() */ |
1102 | if (!IS_ENABLED(CONFIG_SMP)) |
1103 | switch_gdt_and_percpu_base(me); |
1104 | |
1105 | native_pv_lock_init(); |
1106 | } |
1107 | |
1108 | void __init native_smp_cpus_done(unsigned int max_cpus) |
1109 | { |
1110 | pr_debug("Boot done\n"); |
1111 | |
1112 | build_sched_topology(); |
1113 | nmi_selftest(); |
1114 | impress_friends(); |
1115 | cache_aps_init(); |
1116 | } |
1117 | |
1118 | /* correctly size the local cpu masks */ |
1119 | void __init setup_cpu_local_masks(void) |
1120 | { |
1121 | alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask); |
1122 | } |
1123 | |
1124 | #ifdef CONFIG_HOTPLUG_CPU |
1125 | |
1126 | /* Recompute SMT state for all CPUs on offline */ |
1127 | static void recompute_smt_state(void) |
1128 | { |
1129 | int max_threads, cpu; |
1130 | |
1131 | max_threads = 0; |
1132 | for_each_online_cpu (cpu) { |
1133 | int threads = cpumask_weight(topology_sibling_cpumask(cpu)); |
1134 | |
1135 | if (threads > max_threads) |
1136 | max_threads = threads; |
1137 | } |
1138 | __max_smt_threads = max_threads; |
1139 | } |
1140 | |
1141 | static void remove_siblinginfo(int cpu) |
1142 | { |
1143 | int sibling; |
1144 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
1145 | |
1146 | for_each_cpu(sibling, topology_core_cpumask(cpu)) { |
1147 | cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); |
1148 | /* |
1149 | * last thread sibling in this cpu core going down |
1150 | */ |
1151 | if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) |
1152 | cpu_data(sibling).booted_cores--; |
1153 | } |
1154 | |
1155 | for_each_cpu(sibling, topology_die_cpumask(cpu)) |
1156 | cpumask_clear_cpu(cpu, topology_die_cpumask(sibling)); |
1157 | |
1158 | for_each_cpu(sibling, topology_sibling_cpumask(cpu)) { |
1159 | cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); |
1160 | if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1) |
1161 | cpu_data(sibling).smt_active = false; |
1162 | } |
1163 | |
1164 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) |
1165 | cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling)); |
1166 | for_each_cpu(sibling, cpu_l2c_shared_mask(cpu)) |
1167 | cpumask_clear_cpu(cpu, cpu_l2c_shared_mask(sibling)); |
1168 | cpumask_clear(cpu_llc_shared_mask(cpu)); |
1169 | cpumask_clear(cpu_l2c_shared_mask(cpu)); |
1170 | cpumask_clear(topology_sibling_cpumask(cpu)); |
1171 | cpumask_clear(topology_core_cpumask(cpu)); |
1172 | cpumask_clear(topology_die_cpumask(cpu)); |
1173 | c->topo.core_id = 0; |
1174 | c->booted_cores = 0; |
1175 | cpumask_clear_cpu(cpu, cpu_sibling_setup_mask); |
1176 | recompute_smt_state(); |
1177 | } |
1178 | |
1179 | static void remove_cpu_from_maps(int cpu) |
1180 | { |
1181 | set_cpu_online(cpu, false); |
1182 | numa_remove_cpu(cpu); |
1183 | } |
1184 | |
1185 | void cpu_disable_common(void) |
1186 | { |
1187 | int cpu = smp_processor_id(); |
1188 | |
1189 | remove_siblinginfo(cpu); |
1190 | |
1191 | /* |
1192 | * Stop allowing kernel-mode FPU. This is needed so that if the CPU is |
1193 | * brought online again, the initial state is not allowed: |
1194 | */ |
1195 | this_cpu_write(kernel_fpu_allowed, false); |
1196 | |
1197 | /* It's now safe to remove this processor from the online map */ |
1198 | lock_vector_lock(); |
1199 | remove_cpu_from_maps(cpu); |
1200 | unlock_vector_lock(); |
1201 | fixup_irqs(); |
1202 | lapic_offline(); |
1203 | } |
1204 | |
1205 | int native_cpu_disable(void) |
1206 | { |
1207 | int ret; |
1208 | |
1209 | ret = lapic_can_unplug_cpu(); |
1210 | if (ret) |
1211 | return ret; |
1212 | |
1213 | cpu_disable_common(); |
1214 | |
1215 | /* |
1216 | * Disable the local APIC. Otherwise IPI broadcasts will reach |
1217 | * it. It still responds normally to INIT, NMI, SMI, and SIPI |
1218 | * messages. |
1219 | * |
1220 | * Disabling the APIC must happen after cpu_disable_common() |
1221 | * which invokes fixup_irqs(). |
1222 | * |
1223 | * Disabling the APIC preserves already set bits in IRR, but |
1224 | * an interrupt arriving after disabling the local APIC does not |
1225 | * set the corresponding IRR bit. |
1226 | * |
1227 | * fixup_irqs() scans IRR for set bits so it can raise a not |
1228 | * yet handled interrupt on the new destination CPU via an IPI |
1229 | * but obviously it can't do so for IRR bits which are not set. |
1230 | * IOW, interrupts arriving after disabling the local APIC will |
1231 | * be lost. |
1232 | */ |
1233 | apic_soft_disable(); |
1234 | |
1235 | return 0; |
1236 | } |
1237 | |
1238 | void play_dead_common(void) |
1239 | { |
1240 | idle_task_exit(); |
1241 | |
1242 | cpuhp_ap_report_dead(); |
1243 | |
1244 | local_irq_disable(); |
1245 | } |
1246 | |
1247 | void __noreturn mwait_play_dead(unsigned int eax_hint) |
1248 | { |
1249 | struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); |
1250 | |
1251 | /* Set up state for the kexec() hack below */ |
1252 | md->status = CPUDEAD_MWAIT_WAIT; |
1253 | md->control = CPUDEAD_MWAIT_WAIT; |
1254 | |
1255 | wbinvd(); |
1256 | |
1257 | while (1) { |
1258 | /* |
1259 | * The CLFLUSH is a workaround for erratum AAI65 for |
1260 | * the Xeon 7400 series. It's not clear it is actually |
1261 | * needed, but it should be harmless in either case. |
1262 | * The WBINVD is insufficient due to the spurious-wakeup |
1263 | * case where we return around the loop. |
1264 | */ |
1265 | mb(); |
1266 | clflush(md); |
1267 | mb(); |
1268 | __monitor(md, 0, 0); |
1269 | mb(); |
1270 | __mwait(eax_hint, 0); |
1271 | |
1272 | if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) { |
1273 | /* |
1274 | * Kexec is about to happen. Don't go back into mwait() as |
1275 | * the kexec kernel might overwrite text and data including |
1276 | * page tables and stack. So mwait() would resume when the |
1277 | * monitor cache line is written to and then the CPU goes |
1278 | * south due to overwritten text, page tables and stack. |
1279 | * |
1280 | * Note: This does _NOT_ protect against a stray MCE, NMI, |
1281 | * SMI. They will resume execution at the instruction |
1282 | * following the HLT instruction and run into the problem |
1283 | * which this is trying to prevent. |
1284 | */ |
1285 | WRITE_ONCE(md->status, CPUDEAD_MWAIT_KEXEC_HLT); |
1286 | while(1) |
1287 | native_halt(); |
1288 | } |
1289 | } |
1290 | } |
1291 | |
1292 | /* |
1293 | * We need to flush the caches before going to sleep, lest we have |
1294 | * dirty data in our caches when we come back up. |
1295 | */ |
1296 | static inline void mwait_play_dead_cpuid_hint(void) |
1297 | { |
1298 | unsigned int eax, ebx, ecx, edx; |
1299 | unsigned int highest_cstate = 0; |
1300 | unsigned int highest_subcstate = 0; |
1301 | int i; |
1302 | |
1303 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || |
1304 | boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) |
1305 | return; |
1306 | if (!this_cpu_has(X86_FEATURE_MWAIT)) |
1307 | return; |
1308 | if (!this_cpu_has(X86_FEATURE_CLFLUSH)) |
1309 | return; |
1310 | |
1311 | eax = CPUID_LEAF_MWAIT; |
1312 | ecx = 0; |
1313 | native_cpuid(&eax, &ebx, &ecx, &edx); |
1314 | |
1315 | /* |
1316 | * eax will be 0 if EDX enumeration is not valid. |
1317 | * Initialized below to cstate, sub_cstate value when EDX is valid. |
1318 | */ |
1319 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { |
1320 | eax = 0; |
1321 | } else { |
1322 | edx >>= MWAIT_SUBSTATE_SIZE; |
1323 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { |
1324 | if (edx & MWAIT_SUBSTATE_MASK) { |
1325 | highest_cstate = i; |
1326 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; |
1327 | } |
1328 | } |
1329 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | |
1330 | (highest_subcstate - 1); |
1331 | } |
1332 | |
1333 | mwait_play_dead(eax); |
1334 | } |
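/*
 * Worked example for the hint computation above (illustrative): if CPUID
 * leaf 5 returns EDX = 0x00001120, the per-C-state sub-state counts are 2
 * for C1, 1 for C2 and 1 for C3. The loop then ends with highest_cstate = 2
 * and highest_subcstate = 1, so the MWAIT hint passed to mwait_play_dead()
 * is (2 << 4) | (1 - 1) = 0x20, i.e. the deepest state the CPU enumerates.
 */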
1335 | |
1336 | /* |
1337 | * Kick all "offline" CPUs out of mwait on kexec(). See comment in |
1338 | * mwait_play_dead(). |
1339 | */ |
1340 | void smp_kick_mwait_play_dead(void) |
1341 | { |
1342 | u32 newstate = CPUDEAD_MWAIT_KEXEC_HLT; |
1343 | struct mwait_cpu_dead *md; |
1344 | unsigned int cpu, i; |
1345 | |
1346 | for_each_cpu_andnot(cpu, cpu_present_mask, cpu_online_mask) { |
1347 | md = per_cpu_ptr(&mwait_cpu_dead, cpu); |
1348 | |
1349 | /* Does it sit in mwait_play_dead() ? */ |
1350 | if (READ_ONCE(md->status) != CPUDEAD_MWAIT_WAIT) |
1351 | continue; |
1352 | |
1353 | /* Wait up to 5ms */ |
1354 | for (i = 0; READ_ONCE(md->status) != newstate && i < 1000; i++) { |
1355 | /* Bring it out of mwait */ |
1356 | WRITE_ONCE(md->control, newstate); |
1357 | udelay(5); |
1358 | } |
1359 | |
1360 | if (READ_ONCE(md->status) != newstate) |
1361 | pr_err_once("CPU%u is stuck in mwait_play_dead()\n", cpu); |
1362 | } |
1363 | } |
1364 | |
1365 | void __noreturn hlt_play_dead(void) |
1366 | { |
1367 | if (__this_cpu_read(cpu_info.x86) >= 4) |
1368 | wbinvd(); |
1369 | |
1370 | while (1) |
1371 | native_halt(); |
1372 | } |
1373 | |
1374 | /* |
1375 | * native_play_dead() is essentially a __noreturn function, but it can't |
1376 | * be marked as such as the compiler may complain about it. |
1377 | */ |
1378 | void native_play_dead(void) |
1379 | { |
1380 | if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) |
1381 | __update_spec_ctrl(0); |
1382 | |
1383 | play_dead_common(); |
1384 | tboot_shutdown(TB_SHUTDOWN_WFS); |
1385 | |
1386 | mwait_play_dead_cpuid_hint(); |
1387 | if (cpuidle_play_dead()) |
1388 | hlt_play_dead(); |
1389 | } |
1390 | |
1391 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
1392 | int native_cpu_disable(void) |
1393 | { |
1394 | return -ENOSYS; |
1395 | } |
1396 | |
1397 | void native_play_dead(void) |
1398 | { |
1399 | BUG(); |
1400 | } |
1401 | |
1402 | #endif |
1403 |