1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * x86 SMP booting functions |
4 | * |
5 | * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk> |
6 | * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com> |
7 | * Copyright 2001 Andi Kleen, SuSE Labs. |
8 | * |
9 | * Much of the core SMP work is based on previous work by Thomas Radke, to |
10 | * whom a great many thanks are extended. |
11 | * |
12 | * Thanks to Intel for making available several different Pentium, |
13 | * Pentium Pro and Pentium-II/Xeon MP machines. |
14 | * Original development of Linux SMP code supported by Caldera. |
15 | * |
16 | * Fixes |
17 | * Felix Koop : NR_CPUS used properly |
18 | * Jose Renau : Handle single CPU case. |
19 | * Alan Cox : By repeated request 8) - Total BogoMIPS report. |
20 | * Greg Wright : Fix for kernel stacks panic. |
21 | * Erich Boleyn : MP v1.4 and additional changes. |
22 | * Matthias Sattler : Changes for 2.1 kernel map. |
23 | * Michel Lespinasse : Changes for 2.1 kernel map. |
24 | * Michael Chastain : Change trampoline.S to gnu as. |
25 | * Alan Cox : Dumb bug: 'B' step PPro's are fine |
26 | * Ingo Molnar : Added APIC timers, based on code |
27 | * from Jose Renau |
28 | * Ingo Molnar : various cleanups and rewrites |
29 | * Tigran Aivazian : fixed "0.00 in /proc/uptime on SMP" bug. |
30 | * Maciej W. Rozycki : Bits for genuine 82489DX APICs |
31 | * Andi Kleen : Changed for SMP boot into long mode. |
32 | * Martin J. Bligh : Added support for multi-quad systems |
33 | * Dave Jones : Report invalid combinations of Athlon CPUs. |
34 | * Rusty Russell : Hacked into shape for new "hotplug" boot process. |
35 | * Andi Kleen : Converted to new state machine. |
36 | * Ashok Raj : CPU hotplug support |
37 | * Glauber Costa : i386 and x86_64 integration |
38 | */ |
39 | |
40 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
41 | |
42 | #include <linux/init.h> |
43 | #include <linux/smp.h> |
44 | #include <linux/export.h> |
45 | #include <linux/sched.h> |
46 | #include <linux/sched/topology.h> |
47 | #include <linux/sched/hotplug.h> |
48 | #include <linux/sched/task_stack.h> |
49 | #include <linux/percpu.h> |
50 | #include <linux/memblock.h> |
51 | #include <linux/err.h> |
52 | #include <linux/nmi.h> |
53 | #include <linux/tboot.h> |
54 | #include <linux/gfp.h> |
55 | #include <linux/cpuidle.h> |
56 | #include <linux/kexec.h> |
57 | #include <linux/numa.h> |
58 | #include <linux/pgtable.h> |
59 | #include <linux/overflow.h> |
60 | #include <linux/stackprotector.h> |
61 | #include <linux/cpuhotplug.h> |
62 | #include <linux/mc146818rtc.h> |
63 | |
64 | #include <asm/acpi.h> |
65 | #include <asm/cacheinfo.h> |
66 | #include <asm/desc.h> |
67 | #include <asm/nmi.h> |
68 | #include <asm/irq.h> |
69 | #include <asm/realmode.h> |
70 | #include <asm/cpu.h> |
71 | #include <asm/numa.h> |
72 | #include <asm/tlbflush.h> |
73 | #include <asm/mtrr.h> |
74 | #include <asm/mwait.h> |
75 | #include <asm/apic.h> |
76 | #include <asm/io_apic.h> |
77 | #include <asm/fpu/api.h> |
78 | #include <asm/setup.h> |
79 | #include <asm/uv/uv.h> |
80 | #include <asm/microcode.h> |
81 | #include <asm/i8259.h> |
82 | #include <asm/misc.h> |
83 | #include <asm/qspinlock.h> |
84 | #include <asm/intel-family.h> |
85 | #include <asm/cpu_device_id.h> |
86 | #include <asm/spec-ctrl.h> |
87 | #include <asm/hw_irq.h> |
88 | #include <asm/stackprotector.h> |
89 | #include <asm/sev.h> |
91 | |
92 | /* representing HT siblings of each logical CPU */ |
93 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map); |
94 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
95 | |
96 | /* representing HT and core siblings of each logical CPU */ |
97 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map); |
98 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
99 | |
100 | /* representing HT, core, and die siblings of each logical CPU */ |
101 | DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map); |
102 | EXPORT_PER_CPU_SYMBOL(cpu_die_map); |
103 | |
104 | /* Per CPU bogomips and other parameters */ |
105 | DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info); |
106 | EXPORT_PER_CPU_SYMBOL(cpu_info); |
107 | |
108 | /* CPUs which are the primary SMT threads */ |
109 | struct cpumask __cpu_primary_thread_mask __read_mostly; |
110 | |
111 | /* Representing CPUs for which sibling maps can be computed */ |
112 | static cpumask_var_t cpu_sibling_setup_mask; |
113 | |
114 | struct mwait_cpu_dead { |
115 | unsigned int control; |
116 | unsigned int status; |
117 | }; |
118 | |
119 | #define CPUDEAD_MWAIT_WAIT 0xDEADBEEF |
120 | #define CPUDEAD_MWAIT_KEXEC_HLT 0x4A17DEAD |
121 | |
122 | /* |
123 | * Cache line aligned data for mwait_play_dead(). Separate on purpose so |
124 | * that it's unlikely to be touched by other CPUs. |
125 | */ |
126 | static DEFINE_PER_CPU_ALIGNED(struct mwait_cpu_dead, mwait_cpu_dead); |
127 | |
128 | /* Logical package management. */ |
129 | struct logical_maps { |
130 | u32 phys_pkg_id; |
131 | u32 phys_die_id; |
132 | u32 logical_pkg_id; |
133 | u32 logical_die_id; |
134 | }; |
135 | |
/* Temporary workaround until the full topology mechanics are in place */
137 | static DEFINE_PER_CPU_READ_MOSTLY(struct logical_maps, logical_maps) = { |
138 | .phys_pkg_id = U32_MAX, |
139 | .phys_die_id = U32_MAX, |
140 | }; |
141 | |
142 | unsigned int __max_logical_packages __read_mostly; |
143 | EXPORT_SYMBOL(__max_logical_packages); |
144 | static unsigned int logical_packages __read_mostly; |
145 | static unsigned int logical_die __read_mostly; |
146 | |
147 | /* Maximum number of SMT threads on any online core */ |
148 | int __read_mostly __max_smt_threads = 1; |
149 | |
150 | /* Flag to indicate if a complete sched domain rebuild is required */ |
151 | bool x86_topology_update; |
152 | |
153 | int arch_update_cpu_topology(void) |
154 | { |
155 | int retval = x86_topology_update; |
156 | |
157 | x86_topology_update = false; |
158 | return retval; |
159 | } |
160 | |
161 | static unsigned int smpboot_warm_reset_vector_count; |
162 | |
163 | static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip) |
164 | { |
165 | unsigned long flags; |
166 | |
167 | spin_lock_irqsave(&rtc_lock, flags); |
168 | if (!smpboot_warm_reset_vector_count++) { |
169 | CMOS_WRITE(0xa, 0xf); |
170 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) = start_eip >> 4; |
171 | *((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = start_eip & 0xf; |
172 | } |
	spin_unlock_irqrestore(&rtc_lock, flags);
174 | } |
175 | |
176 | static inline void smpboot_restore_warm_reset_vector(void) |
177 | { |
178 | unsigned long flags; |
179 | |
180 | /* |
181 | * Paranoid: Set warm reset code and vector here back |
182 | * to default values. |
183 | */ |
184 | spin_lock_irqsave(&rtc_lock, flags); |
185 | if (!--smpboot_warm_reset_vector_count) { |
186 | CMOS_WRITE(0, 0xf); |
187 | *((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0; |
188 | } |
	spin_unlock_irqrestore(&rtc_lock, flags);
}
192 | |
193 | /* Run the next set of setup steps for the upcoming CPU */ |
194 | static void ap_starting(void) |
195 | { |
196 | int cpuid = smp_processor_id(); |
197 | |
198 | /* Mop up eventual mwait_play_dead() wreckage */ |
199 | this_cpu_write(mwait_cpu_dead.status, 0); |
200 | this_cpu_write(mwait_cpu_dead.control, 0); |
201 | |
202 | /* |
203 | * If woken up by an INIT in an 82489DX configuration the alive |
204 | * synchronization guarantees that the CPU does not reach this |
205 | * point before an INIT_deassert IPI reaches the local APIC, so it |
206 | * is now safe to touch the local APIC. |
207 | * |
208 | * Set up this CPU, first the APIC, which is probably redundant on |
209 | * most boards. |
210 | */ |
211 | apic_ap_setup(); |
212 | |
213 | /* Save the processor parameters. */ |
	smp_store_cpu_info(cpuid);
215 | |
216 | /* |
217 | * The topology information must be up to date before |
218 | * notify_cpu_starting(). |
219 | */ |
220 | set_cpu_sibling_map(cpuid); |
221 | |
222 | ap_init_aperfmperf(); |
223 | |
224 | pr_debug("Stack at about %p\n" , &cpuid); |
225 | |
226 | wmb(); |
227 | |
228 | /* |
229 | * This runs the AP through all the cpuhp states to its target |
230 | * state CPUHP_ONLINE. |
231 | */ |
	notify_cpu_starting(cpuid);
233 | } |
234 | |
235 | static void ap_calibrate_delay(void) |
236 | { |
237 | /* |
238 | * Calibrate the delay loop and update loops_per_jiffy in cpu_data. |
239 | * smp_store_cpu_info() stored a value that is close but not as |
240 | * accurate as the value just calculated. |
241 | * |
242 | * As this is invoked after the TSC synchronization check, |
243 | * calibrate_delay_is_known() will skip the calibration routine |
244 | * when TSC is synchronized across sockets. |
245 | */ |
246 | calibrate_delay(); |
247 | cpu_data(smp_processor_id()).loops_per_jiffy = loops_per_jiffy; |
248 | } |
249 | |
250 | /* |
251 | * Activate a secondary processor. |
252 | */ |
253 | static void notrace start_secondary(void *unused) |
254 | { |
255 | /* |
256 | * Don't put *anything* except direct CPU state initialization |
	 * before cpu_init(); SMP booting is fragile enough that we want
	 * to limit the things done here to the bare minimum.
259 | */ |
260 | cr4_init(); |
261 | |
262 | /* |
263 | * 32-bit specific. 64-bit reaches this code with the correct page |
264 | * table established. Yet another historical divergence. |
265 | */ |
266 | if (IS_ENABLED(CONFIG_X86_32)) { |
267 | /* switch away from the initial page table */ |
268 | load_cr3(swapper_pg_dir); |
269 | __flush_tlb_all(); |
270 | } |
271 | |
272 | cpu_init_exception_handling(); |
273 | |
274 | /* |
275 | * Load the microcode before reaching the AP alive synchronization |
276 | * point below so it is not part of the full per CPU serialized |
277 | * bringup part when "parallel" bringup is enabled. |
278 | * |
279 | * That's even safe when hyperthreading is enabled in the CPU as |
280 | * the core code starts the primary threads first and leaves the |
281 | * secondary threads waiting for SIPI. Loading microcode on |
282 | * physical cores concurrently is a safe operation. |
283 | * |
284 | * This covers both the Intel specific issue that concurrent |
285 | * microcode loading on SMT siblings must be prohibited and the |
	 * vendor independent issue that microcode loading which changes
287 | * CPUID, MSRs etc. must be strictly serialized to maintain |
288 | * software state correctness. |
289 | */ |
290 | load_ucode_ap(); |
291 | |
292 | /* |
293 | * Synchronization point with the hotplug core. Sets this CPUs |
294 | * synchronization state to ALIVE and spin-waits for the control CPU to |
295 | * release this CPU for further bringup. |
296 | */ |
297 | cpuhp_ap_sync_alive(); |
298 | |
299 | cpu_init(); |
300 | fpu__init_cpu(); |
301 | rcutree_report_cpu_starting(raw_smp_processor_id()); |
302 | x86_cpuinit.early_percpu_clock_init(); |
303 | |
304 | ap_starting(); |
305 | |
306 | /* Check TSC synchronization with the control CPU. */ |
307 | check_tsc_sync_target(); |
308 | |
309 | /* |
310 | * Calibrate the delay loop after the TSC synchronization check. |
311 | * This allows to skip the calibration when TSC is synchronized |
312 | * across sockets. |
313 | */ |
314 | ap_calibrate_delay(); |
315 | |
316 | speculative_store_bypass_ht_init(); |
317 | |
318 | /* |
319 | * Lock vector_lock, set CPU online and bring the vector |
320 | * allocator online. Online must be set with vector_lock held |
321 | * to prevent a concurrent irq setup/teardown from seeing a |
322 | * half valid vector space. |
323 | */ |
324 | lock_vector_lock(); |
	set_cpu_online(smp_processor_id(), true);
326 | lapic_online(); |
327 | unlock_vector_lock(); |
328 | x86_platform.nmi_init(); |
329 | |
330 | /* enable local interrupts */ |
331 | local_irq_enable(); |
332 | |
333 | x86_cpuinit.setup_percpu_clockev(); |
334 | |
335 | wmb(); |
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
337 | } |
338 | |
339 | /** |
 * topology_phys_to_logical_pkg - Map a physical package id to a logical one
341 | * @phys_pkg: The physical package id to map |
342 | * |
343 | * Returns logical package id or -1 if not found |
344 | */ |
345 | int topology_phys_to_logical_pkg(unsigned int phys_pkg) |
346 | { |
347 | int cpu; |
348 | |
349 | for_each_possible_cpu(cpu) { |
350 | if (per_cpu(logical_maps.phys_pkg_id, cpu) == phys_pkg) |
351 | return per_cpu(logical_maps.logical_pkg_id, cpu); |
352 | } |
353 | return -1; |
354 | } |
355 | EXPORT_SYMBOL(topology_phys_to_logical_pkg); |
356 | |
357 | /** |
358 | * topology_phys_to_logical_die - Map a physical die id to logical |
359 | * @die_id: The physical die id to map |
360 | * @cur_cpu: The CPU for which the mapping is done |
361 | * |
362 | * Returns logical die id or -1 if not found |
363 | */ |
364 | static int topology_phys_to_logical_die(unsigned int die_id, unsigned int cur_cpu) |
365 | { |
366 | int cpu, proc_id = cpu_data(cur_cpu).topo.pkg_id; |
367 | |
368 | for_each_possible_cpu(cpu) { |
369 | if (per_cpu(logical_maps.phys_pkg_id, cpu) == proc_id && |
370 | per_cpu(logical_maps.phys_die_id, cpu) == die_id) |
371 | return per_cpu(logical_maps.logical_die_id, cpu); |
372 | } |
373 | return -1; |
374 | } |
375 | |
376 | /** |
377 | * topology_update_package_map - Update the physical to logical package map |
378 | * @pkg: The physical package id as retrieved via CPUID |
379 | * @cpu: The cpu for which this is updated |
380 | */ |
381 | int topology_update_package_map(unsigned int pkg, unsigned int cpu) |
382 | { |
383 | int new; |
384 | |
385 | /* Already available somewhere? */ |
386 | new = topology_phys_to_logical_pkg(pkg); |
387 | if (new >= 0) |
388 | goto found; |
389 | |
390 | new = logical_packages++; |
391 | if (new != pkg) { |
392 | pr_info("CPU %u Converting physical %u to logical package %u\n" , |
393 | cpu, pkg, new); |
394 | } |
395 | found: |
396 | per_cpu(logical_maps.phys_pkg_id, cpu) = pkg; |
397 | per_cpu(logical_maps.logical_pkg_id, cpu) = new; |
398 | cpu_data(cpu).topo.logical_pkg_id = new; |
399 | return 0; |
400 | } |
401 | /** |
402 | * topology_update_die_map - Update the physical to logical die map |
403 | * @die: The die id as retrieved via CPUID |
404 | * @cpu: The cpu for which this is updated |
405 | */ |
406 | int topology_update_die_map(unsigned int die, unsigned int cpu) |
407 | { |
408 | int new; |
409 | |
410 | /* Already available somewhere? */ |
	new = topology_phys_to_logical_die(die, cpu);
412 | if (new >= 0) |
413 | goto found; |
414 | |
415 | new = logical_die++; |
416 | if (new != die) { |
417 | pr_info("CPU %u Converting physical %u to logical die %u\n" , |
418 | cpu, die, new); |
419 | } |
420 | found: |
421 | per_cpu(logical_maps.phys_die_id, cpu) = die; |
422 | per_cpu(logical_maps.logical_die_id, cpu) = new; |
423 | cpu_data(cpu).topo.logical_die_id = new; |
424 | return 0; |
425 | } |
426 | |
427 | static void __init smp_store_boot_cpu_info(void) |
428 | { |
429 | int id = 0; /* CPU 0 */ |
430 | struct cpuinfo_x86 *c = &cpu_data(id); |
431 | |
432 | *c = boot_cpu_data; |
433 | c->cpu_index = id; |
	topology_update_package_map(c->topo.pkg_id, id);
	topology_update_die_map(c->topo.die_id, id);
436 | c->initialized = true; |
437 | } |
438 | |
439 | /* |
440 | * The bootstrap kernel entry code has set these up. Save them for |
441 | * a given CPU |
442 | */ |
443 | void smp_store_cpu_info(int id) |
444 | { |
445 | struct cpuinfo_x86 *c = &cpu_data(id); |
446 | |
447 | /* Copy boot_cpu_data only on the first bringup */ |
448 | if (!c->initialized) |
449 | *c = boot_cpu_data; |
450 | c->cpu_index = id; |
451 | /* |
452 | * During boot time, CPU0 has this setup already. Save the info when |
453 | * bringing up an AP. |
454 | */ |
455 | identify_secondary_cpu(c); |
456 | c->initialized = true; |
457 | } |
458 | |
459 | static bool |
460 | topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
461 | { |
462 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
463 | |
	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
465 | } |
466 | |
467 | static bool |
468 | topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name) |
469 | { |
470 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
471 | |
472 | return !WARN_ONCE(!topology_same_node(c, o), |
473 | "sched: CPU #%d's %s-sibling CPU #%d is not on the same node! " |
474 | "[node: %d != %d]. Ignoring dependency.\n" , |
475 | cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2)); |
476 | } |
477 | |
478 | #define link_mask(mfunc, c1, c2) \ |
479 | do { \ |
480 | cpumask_set_cpu((c1), mfunc(c2)); \ |
481 | cpumask_set_cpu((c2), mfunc(c1)); \ |
482 | } while (0) |
483 | |
484 | static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
485 | { |
486 | if (boot_cpu_has(X86_FEATURE_TOPOEXT)) { |
487 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
488 | |
489 | if (c->topo.pkg_id == o->topo.pkg_id && |
490 | c->topo.die_id == o->topo.die_id && |
		    per_cpu_llc_id(cpu1) == per_cpu_llc_id(cpu2)) {
			if (c->topo.core_id == o->topo.core_id)
				return topology_sane(c, o, "smt");
494 | |
495 | if ((c->topo.cu_id != 0xff) && |
496 | (o->topo.cu_id != 0xff) && |
497 | (c->topo.cu_id == o->topo.cu_id)) |
498 | return topology_sane(c, o, name: "smt" ); |
499 | } |
500 | |
501 | } else if (c->topo.pkg_id == o->topo.pkg_id && |
502 | c->topo.die_id == o->topo.die_id && |
503 | c->topo.core_id == o->topo.core_id) { |
504 | return topology_sane(c, o, name: "smt" ); |
505 | } |
506 | |
507 | return false; |
508 | } |
509 | |
510 | static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
511 | { |
512 | if (c->topo.pkg_id == o->topo.pkg_id && |
513 | c->topo.die_id == o->topo.die_id) |
514 | return true; |
515 | return false; |
516 | } |
517 | |
518 | static bool match_l2c(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
519 | { |
520 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
521 | |
522 | /* If the arch didn't set up l2c_id, fall back to SMT */ |
	if (per_cpu_l2c_id(cpu1) == BAD_APICID)
524 | return match_smt(c, o); |
525 | |
526 | /* Do not match if L2 cache id does not match: */ |
	if (per_cpu_l2c_id(cpu1) != per_cpu_l2c_id(cpu2))
528 | return false; |
529 | |
530 | return topology_sane(c, o, name: "l2c" ); |
531 | } |
532 | |
533 | /* |
534 | * Unlike the other levels, we do not enforce keeping a |
535 | * multicore group inside a NUMA node. If this happens, we will |
536 | * discard the MC level of the topology later. |
537 | */ |
538 | static bool match_pkg(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
539 | { |
540 | if (c->topo.pkg_id == o->topo.pkg_id) |
541 | return true; |
542 | return false; |
543 | } |
544 | |
545 | /* |
546 | * Define intel_cod_cpu[] for Intel COD (Cluster-on-Die) CPUs. |
547 | * |
548 | * Any Intel CPU that has multiple nodes per package and does not |
549 | * match intel_cod_cpu[] has the SNC (Sub-NUMA Cluster) topology. |
550 | * |
551 | * When in SNC mode, these CPUs enumerate an LLC that is shared |
552 | * by multiple NUMA nodes. The LLC is shared for off-package data |
553 | * access but private to the NUMA node (half of the package) for |
554 | * on-package access. CPUID (the source of the information about |
555 | * the LLC) can only enumerate the cache as shared or unshared, |
556 | * but not this particular configuration. |
557 | */ |
558 | |
559 | static const struct x86_cpu_id intel_cod_cpu[] = { |
560 | X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, 0), /* COD */ |
561 | X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, 0), /* COD */ |
562 | X86_MATCH_INTEL_FAM6_MODEL(ANY, 1), /* SNC */ |
563 | {} |
564 | }; |
565 | |
566 | static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o) |
567 | { |
	const struct x86_cpu_id *id = x86_match_cpu(intel_cod_cpu);
569 | int cpu1 = c->cpu_index, cpu2 = o->cpu_index; |
570 | bool intel_snc = id && id->driver_data; |
571 | |
572 | /* Do not match if we do not have a valid APICID for cpu: */ |
	if (per_cpu_llc_id(cpu1) == BAD_APICID)
574 | return false; |
575 | |
576 | /* Do not match if LLC id does not match: */ |
	if (per_cpu_llc_id(cpu1) != per_cpu_llc_id(cpu2))
578 | return false; |
579 | |
580 | /* |
581 | * Allow the SNC topology without warning. Return of false |
582 | * means 'c' does not share the LLC of 'o'. This will be |
583 | * reflected to userspace. |
584 | */ |
585 | if (match_pkg(c, o) && !topology_same_node(c, o) && intel_snc) |
586 | return false; |
587 | |
588 | return topology_sane(c, o, name: "llc" ); |
589 | } |
590 | |
591 | |
592 | static inline int x86_sched_itmt_flags(void) |
593 | { |
594 | return sysctl_sched_itmt_enabled ? SD_ASYM_PACKING : 0; |
595 | } |
596 | |
597 | #ifdef CONFIG_SCHED_MC |
598 | static int x86_core_flags(void) |
599 | { |
600 | return cpu_core_flags() | x86_sched_itmt_flags(); |
601 | } |
602 | #endif |
603 | #ifdef CONFIG_SCHED_SMT |
604 | static int x86_smt_flags(void) |
605 | { |
606 | return cpu_smt_flags(); |
607 | } |
608 | #endif |
609 | #ifdef CONFIG_SCHED_CLUSTER |
610 | static int x86_cluster_flags(void) |
611 | { |
612 | return cpu_cluster_flags() | x86_sched_itmt_flags(); |
613 | } |
614 | #endif |
615 | |
616 | static int x86_die_flags(void) |
617 | { |
618 | if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU)) |
619 | return x86_sched_itmt_flags(); |
620 | |
621 | return 0; |
622 | } |
623 | |
624 | /* |
625 | * Set if a package/die has multiple NUMA nodes inside. |
626 | * AMD Magny-Cours, Intel Cluster-on-Die, and Intel |
627 | * Sub-NUMA Clustering have this. |
628 | */ |
629 | static bool x86_has_numa_in_package; |
630 | |
631 | static struct sched_domain_topology_level x86_topology[6]; |
632 | |
633 | static void __init build_sched_topology(void) |
634 | { |
635 | int i = 0; |
636 | |
637 | #ifdef CONFIG_SCHED_SMT |
638 | x86_topology[i++] = (struct sched_domain_topology_level){ |
639 | cpu_smt_mask, x86_smt_flags, SD_INIT_NAME(SMT) |
640 | }; |
641 | #endif |
642 | #ifdef CONFIG_SCHED_CLUSTER |
643 | x86_topology[i++] = (struct sched_domain_topology_level){ |
644 | cpu_clustergroup_mask, x86_cluster_flags, SD_INIT_NAME(CLS) |
645 | }; |
646 | #endif |
647 | #ifdef CONFIG_SCHED_MC |
648 | x86_topology[i++] = (struct sched_domain_topology_level){ |
649 | cpu_coregroup_mask, x86_core_flags, SD_INIT_NAME(MC) |
650 | }; |
651 | #endif |
652 | /* |
653 | * When there is NUMA topology inside the package skip the PKG domain |
654 | * since the NUMA domains will auto-magically create the right spanning |
655 | * domains based on the SLIT. |
656 | */ |
657 | if (!x86_has_numa_in_package) { |
658 | x86_topology[i++] = (struct sched_domain_topology_level){ |
659 | cpu_cpu_mask, x86_die_flags, SD_INIT_NAME(PKG) |
660 | }; |
661 | } |
662 | |
663 | /* |
664 | * There must be one trailing NULL entry left. |
665 | */ |
666 | BUG_ON(i >= ARRAY_SIZE(x86_topology)-1); |
667 | |
668 | set_sched_topology(x86_topology); |
669 | } |
670 | |
671 | void set_cpu_sibling_map(int cpu) |
672 | { |
673 | bool has_smt = smp_num_siblings > 1; |
674 | bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1; |
675 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
676 | struct cpuinfo_x86 *o; |
677 | int i, threads; |
678 | |
	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);
680 | |
681 | if (!has_mp) { |
682 | cpumask_set_cpu(cpu, topology_sibling_cpumask(cpu)); |
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_l2c_shared_mask(cpu));
685 | cpumask_set_cpu(cpu, topology_core_cpumask(cpu)); |
686 | cpumask_set_cpu(cpu, topology_die_cpumask(cpu)); |
687 | c->booted_cores = 1; |
688 | return; |
689 | } |
690 | |
691 | for_each_cpu(i, cpu_sibling_setup_mask) { |
692 | o = &cpu_data(i); |
693 | |
694 | if (match_pkg(c, o) && !topology_same_node(c, o)) |
695 | x86_has_numa_in_package = true; |
696 | |
697 | if ((i == cpu) || (has_smt && match_smt(c, o))) |
698 | link_mask(topology_sibling_cpumask, cpu, i); |
699 | |
700 | if ((i == cpu) || (has_mp && match_llc(c, o))) |
701 | link_mask(cpu_llc_shared_mask, cpu, i); |
702 | |
703 | if ((i == cpu) || (has_mp && match_l2c(c, o))) |
704 | link_mask(cpu_l2c_shared_mask, cpu, i); |
705 | |
706 | if ((i == cpu) || (has_mp && match_die(c, o))) |
707 | link_mask(topology_die_cpumask, cpu, i); |
708 | } |
709 | |
710 | threads = cpumask_weight(topology_sibling_cpumask(cpu)); |
711 | if (threads > __max_smt_threads) |
712 | __max_smt_threads = threads; |
713 | |
714 | for_each_cpu(i, topology_sibling_cpumask(cpu)) |
715 | cpu_data(i).smt_active = threads > 1; |
716 | |
717 | /* |
718 | * This needs a separate iteration over the cpus because we rely on all |
719 | * topology_sibling_cpumask links to be set-up. |
720 | */ |
721 | for_each_cpu(i, cpu_sibling_setup_mask) { |
722 | o = &cpu_data(i); |
723 | |
724 | if ((i == cpu) || (has_mp && match_pkg(c, o))) { |
725 | link_mask(topology_core_cpumask, cpu, i); |
726 | |
727 | /* |
728 | * Does this new cpu bringup a new core? |
729 | */ |
730 | if (threads == 1) { |
731 | /* |
732 | * for each core in package, increment |
733 | * the booted_cores for this new cpu |
734 | */ |
735 | if (cpumask_first( |
736 | topology_sibling_cpumask(i)) == i) |
737 | c->booted_cores++; |
738 | /* |
739 | * increment the core count for all |
740 | * the other cpus in this package |
741 | */ |
742 | if (i != cpu) |
743 | cpu_data(i).booted_cores++; |
744 | } else if (i != cpu && !c->booted_cores) |
745 | c->booted_cores = cpu_data(i).booted_cores; |
746 | } |
747 | } |
748 | } |
749 | |
750 | /* maps the cpu to the sched domain representing multi-core */ |
751 | const struct cpumask *cpu_coregroup_mask(int cpu) |
752 | { |
753 | return cpu_llc_shared_mask(cpu); |
754 | } |
755 | |
756 | const struct cpumask *cpu_clustergroup_mask(int cpu) |
757 | { |
758 | return cpu_l2c_shared_mask(cpu); |
759 | } |
760 | |
761 | static void impress_friends(void) |
762 | { |
763 | int cpu; |
764 | unsigned long bogosum = 0; |
765 | /* |
766 | * Allow the user to impress friends. |
767 | */ |
768 | pr_debug("Before bogomips\n" ); |
769 | for_each_online_cpu(cpu) |
770 | bogosum += cpu_data(cpu).loops_per_jiffy; |
771 | |
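	/*
	 * BogoMIPS = loops_per_jiffy * HZ / 500000, so bogosum/(500000/HZ)
	 * is the integer part and (bogosum/(5000/HZ)) % 100 the two
	 * fractional digits, avoiding floating point in the kernel.
	 */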
772 | pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n" , |
773 | num_online_cpus(), |
774 | bogosum/(500000/HZ), |
775 | (bogosum/(5000/HZ))%100); |
776 | |
777 | pr_debug("Before bogocount - setting activated=1\n" ); |
778 | } |
779 | |
780 | /* |
781 | * The Multiprocessor Specification 1.4 (1997) example code suggests |
782 | * that there should be a 10ms delay between the BSP asserting INIT |
783 | * and de-asserting INIT, when starting a remote processor. |
784 | * But that slows boot and resume on modern processors, which include |
785 | * many cores and don't require that delay. |
786 | * |
787 | * Cmdline "init_cpu_udelay=" is available to over-ride this delay. |
788 | * Modern processor families are quirked to remove the delay entirely. |
789 | */ |
790 | #define UDELAY_10MS_DEFAULT 10000 |
791 | |
792 | static unsigned int init_udelay = UINT_MAX; |
793 | |
794 | static int __init cpu_init_udelay(char *str) |
795 | { |
	get_option(&str, &init_udelay);
797 | |
798 | return 0; |
799 | } |
800 | early_param("cpu_init_udelay" , cpu_init_udelay); |
801 | |
802 | static void __init smp_quirk_init_udelay(void) |
803 | { |
804 | /* if cmdline changed it from default, leave it alone */ |
805 | if (init_udelay != UINT_MAX) |
806 | return; |
807 | |
808 | /* if modern processor, use no delay */ |
809 | if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) || |
810 | ((boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) && (boot_cpu_data.x86 >= 0x18)) || |
811 | ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF))) { |
812 | init_udelay = 0; |
813 | return; |
814 | } |
815 | /* else, use legacy delay */ |
816 | init_udelay = UDELAY_10MS_DEFAULT; |
817 | } |
818 | |
819 | /* |
 * Send the INIT assert/deassert part of the INIT, INIT, STARTUP sequence.
821 | */ |
822 | static void send_init_sequence(u32 phys_apicid) |
823 | { |
824 | int maxlvt = lapic_get_maxlvt(); |
825 | |
826 | /* Be paranoid about clearing APIC errors. */ |
827 | if (APIC_INTEGRATED(boot_cpu_apic_version)) { |
828 | /* Due to the Pentium erratum 3AP. */ |
829 | if (maxlvt > 3) |
			apic_write(APIC_ESR, 0);
831 | apic_read(APIC_ESR); |
832 | } |
833 | |
834 | /* Assert INIT on the target CPU */ |
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT, phys_apicid);
836 | safe_apic_wait_icr_idle(); |
837 | |
838 | udelay(init_udelay); |
839 | |
840 | /* Deassert INIT on the target CPU */ |
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);
842 | safe_apic_wait_icr_idle(); |
843 | } |
844 | |
845 | /* |
846 | * Wake up AP by INIT, INIT, STARTUP sequence. |
847 | */ |
848 | static int wakeup_secondary_cpu_via_init(u32 phys_apicid, unsigned long start_eip) |
849 | { |
850 | unsigned long send_status = 0, accept_status = 0; |
851 | int num_starts, j, maxlvt; |
852 | |
853 | preempt_disable(); |
854 | maxlvt = lapic_get_maxlvt(); |
855 | send_init_sequence(phys_apicid); |
856 | |
857 | mb(); |
858 | |
859 | /* |
860 | * Should we send STARTUP IPIs ? |
861 | * |
862 | * Determine this based on the APIC version. |
863 | * If we don't have an integrated APIC, don't send the STARTUP IPIs. |
864 | */ |
865 | if (APIC_INTEGRATED(boot_cpu_apic_version)) |
866 | num_starts = 2; |
867 | else |
868 | num_starts = 0; |
869 | |
870 | /* |
871 | * Run STARTUP IPI loop. |
872 | */ |
873 | pr_debug("#startup loops: %d\n" , num_starts); |
874 | |
875 | for (j = 1; j <= num_starts; j++) { |
876 | pr_debug("Sending STARTUP #%d\n" , j); |
877 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
878 | apic_write(APIC_ESR, val: 0); |
879 | apic_read(APIC_ESR); |
880 | pr_debug("After apic_write\n" ); |
881 | |
882 | /* |
883 | * STARTUP IPI |
884 | */ |
885 | |
886 | /* Target chip */ |
887 | /* Boot on the stack */ |
888 | /* Kick the second */ |
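		/*
		 * The STARTUP vector field carries the 4K page number of
		 * start_eip, so the trampoline must be page aligned and
		 * below 1MB.
		 */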
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);
891 | |
892 | /* |
893 | * Give the other CPU some time to accept the IPI. |
894 | */ |
895 | if (init_udelay == 0) |
896 | udelay(10); |
897 | else |
898 | udelay(300); |
899 | |
900 | pr_debug("Startup point 1\n" ); |
901 | |
902 | pr_debug("Waiting for send to finish...\n" ); |
903 | send_status = safe_apic_wait_icr_idle(); |
904 | |
905 | /* |
906 | * Give the other CPU some time to accept the IPI. |
907 | */ |
908 | if (init_udelay == 0) |
909 | udelay(10); |
910 | else |
911 | udelay(200); |
912 | |
913 | if (maxlvt > 3) /* Due to the Pentium erratum 3AP. */ |
			apic_write(APIC_ESR, 0);
915 | accept_status = (apic_read(APIC_ESR) & 0xEF); |
916 | if (send_status || accept_status) |
917 | break; |
918 | } |
919 | pr_debug("After Startup\n" ); |
920 | |
921 | if (send_status) |
922 | pr_err("APIC never delivered???\n" ); |
923 | if (accept_status) |
924 | pr_err("APIC delivery error (%lx)\n" , accept_status); |
925 | |
926 | preempt_enable(); |
927 | return (send_status | accept_status); |
928 | } |
929 | |
930 | /* reduce the number of lines printed when booting a large cpu count system */ |
931 | static void announce_cpu(int cpu, int apicid) |
932 | { |
933 | static int width, node_width, first = 1; |
934 | static int current_node = NUMA_NO_NODE; |
935 | int node = early_cpu_to_node(cpu); |
936 | |
937 | if (!width) |
938 | width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */ |
939 | |
940 | if (!node_width) |
941 | node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */ |
942 | |
943 | if (system_state < SYSTEM_RUNNING) { |
944 | if (first) |
945 | pr_info("x86: Booting SMP configuration:\n" ); |
946 | |
947 | if (node != current_node) { |
948 | if (current_node > (-1)) |
949 | pr_cont("\n" ); |
950 | current_node = node; |
951 | |
952 | printk(KERN_INFO ".... node %*s#%d, CPUs: " , |
953 | node_width - num_digits(node), " " , node); |
954 | } |
955 | |
956 | /* Add padding for the BSP */ |
957 | if (first) |
958 | pr_cont("%*s" , width + 1, " " ); |
959 | first = 0; |
960 | |
961 | pr_cont("%*s#%d" , width - num_digits(cpu), " " , cpu); |
962 | } else |
963 | pr_info("Booting Node %d Processor %d APIC 0x%x\n" , |
964 | node, cpu, apicid); |
965 | } |
966 | |
967 | int common_cpu_up(unsigned int cpu, struct task_struct *idle) |
968 | { |
969 | int ret; |
970 | |
971 | /* Just in case we booted with a single CPU. */ |
972 | alternatives_enable_smp(); |
973 | |
974 | per_cpu(pcpu_hot.current_task, cpu) = idle; |
975 | cpu_init_stack_canary(cpu, idle); |
976 | |
977 | /* Initialize the interrupt stack(s) */ |
978 | ret = irq_init_percpu_irqstack(cpu); |
979 | if (ret) |
980 | return ret; |
981 | |
982 | #ifdef CONFIG_X86_32 |
983 | /* Stack for startup_32 can be just as for start_secondary onwards */ |
984 | per_cpu(pcpu_hot.top_of_stack, cpu) = task_top_of_stack(idle); |
985 | #endif |
986 | return 0; |
987 | } |
988 | |
989 | /* |
990 | * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad |
991 | * (ie clustered apic addressing mode), this is a LOGICAL apic ID. |
992 | * Returns zero if startup was successfully sent, else error code from |
993 | * ->wakeup_secondary_cpu. |
994 | */ |
995 | static int do_boot_cpu(u32 apicid, int cpu, struct task_struct *idle) |
996 | { |
997 | unsigned long start_ip = real_mode_header->trampoline_start; |
998 | int ret; |
999 | |
1000 | #ifdef CONFIG_X86_64 |
1001 | /* If 64-bit wakeup method exists, use the 64-bit mode trampoline IP */ |
1002 | if (apic->wakeup_secondary_cpu_64) |
1003 | start_ip = real_mode_header->trampoline_start64; |
1004 | #endif |
1005 | idle->thread.sp = (unsigned long)task_pt_regs(idle); |
1006 | initial_code = (unsigned long)start_secondary; |
1007 | |
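	/*
	 * 32-bit hands the AP its GDT and initial stack via globals. On
	 * 64-bit, smpboot_control carries the target CPU number unless
	 * parallel bringup is active, in which case the trampoline is
	 * expected to derive it itself (see STARTUP_READ_APICID in
	 * arch_cpuhp_init_parallel_bringup() below).
	 */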
1008 | if (IS_ENABLED(CONFIG_X86_32)) { |
1009 | early_gdt_descr.address = (unsigned long)get_cpu_gdt_rw(cpu); |
1010 | initial_stack = idle->thread.sp; |
1011 | } else if (!(smpboot_control & STARTUP_PARALLEL_MASK)) { |
1012 | smpboot_control = cpu; |
1013 | } |
1014 | |
1015 | /* Enable the espfix hack for this CPU */ |
1016 | init_espfix_ap(cpu); |
1017 | |
1018 | /* So we see what's up */ |
1019 | announce_cpu(cpu, apicid); |
1020 | |
1021 | /* |
1022 | * This grunge runs the startup process for |
1023 | * the targeted processor. |
1024 | */ |
1025 | if (x86_platform.legacy.warm_reset) { |
1026 | |
1027 | pr_debug("Setting warm reset code and vector.\n" ); |
1028 | |
		smpboot_setup_warm_reset_vector(start_ip);
1030 | /* |
1031 | * Be paranoid about clearing APIC errors. |
1032 | */ |
1033 | if (APIC_INTEGRATED(boot_cpu_apic_version)) { |
			apic_write(APIC_ESR, 0);
1035 | apic_read(APIC_ESR); |
1036 | } |
1037 | } |
1038 | |
1039 | smp_mb(); |
1040 | |
1041 | /* |
	 * Wake up a CPU in different cases:
1043 | * - Use a method from the APIC driver if one defined, with wakeup |
1044 | * straight to 64-bit mode preferred over wakeup to RM. |
1045 | * Otherwise, |
1046 | * - Use an INIT boot APIC message |
1047 | */ |
1048 | if (apic->wakeup_secondary_cpu_64) |
1049 | ret = apic->wakeup_secondary_cpu_64(apicid, start_ip); |
1050 | else if (apic->wakeup_secondary_cpu) |
1051 | ret = apic->wakeup_secondary_cpu(apicid, start_ip); |
1052 | else |
		ret = wakeup_secondary_cpu_via_init(apicid, start_ip);
1054 | |
1055 | /* If the wakeup mechanism failed, cleanup the warm reset vector */ |
1056 | if (ret) |
1057 | arch_cpuhp_cleanup_kick_cpu(cpu); |
1058 | return ret; |
1059 | } |
1060 | |
1061 | int native_kick_ap(unsigned int cpu, struct task_struct *tidle) |
1062 | { |
1063 | u32 apicid = apic->cpu_present_to_apicid(cpu); |
1064 | int err; |
1065 | |
1066 | lockdep_assert_irqs_enabled(); |
1067 | |
1068 | pr_debug("++++++++++++++++++++=_---CPU UP %u\n" , cpu); |
1069 | |
1070 | if (apicid == BAD_APICID || !physid_isset(apicid, phys_cpu_present_map) || |
	    !apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
1073 | return -EINVAL; |
1074 | } |
1075 | |
1076 | /* |
1077 | * Save current MTRR state in case it was changed since early boot |
1078 | * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync: |
1079 | */ |
1080 | mtrr_save_state(); |
1081 | |
1082 | /* the FPU context is blank, nobody can own it */ |
1083 | per_cpu(fpu_fpregs_owner_ctx, cpu) = NULL; |
1084 | |
	err = common_cpu_up(cpu, tidle);
1086 | if (err) |
1087 | return err; |
1088 | |
	err = do_boot_cpu(apicid, cpu, tidle);
1090 | if (err) |
1091 | pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n" , err, cpu); |
1092 | |
1093 | return err; |
1094 | } |
1095 | |
1096 | int arch_cpuhp_kick_ap_alive(unsigned int cpu, struct task_struct *tidle) |
1097 | { |
1098 | return smp_ops.kick_ap_alive(cpu, tidle); |
1099 | } |
1100 | |
1101 | void arch_cpuhp_cleanup_kick_cpu(unsigned int cpu) |
1102 | { |
1103 | /* Cleanup possible dangling ends... */ |
1104 | if (smp_ops.kick_ap_alive == native_kick_ap && x86_platform.legacy.warm_reset) |
1105 | smpboot_restore_warm_reset_vector(); |
1106 | } |
1107 | |
1108 | void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) |
1109 | { |
1110 | if (smp_ops.cleanup_dead_cpu) |
1111 | smp_ops.cleanup_dead_cpu(cpu); |
1112 | |
1113 | if (system_state == SYSTEM_RUNNING) |
1114 | pr_info("CPU %u is now offline\n" , cpu); |
1115 | } |
1116 | |
1117 | void arch_cpuhp_sync_state_poll(void) |
1118 | { |
1119 | if (smp_ops.poll_sync_state) |
1120 | smp_ops.poll_sync_state(); |
1121 | } |
1122 | |
1123 | /** |
1124 | * arch_disable_smp_support() - Disables SMP support for x86 at boottime |
1125 | */ |
1126 | void __init arch_disable_smp_support(void) |
1127 | { |
1128 | disable_ioapic_support(); |
1129 | } |
1130 | |
1131 | /* |
1132 | * Fall back to non SMP mode after errors. |
1133 | * |
1134 | * RED-PEN audit/test this more. I bet there is more state messed up here. |
1135 | */ |
1136 | static __init void disable_smp(void) |
1137 | { |
1138 | pr_info("SMP disabled\n" ); |
1139 | |
1140 | disable_ioapic_support(); |
1141 | |
1142 | init_cpu_present(cpumask_of(0)); |
1143 | init_cpu_possible(cpumask_of(0)); |
1144 | |
1145 | if (smp_found_config) |
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, topology_sibling_cpumask(0));
	cpumask_set_cpu(0, topology_core_cpumask(0));
	cpumask_set_cpu(0, topology_die_cpumask(0));
1152 | } |
1153 | |
1154 | static void __init smp_cpu_index_default(void) |
1155 | { |
1156 | int i; |
1157 | struct cpuinfo_x86 *c; |
1158 | |
1159 | for_each_possible_cpu(i) { |
1160 | c = &cpu_data(i); |
1161 | /* mark all to hotplug */ |
1162 | c->cpu_index = nr_cpu_ids; |
1163 | } |
1164 | } |
1165 | |
1166 | void __init smp_prepare_cpus_common(void) |
1167 | { |
1168 | unsigned int i; |
1169 | |
1170 | smp_cpu_index_default(); |
1171 | |
1172 | /* |
1173 | * Setup boot CPU information |
1174 | */ |
1175 | smp_store_boot_cpu_info(); /* Final full version of the data */ |
1176 | mb(); |
1177 | |
1178 | for_each_possible_cpu(i) { |
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_die_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_l2c_shared_map, i), GFP_KERNEL);
1184 | } |
1185 | |
1186 | set_cpu_sibling_map(0); |
1187 | } |
1188 | |
1189 | #ifdef CONFIG_X86_64 |
1190 | /* Establish whether parallel bringup can be supported. */ |
1191 | bool __init arch_cpuhp_init_parallel_bringup(void) |
1192 | { |
1193 | if (!x86_cpuinit.parallel_bringup) { |
1194 | pr_info("Parallel CPU startup disabled by the platform\n" ); |
1195 | return false; |
1196 | } |
1197 | |
1198 | smpboot_control = STARTUP_READ_APICID; |
1199 | pr_debug("Parallel CPU startup enabled: 0x%08x\n" , smpboot_control); |
1200 | return true; |
1201 | } |
1202 | #endif |
1203 | |
1204 | /* |
1205 | * Prepare for SMP bootup. |
 * @max_cpus: configured maximum number of CPUs. It is a legacy parameter
1207 | * for common interface support. |
1208 | */ |
1209 | void __init native_smp_prepare_cpus(unsigned int max_cpus) |
1210 | { |
1211 | smp_prepare_cpus_common(); |
1212 | |
1213 | switch (apic_intr_mode) { |
1214 | case APIC_PIC: |
1215 | case APIC_VIRTUAL_WIRE_NO_CONFIG: |
1216 | disable_smp(); |
1217 | return; |
1218 | case APIC_SYMMETRIC_IO_NO_ROUTING: |
1219 | disable_smp(); |
1220 | /* Setup local timer */ |
1221 | x86_init.timers.setup_percpu_clockev(); |
1222 | return; |
1223 | case APIC_VIRTUAL_WIRE: |
1224 | case APIC_SYMMETRIC_IO: |
1225 | break; |
1226 | } |
1227 | |
1228 | /* Setup local timer */ |
1229 | x86_init.timers.setup_percpu_clockev(); |
1230 | |
1231 | pr_info("CPU0: " ); |
1232 | print_cpu_info(&cpu_data(0)); |
1233 | |
1234 | uv_system_init(); |
1235 | |
1236 | smp_quirk_init_udelay(); |
1237 | |
1238 | speculative_store_bypass_ht_init(); |
1239 | |
1240 | snp_set_wakeup_secondary_cpu(); |
1241 | } |
1242 | |
1243 | void arch_thaw_secondary_cpus_begin(void) |
1244 | { |
1245 | set_cache_aps_delayed_init(true); |
1246 | } |
1247 | |
1248 | void arch_thaw_secondary_cpus_end(void) |
1249 | { |
1250 | cache_aps_init(); |
1251 | } |
1252 | |
1253 | /* |
1254 | * Early setup to make printk work. |
1255 | */ |
1256 | void __init native_smp_prepare_boot_cpu(void) |
1257 | { |
1258 | int me = smp_processor_id(); |
1259 | |
1260 | /* SMP handles this from setup_per_cpu_areas() */ |
1261 | if (!IS_ENABLED(CONFIG_SMP)) |
1262 | switch_gdt_and_percpu_base(me); |
1263 | |
1264 | native_pv_lock_init(); |
1265 | } |
1266 | |
1267 | void __init calculate_max_logical_packages(void) |
1268 | { |
1269 | int ncpus; |
1270 | |
1271 | /* |
1272 | * Today neither Intel nor AMD support heterogeneous systems so |
1273 | * extrapolate the boot cpu's data to all packages. |
1274 | */ |
1275 | ncpus = cpu_data(0).booted_cores * topology_max_smt_threads(); |
1276 | __max_logical_packages = DIV_ROUND_UP(total_cpus, ncpus); |
1277 | pr_info("Max logical packages: %u\n" , __max_logical_packages); |
1278 | } |
1279 | |
1280 | void __init native_smp_cpus_done(unsigned int max_cpus) |
1281 | { |
1282 | pr_debug("Boot done\n" ); |
1283 | |
1284 | calculate_max_logical_packages(); |
1285 | build_sched_topology(); |
1286 | nmi_selftest(); |
1287 | impress_friends(); |
1288 | cache_aps_init(); |
1289 | } |
1290 | |
1291 | static int __initdata setup_possible_cpus = -1; |
1292 | static int __init _setup_possible_cpus(char *str) |
1293 | { |
	get_option(&str, &setup_possible_cpus);
1295 | return 0; |
1296 | } |
1297 | early_param("possible_cpus" , _setup_possible_cpus); |
1298 | |
1299 | |
1300 | /* |
1301 | * cpu_possible_mask should be static, it cannot change as cpu's |
1302 | * are onlined, or offlined. The reason is per-cpu data-structures |
1303 | * are allocated by some modules at init time, and don't expect to |
1304 | * do this dynamically on cpu arrival/departure. |
1305 | * cpu_present_mask on the other hand can change dynamically. |
 * If CPU hotplug is not compiled in, we fall back to the old
 * behaviour: cpu_possible == cpu_present.
1308 | * - Ashok Raj |
1309 | * |
1310 | * Three ways to find out the number of additional hotplug CPUs: |
1311 | * - If the BIOS specified disabled CPUs in ACPI/mptables use that. |
1312 | * - The user can overwrite it with possible_cpus=NUM |
1313 | * - Otherwise don't reserve additional CPUs. |
1314 | * We do this because additional CPUs waste a lot of memory. |
1315 | * -AK |
1316 | */ |
1317 | __init void prefill_possible_map(void) |
1318 | { |
1319 | int i, possible; |
1320 | |
1321 | i = setup_max_cpus ?: 1; |
1322 | if (setup_possible_cpus == -1) { |
1323 | possible = num_processors; |
1324 | #ifdef CONFIG_HOTPLUG_CPU |
1325 | if (setup_max_cpus) |
1326 | possible += disabled_cpus; |
1327 | #else |
1328 | if (possible > i) |
1329 | possible = i; |
1330 | #endif |
1331 | } else |
1332 | possible = setup_possible_cpus; |
1333 | |
1334 | total_cpus = max_t(int, possible, num_processors + disabled_cpus); |
1335 | |
1336 | /* nr_cpu_ids could be reduced via nr_cpus= */ |
1337 | if (possible > nr_cpu_ids) { |
1338 | pr_warn("%d Processors exceeds NR_CPUS limit of %u\n" , |
1339 | possible, nr_cpu_ids); |
1340 | possible = nr_cpu_ids; |
1341 | } |
1342 | |
1343 | #ifdef CONFIG_HOTPLUG_CPU |
1344 | if (!setup_max_cpus) |
1345 | #endif |
1346 | if (possible > i) { |
1347 | pr_warn("%d Processors exceeds max_cpus limit of %u\n" , |
1348 | possible, setup_max_cpus); |
1349 | possible = i; |
1350 | } |
1351 | |
1352 | set_nr_cpu_ids(possible); |
1353 | |
1354 | pr_info("Allowing %d CPUs, %d hotplug CPUs\n" , |
1355 | possible, max_t(int, possible - num_processors, 0)); |
1356 | |
1357 | reset_cpu_possible_mask(); |
1358 | |
1359 | for (i = 0; i < possible; i++) |
		set_cpu_possible(i, true);
1361 | } |
1362 | |
1363 | /* correctly size the local cpu masks */ |
1364 | void __init setup_cpu_local_masks(void) |
1365 | { |
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
1367 | } |
1368 | |
1369 | #ifdef CONFIG_HOTPLUG_CPU |
1370 | |
1371 | /* Recompute SMT state for all CPUs on offline */ |
1372 | static void recompute_smt_state(void) |
1373 | { |
1374 | int max_threads, cpu; |
1375 | |
1376 | max_threads = 0; |
1377 | for_each_online_cpu (cpu) { |
1378 | int threads = cpumask_weight(topology_sibling_cpumask(cpu)); |
1379 | |
1380 | if (threads > max_threads) |
1381 | max_threads = threads; |
1382 | } |
1383 | __max_smt_threads = max_threads; |
1384 | } |
1385 | |
1386 | static void remove_siblinginfo(int cpu) |
1387 | { |
1388 | int sibling; |
1389 | struct cpuinfo_x86 *c = &cpu_data(cpu); |
1390 | |
1391 | for_each_cpu(sibling, topology_core_cpumask(cpu)) { |
1392 | cpumask_clear_cpu(cpu, topology_core_cpumask(sibling)); |
1393 | /*/ |
1394 | * last thread sibling in this cpu core going down |
1395 | */ |
1396 | if (cpumask_weight(topology_sibling_cpumask(cpu)) == 1) |
1397 | cpu_data(sibling).booted_cores--; |
1398 | } |
1399 | |
1400 | for_each_cpu(sibling, topology_die_cpumask(cpu)) |
1401 | cpumask_clear_cpu(cpu, topology_die_cpumask(sibling)); |
1402 | |
1403 | for_each_cpu(sibling, topology_sibling_cpumask(cpu)) { |
1404 | cpumask_clear_cpu(cpu, topology_sibling_cpumask(sibling)); |
1405 | if (cpumask_weight(topology_sibling_cpumask(sibling)) == 1) |
1406 | cpu_data(sibling).smt_active = false; |
1407 | } |
1408 | |
1409 | for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) |
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	for_each_cpu(sibling, cpu_l2c_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_l2c_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(cpu_l2c_shared_mask(cpu));
1415 | cpumask_clear(topology_sibling_cpumask(cpu)); |
1416 | cpumask_clear(topology_core_cpumask(cpu)); |
1417 | cpumask_clear(topology_die_cpumask(cpu)); |
1418 | c->topo.core_id = 0; |
1419 | c->booted_cores = 0; |
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
1421 | recompute_smt_state(); |
1422 | } |
1423 | |
1424 | static void remove_cpu_from_maps(int cpu) |
1425 | { |
	set_cpu_online(cpu, false);
1427 | numa_remove_cpu(cpu); |
1428 | } |
1429 | |
1430 | void cpu_disable_common(void) |
1431 | { |
1432 | int cpu = smp_processor_id(); |
1433 | |
1434 | remove_siblinginfo(cpu); |
1435 | |
1436 | /* It's now safe to remove this processor from the online map */ |
1437 | lock_vector_lock(); |
1438 | remove_cpu_from_maps(cpu); |
1439 | unlock_vector_lock(); |
1440 | fixup_irqs(); |
1441 | lapic_offline(); |
1442 | } |
1443 | |
1444 | int native_cpu_disable(void) |
1445 | { |
1446 | int ret; |
1447 | |
1448 | ret = lapic_can_unplug_cpu(); |
1449 | if (ret) |
1450 | return ret; |
1451 | |
1452 | cpu_disable_common(); |
1453 | |
1454 | /* |
1455 | * Disable the local APIC. Otherwise IPI broadcasts will reach |
1456 | * it. It still responds normally to INIT, NMI, SMI, and SIPI |
1457 | * messages. |
1458 | * |
1459 | * Disabling the APIC must happen after cpu_disable_common() |
1460 | * which invokes fixup_irqs(). |
1461 | * |
1462 | * Disabling the APIC preserves already set bits in IRR, but |
1463 | * an interrupt arriving after disabling the local APIC does not |
1464 | * set the corresponding IRR bit. |
1465 | * |
1466 | * fixup_irqs() scans IRR for set bits so it can raise a not |
1467 | * yet handled interrupt on the new destination CPU via an IPI |
1468 | * but obviously it can't do so for IRR bits which are not set. |
1469 | * IOW, interrupts arriving after disabling the local APIC will |
1470 | * be lost. |
1471 | */ |
1472 | apic_soft_disable(); |
1473 | |
1474 | return 0; |
1475 | } |
1476 | |
1477 | void play_dead_common(void) |
1478 | { |
1479 | idle_task_exit(); |
1480 | |
1481 | cpuhp_ap_report_dead(); |
1482 | |
1483 | local_irq_disable(); |
1484 | } |
1485 | |
1486 | /* |
1487 | * We need to flush the caches before going to sleep, lest we have |
1488 | * dirty data in our caches when we come back up. |
1489 | */ |
1490 | static inline void mwait_play_dead(void) |
1491 | { |
1492 | struct mwait_cpu_dead *md = this_cpu_ptr(&mwait_cpu_dead); |
1493 | unsigned int eax, ebx, ecx, edx; |
1494 | unsigned int highest_cstate = 0; |
1495 | unsigned int highest_subcstate = 0; |
1496 | int i; |
1497 | |
1498 | if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD || |
1499 | boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) |
1500 | return; |
1501 | if (!this_cpu_has(X86_FEATURE_MWAIT)) |
1502 | return; |
1503 | if (!this_cpu_has(X86_FEATURE_CLFLUSH)) |
1504 | return; |
1505 | if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF) |
1506 | return; |
1507 | |
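	/*
	 * CPUID leaf 5 (CPUID_MWAIT_LEAF): EDX enumerates the number of
	 * MWAIT sub-states per C-state in consecutive 4-bit fields, valid
	 * only when CPUID5_ECX_EXTENSIONS_SUPPORTED is set in ECX. The
	 * MWAIT hint computed below encodes the deepest available state as
	 * (cstate << 4) | (sub-state - 1).
	 */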
1508 | eax = CPUID_MWAIT_LEAF; |
1509 | ecx = 0; |
	native_cpuid(&eax, &ebx, &ecx, &edx);
1511 | |
1512 | /* |
1513 | * eax will be 0 if EDX enumeration is not valid. |
1514 | * Initialized below to cstate, sub_cstate value when EDX is valid. |
1515 | */ |
1516 | if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) { |
1517 | eax = 0; |
1518 | } else { |
1519 | edx >>= MWAIT_SUBSTATE_SIZE; |
1520 | for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) { |
1521 | if (edx & MWAIT_SUBSTATE_MASK) { |
1522 | highest_cstate = i; |
1523 | highest_subcstate = edx & MWAIT_SUBSTATE_MASK; |
1524 | } |
1525 | } |
1526 | eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) | |
1527 | (highest_subcstate - 1); |
1528 | } |
1529 | |
1530 | /* Set up state for the kexec() hack below */ |
1531 | md->status = CPUDEAD_MWAIT_WAIT; |
1532 | md->control = CPUDEAD_MWAIT_WAIT; |
1533 | |
1534 | wbinvd(); |
1535 | |
1536 | while (1) { |
1537 | /* |
1538 | * The CLFLUSH is a workaround for erratum AAI65 for |
1539 | * the Xeon 7400 series. It's not clear it is actually |
1540 | * needed, but it should be harmless in either case. |
1541 | * The WBINVD is insufficient due to the spurious-wakeup |
1542 | * case where we return around the loop. |
1543 | */ |
1544 | mb(); |
		clflush(md);
		mb();
		__monitor(md, 0, 0);
		mb();
		__mwait(eax, 0);
1550 | |
1551 | if (READ_ONCE(md->control) == CPUDEAD_MWAIT_KEXEC_HLT) { |
1552 | /* |
1553 | * Kexec is about to happen. Don't go back into mwait() as |
1554 | * the kexec kernel might overwrite text and data including |
1555 | * page tables and stack. So mwait() would resume when the |
1556 | * monitor cache line is written to and then the CPU goes |
1557 | * south due to overwritten text, page tables and stack. |
1558 | * |
1559 | * Note: This does _NOT_ protect against a stray MCE, NMI, |
1560 | * SMI. They will resume execution at the instruction |
1561 | * following the HLT instruction and run into the problem |
1562 | * which this is trying to prevent. |
1563 | */ |
1564 | WRITE_ONCE(md->status, CPUDEAD_MWAIT_KEXEC_HLT); |
			while (1)
				native_halt();
1567 | } |
1568 | } |
1569 | } |
1570 | |
1571 | /* |
1572 | * Kick all "offline" CPUs out of mwait on kexec(). See comment in |
1573 | * mwait_play_dead(). |
1574 | */ |
1575 | void smp_kick_mwait_play_dead(void) |
1576 | { |
1577 | u32 newstate = CPUDEAD_MWAIT_KEXEC_HLT; |
1578 | struct mwait_cpu_dead *md; |
1579 | unsigned int cpu, i; |
1580 | |
1581 | for_each_cpu_andnot(cpu, cpu_present_mask, cpu_online_mask) { |
1582 | md = per_cpu_ptr(&mwait_cpu_dead, cpu); |
1583 | |
1584 | /* Does it sit in mwait_play_dead() ? */ |
1585 | if (READ_ONCE(md->status) != CPUDEAD_MWAIT_WAIT) |
1586 | continue; |
1587 | |
1588 | /* Wait up to 5ms */ |
1589 | for (i = 0; READ_ONCE(md->status) != newstate && i < 1000; i++) { |
1590 | /* Bring it out of mwait */ |
1591 | WRITE_ONCE(md->control, newstate); |
1592 | udelay(5); |
1593 | } |
1594 | |
1595 | if (READ_ONCE(md->status) != newstate) |
1596 | pr_err_once("CPU%u is stuck in mwait_play_dead()\n" , cpu); |
1597 | } |
1598 | } |
1599 | |
1600 | void __noreturn hlt_play_dead(void) |
1601 | { |
1602 | if (__this_cpu_read(cpu_info.x86) >= 4) |
1603 | wbinvd(); |
1604 | |
1605 | while (1) |
1606 | native_halt(); |
1607 | } |
1608 | |
1609 | /* |
1610 | * native_play_dead() is essentially a __noreturn function, but it can't |
1611 | * be marked as such as the compiler may complain about it. |
1612 | */ |
1613 | void native_play_dead(void) |
1614 | { |
1615 | if (cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) |
		__update_spec_ctrl(0);
1617 | |
1618 | play_dead_common(); |
	tboot_shutdown(TB_SHUTDOWN_WFS);
1620 | |
1621 | mwait_play_dead(); |
1622 | if (cpuidle_play_dead()) |
1623 | hlt_play_dead(); |
1624 | } |
1625 | |
1626 | #else /* ... !CONFIG_HOTPLUG_CPU */ |
1627 | int native_cpu_disable(void) |
1628 | { |
1629 | return -ENOSYS; |
1630 | } |
1631 | |
1632 | void native_play_dead(void) |
1633 | { |
1634 | BUG(); |
1635 | } |
1636 | |
1637 | #endif |
1638 | |