1 | // SPDX-License-Identifier: GPL-2.0-or-later |
2 | /* |
3 | * SMP support for ppc. |
4 | * |
5 | * Written by Cort Dougan (cort@cs.nmt.edu) borrowing a great |
6 | * deal of code from the sparc and intel versions. |
7 | * |
8 | * Copyright (C) 1999 Cort Dougan <cort@cs.nmt.edu> |
9 | * |
10 | * PowerPC-64 Support added by Dave Engebretsen, Peter Bergner, and |
11 | * Mike Corrigan {engebret|bergner|mikec}@us.ibm.com |
12 | */ |
13 | |
14 | #undef DEBUG |
15 | |
16 | #include <linux/kernel.h> |
17 | #include <linux/export.h> |
18 | #include <linux/sched/mm.h> |
19 | #include <linux/sched/task_stack.h> |
20 | #include <linux/sched/topology.h> |
21 | #include <linux/smp.h> |
22 | #include <linux/interrupt.h> |
23 | #include <linux/delay.h> |
24 | #include <linux/init.h> |
25 | #include <linux/spinlock.h> |
26 | #include <linux/cache.h> |
27 | #include <linux/err.h> |
28 | #include <linux/device.h> |
29 | #include <linux/cpu.h> |
30 | #include <linux/notifier.h> |
31 | #include <linux/topology.h> |
32 | #include <linux/profile.h> |
33 | #include <linux/processor.h> |
34 | #include <linux/random.h> |
35 | #include <linux/stackprotector.h> |
36 | #include <linux/pgtable.h> |
37 | #include <linux/clockchips.h> |
38 | #include <linux/kexec.h> |
39 | |
40 | #include <asm/ptrace.h> |
41 | #include <linux/atomic.h> |
42 | #include <asm/irq.h> |
43 | #include <asm/hw_irq.h> |
44 | #include <asm/kvm_ppc.h> |
45 | #include <asm/dbell.h> |
46 | #include <asm/page.h> |
47 | #include <asm/smp.h> |
48 | #include <asm/time.h> |
49 | #include <asm/machdep.h> |
50 | #include <asm/mmu_context.h> |
51 | #include <asm/cputhreads.h> |
52 | #include <asm/cputable.h> |
53 | #include <asm/mpic.h> |
54 | #include <asm/vdso_datapage.h> |
55 | #ifdef CONFIG_PPC64 |
56 | #include <asm/paca.h> |
57 | #endif |
58 | #include <asm/vdso.h> |
59 | #include <asm/debug.h> |
60 | #include <asm/cpu_has_feature.h> |
61 | #include <asm/ftrace.h> |
62 | #include <asm/kup.h> |
63 | #include <asm/fadump.h> |
64 | |
65 | #include <trace/events/ipi.h> |
66 | |
67 | #ifdef DEBUG |
68 | #include <asm/udbg.h> |
69 | #define DBG(fmt...) udbg_printf(fmt) |
70 | #else |
71 | #define DBG(fmt...) |
72 | #endif |
73 | |
74 | #ifdef CONFIG_HOTPLUG_CPU |
75 | /* State of each CPU during hotplug phases */ |
76 | static DEFINE_PER_CPU(int, cpu_state) = { 0 }; |
77 | #endif |
78 | |
79 | struct task_struct *secondary_current; |
80 | bool has_big_cores; |
81 | bool coregroup_enabled; |
82 | bool thread_group_shares_l2; |
83 | bool thread_group_shares_l3; |
84 | |
85 | DEFINE_PER_CPU(cpumask_var_t, cpu_sibling_map); |
86 | DEFINE_PER_CPU(cpumask_var_t, cpu_smallcore_map); |
87 | DEFINE_PER_CPU(cpumask_var_t, cpu_l2_cache_map); |
88 | DEFINE_PER_CPU(cpumask_var_t, cpu_core_map); |
89 | static DEFINE_PER_CPU(cpumask_var_t, cpu_coregroup_map); |
90 | |
91 | EXPORT_PER_CPU_SYMBOL(cpu_sibling_map); |
92 | EXPORT_PER_CPU_SYMBOL(cpu_l2_cache_map); |
93 | EXPORT_PER_CPU_SYMBOL(cpu_core_map); |
94 | EXPORT_SYMBOL_GPL(has_big_cores); |
95 | |
96 | enum { |
97 | #ifdef CONFIG_SCHED_SMT |
98 | smt_idx, |
99 | #endif |
100 | cache_idx, |
101 | mc_idx, |
102 | die_idx, |
103 | }; |
104 | |
105 | #define MAX_THREAD_LIST_SIZE 8 |
106 | #define THREAD_GROUP_SHARE_L1 1 |
107 | #define THREAD_GROUP_SHARE_L2_L3 2 |
108 | struct thread_groups { |
109 | unsigned int property; |
110 | unsigned int nr_groups; |
111 | unsigned int threads_per_group; |
112 | unsigned int thread_list[MAX_THREAD_LIST_SIZE]; |
113 | }; |
114 | |
115 | /* Maximum number of properties that groups of threads within a core can share */ |
116 | #define MAX_THREAD_GROUP_PROPERTIES 2 |
117 | |
118 | struct thread_groups_list { |
119 | unsigned int nr_properties; |
120 | struct thread_groups property_tgs[MAX_THREAD_GROUP_PROPERTIES]; |
121 | }; |
122 | |
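/*
 * Parsed "ibm,thread-groups" data is cached here per CPU (note __initdata,
 * so it is discarded after boot); get_thread_groups() only calls
 * parse_thread_groups() the first time a given CPU's list is needed.
 */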
123 | static struct thread_groups_list tgl[NR_CPUS] __initdata; |
124 | /* |
* On big-core systems, thread_group_l1_cache_map for each CPU corresponds to
* the set of its siblings that share the L1-cache.
127 | */ |
128 | DEFINE_PER_CPU(cpumask_var_t, thread_group_l1_cache_map); |
129 | |
130 | /* |
* On some big-core systems, thread_group_l2_cache_map for each CPU
* corresponds to the set of its siblings within the core that share the
133 | * L2-cache. |
134 | */ |
135 | DEFINE_PER_CPU(cpumask_var_t, thread_group_l2_cache_map); |
136 | |
137 | /* |
138 | * On P10, thread_group_l3_cache_map for each CPU is equal to the |
139 | * thread_group_l2_cache_map |
140 | */ |
141 | DEFINE_PER_CPU(cpumask_var_t, thread_group_l3_cache_map); |
142 | |
143 | /* SMP operations for this machine */ |
144 | struct smp_ops_t *smp_ops; |
145 | |
146 | /* Can't be static due to PowerMac hackery */ |
147 | volatile unsigned int cpu_callin_map[NR_CPUS]; |
148 | |
149 | int smt_enabled_at_boot = 1; |
150 | |
151 | /* |
152 | * Returns 1 if the specified cpu should be brought up during boot. |
153 | * Used to inhibit booting threads if they've been disabled or |
154 | * limited on the command line |
155 | */ |
156 | int smp_generic_cpu_bootable(unsigned int nr) |
157 | { |
158 | /* Special case - we inhibit secondary thread startup |
159 | * during boot if the user requests it. |
160 | */ |
161 | if (system_state < SYSTEM_RUNNING && cpu_has_feature(CPU_FTR_SMT)) { |
162 | if (!smt_enabled_at_boot && cpu_thread_in_core(nr) != 0) |
163 | return 0; |
164 | if (smt_enabled_at_boot |
165 | && cpu_thread_in_core(nr) >= smt_enabled_at_boot) |
166 | return 0; |
167 | } |
168 | |
169 | return 1; |
170 | } |
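/*
 * Worked example (illustrative only): booting with "smt-enabled=2" on cores
 * with eight hardware threads leaves smt_enabled_at_boot == 2, so the check
 * above lets threads 0 and 1 of each core boot and returns 0 for threads 2-7.
 */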
171 | |
172 | |
173 | #ifdef CONFIG_PPC64 |
174 | int smp_generic_kick_cpu(int nr) |
175 | { |
176 | if (nr < 0 || nr >= nr_cpu_ids) |
177 | return -EINVAL; |
178 | |
179 | /* |
* The processor is currently spinning, waiting for the
* cpu_start field to become non-zero. After we set cpu_start,
* the processor will continue on to secondary_start.
183 | */ |
184 | if (!paca_ptrs[nr]->cpu_start) { |
185 | paca_ptrs[nr]->cpu_start = 1; |
186 | smp_mb(); |
187 | return 0; |
188 | } |
189 | |
190 | #ifdef CONFIG_HOTPLUG_CPU |
191 | /* |
192 | * Ok it's not there, so it might be soft-unplugged, let's |
193 | * try to bring it back |
194 | */ |
195 | generic_set_cpu_up(nr); |
196 | smp_wmb(); |
197 | smp_send_reschedule(nr); |
198 | #endif /* CONFIG_HOTPLUG_CPU */ |
199 | |
200 | return 0; |
201 | } |
202 | #endif /* CONFIG_PPC64 */ |
203 | |
204 | static irqreturn_t call_function_action(int irq, void *data) |
205 | { |
206 | generic_smp_call_function_interrupt(); |
207 | return IRQ_HANDLED; |
208 | } |
209 | |
210 | static irqreturn_t reschedule_action(int irq, void *data) |
211 | { |
212 | scheduler_ipi(); |
213 | return IRQ_HANDLED; |
214 | } |
215 | |
216 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
217 | static irqreturn_t tick_broadcast_ipi_action(int irq, void *data) |
218 | { |
219 | timer_broadcast_interrupt(); |
220 | return IRQ_HANDLED; |
221 | } |
222 | #endif |
223 | |
224 | #ifdef CONFIG_NMI_IPI |
225 | static irqreturn_t nmi_ipi_action(int irq, void *data) |
226 | { |
227 | smp_handle_nmi_ipi(get_irq_regs()); |
228 | return IRQ_HANDLED; |
229 | } |
230 | #endif |
231 | |
232 | static irq_handler_t smp_ipi_action[] = { |
233 | [PPC_MSG_CALL_FUNCTION] = call_function_action, |
234 | [PPC_MSG_RESCHEDULE] = reschedule_action, |
235 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
236 | [PPC_MSG_TICK_BROADCAST] = tick_broadcast_ipi_action, |
237 | #endif |
238 | #ifdef CONFIG_NMI_IPI |
239 | [PPC_MSG_NMI_IPI] = nmi_ipi_action, |
240 | #endif |
241 | }; |
242 | |
243 | /* |
244 | * The NMI IPI is a fallback and not truly non-maskable. It is simpler |
245 | * than going through the call function infrastructure, and strongly |
246 | * serialized, so it is more appropriate for debugging. |
247 | */ |
248 | const char *smp_ipi_name[] = { |
[PPC_MSG_CALL_FUNCTION] =  "ipi call function",
[PPC_MSG_RESCHEDULE] = "ipi reschedule",
#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
[PPC_MSG_TICK_BROADCAST] = "ipi tick-broadcast",
#endif
#ifdef CONFIG_NMI_IPI
[PPC_MSG_NMI_IPI] = "nmi ipi",
256 | #endif |
257 | }; |
258 | |
259 | /* optional function to request ipi, for controllers with >= 4 ipis */ |
260 | int smp_request_message_ipi(int virq, int msg) |
261 | { |
262 | int err; |
263 | |
264 | if (msg < 0 || msg > PPC_MSG_NMI_IPI) |
265 | return -EINVAL; |
266 | #ifndef CONFIG_NMI_IPI |
267 | if (msg == PPC_MSG_NMI_IPI) |
268 | return 1; |
269 | #endif |
270 | |
err = request_irq(virq, smp_ipi_action[msg],
IRQF_PERCPU | IRQF_NO_THREAD | IRQF_NO_SUSPEND,
smp_ipi_name[msg], NULL);
WARN(err < 0, "unable to request_irq %d for %s (rc %d)\n",
virq, smp_ipi_name[msg], err);
276 | |
277 | return err; |
278 | } |
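/*
 * A minimal usage sketch (not from any particular driver): an interrupt
 * controller with four or more hardware IPIs can request one virq per
 * message. "virq_base" is a hypothetical first of four consecutive virqs set
 * up by the platform code, and the loop assumes the PPC_MSG_* values are the
 * consecutive integers 0..3 as defined in asm/smp.h.
 *
 *	for (msg = PPC_MSG_CALL_FUNCTION; msg <= PPC_MSG_NMI_IPI; msg++)
 *		smp_request_message_ipi(virq_base + msg, msg);
 */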
279 | |
280 | #ifdef CONFIG_PPC_SMP_MUXED_IPI |
281 | struct cpu_messages { |
282 | long messages; /* current messages */ |
283 | }; |
284 | static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_messages, ipi_message); |
285 | |
286 | void smp_muxed_ipi_set_message(int cpu, int msg) |
287 | { |
288 | struct cpu_messages *info = &per_cpu(ipi_message, cpu); |
289 | char *message = (char *)&info->messages; |
290 | |
291 | /* |
292 | * Order previous accesses before accesses in the IPI handler. |
293 | */ |
294 | smp_mb(); |
295 | WRITE_ONCE(message[msg], 1); |
296 | } |
297 | |
298 | void smp_muxed_ipi_message_pass(int cpu, int msg) |
299 | { |
300 | smp_muxed_ipi_set_message(cpu, msg); |
301 | |
302 | /* |
303 | * cause_ipi functions are required to include a full barrier |
304 | * before doing whatever causes the IPI. |
305 | */ |
306 | smp_ops->cause_ipi(cpu); |
307 | } |
308 | |
309 | #ifdef __BIG_ENDIAN__ |
310 | #define IPI_MESSAGE(A) (1uL << ((BITS_PER_LONG - 8) - 8 * (A))) |
311 | #else |
312 | #define IPI_MESSAGE(A) (1uL << (8 * (A))) |
313 | #endif |
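/*
 * Layout note (descriptive only): each message type owns one byte of the
 * per-CPU "messages" word. smp_muxed_ipi_set_message() writes 1 into byte
 * "msg", and IPI_MESSAGE(msg) is the value of that byte's least significant
 * bit within the long, so the byte store and the bit test agree on both
 * endiannesses. For example, on a 64-bit kernel IPI_MESSAGE(1) is 1uL << 8
 * on little-endian and 1uL << 48 on big-endian.
 */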
314 | |
315 | irqreturn_t smp_ipi_demux(void) |
316 | { |
317 | mb(); /* order any irq clear */ |
318 | |
319 | return smp_ipi_demux_relaxed(); |
320 | } |
321 | |
322 | /* sync-free variant. Callers should ensure synchronization */ |
323 | irqreturn_t smp_ipi_demux_relaxed(void) |
324 | { |
325 | struct cpu_messages *info; |
326 | unsigned long all; |
327 | |
328 | info = this_cpu_ptr(&ipi_message); |
329 | do { |
330 | all = xchg(&info->messages, 0); |
331 | #if defined(CONFIG_KVM_XICS) && defined(CONFIG_KVM_BOOK3S_HV_POSSIBLE) |
332 | /* |
333 | * Must check for PPC_MSG_RM_HOST_ACTION messages |
334 | * before PPC_MSG_CALL_FUNCTION messages because when |
335 | * a VM is destroyed, we call kick_all_cpus_sync() |
336 | * to ensure that any pending PPC_MSG_RM_HOST_ACTION |
337 | * messages have completed before we free any VCPUs. |
338 | */ |
339 | if (all & IPI_MESSAGE(PPC_MSG_RM_HOST_ACTION)) |
340 | kvmppc_xics_ipi_action(); |
341 | #endif |
342 | if (all & IPI_MESSAGE(PPC_MSG_CALL_FUNCTION)) |
343 | generic_smp_call_function_interrupt(); |
344 | if (all & IPI_MESSAGE(PPC_MSG_RESCHEDULE)) |
345 | scheduler_ipi(); |
346 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
347 | if (all & IPI_MESSAGE(PPC_MSG_TICK_BROADCAST)) |
348 | timer_broadcast_interrupt(); |
349 | #endif |
350 | #ifdef CONFIG_NMI_IPI |
351 | if (all & IPI_MESSAGE(PPC_MSG_NMI_IPI)) |
352 | nmi_ipi_action(0, NULL); |
353 | #endif |
354 | } while (READ_ONCE(info->messages)); |
355 | |
356 | return IRQ_HANDLED; |
357 | } |
358 | #endif /* CONFIG_PPC_SMP_MUXED_IPI */ |
359 | |
360 | static inline void do_message_pass(int cpu, int msg) |
361 | { |
362 | if (smp_ops->message_pass) |
363 | smp_ops->message_pass(cpu, msg); |
364 | #ifdef CONFIG_PPC_SMP_MUXED_IPI |
365 | else |
366 | smp_muxed_ipi_message_pass(cpu, msg); |
367 | #endif |
368 | } |
369 | |
370 | void arch_smp_send_reschedule(int cpu) |
371 | { |
372 | if (likely(smp_ops)) |
do_message_pass(cpu, PPC_MSG_RESCHEDULE);
374 | } |
375 | EXPORT_SYMBOL_GPL(arch_smp_send_reschedule); |
376 | |
377 | void arch_send_call_function_single_ipi(int cpu) |
378 | { |
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
380 | } |
381 | |
382 | void arch_send_call_function_ipi_mask(const struct cpumask *mask) |
383 | { |
384 | unsigned int cpu; |
385 | |
386 | for_each_cpu(cpu, mask) |
do_message_pass(cpu, PPC_MSG_CALL_FUNCTION);
388 | } |
389 | |
390 | #ifdef CONFIG_NMI_IPI |
391 | |
392 | /* |
393 | * "NMI IPI" system. |
394 | * |
395 | * NMI IPIs may not be recoverable, so should not be used as ongoing part of |
396 | * a running system. They can be used for crash, debug, halt/reboot, etc. |
397 | * |
398 | * The IPI call waits with interrupts disabled until all targets enter the |
399 | * NMI handler, then returns. Subsequent IPIs can be issued before targets |
400 | * have returned from their handlers, so there is no guarantee about |
401 | * concurrency or re-entrancy. |
402 | * |
403 | * A new NMI can be issued before all targets exit the handler. |
404 | * |
405 | * The IPI call may time out without all targets entering the NMI handler. |
406 | * In that case, there is some logic to recover (and ignore subsequent |
407 | * NMI interrupts that may eventually be raised), but the platform interrupt |
408 | * handler may not be able to distinguish this from other exception causes, |
409 | * which may cause a crash. |
410 | */ |
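/*
 * A minimal usage sketch (illustrative only, mirroring the debugger and
 * crash users further down): ask every other CPU to dump its registers,
 * waiting up to one second for them to enter the handler.
 *
 *	static void dump_regs_ipi(struct pt_regs *regs)
 *	{
 *		show_regs(regs);
 *	}
 *	...
 *	smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, dump_regs_ipi, 1000000);
 */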
411 | |
412 | static atomic_t __nmi_ipi_lock = ATOMIC_INIT(0); |
413 | static struct cpumask nmi_ipi_pending_mask; |
414 | static bool nmi_ipi_busy = false; |
415 | static void (*nmi_ipi_function)(struct pt_regs *) = NULL; |
416 | |
417 | noinstr static void nmi_ipi_lock_start(unsigned long *flags) |
418 | { |
419 | raw_local_irq_save(*flags); |
420 | hard_irq_disable(); |
421 | while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) { |
422 | raw_local_irq_restore(*flags); |
423 | spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0); |
424 | raw_local_irq_save(*flags); |
425 | hard_irq_disable(); |
426 | } |
427 | } |
428 | |
429 | noinstr static void nmi_ipi_lock(void) |
430 | { |
431 | while (raw_atomic_cmpxchg(&__nmi_ipi_lock, 0, 1) == 1) |
432 | spin_until_cond(raw_atomic_read(&__nmi_ipi_lock) == 0); |
433 | } |
434 | |
435 | noinstr static void nmi_ipi_unlock(void) |
436 | { |
437 | smp_mb(); |
438 | WARN_ON(raw_atomic_read(&__nmi_ipi_lock) != 1); |
439 | raw_atomic_set(&__nmi_ipi_lock, 0); |
440 | } |
441 | |
442 | noinstr static void nmi_ipi_unlock_end(unsigned long *flags) |
443 | { |
444 | nmi_ipi_unlock(); |
445 | raw_local_irq_restore(*flags); |
446 | } |
447 | |
448 | /* |
449 | * Platform NMI handler calls this to ack |
450 | */ |
451 | noinstr int smp_handle_nmi_ipi(struct pt_regs *regs) |
452 | { |
453 | void (*fn)(struct pt_regs *) = NULL; |
454 | unsigned long flags; |
455 | int me = raw_smp_processor_id(); |
456 | int ret = 0; |
457 | |
458 | /* |
459 | * Unexpected NMIs are possible here because the interrupt may not |
460 | * be able to distinguish NMI IPIs from other types of NMIs, or |
461 | * because the caller may have timed out. |
462 | */ |
463 | nmi_ipi_lock_start(&flags); |
464 | if (cpumask_test_cpu(me, &nmi_ipi_pending_mask)) { |
465 | cpumask_clear_cpu(me, &nmi_ipi_pending_mask); |
466 | fn = READ_ONCE(nmi_ipi_function); |
467 | WARN_ON_ONCE(!fn); |
468 | ret = 1; |
469 | } |
470 | nmi_ipi_unlock_end(&flags); |
471 | |
472 | if (fn) |
473 | fn(regs); |
474 | |
475 | return ret; |
476 | } |
477 | |
478 | static void do_smp_send_nmi_ipi(int cpu, bool safe) |
479 | { |
480 | if (!safe && smp_ops->cause_nmi_ipi && smp_ops->cause_nmi_ipi(cpu)) |
481 | return; |
482 | |
483 | if (cpu >= 0) { |
484 | do_message_pass(cpu, PPC_MSG_NMI_IPI); |
485 | } else { |
486 | int c; |
487 | |
488 | for_each_online_cpu(c) { |
489 | if (c == raw_smp_processor_id()) |
490 | continue; |
491 | do_message_pass(c, PPC_MSG_NMI_IPI); |
492 | } |
493 | } |
494 | } |
495 | |
496 | /* |
497 | * - cpu is the target CPU (must not be this CPU), or NMI_IPI_ALL_OTHERS. |
498 | * - fn is the target callback function. |
499 | * - delay_us > 0 is the delay before giving up waiting for targets to |
500 | * begin executing the handler, == 0 specifies indefinite delay. |
501 | */ |
502 | static int __smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), |
503 | u64 delay_us, bool safe) |
504 | { |
505 | unsigned long flags; |
506 | int me = raw_smp_processor_id(); |
507 | int ret = 1; |
508 | |
509 | BUG_ON(cpu == me); |
510 | BUG_ON(cpu < 0 && cpu != NMI_IPI_ALL_OTHERS); |
511 | |
512 | if (unlikely(!smp_ops)) |
513 | return 0; |
514 | |
515 | nmi_ipi_lock_start(&flags); |
516 | while (nmi_ipi_busy) { |
517 | nmi_ipi_unlock_end(&flags); |
518 | spin_until_cond(!nmi_ipi_busy); |
519 | nmi_ipi_lock_start(&flags); |
520 | } |
521 | nmi_ipi_busy = true; |
522 | nmi_ipi_function = fn; |
523 | |
524 | WARN_ON_ONCE(!cpumask_empty(&nmi_ipi_pending_mask)); |
525 | |
526 | if (cpu < 0) { |
527 | /* ALL_OTHERS */ |
528 | cpumask_copy(&nmi_ipi_pending_mask, cpu_online_mask); |
529 | cpumask_clear_cpu(me, &nmi_ipi_pending_mask); |
530 | } else { |
531 | cpumask_set_cpu(cpu, &nmi_ipi_pending_mask); |
532 | } |
533 | |
534 | nmi_ipi_unlock(); |
535 | |
536 | /* Interrupts remain hard disabled */ |
537 | |
538 | do_smp_send_nmi_ipi(cpu, safe); |
539 | |
540 | nmi_ipi_lock(); |
541 | /* nmi_ipi_busy is set here, so unlock/lock is okay */ |
542 | while (!cpumask_empty(&nmi_ipi_pending_mask)) { |
543 | nmi_ipi_unlock(); |
544 | udelay(1); |
545 | nmi_ipi_lock(); |
546 | if (delay_us) { |
547 | delay_us--; |
548 | if (!delay_us) |
549 | break; |
550 | } |
551 | } |
552 | |
553 | if (!cpumask_empty(&nmi_ipi_pending_mask)) { |
554 | /* Timeout waiting for CPUs to call smp_handle_nmi_ipi */ |
555 | ret = 0; |
556 | cpumask_clear(&nmi_ipi_pending_mask); |
557 | } |
558 | |
559 | nmi_ipi_function = NULL; |
560 | nmi_ipi_busy = false; |
561 | |
562 | nmi_ipi_unlock_end(&flags); |
563 | |
564 | return ret; |
565 | } |
566 | |
567 | int smp_send_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) |
568 | { |
569 | return __smp_send_nmi_ipi(cpu, fn, delay_us, false); |
570 | } |
571 | |
572 | int smp_send_safe_nmi_ipi(int cpu, void (*fn)(struct pt_regs *), u64 delay_us) |
573 | { |
574 | return __smp_send_nmi_ipi(cpu, fn, delay_us, true); |
575 | } |
576 | #endif /* CONFIG_NMI_IPI */ |
577 | |
578 | #ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST |
579 | void tick_broadcast(const struct cpumask *mask) |
580 | { |
581 | unsigned int cpu; |
582 | |
583 | for_each_cpu(cpu, mask) |
584 | do_message_pass(cpu, PPC_MSG_TICK_BROADCAST); |
585 | } |
586 | #endif |
587 | |
588 | #ifdef CONFIG_DEBUGGER |
589 | static void debugger_ipi_callback(struct pt_regs *regs) |
590 | { |
591 | debugger_ipi(regs); |
592 | } |
593 | |
594 | void smp_send_debugger_break(void) |
595 | { |
596 | smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, debugger_ipi_callback, 1000000); |
597 | } |
598 | #endif |
599 | |
600 | #ifdef CONFIG_KEXEC_CORE |
601 | void crash_send_ipi(void (*crash_ipi_callback)(struct pt_regs *)) |
602 | { |
603 | int cpu; |
604 | |
605 | smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, crash_ipi_callback, 1000000); |
606 | if (kdump_in_progress() && crash_wake_offline) { |
607 | for_each_present_cpu(cpu) { |
608 | if (cpu_online(cpu)) |
609 | continue; |
610 | /* |
611 | * crash_ipi_callback will wait for |
612 | * all cpus, including offline CPUs. |
613 | * We don't care about nmi_ipi_function. |
614 | * Offline cpus will jump straight into |
615 | * crash_ipi_callback, we can skip the |
616 | * entire NMI dance and waiting for |
617 | * cpus to clear pending mask, etc. |
618 | */ |
619 | do_smp_send_nmi_ipi(cpu, false); |
620 | } |
621 | } |
622 | } |
623 | #endif |
624 | |
625 | void crash_smp_send_stop(void) |
626 | { |
627 | static bool stopped = false; |
628 | |
629 | /* |
630 | * In case of fadump, register data for all CPUs is captured by f/w |
631 | * on ibm,os-term rtas call. Skip IPI callbacks to other CPUs before |
632 | * this rtas call to avoid tricky post processing of those CPUs' |
633 | * backtraces. |
634 | */ |
635 | if (should_fadump_crash()) |
636 | return; |
637 | |
638 | if (stopped) |
639 | return; |
640 | |
641 | stopped = true; |
642 | |
643 | #ifdef CONFIG_KEXEC_CORE |
644 | if (kexec_crash_image) { |
645 | crash_kexec_prepare(); |
646 | return; |
647 | } |
648 | #endif |
649 | |
650 | smp_send_stop(); |
651 | } |
652 | |
653 | #ifdef CONFIG_NMI_IPI |
654 | static void nmi_stop_this_cpu(struct pt_regs *regs) |
655 | { |
656 | /* |
657 | * IRQs are already hard disabled by the smp_handle_nmi_ipi. |
658 | */ |
659 | set_cpu_online(smp_processor_id(), false); |
660 | |
661 | spin_begin(); |
662 | while (1) |
663 | spin_cpu_relax(); |
664 | } |
665 | |
666 | void smp_send_stop(void) |
667 | { |
668 | smp_send_nmi_ipi(NMI_IPI_ALL_OTHERS, nmi_stop_this_cpu, 1000000); |
669 | } |
670 | |
671 | #else /* CONFIG_NMI_IPI */ |
672 | |
673 | static void stop_this_cpu(void *dummy) |
674 | { |
675 | hard_irq_disable(); |
676 | |
677 | /* |
678 | * Offlining CPUs in stop_this_cpu can result in scheduler warnings, |
679 | * (see commit de6e5d38417e), but printk_safe_flush_on_panic() wants |
680 | * to know other CPUs are offline before it breaks locks to flush |
681 | * printk buffers, in case we panic()ed while holding the lock. |
682 | */ |
set_cpu_online(smp_processor_id(), false);
684 | |
685 | spin_begin(); |
686 | while (1) |
687 | spin_cpu_relax(); |
688 | } |
689 | |
690 | void smp_send_stop(void) |
691 | { |
692 | static bool stopped = false; |
693 | |
694 | /* |
695 | * Prevent waiting on csd lock from a previous smp_send_stop. |
696 | * This is racy, but in general callers try to do the right |
697 | * thing and only fire off one smp_send_stop (e.g., see |
698 | * kernel/panic.c) |
699 | */ |
700 | if (stopped) |
701 | return; |
702 | |
703 | stopped = true; |
704 | |
smp_call_function(stop_this_cpu, NULL, 0);
706 | } |
707 | #endif /* CONFIG_NMI_IPI */ |
708 | |
709 | static struct task_struct *current_set[NR_CPUS]; |
710 | |
711 | static void smp_store_cpu_info(int id) |
712 | { |
713 | per_cpu(cpu_pvr, id) = mfspr(SPRN_PVR); |
714 | #ifdef CONFIG_PPC_E500 |
715 | per_cpu(next_tlbcam_idx, id) |
716 | = (mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1; |
717 | #endif |
718 | } |
719 | |
720 | /* |
* Relationships between CPUs are maintained in a set of per-cpu cpumasks, so
* rather than just passing around the cpumask we pass around a function that
* returns the cpumask for the given CPU.
724 | */ |
725 | static void set_cpus_related(int i, int j, struct cpumask *(*get_cpumask)(int)) |
726 | { |
cpumask_set_cpu(i, get_cpumask(j));
cpumask_set_cpu(j, get_cpumask(i));
729 | } |
730 | |
731 | #ifdef CONFIG_HOTPLUG_CPU |
732 | static void set_cpus_unrelated(int i, int j, |
733 | struct cpumask *(*get_cpumask)(int)) |
734 | { |
cpumask_clear_cpu(i, get_cpumask(j));
cpumask_clear_cpu(j, get_cpumask(i));
737 | } |
738 | #endif |
739 | |
740 | /* |
* Extends set_cpus_related. Instead of setting one CPU at a time in
* dstmask, OR in the whole srcmask in one shot. dstmask should be a
* superset of srcmask.
743 | */ |
744 | static void or_cpumasks_related(int i, int j, struct cpumask *(*srcmask)(int), |
745 | struct cpumask *(*dstmask)(int)) |
746 | { |
747 | struct cpumask *mask; |
748 | int k; |
749 | |
750 | mask = srcmask(j); |
751 | for_each_cpu(k, srcmask(i)) |
cpumask_or(dstmask(k), dstmask(k), mask);
753 | |
754 | if (i == j) |
755 | return; |
756 | |
757 | mask = srcmask(i); |
758 | for_each_cpu(k, srcmask(j)) |
cpumask_or(dstmask(k), dstmask(k), mask);
760 | } |
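/*
 * For example (illustrative only): or_cpumasks_related(cpu, cpu, submask_fn,
 * cpu_core_mask) ORs submask_fn(cpu) into cpu_core_mask(k) for every CPU k
 * already present in submask_fn(cpu); add_cpu_to_masks() below uses exactly
 * this pattern to seed the core mask.
 */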
761 | |
762 | /* |
763 | * parse_thread_groups: Parses the "ibm,thread-groups" device tree |
764 | * property for the CPU device node @dn and stores |
765 | * the parsed output in the thread_groups_list |
766 | * structure @tglp. |
767 | * |
768 | * @dn: The device node of the CPU device. |
769 | * @tglp: Pointer to a thread group list structure into which the parsed |
770 | * output of "ibm,thread-groups" is stored. |
771 | * |
772 | * ibm,thread-groups[0..N-1] array defines which group of threads in |
773 | * the CPU-device node can be grouped together based on the property. |
774 | * |
775 | * This array can represent thread groupings for multiple properties. |
776 | * |
* ibm,thread-groups[i + 0] tells us the property based on which the
* threads are being grouped together. If this value is 1, it implies
* that the threads in the same group share the L1 cache and the
* translation cache. If the value is 2, it implies that the threads in
* the same group share the same L2 cache.
782 | * |
783 | * ibm,thread-groups[i+1] tells us how many such thread groups exist for the |
784 | * property ibm,thread-groups[i] |
785 | * |
786 | * ibm,thread-groups[i+2] tells us the number of threads in each such |
787 | * group. |
788 | * Suppose k = (ibm,thread-groups[i+1] * ibm,thread-groups[i+2]), then, |
789 | * |
* ibm,thread-groups[i+3..i+k+2] is the list of threads identified by
791 | * "ibm,ppc-interrupt-server#s" arranged as per their membership in |
792 | * the grouping. |
793 | * |
794 | * Example: |
795 | * If "ibm,thread-groups" = [1,2,4,8,10,12,14,9,11,13,15,2,2,4,8,10,12,14,9,11,13,15] |
* This can be decomposed into two consecutive arrays:
797 | * a) [1,2,4,8,10,12,14,9,11,13,15] |
798 | * b) [2,2,4,8,10,12,14,9,11,13,15] |
799 | * |
800 | * where in, |
801 | * |
802 | * a) provides information of Property "1" being shared by "2" groups, |
803 | * each with "4" threads each. The "ibm,ppc-interrupt-server#s" of |
804 | * the first group is {8,10,12,14} and the |
805 | * "ibm,ppc-interrupt-server#s" of the second group is |
806 | * {9,11,13,15}. Property "1" is indicative of the thread in the |
807 | * group sharing L1 cache, translation cache and Instruction Data |
808 | * flow. |
809 | * |
810 | * b) provides information of Property "2" being shared by "2" groups, |
811 | * each group with "4" threads. The "ibm,ppc-interrupt-server#s" of |
812 | * the first group is {8,10,12,14} and the |
813 | * "ibm,ppc-interrupt-server#s" of the second group is |
814 | * {9,11,13,15}. Property "2" indicates that the threads in each |
815 | * group share the L2-cache. |
816 | * |
817 | * Returns 0 on success, -EINVAL if the property does not exist, |
818 | * -ENODATA if property does not have a value, and -EOVERFLOW if the |
819 | * property data isn't large enough. |
820 | */ |
821 | static int parse_thread_groups(struct device_node *dn, |
822 | struct thread_groups_list *tglp) |
823 | { |
824 | unsigned int property_idx = 0; |
825 | u32 *thread_group_array; |
826 | size_t total_threads; |
827 | int ret = 0, count; |
828 | u32 *thread_list; |
829 | int i = 0; |
830 | |
count = of_property_count_u32_elems(dn, "ibm,thread-groups");
thread_group_array = kcalloc(count, sizeof(u32), GFP_KERNEL);
ret = of_property_read_u32_array(dn, "ibm,thread-groups",
thread_group_array, count);
835 | if (ret) |
836 | goto out_free; |
837 | |
838 | while (i < count && property_idx < MAX_THREAD_GROUP_PROPERTIES) { |
839 | int j; |
840 | struct thread_groups *tg = &tglp->property_tgs[property_idx++]; |
841 | |
842 | tg->property = thread_group_array[i]; |
843 | tg->nr_groups = thread_group_array[i + 1]; |
844 | tg->threads_per_group = thread_group_array[i + 2]; |
845 | total_threads = tg->nr_groups * tg->threads_per_group; |
846 | |
847 | thread_list = &thread_group_array[i + 3]; |
848 | |
849 | for (j = 0; j < total_threads; j++) |
850 | tg->thread_list[j] = thread_list[j]; |
851 | i = i + 3 + total_threads; |
852 | } |
853 | |
854 | tglp->nr_properties = property_idx; |
855 | |
856 | out_free: |
kfree(thread_group_array);
858 | return ret; |
859 | } |
860 | |
861 | /* |
862 | * get_cpu_thread_group_start : Searches the thread group in tg->thread_list |
863 | * that @cpu belongs to. |
864 | * |
865 | * @cpu : The logical CPU whose thread group is being searched. |
866 | * @tg : The thread-group structure of the CPU node which @cpu belongs |
867 | * to. |
868 | * |
869 | * Returns the index to tg->thread_list that points to the start |
870 | * of the thread_group that @cpu belongs to. |
871 | * |
872 | * Returns -1 if cpu doesn't belong to any of the groups pointed to by |
873 | * tg->thread_list. |
874 | */ |
875 | static int get_cpu_thread_group_start(int cpu, struct thread_groups *tg) |
876 | { |
877 | int hw_cpu_id = get_hard_smp_processor_id(cpu); |
878 | int i, j; |
879 | |
880 | for (i = 0; i < tg->nr_groups; i++) { |
881 | int group_start = i * tg->threads_per_group; |
882 | |
883 | for (j = 0; j < tg->threads_per_group; j++) { |
884 | int idx = group_start + j; |
885 | |
886 | if (tg->thread_list[idx] == hw_cpu_id) |
887 | return group_start; |
888 | } |
889 | } |
890 | |
891 | return -1; |
892 | } |
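/*
 * Worked example (illustrative only), reusing the device-tree sample from
 * parse_thread_groups() above: with nr_groups = 2, threads_per_group = 4 and
 * thread_list = {8,10,12,14,9,11,13,15}, a CPU whose hardware id is 11 is
 * found at index 5, so the function returns 4, the start of the second group.
 */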
893 | |
894 | static struct thread_groups *__init get_thread_groups(int cpu, |
895 | int group_property, |
896 | int *err) |
897 | { |
898 | struct device_node *dn = of_get_cpu_node(cpu, NULL); |
899 | struct thread_groups_list *cpu_tgl = &tgl[cpu]; |
900 | struct thread_groups *tg = NULL; |
901 | int i; |
902 | *err = 0; |
903 | |
904 | if (!dn) { |
905 | *err = -ENODATA; |
906 | return NULL; |
907 | } |
908 | |
909 | if (!cpu_tgl->nr_properties) { |
*err = parse_thread_groups(dn, cpu_tgl);
911 | if (*err) |
912 | goto out; |
913 | } |
914 | |
915 | for (i = 0; i < cpu_tgl->nr_properties; i++) { |
916 | if (cpu_tgl->property_tgs[i].property == group_property) { |
917 | tg = &cpu_tgl->property_tgs[i]; |
918 | break; |
919 | } |
920 | } |
921 | |
922 | if (!tg) |
923 | *err = -EINVAL; |
924 | out: |
of_node_put(dn);
926 | return tg; |
927 | } |
928 | |
929 | static int __init update_mask_from_threadgroup(cpumask_var_t *mask, struct thread_groups *tg, |
930 | int cpu, int cpu_group_start) |
931 | { |
932 | int first_thread = cpu_first_thread_sibling(cpu); |
933 | int i; |
934 | |
935 | zalloc_cpumask_var_node(mask, GFP_KERNEL, cpu_to_node(cpu)); |
936 | |
937 | for (i = first_thread; i < first_thread + threads_per_core; i++) { |
int i_group_start = get_cpu_thread_group_start(i, tg);
939 | |
940 | if (unlikely(i_group_start == -1)) { |
941 | WARN_ON_ONCE(1); |
942 | return -ENODATA; |
943 | } |
944 | |
945 | if (i_group_start == cpu_group_start) |
cpumask_set_cpu(i, *mask);
947 | } |
948 | |
949 | return 0; |
950 | } |
951 | |
952 | static int __init init_thread_group_cache_map(int cpu, int cache_property) |
953 | |
954 | { |
955 | int cpu_group_start = -1, err = 0; |
956 | struct thread_groups *tg = NULL; |
957 | cpumask_var_t *mask = NULL; |
958 | |
959 | if (cache_property != THREAD_GROUP_SHARE_L1 && |
960 | cache_property != THREAD_GROUP_SHARE_L2_L3) |
961 | return -EINVAL; |
962 | |
tg = get_thread_groups(cpu, cache_property, &err);
964 | |
965 | if (!tg) |
966 | return err; |
967 | |
968 | cpu_group_start = get_cpu_thread_group_start(cpu, tg); |
969 | |
970 | if (unlikely(cpu_group_start == -1)) { |
971 | WARN_ON_ONCE(1); |
972 | return -ENODATA; |
973 | } |
974 | |
975 | if (cache_property == THREAD_GROUP_SHARE_L1) { |
976 | mask = &per_cpu(thread_group_l1_cache_map, cpu); |
977 | update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); |
} else if (cache_property == THREAD_GROUP_SHARE_L2_L3) {
980 | mask = &per_cpu(thread_group_l2_cache_map, cpu); |
981 | update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); |
982 | mask = &per_cpu(thread_group_l3_cache_map, cpu); |
983 | update_mask_from_threadgroup(mask, tg, cpu, cpu_group_start); |
984 | } |
985 | |
986 | |
987 | return 0; |
988 | } |
989 | |
990 | static bool shared_caches; |
991 | |
992 | #ifdef CONFIG_SCHED_SMT |
/* Scheduler flags for the SMT level; enables asymmetric packing on asymmetric SMT CPUs */
994 | static int powerpc_smt_flags(void) |
995 | { |
996 | int flags = SD_SHARE_CPUCAPACITY | SD_SHARE_PKG_RESOURCES; |
997 | |
998 | if (cpu_has_feature(CPU_FTR_ASYM_SMT)) { |
printk_once(KERN_INFO "Enabling Asymmetric SMT scheduling\n");
1000 | flags |= SD_ASYM_PACKING; |
1001 | } |
1002 | return flags; |
1003 | } |
1004 | #endif |
1005 | |
1006 | /* |
1007 | * P9 has a slightly odd architecture where pairs of cores share an L2 cache. |
1008 | * This topology makes it *much* cheaper to migrate tasks between adjacent cores |
1009 | * since the migrated task remains cache hot. We want to take advantage of this |
1010 | * at the scheduler level so an extra topology level is required. |
1011 | */ |
1012 | static int powerpc_shared_cache_flags(void) |
1013 | { |
1014 | return SD_SHARE_PKG_RESOURCES; |
1015 | } |
1016 | |
1017 | /* |
* We can't just pass cpu_l2_cache_mask() directly because it returns a
* non-const pointer and the compiler barfs on that.
1020 | */ |
1021 | static const struct cpumask *shared_cache_mask(int cpu) |
1022 | { |
1023 | return per_cpu(cpu_l2_cache_map, cpu); |
1024 | } |
1025 | |
1026 | #ifdef CONFIG_SCHED_SMT |
1027 | static const struct cpumask *smallcore_smt_mask(int cpu) |
1028 | { |
1029 | return cpu_smallcore_mask(cpu); |
1030 | } |
1031 | #endif |
1032 | |
1033 | static struct cpumask *cpu_coregroup_mask(int cpu) |
1034 | { |
1035 | return per_cpu(cpu_coregroup_map, cpu); |
1036 | } |
1037 | |
1038 | static bool has_coregroup_support(void) |
1039 | { |
1040 | return coregroup_enabled; |
1041 | } |
1042 | |
1043 | static const struct cpumask *cpu_mc_mask(int cpu) |
1044 | { |
1045 | return cpu_coregroup_mask(cpu); |
1046 | } |
1047 | |
1048 | static struct sched_domain_topology_level powerpc_topology[] = { |
1049 | #ifdef CONFIG_SCHED_SMT |
1050 | { cpu_smt_mask, powerpc_smt_flags, SD_INIT_NAME(SMT) }, |
1051 | #endif |
1052 | { shared_cache_mask, powerpc_shared_cache_flags, SD_INIT_NAME(CACHE) }, |
1053 | { cpu_mc_mask, SD_INIT_NAME(MC) }, |
1054 | { cpu_cpu_mask, SD_INIT_NAME(PKG) }, |
1055 | { NULL, }, |
1056 | }; |
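/*
 * Note: the order of the entries above must line up with the
 * smt_idx/cache_idx/mc_idx/die_idx enum near the top of this file, since
 * fixup_topology() indexes this array with those values when it overrides
 * or collapses levels.
 */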
1057 | |
1058 | static int __init init_big_cores(void) |
1059 | { |
1060 | int cpu; |
1061 | |
1062 | for_each_possible_cpu(cpu) { |
1063 | int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L1); |
1064 | |
1065 | if (err) |
1066 | return err; |
1067 | |
zalloc_cpumask_var_node(&per_cpu(cpu_smallcore_map, cpu),
1069 | GFP_KERNEL, |
1070 | cpu_to_node(cpu)); |
1071 | } |
1072 | |
1073 | has_big_cores = true; |
1074 | |
1075 | for_each_possible_cpu(cpu) { |
1076 | int err = init_thread_group_cache_map(cpu, THREAD_GROUP_SHARE_L2_L3); |
1077 | |
1078 | if (err) |
1079 | return err; |
1080 | } |
1081 | |
1082 | thread_group_shares_l2 = true; |
1083 | thread_group_shares_l3 = true; |
pr_debug("L2/L3 cache only shared by the threads in the small core\n");
1085 | |
1086 | return 0; |
1087 | } |
1088 | |
1089 | void __init smp_prepare_cpus(unsigned int max_cpus) |
1090 | { |
1091 | unsigned int cpu, num_threads; |
1092 | |
DBG("smp_prepare_cpus\n");
1094 | |
1095 | /* |
1096 | * setup_cpu may need to be called on the boot cpu. We haven't |
1097 | * spun any cpus up but lets be paranoid. |
1098 | */ |
1099 | BUG_ON(boot_cpuid != smp_processor_id()); |
1100 | |
1101 | /* Fixup boot cpu */ |
smp_store_cpu_info(boot_cpuid);
1103 | cpu_callin_map[boot_cpuid] = 1; |
1104 | |
1105 | for_each_possible_cpu(cpu) { |
zalloc_cpumask_var_node(&per_cpu(cpu_sibling_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_l2_cache_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
zalloc_cpumask_var_node(&per_cpu(cpu_core_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
if (has_coregroup_support())
zalloc_cpumask_var_node(&per_cpu(cpu_coregroup_map, cpu),
GFP_KERNEL, cpu_to_node(cpu));
1115 | |
1116 | #ifdef CONFIG_NUMA |
1117 | /* |
1118 | * numa_node_id() works after this. |
1119 | */ |
1120 | if (cpu_present(cpu)) { |
1121 | set_cpu_numa_node(cpu, numa_cpu_lookup_table[cpu]); |
1122 | set_cpu_numa_mem(cpu, |
1123 | local_memory_node(numa_cpu_lookup_table[cpu])); |
1124 | } |
1125 | #endif |
1126 | } |
1127 | |
1128 | /* Init the cpumasks so the boot CPU is related to itself */ |
cpumask_set_cpu(boot_cpuid, cpu_sibling_mask(boot_cpuid));
cpumask_set_cpu(boot_cpuid, cpu_l2_cache_mask(boot_cpuid));
1131 | cpumask_set_cpu(boot_cpuid, cpu_core_mask(boot_cpuid)); |
1132 | |
1133 | if (has_coregroup_support()) |
1134 | cpumask_set_cpu(boot_cpuid, cpu_coregroup_mask(boot_cpuid)); |
1135 | |
1136 | init_big_cores(); |
1137 | if (has_big_cores) { |
1138 | cpumask_set_cpu(boot_cpuid, |
1139 | cpu_smallcore_mask(boot_cpuid)); |
1140 | } |
1141 | |
1142 | if (cpu_to_chip_id(boot_cpuid) != -1) { |
1143 | int idx = DIV_ROUND_UP(num_possible_cpus(), threads_per_core); |
1144 | |
1145 | /* |
* All threads of a core belong to the same chip, so
* chip_id_lookup_table only needs one entry per core.
* Assumption: if boot_cpuid doesn't have a chip-id, then no
* other CPU will have a chip-id either.
1150 | */ |
1151 | chip_id_lookup_table = kcalloc(idx, sizeof(int), GFP_KERNEL); |
1152 | if (chip_id_lookup_table) |
1153 | memset(chip_id_lookup_table, -1, sizeof(int) * idx); |
1154 | } |
1155 | |
1156 | if (smp_ops && smp_ops->probe) |
1157 | smp_ops->probe(); |
1158 | |
// Initialise the generic SMT topology support
1160 | num_threads = 1; |
1161 | if (smt_enabled_at_boot) |
1162 | num_threads = smt_enabled_at_boot; |
1163 | cpu_smt_set_num_threads(num_threads, threads_per_core); |
1164 | } |
1165 | |
1166 | void smp_prepare_boot_cpu(void) |
1167 | { |
1168 | BUG_ON(smp_processor_id() != boot_cpuid); |
1169 | #ifdef CONFIG_PPC64 |
1170 | paca_ptrs[boot_cpuid]->__current = current; |
1171 | #endif |
1172 | set_numa_node(numa_cpu_lookup_table[boot_cpuid]); |
1173 | current_set[boot_cpuid] = current; |
1174 | } |
1175 | |
1176 | #ifdef CONFIG_HOTPLUG_CPU |
1177 | |
1178 | int generic_cpu_disable(void) |
1179 | { |
1180 | unsigned int cpu = smp_processor_id(); |
1181 | |
1182 | if (cpu == boot_cpuid) |
1183 | return -EBUSY; |
1184 | |
set_cpu_online(cpu, false);
1186 | #ifdef CONFIG_PPC64 |
1187 | vdso_data->processorCount--; |
1188 | #endif |
1189 | /* Update affinity of all IRQs previously aimed at this CPU */ |
1190 | irq_migrate_all_off_this_cpu(); |
1191 | |
1192 | /* |
1193 | * Depending on the details of the interrupt controller, it's possible |
1194 | * that one of the interrupts we just migrated away from this CPU is |
1195 | * actually already pending on this CPU. If we leave it in that state |
1196 | * the interrupt will never be EOI'ed, and will never fire again. So |
1197 | * temporarily enable interrupts here, to allow any pending interrupt to |
1198 | * be received (and EOI'ed), before we take this CPU offline. |
1199 | */ |
1200 | local_irq_enable(); |
1201 | mdelay(1); |
1202 | local_irq_disable(); |
1203 | |
1204 | return 0; |
1205 | } |
1206 | |
1207 | void generic_cpu_die(unsigned int cpu) |
1208 | { |
1209 | int i; |
1210 | |
1211 | for (i = 0; i < 100; i++) { |
1212 | smp_rmb(); |
1213 | if (is_cpu_dead(cpu)) |
1214 | return; |
msleep(100);
}
printk(KERN_ERR "CPU%d didn't die...\n", cpu);
1218 | } |
1219 | |
1220 | void generic_set_cpu_dead(unsigned int cpu) |
1221 | { |
1222 | per_cpu(cpu_state, cpu) = CPU_DEAD; |
1223 | } |
1224 | |
1225 | /* |
* The cpu_state should be set to CPU_UP_PREPARE in kick_cpu(), otherwise
* the cpu_state stays CPU_DEAD from the last call to generic_set_cpu_dead()
* and the wait loop in generic_cpu_die() returns immediately.
1229 | */ |
1230 | void generic_set_cpu_up(unsigned int cpu) |
1231 | { |
1232 | per_cpu(cpu_state, cpu) = CPU_UP_PREPARE; |
1233 | } |
1234 | |
1235 | int generic_check_cpu_restart(unsigned int cpu) |
1236 | { |
1237 | return per_cpu(cpu_state, cpu) == CPU_UP_PREPARE; |
1238 | } |
1239 | |
1240 | int is_cpu_dead(unsigned int cpu) |
1241 | { |
1242 | return per_cpu(cpu_state, cpu) == CPU_DEAD; |
1243 | } |
1244 | |
1245 | static bool secondaries_inhibited(void) |
1246 | { |
1247 | return kvm_hv_mode_active(); |
1248 | } |
1249 | |
1250 | #else /* HOTPLUG_CPU */ |
1251 | |
1252 | #define secondaries_inhibited() 0 |
1253 | |
1254 | #endif |
1255 | |
1256 | static void cpu_idle_thread_init(unsigned int cpu, struct task_struct *idle) |
1257 | { |
1258 | #ifdef CONFIG_PPC64 |
1259 | paca_ptrs[cpu]->__current = idle; |
1260 | paca_ptrs[cpu]->kstack = (unsigned long)task_stack_page(idle) + |
1261 | THREAD_SIZE - STACK_FRAME_MIN_SIZE; |
1262 | #endif |
1263 | task_thread_info(idle)->cpu = cpu; |
1264 | secondary_current = current_set[cpu] = idle; |
1265 | } |
1266 | |
1267 | int __cpu_up(unsigned int cpu, struct task_struct *tidle) |
1268 | { |
1269 | const unsigned long boot_spin_ms = 5 * MSEC_PER_SEC; |
1270 | const bool booting = system_state < SYSTEM_RUNNING; |
1271 | const unsigned long hp_spin_ms = 1; |
1272 | unsigned long deadline; |
1273 | int rc; |
1274 | const unsigned long spin_wait_ms = booting ? boot_spin_ms : hp_spin_ms; |
1275 | |
1276 | /* |
1277 | * Don't allow secondary threads to come online if inhibited |
1278 | */ |
1279 | if (threads_per_core > 1 && secondaries_inhibited() && |
1280 | cpu_thread_in_subcore(cpu)) |
1281 | return -EBUSY; |
1282 | |
1283 | if (smp_ops == NULL || |
1284 | (smp_ops->cpu_bootable && !smp_ops->cpu_bootable(cpu))) |
1285 | return -EINVAL; |
1286 | |
cpu_idle_thread_init(cpu, tidle);
1288 | |
1289 | /* |
1290 | * The platform might need to allocate resources prior to bringing |
1291 | * up the CPU |
1292 | */ |
1293 | if (smp_ops->prepare_cpu) { |
1294 | rc = smp_ops->prepare_cpu(cpu); |
1295 | if (rc) |
1296 | return rc; |
1297 | } |
1298 | |
/* Make sure the callin-map entry is 0 (it can be left over from a
* previous CPU hotplug).
*/
1302 | cpu_callin_map[cpu] = 0; |
1303 | |
1304 | /* The information for processor bringup must |
1305 | * be written out to main store before we release |
1306 | * the processor. |
1307 | */ |
1308 | smp_mb(); |
1309 | |
1310 | /* wake up cpus */ |
DBG("smp: kicking cpu %d\n", cpu);
rc = smp_ops->kick_cpu(cpu);
if (rc) {
pr_err("smp: failed starting cpu %d (rc %d)\n", cpu, rc);
1315 | return rc; |
1316 | } |
1317 | |
1318 | /* |
1319 | * At boot time, simply spin on the callin word until the |
1320 | * deadline passes. |
1321 | * |
1322 | * At run time, spin for an optimistic amount of time to avoid |
1323 | * sleeping in the common case. |
1324 | */ |
deadline = jiffies + msecs_to_jiffies(spin_wait_ms);
1326 | spin_until_cond(cpu_callin_map[cpu] || time_is_before_jiffies(deadline)); |
1327 | |
1328 | if (!cpu_callin_map[cpu] && system_state >= SYSTEM_RUNNING) { |
1329 | const unsigned long sleep_interval_us = 10 * USEC_PER_MSEC; |
1330 | const unsigned long sleep_wait_ms = 100 * MSEC_PER_SEC; |
1331 | |
deadline = jiffies + msecs_to_jiffies(sleep_wait_ms);
while (!cpu_callin_map[cpu] && time_is_after_jiffies(deadline))
fsleep(sleep_interval_us);
1335 | } |
1336 | |
1337 | if (!cpu_callin_map[cpu]) { |
printk(KERN_ERR "Processor %u is stuck.\n", cpu);
1339 | return -ENOENT; |
1340 | } |
1341 | |
DBG("Processor %u found.\n", cpu);
1343 | |
1344 | if (smp_ops->give_timebase) |
1345 | smp_ops->give_timebase(); |
1346 | |
1347 | /* Wait until cpu puts itself in the online & active maps */ |
1348 | spin_until_cond(cpu_online(cpu)); |
1349 | |
1350 | return 0; |
1351 | } |
1352 | |
1353 | /* Return the value of the reg property corresponding to the given |
1354 | * logical cpu. |
1355 | */ |
1356 | int cpu_to_core_id(int cpu) |
1357 | { |
1358 | struct device_node *np; |
1359 | int id = -1; |
1360 | |
1361 | np = of_get_cpu_node(cpu, NULL); |
1362 | if (!np) |
1363 | goto out; |
1364 | |
id = of_get_cpu_hwid(np, 0);
out:
of_node_put(np);
1368 | return id; |
1369 | } |
1370 | EXPORT_SYMBOL_GPL(cpu_to_core_id); |
1371 | |
1372 | /* Helper routines for cpu to core mapping */ |
1373 | int cpu_core_index_of_thread(int cpu) |
1374 | { |
1375 | return cpu >> threads_shift; |
1376 | } |
1377 | EXPORT_SYMBOL_GPL(cpu_core_index_of_thread); |
1378 | |
1379 | int cpu_first_thread_of_core(int core) |
1380 | { |
1381 | return core << threads_shift; |
1382 | } |
1383 | EXPORT_SYMBOL_GPL(cpu_first_thread_of_core); |
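/*
 * For example (illustrative only): with threads_shift = 3 (eight threads per
 * core), cpu_core_index_of_thread(11) == 1 and cpu_first_thread_of_core(1) == 8.
 */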
1384 | |
1385 | /* Must be called when no change can occur to cpu_present_mask, |
1386 | * i.e. during cpu online or offline. |
1387 | */ |
1388 | static struct device_node *cpu_to_l2cache(int cpu) |
1389 | { |
1390 | struct device_node *np; |
1391 | struct device_node *cache; |
1392 | |
1393 | if (!cpu_present(cpu)) |
1394 | return NULL; |
1395 | |
1396 | np = of_get_cpu_node(cpu, NULL); |
1397 | if (np == NULL) |
1398 | return NULL; |
1399 | |
1400 | cache = of_find_next_cache_node(np); |
1401 | |
of_node_put(np);
1403 | |
1404 | return cache; |
1405 | } |
1406 | |
1407 | static bool update_mask_by_l2(int cpu, cpumask_var_t *mask) |
1408 | { |
1409 | struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; |
1410 | struct device_node *l2_cache, *np; |
1411 | int i; |
1412 | |
1413 | if (has_big_cores) |
1414 | submask_fn = cpu_smallcore_mask; |
1415 | |
1416 | /* |
1417 | * If the threads in a thread-group share L2 cache, then the |
1418 | * L2-mask can be obtained from thread_group_l2_cache_map. |
1419 | */ |
1420 | if (thread_group_shares_l2) { |
cpumask_set_cpu(cpu, cpu_l2_cache_mask(cpu));

for_each_cpu(i, per_cpu(thread_group_l2_cache_map, cpu)) {
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_l2_cache_mask);
1426 | } |
1427 | |
1428 | /* Verify that L1-cache siblings are a subset of L2 cache-siblings */ |
if (!cpumask_equal(submask_fn(cpu), cpu_l2_cache_mask(cpu)) &&
!cpumask_subset(submask_fn(cpu), cpu_l2_cache_mask(cpu))) {
pr_warn_once("CPU %d : Inconsistent L1 and L2 cache siblings\n",
1432 | cpu); |
1433 | } |
1434 | |
1435 | return true; |
1436 | } |
1437 | |
1438 | l2_cache = cpu_to_l2cache(cpu); |
1439 | if (!l2_cache || !*mask) { |
1440 | /* Assume only core siblings share cache with this CPU */ |
1441 | for_each_cpu(i, cpu_sibling_mask(cpu)) |
1442 | set_cpus_related(cpu, i, cpu_l2_cache_mask); |
1443 | |
1444 | return false; |
1445 | } |
1446 | |
cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));
1448 | |
1449 | /* Update l2-cache mask with all the CPUs that are part of submask */ |
1450 | or_cpumasks_related(cpu, cpu, submask_fn, cpu_l2_cache_mask); |
1451 | |
1452 | /* Skip all CPUs already part of current CPU l2-cache mask */ |
cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(cpu));
1454 | |
1455 | for_each_cpu(i, *mask) { |
1456 | /* |
* While updating the masks here, the current CPU has not yet been
* marked online, but we still need to update its cache masks.
1459 | */ |
np = cpu_to_l2cache(i);

/* Skip all CPUs already part of current CPU l2-cache */
if (np == l2_cache) {
or_cpumasks_related(cpu, i, submask_fn, cpu_l2_cache_mask);
cpumask_andnot(*mask, *mask, submask_fn(i));
} else {
cpumask_andnot(*mask, *mask, cpu_l2_cache_mask(i));
}

of_node_put(np);
}
of_node_put(l2_cache);
1473 | |
1474 | return true; |
1475 | } |
1476 | |
1477 | #ifdef CONFIG_HOTPLUG_CPU |
1478 | static void remove_cpu_from_masks(int cpu) |
1479 | { |
1480 | struct cpumask *(*mask_fn)(int) = cpu_sibling_mask; |
1481 | int i; |
1482 | |
1483 | unmap_cpu_from_node(cpu); |
1484 | |
1485 | if (shared_caches) |
1486 | mask_fn = cpu_l2_cache_mask; |
1487 | |
1488 | for_each_cpu(i, mask_fn(cpu)) { |
1489 | set_cpus_unrelated(cpu, i, cpu_l2_cache_mask); |
1490 | set_cpus_unrelated(cpu, i, cpu_sibling_mask); |
1491 | if (has_big_cores) |
1492 | set_cpus_unrelated(cpu, i, cpu_smallcore_mask); |
1493 | } |
1494 | |
1495 | for_each_cpu(i, cpu_core_mask(cpu)) |
set_cpus_unrelated(cpu, i, cpu_core_mask);
1497 | |
1498 | if (has_coregroup_support()) { |
1499 | for_each_cpu(i, cpu_coregroup_mask(cpu)) |
set_cpus_unrelated(cpu, i, cpu_coregroup_mask);
1501 | } |
1502 | } |
1503 | #endif |
1504 | |
1505 | static inline void add_cpu_to_smallcore_masks(int cpu) |
1506 | { |
1507 | int i; |
1508 | |
1509 | if (!has_big_cores) |
1510 | return; |
1511 | |
cpumask_set_cpu(cpu, cpu_smallcore_mask(cpu));

for_each_cpu(i, per_cpu(thread_group_l1_cache_map, cpu)) {
if (cpu_online(i))
set_cpus_related(i, cpu, cpu_smallcore_mask);
1517 | } |
1518 | } |
1519 | |
1520 | static void update_coregroup_mask(int cpu, cpumask_var_t *mask) |
1521 | { |
1522 | struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; |
1523 | int coregroup_id = cpu_to_coregroup_id(cpu); |
1524 | int i; |
1525 | |
1526 | if (shared_caches) |
1527 | submask_fn = cpu_l2_cache_mask; |
1528 | |
1529 | if (!*mask) { |
1530 | /* Assume only siblings are part of this CPU's coregroup */ |
1531 | for_each_cpu(i, submask_fn(cpu)) |
set_cpus_related(cpu, i, cpu_coregroup_mask);
1533 | |
1534 | return; |
1535 | } |
1536 | |
cpumask_and(*mask, cpu_online_mask, cpu_cpu_mask(cpu));

/* Update coregroup mask with all the CPUs that are part of submask */
or_cpumasks_related(cpu, cpu, submask_fn, cpu_coregroup_mask);

/* Skip all CPUs already part of coregroup mask */
cpumask_andnot(*mask, *mask, cpu_coregroup_mask(cpu));

for_each_cpu(i, *mask) {
/* Skip all CPUs not part of this coregroup */
if (coregroup_id == cpu_to_coregroup_id(i)) {
or_cpumasks_related(cpu, i, submask_fn, cpu_coregroup_mask);
cpumask_andnot(*mask, *mask, submask_fn(i));
} else {
cpumask_andnot(*mask, *mask, cpu_coregroup_mask(i));
1552 | } |
1553 | } |
1554 | } |
1555 | |
1556 | static void add_cpu_to_masks(int cpu) |
1557 | { |
1558 | struct cpumask *(*submask_fn)(int) = cpu_sibling_mask; |
1559 | int first_thread = cpu_first_thread_sibling(cpu); |
1560 | cpumask_var_t mask; |
1561 | int chip_id = -1; |
1562 | bool ret; |
1563 | int i; |
1564 | |
1565 | /* |
1566 | * This CPU will not be in the online mask yet so we need to manually |
* add it to its own thread sibling mask.
*/
map_cpu_to_node(cpu, cpu_to_node(cpu));
cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
cpumask_set_cpu(cpu, cpu_core_mask(cpu));
1572 | |
1573 | for (i = first_thread; i < first_thread + threads_per_core; i++) |
1574 | if (cpu_online(i)) |
1575 | set_cpus_related(i, cpu, cpu_sibling_mask); |
1576 | |
1577 | add_cpu_to_smallcore_masks(cpu); |
1578 | |
1579 | /* In CPU-hotplug path, hence use GFP_ATOMIC */ |
ret = alloc_cpumask_var_node(&mask, GFP_ATOMIC, cpu_to_node(cpu));
update_mask_by_l2(cpu, &mask);

if (has_coregroup_support())
update_coregroup_mask(cpu, &mask);
1585 | |
1586 | if (chip_id_lookup_table && ret) |
1587 | chip_id = cpu_to_chip_id(cpu); |
1588 | |
1589 | if (shared_caches) |
1590 | submask_fn = cpu_l2_cache_mask; |
1591 | |
1592 | /* Update core_mask with all the CPUs that are part of submask */ |
or_cpumasks_related(cpu, cpu, submask_fn, cpu_core_mask);

/* Skip all CPUs already part of current CPU core mask */
cpumask_andnot(mask, cpu_online_mask, cpu_core_mask(cpu));

/* If chip_id is -1, limit the cpu_core_mask to within PKG */
if (chip_id == -1)
cpumask_and(mask, mask, cpu_cpu_mask(cpu));

for_each_cpu(i, mask) {
if (chip_id == cpu_to_chip_id(i)) {
or_cpumasks_related(cpu, i, submask_fn, cpu_core_mask);
cpumask_andnot(mask, mask, submask_fn(i));
} else {
cpumask_andnot(mask, mask, cpu_core_mask(i));
1608 | } |
1609 | } |
1610 | |
1611 | free_cpumask_var(mask); |
1612 | } |
1613 | |
1614 | /* Activate a secondary processor. */ |
1615 | __no_stack_protector |
1616 | void start_secondary(void *unused) |
1617 | { |
1618 | unsigned int cpu = raw_smp_processor_id(); |
1619 | |
1620 | /* PPC64 calls setup_kup() in early_setup_secondary() */ |
1621 | if (IS_ENABLED(CONFIG_PPC32)) |
1622 | setup_kup(); |
1623 | |
mmgrab_lazy_tlb(&init_mm);
current->active_mm = &init_mm;
VM_WARN_ON(cpumask_test_cpu(smp_processor_id(), mm_cpumask(&init_mm)));
cpumask_set_cpu(cpu, mm_cpumask(&init_mm));
1628 | inc_mm_active_cpus(&init_mm); |
1629 | |
smp_store_cpu_info(cpu);
1631 | set_dec(tb_ticks_per_jiffy); |
1632 | rcutree_report_cpu_starting(cpu); |
1633 | cpu_callin_map[cpu] = 1; |
1634 | |
1635 | if (smp_ops->setup_cpu) |
1636 | smp_ops->setup_cpu(cpu); |
1637 | if (smp_ops->take_timebase) |
1638 | smp_ops->take_timebase(); |
1639 | |
1640 | secondary_cpu_time_init(); |
1641 | |
1642 | #ifdef CONFIG_PPC64 |
1643 | if (system_state == SYSTEM_RUNNING) |
1644 | vdso_data->processorCount++; |
1645 | |
1646 | vdso_getcpu_init(); |
1647 | #endif |
1648 | set_numa_node(numa_cpu_lookup_table[cpu]); |
1649 | set_numa_mem(local_memory_node(numa_cpu_lookup_table[cpu])); |
1650 | |
1651 | /* Update topology CPU masks */ |
1652 | add_cpu_to_masks(cpu); |
1653 | |
1654 | /* |
1655 | * Check for any shared caches. Note that this must be done on a |
1656 | * per-core basis because one core in the pair might be disabled. |
1657 | */ |
1658 | if (!shared_caches) { |
1659 | struct cpumask *(*sibling_mask)(int) = cpu_sibling_mask; |
1660 | struct cpumask *mask = cpu_l2_cache_mask(cpu); |
1661 | |
1662 | if (has_big_cores) |
1663 | sibling_mask = cpu_smallcore_mask; |
1664 | |
if (cpumask_weight(mask) > cpumask_weight(sibling_mask(cpu)))
1666 | shared_caches = true; |
1667 | } |
1668 | |
1669 | smp_wmb(); |
1670 | notify_cpu_starting(cpu); |
set_cpu_online(cpu, true);
1672 | |
1673 | boot_init_stack_canary(); |
1674 | |
1675 | local_irq_enable(); |
1676 | |
1677 | /* We can enable ftrace for secondary cpus now */ |
1678 | this_cpu_enable_ftrace(); |
1679 | |
cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
1681 | |
1682 | BUG(); |
1683 | } |
1684 | |
1685 | static void __init fixup_topology(void) |
1686 | { |
1687 | int i; |
1688 | |
1689 | #ifdef CONFIG_SCHED_SMT |
1690 | if (has_big_cores) { |
pr_info("Big cores detected but using small core scheduling\n");
1692 | powerpc_topology[smt_idx].mask = smallcore_smt_mask; |
1693 | } |
1694 | #endif |
1695 | |
1696 | if (!has_coregroup_support()) |
1697 | powerpc_topology[mc_idx].mask = powerpc_topology[cache_idx].mask; |
1698 | |
1699 | /* |
1700 | * Try to consolidate topology levels here instead of |
1701 | * allowing scheduler to degenerate. |
* - Don't consolidate if masks are different.
* - Don't consolidate if sd_flags exist and are different.
1704 | */ |
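/*
 * For example (illustrative only): when coregroups are not supported, the MC
 * level is given the CACHE mask above, so the loop below folds the duplicate
 * level away instead of leaving it for the scheduler to degenerate.
 */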
1705 | for (i = 1; i <= die_idx; i++) { |
1706 | if (powerpc_topology[i].mask != powerpc_topology[i - 1].mask) |
1707 | continue; |
1708 | |
1709 | if (powerpc_topology[i].sd_flags && powerpc_topology[i - 1].sd_flags && |
1710 | powerpc_topology[i].sd_flags != powerpc_topology[i - 1].sd_flags) |
1711 | continue; |
1712 | |
1713 | if (!powerpc_topology[i - 1].sd_flags) |
1714 | powerpc_topology[i - 1].sd_flags = powerpc_topology[i].sd_flags; |
1715 | |
1716 | powerpc_topology[i].mask = powerpc_topology[i + 1].mask; |
1717 | powerpc_topology[i].sd_flags = powerpc_topology[i + 1].sd_flags; |
1718 | #ifdef CONFIG_SCHED_DEBUG |
1719 | powerpc_topology[i].name = powerpc_topology[i + 1].name; |
1720 | #endif |
1721 | } |
1722 | } |
1723 | |
1724 | void __init smp_cpus_done(unsigned int max_cpus) |
1725 | { |
1726 | /* |
1727 | * We are running pinned to the boot CPU, see rest_init(). |
1728 | */ |
1729 | if (smp_ops && smp_ops->setup_cpu) |
1730 | smp_ops->setup_cpu(boot_cpuid); |
1731 | |
1732 | if (smp_ops && smp_ops->bringup_done) |
1733 | smp_ops->bringup_done(); |
1734 | |
1735 | dump_numa_cpu_topology(); |
1736 | |
1737 | fixup_topology(); |
1738 | set_sched_topology(powerpc_topology); |
1739 | } |
1740 | |
1741 | #ifdef CONFIG_HOTPLUG_CPU |
1742 | int __cpu_disable(void) |
1743 | { |
1744 | int cpu = smp_processor_id(); |
1745 | int err; |
1746 | |
1747 | if (!smp_ops->cpu_disable) |
1748 | return -ENOSYS; |
1749 | |
1750 | this_cpu_disable_ftrace(); |
1751 | |
1752 | err = smp_ops->cpu_disable(); |
1753 | if (err) |
1754 | return err; |
1755 | |
1756 | /* Update sibling maps */ |
1757 | remove_cpu_from_masks(cpu); |
1758 | |
1759 | return 0; |
1760 | } |
1761 | |
1762 | void __cpu_die(unsigned int cpu) |
1763 | { |
1764 | /* |
* This could perhaps be a generic call in idle_task_dead(), but
* that requires testing from all archs, so it is done here for now.
1767 | */ |
1768 | VM_WARN_ON_ONCE(!cpumask_test_cpu(cpu, mm_cpumask(&init_mm))); |
1769 | dec_mm_active_cpus(&init_mm); |
cpumask_clear_cpu(cpu, mm_cpumask(&init_mm));
1771 | |
1772 | if (smp_ops->cpu_die) |
1773 | smp_ops->cpu_die(cpu); |
1774 | } |
1775 | |
1776 | void __noreturn arch_cpu_idle_dead(void) |
1777 | { |
1778 | /* |
1779 | * Disable on the down path. This will be re-enabled by |
1780 | * start_secondary() via start_secondary_resume() below |
1781 | */ |
1782 | this_cpu_disable_ftrace(); |
1783 | |
1784 | if (smp_ops->cpu_offline_self) |
1785 | smp_ops->cpu_offline_self(); |
1786 | |
1787 | /* If we return, we re-enter start_secondary */ |
1788 | start_secondary_resume(); |
1789 | } |
1790 | |
1791 | #endif |
1792 | |