/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_SMP_H
#define _ASM_X86_SMP_H
#ifndef __ASSEMBLY__
#include <linux/cpumask.h>

#include <asm/cpumask.h>
#include <asm/current.h>
#include <asm/thread_info.h>

DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_die_map);
/* cpus sharing the last level cache: */
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);
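/* cpus sharing the L2 cache: */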
DECLARE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_l2c_shared_map);

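/* Logical CPU -> local APIC ID and ACPI processor UID, set up during early boot: */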
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_apicid);
DECLARE_EARLY_PER_CPU_READ_MOSTLY(u32, x86_cpu_to_acpiid);

struct task_struct;

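/*
 * Table of SMP operations.  The native implementations are the native_*()
 * functions declared below; paravirtualized guests (e.g. Xen) override
 * individual callbacks.
 */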
struct smp_ops {
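	/* Boot-time bring-up of the secondary CPUs: */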
	void (*smp_prepare_boot_cpu)(void);
	void (*smp_prepare_cpus)(unsigned int max_cpus);
	void (*smp_cpus_done)(unsigned int max_cpus);

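	/* Stopping other CPUs (shutdown, kexec/crash) and remote rescheduling: */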
	void (*stop_other_cpus)(int wait);
	void (*crash_stop_other_cpus)(void);
	void (*smp_send_reschedule)(int cpu);

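	/* CPU hotplug: */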
	void (*cleanup_dead_cpu)(unsigned int cpu);
	void (*poll_sync_state)(void);
	int (*kick_ap_alive)(unsigned int cpu, struct task_struct *tidle);
	int (*cpu_disable)(void);
	void (*cpu_die)(unsigned int cpu);
	void (*play_dead)(void);

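	/* smp_call_function() IPIs: */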
	void (*send_call_func_ipi)(const struct cpumask *mask);
	void (*send_call_func_single_ipi)(int cpu);
};

/* Global rather than static because the paravirt SMP code uses it too */
extern void set_cpu_sibling_map(int cpu);

#ifdef CONFIG_SMP
extern struct smp_ops smp_ops;

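/* Stop all other CPUs, without waiting for them to acknowledge: */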
static inline void smp_send_stop(void)
{
	smp_ops.stop_other_cpus(0);
}

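/* Stop all other CPUs and wait until they have entered the stop state: */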
static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);
}

static inline void smp_prepare_cpus(unsigned int max_cpus)
{
	smp_ops.smp_prepare_cpus(max_cpus);
}

static inline void smp_cpus_done(unsigned int max_cpus)
{
	smp_ops.smp_cpus_done(max_cpus);
}

static inline int __cpu_disable(void)
{
	return smp_ops.cpu_disable();
}

static inline void __cpu_die(unsigned int cpu)
{
	if (smp_ops.cpu_die)
		smp_ops.cpu_die(cpu);
}

static inline void __noreturn play_dead(void)
{
	smp_ops.play_dead();
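	/* The play_dead() callback must never return: */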
	BUG();
}

static inline void arch_smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);
}

static inline void arch_send_call_function_single_ipi(int cpu)
{
	smp_ops.send_call_func_single_ipi(cpu);
}

static inline void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_ops.send_call_func_ipi(mask);
}

void cpu_disable_common(void);
void native_smp_prepare_boot_cpu(void);
void smp_prepare_cpus_common(void);
void native_smp_prepare_cpus(unsigned int max_cpus);
void native_smp_cpus_done(unsigned int max_cpus);
int common_cpu_up(unsigned int cpunum, struct task_struct *tidle);
int native_kick_ap(unsigned int cpu, struct task_struct *tidle);
int native_cpu_disable(void);
void __noreturn hlt_play_dead(void);
void __noreturn native_play_dead(void);
void play_dead_common(void);
void wbinvd_on_cpu(int cpu);
int wbinvd_on_all_cpus(void);

void smp_kick_mwait_play_dead(void);

void native_smp_send_reschedule(int cpu);
void native_send_call_func_ipi(const struct cpumask *mask);
void native_send_call_func_single_ipi(int cpu);

void smp_store_cpu_info(int id);

asmlinkage __visible void smp_reboot_interrupt(void);
__visible void smp_reschedule_interrupt(struct pt_regs *regs);
__visible void smp_call_function_interrupt(struct pt_regs *regs);
__visible void smp_call_function_single_interrupt(struct pt_regs *regs);

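/* Translate a logical CPU number into its APIC ID / ACPI processor UID: */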
#define cpu_physical_id(cpu)	per_cpu(x86_cpu_to_apicid, cpu)
#define cpu_acpi_id(cpu)	per_cpu(x86_cpu_to_acpiid, cpu)

/*
 * raw_smp_processor_id() is needed by all SMP systems.  It must
 * _always_ be valid, from initial startup onwards.
 */
#define raw_smp_processor_id()	this_cpu_read(pcpu_hot.cpu_number)
#define __smp_processor_id()	__this_cpu_read(pcpu_hot.cpu_number)

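/*
 * 32-bit needs a variant that is safe even when the per-CPU area may not
 * be usable, e.g. in crash or reboot paths:
 */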
#ifdef CONFIG_X86_32
extern int safe_smp_processor_id(void);
#else
# define safe_smp_processor_id()	smp_processor_id()
#endif

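/* Masks of CPUs sharing the last-level and L2 caches with @cpu: */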
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return per_cpu(cpu_llc_shared_map, cpu);
}

static inline struct cpumask *cpu_l2c_shared_mask(int cpu)
{
	return per_cpu(cpu_l2c_shared_map, cpu);
}

#else /* !CONFIG_SMP */
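/* UP stubs: there is only one CPU, so operate on it directly. */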
#define wbinvd_on_cpu(cpu)	wbinvd()
static inline int wbinvd_on_all_cpus(void)
{
	wbinvd();
	return 0;
}

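/* The const is cast away; callers must treat the returned mask as read-only. */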
static inline struct cpumask *cpu_llc_shared_mask(int cpu)
{
	return (struct cpumask *)cpumask_of(0);
}
#endif /* CONFIG_SMP */

#ifdef CONFIG_DEBUG_NMI_SELFTEST
extern void nmi_selftest(void);
#else
#define nmi_selftest() do { } while (0)
#endif

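/* Shared with the early startup assembly code: */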
extern unsigned int smpboot_control;
extern unsigned long apic_mmio_base;

#endif /* !__ASSEMBLY__ */

/* Control bits for startup_64 */
#define STARTUP_READ_APICID	0x80000000

/* Top 8 bits are reserved for control */
#define STARTUP_PARALLEL_MASK	0xFF000000
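
/*
 * Sketch of the intended use (see the boot code for the authoritative
 * logic): with parallel bring-up, smpboot_control is set to
 * STARTUP_READ_APICID and each AP reads its own APIC ID to locate its
 * per-CPU data; otherwise the low bits carry the CPU number of the AP
 * being started directly.
 */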

#endif /* _ASM_X86_SMP_H */