/*
 * Copyright (C) 2014 Stefan Kristiansson <stefan.kristiansson@saunalahti.fi>
 * Copyright (C) 2017 Stafford Horne <shorne@gmail.com>
 *
 * Based on arm64 and arc implementations
 * Copyright (C) 2013 ARM Ltd.
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This file is licensed under the terms of the GNU General Public License
 * version 2. This program is licensed "as is" without any warranty of any
 * kind, whether express or implied.
 */
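
/*
 * SMP support for OpenRISC: secondary CPU bring-up, inter-processor
 * interrupt (IPI) handling, and cross-CPU TLB and instruction cache
 * maintenance.
 */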

#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/irq.h>
#include <linux/of.h>
#include <asm/cpuinfo.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/time.h>

asmlinkage __init void secondary_start_kernel(void);

static void (*smp_cross_call)(const struct cpumask *, unsigned int);

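/*
 * Bring-up handshake state: the boot CPU publishes the target core id
 * in secondary_release and the idle task's thread_info in
 * secondary_thread_info; the secondary's early assembly startup code
 * polls secondary_release until it matches its own core id.
 */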
unsigned long secondary_release = -1;
struct thread_info *secondary_thread_info;

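/* IPI message types dispatched by handle_IPI(). */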
enum ipi_msg_type {
	IPI_WAKEUP,
	IPI_RESCHEDULE,
	IPI_CALL_FUNC,
	IPI_CALL_FUNC_SINGLE,
};

static DEFINE_SPINLOCK(boot_lock);

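/*
 * Release one secondary CPU: publish its id in secondary_release and
 * send IPI_WAKEUP in case the core is dozing rather than busy-waiting.
 * boot_lock serialises this handshake against concurrent bring-ups.
 */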
static void boot_secondary(unsigned int cpu, struct task_struct *idle)
{
	/*
	 * Set synchronisation state between this boot processor
	 * and the secondary one.
	 */
	spin_lock(&boot_lock);

	secondary_release = cpu;
	smp_cross_call(cpumask_of(cpu), IPI_WAKEUP);

	/*
	 * Now that the secondary core is starting up, let it run its
	 * calibrations, then wait for it to finish.
	 */
	spin_unlock(&boot_lock);
}

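/*
 * Mark each CPU node found in the device tree as possible, capped at
 * NR_CPUS; the hardware CPU id is taken directly from the node.
 */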
void __init smp_init_cpus(void)
{
	struct device_node *cpu;
	u32 cpu_id;

	for_each_of_cpu_node(cpu) {
		cpu_id = of_get_cpu_hwid(cpu, 0);
		if (cpu_id < NR_CPUS)
			set_cpu_possible(cpu_id, true);
	}
}

void __init smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int cpu;

	/*
	 * Initialise the present map, which describes the set of CPUs
	 * actually populated at the present time.
	 */
	for_each_possible_cpu(cpu) {
		if (cpu < max_cpus)
			set_cpu_present(cpu, true);
	}
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

static DECLARE_COMPLETION(cpu_running);

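/*
 * Boot-CPU side of bring-up: hand the idle thread and the kernel page
 * tables to the secondary, release it, then wait up to one second for
 * it to signal cpu_running before synchronising the CPU timers.
 */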
int __cpu_up(unsigned int cpu, struct task_struct *idle)
{
	if (smp_cross_call == NULL) {
		pr_warn("CPU%u: failed to start, IPI controller missing\n",
			cpu);
		return -EIO;
	}

	secondary_thread_info = task_thread_info(idle);
	current_pgd[cpu] = init_mm.pgd;

	boot_secondary(cpu, idle);
	if (!wait_for_completion_timeout(&cpu_running,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}
	synchronise_count_master(cpu);

	return 0;
}

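/*
 * Entry point for secondary CPUs, reached from the assembly startup
 * path: adopt init_mm, bring up per-CPU state, signal the boot CPU
 * through cpu_running, then drop into the idle loop.
 */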
asmlinkage __init void secondary_start_kernel(void)
{
	struct mm_struct *mm = &init_mm;
	unsigned int cpu = smp_processor_id();
	/*
	 * All kernel threads share the same mm context; grab a
	 * reference and switch to it.
	 */
	mmgrab(mm);
	current->active_mm = mm;
	cpumask_set_cpu(cpu, mm_cpumask(mm));

	pr_info("CPU%u: Booted secondary processor\n", cpu);

	setup_cpuinfo();
	openrisc_clockevent_init();

	notify_cpu_starting(cpu);

	/*
	 * OK, now it's safe to let the boot CPU continue.
	 */
	complete(&cpu_running);

	synchronise_count_slave(cpu);
	set_cpu_online(cpu, true);

	local_irq_enable();
	/*
	 * OK, it's off to the idle thread for us.
	 */
	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

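/*
 * Demultiplex an incoming IPI. IPI_WAKEUP needs no handling here; its
 * only purpose is to lift a dozing secondary out of its wait loop.
 */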
void handle_IPI(unsigned int ipi_msg)
{
	unsigned int cpu = smp_processor_id();

	switch (ipi_msg) {
	case IPI_WAKEUP:
		break;

	case IPI_RESCHEDULE:
		scheduler_ipi();
		break;

	case IPI_CALL_FUNC:
		generic_smp_call_function_interrupt();
		break;

	case IPI_CALL_FUNC_SINGLE:
		generic_smp_call_function_single_interrupt();
		break;

	default:
		WARN(1, "CPU%u: Unknown IPI message 0x%x\n", cpu, ipi_msg);
		break;
	}
}

void arch_smp_send_reschedule(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_RESCHEDULE);
}

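/*
 * Park the calling CPU: mark it offline, mask interrupts, and doze via
 * the power management unit when one is present (SPR_UPR_PMP);
 * otherwise spin forever.
 */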
static void stop_this_cpu(void *dummy)
{
	/* Remove this CPU */
	set_cpu_online(smp_processor_id(), false);

	local_irq_disable();
	/* CPU Doze */
	if (mfspr(SPR_UPR) & SPR_UPR_PMP)
		mtspr(SPR_PMR, mfspr(SPR_PMR) | SPR_PMR_DME);
	/* If that didn't work, infinite loop */
	while (1)
		;
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

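/* Registered by the IPI-capable interrupt controller driver (e.g. ompic). */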
void __init set_smp_cross_call(void (*fn)(const struct cpumask *, unsigned int))
{
	smp_cross_call = fn;
}

void arch_send_call_function_single_ipi(int cpu)
{
	smp_cross_call(cpumask_of(cpu), IPI_CALL_FUNC_SINGLE);
}

void arch_send_call_function_ipi_mask(const struct cpumask *mask)
{
	smp_cross_call(mask, IPI_CALL_FUNC);
}

/* TLB flush operations - performed on each CPU */
static inline void ipi_flush_tlb_all(void *ignored)
{
	local_flush_tlb_all();
}

static inline void ipi_flush_tlb_mm(void *info)
{
	struct mm_struct *mm = (struct mm_struct *)info;

	local_flush_tlb_mm(mm);
}

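/*
 * Flush an mm's TLB entries on every CPU in cmask. When the calling
 * CPU is the only one in the mask, flush locally and skip the IPI
 * round trip entirely.
 */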
static void smp_flush_tlb_mm(struct cpumask *cmask, struct mm_struct *mm)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		local_flush_tlb_mm(mm);
	} else {
		on_each_cpu_mask(cmask, ipi_flush_tlb_mm, mm, 1);
	}
	put_cpu();
}

struct flush_tlb_data {
	unsigned long addr1;
	unsigned long addr2;
};

static inline void ipi_flush_tlb_page(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_page(NULL, fd->addr1);
}

static inline void ipi_flush_tlb_range(void *info)
{
	struct flush_tlb_data *fd = (struct flush_tlb_data *)info;

	local_flush_tlb_range(NULL, fd->addr1, fd->addr2);
}

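/*
 * Range flush across cmask. A single-page range takes the cheaper
 * per-page flush; anything larger takes the full range flush. As
 * above, a mask holding only the local CPU avoids the IPI.
 */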
static void smp_flush_tlb_range(const struct cpumask *cmask, unsigned long start,
				unsigned long end)
{
	unsigned int cpuid;

	if (cpumask_empty(cmask))
		return;

	cpuid = get_cpu();

	if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
		/* local cpu is the only cpu present in cpumask */
		if ((end - start) <= PAGE_SIZE)
			local_flush_tlb_page(NULL, start);
		else
			local_flush_tlb_range(NULL, start, end);
	} else {
		struct flush_tlb_data fd;

		fd.addr1 = start;
		fd.addr2 = end;

		if ((end - start) <= PAGE_SIZE)
			on_each_cpu_mask(cmask, ipi_flush_tlb_page, &fd, 1);
		else
			on_each_cpu_mask(cmask, ipi_flush_tlb_range, &fd, 1);
	}
	put_cpu();
}

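/* Generic MM flush entry points, routed through the SMP helpers above. */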
void flush_tlb_all(void)
{
	on_each_cpu(ipi_flush_tlb_all, NULL, 1);
}

void flush_tlb_mm(struct mm_struct *mm)
{
	smp_flush_tlb_mm(mm_cpumask(mm), mm);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long uaddr)
{
	smp_flush_tlb_range(mm_cpumask(vma->vm_mm), uaddr, uaddr + PAGE_SIZE);
}

void flush_tlb_range(struct vm_area_struct *vma,
		     unsigned long start, unsigned long end)
{
	const struct cpumask *cmask = vma ? mm_cpumask(vma->vm_mm)
					  : cpu_online_mask;
	smp_flush_tlb_range(cmask, start, end);
}

/* Instruction cache invalidate - performed on each cpu */
static void ipi_icache_page_inv(void *arg)
{
	struct page *page = arg;

	local_icache_page_inv(page);
}

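/*
 * Invalidate one page from the instruction cache on every online CPU,
 * typically after writing instructions to that page.
 */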
void smp_icache_page_inv(struct page *page)
{
	on_each_cpu(ipi_icache_page_inv, page, 1);
}
EXPORT_SYMBOL(smp_icache_page_inv);
329 | |