// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *
 * Copyright (C) 2000, 2001 Kanoj Sarcar
 * Copyright (C) 2000, 2001 Ralf Baechle
 * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
 * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
 */
#include <linux/cache.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/spinlock.h>
#include <linux/threads.h>
#include <linux/export.h>
#include <linux/time.h>
#include <linux/timex.h>
#include <linux/sched/mm.h>
#include <linux/cpumask.h>
#include <linux/cpu.h>
#include <linux/err.h>
#include <linux/ftrace.h>
#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>

#include <linux/atomic.h>
#include <asm/cpu.h>
#include <asm/ginvt.h>
#include <asm/processor.h>
#include <asm/idle.h>
#include <asm/r4k-timer.h>
#include <asm/mips-cps.h>
#include <asm/mmu_context.h>
#include <asm/time.h>
#include <asm/setup.h>
#include <asm/maar.h>

int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP];	/* Map physical to logical */
EXPORT_SYMBOL(__cpu_number_map);

int __cpu_logical_map[NR_CPUS];		/* Map logical to physical */
EXPORT_SYMBOL(__cpu_logical_map);

/* Number of TCs (or siblings in Intel speak) per CPU core */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* representing the TCs (or siblings in Intel speak) of each logical CPU */
cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_sibling_map);

/* representing the core map of multi-core chips of each logical CPU */
cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_core_map);

static DECLARE_COMPLETION(cpu_starting);
static DECLARE_COMPLETION(cpu_running);

/*
 * A logical cpu mask containing only one VPE per core to
 * reduce the number of IPIs on large MT systems.
 */
cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* representing cpus for which core maps can be computed */
static cpumask_t cpu_core_setup_map;

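/*
 * CPUs known to be coherent. A CPU absent from this mask may sit on a
 * powered-down core; mips_smp_send_ipi_mask() powers such cores back up
 * before expecting an IPI to be taken.
 */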
cpumask_t cpu_coherent_mask;

unsigned int smp_max_threads __initdata = UINT_MAX;

static int __init early_nosmt(char *s)
{
	smp_max_threads = 1;
	return 0;
}
early_param("nosmt", early_nosmt);

static int __init early_smt(char *s)
{
	get_option(&s, &smp_max_threads);
	/* Ensure at least one thread is available */
	smp_max_threads = clamp_val(smp_max_threads, 1U, UINT_MAX);
	return 0;
}
early_param("smt", early_smt);
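
/*
 * Example: booting with "smt=2" sets smp_max_threads to 2, so platform
 * code limits each core to at most two threads; "nosmt" has the same
 * effect as "smt=1".
 */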

#ifdef CONFIG_GENERIC_IRQ_IPI
static struct irq_desc *call_desc;
static struct irq_desc *sched_desc;
#endif

static inline void set_cpu_sibling_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu(i, &cpu_sibling_setup_map) {
			if (cpus_are_siblings(cpu, i)) {
				cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
				cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
			}
		}
	} else
		cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
}

static inline void set_cpu_core_map(int cpu)
{
	int i;

	cpumask_set_cpu(cpu, &cpu_core_setup_map);

	for_each_cpu(i, &cpu_core_setup_map) {
		if (cpu_data[cpu].package == cpu_data[i].package) {
			cpumask_set_cpu(i, &cpu_core_map[cpu]);
			cpumask_set_cpu(cpu, &cpu_core_map[i]);
		}
	}
}

/*
 * Calculate a new cpu_foreign_map mask whenever a
 * new cpu appears or disappears.
 */
void calculate_cpu_foreign_map(void)
{
	int i, k, core_present;
	cpumask_t temp_foreign_map;

	/* Re-calculate the mask */
	cpumask_clear(&temp_foreign_map);
	for_each_online_cpu(i) {
		core_present = 0;
		for_each_cpu(k, &temp_foreign_map)
			if (cpus_are_siblings(i, k))
				core_present = 1;
		if (!core_present)
			cpumask_set_cpu(i, &temp_foreign_map);
	}

	for_each_online_cpu(i)
		cpumask_andnot(&cpu_foreign_map[i],
			       &temp_foreign_map, &cpu_sibling_map[i]);
}
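
/*
 * Worked example (sketch): on a system with two cores of two VPEs each
 * and CPUs 0-3 online, temp_foreign_map ends up holding one CPU per
 * core, e.g. { 0, 2 }. cpu_foreign_map[0] is then { 0, 2 } minus CPU
 * 0's siblings { 0, 1 }, i.e. { 2 }: code targeting "foreign" cores
 * from CPU 0 need only IPI CPU 2 rather than both CPU 2 and CPU 3.
 */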

const struct plat_smp_ops *mp_ops;
EXPORT_SYMBOL(mp_ops);

void register_smp_ops(const struct plat_smp_ops *ops)
{
	if (mp_ops)
		printk(KERN_WARNING "Overriding previously set SMP ops\n");

	mp_ops = ops;
}
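
/*
 * Illustrative sketch only: a platform typically registers its ops once
 * during early setup. The foo_* names below are hypothetical; the
 * callbacks shown are those this file invokes through mp_ops, plus the
 * generic IPI senders defined below.
 *
 *	static const struct plat_smp_ops foo_smp_ops = {
 *		.send_ipi_single	= mips_smp_send_ipi_single,
 *		.send_ipi_mask		= mips_smp_send_ipi_mask,
 *		.init_secondary		= foo_init_secondary,
 *		.smp_finish		= foo_smp_finish,
 *		.boot_secondary		= foo_boot_secondary,
 *		.prepare_cpus		= foo_prepare_cpus,
 *	};
 *
 *	register_smp_ops(&foo_smp_ops);
 */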

#ifdef CONFIG_GENERIC_IRQ_IPI
void mips_smp_send_ipi_single(int cpu, unsigned int action)
{
	mips_smp_send_ipi_mask(cpumask_of(cpu), action);
}

void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
{
	unsigned long flags;
	unsigned int core;
	int cpu;

	local_irq_save(flags);

	switch (action) {
	case SMP_CALL_FUNCTION:
		__ipi_send_mask(call_desc, mask);
		break;

	case SMP_RESCHEDULE_YOURSELF:
		__ipi_send_mask(sched_desc, mask);
		break;

	default:
		BUG();
	}

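	/*
	 * If a target CPU has not yet been marked coherent, its core may
	 * be powered down: keep issuing a CPC power-up command for that
	 * core until the CPU shows up in cpu_coherent_mask, so the IPI
	 * sent above can actually be taken.
	 */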
	if (mips_cpc_present()) {
		for_each_cpu(cpu, mask) {
			if (cpus_are_siblings(cpu, smp_processor_id()))
				continue;

			core = cpu_core(&cpu_data[cpu]);

			while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
				mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
				mips_cpc_lock_other(core);
				write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
				mips_cpc_unlock_other();
				mips_cm_unlock_other();
			}
		}
	}

	local_irq_restore(flags);
}

static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
{
	scheduler_ipi();

	return IRQ_HANDLED;
}

static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
{
	generic_smp_call_function_interrupt();

	return IRQ_HANDLED;
}

static void smp_ipi_init_one(unsigned int virq, const char *name,
			     irq_handler_t handler)
{
	int ret;

	irq_set_handler(virq, handle_percpu_irq);
	ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
	BUG_ON(ret);
}

static unsigned int call_virq, sched_virq;

int mips_smp_ipi_allocate(const struct cpumask *mask)
{
	int virq;
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	/*
	 * There are systems which use IPI IRQ domains, but only have one
	 * registered when some runtime condition is met. For example a Malta
	 * kernel may include support for GIC & CPU interrupt controller IPI
	 * IRQ domains, but if run on a system with no GIC & no MT ASE then
	 * neither will be supported or registered.
	 *
	 * We only have a problem if we're actually using multiple CPUs, so
	 * fail loudly if that is the case. Otherwise simply return, skipping
	 * IPI setup, since we're running with only a single CPU.
	 */
	if (!ipidomain) {
		BUG_ON(num_present_cpus() > 1);
		return 0;
	}

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!call_virq)
		call_virq = virq;

	virq = irq_reserve_ipi(ipidomain, mask);
	BUG_ON(!virq);
	if (!sched_virq)
		sched_virq = virq;

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			smp_ipi_init_one(call_virq + cpu, "IPI call",
					 ipi_call_interrupt);
			smp_ipi_init_one(sched_virq + cpu, "IPI resched",
					 ipi_resched_interrupt);
		}
	} else {
		smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
		smp_ipi_init_one(sched_virq, "IPI resched",
				 ipi_resched_interrupt);
	}

	return 0;
}
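
/*
 * Note: for an IPI-per-cpu domain, irq_reserve_ipi() hands back the
 * first virq of a consecutive range covering the mask - hence the
 * "call_virq + cpu" indexing above - while other domains return a
 * single virq shared by every CPU.
 */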

int mips_smp_ipi_free(const struct cpumask *mask)
{
	struct irq_domain *ipidomain;
	struct device_node *node;

	node = of_irq_find_parent(of_root);
	ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);

	/*
	 * Some platforms have half DT setup. So if we found irq node but
	 * didn't find an ipidomain, try to search for one that is not in the
	 * DT.
	 */
	if (node && !ipidomain)
		ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);

	BUG_ON(!ipidomain);

	if (irq_domain_is_ipi_per_cpu(ipidomain)) {
		int cpu;

		for_each_cpu(cpu, mask) {
			free_irq(call_virq + cpu, NULL);
			free_irq(sched_virq + cpu, NULL);
		}
	}
	irq_destroy_ipi(call_virq, mask);
	irq_destroy_ipi(sched_virq, mask);
	return 0;
}

static int __init mips_smp_ipi_init(void)
{
	if (num_possible_cpus() == 1)
		return 0;

	mips_smp_ipi_allocate(cpu_possible_mask);

	call_desc = irq_to_desc(call_virq);
	sched_desc = irq_to_desc(sched_virq);

	return 0;
}
early_initcall(mips_smp_ipi_init);
#endif

/*
 * First C code run on the secondary CPUs after being started up by
 * the master.
 */
asmlinkage void start_secondary(void)
{
	unsigned int cpu;

	cpu_probe();
	per_cpu_trap_init(false);
	mips_clockevent_init();
	mp_ops->init_secondary();
	cpu_report();
	maar_init();

	/*
	 * XXX parity protection should be folded in here when it's converted
	 * to an option instead of something based on .cputype
	 */

	calibrate_delay();
	cpu = smp_processor_id();
	cpu_data[cpu].udelay_val = loops_per_jiffy;

	set_cpu_sibling_map(cpu);
	set_cpu_core_map(cpu);

	cpumask_set_cpu(cpu, &cpu_coherent_mask);
	notify_cpu_starting(cpu);

	/* Notify boot CPU that we're starting & ready to sync counters */
	complete(&cpu_starting);

	synchronise_count_slave(cpu);

	/* The CPU is running and counters synchronised, now mark it online */
	set_cpu_online(cpu, true);

	calculate_cpu_foreign_map();

	/*
	 * Notify boot CPU that we're up & online and it can safely return
	 * from __cpu_up
	 */
	complete(&cpu_running);

	/*
	 * irq will be enabled in ->smp_finish(); enabling it too early
	 * is dangerous.
	 */
	WARN_ON_ONCE(!irqs_disabled());
	mp_ops->smp_finish();

	cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
}

static void stop_this_cpu(void *dummy)
{
	/*
	 * Remove this CPU:
	 */

	set_cpu_online(smp_processor_id(), false);
	calculate_cpu_foreign_map();
	local_irq_disable();
	while (1);
}

void smp_send_stop(void)
{
	smp_call_function(stop_this_cpu, NULL, 0);
}

void __init smp_cpus_done(unsigned int max_cpus)
{
}

/* called from main before smp_init() */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	init_new_context(current, &init_mm);
	current_thread_info()->cpu = 0;
	mp_ops->prepare_cpus(max_cpus);
	set_cpu_sibling_map(0);
	set_cpu_core_map(0);
	calculate_cpu_foreign_map();
#ifndef CONFIG_HOTPLUG_CPU
	init_cpu_present(cpu_possible_mask);
#endif
	cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
}

/* preload SMP state for boot cpu */
void smp_prepare_boot_cpu(void)
{
	if (mp_ops->prepare_boot_cpu)
		mp_ops->prepare_boot_cpu();
	set_cpu_possible(0, true);
	set_cpu_online(0, true);
}

int __cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int err;

	err = mp_ops->boot_secondary(cpu, tidle);
	if (err)
		return err;

	/* Wait for CPU to start and be ready to sync counters */
	if (!wait_for_completion_timeout(&cpu_starting,
					 msecs_to_jiffies(1000))) {
		pr_crit("CPU%u: failed to start\n", cpu);
		return -EIO;
	}

	synchronise_count_master(cpu);

	/* Wait for CPU to finish startup & mark itself online before return */
	wait_for_completion(&cpu_running);
	return 0;
}
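
/*
 * Summary of the bring-up handshake between __cpu_up() and
 * start_secondary():
 *
 *	boot CPU (__cpu_up)		secondary (start_secondary)
 *	boot_secondary()	   -->	cpu_probe(), init_secondary(), ...
 *	wait for cpu_starting	   <--	complete(&cpu_starting)
 *	synchronise_count_master() <->	synchronise_count_slave()
 *	wait for cpu_running	   <--	set_cpu_online(),
 *					complete(&cpu_running)
 */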

/* Not really SMP stuff ... */
int setup_profiling_timer(unsigned int multiplier)
{
	return 0;
}

static void flush_tlb_all_ipi(void *info)
{
	local_flush_tlb_all();
}

void flush_tlb_all(void)
{
	if (cpu_has_mmid) {
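		/*
		 * With MMIDs the GINVT instruction is globalized by the
		 * coherence fabric, so every coherent CPU's TLB is
		 * invalidated without sending IPIs (see also the comment
		 * in flush_tlb_mm() below).
		 */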
		htw_stop();
		ginvt_full();
		sync_ginv();
		instruction_hazard();
		htw_start();
		return;
	}

	on_each_cpu(flush_tlb_all_ipi, NULL, 1);
}

static void flush_tlb_mm_ipi(void *mm)
{
	drop_mmu_context((struct mm_struct *)mm);
}

/*
 * Special Variant of smp_call_function for use by TLB functions:
 *
 * o No return value
 * o collapses to normal function call on UP kernels
 * o collapses to normal function call on systems with a single shared
 *   primary cache.
 */
static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
{
	smp_call_function(func, info, 1);
}

static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
{
	preempt_disable();

	smp_on_other_tlbs(func, info);
	func(info);

	preempt_enable();
}

/*
 * The following tlb flush calls are invoked when old translations are
 * being torn down, or pte attributes are changing. For single threaded
 * address spaces, a new context is obtained on the current cpu, and tlb
 * context on other cpus are invalidated to force a new context allocation
 * at switch_mm time, should the mm ever be used on other cpus. For
 * multithreaded address spaces, inter-CPU interrupts have to be sent.
 * Another case where inter-CPU interrupts are required is when the target
 * mm might be active on another cpu (e.g. debuggers doing the flushes on
 * behalf of debuggees, kswapd stealing pages from another process, etc.).
 * Kanoj 07/00.
 */

void flush_tlb_mm(struct mm_struct *mm)
{
	if (!mm)
		return;

	if (atomic_read(&mm->mm_users) == 0)
		return;		/* happens as a result of exit_mmap() */

	preempt_disable();

	if (cpu_has_mmid) {
		/*
		 * No need to worry about other CPUs - the ginvt in
		 * drop_mmu_context() will be globalized.
		 */
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, 0);
		}
	}
	drop_mmu_context(mm);

	preempt_enable();
}

struct flush_tlb_data {
	struct vm_area_struct *vma;
	unsigned long addr1;
	unsigned long addr2;
};

static void flush_tlb_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
}

void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long addr;
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, mm));
		mtc0_tlbw_hazard();
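		/*
		 * Each TLB entry maps an even/odd pair of virtual pages,
		 * so align to and walk the range in PAGE_SIZE * 2 steps,
		 * one invalidate per pair.
		 */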
		addr = round_down(start, PAGE_SIZE * 2);
		end = round_up(end, PAGE_SIZE * 2);
		do {
			ginvt_va_mmid(addr);
			sync_ginv();
			addr += PAGE_SIZE * 2;
		} while (addr < end);
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = start,
			.addr2 = end,
		};

		smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
		local_flush_tlb_range(vma, start, end);
	} else {
		unsigned int cpu;
		int exec = vma->vm_flags & VM_EXEC;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_range() will only fully flush the
			 * icache if the VMA is executable; otherwise we must
			 * invalidate the ASID without making it appear to
			 * has_valid_asid() that the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, mm))
				set_cpu_context(cpu, mm, !exec);
		}
		local_flush_tlb_range(vma, start, end);
	}
	preempt_enable();
}

static void flush_tlb_kernel_range_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
}

void flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	struct flush_tlb_data fd = {
		.addr1 = start,
		.addr2 = end,
	};

	on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
}

static void flush_tlb_page_ipi(void *info)
{
	struct flush_tlb_data *fd = info;

	local_flush_tlb_page(fd->vma, fd->addr1);
}

void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	u32 old_mmid;

	preempt_disable();
	if (cpu_has_mmid) {
		htw_stop();
		old_mmid = read_c0_memorymapid();
		write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
		mtc0_tlbw_hazard();
		ginvt_va_mmid(page);
		sync_ginv();
		write_c0_memorymapid(old_mmid);
		instruction_hazard();
		htw_start();
	} else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
		   (current->mm != vma->vm_mm)) {
		struct flush_tlb_data fd = {
			.vma = vma,
			.addr1 = page,
		};

		smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
		local_flush_tlb_page(vma, page);
	} else {
		unsigned int cpu;

		for_each_online_cpu(cpu) {
			/*
			 * flush_cache_page() only does partial flushes, so
			 * invalidate the ASID without making it appear to
			 * has_valid_asid() that the mm has been completely
			 * unused by that CPU.
			 */
			if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
				set_cpu_context(cpu, vma->vm_mm, 1);
		}
		local_flush_tlb_page(vma, page);
	}
	preempt_enable();
}

static void flush_tlb_one_ipi(void *info)
{
	unsigned long vaddr = (unsigned long) info;

	local_flush_tlb_one(vaddr);
}

void flush_tlb_one(unsigned long vaddr)
{
	smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
}

EXPORT_SYMBOL(flush_tlb_page);
EXPORT_SYMBOL(flush_tlb_one);

#ifdef CONFIG_HOTPLUG_CORE_SYNC_DEAD
void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu)
{
	if (mp_ops->cleanup_dead_cpu)
		mp_ops->cleanup_dead_cpu(cpu);
}
#endif

#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST

static void tick_broadcast_callee(void *info)
{
	tick_receive_broadcast();
}

static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
	CSD_INIT(tick_broadcast_callee, NULL);

void tick_broadcast(const struct cpumask *mask)
{
	call_single_data_t *csd;
	int cpu;

	for_each_cpu(cpu, mask) {
		csd = &per_cpu(tick_broadcast_csd, cpu);
		smp_call_function_single_async(cpu, csd);
	}
}

#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */