// SPDX-License-Identifier: GPL-2.0
/*
 * sun4m SMP support.
 *
 * Copyright (C) 1996 David S. Miller (davem@caip.rutgers.edu)
 */

#include <linux/clockchips.h>
#include <linux/interrupt.h>
#include <linux/profile.h>
#include <linux/delay.h>
#include <linux/sched/mm.h>
#include <linux/cpu.h>

#include <asm/cacheflush.h>
#include <asm/switch_to.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/oplib.h>

#include "irq.h"
#include "kernel.h"

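/* Soft interrupt levels used as inter-processor interrupts on sun4m. */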
#define IRQ_IPI_SINGLE		12
#define IRQ_IPI_MASK		13
#define IRQ_IPI_RESCHED		14
#define IRQ_CROSS_CALL		15

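/*
 * The SPARC "swap" instruction atomically exchanges a register with a
 * word in memory, so this doubles as an atomic store that also returns
 * the old value.
 */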
static inline unsigned long
swap_ulong(volatile unsigned long *ptr, unsigned long val)
{
	__asm__ __volatile__("swap [%1], %0\n\t" :
			     "=&r" (val), "=&r" (ptr) :
			     "0" (val), "1" (ptr));
	return val;
}

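/* Intentionally empty: sun4m needs no extra setup at this stage of bringup. */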
void sun4m_cpu_pre_starting(void *arg)
{
}

void sun4m_cpu_pre_online(void *arg)
{
	int cpuid = hard_smp_processor_id();

	/* Allow master to continue. The master will then give us the
	 * go-ahead by setting the smp_commenced_mask and will wait without
	 * timeouts until our setup is completed fully (signified by
	 * our bit being set in the cpu_online_mask).
	 */
	swap_ulong(&cpu_callin_map[cpuid], 1);

	/* XXX: What's up with all the flushes? */
	local_ops->cache_all();
	local_ops->tlb_all();

	/* Fix idle thread fields. */
	__asm__ __volatile__("ld [%0], %%g6\n\t"
			     : : "r" (&current_set[cpuid])
			     : "memory" /* paranoid */);

	/* Attach to the address space of init_task. */
	mmgrab(&init_mm);
	current->active_mm = &init_mm;

	while (!cpumask_test_cpu(cpuid, &smp_commenced_mask))
		mb();
}

/*
 * Cycle through the processors asking the PROM to start each one.
 */
void __init smp4m_boot_cpus(void)
{
	sun4m_unmask_profile_irq();
	local_ops->cache_all();
}

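/*
 * Start one secondary CPU: look up its PROM node, point it at its own
 * trampoline entry, hand the PROM a freshly initialized MMU context table
 * descriptor, then poll until the CPU checks in via cpu_callin_map.
 */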
int smp4m_boot_one_cpu(int i, struct task_struct *idle)
{
	unsigned long *entry = &sun4m_cpu_startup;
	int timeout;
	int cpu_node;

	cpu_find_by_mid(i, &cpu_node);
	current_set[i] = task_thread_info(idle);

	/* See trampoline.S for details... */
	entry += ((i - 1) * 3);

	/*
	 * Initialize the contexts table
	 * Since the call to prom_startcpu() trashes the structure,
	 * we need to re-initialize it for each cpu
	 */
	smp_penguin_ctable.which_io = 0;
	smp_penguin_ctable.phys_addr = (unsigned int) srmmu_ctx_table_phys;
	smp_penguin_ctable.reg_size = 0;

	/* whirrr, whirrr, whirrrrrrrrr... */
	printk(KERN_INFO "Starting CPU %d at %p\n", i, entry);
	local_ops->cache_all();
	prom_startcpu(cpu_node, &smp_penguin_ctable, 0, (char *)entry);

	/* wheee... it's going... */
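	/* Poll for up to two seconds (10000 * 200us) for the callin. */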
	for (timeout = 0; timeout < 10000; timeout++) {
		if (cpu_callin_map[i])
			break;
		udelay(200);
	}

	if (!(cpu_callin_map[i])) {
		printk(KERN_ERR "Processor %d is stuck.\n", i);
		return -ENODEV;
	}

	local_ops->cache_all();
	return 0;
}

void __init smp4m_smp_done(void)
{
	int i, first;
	int *prev;

	/* Set up a circular list of online CPUs for irq rotation. */
	first = 0;
	prev = &first;
	for_each_online_cpu(i) {
		*prev = i;
		prev = &cpu_data(i).next;
	}
	*prev = first;
	local_ops->cache_all();

	/* Ok, they are spinning and ready to go. */
}

static void sun4m_send_ipi(int cpu, int level)
{
	sbus_writel(SUN4M_SOFT_INT(level), &sun4m_irq_percpu[cpu]->set);
}

static void sun4m_ipi_resched(int cpu)
{
	sun4m_send_ipi(cpu, IRQ_IPI_RESCHED);
}

static void sun4m_ipi_single(int cpu)
{
	sun4m_send_ipi(cpu, IRQ_IPI_SINGLE);
}

static void sun4m_ipi_mask_one(int cpu)
{
	sun4m_send_ipi(cpu, IRQ_IPI_MASK);
}

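/*
 * Cross-call rendezvous state: the initiator fills in func/args, fires an
 * IPI at each target, then spins until every target has set its
 * processors_in flag (entered the handler) and its processors_out flag
 * (finished running the call).
 */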
static struct smp_funcall {
	void *func;
	unsigned long arg1;
	unsigned long arg2;
	unsigned long arg3;
	unsigned long arg4;
	unsigned long arg5;
	unsigned long processors_in[SUN4M_NCPUS];  /* Set when ipi entered. */
	unsigned long processors_out[SUN4M_NCPUS]; /* Set when ipi exited. */
} ccall_info;

static DEFINE_SPINLOCK(cross_call_lock);

/* Cross calls must be serialized, at least currently. */
static void sun4m_cross_call(void *func, cpumask_t mask, unsigned long arg1,
			     unsigned long arg2, unsigned long arg3,
			     unsigned long arg4)
{
	register int ncpus = SUN4M_NCPUS;
	unsigned long flags;

	spin_lock_irqsave(&cross_call_lock, flags);

	/* Init function glue. */
	ccall_info.func = func;
	ccall_info.arg1 = arg1;
	ccall_info.arg2 = arg2;
	ccall_info.arg3 = arg3;
	ccall_info.arg4 = arg4;
	ccall_info.arg5 = 0;

	/* Init receive/complete mapping, plus fire the IPI's off. */
	{
		register int i;

		cpumask_clear_cpu(smp_processor_id(), &mask);
		cpumask_and(&mask, cpu_online_mask, &mask);
		for (i = 0; i < ncpus; i++) {
			if (cpumask_test_cpu(i, &mask)) {
				ccall_info.processors_in[i] = 0;
				ccall_info.processors_out[i] = 0;
				sun4m_send_ipi(i, IRQ_CROSS_CALL);
			} else {
				ccall_info.processors_in[i] = 1;
				ccall_info.processors_out[i] = 1;
			}
		}
	}

	{
		register int i;

		i = 0;
		do {
			if (!cpumask_test_cpu(i, &mask))
				continue;
			while (!ccall_info.processors_in[i])
				barrier();
		} while (++i < ncpus);

		i = 0;
		do {
			if (!cpumask_test_cpu(i, &mask))
				continue;
			while (!ccall_info.processors_out[i])
				barrier();
		} while (++i < ncpus);
	}
	spin_unlock_irqrestore(&cross_call_lock, flags);
}

/* Run an incoming cross call and report entry/exit back to the initiator. */
void smp4m_cross_call_irq(void)
{
	void (*func)(unsigned long, unsigned long, unsigned long, unsigned long,
		     unsigned long) = ccall_info.func;
	int i = smp_processor_id();

	ccall_info.processors_in[i] = 1;
	func(ccall_info.arg1, ccall_info.arg2, ccall_info.arg3, ccall_info.arg4,
	     ccall_info.arg5);
	ccall_info.processors_out[i] = 1;
}

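/*
 * Per-cpu timer tick: acknowledge this CPU's private timer, then run its
 * clockevent handler with IRQ context properly entered.
 */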
void smp4m_percpu_timer_interrupt(struct pt_regs *regs)
{
	struct pt_regs *old_regs;
	struct clock_event_device *ce;
	int cpu = smp_processor_id();

	old_regs = set_irq_regs(regs);

	ce = &per_cpu(sparc32_clockevent, cpu);

	if (clockevent_state_periodic(ce))
		sun4m_clear_profile_irq(cpu);
	else
		sparc_config.load_profile_irq(cpu, 0); /* Is this needless? */

	irq_enter();
	ce->event_handler(ce);
	irq_exit();

	set_irq_regs(old_regs);
}


static const struct sparc32_ipi_ops sun4m_ipi_ops = {
	.cross_call = sun4m_cross_call,
	.resched = sun4m_ipi_resched,
	.single = sun4m_ipi_single,
	.mask_one = sun4m_ipi_mask_one,
};

void __init sun4m_init_smp(void)
{
	sparc32_ipi_ops = &sun4m_ipi_ops;
}