// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 * Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 * Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 * Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
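 * (Reading that in conventional left-shift terms, with bit 0 as the
 * most-significant bit, enabling IRQ n presumably amounts to setting
 * 1 << (31 - n); that is the "weird shifting" mentioned above.)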
 * Many of the prep/chrp functions are conditionally compiled on
 * CONFIG_PPC_8xx to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>
#include <linux/static_call.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>
#include <asm/ppc_asm.h>

#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>
DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

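/*
 * Arch hook called from show_interrupts() to append the architecture
 * specific counters (timer, PMU, machine check, etc.) to /proc/interrupts.
 */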
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "BCT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
	seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}
#endif

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
	sum += paca_ptrs[cpu]->hmi_irqs;
#endif
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

static inline void check_stack_overflow(unsigned long sp)
{
	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
		return;

	sp &= THREAD_SIZE - 1;

	/* check for stack overflow: is there less than 1/4th free? */
	if (unlikely(sp < THREAD_SIZE / 4)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
}

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
static __always_inline void call_do_softirq(const void *sp)
{
	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
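	/*
	 * The store-with-update pushes the old r1 at the top of the new
	 * stack area and leaves [sp] pointing at that slot, so the saved
	 * value doubles as a back chain; the final load follows it to
	 * restore the original r1.
	 */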
	asm volatile (
		PPC_STLU " %%r1, %[offset](%[sp]) ;"
		"mr %%r1, %[sp] ;"
#ifdef CONFIG_PPC_KERNEL_PCREL
		"bl %[callee]@notoc ;"
#else
		"bl %[callee] ;"
#endif
		PPC_LL " %%r1, 0(%%r1) ;"
		: // Outputs
		: // Inputs
		  [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
		  [callee] "i" (__do_softirq)
		: // Clobbers
		  "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
		  "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		  "r11", "r12"
	);
}
#endif

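/*
 * Static call wrapping the platform's get_irq() hook. The RET0 default
 * makes it return 0 ("no interrupt") until init_IRQ() installs
 * ppc_md.get_irq via static_call_update().
 */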
DEFINE_STATIC_CALL_RET0(ppc_get_irq, *ppc_md.get_irq);

static void __do_irq(struct pt_regs *regs, unsigned long oldsp)
{
	unsigned int irq;

	trace_irq_entry(regs);

	check_stack_overflow(oldsp);

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = static_call(ppc_get_irq)();

	/* We can hard enable interrupts now to allow perf interrupts */
	if (should_hard_irq_enable(regs))
		do_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);
}

static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
	register unsigned long r3 asm("r3") = (unsigned long)regs;

	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
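	/*
	 * r3 already carries regs as the first argument; the old r1 is
	 * copied to r4 below so __do_irq() receives it as oldsp for the
	 * stack overflow check.
	 */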
	asm volatile (
		PPC_STLU " %%r1, %[offset](%[sp]) ;"
		"mr %%r4, %%r1 ;"
		"mr %%r1, %[sp] ;"
#ifdef CONFIG_PPC_KERNEL_PCREL
		"bl %[callee]@notoc ;"
#else
		"bl %[callee] ;"
#endif
		PPC_LL " %%r1, 0(%%r1) ;"
		: // Outputs
		  "+r" (r3)
		: // Inputs
		  [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_MIN_SIZE),
		  [callee] "i" (__do_irq)
		: // Clobbers
		  "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
		  "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		  "r11", "r12"
	);
}

void __do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp;

	/* Switch to the irq stack to handle this */
	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];

	/* Already there ? If not switch stack and call */
	if (unlikely(cursp == irqsp))
		__do_irq(regs, current_stack_pointer);
	else
		call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
	__do_IRQ(regs);
}

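/*
 * With CONFIG_VMAP_STACK the IRQ stacks are allocated from vmalloc
 * space, so they pick up vmalloc's guard pages and an overflow faults
 * instead of silently corrupting whatever sits next to the stack.
 */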
static void *__init alloc_vm_stack(void)
{
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_vm_stack();
		hardirq_ctx[i] = alloc_vm_stack();
	}
}


void __init init_IRQ(void)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		vmap_irqstack_init();

	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();

	if (!WARN_ON(!ppc_md.get_irq))
		static_call_update(ppc_get_irq, ppc_md.get_irq);
}

#ifdef CONFIG_BOOKE_OR_40x
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
void do_softirq_own_stack(void)
{
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}
#endif

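/*
 * virq_to_hw() - translate a Linux (virtual) interrupt number into the
 * hardware interrupt number stored in its irq_data, or 0 if the virq
 * has no irq_data.
 */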
irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);

#ifdef CONFIG_SMP
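/*
 * Pick a target CPU for an interrupt: round-robin over all online CPUs
 * when the affinity mask covers every online CPU, otherwise the first
 * online CPU in the mask (falling back to round-robin if none of them
 * is online). Returns the hard (physical) processor id.
 */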
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
do_round_robin:
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			goto do_round_robin;
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif