// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2012 Regents of the University of California
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
7 | |
8 | #include <linux/interrupt.h> |
9 | #include <linux/irqchip.h> |
10 | #include <linux/irqdomain.h> |
11 | #include <linux/module.h> |
12 | #include <linux/scs.h> |
13 | #include <linux/seq_file.h> |
14 | #include <asm/sbi.h> |
15 | #include <asm/smp.h> |
16 | #include <asm/softirq_stack.h> |
17 | #include <asm/stacktrace.h> |
18 | |
19 | static struct fwnode_handle *(*__get_intc_node)(void); |
20 | |
/*
 * Register the provider callback that riscv_get_intc_hwnode() will use to
 * look up the fwnode of the local interrupt controller. Passing NULL
 * effectively unregisters the provider.
 */
void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void))
{
	__get_intc_node = fn;
}
25 | |
26 | struct fwnode_handle *riscv_get_intc_hwnode(void) |
27 | { |
28 | if (__get_intc_node) |
29 | return __get_intc_node(); |
30 | |
31 | return NULL; |
32 | } |
33 | EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode); |
34 | |
35 | #ifdef CONFIG_IRQ_STACKS |
36 | #include <asm/irq_stack.h> |
37 | |
38 | DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr); |
39 | |
40 | #ifdef CONFIG_SHADOW_CALL_STACK |
41 | DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr); |
42 | #endif |
43 | |
44 | static void init_irq_scs(void) |
45 | { |
46 | int cpu; |
47 | |
48 | if (!scs_is_enabled()) |
49 | return; |
50 | |
51 | for_each_possible_cpu(cpu) |
52 | per_cpu(irq_shadow_call_stack_ptr, cpu) = |
53 | scs_alloc(cpu_to_node(cpu)); |
54 | } |
55 | |
56 | DEFINE_PER_CPU(ulong *, irq_stack_ptr); |
57 | |
58 | #ifdef CONFIG_VMAP_STACK |
59 | static void init_irq_stacks(void) |
60 | { |
61 | int cpu; |
62 | ulong *p; |
63 | |
64 | for_each_possible_cpu(cpu) { |
65 | p = arch_alloc_vmap_stack(IRQ_STACK_SIZE, cpu_to_node(cpu)); |
66 | per_cpu(irq_stack_ptr, cpu) = p; |
67 | } |
68 | } |
69 | #else |
70 | /* irq stack only needs to be 16 byte aligned - not IRQ_STACK_SIZE aligned. */ |
71 | DEFINE_PER_CPU_ALIGNED(ulong [IRQ_STACK_SIZE/sizeof(ulong)], irq_stack); |
72 | |
static void init_irq_stacks(void)
{
	int cpu;

	/* Point each CPU's irq_stack_ptr at its statically allocated stack. */
	for_each_possible_cpu(cpu)
		per_cpu(irq_stack_ptr, cpu) = per_cpu(irq_stack, cpu);
}
80 | #endif /* CONFIG_VMAP_STACK */ |
81 | |
82 | #ifdef CONFIG_SOFTIRQ_ON_OWN_STACK |
/*
 * Adapter matching the call_on_irq_stack() callback signature;
 * the pt_regs argument is unused.
 */
static void ___do_softirq(struct pt_regs *regs)
{
	__do_softirq();
}
87 | |
88 | void do_softirq_own_stack(void) |
89 | { |
90 | if (on_thread_stack()) |
91 | call_on_irq_stack(NULL, ___do_softirq); |
92 | else |
93 | __do_softirq(); |
94 | } |
95 | #endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */ |
96 | |
97 | #else |
/* Stubs used when CONFIG_IRQ_STACKS is disabled: nothing to set up. */
static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
100 | #endif /* CONFIG_IRQ_STACKS */ |
101 | |
/*
 * Arch hook for /proc/interrupts: append the per-CPU IPI statistics.
 * Always returns 0 (success).
 */
int arch_show_interrupts(struct seq_file *p, int prec)
{
	show_ipi_stats(p, prec);
	return 0;
}
107 | |
108 | void __init init_IRQ(void) |
109 | { |
110 | init_irq_scs(); |
111 | init_irq_stacks(); |
112 | irqchip_init(); |
113 | if (!handle_arch_irq) |
114 | panic(fmt: "No interrupt controller found." ); |
115 | sbi_ipi_init(); |
116 | } |
117 | |