1 | // SPDX-License-Identifier: GPL-2.0 |
2 | /* |
3 | * Copyright (C) 2012 Regents of the University of California |
4 | * Copyright (C) 2017-2018 SiFive |
5 | * Copyright (C) 2020 Western Digital Corporation or its affiliates. |
6 | */ |
7 | |
8 | #define pr_fmt(fmt) "riscv-intc: " fmt |
9 | #include <linux/acpi.h> |
10 | #include <linux/atomic.h> |
11 | #include <linux/bits.h> |
12 | #include <linux/cpu.h> |
13 | #include <linux/irq.h> |
14 | #include <linux/irqchip.h> |
15 | #include <linux/irqdomain.h> |
16 | #include <linux/interrupt.h> |
17 | #include <linux/module.h> |
18 | #include <linux/of.h> |
19 | #include <linux/smp.h> |
20 | #include <linux/soc/andes/irq.h> |
21 | |
22 | #include <asm/hwcap.h> |
23 | |
/* Single IRQ domain shared by all harts; created once on the boot CPU. */
static struct irq_domain *intc_domain;
/* Number of standard local interrupt sources (bumped to 64 when AIA is present). */
static unsigned int riscv_intc_nr_irqs __ro_after_init = BITS_PER_LONG;
/* First hwirq of the vendor-specific (custom) local interrupt range. */
static unsigned int riscv_intc_custom_base __ro_after_init = BITS_PER_LONG;
/* Number of vendor-specific local interrupts; 0 when there are none. */
static unsigned int riscv_intc_custom_nr_irqs __ro_after_init;
28 | |
29 | static asmlinkage void riscv_intc_irq(struct pt_regs *regs) |
30 | { |
31 | unsigned long cause = regs->cause & ~CAUSE_IRQ_FLAG; |
32 | |
33 | if (generic_handle_domain_irq(domain: intc_domain, hwirq: cause)) |
34 | pr_warn_ratelimited("Failed to handle interrupt (cause: %ld)\n" , cause); |
35 | } |
36 | |
37 | static asmlinkage void riscv_intc_aia_irq(struct pt_regs *regs) |
38 | { |
39 | unsigned long topi; |
40 | |
41 | while ((topi = csr_read(CSR_TOPI))) |
42 | generic_handle_domain_irq(domain: intc_domain, hwirq: topi >> TOPI_IID_SHIFT); |
43 | } |
44 | |
45 | /* |
46 | * On RISC-V systems local interrupts are masked or unmasked by writing |
47 | * the SIE (Supervisor Interrupt Enable) CSR. As CSRs can only be written |
48 | * on the local hart, these functions can only be called on the hart that |
49 | * corresponds to the IRQ chip. |
50 | */ |
51 | |
52 | static void riscv_intc_irq_mask(struct irq_data *d) |
53 | { |
54 | if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) |
55 | csr_clear(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); |
56 | else |
57 | csr_clear(CSR_IE, BIT(d->hwirq)); |
58 | } |
59 | |
60 | static void riscv_intc_irq_unmask(struct irq_data *d) |
61 | { |
62 | if (IS_ENABLED(CONFIG_32BIT) && d->hwirq >= BITS_PER_LONG) |
63 | csr_set(CSR_IEH, BIT(d->hwirq - BITS_PER_LONG)); |
64 | else |
65 | csr_set(CSR_IE, BIT(d->hwirq)); |
66 | } |
67 | |
68 | static void andes_intc_irq_mask(struct irq_data *d) |
69 | { |
70 | /* |
71 | * Andes specific S-mode local interrupt causes (hwirq) |
72 | * are defined as (256 + n) and controlled by n-th bit |
73 | * of SLIE. |
74 | */ |
75 | unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); |
76 | |
77 | if (d->hwirq < ANDES_SLI_CAUSE_BASE) |
78 | csr_clear(CSR_IE, mask); |
79 | else |
80 | csr_clear(ANDES_CSR_SLIE, mask); |
81 | } |
82 | |
83 | static void andes_intc_irq_unmask(struct irq_data *d) |
84 | { |
85 | unsigned int mask = BIT(d->hwirq % BITS_PER_LONG); |
86 | |
87 | if (d->hwirq < ANDES_SLI_CAUSE_BASE) |
88 | csr_set(CSR_IE, mask); |
89 | else |
90 | csr_set(ANDES_CSR_SLIE, mask); |
91 | } |
92 | |
static void riscv_intc_irq_eoi(struct irq_data *d)
{
	/*
	 * Deliberately a no-op. Per-HART local interrupts use the
	 * handle_percpu_devid_irq() flow and child irqchip drivers
	 * (PLIC, SBI IPI, CLINT, APLIC, IMSIC, etc.) attach chained
	 * handlers to them. Supplying this empty irq_eoi() keeps
	 * chained_irq_enter()/chained_irq_exit() from doing a needless
	 * mask/unmask of the per-HART local interrupt on every dispatch.
	 */
}
108 | |
/* Standard RISC-V local-interrupt chip: masks via the SIE/SIEH CSRs. */
static struct irq_chip riscv_intc_chip = {
	.name = "RISC-V INTC" ,
	.irq_mask = riscv_intc_irq_mask,
	.irq_unmask = riscv_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
115 | |
/*
 * Andes variant: same name/eoi as the standard chip, but mask/unmask also
 * handle the vendor SLIE CSR for custom causes at/above ANDES_SLI_CAUSE_BASE.
 */
static struct irq_chip andes_intc_chip = {
	.name = "RISC-V INTC" ,
	.irq_mask = andes_intc_irq_mask,
	.irq_unmask = andes_intc_irq_unmask,
	.irq_eoi = riscv_intc_irq_eoi,
};
122 | |
123 | static int riscv_intc_domain_map(struct irq_domain *d, unsigned int irq, |
124 | irq_hw_number_t hwirq) |
125 | { |
126 | struct irq_chip *chip = d->host_data; |
127 | |
128 | irq_set_percpu_devid(irq); |
129 | irq_domain_set_info(domain: d, virq: irq, hwirq, chip, NULL, handler: handle_percpu_devid_irq, |
130 | NULL, NULL); |
131 | |
132 | return 0; |
133 | } |
134 | |
135 | static int riscv_intc_domain_alloc(struct irq_domain *domain, |
136 | unsigned int virq, unsigned int nr_irqs, |
137 | void *arg) |
138 | { |
139 | int i, ret; |
140 | irq_hw_number_t hwirq; |
141 | unsigned int type = IRQ_TYPE_NONE; |
142 | struct irq_fwspec *fwspec = arg; |
143 | |
144 | ret = irq_domain_translate_onecell(d: domain, fwspec, out_hwirq: &hwirq, out_type: &type); |
145 | if (ret) |
146 | return ret; |
147 | |
148 | /* |
149 | * Only allow hwirq for which we have corresponding standard or |
150 | * custom interrupt enable register. |
151 | */ |
152 | if (hwirq >= riscv_intc_nr_irqs && |
153 | (hwirq < riscv_intc_custom_base || |
154 | hwirq >= riscv_intc_custom_base + riscv_intc_custom_nr_irqs)) |
155 | return -EINVAL; |
156 | |
157 | for (i = 0; i < nr_irqs; i++) { |
158 | ret = riscv_intc_domain_map(d: domain, irq: virq + i, hwirq: hwirq + i); |
159 | if (ret) |
160 | return ret; |
161 | } |
162 | |
163 | return 0; |
164 | } |
165 | |
166 | static const struct irq_domain_ops riscv_intc_domain_ops = { |
167 | .map = riscv_intc_domain_map, |
168 | .xlate = irq_domain_xlate_onecell, |
169 | .alloc = riscv_intc_domain_alloc |
170 | }; |
171 | |
172 | static struct fwnode_handle *riscv_intc_hwnode(void) |
173 | { |
174 | return intc_domain->fwnode; |
175 | } |
176 | |
177 | static int __init riscv_intc_init_common(struct fwnode_handle *fn, struct irq_chip *chip) |
178 | { |
179 | int rc; |
180 | |
181 | intc_domain = irq_domain_create_tree(fwnode: fn, ops: &riscv_intc_domain_ops, host_data: chip); |
182 | if (!intc_domain) { |
183 | pr_err("unable to add IRQ domain\n" ); |
184 | return -ENXIO; |
185 | } |
186 | |
187 | if (riscv_isa_extension_available(NULL, SxAIA)) { |
188 | riscv_intc_nr_irqs = 64; |
189 | rc = set_handle_irq(&riscv_intc_aia_irq); |
190 | } else { |
191 | rc = set_handle_irq(&riscv_intc_irq); |
192 | } |
193 | if (rc) { |
194 | pr_err("failed to set irq handler\n" ); |
195 | return rc; |
196 | } |
197 | |
198 | riscv_set_intc_hwnode_fn(riscv_intc_hwnode); |
199 | |
200 | pr_info("%d local interrupts mapped%s\n" , |
201 | riscv_intc_nr_irqs, |
202 | riscv_isa_extension_available(NULL, SxAIA) ? " using AIA" : "" ); |
203 | if (riscv_intc_custom_nr_irqs) |
204 | pr_info("%d custom local interrupts mapped\n" , riscv_intc_custom_nr_irqs); |
205 | |
206 | return 0; |
207 | } |
208 | |
209 | static int __init riscv_intc_init(struct device_node *node, |
210 | struct device_node *parent) |
211 | { |
212 | struct irq_chip *chip = &riscv_intc_chip; |
213 | unsigned long hartid; |
214 | int rc; |
215 | |
216 | rc = riscv_of_parent_hartid(node, &hartid); |
217 | if (rc < 0) { |
218 | pr_warn("unable to find hart id for %pOF\n" , node); |
219 | return 0; |
220 | } |
221 | |
222 | /* |
223 | * The DT will have one INTC DT node under each CPU (or HART) |
224 | * DT node so riscv_intc_init() function will be called once |
225 | * for each INTC DT node. We only need to do INTC initialization |
226 | * for the INTC DT node belonging to boot CPU (or boot HART). |
227 | */ |
228 | if (riscv_hartid_to_cpuid(hartid) != smp_processor_id()) { |
229 | /* |
230 | * The INTC nodes of each CPU are suppliers for downstream |
231 | * interrupt controllers (such as PLIC, IMSIC and APLIC |
232 | * direct-mode) so we should mark an INTC node as initialized |
233 | * if we are not creating IRQ domain for it. |
234 | */ |
235 | fwnode_dev_initialized(of_fwnode_handle(node), initialized: true); |
236 | return 0; |
237 | } |
238 | |
239 | if (of_device_is_compatible(device: node, "andestech,cpu-intc" )) { |
240 | riscv_intc_custom_base = ANDES_SLI_CAUSE_BASE; |
241 | riscv_intc_custom_nr_irqs = ANDES_RV_IRQ_LAST; |
242 | chip = &andes_intc_chip; |
243 | } |
244 | |
245 | return riscv_intc_init_common(fn: of_node_to_fwnode(node), chip); |
246 | } |
247 | |
/* Both the generic and the Andes compatible strings probe the same init. */
IRQCHIP_DECLARE(riscv, "riscv,cpu-intc" , riscv_intc_init);
IRQCHIP_DECLARE(andes, "andestech,cpu-intc" , riscv_intc_init);
250 | |
251 | #ifdef CONFIG_ACPI |
252 | |
253 | static int __init riscv_intc_acpi_init(union acpi_subtable_headers *, |
254 | const unsigned long end) |
255 | { |
256 | struct fwnode_handle *fn; |
257 | struct acpi_madt_rintc *rintc; |
258 | |
259 | rintc = (struct acpi_madt_rintc *)header; |
260 | |
261 | /* |
262 | * The ACPI MADT will have one INTC for each CPU (or HART) |
263 | * so riscv_intc_acpi_init() function will be called once |
264 | * for each INTC. We only do INTC initialization |
265 | * for the INTC belonging to the boot CPU (or boot HART). |
266 | */ |
267 | if (riscv_hartid_to_cpuid(rintc->hart_id) != smp_processor_id()) |
268 | return 0; |
269 | |
270 | fn = irq_domain_alloc_named_fwnode(name: "RISCV-INTC" ); |
271 | if (!fn) { |
272 | pr_err("unable to allocate INTC FW node\n" ); |
273 | return -ENOMEM; |
274 | } |
275 | |
276 | return riscv_intc_init_common(fn, chip: &riscv_intc_chip); |
277 | } |
278 | |
/* Probe from ACPI MADT RINTC (v1) entries, one per hart. */
IRQCHIP_ACPI_DECLARE(riscv_intc, ACPI_MADT_TYPE_RINTC, NULL,
		     ACPI_MADT_RINTC_VERSION_V1, riscv_intc_acpi_init);
281 | #endif |
282 | |