// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of devices supported by controllers marked as
 * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0
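
/*
 * Worked example of the register map above, for illustration only (the code
 * never uses these literal values): the enable bit for hwirq 40 in context 2
 * lives at byte offset
 *	CONTEXT_ENABLE_BASE + 2 * CONTEXT_ENABLE_SIZE + (40 / 32) * 4
 *	  = 0x2000 + 0x100 + 0x4 = 0x2104, bit (40 % 32) = 8,
 * and context 2's claim/complete register sits at
 *	CONTEXT_BASE + 2 * CONTEXT_SIZE + CONTEXT_CLAIM = 0x202004.
 */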

#define PLIC_QUIRK_EDGE_INTERRUPT	0

struct plic_priv {
	struct fwnode_handle *fwnode;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
	u32 gsi_base;
	int acpi_plic_id;
};

struct plic_handler {
	bool present;
	void __iomem *hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t enable_lock;
	void __iomem *enable_base;
	u32 *enable_save;
	struct plic_priv *priv;
};

static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
	plic_irq_unmask(d);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

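/*
 * Two irq_chip flavours: for devices with the edge quirk, the claim/complete
 * write happens via .irq_ack at the start of handle_edge_irq(), so an edge
 * arriving while the handler runs is latched by the PLIC and replayed; level
 * interrupts are instead completed via .irq_eoi once the handler is done.
 * plic_irq_set_type() below switches an interrupt between the two.
 */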
static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}

static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		__assign_bit(i, priv->prio_save,
			     readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	/* For DT, gsi_base is always zero. */
	if (fwspec->param[0] >= priv->gsi_base)
		fwspec->param[0] = fwspec->param[0] - priv->gsi_base;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
					    handler->priv->fwnode, hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

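/*
 * A minimal bare-metal sketch of the claim/complete protocol described above,
 * for illustration only (Linux dispatches through the irqdomain instead, and
 * handle_source() is a hypothetical per-source handler):
 *
 *	u32 hwirq;
 *
 *	while ((hwirq = readl(hart_base + CONTEXT_CLAIM))) {	// claim; 0 = none pending
 *		handle_source(hwirq);				// hypothetical handler
 *		writel(hwirq, hart_base + CONTEXT_CLAIM);	// complete the source
 *	}
 */
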
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
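
/*
 * Note: since this driver hardwires all source priorities to 1 (see
 * plic_irq_unmask() and plic_probe()), PLIC_DISABLE_THRESHOLD (0x7) masks
 * every source for the context, while PLIC_ENABLE_THRESHOLD (0) lets all
 * sources through.
 */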

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("%pfwP: cpu%d: parent irq not available\n",
			handler->priv->fwnode, cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};

#ifdef CONFIG_ACPI

static const struct acpi_device_id plic_acpi_match[] = {
	{ "RSCV0001", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, plic_acpi_match);

#endif

static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
					   u32 *nr_irqs, u32 *nr_contexts,
					   u32 *gsi_base, u32 *id)
{
	int rc;

	if (!is_of_node(fwnode)) {
		rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
		if (rc) {
			pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
			return rc;
		}

		*nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
		if (WARN_ON(!*nr_contexts)) {
			pr_err("%pfwP: no PLIC context available\n", fwnode);
			return -EINVAL;
		}

		return 0;
	}

	rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		pr_err("%pfwP: no PLIC context available\n", fwnode);
		return -EINVAL;
	}

	*gsi_base = 0;
	*id = 0;

	return 0;
}

static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
				     u32 *parent_hwirq, int *parent_cpu, u32 id)
{
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	if (!is_of_node(fwnode)) {
		hartid = acpi_rintc_ext_parent_to_hartid(id, context);
		if (hartid == INVALID_HARTID)
			return -EINVAL;

		*parent_cpu = riscv_hartid_to_cpuid(hartid);
		*parent_hwirq = RV_IRQ_EXT;
		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}

static int plic_probe(struct fwnode_handle *fwnode)
{
	int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, parent_hwirq;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;
	void __iomem *regs;
	int id, context_id;
	u32 gsi_base;

	if (is_of_node(fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;

		regs = of_iomap(to_of_node(fwnode), 0);
		if (!regs)
			return -ENOMEM;
	} else {
		regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);
	}

	error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id);
	if (error)
		goto fail_free_regs;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		error = -ENOMEM;
		goto fail_free_regs;
	}

	priv->fwnode = fwnode;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;
	priv->regs = regs;
	priv->gsi_base = gsi_base;
	priv->acpi_plic_id = id;

	priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save) {
		error = -ENOMEM;
		goto fail_free_priv;
	}

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
						  priv->acpi_plic_id);
		if (error) {
			pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
			continue;
		}

		if (is_of_node(fwnode)) {
			context_id = i;
		} else {
			context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
			if (context_id == INVALID_CONTEXT) {
				pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i);
				continue;
			}
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
			context_id * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
			context_id * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
					       sizeof(*handler->enable_save), GFP_KERNEL);
		if (!handler->enable_save) {
			error = -ENOMEM;
			goto fail_cleanup_contexts;
		}
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
						   &plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain)) {
		error = -ENOMEM;
		goto fail_cleanup_contexts;
	}

	/*
	 * We can have multiple PLIC instances so setup global state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			/* Find parent domain and register chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_global_setup_done = true;
		}
	}

#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
#endif

	pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
		fwnode, nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		kfree(handler->enable_save);
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	bitmap_free(priv->prio_save);
fail_free_priv:
	kfree(priv);
fail_free_regs:
	iounmap(regs);
	return error;
}

static int plic_platform_probe(struct platform_device *pdev)
{
	return plic_probe(pdev->dev.fwnode);
}

static struct platform_driver plic_driver = {
	.driver = {
		.name			= "riscv-plic",
		.of_match_table		= plic_match,
		.suppress_bind_attrs	= true,
		.acpi_match_table	= ACPI_PTR(plic_acpi_match),
	},
	.probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);

static int __init plic_early_probe(struct device_node *node,
				   struct device_node *parent)
{
	return plic_probe(&node->fwnode);
}

IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);