// SPDX-License-Identifier: GPL-2.0
/*
 * drivers/irqchip/irq-loongson-eiointc.c
 *
 * Loongson Extended I/O Interrupt Controller support
 *
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#define pr_fmt(fmt) "eiointc: " fmt

#include <linux/cpuhotplug.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/kernel.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/syscore_ops.h>

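/*
 * IOCSR register offsets of the extended I/O interrupt controller.
 * NODEMAP maps vector groups to node groups, IPMAP selects the CPU
 * interrupt pin (IP) a vector group is routed to, ENABLE masks/unmasks
 * individual vectors, ISR is the write-1-to-clear pending status, and
 * ROUTE holds one byte per vector (target node in the high nibble,
 * target core bitmap in the low bits). BOUNCE, programmed alongside
 * ENABLE at init time, lets a vector bounce between its target cores.
 */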
#define EIOINTC_REG_NODEMAP	0x14a0
#define EIOINTC_REG_IPMAP	0x14c0
#define EIOINTC_REG_ENABLE	0x1600
#define EIOINTC_REG_BOUNCE	0x1680
#define EIOINTC_REG_ISR		0x1800
#define EIOINTC_REG_ROUTE	0x1c00

#define VEC_REG_COUNT		4
#define VEC_COUNT_PER_REG	64
#define VEC_COUNT		(VEC_REG_COUNT * VEC_COUNT_PER_REG)
#define VEC_REG_IDX(irq_id)	((irq_id) / VEC_COUNT_PER_REG)
#define VEC_REG_BIT(irq_id)	((irq_id) % VEC_COUNT_PER_REG)
#define EIOINTC_ALL_ENABLE	0xffffffff

#define MAX_EIO_NODES		(NR_CPUS / CORES_PER_EIO_NODE)

static int nr_pics;

struct eiointc_priv {
	u32			node;
	u32			vec_count;
	nodemask_t		node_map;
	cpumask_t		cpuspan_map;
	struct fwnode_handle	*domain_handle;
	struct irq_domain	*eiointc_domain;
};

static struct eiointc_priv *eiointc_priv[MAX_IO_PICS];

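/* Enable the extended I/O interrupt feature in the MISC function IOCSR. */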
static void eiointc_enable(void)
{
	uint64_t misc;

	misc = iocsr_read64(LOONGARCH_IOCSR_MISC_FUNC);
	misc |= IOCSR_MISC_FUNC_EXT_IOI_EN;
	iocsr_write64(misc, LOONGARCH_IOCSR_MISC_FUNC);
}

static int cpu_to_eio_node(int cpu)
{
	return cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
}

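/*
 * Write the routing byte of vector @pos on every node spanned by
 * @node_map. On the master node (@mnode) the byte points at the target
 * CPU's node and core; other nodes route to themselves and rely on EIO
 * node 0 for inter-node dispatch.
 */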
static void eiointc_set_irq_route(int pos, unsigned int cpu, unsigned int mnode, nodemask_t *node_map)
{
	int i, node, cpu_node, route_node;
	unsigned char coremap;
	uint32_t pos_off, data, data_byte, data_mask;

	pos_off = pos & ~3;
	data_byte = pos & 3;
	data_mask = ~BIT_MASK(data_byte) & 0xf;

	/* Calculate node and coremap of target irq */
	cpu_node = cpu_logical_map(cpu) / CORES_PER_EIO_NODE;
	coremap = BIT(cpu_logical_map(cpu) % CORES_PER_EIO_NODE);

	for_each_online_cpu(i) {
		node = cpu_to_eio_node(i);
		if (!node_isset(node, *node_map))
			continue;

		/* EIO node 0 is in charge of inter-node interrupt dispatch */
		route_node = (node == mnode) ? cpu_node : node;
		data = ((coremap | (route_node << 4)) << (data_byte * 8));
		csr_any_send(EIOINTC_REG_ROUTE + pos_off, data, data_mask, node * CORES_PER_EIO_NODE);
	}
}

static DEFINE_RAW_SPINLOCK(affinity_lock);

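/*
 * Steer a vector to the first online CPU that is both in the requested
 * affinity mask and within this controller's span. The vector is masked
 * while its routing byte is rewritten so it cannot fire mid-update.
 */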
static int eiointc_set_irq_affinity(struct irq_data *d, const struct cpumask *affinity, bool force)
{
	unsigned int cpu;
	unsigned long flags;
	uint32_t vector, regaddr;
	struct cpumask intersect_affinity;
	struct eiointc_priv *priv = d->domain->host_data;

	raw_spin_lock_irqsave(&affinity_lock, flags);

	cpumask_and(&intersect_affinity, affinity, cpu_online_mask);
	cpumask_and(&intersect_affinity, &intersect_affinity, &priv->cpuspan_map);

	if (cpumask_empty(&intersect_affinity)) {
		raw_spin_unlock_irqrestore(&affinity_lock, flags);
		return -EINVAL;
	}
	cpu = cpumask_first(&intersect_affinity);

	vector = d->hwirq;
	regaddr = EIOINTC_REG_ENABLE + ((vector >> 5) << 2);

	/* Mask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE & (~BIT(vector & 0x1F)),
		     0x0, priv->node * CORES_PER_EIO_NODE);

	/* Set route for target vector */
	eiointc_set_irq_route(vector, cpu, priv->node, &priv->node_map);

	/* Unmask target vector */
	csr_any_send(regaddr, EIOINTC_ALL_ENABLE,
		     0x0, priv->node * CORES_PER_EIO_NODE);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	raw_spin_unlock_irqrestore(&affinity_lock, flags);

	return IRQ_SET_MASK_OK;
}

static int eiointc_index(int node)
{
	int i;

	for (i = 0; i < nr_pics; i++) {
		if (node_isset(node, eiointc_priv[i]->node_map))
			return i;
	}

	return -1;
}

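/*
 * Per-CPU bring-up, also run on resume. Only the first core of each EIO
 * node programs the hardware: enable the extended I/O feature, set up
 * the node map, route all vector groups to IP[1 + index], point every
 * vector at a default core (the boot core on node 0, core 0 on other
 * nodes), then enable bouncing and unmask all vectors.
 */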
static int eiointc_router_init(unsigned int cpu)
{
	int i, bit;
	uint32_t data;
	uint32_t node = cpu_to_eio_node(cpu);
	int index = eiointc_index(node);

	if (index < 0) {
		pr_err("Error: invalid nodemap!\n");
		return -1;
	}

	if ((cpu_logical_map(cpu) % CORES_PER_EIO_NODE) == 0) {
		eiointc_enable();

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = (((1 << (i * 2 + 1)) << 16) | (1 << (i * 2)));
			iocsr_write32(data, EIOINTC_REG_NODEMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32 / 4; i++) {
			bit = BIT(1 + index); /* Route to IP[1 + index] */
			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_IPMAP + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 4; i++) {
			/* Route to Node-0 Core-0 */
			if (index == 0)
				bit = BIT(cpu_logical_map(0));
			else
				bit = (eiointc_priv[index]->node << 4) | 1;

			data = bit | (bit << 8) | (bit << 16) | (bit << 24);
			iocsr_write32(data, EIOINTC_REG_ROUTE + i * 4);
		}

		for (i = 0; i < eiointc_priv[0]->vec_count / 32; i++) {
			data = 0xffffffff;
			iocsr_write32(data, EIOINTC_REG_ENABLE + i * 4);
			iocsr_write32(data, EIOINTC_REG_BOUNCE + i * 4);
		}
	}

	return 0;
}

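/*
 * Chained handler for the parent CPU interrupt: scan the ISR 64 vectors
 * at a time, acknowledge pending bits by writing them back
 * (write-1-to-clear), then dispatch each one into the irq domain.
 */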
static void eiointc_irq_dispatch(struct irq_desc *desc)
{
	int i;
	u64 pending;
	bool handled = false;
	struct irq_chip *chip = irq_desc_get_chip(desc);
	struct eiointc_priv *priv = irq_desc_get_handler_data(desc);

	chained_irq_enter(chip, desc);

	for (i = 0; i < eiointc_priv[0]->vec_count / VEC_COUNT_PER_REG; i++) {
		pending = iocsr_read64(EIOINTC_REG_ISR + (i << 3));

		/* Skip handling if pending bitmap is zero */
		if (!pending)
			continue;

		/* Clear the IRQs */
		iocsr_write64(pending, EIOINTC_REG_ISR + (i << 3));
		while (pending) {
			int bit = __ffs(pending);
			int irq = bit + VEC_COUNT_PER_REG * i;

			generic_handle_domain_irq(priv->eiointc_domain, irq);
			pending &= ~BIT(bit);
			handled = true;
		}
	}

	if (!handled)
		spurious_interrupt();

	chained_irq_exit(chip, desc);
}

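/*
 * Vectors are acknowledged by the ISR write-back in
 * eiointc_irq_dispatch() and are only masked transiently while being
 * rerouted, so the per-IRQ chip callbacks are intentionally empty.
 */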
static void eiointc_ack_irq(struct irq_data *d)
{
}

static void eiointc_mask_irq(struct irq_data *d)
{
}

static void eiointc_unmask_irq(struct irq_data *d)
{
}

static struct irq_chip eiointc_irq_chip = {
	.name			= "EIOINTC",
	.irq_ack		= eiointc_ack_irq,
	.irq_mask		= eiointc_mask_irq,
	.irq_unmask		= eiointc_unmask_irq,
	.irq_set_affinity	= eiointc_set_irq_affinity,
};

static int eiointc_domain_alloc(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs, void *arg)
{
	int ret;
	unsigned int i, type;
	unsigned long hwirq = 0;
	struct eiointc_priv *priv = domain->host_data;

	ret = irq_domain_translate_onecell(domain, arg, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		irq_domain_set_info(domain, virq + i, hwirq + i, &eiointc_irq_chip,
				    priv, handle_edge_irq, NULL, NULL);
	}

	return 0;
}

static void eiointc_domain_free(struct irq_domain *domain, unsigned int virq,
				unsigned int nr_irqs)
{
	int i;

	for (i = 0; i < nr_irqs; i++) {
		struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

		irq_set_handler(virq + i, NULL);
		irq_domain_reset_irq_data(d);
	}
}

static const struct irq_domain_ops eiointc_domain_ops = {
	.translate	= irq_domain_translate_onecell,
	.alloc		= eiointc_domain_alloc,
	.free		= eiointc_domain_free,
};

static void acpi_set_vec_parent(int node, struct irq_domain *parent, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node) {
			vec_group[i].parent = parent;
			return;
		}
	}
}

static struct irq_domain *acpi_get_vec_parent(int node, struct acpi_vector_group *vec_group)
{
	int i;

	for (i = 0; i < MAX_IO_PICS; i++) {
		if (node == vec_group[i].node)
			return vec_group[i].parent;
	}
	return NULL;
}

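/*
 * No state needs saving across suspend; resume simply reprograms the
 * routing registers on the boot CPU.
 */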
static int eiointc_suspend(void)
{
	return 0;
}

static void eiointc_resume(void)
{
	eiointc_router_init(0);
}

static struct syscore_ops eiointc_syscore_ops = {
	.suspend	= eiointc_suspend,
	.resume		= eiointc_resume,
};

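/*
 * The node of a PCH PIC is encoded in bits [47:44] of its MMIO address;
 * use it to attach the PIC below the eiointc domain of the same node.
 */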
static int __init pch_pic_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct acpi_madt_bio_pic *pchpic_entry = (struct acpi_madt_bio_pic *)header;
	unsigned int node = (pchpic_entry->address >> 44) & 0xf;
	struct irq_domain *parent = acpi_get_vec_parent(node, pch_group);

	if (parent)
		return pch_pic_acpi_init(parent, pchpic_entry);

	return 0;
}

static int __init pch_msi_parse_madt(union acpi_subtable_headers *header,
				     const unsigned long end)
{
	struct irq_domain *parent;
	struct acpi_madt_msi_pic *pchmsi_entry = (struct acpi_madt_msi_pic *)header;
	int node;

	if (cpu_has_flatmode)
		node = cpu_to_node(eiointc_priv[nr_pics - 1]->node * CORES_PER_EIO_NODE);
	else
		node = eiointc_priv[nr_pics - 1]->node;

	parent = acpi_get_vec_parent(node, msi_group);

	if (parent)
		return pch_msi_acpi_init(parent, pchmsi_entry);

	return 0;
}

static int __init acpi_cascade_irqdomain_init(void)
{
	int r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_BIO_PIC, pch_pic_parse_madt, 0);
	if (r < 0)
		return r;

	r = acpi_table_parse_madt(ACPI_MADT_TYPE_MSI_PIC, pch_msi_parse_madt, 1);
	if (r < 0)
		return r;

	return 0;
}

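/*
 * Common initialization for the ACPI and DT probe paths. A zero
 * @node_map means the controller spans every node. The node and CPU
 * span masks derived here bound later affinity changes to CPUs this
 * controller can actually reach.
 */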
static int __init eiointc_init(struct eiointc_priv *priv, int parent_irq,
			       u64 node_map)
{
	int i;

	node_map = node_map ? node_map : -1ULL;
	for_each_possible_cpu(i) {
		if (node_map & (1ULL << (cpu_to_eio_node(i)))) {
			node_set(cpu_to_eio_node(i), priv->node_map);
			cpumask_or(&priv->cpuspan_map, &priv->cpuspan_map,
				   cpumask_of(i));
		}
	}

	priv->eiointc_domain = irq_domain_create_linear(priv->domain_handle,
							priv->vec_count,
							&eiointc_domain_ops,
							priv);
	if (!priv->eiointc_domain) {
		pr_err("loongson-extioi: cannot add IRQ domain\n");
		return -ENOMEM;
	}

	eiointc_priv[nr_pics++] = priv;
	eiointc_router_init(0);
	irq_set_chained_handler_and_data(parent_irq, eiointc_irq_dispatch, priv);

	if (nr_pics == 1) {
		register_syscore_ops(&eiointc_syscore_ops);
		cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_LOONGARCH_STARTING,
					  "irqchip/loongarch/intc:starting",
					  eiointc_router_init, NULL);
	}

	return 0;
}

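/*
 * ACPI probe path: one controller per MADT EIO PIC entry. The cascade
 * interrupt is mapped in the parent (CPU INTC) domain, and the child
 * PCH PIC/MSI domains are wired up once this controller is registered.
 */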
int __init eiointc_acpi_init(struct irq_domain *parent,
			     struct acpi_madt_eio_pic *acpi_eiointc)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;
	int node;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->domain_handle = irq_domain_alloc_named_id_fwnode("EIOPIC",
							       acpi_eiointc->node);
	if (!priv->domain_handle) {
		pr_err("Unable to allocate domain handle\n");
		goto out_free_priv;
	}

	priv->vec_count = VEC_COUNT;
	priv->node = acpi_eiointc->node;

	parent_irq = irq_create_mapping(parent, acpi_eiointc->cascade);

	ret = eiointc_init(priv, parent_irq, acpi_eiointc->node_map);
	if (ret < 0)
		goto out_free_handle;

	if (cpu_has_flatmode)
		node = cpu_to_node(acpi_eiointc->node * CORES_PER_EIO_NODE);
	else
		node = acpi_eiointc->node;
	acpi_set_vec_parent(node, priv->eiointc_domain, pch_group);
	acpi_set_vec_parent(node, priv->eiointc_domain, msi_group);

	ret = acpi_cascade_irqdomain_init();
	if (ret < 0)
		goto out_free_handle;

	return ret;

out_free_handle:
	irq_domain_free_fwnode(priv->domain_handle);
	priv->domain_handle = NULL;
out_free_priv:
	kfree(priv);

	return -ENOMEM;
}

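/*
 * DT probe path: a single-node controller whose parent interrupt comes
 * from the first entry of the device node's "interrupts" property.
 */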
static int __init eiointc_of_init(struct device_node *of_node,
				  struct device_node *parent)
{
	int parent_irq, ret;
	struct eiointc_priv *priv;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	parent_irq = irq_of_parse_and_map(of_node, 0);
	if (parent_irq <= 0) {
		ret = -ENODEV;
		goto out_free_priv;
	}

	ret = irq_set_handler_data(parent_irq, priv);
	if (ret < 0)
		goto out_free_priv;

	/*
	 * The LS2K0500 variant of the extended I/O interrupt controller
	 * only implements 128 interrupt vectors.
	 */
	if (of_device_is_compatible(of_node, "loongson,ls2k0500-eiointc"))
		priv->vec_count = 128;
	else
		priv->vec_count = VEC_COUNT;

	priv->node = 0;
	priv->domain_handle = of_node_to_fwnode(of_node);

	ret = eiointc_init(priv, parent_irq, 0);
	if (ret < 0)
		goto out_free_priv;

	return 0;

out_free_priv:
	kfree(priv);
	return ret;
}

IRQCHIP_DECLARE(loongson_ls2k0500_eiointc, "loongson,ls2k0500-eiointc", eiointc_of_init);
IRQCHIP_DECLARE(loongson_ls2k2000_eiointc, "loongson,ls2k2000-eiointc", eiointc_of_init);