1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Broadcom BCM6345 style Level 1 interrupt controller driver |
4 | * |
5 | * Copyright (C) 2014 Broadcom Corporation |
6 | * Copyright 2015 Simon Arlott |
7 | * |
8 | * This is based on the BCM7038 (which supports SMP) but with a single |
9 | * enable register instead of separate mask/set/clear registers. |
10 | * |
11 | * The BCM3380 has a similar mask/status register layout, but each pair |
12 | * of words is at separate locations (and SMP is not supported). |
13 | * |
14 | * ENABLE/STATUS words are packed next to each other for each CPU: |
15 | * |
16 | * BCM6368: |
17 | * 0x1000_0020: CPU0_W0_ENABLE |
18 | * 0x1000_0024: CPU0_W1_ENABLE |
 * 0x1000_0028: CPU0_W0_STATUS IRQs 32-63
20 | * 0x1000_002c: CPU0_W1_STATUS IRQs 0-31 |
21 | * 0x1000_0030: CPU1_W0_ENABLE |
22 | * 0x1000_0034: CPU1_W1_ENABLE |
 * 0x1000_0038: CPU1_W0_STATUS IRQs 32-63
24 | * 0x1000_003c: CPU1_W1_STATUS IRQs 0-31 |
25 | * |
26 | * BCM63168: |
27 | * 0x1000_0020: CPU0_W0_ENABLE |
28 | * 0x1000_0024: CPU0_W1_ENABLE |
29 | * 0x1000_0028: CPU0_W2_ENABLE |
30 | * 0x1000_002c: CPU0_W3_ENABLE |
31 | * 0x1000_0030: CPU0_W0_STATUS IRQs 96-127 |
32 | * 0x1000_0034: CPU0_W1_STATUS IRQs 64-95 |
33 | * 0x1000_0038: CPU0_W2_STATUS IRQs 32-63 |
34 | * 0x1000_003c: CPU0_W3_STATUS IRQs 0-31 |
35 | * 0x1000_0040: CPU1_W0_ENABLE |
36 | * 0x1000_0044: CPU1_W1_ENABLE |
37 | * 0x1000_0048: CPU1_W2_ENABLE |
38 | * 0x1000_004c: CPU1_W3_ENABLE |
39 | * 0x1000_0050: CPU1_W0_STATUS IRQs 96-127 |
40 | * 0x1000_0054: CPU1_W1_STATUS IRQs 64-95 |
41 | * 0x1000_0058: CPU1_W2_STATUS IRQs 32-63 |
42 | * 0x1000_005c: CPU1_W3_STATUS IRQs 0-31 |
43 | * |
44 | * IRQs are numbered in CPU native endian order |
45 | * (which is big-endian in these examples) |
46 | */ |
47 | |
48 | #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt |
49 | |
50 | #include <linux/bitops.h> |
51 | #include <linux/cpumask.h> |
52 | #include <linux/kernel.h> |
53 | #include <linux/init.h> |
54 | #include <linux/interrupt.h> |
55 | #include <linux/io.h> |
56 | #include <linux/ioport.h> |
57 | #include <linux/irq.h> |
58 | #include <linux/irqdomain.h> |
59 | #include <linux/module.h> |
60 | #include <linux/of.h> |
61 | #include <linux/of_irq.h> |
62 | #include <linux/of_address.h> |
63 | #include <linux/platform_device.h> |
64 | #include <linux/slab.h> |
65 | #include <linux/smp.h> |
66 | #include <linux/types.h> |
67 | #include <linux/irqchip.h> |
68 | #include <linux/irqchip/chained_irq.h> |
69 | |
/* Each enable/status word covers 32 interrupt lines. */
#define IRQS_PER_WORD 32
/* Register bytes per word per CPU: one ENABLE plus one STATUS u32. */
#define REG_BYTES_PER_IRQ_WORD (sizeof(u32) * 2)

struct bcm6345_l1_cpu;

/* Driver state for one L1 interrupt controller instance. */
struct bcm6345_l1_chip {
	raw_spinlock_t lock;		/* serializes enable_cache updates and writes */
	unsigned int n_words;		/* number of 32-bit enable/status words */
	struct irq_domain *domain;	/* linear domain of n_words * IRQS_PER_WORD hwirqs */
	struct cpumask cpumask;		/* CPUs whose register block probed successfully */
	struct bcm6345_l1_cpu *cpus[NR_CPUS];
};

/* Per-CPU register block state. */
struct bcm6345_l1_cpu {
	struct bcm6345_l1_chip *intc;	/* back-pointer to the owning controller */
	void __iomem *map_base;		/* ioremapped enable/status registers */
	unsigned int parent_irq;	/* chained parent interrupt for this CPU */
	u32 enable_cache[];		/* software shadow of the enable registers */
};
89 | |
90 | static inline unsigned int reg_enable(struct bcm6345_l1_chip *intc, |
91 | unsigned int word) |
92 | { |
93 | #ifdef __BIG_ENDIAN |
94 | return (1 * intc->n_words - word - 1) * sizeof(u32); |
95 | #else |
96 | return (0 * intc->n_words + word) * sizeof(u32); |
97 | #endif |
98 | } |
99 | |
100 | static inline unsigned int reg_status(struct bcm6345_l1_chip *intc, |
101 | unsigned int word) |
102 | { |
103 | #ifdef __BIG_ENDIAN |
104 | return (2 * intc->n_words - word - 1) * sizeof(u32); |
105 | #else |
106 | return (1 * intc->n_words + word) * sizeof(u32); |
107 | #endif |
108 | } |
109 | |
110 | static inline unsigned int cpu_for_irq(struct bcm6345_l1_chip *intc, |
111 | struct irq_data *d) |
112 | { |
113 | return cpumask_first_and(srcp1: &intc->cpumask, srcp2: irq_data_get_affinity_mask(d)); |
114 | } |
115 | |
116 | static void bcm6345_l1_irq_handle(struct irq_desc *desc) |
117 | { |
118 | struct bcm6345_l1_cpu *cpu = irq_desc_get_handler_data(desc); |
119 | struct bcm6345_l1_chip *intc = cpu->intc; |
120 | struct irq_chip *chip = irq_desc_get_chip(desc); |
121 | unsigned int idx; |
122 | |
123 | chained_irq_enter(chip, desc); |
124 | |
125 | for (idx = 0; idx < intc->n_words; idx++) { |
126 | int base = idx * IRQS_PER_WORD; |
127 | unsigned long pending; |
128 | irq_hw_number_t hwirq; |
129 | |
130 | pending = __raw_readl(addr: cpu->map_base + reg_status(intc, word: idx)); |
131 | pending &= __raw_readl(addr: cpu->map_base + reg_enable(intc, word: idx)); |
132 | |
133 | for_each_set_bit(hwirq, &pending, IRQS_PER_WORD) { |
134 | if (generic_handle_domain_irq(domain: intc->domain, hwirq: base + hwirq)) |
135 | spurious_interrupt(); |
136 | } |
137 | } |
138 | |
139 | chained_irq_exit(chip, desc); |
140 | } |
141 | |
142 | static inline void __bcm6345_l1_unmask(struct irq_data *d) |
143 | { |
144 | struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); |
145 | u32 word = d->hwirq / IRQS_PER_WORD; |
146 | u32 mask = BIT(d->hwirq % IRQS_PER_WORD); |
147 | unsigned int cpu_idx = cpu_for_irq(intc, d); |
148 | |
149 | intc->cpus[cpu_idx]->enable_cache[word] |= mask; |
150 | __raw_writel(val: intc->cpus[cpu_idx]->enable_cache[word], |
151 | addr: intc->cpus[cpu_idx]->map_base + reg_enable(intc, word)); |
152 | } |
153 | |
154 | static inline void __bcm6345_l1_mask(struct irq_data *d) |
155 | { |
156 | struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); |
157 | u32 word = d->hwirq / IRQS_PER_WORD; |
158 | u32 mask = BIT(d->hwirq % IRQS_PER_WORD); |
159 | unsigned int cpu_idx = cpu_for_irq(intc, d); |
160 | |
161 | intc->cpus[cpu_idx]->enable_cache[word] &= ~mask; |
162 | __raw_writel(val: intc->cpus[cpu_idx]->enable_cache[word], |
163 | addr: intc->cpus[cpu_idx]->map_base + reg_enable(intc, word)); |
164 | } |
165 | |
166 | static void bcm6345_l1_unmask(struct irq_data *d) |
167 | { |
168 | struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); |
169 | unsigned long flags; |
170 | |
171 | raw_spin_lock_irqsave(&intc->lock, flags); |
172 | __bcm6345_l1_unmask(d); |
173 | raw_spin_unlock_irqrestore(&intc->lock, flags); |
174 | } |
175 | |
176 | static void bcm6345_l1_mask(struct irq_data *d) |
177 | { |
178 | struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); |
179 | unsigned long flags; |
180 | |
181 | raw_spin_lock_irqsave(&intc->lock, flags); |
182 | __bcm6345_l1_mask(d); |
183 | raw_spin_unlock_irqrestore(&intc->lock, flags); |
184 | } |
185 | |
186 | static int bcm6345_l1_set_affinity(struct irq_data *d, |
187 | const struct cpumask *dest, |
188 | bool force) |
189 | { |
190 | struct bcm6345_l1_chip *intc = irq_data_get_irq_chip_data(d); |
191 | u32 word = d->hwirq / IRQS_PER_WORD; |
192 | u32 mask = BIT(d->hwirq % IRQS_PER_WORD); |
193 | unsigned int old_cpu = cpu_for_irq(intc, d); |
194 | unsigned int new_cpu; |
195 | struct cpumask valid; |
196 | unsigned long flags; |
197 | bool enabled; |
198 | |
199 | if (!cpumask_and(dstp: &valid, src1p: &intc->cpumask, src2p: dest)) |
200 | return -EINVAL; |
201 | |
202 | new_cpu = cpumask_any_and(&valid, cpu_online_mask); |
203 | if (new_cpu >= nr_cpu_ids) |
204 | return -EINVAL; |
205 | |
206 | dest = cpumask_of(new_cpu); |
207 | |
208 | raw_spin_lock_irqsave(&intc->lock, flags); |
209 | if (old_cpu != new_cpu) { |
210 | enabled = intc->cpus[old_cpu]->enable_cache[word] & mask; |
211 | if (enabled) |
212 | __bcm6345_l1_mask(d); |
213 | irq_data_update_affinity(d, m: dest); |
214 | if (enabled) |
215 | __bcm6345_l1_unmask(d); |
216 | } else { |
217 | irq_data_update_affinity(d, m: dest); |
218 | } |
219 | raw_spin_unlock_irqrestore(&intc->lock, flags); |
220 | |
221 | irq_data_update_effective_affinity(d, cpumask_of(new_cpu)); |
222 | |
223 | return IRQ_SET_MASK_OK_NOCOPY; |
224 | } |
225 | |
226 | static int __init bcm6345_l1_init_one(struct device_node *dn, |
227 | unsigned int idx, |
228 | struct bcm6345_l1_chip *intc) |
229 | { |
230 | struct resource res; |
231 | resource_size_t sz; |
232 | struct bcm6345_l1_cpu *cpu; |
233 | unsigned int i, n_words; |
234 | |
235 | if (of_address_to_resource(dev: dn, index: idx, r: &res)) |
236 | return -EINVAL; |
237 | sz = resource_size(res: &res); |
238 | n_words = sz / REG_BYTES_PER_IRQ_WORD; |
239 | |
240 | if (!intc->n_words) |
241 | intc->n_words = n_words; |
242 | else if (intc->n_words != n_words) |
243 | return -EINVAL; |
244 | |
245 | cpu = intc->cpus[idx] = kzalloc(struct_size(cpu, enable_cache, n_words), |
246 | GFP_KERNEL); |
247 | if (!cpu) |
248 | return -ENOMEM; |
249 | |
250 | cpu->intc = intc; |
251 | cpu->map_base = ioremap(offset: res.start, size: sz); |
252 | if (!cpu->map_base) |
253 | return -ENOMEM; |
254 | |
255 | if (!request_mem_region(res.start, sz, res.name)) |
256 | pr_err("failed to request intc memory" ); |
257 | |
258 | for (i = 0; i < n_words; i++) { |
259 | cpu->enable_cache[i] = 0; |
260 | __raw_writel(val: 0, addr: cpu->map_base + reg_enable(intc, word: i)); |
261 | } |
262 | |
263 | cpu->parent_irq = irq_of_parse_and_map(node: dn, index: idx); |
264 | if (!cpu->parent_irq) { |
265 | pr_err("failed to map parent interrupt %d\n" , cpu->parent_irq); |
266 | return -EINVAL; |
267 | } |
268 | irq_set_chained_handler_and_data(irq: cpu->parent_irq, |
269 | handle: bcm6345_l1_irq_handle, data: cpu); |
270 | |
271 | return 0; |
272 | } |
273 | |
274 | static struct irq_chip bcm6345_l1_irq_chip = { |
275 | .name = "bcm6345-l1" , |
276 | .irq_mask = bcm6345_l1_mask, |
277 | .irq_unmask = bcm6345_l1_unmask, |
278 | .irq_set_affinity = bcm6345_l1_set_affinity, |
279 | }; |
280 | |
281 | static int bcm6345_l1_map(struct irq_domain *d, unsigned int virq, |
282 | irq_hw_number_t hw_irq) |
283 | { |
284 | irq_set_chip_and_handler(irq: virq, |
285 | chip: &bcm6345_l1_irq_chip, handle: handle_percpu_irq); |
286 | irq_set_chip_data(irq: virq, data: d->host_data); |
287 | irqd_set_single_target(d: irq_desc_get_irq_data(desc: irq_to_desc(irq: virq))); |
288 | return 0; |
289 | } |
290 | |
291 | static const struct irq_domain_ops bcm6345_l1_domain_ops = { |
292 | .xlate = irq_domain_xlate_onecell, |
293 | .map = bcm6345_l1_map, |
294 | }; |
295 | |
296 | static int __init bcm6345_l1_of_init(struct device_node *dn, |
297 | struct device_node *parent) |
298 | { |
299 | struct bcm6345_l1_chip *intc; |
300 | unsigned int idx; |
301 | int ret; |
302 | |
303 | intc = kzalloc(size: sizeof(*intc), GFP_KERNEL); |
304 | if (!intc) |
305 | return -ENOMEM; |
306 | |
307 | for_each_possible_cpu(idx) { |
308 | ret = bcm6345_l1_init_one(dn, idx, intc); |
309 | if (ret) |
310 | pr_err("failed to init intc L1 for cpu %d: %d\n" , |
311 | idx, ret); |
312 | else |
313 | cpumask_set_cpu(cpu: idx, dstp: &intc->cpumask); |
314 | } |
315 | |
316 | if (cpumask_empty(srcp: &intc->cpumask)) { |
317 | ret = -ENODEV; |
318 | goto out_free; |
319 | } |
320 | |
321 | raw_spin_lock_init(&intc->lock); |
322 | |
323 | intc->domain = irq_domain_add_linear(of_node: dn, IRQS_PER_WORD * intc->n_words, |
324 | ops: &bcm6345_l1_domain_ops, |
325 | host_data: intc); |
326 | if (!intc->domain) { |
327 | ret = -ENOMEM; |
328 | goto out_unmap; |
329 | } |
330 | |
331 | pr_info("registered BCM6345 L1 intc (IRQs: %d)\n" , |
332 | IRQS_PER_WORD * intc->n_words); |
333 | for_each_cpu(idx, &intc->cpumask) { |
334 | struct bcm6345_l1_cpu *cpu = intc->cpus[idx]; |
335 | |
336 | pr_info(" CPU%u (irq = %d)\n" , idx, cpu->parent_irq); |
337 | } |
338 | |
339 | return 0; |
340 | |
341 | out_unmap: |
342 | for_each_possible_cpu(idx) { |
343 | struct bcm6345_l1_cpu *cpu = intc->cpus[idx]; |
344 | |
345 | if (cpu) { |
346 | if (cpu->map_base) |
347 | iounmap(addr: cpu->map_base); |
348 | kfree(objp: cpu); |
349 | } |
350 | } |
351 | out_free: |
352 | kfree(objp: intc); |
353 | return ret; |
354 | } |
355 | |
/* Register as a built-in irqchip, matched by DT compatible string. */
IRQCHIP_DECLARE(bcm6345_l1, "brcm,bcm6345-l1-intc" , bcm6345_l1_of_init);
357 | |