1 | /* |
2 | * Copyright (C) 2017 Marvell |
3 | * |
4 | * Hanna Hawa <hannah@marvell.com> |
5 | * Thomas Petazzoni <thomas.petazzoni@free-electrons.com> |
6 | * |
7 | * This file is licensed under the terms of the GNU General Public |
8 | * License version 2. This program is licensed "as is" without any |
9 | * warranty of any kind, whether express or implied. |
10 | */ |
11 | |
12 | #include <linux/interrupt.h> |
13 | #include <linux/irq.h> |
14 | #include <linux/irqchip.h> |
15 | #include <linux/irqdomain.h> |
16 | #include <linux/jump_label.h> |
17 | #include <linux/kernel.h> |
18 | #include <linux/msi.h> |
19 | #include <linux/of_irq.h> |
20 | #include <linux/of_platform.h> |
21 | #include <linux/platform_device.h> |
22 | |
23 | #include <dt-bindings/interrupt-controller/mvebu-icu.h> |
24 | |
/* ICU registers */
#define ICU_SETSPI_NSR_AL	0x10	/* NSR 'set' SPI message address, low 32 bits */
#define ICU_SETSPI_NSR_AH	0x14	/* NSR 'set' SPI message address, high 32 bits */
#define ICU_CLRSPI_NSR_AL	0x18	/* NSR 'clear' SPI message address, low 32 bits */
#define ICU_CLRSPI_NSR_AH	0x1c	/* NSR 'clear' SPI message address, high 32 bits */
#define ICU_SET_SEI_AL		0x50	/* SEI 'set' message address, low 32 bits */
#define ICU_SET_SEI_AH		0x54	/* SEI 'set' message address, high 32 bits */
#define ICU_CLR_SEI_AL		0x58	/* SEI 'clear' message address, low 32 bits */
#define ICU_CLR_SEI_AH		0x5C	/* SEI 'clear' message address, high 32 bits */
#define ICU_INT_CFG(x)		(0x100 + 4 * (x))	/* per-interrupt config register */
#define ICU_INT_ENABLE		BIT(24)	/* enable bit inside ICU_INT_CFG */
#define ICU_IS_EDGE		BIT(28)	/* edge (vs level) bit inside ICU_INT_CFG */
#define ICU_GROUP_SHIFT		29	/* ICU group field inside ICU_INT_CFG */

/* ICU definitions */
#define ICU_MAX_IRQS		207	/* number of ICU_INT_CFG entries */
#define ICU_SATA0_ICU_ID	109	/* wired interrupt IDs of the two SATA ports */
#define ICU_SATA1_ICU_ID	107
43 | |
/*
 * Per-subset (NSR or SEI) data: the ICU group programmed into each
 * ICU_INT_CFG entry, plus the register offsets of the set/clear
 * message address registers for that group.
 */
struct mvebu_icu_subset_data {
	unsigned int icu_group;		/* ICU_GRP_* value for this subset */
	unsigned int offset_set_ah;	/* 'set' message address, high 32 bits */
	unsigned int offset_set_al;	/* 'set' message address, low 32 bits */
	unsigned int offset_clr_ah;	/* 'clear' message address, high 32 bits (NSR only) */
	unsigned int offset_clr_al;	/* 'clear' message address, low 32 bits (NSR only) */
};
51 | |
/* One ICU hardware instance: its mapped registers and owning device. */
struct mvebu_icu {
	void __iomem *base;	/* mapped ICU register window */
	struct device *dev;
};
56 | |
/* Per-MSI-domain data, one per ICU subset (NSR or SEI). */
struct mvebu_icu_msi_data {
	struct mvebu_icu *icu;
	atomic_t initialized;	/* guards one-off message-address setup */
	const struct mvebu_icu_subset_data *subset_data;
};
62 | |
/* Per-interrupt chip_data attached at domain-alloc time. */
struct mvebu_icu_irq_data {
	struct mvebu_icu *icu;
	unsigned int icu_group;	/* ICU_GRP_* this interrupt belongs to */
	unsigned int type;	/* IRQ_TYPE_* trigger type from the fwspec */
};
68 | |
/* Set when the DT uses the old single-node (NSR-only) ICU binding. */
static DEFINE_STATIC_KEY_FALSE(legacy_bindings);
70 | |
71 | static void mvebu_icu_init(struct mvebu_icu *icu, |
72 | struct mvebu_icu_msi_data *msi_data, |
73 | struct msi_msg *msg) |
74 | { |
75 | const struct mvebu_icu_subset_data *subset = msi_data->subset_data; |
76 | |
77 | if (atomic_cmpxchg(v: &msi_data->initialized, old: false, new: true)) |
78 | return; |
79 | |
80 | /* Set 'SET' ICU SPI message address in AP */ |
81 | writel_relaxed(msg[0].address_hi, icu->base + subset->offset_set_ah); |
82 | writel_relaxed(msg[0].address_lo, icu->base + subset->offset_set_al); |
83 | |
84 | if (subset->icu_group != ICU_GRP_NSR) |
85 | return; |
86 | |
87 | /* Set 'CLEAR' ICU SPI message address in AP (level-MSI only) */ |
88 | writel_relaxed(msg[1].address_hi, icu->base + subset->offset_clr_ah); |
89 | writel_relaxed(msg[1].address_lo, icu->base + subset->offset_clr_al); |
90 | } |
91 | |
92 | static void mvebu_icu_write_msg(struct msi_desc *desc, struct msi_msg *msg) |
93 | { |
94 | struct irq_data *d = irq_get_irq_data(irq: desc->irq); |
95 | struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain: d->domain); |
96 | struct mvebu_icu_irq_data *icu_irqd = d->chip_data; |
97 | struct mvebu_icu *icu = icu_irqd->icu; |
98 | unsigned int icu_int; |
99 | |
100 | if (msg->address_lo || msg->address_hi) { |
101 | /* One off initialization per domain */ |
102 | mvebu_icu_init(icu, msi_data, msg); |
103 | /* Configure the ICU with irq number & type */ |
104 | icu_int = msg->data | ICU_INT_ENABLE; |
105 | if (icu_irqd->type & IRQ_TYPE_EDGE_RISING) |
106 | icu_int |= ICU_IS_EDGE; |
107 | icu_int |= icu_irqd->icu_group << ICU_GROUP_SHIFT; |
108 | } else { |
109 | /* De-configure the ICU */ |
110 | icu_int = 0; |
111 | } |
112 | |
113 | writel_relaxed(icu_int, icu->base + ICU_INT_CFG(d->hwirq)); |
114 | |
115 | /* |
116 | * The SATA unit has 2 ports, and a dedicated ICU entry per |
117 | * port. The ahci sata driver supports only one irq interrupt |
118 | * per SATA unit. To solve this conflict, we configure the 2 |
119 | * SATA wired interrupts in the south bridge into 1 GIC |
120 | * interrupt in the north bridge. Even if only a single port |
121 | * is enabled, if sata node is enabled, both interrupts are |
122 | * configured (regardless of which port is actually in use). |
123 | */ |
124 | if (d->hwirq == ICU_SATA0_ICU_ID || d->hwirq == ICU_SATA1_ICU_ID) { |
125 | writel_relaxed(icu_int, |
126 | icu->base + ICU_INT_CFG(ICU_SATA0_ICU_ID)); |
127 | writel_relaxed(icu_int, |
128 | icu->base + ICU_INT_CFG(ICU_SATA1_ICU_ID)); |
129 | } |
130 | } |
131 | |
/* irq_chip for NSR (level) interrupts; all operations forward to the parent. */
static struct irq_chip mvebu_icu_nsr_chip = {
	.name			= "ICU-NSR",
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};
140 | |
/* irq_chip for SEI (edge) interrupts; uses ack instead of eoi. */
static struct irq_chip mvebu_icu_sei_chip = {
	.name			= "ICU-SEI",
	.irq_ack		= irq_chip_ack_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_set_type		= irq_chip_set_type_parent,
	.irq_set_affinity	= irq_chip_set_affinity_parent,
};
149 | |
150 | static int |
151 | mvebu_icu_irq_domain_translate(struct irq_domain *d, struct irq_fwspec *fwspec, |
152 | unsigned long *hwirq, unsigned int *type) |
153 | { |
154 | unsigned int param_count = static_branch_unlikely(&legacy_bindings) ? 3 : 2; |
155 | struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain: d); |
156 | struct mvebu_icu *icu = msi_data->icu; |
157 | |
158 | /* Check the count of the parameters in dt */ |
159 | if (WARN_ON(fwspec->param_count != param_count)) { |
160 | dev_err(icu->dev, "wrong ICU parameter count %d\n" , |
161 | fwspec->param_count); |
162 | return -EINVAL; |
163 | } |
164 | |
165 | if (static_branch_unlikely(&legacy_bindings)) { |
166 | *hwirq = fwspec->param[1]; |
167 | *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; |
168 | if (fwspec->param[0] != ICU_GRP_NSR) { |
169 | dev_err(icu->dev, "wrong ICU group type %x\n" , |
170 | fwspec->param[0]); |
171 | return -EINVAL; |
172 | } |
173 | } else { |
174 | *hwirq = fwspec->param[0]; |
175 | *type = fwspec->param[1] & IRQ_TYPE_SENSE_MASK; |
176 | |
177 | /* |
178 | * The ICU receives level interrupts. While the NSR are also |
179 | * level interrupts, SEI are edge interrupts. Force the type |
180 | * here in this case. Please note that this makes the interrupt |
181 | * handling unreliable. |
182 | */ |
183 | if (msi_data->subset_data->icu_group == ICU_GRP_SEI) |
184 | *type = IRQ_TYPE_EDGE_RISING; |
185 | } |
186 | |
187 | if (*hwirq >= ICU_MAX_IRQS) { |
188 | dev_err(icu->dev, "invalid interrupt number %ld\n" , *hwirq); |
189 | return -EINVAL; |
190 | } |
191 | |
192 | return 0; |
193 | } |
194 | |
195 | static int |
196 | mvebu_icu_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
197 | unsigned int nr_irqs, void *args) |
198 | { |
199 | int err; |
200 | unsigned long hwirq; |
201 | struct irq_fwspec *fwspec = args; |
202 | struct mvebu_icu_msi_data *msi_data = platform_msi_get_host_data(domain); |
203 | struct mvebu_icu *icu = msi_data->icu; |
204 | struct mvebu_icu_irq_data *icu_irqd; |
205 | struct irq_chip *chip = &mvebu_icu_nsr_chip; |
206 | |
207 | icu_irqd = kmalloc(size: sizeof(*icu_irqd), GFP_KERNEL); |
208 | if (!icu_irqd) |
209 | return -ENOMEM; |
210 | |
211 | err = mvebu_icu_irq_domain_translate(d: domain, fwspec, hwirq: &hwirq, |
212 | type: &icu_irqd->type); |
213 | if (err) { |
214 | dev_err(icu->dev, "failed to translate ICU parameters\n" ); |
215 | goto free_irqd; |
216 | } |
217 | |
218 | if (static_branch_unlikely(&legacy_bindings)) |
219 | icu_irqd->icu_group = fwspec->param[0]; |
220 | else |
221 | icu_irqd->icu_group = msi_data->subset_data->icu_group; |
222 | icu_irqd->icu = icu; |
223 | |
224 | err = platform_msi_device_domain_alloc(domain, virq, nr_irqs); |
225 | if (err) { |
226 | dev_err(icu->dev, "failed to allocate ICU interrupt in parent domain\n" ); |
227 | goto free_irqd; |
228 | } |
229 | |
230 | /* Make sure there is no interrupt left pending by the firmware */ |
231 | err = irq_set_irqchip_state(irq: virq, which: IRQCHIP_STATE_PENDING, state: false); |
232 | if (err) |
233 | goto free_msi; |
234 | |
235 | if (icu_irqd->icu_group == ICU_GRP_SEI) |
236 | chip = &mvebu_icu_sei_chip; |
237 | |
238 | err = irq_domain_set_hwirq_and_chip(domain, virq, hwirq, |
239 | chip, chip_data: icu_irqd); |
240 | if (err) { |
241 | dev_err(icu->dev, "failed to set the data to IRQ domain\n" ); |
242 | goto free_msi; |
243 | } |
244 | |
245 | return 0; |
246 | |
247 | free_msi: |
248 | platform_msi_device_domain_free(domain, virq, nvec: nr_irqs); |
249 | free_irqd: |
250 | kfree(objp: icu_irqd); |
251 | return err; |
252 | } |
253 | |
254 | static void |
255 | mvebu_icu_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
256 | unsigned int nr_irqs) |
257 | { |
258 | struct irq_data *d = irq_get_irq_data(irq: virq); |
259 | struct mvebu_icu_irq_data *icu_irqd = d->chip_data; |
260 | |
261 | kfree(objp: icu_irqd); |
262 | |
263 | platform_msi_device_domain_free(domain, virq, nvec: nr_irqs); |
264 | } |
265 | |
/* Domain operations for the per-subset platform-MSI domains. */
static const struct irq_domain_ops mvebu_icu_domain_ops = {
	.translate = mvebu_icu_irq_domain_translate,
	.alloc     = mvebu_icu_irq_domain_alloc,
	.free      = mvebu_icu_irq_domain_free,
};
271 | |
/* NSR subset: level interrupts, both set and clear message addresses. */
static const struct mvebu_icu_subset_data mvebu_icu_nsr_subset_data = {
	.icu_group = ICU_GRP_NSR,
	.offset_set_ah = ICU_SETSPI_NSR_AH,
	.offset_set_al = ICU_SETSPI_NSR_AL,
	.offset_clr_ah = ICU_CLRSPI_NSR_AH,
	.offset_clr_al = ICU_CLRSPI_NSR_AL,
};
279 | |
/* SEI subset: edge interrupts, 'set' message address only. */
static const struct mvebu_icu_subset_data mvebu_icu_sei_subset_data = {
	.icu_group = ICU_GRP_SEI,
	.offset_set_ah = ICU_SET_SEI_AH,
	.offset_set_al = ICU_SET_SEI_AL,
};
285 | |
/* New-binding child nodes, one per ICU subset (NSR / SEI). */
static const struct of_device_id mvebu_icu_subset_of_match[] = {
	{
		.compatible = "marvell,cp110-icu-nsr",
		.data = &mvebu_icu_nsr_subset_data,
	},
	{
		.compatible = "marvell,cp110-icu-sei",
		.data = &mvebu_icu_sei_subset_data,
	},
	{},
};
297 | |
298 | static int mvebu_icu_subset_probe(struct platform_device *pdev) |
299 | { |
300 | struct mvebu_icu_msi_data *msi_data; |
301 | struct device_node *msi_parent_dn; |
302 | struct device *dev = &pdev->dev; |
303 | struct irq_domain *irq_domain; |
304 | |
305 | msi_data = devm_kzalloc(dev, size: sizeof(*msi_data), GFP_KERNEL); |
306 | if (!msi_data) |
307 | return -ENOMEM; |
308 | |
309 | if (static_branch_unlikely(&legacy_bindings)) { |
310 | msi_data->icu = dev_get_drvdata(dev); |
311 | msi_data->subset_data = &mvebu_icu_nsr_subset_data; |
312 | } else { |
313 | msi_data->icu = dev_get_drvdata(dev: dev->parent); |
314 | msi_data->subset_data = of_device_get_match_data(dev); |
315 | } |
316 | |
317 | dev->msi.domain = of_msi_get_domain(dev, np: dev->of_node, |
318 | token: DOMAIN_BUS_PLATFORM_MSI); |
319 | if (!dev->msi.domain) |
320 | return -EPROBE_DEFER; |
321 | |
322 | msi_parent_dn = irq_domain_get_of_node(d: dev->msi.domain); |
323 | if (!msi_parent_dn) |
324 | return -ENODEV; |
325 | |
326 | irq_domain = platform_msi_create_device_tree_domain(dev, ICU_MAX_IRQS, |
327 | mvebu_icu_write_msg, |
328 | &mvebu_icu_domain_ops, |
329 | msi_data); |
330 | if (!irq_domain) { |
331 | dev_err(dev, "Failed to create ICU MSI domain\n" ); |
332 | return -ENOMEM; |
333 | } |
334 | |
335 | return 0; |
336 | } |
337 | |
/* Driver bound to the per-subset child nodes (new bindings only). */
static struct platform_driver mvebu_icu_subset_driver = {
	.probe  = mvebu_icu_subset_probe,
	.driver = {
		.name = "mvebu-icu-subset",
		.of_match_table = mvebu_icu_subset_of_match,
	},
};
builtin_platform_driver(mvebu_icu_subset_driver);
346 | |
347 | static int mvebu_icu_probe(struct platform_device *pdev) |
348 | { |
349 | struct mvebu_icu *icu; |
350 | int i; |
351 | |
352 | icu = devm_kzalloc(dev: &pdev->dev, size: sizeof(struct mvebu_icu), |
353 | GFP_KERNEL); |
354 | if (!icu) |
355 | return -ENOMEM; |
356 | |
357 | icu->dev = &pdev->dev; |
358 | |
359 | icu->base = devm_platform_ioremap_resource(pdev, index: 0); |
360 | if (IS_ERR(ptr: icu->base)) |
361 | return PTR_ERR(ptr: icu->base); |
362 | |
363 | /* |
364 | * Legacy bindings: ICU is one node with one MSI parent: force manually |
365 | * the probe of the NSR interrupts side. |
366 | * New bindings: ICU node has children, one per interrupt controller |
367 | * having its own MSI parent: call platform_populate(). |
368 | * All ICU instances should use the same bindings. |
369 | */ |
370 | if (!of_get_child_count(np: pdev->dev.of_node)) |
371 | static_branch_enable(&legacy_bindings); |
372 | |
373 | /* |
374 | * Clean all ICU interrupts of type NSR and SEI, required to |
375 | * avoid unpredictable SPI assignments done by firmware. |
376 | */ |
377 | for (i = 0 ; i < ICU_MAX_IRQS ; i++) { |
378 | u32 icu_int, icu_grp; |
379 | |
380 | icu_int = readl_relaxed(icu->base + ICU_INT_CFG(i)); |
381 | icu_grp = icu_int >> ICU_GROUP_SHIFT; |
382 | |
383 | if (icu_grp == ICU_GRP_NSR || |
384 | (icu_grp == ICU_GRP_SEI && |
385 | !static_branch_unlikely(&legacy_bindings))) |
386 | writel_relaxed(0x0, icu->base + ICU_INT_CFG(i)); |
387 | } |
388 | |
389 | platform_set_drvdata(pdev, data: icu); |
390 | |
391 | if (static_branch_unlikely(&legacy_bindings)) |
392 | return mvebu_icu_subset_probe(pdev); |
393 | else |
394 | return devm_of_platform_populate(dev: &pdev->dev); |
395 | } |
396 | |
/* Top-level ICU node match table. */
static const struct of_device_id mvebu_icu_of_match[] = {
	{ .compatible = "marvell,cp110-icu", },
	{},
};
401 | |
/* Driver bound to the top-level ICU node. */
static struct platform_driver mvebu_icu_driver = {
	.probe  = mvebu_icu_probe,
	.driver = {
		.name = "mvebu-icu",
		.of_match_table = mvebu_icu_of_match,
	},
};
builtin_platform_driver(mvebu_icu_driver);
410 | |