1 | /* |
2 | * Synopsys DW APB ICTL irqchip driver. |
3 | * |
4 | * Sebastian Hesselbarth <sebastian.hesselbarth@gmail.com> |
5 | * |
6 | * based on GPL'ed 2.6 kernel sources |
7 | * (c) Marvell International Ltd. |
8 | * |
9 | * This file is licensed under the terms of the GNU General Public |
10 | * License version 2. This program is licensed "as is" without any |
11 | * warranty of any kind, whether express or implied. |
12 | */ |
13 | |
14 | #include <linux/io.h> |
15 | #include <linux/irq.h> |
16 | #include <linux/irqchip.h> |
17 | #include <linux/irqchip/chained_irq.h> |
18 | #include <linux/of_address.h> |
19 | #include <linux/of_irq.h> |
20 | #include <linux/interrupt.h> |
21 | |
/*
 * Register offsets within one 32-interrupt bank; _L/_H are the low and
 * high 32-bit halves of each 64-bit register pair.
 */
#define APB_INT_ENABLE_L 0x00
#define APB_INT_ENABLE_H 0x04
#define APB_INT_MASK_L 0x08
#define APB_INT_MASK_H 0x0c
#define APB_INT_FINALSTATUS_L 0x30
#define APB_INT_FINALSTATUS_H 0x34
/* Stride between banks: bank i's registers live at iobase + i * 0x04. */
#define APB_INT_BASE_OFFSET 0x04

/* irq domain of the primary interrupt controller. */
static struct irq_domain *dw_apb_ictl_irq_domain;
32 | |
33 | static void __irq_entry dw_apb_ictl_handle_irq(struct pt_regs *regs) |
34 | { |
35 | struct irq_domain *d = dw_apb_ictl_irq_domain; |
36 | int n; |
37 | |
38 | for (n = 0; n < d->revmap_size; n += 32) { |
39 | struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, n); |
40 | u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L); |
41 | |
42 | while (stat) { |
43 | u32 hwirq = ffs(stat) - 1; |
44 | |
45 | generic_handle_domain_irq(d, hwirq); |
46 | stat &= ~BIT(hwirq); |
47 | } |
48 | } |
49 | } |
50 | |
51 | static void dw_apb_ictl_handle_irq_cascaded(struct irq_desc *desc) |
52 | { |
53 | struct irq_domain *d = irq_desc_get_handler_data(desc); |
54 | struct irq_chip *chip = irq_desc_get_chip(desc); |
55 | int n; |
56 | |
57 | chained_irq_enter(chip, desc); |
58 | |
59 | for (n = 0; n < d->revmap_size; n += 32) { |
60 | struct irq_chip_generic *gc = irq_get_domain_generic_chip(d, hw_irq: n); |
61 | u32 stat = readl_relaxed(gc->reg_base + APB_INT_FINALSTATUS_L); |
62 | |
63 | while (stat) { |
64 | u32 hwirq = ffs(stat) - 1; |
65 | generic_handle_domain_irq(domain: d, hwirq: gc->irq_base + hwirq); |
66 | |
67 | stat &= ~BIT(hwirq); |
68 | } |
69 | } |
70 | |
71 | chained_irq_exit(chip, desc); |
72 | } |
73 | |
74 | static int dw_apb_ictl_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
75 | unsigned int nr_irqs, void *arg) |
76 | { |
77 | int i, ret; |
78 | irq_hw_number_t hwirq; |
79 | unsigned int type = IRQ_TYPE_NONE; |
80 | struct irq_fwspec *fwspec = arg; |
81 | |
82 | ret = irq_domain_translate_onecell(d: domain, fwspec, out_hwirq: &hwirq, out_type: &type); |
83 | if (ret) |
84 | return ret; |
85 | |
86 | for (i = 0; i < nr_irqs; i++) |
87 | irq_map_generic_chip(d: domain, virq: virq + i, hw_irq: hwirq + i); |
88 | |
89 | return 0; |
90 | } |
91 | |
/* Domain ops used only in the primary-controller configuration. */
static const struct irq_domain_ops dw_apb_ictl_irq_domain_ops = {
	.translate = irq_domain_translate_onecell,
	.alloc = dw_apb_ictl_irq_domain_alloc,
	.free = irq_domain_free_irqs_top,
};
97 | |
#ifdef CONFIG_PM
/*
 * Restore controller state after a system resume: re-enable all sources,
 * then rewrite the cached mask so lines masked before suspend stay masked.
 */
static void dw_apb_ictl_resume(struct irq_data *d)
{
	struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
	struct irq_chip_type *ct = irq_data_get_chip_type(d);

	irq_gc_lock(gc);
	writel_relaxed(~0, gc->reg_base + ct->regs.enable);
	writel_relaxed(*ct->mask_cache, gc->reg_base + ct->regs.mask);
	irq_gc_unlock(gc);
}
#else
#define dw_apb_ictl_resume NULL
#endif /* CONFIG_PM */
112 | |
113 | static int __init dw_apb_ictl_init(struct device_node *np, |
114 | struct device_node *parent) |
115 | { |
116 | const struct irq_domain_ops *domain_ops; |
117 | unsigned int clr = IRQ_NOREQUEST | IRQ_NOPROBE | IRQ_NOAUTOEN; |
118 | struct resource r; |
119 | struct irq_domain *domain; |
120 | struct irq_chip_generic *gc; |
121 | void __iomem *iobase; |
122 | int ret, nrirqs, parent_irq, i; |
123 | u32 reg; |
124 | |
125 | if (!parent) { |
126 | /* Used as the primary interrupt controller */ |
127 | parent_irq = 0; |
128 | domain_ops = &dw_apb_ictl_irq_domain_ops; |
129 | } else { |
130 | /* Map the parent interrupt for the chained handler */ |
131 | parent_irq = irq_of_parse_and_map(node: np, index: 0); |
132 | if (parent_irq <= 0) { |
133 | pr_err("%pOF: unable to parse irq\n" , np); |
134 | return -EINVAL; |
135 | } |
136 | domain_ops = &irq_generic_chip_ops; |
137 | } |
138 | |
139 | ret = of_address_to_resource(dev: np, index: 0, r: &r); |
140 | if (ret) { |
141 | pr_err("%pOF: unable to get resource\n" , np); |
142 | return ret; |
143 | } |
144 | |
145 | if (!request_mem_region(r.start, resource_size(&r), np->full_name)) { |
146 | pr_err("%pOF: unable to request mem region\n" , np); |
147 | return -ENOMEM; |
148 | } |
149 | |
150 | iobase = ioremap(offset: r.start, size: resource_size(res: &r)); |
151 | if (!iobase) { |
152 | pr_err("%pOF: unable to map resource\n" , np); |
153 | ret = -ENOMEM; |
154 | goto err_release; |
155 | } |
156 | |
157 | /* |
158 | * DW IP can be configured to allow 2-64 irqs. We can determine |
159 | * the number of irqs supported by writing into enable register |
160 | * and look for bits not set, as corresponding flip-flops will |
161 | * have been removed by synthesis tool. |
162 | */ |
163 | |
164 | /* mask and enable all interrupts */ |
165 | writel_relaxed(~0, iobase + APB_INT_MASK_L); |
166 | writel_relaxed(~0, iobase + APB_INT_MASK_H); |
167 | writel_relaxed(~0, iobase + APB_INT_ENABLE_L); |
168 | writel_relaxed(~0, iobase + APB_INT_ENABLE_H); |
169 | |
170 | reg = readl_relaxed(iobase + APB_INT_ENABLE_H); |
171 | if (reg) |
172 | nrirqs = 32 + fls(x: reg); |
173 | else |
174 | nrirqs = fls(readl_relaxed(iobase + APB_INT_ENABLE_L)); |
175 | |
176 | domain = irq_domain_add_linear(of_node: np, size: nrirqs, ops: domain_ops, NULL); |
177 | if (!domain) { |
178 | pr_err("%pOF: unable to add irq domain\n" , np); |
179 | ret = -ENOMEM; |
180 | goto err_unmap; |
181 | } |
182 | |
183 | ret = irq_alloc_domain_generic_chips(domain, 32, 1, np->name, |
184 | handle_level_irq, clr, 0, |
185 | IRQ_GC_INIT_MASK_CACHE); |
186 | if (ret) { |
187 | pr_err("%pOF: unable to alloc irq domain gc\n" , np); |
188 | goto err_unmap; |
189 | } |
190 | |
191 | for (i = 0; i < DIV_ROUND_UP(nrirqs, 32); i++) { |
192 | gc = irq_get_domain_generic_chip(d: domain, hw_irq: i * 32); |
193 | gc->reg_base = iobase + i * APB_INT_BASE_OFFSET; |
194 | gc->chip_types[0].regs.mask = APB_INT_MASK_L; |
195 | gc->chip_types[0].regs.enable = APB_INT_ENABLE_L; |
196 | gc->chip_types[0].chip.irq_mask = irq_gc_mask_set_bit; |
197 | gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit; |
198 | gc->chip_types[0].chip.irq_resume = dw_apb_ictl_resume; |
199 | } |
200 | |
201 | if (parent_irq) { |
202 | irq_set_chained_handler_and_data(irq: parent_irq, |
203 | handle: dw_apb_ictl_handle_irq_cascaded, data: domain); |
204 | } else { |
205 | dw_apb_ictl_irq_domain = domain; |
206 | set_handle_irq(dw_apb_ictl_handle_irq); |
207 | } |
208 | |
209 | return 0; |
210 | |
211 | err_unmap: |
212 | iounmap(addr: iobase); |
213 | err_release: |
214 | release_mem_region(r.start, resource_size(&r)); |
215 | return ret; |
216 | } |
217 | IRQCHIP_DECLARE(dw_apb_ictl, |
218 | "snps,dw-apb-ictl" , dw_apb_ictl_init); |
219 | |