/*
 * Support for virtual IRQ subgroups.
 *
 * Copyright (C) 2010 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#define pr_fmt(fmt) "intc: " fmt

#include <linux/slab.h>
#include <linux/irq.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include "internals.h"

static struct intc_map_entry intc_irq_xlate[INTC_NR_IRQS];

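/*
 * Subgroup VIRQs are tracked as a simple singly-linked list hanging
 * off of the parent IRQ's handler data.
 */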
struct intc_virq_list {
	unsigned int irq;
	struct intc_virq_list *next;
};

#define for_each_virq(entry, head) \
	for (entry = head; entry; entry = entry->next)

/*
 * Tags for the radix tree
 */
#define INTC_TAG_VIRQ_NEEDS_ALLOC	0

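/*
 * Record the enum ID and owning controller for an IRQ in the xlate
 * table, serialized against other table updates by intc_big_lock.
 */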
void intc_irq_xlate_set(unsigned int irq, intc_enum id, struct intc_desc_int *d)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&intc_big_lock, flags);
	intc_irq_xlate[irq].enum_id = id;
	intc_irq_xlate[irq].desc = d;
	raw_spin_unlock_irqrestore(&intc_big_lock, flags);
}

struct intc_map_entry *intc_irq_xlate_get(unsigned int irq)
{
	return intc_irq_xlate + irq;
}

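/*
 * Look up the Linux IRQ number for an enum ID on the named controller.
 * Returns -1 if no mapping exists, or if the matching subgroup VIRQ
 * has not been allocated yet. An illustrative (not platform-specific)
 * use from board code might be:
 *
 *	int irq = intc_irq_lookup("intc-name", some_enum_id);
 */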
int intc_irq_lookup(const char *chipname, intc_enum enum_id)
{
	struct intc_map_entry *ptr;
	struct intc_desc_int *d;
	int irq = -1;

	list_for_each_entry(d, &intc_list, list) {
		int tagged;

		if (strcmp(d->chip.name, chipname) != 0)
			continue;

		/*
		 * Catch early lookups for subgroup VIRQs that have not
		 * yet been allocated an IRQ. This already includes a
		 * fast-path out if the tree is untagged, so there is no
		 * need to explicitly test the root tree.
		 */
		tagged = radix_tree_tag_get(&d->tree, enum_id,
					    INTC_TAG_VIRQ_NEEDS_ALLOC);
		if (unlikely(tagged))
			break;

		ptr = radix_tree_lookup(&d->tree, enum_id);
		if (ptr) {
			irq = ptr - intc_irq_xlate;
			break;
		}
	}

	return irq;
}
EXPORT_SYMBOL_GPL(intc_irq_lookup);

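/*
 * Link a subgroup VIRQ on to its parent IRQ's VIRQ list, appending to
 * the tail or installing a new list head if this is the first entry.
 * Duplicates are silently skipped.
 */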
static int add_virq_to_pirq(unsigned int irq, unsigned int virq)
{
	struct intc_virq_list *entry;
	struct intc_virq_list **last = NULL;

	/* scan for duplicates */
	for_each_virq(entry, irq_get_handler_data(irq)) {
		if (entry->irq == virq)
			return 0;
		last = &entry->next;
	}

	entry = kzalloc(sizeof(struct intc_virq_list), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	entry->irq = virq;

	if (last)
		*last = entry;
	else
		irq_set_handler_data(irq, entry);

	return 0;
}

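/*
 * Chained handler for the parent IRQ: mask and ack the parent, walk
 * the attached VIRQ list to dispatch any subgroup IRQs that are
 * pending, then unmask the parent again.
 */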
static void intc_virq_handler(struct irq_desc *desc)
{
	unsigned int irq = irq_desc_get_irq(desc);
	struct irq_data *data = irq_desc_get_irq_data(desc);
	struct irq_chip *chip = irq_data_get_irq_chip(data);
	struct intc_virq_list *entry, *vlist = irq_data_get_irq_handler_data(data);
	struct intc_desc_int *d = get_intc_desc(irq);

	chip->irq_mask_ack(data);

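	/*
	 * Poll each subgroup bit through the test handle packed in at
	 * registration time; only VIRQs whose bit reads back set get
	 * dispatched.
	 */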
	for_each_virq(entry, vlist) {
		unsigned long addr, handle;
		struct irq_desc *vdesc = irq_to_desc(entry->irq);

		if (vdesc) {
			handle = (unsigned long)irq_desc_get_handler_data(vdesc);
			addr = INTC_REG(d, _INTC_ADDR_E(handle), 0);
			if (intc_reg_fns[_INTC_FN(handle)](addr, handle, 0))
				generic_handle_irq_desc(vdesc);
		}
	}

	chip->irq_unmask(data);
}

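/*
 * Pack the access handle for one subgroup member: a register test
 * function matching the register width, the subgroup register index,
 * and a single-bit field positioned MSB-first.
 */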
static unsigned long __init intc_subgroup_data(struct intc_subgroup *subgroup,
					       struct intc_desc_int *d,
					       unsigned int index)
{
	unsigned int fn = REG_FN_TEST_BASE + (subgroup->reg_width >> 3) - 1;

	return _INTC_MK(fn, MODE_ENABLE_REG, intc_get_reg(d, subgroup->reg),
			0, 1, (subgroup->reg_width - 1) - index);
}

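/*
 * Seed the radix tree with an entry for each member of the subgroup
 * and tag it as still needing a VIRQ. The subgroup's parent IRQ must
 * already have been mapped through the xlate table.
 */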
static void __init intc_subgroup_init_one(struct intc_desc *desc,
					  struct intc_desc_int *d,
					  struct intc_subgroup *subgroup)
{
	struct intc_map_entry *mapped;
	unsigned int pirq;
	unsigned long flags;
	int i;

	mapped = radix_tree_lookup(&d->tree, subgroup->parent_id);
	if (!mapped) {
		WARN_ON(1);
		return;
	}

	pirq = mapped - intc_irq_xlate;

	raw_spin_lock_irqsave(&d->lock, flags);

	for (i = 0; i < ARRAY_SIZE(subgroup->enum_ids); i++) {
		struct intc_subgroup_entry *entry;
		int err;

		if (!subgroup->enum_ids[i])
			continue;

		entry = kmalloc(sizeof(*entry), GFP_NOWAIT);
		if (!entry)
			break;

		entry->pirq = pirq;
		entry->enum_id = subgroup->enum_ids[i];
		entry->handle = intc_subgroup_data(subgroup, d, i);

		err = radix_tree_insert(&d->tree, entry->enum_id, entry);
		if (unlikely(err < 0))
			break;

		radix_tree_tag_set(&d->tree, entry->enum_id,
				   INTC_TAG_VIRQ_NEEDS_ALLOC);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}

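/*
 * Register every subgroup provided in the hardware description.
 */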
void __init intc_subgroup_init(struct intc_desc *desc, struct intc_desc_int *d)
{
	int i;

	if (!desc->hw.subgroups)
		return;

	for (i = 0; i < desc->hw.nr_subgroups; i++)
		intc_subgroup_init_one(desc, d, desc->hw.subgroups + i);
}

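/*
 * Walk the tree for entries tagged as needing a VIRQ and wire each one
 * up: allocate a descriptor, inherit the parent's chip and chip data,
 * stash the register handle, and chain the parent through
 * intc_virq_handler(). Mapped entries have their tag cleared and their
 * slot repointed at the xlate table.
 */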
static void __init intc_subgroup_map(struct intc_desc_int *d)
{
	struct intc_subgroup_entry *entries[32];
	unsigned long flags;
	unsigned int nr_found;
	int i;

	raw_spin_lock_irqsave(&d->lock, flags);

restart:
	nr_found = radix_tree_gang_lookup_tag_slot(&d->tree,
			(void ***)entries, 0, ARRAY_SIZE(entries),
			INTC_TAG_VIRQ_NEEDS_ALLOC);

	for (i = 0; i < nr_found; i++) {
		struct intc_subgroup_entry *entry;
		int irq;

		entry = radix_tree_deref_slot((void **)entries[i]);
		if (unlikely(!entry))
			continue;
		if (radix_tree_deref_retry(entry))
			goto restart;

		irq = irq_alloc_desc(numa_node_id());
		if (unlikely(irq < 0)) {
			pr_err("no more free IRQs, bailing..\n");
			break;
		}

		activate_irq(irq);

		pr_info("Setting up a chained VIRQ from %d -> %d\n",
			irq, entry->pirq);

		intc_irq_xlate_set(irq, entry->enum_id, d);

		irq_set_chip_and_handler_name(irq, irq_get_chip(entry->pirq),
					      handle_simple_irq, "virq");
		irq_set_chip_data(irq, irq_get_chip_data(entry->pirq));

		irq_set_handler_data(irq, (void *)entry->handle);

		/*
		 * Set the virtual IRQ as non-threadable.
		 */
		irq_set_nothread(irq);

		/* Set handler data before installing the handler */
		add_virq_to_pirq(entry->pirq, irq);
		irq_set_chained_handler(entry->pirq, intc_virq_handler);

		radix_tree_tag_clear(&d->tree, entry->enum_id,
				     INTC_TAG_VIRQ_NEEDS_ALLOC);
		radix_tree_replace_slot(&d->tree, (void **)entries[i],
					&intc_irq_xlate[irq]);
	}

	raw_spin_unlock_irqrestore(&d->lock, flags);
}

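/*
 * Map any subgroup VIRQs that are still pending allocation across all
 * registered controllers.
 */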
void __init intc_finalize(void)
{
	struct intc_desc_int *d;

	list_for_each_entry(d, &intc_list, list)
		if (radix_tree_tagged(&d->tree, INTC_TAG_VIRQ_NEEDS_ALLOC))
			intc_subgroup_map(d);
}