1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_IRQ_H |
3 | #define _LINUX_IRQ_H |
4 | |
5 | /* |
6 | * Please do not include this file in generic code. There is currently |
7 | * no requirement for any architecture to implement anything held |
8 | * within this file. |
9 | * |
10 | * Thanks. --rmk |
11 | */ |
12 | |
13 | #include <linux/cache.h> |
14 | #include <linux/spinlock.h> |
15 | #include <linux/cpumask.h> |
16 | #include <linux/irqhandler.h> |
17 | #include <linux/irqreturn.h> |
18 | #include <linux/irqnr.h> |
19 | #include <linux/topology.h> |
20 | #include <linux/io.h> |
21 | #include <linux/slab.h> |
22 | |
23 | #include <asm/irq.h> |
24 | #include <asm/ptrace.h> |
25 | #include <asm/irq_regs.h> |
26 | |
27 | struct seq_file; |
28 | struct module; |
29 | struct msi_msg; |
30 | struct irq_affinity_desc; |
31 | enum irqchip_irq_state; |
32 | |
33 | /* |
34 | * IRQ line status. |
35 | * |
36 | * Bits 0-7 are the same as the IRQF_* bits in linux/interrupt.h |
37 | * |
38 | * IRQ_TYPE_NONE - default, unspecified type |
39 | * IRQ_TYPE_EDGE_RISING - rising edge triggered |
40 | * IRQ_TYPE_EDGE_FALLING - falling edge triggered |
41 | * IRQ_TYPE_EDGE_BOTH - rising and falling edge triggered |
42 | * IRQ_TYPE_LEVEL_HIGH - high level triggered |
43 | * IRQ_TYPE_LEVEL_LOW - low level triggered |
44 | * IRQ_TYPE_LEVEL_MASK - Mask to filter out the level bits |
45 | * IRQ_TYPE_SENSE_MASK - Mask for all the above bits |
46 | * IRQ_TYPE_DEFAULT - For use by some PICs to ask irq_set_type |
 *				  to set up the HW to a sane default (used
48 | * by irqdomain map() callbacks to synchronize |
49 | * the HW state and SW flags for a newly |
50 | * allocated descriptor). |
51 | * |
52 | * IRQ_TYPE_PROBE - Special flag for probing in progress |
53 | * |
54 | * Bits which can be modified via irq_set/clear/modify_status_flags() |
 * IRQ_LEVEL			- Interrupt is level type. Will also be
56 | * updated in the code when the above trigger |
57 | * bits are modified via irq_set_irq_type() |
58 | * IRQ_PER_CPU - Mark an interrupt PER_CPU. Will protect |
59 | * it from affinity setting |
60 | * IRQ_NOPROBE - Interrupt cannot be probed by autoprobing |
61 | * IRQ_NOREQUEST - Interrupt cannot be requested via |
62 | * request_irq() |
63 | * IRQ_NOTHREAD - Interrupt cannot be threaded |
64 | * IRQ_NOAUTOEN - Interrupt is not automatically enabled in |
65 | * request/setup_irq() |
66 | * IRQ_NO_BALANCING - Interrupt cannot be balanced (affinity set) |
67 | * IRQ_MOVE_PCNTXT - Interrupt can be migrated from process context |
68 | * IRQ_NESTED_THREAD - Interrupt nests into another thread |
69 | * IRQ_PER_CPU_DEVID - Dev_id is a per-cpu variable |
70 | * IRQ_IS_POLLED - Always polled by another interrupt. Exclude |
71 | * it from the spurious interrupt detection |
72 | * mechanism and from core side polling. |
73 | * IRQ_DISABLE_UNLAZY - Disable lazy irq disable |
74 | * IRQ_HIDDEN - Don't show up in /proc/interrupts |
75 | * IRQ_NO_DEBUG - Exclude from note_interrupt() debugging |
76 | */ |
77 | enum { |
78 | IRQ_TYPE_NONE = 0x00000000, |
79 | IRQ_TYPE_EDGE_RISING = 0x00000001, |
80 | IRQ_TYPE_EDGE_FALLING = 0x00000002, |
81 | IRQ_TYPE_EDGE_BOTH = (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING), |
82 | IRQ_TYPE_LEVEL_HIGH = 0x00000004, |
83 | IRQ_TYPE_LEVEL_LOW = 0x00000008, |
84 | IRQ_TYPE_LEVEL_MASK = (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH), |
85 | IRQ_TYPE_SENSE_MASK = 0x0000000f, |
86 | IRQ_TYPE_DEFAULT = IRQ_TYPE_SENSE_MASK, |
87 | |
88 | IRQ_TYPE_PROBE = 0x00000010, |
89 | |
90 | IRQ_LEVEL = (1 << 8), |
91 | IRQ_PER_CPU = (1 << 9), |
92 | IRQ_NOPROBE = (1 << 10), |
93 | IRQ_NOREQUEST = (1 << 11), |
94 | IRQ_NOAUTOEN = (1 << 12), |
95 | IRQ_NO_BALANCING = (1 << 13), |
96 | IRQ_MOVE_PCNTXT = (1 << 14), |
97 | IRQ_NESTED_THREAD = (1 << 15), |
98 | IRQ_NOTHREAD = (1 << 16), |
99 | IRQ_PER_CPU_DEVID = (1 << 17), |
100 | IRQ_IS_POLLED = (1 << 18), |
101 | IRQ_DISABLE_UNLAZY = (1 << 19), |
102 | IRQ_HIDDEN = (1 << 20), |
103 | IRQ_NO_DEBUG = (1 << 21), |
104 | }; |
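/*
 * Illustrative use of the trigger type constants (a hedged sketch, not a
 * fixed recipe): "irq" is assumed to be a valid Linux interrupt number,
 * e.g. obtained from platform_get_irq().
 *
 *	ret = irq_set_irq_type(irq, IRQ_TYPE_EDGE_RISING);
 *	if (ret)
 *		return ret;
 */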
105 | |
106 | #define IRQF_MODIFY_MASK \ |
107 | (IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \ |
108 | IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \ |
109 | IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \ |
110 | IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN) |
111 | |
112 | #define IRQ_NO_BALANCING_MASK (IRQ_PER_CPU | IRQ_NO_BALANCING) |
113 | |
114 | /* |
115 | * Return value for chip->irq_set_affinity() |
116 | * |
117 | * IRQ_SET_MASK_OK - OK, core updates irq_common_data.affinity |
 * IRQ_SET_MASK_OK_NOCOPY	- OK, chip did update irq_common_data.affinity
119 | * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to |
120 | * support stacked irqchips, which indicates skipping |
121 | * all descendant irqchips. |
122 | */ |
123 | enum { |
124 | IRQ_SET_MASK_OK = 0, |
125 | IRQ_SET_MASK_OK_NOCOPY, |
126 | IRQ_SET_MASK_OK_DONE, |
127 | }; |
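/*
 * Illustrative irq_chip::irq_set_affinity() sketch showing how these return
 * values are used; "my_route_to_cpu" is a made-up hardware helper, not a
 * kernel API. Returning IRQ_SET_MASK_OK lets the core copy @dest into
 * irq_common_data.affinity.
 *
 *	static int my_set_affinity(struct irq_data *d,
 *				   const struct cpumask *dest, bool force)
 *	{
 *		unsigned int cpu = cpumask_first(dest);
 *
 *		my_route_to_cpu(irqd_to_hwirq(d), cpu);
 *		irq_data_update_effective_affinity(d, cpumask_of(cpu));
 *		return IRQ_SET_MASK_OK;
 *	}
 */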
128 | |
129 | struct msi_desc; |
130 | struct irq_domain; |
131 | |
132 | /** |
133 | * struct irq_common_data - per irq data shared by all irqchips |
134 | * @state_use_accessors: status information for irq chip functions. |
135 | * Use accessor functions to deal with it |
136 | * @node: node index useful for balancing |
137 | * @handler_data: per-IRQ data for the irq_chip methods |
138 | * @affinity: IRQ affinity on SMP. If this is an IPI |
139 | * related irq, then this is the mask of the |
140 | * CPUs to which an IPI can be sent. |
141 | * @effective_affinity: The effective IRQ affinity on SMP as some irq |
142 | * chips do not allow multi CPU destinations. |
143 | * A subset of @affinity. |
144 | * @msi_desc: MSI descriptor |
145 | * @ipi_offset: Offset of first IPI target cpu in @affinity. Optional. |
146 | */ |
147 | struct irq_common_data { |
148 | unsigned int __private state_use_accessors; |
149 | #ifdef CONFIG_NUMA |
150 | unsigned int node; |
151 | #endif |
152 | void *handler_data; |
153 | struct msi_desc *msi_desc; |
154 | #ifdef CONFIG_SMP |
155 | cpumask_var_t affinity; |
156 | #endif |
157 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
158 | cpumask_var_t effective_affinity; |
159 | #endif |
160 | #ifdef CONFIG_GENERIC_IRQ_IPI |
161 | unsigned int ipi_offset; |
162 | #endif |
163 | }; |
164 | |
165 | /** |
166 | * struct irq_data - per irq chip data passed down to chip functions |
167 | * @mask: precomputed bitmask for accessing the chip registers |
168 | * @irq: interrupt number |
169 | * @hwirq: hardware interrupt number, local to the interrupt domain |
 * @common:		pointer to data shared by all irqchips
171 | * @chip: low level interrupt hardware access |
172 | * @domain: Interrupt translation domain; responsible for mapping |
173 | * between hwirq number and linux irq number. |
174 | * @parent_data: pointer to parent struct irq_data to support hierarchy |
175 | * irq_domain |
176 | * @chip_data: platform-specific per-chip private data for the chip |
177 | * methods, to allow shared chip implementations |
178 | */ |
179 | struct irq_data { |
180 | u32 mask; |
181 | unsigned int irq; |
182 | unsigned long hwirq; |
183 | struct irq_common_data *common; |
184 | struct irq_chip *chip; |
185 | struct irq_domain *domain; |
186 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
187 | struct irq_data *parent_data; |
188 | #endif |
189 | void *chip_data; |
190 | }; |
191 | |
192 | /* |
193 | * Bit masks for irq_common_data.state_use_accessors |
194 | * |
195 | * IRQD_TRIGGER_MASK - Mask for the trigger type bits |
196 | * IRQD_SETAFFINITY_PENDING - Affinity setting is pending |
197 | * IRQD_ACTIVATED - Interrupt has already been activated |
198 | * IRQD_NO_BALANCING - Balancing disabled for this IRQ |
199 | * IRQD_PER_CPU - Interrupt is per cpu |
200 | * IRQD_AFFINITY_SET - Interrupt affinity was set |
201 | * IRQD_LEVEL - Interrupt is level triggered |
202 | * IRQD_WAKEUP_STATE - Interrupt is configured for wakeup |
203 | * from suspend |
204 | * IRQD_MOVE_PCNTXT - Interrupt can be moved in process |
205 | * context |
206 | * IRQD_IRQ_DISABLED - Disabled state of the interrupt |
207 | * IRQD_IRQ_MASKED - Masked state of the interrupt |
208 | * IRQD_IRQ_INPROGRESS - In progress state of the interrupt |
209 | * IRQD_WAKEUP_ARMED - Wakeup mode armed |
210 | * IRQD_FORWARDED_TO_VCPU - The interrupt is forwarded to a VCPU |
211 | * IRQD_AFFINITY_MANAGED - Affinity is auto-managed by the kernel |
212 | * IRQD_IRQ_STARTED - Startup state of the interrupt |
 * IRQD_MANAGED_SHUTDOWN	- Interrupt was shut down due to empty affinity
214 | * mask. Applies only to affinity managed irqs. |
215 | * IRQD_SINGLE_TARGET - IRQ allows only a single affinity target |
216 | * IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set |
217 | * IRQD_CAN_RESERVE - Can use reservation mode |
218 | * IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked |
219 | * from actual interrupt context. |
220 | * IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call |
221 | * irq_chip::irq_set_affinity() when deactivated. |
222 | * IRQD_IRQ_ENABLED_ON_SUSPEND - Interrupt is enabled on suspend by irq pm if |
 *				  the irqchip has the IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND flag set.
224 | * IRQD_RESEND_WHEN_IN_PROGRESS - Interrupt may fire when already in progress in which |
225 | * case it must be resent at the next available opportunity. |
226 | */ |
227 | enum { |
228 | IRQD_TRIGGER_MASK = 0xf, |
229 | IRQD_SETAFFINITY_PENDING = BIT(8), |
230 | IRQD_ACTIVATED = BIT(9), |
231 | IRQD_NO_BALANCING = BIT(10), |
232 | IRQD_PER_CPU = BIT(11), |
233 | IRQD_AFFINITY_SET = BIT(12), |
234 | IRQD_LEVEL = BIT(13), |
235 | IRQD_WAKEUP_STATE = BIT(14), |
236 | IRQD_MOVE_PCNTXT = BIT(15), |
237 | IRQD_IRQ_DISABLED = BIT(16), |
238 | IRQD_IRQ_MASKED = BIT(17), |
239 | IRQD_IRQ_INPROGRESS = BIT(18), |
240 | IRQD_WAKEUP_ARMED = BIT(19), |
241 | IRQD_FORWARDED_TO_VCPU = BIT(20), |
242 | IRQD_AFFINITY_MANAGED = BIT(21), |
243 | IRQD_IRQ_STARTED = BIT(22), |
244 | IRQD_MANAGED_SHUTDOWN = BIT(23), |
245 | IRQD_SINGLE_TARGET = BIT(24), |
246 | IRQD_DEFAULT_TRIGGER_SET = BIT(25), |
247 | IRQD_CAN_RESERVE = BIT(26), |
248 | IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27), |
249 | IRQD_AFFINITY_ON_ACTIVATE = BIT(28), |
250 | IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29), |
251 | IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30), |
252 | }; |
253 | |
254 | #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors) |
255 | |
256 | static inline bool irqd_is_setaffinity_pending(struct irq_data *d) |
257 | { |
258 | return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING; |
259 | } |
260 | |
261 | static inline bool irqd_is_per_cpu(struct irq_data *d) |
262 | { |
263 | return __irqd_to_state(d) & IRQD_PER_CPU; |
264 | } |
265 | |
266 | static inline bool irqd_can_balance(struct irq_data *d) |
267 | { |
268 | return !(__irqd_to_state(d) & (IRQD_PER_CPU | IRQD_NO_BALANCING)); |
269 | } |
270 | |
271 | static inline bool irqd_affinity_was_set(struct irq_data *d) |
272 | { |
273 | return __irqd_to_state(d) & IRQD_AFFINITY_SET; |
274 | } |
275 | |
276 | static inline void irqd_mark_affinity_was_set(struct irq_data *d) |
277 | { |
278 | __irqd_to_state(d) |= IRQD_AFFINITY_SET; |
279 | } |
280 | |
281 | static inline bool irqd_trigger_type_was_set(struct irq_data *d) |
282 | { |
283 | return __irqd_to_state(d) & IRQD_DEFAULT_TRIGGER_SET; |
284 | } |
285 | |
286 | static inline u32 irqd_get_trigger_type(struct irq_data *d) |
287 | { |
288 | return __irqd_to_state(d) & IRQD_TRIGGER_MASK; |
289 | } |
290 | |
291 | /* |
292 | * Must only be called inside irq_chip.irq_set_type() functions or |
293 | * from the DT/ACPI setup code. |
294 | */ |
295 | static inline void irqd_set_trigger_type(struct irq_data *d, u32 type) |
296 | { |
297 | __irqd_to_state(d) &= ~IRQD_TRIGGER_MASK; |
298 | __irqd_to_state(d) |= type & IRQD_TRIGGER_MASK; |
299 | __irqd_to_state(d) |= IRQD_DEFAULT_TRIGGER_SET; |
300 | } |
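/*
 * Hedged sketch of that call site: an irq_chip::irq_set_type()
 * implementation programs the hardware and then records the trigger type.
 * "my_hw_set_trigger" is a made-up hardware helper, not a kernel API.
 *
 *	static int my_set_type(struct irq_data *d, unsigned int type)
 *	{
 *		if (my_hw_set_trigger(irqd_to_hwirq(d), type))
 *			return -EINVAL;
 *		irqd_set_trigger_type(d, type);
 *		return 0;
 *	}
 */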
301 | |
302 | static inline bool irqd_is_level_type(struct irq_data *d) |
303 | { |
304 | return __irqd_to_state(d) & IRQD_LEVEL; |
305 | } |
306 | |
307 | /* |
 * Must only be called from irqchip.irq_set_affinity() or low level
309 | * hierarchy domain allocation functions. |
310 | */ |
311 | static inline void irqd_set_single_target(struct irq_data *d) |
312 | { |
313 | __irqd_to_state(d) |= IRQD_SINGLE_TARGET; |
314 | } |
315 | |
316 | static inline bool irqd_is_single_target(struct irq_data *d) |
317 | { |
318 | return __irqd_to_state(d) & IRQD_SINGLE_TARGET; |
319 | } |
320 | |
321 | static inline void irqd_set_handle_enforce_irqctx(struct irq_data *d) |
322 | { |
323 | __irqd_to_state(d) |= IRQD_HANDLE_ENFORCE_IRQCTX; |
324 | } |
325 | |
326 | static inline bool irqd_is_handle_enforce_irqctx(struct irq_data *d) |
327 | { |
328 | return __irqd_to_state(d) & IRQD_HANDLE_ENFORCE_IRQCTX; |
329 | } |
330 | |
331 | static inline bool irqd_is_enabled_on_suspend(struct irq_data *d) |
332 | { |
333 | return __irqd_to_state(d) & IRQD_IRQ_ENABLED_ON_SUSPEND; |
334 | } |
335 | |
336 | static inline bool irqd_is_wakeup_set(struct irq_data *d) |
337 | { |
338 | return __irqd_to_state(d) & IRQD_WAKEUP_STATE; |
339 | } |
340 | |
341 | static inline bool irqd_can_move_in_process_context(struct irq_data *d) |
342 | { |
343 | return __irqd_to_state(d) & IRQD_MOVE_PCNTXT; |
344 | } |
345 | |
346 | static inline bool irqd_irq_disabled(struct irq_data *d) |
347 | { |
348 | return __irqd_to_state(d) & IRQD_IRQ_DISABLED; |
349 | } |
350 | |
351 | static inline bool irqd_irq_masked(struct irq_data *d) |
352 | { |
353 | return __irqd_to_state(d) & IRQD_IRQ_MASKED; |
354 | } |
355 | |
356 | static inline bool irqd_irq_inprogress(struct irq_data *d) |
357 | { |
358 | return __irqd_to_state(d) & IRQD_IRQ_INPROGRESS; |
359 | } |
360 | |
361 | static inline bool irqd_is_wakeup_armed(struct irq_data *d) |
362 | { |
363 | return __irqd_to_state(d) & IRQD_WAKEUP_ARMED; |
364 | } |
365 | |
366 | static inline bool irqd_is_forwarded_to_vcpu(struct irq_data *d) |
367 | { |
368 | return __irqd_to_state(d) & IRQD_FORWARDED_TO_VCPU; |
369 | } |
370 | |
371 | static inline void irqd_set_forwarded_to_vcpu(struct irq_data *d) |
372 | { |
373 | __irqd_to_state(d) |= IRQD_FORWARDED_TO_VCPU; |
374 | } |
375 | |
376 | static inline void irqd_clr_forwarded_to_vcpu(struct irq_data *d) |
377 | { |
378 | __irqd_to_state(d) &= ~IRQD_FORWARDED_TO_VCPU; |
379 | } |
380 | |
381 | static inline bool irqd_affinity_is_managed(struct irq_data *d) |
382 | { |
383 | return __irqd_to_state(d) & IRQD_AFFINITY_MANAGED; |
384 | } |
385 | |
386 | static inline bool irqd_is_activated(struct irq_data *d) |
387 | { |
388 | return __irqd_to_state(d) & IRQD_ACTIVATED; |
389 | } |
390 | |
391 | static inline void irqd_set_activated(struct irq_data *d) |
392 | { |
393 | __irqd_to_state(d) |= IRQD_ACTIVATED; |
394 | } |
395 | |
396 | static inline void irqd_clr_activated(struct irq_data *d) |
397 | { |
398 | __irqd_to_state(d) &= ~IRQD_ACTIVATED; |
399 | } |
400 | |
401 | static inline bool irqd_is_started(struct irq_data *d) |
402 | { |
403 | return __irqd_to_state(d) & IRQD_IRQ_STARTED; |
404 | } |
405 | |
406 | static inline bool irqd_is_managed_and_shutdown(struct irq_data *d) |
407 | { |
408 | return __irqd_to_state(d) & IRQD_MANAGED_SHUTDOWN; |
409 | } |
410 | |
411 | static inline void irqd_set_can_reserve(struct irq_data *d) |
412 | { |
413 | __irqd_to_state(d) |= IRQD_CAN_RESERVE; |
414 | } |
415 | |
416 | static inline void irqd_clr_can_reserve(struct irq_data *d) |
417 | { |
418 | __irqd_to_state(d) &= ~IRQD_CAN_RESERVE; |
419 | } |
420 | |
421 | static inline bool irqd_can_reserve(struct irq_data *d) |
422 | { |
423 | return __irqd_to_state(d) & IRQD_CAN_RESERVE; |
424 | } |
425 | |
426 | static inline void irqd_set_affinity_on_activate(struct irq_data *d) |
427 | { |
428 | __irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE; |
429 | } |
430 | |
431 | static inline bool irqd_affinity_on_activate(struct irq_data *d) |
432 | { |
433 | return __irqd_to_state(d) & IRQD_AFFINITY_ON_ACTIVATE; |
434 | } |
435 | |
436 | static inline void irqd_set_resend_when_in_progress(struct irq_data *d) |
437 | { |
438 | __irqd_to_state(d) |= IRQD_RESEND_WHEN_IN_PROGRESS; |
439 | } |
440 | |
441 | static inline bool irqd_needs_resend_when_in_progress(struct irq_data *d) |
442 | { |
443 | return __irqd_to_state(d) & IRQD_RESEND_WHEN_IN_PROGRESS; |
444 | } |
445 | |
446 | #undef __irqd_to_state |
447 | |
448 | static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) |
449 | { |
450 | return d->hwirq; |
451 | } |
452 | |
453 | /** |
454 | * struct irq_chip - hardware interrupt chip descriptor |
455 | * |
456 | * @name: name for /proc/interrupts |
457 | * @irq_startup: start up the interrupt (defaults to ->enable if NULL) |
458 | * @irq_shutdown: shut down the interrupt (defaults to ->disable if NULL) |
459 | * @irq_enable: enable the interrupt (defaults to chip->unmask if NULL) |
460 | * @irq_disable: disable the interrupt |
461 | * @irq_ack: start of a new interrupt |
462 | * @irq_mask: mask an interrupt source |
463 | * @irq_mask_ack: ack and mask an interrupt source |
464 | * @irq_unmask: unmask an interrupt source |
465 | * @irq_eoi: end of interrupt |
466 | * @irq_set_affinity: Set the CPU affinity on SMP machines. If the force |
467 | * argument is true, it tells the driver to |
468 | * unconditionally apply the affinity setting. Sanity |
469 | * checks against the supplied affinity mask are not |
470 | * required. This is used for CPU hotplug where the |
471 | * target CPU is not yet set in the cpu_online_mask. |
472 | * @irq_retrigger: resend an IRQ to the CPU |
473 | * @irq_set_type: set the flow type (IRQ_TYPE_LEVEL/etc.) of an IRQ |
474 | * @irq_set_wake: enable/disable power-management wake-on of an IRQ |
475 | * @irq_bus_lock: function to lock access to slow bus (i2c) chips |
476 | * @irq_bus_sync_unlock:function to sync and unlock slow bus (i2c) chips |
477 | * @irq_cpu_online: configure an interrupt source for a secondary CPU |
478 | * @irq_cpu_offline: un-configure an interrupt source for a secondary CPU |
479 | * @irq_suspend: function called from core code on suspend once per |
480 | * chip, when one or more interrupts are installed |
481 | * @irq_resume: function called from core code on resume once per chip, |
 *			when one or more interrupts are installed
483 | * @irq_pm_shutdown: function called from core code on shutdown once per chip |
484 | * @irq_calc_mask: Optional function to set irq_data.mask for special cases |
485 | * @irq_print_chip: optional to print special chip info in show_interrupts |
486 | * @irq_request_resources: optional to request resources before calling |
487 | * any other callback related to this irq |
488 | * @irq_release_resources: optional to release resources acquired with |
489 | * irq_request_resources |
490 | * @irq_compose_msi_msg: optional to compose message content for MSI |
491 | * @irq_write_msi_msg: optional to write message content for MSI |
492 | * @irq_get_irqchip_state: return the internal state of an interrupt |
 * @irq_set_irqchip_state:	set the internal state of an interrupt
494 | * @irq_set_vcpu_affinity: optional to target a vCPU in a virtual machine |
 * @ipi_send_single:	send a single IPI to a destination cpu
496 | * @ipi_send_mask: send an IPI to destination cpus in cpumask |
497 | * @irq_nmi_setup: function called from core code before enabling an NMI |
498 | * @irq_nmi_teardown: function called from core code after disabling an NMI |
499 | * @flags: chip specific flags |
500 | */ |
501 | struct irq_chip { |
502 | const char *name; |
503 | unsigned int (*irq_startup)(struct irq_data *data); |
504 | void (*irq_shutdown)(struct irq_data *data); |
505 | void (*irq_enable)(struct irq_data *data); |
506 | void (*irq_disable)(struct irq_data *data); |
507 | |
508 | void (*irq_ack)(struct irq_data *data); |
509 | void (*irq_mask)(struct irq_data *data); |
510 | void (*irq_mask_ack)(struct irq_data *data); |
511 | void (*irq_unmask)(struct irq_data *data); |
512 | void (*irq_eoi)(struct irq_data *data); |
513 | |
514 | int (*irq_set_affinity)(struct irq_data *data, const struct cpumask *dest, bool force); |
515 | int (*irq_retrigger)(struct irq_data *data); |
516 | int (*irq_set_type)(struct irq_data *data, unsigned int flow_type); |
517 | int (*irq_set_wake)(struct irq_data *data, unsigned int on); |
518 | |
519 | void (*irq_bus_lock)(struct irq_data *data); |
520 | void (*irq_bus_sync_unlock)(struct irq_data *data); |
521 | |
522 | #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE |
523 | void (*irq_cpu_online)(struct irq_data *data); |
524 | void (*irq_cpu_offline)(struct irq_data *data); |
525 | #endif |
526 | void (*irq_suspend)(struct irq_data *data); |
527 | void (*irq_resume)(struct irq_data *data); |
528 | void (*irq_pm_shutdown)(struct irq_data *data); |
529 | |
530 | void (*irq_calc_mask)(struct irq_data *data); |
531 | |
532 | void (*irq_print_chip)(struct irq_data *data, struct seq_file *p); |
533 | int (*irq_request_resources)(struct irq_data *data); |
534 | void (*irq_release_resources)(struct irq_data *data); |
535 | |
536 | void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); |
537 | void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); |
538 | |
539 | int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); |
540 | int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); |
541 | |
542 | int (*irq_set_vcpu_affinity)(struct irq_data *data, void *vcpu_info); |
543 | |
544 | void (*ipi_send_single)(struct irq_data *data, unsigned int cpu); |
545 | void (*ipi_send_mask)(struct irq_data *data, const struct cpumask *dest); |
546 | |
547 | int (*irq_nmi_setup)(struct irq_data *data); |
548 | void (*irq_nmi_teardown)(struct irq_data *data); |
549 | |
550 | unsigned long flags; |
551 | }; |
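/*
 * A minimal, illustrative irq_chip sketch. The register layout, the
 * "my_intc" names and the priv/base accesses are assumptions for the
 * example only; real chips fill in whichever callbacks their hardware
 * needs.
 *
 *	static void my_intc_mask(struct irq_data *d)
 *	{
 *		struct my_intc *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(irqd_to_hwirq(d)), priv->base + MY_INTC_MASK_SET);
 *	}
 *
 *	static void my_intc_unmask(struct irq_data *d)
 *	{
 *		struct my_intc *priv = irq_data_get_irq_chip_data(d);
 *
 *		writel(BIT(irqd_to_hwirq(d)), priv->base + MY_INTC_MASK_CLR);
 *	}
 *
 *	static const struct irq_chip my_intc_chip = {
 *		.name		= "my-intc",
 *		.irq_mask	= my_intc_mask,
 *		.irq_unmask	= my_intc_unmask,
 *	};
 */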
552 | |
553 | /* |
554 | * irq_chip specific flags |
555 | * |
556 | * IRQCHIP_SET_TYPE_MASKED: Mask before calling chip.irq_set_type() |
557 | * IRQCHIP_EOI_IF_HANDLED: Only issue irq_eoi() when irq was handled |
558 | * IRQCHIP_MASK_ON_SUSPEND: Mask non wake irqs in the suspend path |
559 | * IRQCHIP_ONOFFLINE_ENABLED: Only call irq_on/off_line callbacks |
 *				when the irq is enabled
561 | * IRQCHIP_SKIP_SET_WAKE: Skip chip.irq_set_wake(), for this irq chip |
562 | * IRQCHIP_ONESHOT_SAFE: One shot does not require mask/unmask |
563 | * IRQCHIP_EOI_THREADED: Chip requires eoi() on unmask in threaded mode |
564 | * IRQCHIP_SUPPORTS_LEVEL_MSI: Chip can provide two doorbells for Level MSIs |
565 | * IRQCHIP_SUPPORTS_NMI: Chip can deliver NMIs, only for root irqchips |
566 | * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND: Invokes __enable_irq()/__disable_irq() for wake irqs |
567 | * in the suspend path if they are in disabled state |
568 | * IRQCHIP_AFFINITY_PRE_STARTUP: Default affinity update before startup |
569 | * IRQCHIP_IMMUTABLE: Don't ever change anything in this chip |
570 | */ |
571 | enum { |
572 | IRQCHIP_SET_TYPE_MASKED = (1 << 0), |
573 | IRQCHIP_EOI_IF_HANDLED = (1 << 1), |
574 | IRQCHIP_MASK_ON_SUSPEND = (1 << 2), |
575 | IRQCHIP_ONOFFLINE_ENABLED = (1 << 3), |
576 | IRQCHIP_SKIP_SET_WAKE = (1 << 4), |
577 | IRQCHIP_ONESHOT_SAFE = (1 << 5), |
578 | IRQCHIP_EOI_THREADED = (1 << 6), |
579 | IRQCHIP_SUPPORTS_LEVEL_MSI = (1 << 7), |
580 | IRQCHIP_SUPPORTS_NMI = (1 << 8), |
581 | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND = (1 << 9), |
582 | IRQCHIP_AFFINITY_PRE_STARTUP = (1 << 10), |
583 | IRQCHIP_IMMUTABLE = (1 << 11), |
584 | }; |
585 | |
586 | #include <linux/irqdesc.h> |
587 | |
588 | /* |
589 | * Pick up the arch-dependent methods: |
590 | */ |
591 | #include <asm/hw_irq.h> |
592 | |
593 | #ifndef NR_IRQS_LEGACY |
594 | # define NR_IRQS_LEGACY 0 |
595 | #endif |
596 | |
597 | #ifndef ARCH_IRQ_INIT_FLAGS |
598 | # define ARCH_IRQ_INIT_FLAGS 0 |
599 | #endif |
600 | |
601 | #define IRQ_DEFAULT_INIT_FLAGS ARCH_IRQ_INIT_FLAGS |
602 | |
603 | struct irqaction; |
604 | extern int setup_percpu_irq(unsigned int irq, struct irqaction *new); |
605 | extern void remove_percpu_irq(unsigned int irq, struct irqaction *act); |
606 | |
607 | #ifdef CONFIG_DEPRECATED_IRQ_CPU_ONOFFLINE |
608 | extern void irq_cpu_online(void); |
609 | extern void irq_cpu_offline(void); |
610 | #endif |
611 | extern int irq_set_affinity_locked(struct irq_data *data, |
612 | const struct cpumask *cpumask, bool force); |
613 | extern int irq_set_vcpu_affinity(unsigned int irq, void *vcpu_info); |
614 | |
615 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_IRQ_MIGRATION) |
616 | extern void irq_migrate_all_off_this_cpu(void); |
617 | extern int irq_affinity_online_cpu(unsigned int cpu); |
618 | #else |
619 | # define irq_affinity_online_cpu NULL |
620 | #endif |
621 | |
622 | #if defined(CONFIG_SMP) && defined(CONFIG_GENERIC_PENDING_IRQ) |
623 | void __irq_move_irq(struct irq_data *data); |
624 | static inline void irq_move_irq(struct irq_data *data) |
625 | { |
626 | if (unlikely(irqd_is_setaffinity_pending(data))) |
627 | __irq_move_irq(data); |
628 | } |
629 | void irq_move_masked_irq(struct irq_data *data); |
630 | void irq_force_complete_move(struct irq_desc *desc); |
631 | #else |
632 | static inline void irq_move_irq(struct irq_data *data) { } |
633 | static inline void irq_move_masked_irq(struct irq_data *data) { } |
634 | static inline void irq_force_complete_move(struct irq_desc *desc) { } |
635 | #endif |
636 | |
637 | extern int no_irq_affinity; |
638 | |
639 | #ifdef CONFIG_HARDIRQS_SW_RESEND |
640 | int irq_set_parent(int irq, int parent_irq); |
641 | #else |
642 | static inline int irq_set_parent(int irq, int parent_irq) |
643 | { |
644 | return 0; |
645 | } |
646 | #endif |
647 | |
648 | /* |
649 | * Built-in IRQ handlers for various IRQ types, |
650 | * callable via desc->handle_irq() |
651 | */ |
652 | extern void handle_level_irq(struct irq_desc *desc); |
653 | extern void handle_fasteoi_irq(struct irq_desc *desc); |
654 | extern void handle_edge_irq(struct irq_desc *desc); |
655 | extern void handle_edge_eoi_irq(struct irq_desc *desc); |
656 | extern void handle_simple_irq(struct irq_desc *desc); |
657 | extern void handle_untracked_irq(struct irq_desc *desc); |
658 | extern void handle_percpu_irq(struct irq_desc *desc); |
659 | extern void handle_percpu_devid_irq(struct irq_desc *desc); |
660 | extern void handle_bad_irq(struct irq_desc *desc); |
661 | extern void handle_nested_irq(unsigned int irq); |
662 | |
663 | extern void handle_fasteoi_nmi(struct irq_desc *desc); |
664 | extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc); |
665 | |
666 | extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); |
667 | extern int irq_chip_pm_get(struct irq_data *data); |
668 | extern int irq_chip_pm_put(struct irq_data *data); |
669 | #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY |
670 | extern void handle_fasteoi_ack_irq(struct irq_desc *desc); |
671 | extern void handle_fasteoi_mask_irq(struct irq_desc *desc); |
672 | extern int irq_chip_set_parent_state(struct irq_data *data, |
673 | enum irqchip_irq_state which, |
674 | bool val); |
675 | extern int irq_chip_get_parent_state(struct irq_data *data, |
676 | enum irqchip_irq_state which, |
677 | bool *state); |
678 | extern void irq_chip_enable_parent(struct irq_data *data); |
679 | extern void irq_chip_disable_parent(struct irq_data *data); |
680 | extern void irq_chip_ack_parent(struct irq_data *data); |
681 | extern int irq_chip_retrigger_hierarchy(struct irq_data *data); |
682 | extern void irq_chip_mask_parent(struct irq_data *data); |
683 | extern void irq_chip_mask_ack_parent(struct irq_data *data); |
684 | extern void irq_chip_unmask_parent(struct irq_data *data); |
685 | extern void irq_chip_eoi_parent(struct irq_data *data); |
686 | extern int irq_chip_set_affinity_parent(struct irq_data *data, |
687 | const struct cpumask *dest, |
688 | bool force); |
689 | extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); |
690 | extern int irq_chip_set_vcpu_affinity_parent(struct irq_data *data, |
691 | void *vcpu_info); |
692 | extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); |
693 | extern int irq_chip_request_resources_parent(struct irq_data *data); |
694 | extern void irq_chip_release_resources_parent(struct irq_data *data); |
695 | #endif |
696 | |
697 | /* Handling of unhandled and spurious interrupts: */ |
698 | extern void note_interrupt(struct irq_desc *desc, irqreturn_t action_ret); |
699 | |
700 | |
701 | /* Enable/disable irq debugging output: */ |
702 | extern int noirqdebug_setup(char *str); |
703 | |
704 | /* Checks whether the interrupt can be requested by request_irq(): */ |
705 | extern int can_request_irq(unsigned int irq, unsigned long irqflags); |
706 | |
707 | /* Dummy irq-chip implementations: */ |
708 | extern struct irq_chip no_irq_chip; |
709 | extern struct irq_chip dummy_irq_chip; |
710 | |
711 | extern void |
712 | irq_set_chip_and_handler_name(unsigned int irq, const struct irq_chip *chip, |
713 | irq_flow_handler_t handle, const char *name); |
714 | |
715 | static inline void irq_set_chip_and_handler(unsigned int irq, |
716 | const struct irq_chip *chip, |
717 | irq_flow_handler_t handle) |
718 | { |
719 | irq_set_chip_and_handler_name(irq, chip, handle, NULL); |
720 | } |
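/*
 * Typical (sketched) usage from an irq_domain_ops::map() callback; the
 * "my_intc_chip" chip and the host_data layout are assumptions carried
 * over from the sketch above.
 *
 *	static int my_intc_map(struct irq_domain *domain, unsigned int virq,
 *			       irq_hw_number_t hwirq)
 *	{
 *		irq_set_chip_and_handler(virq, &my_intc_chip, handle_level_irq);
 *		irq_set_chip_data(virq, domain->host_data);
 *		return 0;
 *	}
 */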
721 | |
722 | extern int irq_set_percpu_devid(unsigned int irq); |
723 | extern int irq_set_percpu_devid_partition(unsigned int irq, |
724 | const struct cpumask *affinity); |
725 | extern int irq_get_percpu_devid_partition(unsigned int irq, |
726 | struct cpumask *affinity); |
727 | |
728 | extern void |
729 | __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, |
730 | const char *name); |
731 | |
732 | static inline void |
733 | irq_set_handler(unsigned int irq, irq_flow_handler_t handle) |
734 | { |
	__irq_set_handler(irq, handle, 0, NULL);
736 | } |
737 | |
738 | /* |
739 | * Set a highlevel chained flow handler for a given IRQ. |
740 | * (a chained handler is automatically enabled and set to |
741 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
742 | */ |
743 | static inline void |
744 | irq_set_chained_handler(unsigned int irq, irq_flow_handler_t handle) |
745 | { |
	__irq_set_handler(irq, handle, 1, NULL);
747 | } |
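/*
 * Sketch of a chained demultiplex handler as typically installed with
 * irq_set_chained_handler_and_data(). "my_mux" and its register reads are
 * assumptions; chained_irq_enter()/chained_irq_exit() come from
 * <linux/irqchip/chained_irq.h>.
 *
 *	static void my_mux_demux(struct irq_desc *desc)
 *	{
 *		struct my_mux *mux = irq_desc_get_handler_data(desc);
 *		struct irq_chip *chip = irq_desc_get_chip(desc);
 *		unsigned long pending, hwirq;
 *
 *		chained_irq_enter(chip, desc);
 *		pending = readl(mux->base + MY_MUX_STATUS);
 *		for_each_set_bit(hwirq, &pending, 32)
 *			generic_handle_domain_irq(mux->domain, hwirq);
 *		chained_irq_exit(chip, desc);
 *	}
 *
 *	irq_set_chained_handler_and_data(parent_irq, my_mux_demux, mux);
 */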
748 | |
749 | /* |
750 | * Set a highlevel chained flow handler and its data for a given IRQ. |
751 | * (a chained handler is automatically enabled and set to |
752 | * IRQ_NOREQUEST, IRQ_NOPROBE, and IRQ_NOTHREAD) |
753 | */ |
754 | void |
755 | irq_set_chained_handler_and_data(unsigned int irq, irq_flow_handler_t handle, |
756 | void *data); |
757 | |
758 | void irq_modify_status(unsigned int irq, unsigned long clr, unsigned long set); |
759 | |
760 | static inline void irq_set_status_flags(unsigned int irq, unsigned long set) |
761 | { |
	irq_modify_status(irq, 0, set);
763 | } |
764 | |
765 | static inline void irq_clear_status_flags(unsigned int irq, unsigned long clr) |
766 | { |
	irq_modify_status(irq, clr, 0);
768 | } |
769 | |
770 | static inline void irq_set_noprobe(unsigned int irq) |
771 | { |
	irq_modify_status(irq, 0, IRQ_NOPROBE);
773 | } |
774 | |
775 | static inline void irq_set_probe(unsigned int irq) |
776 | { |
	irq_modify_status(irq, IRQ_NOPROBE, 0);
778 | } |
779 | |
780 | static inline void irq_set_nothread(unsigned int irq) |
781 | { |
	irq_modify_status(irq, 0, IRQ_NOTHREAD);
783 | } |
784 | |
785 | static inline void irq_set_thread(unsigned int irq) |
786 | { |
	irq_modify_status(irq, IRQ_NOTHREAD, 0);
788 | } |
789 | |
790 | static inline void irq_set_nested_thread(unsigned int irq, bool nest) |
791 | { |
792 | if (nest) |
		irq_set_status_flags(irq, IRQ_NESTED_THREAD);
794 | else |
		irq_clear_status_flags(irq, IRQ_NESTED_THREAD);
796 | } |
797 | |
798 | static inline void irq_set_percpu_devid_flags(unsigned int irq) |
799 | { |
	irq_set_status_flags(irq,
			     IRQ_NOAUTOEN | IRQ_PER_CPU | IRQ_NOTHREAD |
			     IRQ_NOPROBE | IRQ_PER_CPU_DEVID);
803 | } |
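/*
 * Sketch of per-CPU interrupt setup as typically done by root irqchip
 * drivers in their irq domain map path (the "my_percpu_chip" name is an
 * assumption):
 *
 *	irq_set_percpu_devid(virq);
 *	irq_set_chip_and_handler(virq, &my_percpu_chip, handle_percpu_devid_irq);
 *
 * The matching driver then uses request_percpu_irq() with a __percpu dev_id
 * rather than request_irq().
 */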
804 | |
805 | /* Set/get chip/data for an IRQ: */ |
806 | extern int irq_set_chip(unsigned int irq, const struct irq_chip *chip); |
807 | extern int irq_set_handler_data(unsigned int irq, void *data); |
808 | extern int irq_set_chip_data(unsigned int irq, void *data); |
809 | extern int irq_set_irq_type(unsigned int irq, unsigned int type); |
810 | extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry); |
811 | extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset, |
812 | struct msi_desc *entry); |
813 | extern struct irq_data *irq_get_irq_data(unsigned int irq); |
814 | |
815 | static inline struct irq_chip *irq_get_chip(unsigned int irq) |
816 | { |
817 | struct irq_data *d = irq_get_irq_data(irq); |
818 | return d ? d->chip : NULL; |
819 | } |
820 | |
821 | static inline struct irq_chip *irq_data_get_irq_chip(struct irq_data *d) |
822 | { |
823 | return d->chip; |
824 | } |
825 | |
826 | static inline void *irq_get_chip_data(unsigned int irq) |
827 | { |
828 | struct irq_data *d = irq_get_irq_data(irq); |
829 | return d ? d->chip_data : NULL; |
830 | } |
831 | |
832 | static inline void *irq_data_get_irq_chip_data(struct irq_data *d) |
833 | { |
834 | return d->chip_data; |
835 | } |
836 | |
837 | static inline void *irq_get_handler_data(unsigned int irq) |
838 | { |
839 | struct irq_data *d = irq_get_irq_data(irq); |
840 | return d ? d->common->handler_data : NULL; |
841 | } |
842 | |
843 | static inline void *irq_data_get_irq_handler_data(struct irq_data *d) |
844 | { |
845 | return d->common->handler_data; |
846 | } |
847 | |
848 | static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) |
849 | { |
850 | struct irq_data *d = irq_get_irq_data(irq); |
851 | return d ? d->common->msi_desc : NULL; |
852 | } |
853 | |
854 | static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) |
855 | { |
856 | return d->common->msi_desc; |
857 | } |
858 | |
859 | static inline u32 irq_get_trigger_type(unsigned int irq) |
860 | { |
861 | struct irq_data *d = irq_get_irq_data(irq); |
862 | return d ? irqd_get_trigger_type(d) : 0; |
863 | } |
864 | |
865 | static inline int irq_common_data_get_node(struct irq_common_data *d) |
866 | { |
867 | #ifdef CONFIG_NUMA |
868 | return d->node; |
869 | #else |
870 | return 0; |
871 | #endif |
872 | } |
873 | |
874 | static inline int irq_data_get_node(struct irq_data *d) |
875 | { |
	return irq_common_data_get_node(d->common);
877 | } |
878 | |
879 | static inline |
880 | const struct cpumask *irq_data_get_affinity_mask(struct irq_data *d) |
881 | { |
882 | #ifdef CONFIG_SMP |
883 | return d->common->affinity; |
884 | #else |
885 | return cpumask_of(0); |
886 | #endif |
887 | } |
888 | |
889 | static inline void irq_data_update_affinity(struct irq_data *d, |
890 | const struct cpumask *m) |
891 | { |
892 | #ifdef CONFIG_SMP |
	cpumask_copy(d->common->affinity, m);
894 | #endif |
895 | } |
896 | |
897 | static inline const struct cpumask *irq_get_affinity_mask(int irq) |
898 | { |
899 | struct irq_data *d = irq_get_irq_data(irq); |
900 | |
901 | return d ? irq_data_get_affinity_mask(d) : NULL; |
902 | } |
903 | |
904 | #ifdef CONFIG_GENERIC_IRQ_EFFECTIVE_AFF_MASK |
905 | static inline |
906 | const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) |
907 | { |
908 | return d->common->effective_affinity; |
909 | } |
910 | static inline void irq_data_update_effective_affinity(struct irq_data *d, |
911 | const struct cpumask *m) |
912 | { |
	cpumask_copy(d->common->effective_affinity, m);
914 | } |
915 | #else |
916 | static inline void irq_data_update_effective_affinity(struct irq_data *d, |
917 | const struct cpumask *m) |
918 | { |
919 | } |
920 | static inline |
921 | const struct cpumask *irq_data_get_effective_affinity_mask(struct irq_data *d) |
922 | { |
923 | return irq_data_get_affinity_mask(d); |
924 | } |
925 | #endif |
926 | |
927 | static inline |
928 | const struct cpumask *irq_get_effective_affinity_mask(unsigned int irq) |
929 | { |
930 | struct irq_data *d = irq_get_irq_data(irq); |
931 | |
932 | return d ? irq_data_get_effective_affinity_mask(d) : NULL; |
933 | } |
934 | |
935 | unsigned int arch_dynirq_lower_bound(unsigned int from); |
936 | |
937 | int __irq_alloc_descs(int irq, unsigned int from, unsigned int cnt, int node, |
938 | struct module *owner, |
939 | const struct irq_affinity_desc *affinity); |
940 | |
941 | int __devm_irq_alloc_descs(struct device *dev, int irq, unsigned int from, |
942 | unsigned int cnt, int node, struct module *owner, |
943 | const struct irq_affinity_desc *affinity); |
944 | |
945 | /* use macros to avoid needing export.h for THIS_MODULE */ |
946 | #define irq_alloc_descs(irq, from, cnt, node) \ |
947 | __irq_alloc_descs(irq, from, cnt, node, THIS_MODULE, NULL) |
948 | |
949 | #define irq_alloc_desc(node) \ |
950 | irq_alloc_descs(-1, 1, 1, node) |
951 | |
952 | #define irq_alloc_desc_at(at, node) \ |
953 | irq_alloc_descs(at, at, 1, node) |
954 | |
955 | #define irq_alloc_desc_from(from, node) \ |
956 | irq_alloc_descs(-1, from, 1, node) |
957 | |
958 | #define irq_alloc_descs_from(from, cnt, node) \ |
959 | irq_alloc_descs(-1, from, cnt, node) |
960 | |
961 | #define devm_irq_alloc_descs(dev, irq, from, cnt, node) \ |
962 | __devm_irq_alloc_descs(dev, irq, from, cnt, node, THIS_MODULE, NULL) |
963 | |
964 | #define devm_irq_alloc_desc(dev, node) \ |
965 | devm_irq_alloc_descs(dev, -1, 1, 1, node) |
966 | |
967 | #define devm_irq_alloc_desc_at(dev, at, node) \ |
968 | devm_irq_alloc_descs(dev, at, at, 1, node) |
969 | |
970 | #define devm_irq_alloc_desc_from(dev, from, node) \ |
971 | devm_irq_alloc_descs(dev, -1, from, 1, node) |
972 | |
973 | #define devm_irq_alloc_descs_from(dev, from, cnt, node) \ |
974 | devm_irq_alloc_descs(dev, -1, from, cnt, node) |
975 | |
976 | void irq_free_descs(unsigned int irq, unsigned int cnt); |
977 | static inline void irq_free_desc(unsigned int irq) |
978 | { |
	irq_free_descs(irq, 1);
980 | } |
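/*
 * Illustrative allocation of a block of interrupt descriptors (the numbers
 * are arbitrary for the example):
 *
 *	int base = irq_alloc_descs(-1, 0, 4, NUMA_NO_NODE);
 *
 *	if (base < 0)
 *		return base;
 *	...
 *	irq_free_descs(base, 4);
 */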
981 | |
982 | #ifdef CONFIG_GENERIC_IRQ_LEGACY |
983 | void irq_init_desc(unsigned int irq); |
984 | #endif |
985 | |
986 | /** |
 * struct irq_chip_regs - register offsets for struct irq_chip_generic
988 | * @enable: Enable register offset to reg_base |
989 | * @disable: Disable register offset to reg_base |
990 | * @mask: Mask register offset to reg_base |
991 | * @ack: Ack register offset to reg_base |
992 | * @eoi: Eoi register offset to reg_base |
993 | * @type: Type configuration register offset to reg_base |
994 | * @polarity: Polarity configuration register offset to reg_base |
995 | */ |
996 | struct irq_chip_regs { |
997 | unsigned long enable; |
998 | unsigned long disable; |
999 | unsigned long mask; |
1000 | unsigned long ack; |
1001 | unsigned long eoi; |
1002 | unsigned long type; |
1003 | unsigned long polarity; |
1004 | }; |
1005 | |
1006 | /** |
1007 | * struct irq_chip_type - Generic interrupt chip instance for a flow type |
1008 | * @chip: The real interrupt chip which provides the callbacks |
1009 | * @regs: Register offsets for this chip |
1010 | * @handler: Flow handler associated with this chip |
1011 | * @type: Chip can handle these flow types |
1012 | * @mask_cache_priv: Cached mask register private to the chip type |
1013 | * @mask_cache: Pointer to cached mask register |
1014 | * |
 * An irq_chip_generic can have several instances of irq_chip_type when
1016 | * it requires different functions and register offsets for different |
1017 | * flow types. |
1018 | */ |
1019 | struct irq_chip_type { |
1020 | struct irq_chip chip; |
1021 | struct irq_chip_regs regs; |
1022 | irq_flow_handler_t handler; |
1023 | u32 type; |
1024 | u32 mask_cache_priv; |
1025 | u32 *mask_cache; |
1026 | }; |
1027 | |
1028 | /** |
1029 | * struct irq_chip_generic - Generic irq chip data structure |
1030 | * @lock: Lock to protect register and cache data access |
1031 | * @reg_base: Register base address (virtual) |
1032 | * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) |
1033 | * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) |
1034 | * @suspend: Function called from core code on suspend once per |
1035 | * chip; can be useful instead of irq_chip::suspend to |
1036 | * handle chip details even when no interrupts are in use |
1037 | * @resume: Function called from core code on resume once per chip; |
 *			can be useful instead of irq_chip::resume to handle
1039 | * chip details even when no interrupts are in use |
1040 | * @irq_base: Interrupt base nr for this chip |
1041 | * @irq_cnt: Number of interrupts handled by this chip |
1042 | * @mask_cache: Cached mask register shared between all chip types |
1043 | * @type_cache: Cached type register |
1044 | * @polarity_cache: Cached polarity register |
1045 | * @wake_enabled: Interrupt can wakeup from suspend |
 * @wake_active:	Interrupt is marked as a wakeup-from-suspend source
1047 | * @num_ct: Number of available irq_chip_type instances (usually 1) |
1048 | * @private: Private data for non generic chip callbacks |
1049 | * @installed: bitfield to denote installed interrupts |
1050 | * @unused: bitfield to denote unused interrupts |
1051 | * @domain: irq domain pointer |
1052 | * @list: List head for keeping track of instances |
1053 | * @chip_types: Array of interrupt irq_chip_types |
1054 | * |
 * Note that an irq_chip_generic instance can have multiple irq_chip_type
 * implementations, each of which can be associated with a particular irq
 * line of that instance. This allows state in the irq_chip_generic
 * instance to be shared and protected when different flow mechanisms
 * (level/edge) need to be implemented for it.
1060 | */ |
1061 | struct irq_chip_generic { |
1062 | raw_spinlock_t lock; |
1063 | void __iomem *reg_base; |
1064 | u32 (*reg_readl)(void __iomem *addr); |
1065 | void (*reg_writel)(u32 val, void __iomem *addr); |
1066 | void (*suspend)(struct irq_chip_generic *gc); |
1067 | void (*resume)(struct irq_chip_generic *gc); |
1068 | unsigned int irq_base; |
1069 | unsigned int irq_cnt; |
1070 | u32 mask_cache; |
1071 | u32 type_cache; |
1072 | u32 polarity_cache; |
1073 | u32 wake_enabled; |
1074 | u32 wake_active; |
1075 | unsigned int num_ct; |
1076 | void *private; |
1077 | unsigned long installed; |
1078 | unsigned long unused; |
1079 | struct irq_domain *domain; |
1080 | struct list_head list; |
1081 | struct irq_chip_type chip_types[]; |
1082 | }; |
1083 | |
1084 | /** |
1085 | * enum irq_gc_flags - Initialization flags for generic irq chips |
1086 | * @IRQ_GC_INIT_MASK_CACHE: Initialize the mask_cache by reading mask reg |
1087 | * @IRQ_GC_INIT_NESTED_LOCK: Set the lock class of the irqs to nested for |
1088 | * irq chips which need to call irq_set_wake() on |
1089 | * the parent irq. Usually GPIO implementations |
1090 | * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private |
1091 | * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask |
1092 | * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) |
1093 | */ |
1094 | enum irq_gc_flags { |
1095 | IRQ_GC_INIT_MASK_CACHE = 1 << 0, |
1096 | IRQ_GC_INIT_NESTED_LOCK = 1 << 1, |
1097 | IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, |
1098 | IRQ_GC_NO_MASK = 1 << 3, |
1099 | IRQ_GC_BE_IO = 1 << 4, |
1100 | }; |
1101 | |
1102 | /* |
1103 | * struct irq_domain_chip_generic - Generic irq chip data structure for irq domains |
1104 | * @irqs_per_chip: Number of interrupts per chip |
1105 | * @num_chips: Number of chips |
1106 | * @irq_flags_to_set: IRQ* flags to set on irq setup |
1107 | * @irq_flags_to_clear: IRQ* flags to clear on irq setup |
1108 | * @gc_flags: Generic chip specific setup flags |
1109 | * @gc: Array of pointers to generic interrupt chips |
1110 | */ |
1111 | struct irq_domain_chip_generic { |
1112 | unsigned int irqs_per_chip; |
1113 | unsigned int num_chips; |
1114 | unsigned int irq_flags_to_clear; |
1115 | unsigned int irq_flags_to_set; |
1116 | enum irq_gc_flags gc_flags; |
1117 | struct irq_chip_generic *gc[]; |
1118 | }; |
1119 | |
1120 | /* Generic chip callback functions */ |
1121 | void irq_gc_noop(struct irq_data *d); |
1122 | void irq_gc_mask_disable_reg(struct irq_data *d); |
1123 | void irq_gc_mask_set_bit(struct irq_data *d); |
1124 | void irq_gc_mask_clr_bit(struct irq_data *d); |
1125 | void irq_gc_unmask_enable_reg(struct irq_data *d); |
1126 | void irq_gc_ack_set_bit(struct irq_data *d); |
1127 | void irq_gc_ack_clr_bit(struct irq_data *d); |
1128 | void irq_gc_mask_disable_and_ack_set(struct irq_data *d); |
1129 | void irq_gc_eoi(struct irq_data *d); |
1130 | int irq_gc_set_wake(struct irq_data *d, unsigned int on); |
1131 | |
1132 | /* Setup functions for irq_chip_generic */ |
1133 | int irq_map_generic_chip(struct irq_domain *d, unsigned int virq, |
1134 | irq_hw_number_t hw_irq); |
1135 | void irq_unmap_generic_chip(struct irq_domain *d, unsigned int virq); |
1136 | struct irq_chip_generic * |
1137 | irq_alloc_generic_chip(const char *name, int nr_ct, unsigned int irq_base, |
1138 | void __iomem *reg_base, irq_flow_handler_t handler); |
1139 | void irq_setup_generic_chip(struct irq_chip_generic *gc, u32 msk, |
1140 | enum irq_gc_flags flags, unsigned int clr, |
1141 | unsigned int set); |
1142 | int irq_setup_alt_chip(struct irq_data *d, unsigned int type); |
1143 | void irq_remove_generic_chip(struct irq_chip_generic *gc, u32 msk, |
1144 | unsigned int clr, unsigned int set); |
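/*
 * Sketch of a typical (non-domain) generic chip setup. The "MY_*" register
 * offsets, the reg_base address and the interrupt numbers are assumptions
 * for the example:
 *
 *	struct irq_chip_generic *gc;
 *
 *	gc = irq_alloc_generic_chip("my-intc", 1, MY_IRQ_BASE, reg_base,
 *				    handle_level_irq);
 *	if (!gc)
 *		return -ENOMEM;
 *	gc->chip_types[0].chip.irq_mask   = irq_gc_mask_set_bit;
 *	gc->chip_types[0].chip.irq_unmask = irq_gc_mask_clr_bit;
 *	gc->chip_types[0].regs.mask       = MY_MASK_REG;
 *	irq_setup_generic_chip(gc, IRQ_MSK(8), IRQ_GC_INIT_MASK_CACHE,
 *			       IRQ_NOREQUEST, 0);
 */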
1145 | |
1146 | struct irq_chip_generic * |
1147 | devm_irq_alloc_generic_chip(struct device *dev, const char *name, int num_ct, |
1148 | unsigned int irq_base, void __iomem *reg_base, |
1149 | irq_flow_handler_t handler); |
1150 | int devm_irq_setup_generic_chip(struct device *dev, struct irq_chip_generic *gc, |
1151 | u32 msk, enum irq_gc_flags flags, |
1152 | unsigned int clr, unsigned int set); |
1153 | |
1154 | struct irq_chip_generic *irq_get_domain_generic_chip(struct irq_domain *d, unsigned int hw_irq); |
1155 | |
1156 | int __irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, |
1157 | int num_ct, const char *name, |
1158 | irq_flow_handler_t handler, |
1159 | unsigned int clr, unsigned int set, |
1160 | enum irq_gc_flags flags); |
1161 | |
1162 | #define irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name, \ |
1163 | handler, clr, set, flags) \ |
1164 | ({ \ |
1165 | MAYBE_BUILD_BUG_ON(irqs_per_chip > 32); \ |
1166 | __irq_alloc_domain_generic_chips(d, irqs_per_chip, num_ct, name,\ |
1167 | handler, clr, set, flags); \ |
1168 | }) |
1169 | |
1170 | static inline void irq_free_generic_chip(struct irq_chip_generic *gc) |
1171 | { |
	kfree(gc);
1173 | } |
1174 | |
1175 | static inline void irq_destroy_generic_chip(struct irq_chip_generic *gc, |
1176 | u32 msk, unsigned int clr, |
1177 | unsigned int set) |
1178 | { |
1179 | irq_remove_generic_chip(gc, msk, clr, set); |
1180 | irq_free_generic_chip(gc); |
1181 | } |
1182 | |
1183 | static inline struct irq_chip_type *irq_data_get_chip_type(struct irq_data *d) |
1184 | { |
1185 | return container_of(d->chip, struct irq_chip_type, chip); |
1186 | } |
1187 | |
1188 | #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX) |
1189 | |
1190 | #ifdef CONFIG_SMP |
1191 | static inline void irq_gc_lock(struct irq_chip_generic *gc) |
1192 | { |
1193 | raw_spin_lock(&gc->lock); |
1194 | } |
1195 | |
1196 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) |
1197 | { |
1198 | raw_spin_unlock(&gc->lock); |
1199 | } |
1200 | #else |
1201 | static inline void irq_gc_lock(struct irq_chip_generic *gc) { } |
1202 | static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } |
1203 | #endif |
1204 | |
1205 | /* |
 * The irqsave variants are for use in non-interrupt code. Do not use
1207 | * them in irq_chip callbacks. Use irq_gc_lock() instead. |
1208 | */ |
1209 | #define irq_gc_lock_irqsave(gc, flags) \ |
1210 | raw_spin_lock_irqsave(&(gc)->lock, flags) |
1211 | |
1212 | #define irq_gc_unlock_irqrestore(gc, flags) \ |
1213 | raw_spin_unlock_irqrestore(&(gc)->lock, flags) |
1214 | |
1215 | static inline void irq_reg_writel(struct irq_chip_generic *gc, |
1216 | u32 val, int reg_offset) |
1217 | { |
1218 | if (gc->reg_writel) |
1219 | gc->reg_writel(val, gc->reg_base + reg_offset); |
1220 | else |
		gc->reg_writel(val, gc->reg_base + reg_offset);
	else
		writel(val, gc->reg_base + reg_offset);
1222 | } |
1223 | |
1224 | static inline u32 irq_reg_readl(struct irq_chip_generic *gc, |
1225 | int reg_offset) |
1226 | { |
1227 | if (gc->reg_readl) |
1228 | return gc->reg_readl(gc->reg_base + reg_offset); |
1229 | else |
		return readl(gc->reg_base + reg_offset);
1231 | } |
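/*
 * Sketch of a custom callback for a generic chip using the locked register
 * accessors ("MY_ACK_REG" is an assumed register offset):
 *
 *	static void my_gc_ack(struct irq_data *d)
 *	{
 *		struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d);
 *
 *		irq_gc_lock(gc);
 *		irq_reg_writel(gc, d->mask, MY_ACK_REG);
 *		irq_gc_unlock(gc);
 *	}
 */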
1232 | |
1233 | struct irq_matrix; |
1234 | struct irq_matrix *irq_alloc_matrix(unsigned int matrix_bits, |
1235 | unsigned int alloc_start, |
1236 | unsigned int alloc_end); |
1237 | void irq_matrix_online(struct irq_matrix *m); |
1238 | void irq_matrix_offline(struct irq_matrix *m); |
1239 | void irq_matrix_assign_system(struct irq_matrix *m, unsigned int bit, bool replace); |
1240 | int irq_matrix_reserve_managed(struct irq_matrix *m, const struct cpumask *msk); |
1241 | void irq_matrix_remove_managed(struct irq_matrix *m, const struct cpumask *msk); |
1242 | int irq_matrix_alloc_managed(struct irq_matrix *m, const struct cpumask *msk, |
1243 | unsigned int *mapped_cpu); |
1244 | void irq_matrix_reserve(struct irq_matrix *m); |
1245 | void irq_matrix_remove_reserved(struct irq_matrix *m); |
1246 | int irq_matrix_alloc(struct irq_matrix *m, const struct cpumask *msk, |
1247 | bool reserved, unsigned int *mapped_cpu); |
1248 | void irq_matrix_free(struct irq_matrix *m, unsigned int cpu, |
1249 | unsigned int bit, bool managed); |
1250 | void irq_matrix_assign(struct irq_matrix *m, unsigned int bit); |
1251 | unsigned int irq_matrix_available(struct irq_matrix *m, bool cpudown); |
1252 | unsigned int irq_matrix_allocated(struct irq_matrix *m); |
1253 | unsigned int irq_matrix_reserved(struct irq_matrix *m); |
1254 | void irq_matrix_debug_show(struct seq_file *sf, struct irq_matrix *m, int ind); |
1255 | |
1256 | /* Contrary to Linux irqs, for hardware irqs the irq number 0 is valid */ |
1257 | #define INVALID_HWIRQ (~0UL) |
1258 | irq_hw_number_t ipi_get_hwirq(unsigned int irq, unsigned int cpu); |
1259 | int __ipi_send_single(struct irq_desc *desc, unsigned int cpu); |
1260 | int __ipi_send_mask(struct irq_desc *desc, const struct cpumask *dest); |
1261 | int ipi_send_single(unsigned int virq, unsigned int cpu); |
1262 | int ipi_send_mask(unsigned int virq, const struct cpumask *dest); |
1263 | |
1264 | void ipi_mux_process(void); |
1265 | int ipi_mux_create(unsigned int nr_ipi, void (*mux_send)(unsigned int cpu)); |
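/*
 * Hedged sketch of IPI mux usage ("my_send_sgi" and the parent handler
 * wiring are assumptions). ipi_mux_create() returns the base virq of the
 * muxed IPIs; the parent per-CPU interrupt handler demultiplexes by calling
 * ipi_mux_process():
 *
 *	virq = ipi_mux_create(8, my_send_sgi);
 *
 *	static irqreturn_t my_parent_ipi_handler(int irq, void *dev_id)
 *	{
 *		ipi_mux_process();
 *		return IRQ_HANDLED;
 *	}
 */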
1266 | |
1267 | #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER |
1268 | /* |
1269 | * Registers a generic IRQ handling function as the top-level IRQ handler in |
1270 | * the system, which is generally the first C code called from an assembly |
1271 | * architecture-specific interrupt handler. |
1272 | * |
1273 | * Returns 0 on success, or -EBUSY if an IRQ handler has already been |
1274 | * registered. |
1275 | */ |
1276 | int __init set_handle_irq(void (*handle_irq)(struct pt_regs *)); |
1277 | |
1278 | /* |
1279 | * Allows interrupt handlers to find the irqchip that's been registered as the |
1280 | * top-level IRQ handler. |
1281 | */ |
1282 | extern void (*handle_arch_irq)(struct pt_regs *) __ro_after_init; |
1283 | asmlinkage void generic_handle_arch_irq(struct pt_regs *regs); |
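/*
 * Sketch of how a root interrupt controller driver registers its entry
 * point (the "my_intc" claim-register model is an assumption for the
 * example):
 *
 *	static void my_intc_handle_irq(struct pt_regs *regs)
 *	{
 *		u32 hwirq = readl(my_intc_base + MY_INTC_CLAIM);
 *
 *		if (hwirq)
 *			generic_handle_domain_irq(my_intc_domain, hwirq);
 *	}
 *
 *	set_handle_irq(my_intc_handle_irq);
 */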
1284 | #else |
1285 | #ifndef set_handle_irq |
1286 | #define set_handle_irq(handle_irq) \ |
1287 | do { \ |
1288 | (void)handle_irq; \ |
1289 | WARN_ON(1); \ |
1290 | } while (0) |
1291 | #endif |
1292 | #endif |
1293 | |
1294 | #endif /* _LINUX_IRQ_H */ |
1295 | |