// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Copyright The Asahi Linux Contributors
 *
 * Based on irq-lpc32xx:
 *   Copyright 2015-2016 Vladimir Zapolskiy <vz@mleia.com>
 * Based on irq-bcm2836:
 *   Copyright 2015 Broadcom
 */

/*
 * AIC is a fairly simple interrupt controller with the following features:
 *
 * - 896 level-triggered hardware IRQs
 *   - Single mask bit per IRQ
 *   - Per-IRQ affinity setting
 *   - Automatic masking on event delivery (auto-ack)
 *   - Software triggering (ORed with hw line)
 * - 2 per-CPU IPIs (meant as "self" and "other", but they are
 *   interchangeable if not symmetric)
 * - Automatic prioritization (single event/ack register per CPU, lower IRQs =
 *   higher priority)
 * - Automatic masking on ack
 * - Default "this CPU" register view and explicit per-CPU views
 *
 * In addition, this driver also handles FIQs, as these are routed to the same
 * IRQ vector. These are used for Fast IPIs, the ARMv8 timer IRQs, and
 * performance counters (TODO).
 *
 * Implementation notes:
 *
 * - This driver creates two IRQ domains, one for HW IRQs and internal FIQs,
 *   and one for IPIs.
 * - Since Linux needs more than 2 IPIs, we implement a software IRQ controller
 *   and funnel all IPIs into one per-CPU IPI (the second "self" IPI is unused).
 * - FIQ hwirq numbers are assigned after true hwirqs, and are per-cpu.
 * - DT bindings use 3-cell form (like GIC):
 *   - <0 nr flags> - hwirq #nr
 *   - <1 nr flags> - FIQ #nr
 *     - nr=0  Physical HV timer
 *     - nr=1  Virtual HV timer
 *     - nr=2  Physical guest timer
 *     - nr=3  Virtual guest timer
 */
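
/*
 * Illustrative consumer snippet using the 3-cell form (the hwirq number and
 * node label are made up for this example; AIC_IRQ, AIC_FIQ and the timer
 * indices come from the dt-bindings header included below):
 *
 *     interrupt-parent = <&aic>;
 *     interrupts = <AIC_IRQ 335 IRQ_TYPE_LEVEL_HIGH>,
 *                  <AIC_FIQ AIC_TMR_GUEST_VIRT IRQ_TYPE_LEVEL_HIGH>;
 *
 * A 4-cell form with a die index inserted after the type cell, i.e.
 * <type die nr flags>, is also accepted; see aic_irq_domain_translate().
 */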

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/bits.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/io.h>
#include <linux/irqchip.h>
#include <linux/irqchip/arm-vgic-info.h>
#include <linux/irqdomain.h>
#include <linux/jump_label.h>
#include <linux/limits.h>
#include <linux/of_address.h>
#include <linux/slab.h>
#include <asm/apple_m1_pmu.h>
#include <asm/cputype.h>
#include <asm/exception.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <dt-bindings/interrupt-controller/apple-aic.h>

/*
 * AIC v1 registers (MMIO)
 */

#define AIC_INFO 0x0004
#define AIC_INFO_NR_IRQ GENMASK(15, 0)

#define AIC_CONFIG 0x0010

#define AIC_WHOAMI 0x2000
#define AIC_EVENT 0x2004
#define AIC_EVENT_DIE GENMASK(31, 24)
#define AIC_EVENT_TYPE GENMASK(23, 16)
#define AIC_EVENT_NUM GENMASK(15, 0)

#define AIC_EVENT_TYPE_FIQ 0 /* Software use */
#define AIC_EVENT_TYPE_IRQ 1
#define AIC_EVENT_TYPE_IPI 4
#define AIC_EVENT_IPI_OTHER 1
#define AIC_EVENT_IPI_SELF 2

#define AIC_IPI_SEND 0x2008
#define AIC_IPI_ACK 0x200c
#define AIC_IPI_MASK_SET 0x2024
#define AIC_IPI_MASK_CLR 0x2028

#define AIC_IPI_SEND_CPU(cpu) BIT(cpu)

#define AIC_IPI_OTHER BIT(0)
#define AIC_IPI_SELF BIT(31)

#define AIC_TARGET_CPU 0x3000

#define AIC_CPU_IPI_SET(cpu) (0x5008 + ((cpu) << 7))
#define AIC_CPU_IPI_CLR(cpu) (0x500c + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_SET(cpu) (0x5024 + ((cpu) << 7))
#define AIC_CPU_IPI_MASK_CLR(cpu) (0x5028 + ((cpu) << 7))

#define AIC_MAX_IRQ 0x400

/*
 * AIC v2 registers (MMIO)
 */

#define AIC2_VERSION 0x0000
#define AIC2_VERSION_VER GENMASK(7, 0)

#define AIC2_INFO1 0x0004
#define AIC2_INFO1_NR_IRQ GENMASK(15, 0)
#define AIC2_INFO1_LAST_DIE GENMASK(27, 24)

#define AIC2_INFO2 0x0008

#define AIC2_INFO3 0x000c
#define AIC2_INFO3_MAX_IRQ GENMASK(15, 0)
#define AIC2_INFO3_MAX_DIE GENMASK(27, 24)

#define AIC2_RESET 0x0010
#define AIC2_RESET_RESET BIT(0)

#define AIC2_CONFIG 0x0014
#define AIC2_CONFIG_ENABLE BIT(0)
#define AIC2_CONFIG_PREFER_PCPU BIT(28)

#define AIC2_TIMEOUT 0x0028
#define AIC2_CLUSTER_PRIO 0x0030
#define AIC2_DELAY_GROUPS 0x0100

#define AIC2_IRQ_CFG 0x2000

/*
 * AIC2 registers are laid out like this, starting at AIC2_IRQ_CFG:
 *
 * Repeat for each die:
 *   IRQ_CFG:  u32 * MAX_IRQS
 *   SW_SET:   u32 * (MAX_IRQS / 32)
 *   SW_CLR:   u32 * (MAX_IRQS / 32)
 *   MASK_SET: u32 * (MAX_IRQS / 32)
 *   MASK_CLR: u32 * (MAX_IRQS / 32)
 *   HW_STATE: u32 * (MAX_IRQS / 32)
 *
 * This is followed by a set of event registers, each 16K page aligned.
 * The first one is the AP event register we will use. Unfortunately,
 * the actual implemented die count is not specified anywhere in the
 * capability registers, so we have to explicitly specify the event
 * register as a second reg entry in the device tree to remain
 * forward-compatible.
 */
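
/*
 * Worked example (MAX_IRQS value hypothetical): with MAX_IRQS = 4096, each
 * die occupies 4 * 4096 bytes of IRQ_CFG plus five 512-byte bitmaps, i.e. a
 * stride of 18944 bytes. The probe code computes this value dynamically and
 * stores it in aic_info.die_stride.
 */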

#define AIC2_IRQ_CFG_TARGET GENMASK(3, 0)
#define AIC2_IRQ_CFG_DELAY_IDX GENMASK(7, 5)

#define MASK_REG(x) (4 * ((x) >> 5))
#define MASK_BIT(x) BIT((x) & GENMASK(4, 0))
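/*
 * e.g. hw IRQ 70 lives in mask word 70 >> 5 = 2, i.e. at byte offset
 * MASK_REG(70) = 8, as bit MASK_BIT(70) = BIT(6).
 */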

/*
 * IMP-DEF sysregs that control FIQ sources
 */

/* IPI request registers */
#define SYS_IMP_APL_IPI_RR_LOCAL_EL1 sys_reg(3, 5, 15, 0, 0)
#define SYS_IMP_APL_IPI_RR_GLOBAL_EL1 sys_reg(3, 5, 15, 0, 1)
#define IPI_RR_CPU GENMASK(7, 0)
/* Cluster only used for the GLOBAL register */
#define IPI_RR_CLUSTER GENMASK(23, 16)
#define IPI_RR_TYPE GENMASK(29, 28)
#define IPI_RR_IMMEDIATE 0
#define IPI_RR_RETRACT 1
#define IPI_RR_DEFERRED 2
#define IPI_RR_NOWAKE 3

/* IPI status register */
#define SYS_IMP_APL_IPI_SR_EL1 sys_reg(3, 5, 15, 1, 1)
#define IPI_SR_PENDING BIT(0)

/* Guest timer FIQ enable register */
#define SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2 sys_reg(3, 5, 15, 1, 3)
#define VM_TMR_FIQ_ENABLE_V BIT(0)
#define VM_TMR_FIQ_ENABLE_P BIT(1)

/* Deferred IPI countdown register */
#define SYS_IMP_APL_IPI_CR_EL1 sys_reg(3, 5, 15, 3, 1)

/* Uncore PMC control register */
#define SYS_IMP_APL_UPMCR0_EL1 sys_reg(3, 7, 15, 0, 4)
#define UPMCR0_IMODE GENMASK(18, 16)
#define UPMCR0_IMODE_OFF 0
#define UPMCR0_IMODE_AIC 2
#define UPMCR0_IMODE_HALT 3
#define UPMCR0_IMODE_FIQ 4

/* Uncore PMC status register */
#define SYS_IMP_APL_UPMSR_EL1 sys_reg(3, 7, 15, 6, 4)
#define UPMSR_IACT BIT(0)

/* MPIDR fields */
#define MPIDR_CPU(x) MPIDR_AFFINITY_LEVEL(x, 0)
#define MPIDR_CLUSTER(x) MPIDR_AFFINITY_LEVEL(x, 1)

#define AIC_IRQ_HWIRQ(die, irq) (FIELD_PREP(AIC_EVENT_DIE, die) | \
                                 FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_IRQ) | \
                                 FIELD_PREP(AIC_EVENT_NUM, irq))
#define AIC_FIQ_HWIRQ(x) (FIELD_PREP(AIC_EVENT_TYPE, AIC_EVENT_TYPE_FIQ) | \
                          FIELD_PREP(AIC_EVENT_NUM, x))
#define AIC_HWIRQ_IRQ(x) FIELD_GET(AIC_EVENT_NUM, x)
#define AIC_HWIRQ_DIE(x) FIELD_GET(AIC_EVENT_DIE, x)
#define AIC_NR_SWIPI 32
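/*
 * e.g. AIC_IRQ_HWIRQ(1, 32) encodes die 1, event type IRQ, number 32 as
 * 0x01010020; AIC_HWIRQ_DIE() and AIC_HWIRQ_IRQ() recover the fields.
 */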

/*
 * FIQ hwirq index definitions: FIQ sources use the DT binding defines
 * directly, except that timers are special. At the irqchip level, the
 * two timer types are represented by their access method: _EL0 registers
 * or _EL02 registers. In the DT binding, the timers are represented
 * by their purpose (HV or guest). This mapping is for when the kernel is
 * running at EL2 (with VHE). When the kernel is running at EL1, the
 * mapping differs and aic_irq_domain_translate() performs the remapping.
 */
enum fiq_hwirq {
        /* Must be ordered as in apple-aic.h */
        AIC_TMR_EL0_PHYS = AIC_TMR_HV_PHYS,
        AIC_TMR_EL0_VIRT = AIC_TMR_HV_VIRT,
        AIC_TMR_EL02_PHYS = AIC_TMR_GUEST_PHYS,
        AIC_TMR_EL02_VIRT = AIC_TMR_GUEST_VIRT,
        AIC_CPU_PMU_Effi = AIC_CPU_PMU_E,
        AIC_CPU_PMU_Perf = AIC_CPU_PMU_P,
        /* No need for this to be discovered from DT */
        AIC_VGIC_MI,
        AIC_NR_FIQ
};

static DEFINE_STATIC_KEY_TRUE(use_fast_ipi);

struct aic_info {
        int version;

        /* Register offsets */
        u32 event;
        u32 target_cpu;
        u32 irq_cfg;
        u32 sw_set;
        u32 sw_clr;
        u32 mask_set;
        u32 mask_clr;

        u32 die_stride;

        /* Features */
        bool fast_ipi;
};

static const struct aic_info aic1_info __initconst = {
        .version = 1,

        .event = AIC_EVENT,
        .target_cpu = AIC_TARGET_CPU,
};

static const struct aic_info aic1_fipi_info __initconst = {
        .version = 1,

        .event = AIC_EVENT,
        .target_cpu = AIC_TARGET_CPU,

        .fast_ipi = true,
};

static const struct aic_info aic2_info __initconst = {
        .version = 2,

        .irq_cfg = AIC2_IRQ_CFG,

        .fast_ipi = true,
};

static const struct of_device_id aic_info_match[] = {
        {
                .compatible = "apple,t8103-aic",
                .data = &aic1_fipi_info,
        },
        {
                .compatible = "apple,aic",
                .data = &aic1_info,
        },
        {
                .compatible = "apple,aic2",
                .data = &aic2_info,
        },
        {}
};
struct aic_irq_chip {
        void __iomem *base;
        void __iomem *event;
        struct irq_domain *hw_domain;
        struct {
                cpumask_t aff;
        } *fiq_aff[AIC_NR_FIQ];

        int nr_irq;
        int max_irq;
        int nr_die;
        int max_die;

        struct aic_info info;
};

static DEFINE_PER_CPU(uint32_t, aic_fiq_unmasked);

static struct aic_irq_chip *aic_irqc;

static void aic_handle_ipi(struct pt_regs *regs);

static u32 aic_ic_read(struct aic_irq_chip *ic, u32 reg)
{
        return readl_relaxed(ic->base + reg);
}

static void aic_ic_write(struct aic_irq_chip *ic, u32 reg, u32 val)
{
        writel_relaxed(val, ic->base + reg);
}

/*
 * IRQ irqchip
 */

static void aic_irq_mask(struct irq_data *d)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

        u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
        u32 irq = AIC_HWIRQ_IRQ(hwirq);

        aic_ic_write(ic, ic->info.mask_set + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_unmask(struct irq_data *d)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);

        u32 off = AIC_HWIRQ_DIE(hwirq) * ic->info.die_stride;
        u32 irq = AIC_HWIRQ_IRQ(hwirq);

        aic_ic_write(ic, ic->info.mask_clr + off + MASK_REG(irq), MASK_BIT(irq));
}

static void aic_irq_eoi(struct irq_data *d)
{
        /*
         * Reading the interrupt reason automatically acknowledges and masks
         * the IRQ, so we just unmask it here if needed.
         */
        if (!irqd_irq_masked(d))
                aic_irq_unmask(d);
}

static void __exception_irq_entry aic_handle_irq(struct pt_regs *regs)
{
        struct aic_irq_chip *ic = aic_irqc;
        u32 event, type, irq;

        do {
                /*
                 * We cannot use a relaxed read here, as reads from DMA buffers
                 * need to be ordered after the IRQ fires.
                 */
                event = readl(ic->event + ic->info.event);
                type = FIELD_GET(AIC_EVENT_TYPE, event);
                irq = FIELD_GET(AIC_EVENT_NUM, event);

                if (type == AIC_EVENT_TYPE_IRQ)
                        generic_handle_domain_irq(aic_irqc->hw_domain, event);
                else if (type == AIC_EVENT_TYPE_IPI && irq == 1)
                        aic_handle_ipi(regs);
                else if (event != 0)
                        pr_err_ratelimited("Unknown IRQ event %d, %d\n", type, irq);
        } while (event);

        /*
         * vGIC maintenance interrupts end up here too, so we need to check
         * for them separately. It should however only trigger when NV is
         * in use, and be cleared when coming back from the handler.
         */
        if (is_kernel_in_hyp_mode() &&
            (read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
            read_sysreg_s(SYS_ICH_MISR_EL2) != 0) {
                generic_handle_domain_irq(aic_irqc->hw_domain,
                                          AIC_FIQ_HWIRQ(AIC_VGIC_MI));

                if (unlikely((read_sysreg_s(SYS_ICH_HCR_EL2) & ICH_HCR_EN) &&
                             read_sysreg_s(SYS_ICH_MISR_EL2))) {
                        pr_err_ratelimited("vGIC IRQ fired and not handled by KVM, disabling.\n");
                        sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
                }
        }
}

static int aic_irq_set_affinity(struct irq_data *d,
                                const struct cpumask *mask_val, bool force)
{
        irq_hw_number_t hwirq = irqd_to_hwirq(d);
        struct aic_irq_chip *ic = irq_data_get_irq_chip_data(d);
        int cpu;

        BUG_ON(!ic->info.target_cpu);

        if (force)
                cpu = cpumask_first(mask_val);
        else
                cpu = cpumask_any_and(mask_val, cpu_online_mask);

        aic_ic_write(ic, ic->info.target_cpu + AIC_HWIRQ_IRQ(hwirq) * 4, BIT(cpu));
        irq_data_update_effective_affinity(d, cpumask_of(cpu));

        return IRQ_SET_MASK_OK;
}

static int aic_irq_set_type(struct irq_data *d, unsigned int type)
{
        /*
         * Some IRQs (e.g. MSIs) implicitly have edge semantics, and we don't
         * have a way to find out the type of any given IRQ, so just allow both.
         */
        return (type == IRQ_TYPE_LEVEL_HIGH || type == IRQ_TYPE_EDGE_RISING) ? 0 : -EINVAL;
}

static struct irq_chip aic_chip = {
        .name = "AIC",
        .irq_mask = aic_irq_mask,
        .irq_unmask = aic_irq_unmask,
        .irq_eoi = aic_irq_eoi,
        .irq_set_affinity = aic_irq_set_affinity,
        .irq_set_type = aic_irq_set_type,
};

static struct irq_chip aic2_chip = {
        .name = "AIC2",
        .irq_mask = aic_irq_mask,
        .irq_unmask = aic_irq_unmask,
        .irq_eoi = aic_irq_eoi,
        .irq_set_type = aic_irq_set_type,
};

/*
 * FIQ irqchip
 */

static unsigned long aic_fiq_get_idx(struct irq_data *d)
{
        return AIC_HWIRQ_IRQ(irqd_to_hwirq(d));
}

static void aic_fiq_set_mask(struct irq_data *d)
{
        /* Only the guest timers have real mask bits, unfortunately. */
        switch (aic_fiq_get_idx(d)) {
        case AIC_TMR_EL02_PHYS:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_P, 0);
                isb();
                break;
        case AIC_TMR_EL02_VIRT:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, VM_TMR_FIQ_ENABLE_V, 0);
                isb();
                break;
        default:
                break;
        }
}

static void aic_fiq_clear_mask(struct irq_data *d)
{
        switch (aic_fiq_get_idx(d)) {
        case AIC_TMR_EL02_PHYS:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_P);
                isb();
                break;
        case AIC_TMR_EL02_VIRT:
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2, 0, VM_TMR_FIQ_ENABLE_V);
                isb();
                break;
        default:
                break;
        }
}

static void aic_fiq_mask(struct irq_data *d)
{
        aic_fiq_set_mask(d);
        __this_cpu_and(aic_fiq_unmasked, ~BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_unmask(struct irq_data *d)
{
        aic_fiq_clear_mask(d);
        __this_cpu_or(aic_fiq_unmasked, BIT(aic_fiq_get_idx(d)));
}

static void aic_fiq_eoi(struct irq_data *d)
{
        /* We mask to ack (where we can), so we need to unmask at EOI. */
        if (__this_cpu_read(aic_fiq_unmasked) & BIT(aic_fiq_get_idx(d)))
                aic_fiq_clear_mask(d);
}

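/*
 * A timer is considered to be firing when it is enabled, its interrupt
 * output is not masked, and its interrupt status bit is set.
 */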
#define TIMER_FIRING(x)                                                        \
        (((x) & (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_MASK |            \
                 ARCH_TIMER_CTRL_IT_STAT)) ==                                  \
         (ARCH_TIMER_CTRL_ENABLE | ARCH_TIMER_CTRL_IT_STAT))

static void __exception_irq_entry aic_handle_fiq(struct pt_regs *regs)
{
        /*
         * It would be really nice if we had a system register that lets us get
         * the FIQ source state without having to peek down into sources...
         * but such a register does not seem to exist.
         *
         * So, we have these potential sources to test for:
         *  - Fast IPIs (not yet used)
         *  - The 4 timers (CNTP, CNTV for each of HV and guest)
         *  - Per-core PMCs (not yet supported)
         *  - Per-cluster uncore PMCs (not yet supported)
         *
         * Since not dealing with any of these results in a FIQ storm,
         * we check for everything here, even things we don't support yet.
         */

        if (read_sysreg_s(SYS_IMP_APL_IPI_SR_EL1) & IPI_SR_PENDING) {
                if (static_branch_likely(&use_fast_ipi)) {
                        aic_handle_ipi(regs);
                } else {
                        pr_err_ratelimited("Fast IPI fired. Acking.\n");
                        write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
                }
        }

        if (TIMER_FIRING(read_sysreg(cntp_ctl_el0)))
                generic_handle_domain_irq(aic_irqc->hw_domain,
                                          AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS));

        if (TIMER_FIRING(read_sysreg(cntv_ctl_el0)))
                generic_handle_domain_irq(aic_irqc->hw_domain,
                                          AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT));

        if (is_kernel_in_hyp_mode()) {
                uint64_t enabled = read_sysreg_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2);

                if ((enabled & VM_TMR_FIQ_ENABLE_P) &&
                    TIMER_FIRING(read_sysreg_s(SYS_CNTP_CTL_EL02)))
                        generic_handle_domain_irq(aic_irqc->hw_domain,
                                                  AIC_FIQ_HWIRQ(AIC_TMR_EL02_PHYS));

                if ((enabled & VM_TMR_FIQ_ENABLE_V) &&
                    TIMER_FIRING(read_sysreg_s(SYS_CNTV_CTL_EL02)))
                        generic_handle_domain_irq(aic_irqc->hw_domain,
                                                  AIC_FIQ_HWIRQ(AIC_TMR_EL02_VIRT));
        }

        if (read_sysreg_s(SYS_IMP_APL_PMCR0_EL1) & PMCR0_IACT) {
                int irq;

                if (cpumask_test_cpu(smp_processor_id(),
                                     &aic_irqc->fiq_aff[AIC_CPU_PMU_P]->aff))
                        irq = AIC_CPU_PMU_P;
                else
                        irq = AIC_CPU_PMU_E;
                generic_handle_domain_irq(aic_irqc->hw_domain,
                                          AIC_FIQ_HWIRQ(irq));
        }

        if (FIELD_GET(UPMCR0_IMODE, read_sysreg_s(SYS_IMP_APL_UPMCR0_EL1)) == UPMCR0_IMODE_FIQ &&
            (read_sysreg_s(SYS_IMP_APL_UPMSR_EL1) & UPMSR_IACT)) {
                /* Same story with uncore PMCs */
                pr_err_ratelimited("Uncore PMC FIQ fired. Masking.\n");
                sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
                                   FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));
        }
}

static int aic_fiq_set_type(struct irq_data *d, unsigned int type)
{
        return (type == IRQ_TYPE_LEVEL_HIGH) ? 0 : -EINVAL;
}

static struct irq_chip fiq_chip = {
        .name = "AIC-FIQ",
        .irq_mask = aic_fiq_mask,
        .irq_unmask = aic_fiq_unmask,
        .irq_ack = aic_fiq_set_mask,
        .irq_eoi = aic_fiq_eoi,
        .irq_set_type = aic_fiq_set_type,
};

/*
 * Main IRQ domain
 */

static int aic_irq_domain_map(struct irq_domain *id, unsigned int irq,
                              irq_hw_number_t hw)
{
        struct aic_irq_chip *ic = id->host_data;
        u32 type = FIELD_GET(AIC_EVENT_TYPE, hw);
        struct irq_chip *chip = &aic_chip;

        if (ic->info.version == 2)
                chip = &aic2_chip;

        if (type == AIC_EVENT_TYPE_IRQ) {
                irq_domain_set_info(id, irq, hw, chip, id->host_data,
                                    handle_fasteoi_irq, NULL, NULL);
                irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(irq)));
        } else {
                int fiq = FIELD_GET(AIC_EVENT_NUM, hw);

                switch (fiq) {
                case AIC_CPU_PMU_P:
                case AIC_CPU_PMU_E:
                        irq_set_percpu_devid_partition(irq, &ic->fiq_aff[fiq]->aff);
                        break;
                default:
                        irq_set_percpu_devid(irq);
                        break;
                }

                irq_domain_set_info(id, irq, hw, &fiq_chip, id->host_data,
                                    handle_percpu_devid_irq, NULL, NULL);
        }

        return 0;
}

static int aic_irq_domain_translate(struct irq_domain *id,
                                    struct irq_fwspec *fwspec,
                                    unsigned long *hwirq,
                                    unsigned int *type)
{
        struct aic_irq_chip *ic = id->host_data;
        u32 *args;
        u32 die = 0;

        if (fwspec->param_count < 3 || fwspec->param_count > 4 ||
            !is_of_node(fwspec->fwnode))
                return -EINVAL;

        args = &fwspec->param[1];

        if (fwspec->param_count == 4) {
                die = args[0];
                args++;
        }

        switch (fwspec->param[0]) {
        case AIC_IRQ:
                if (die >= ic->nr_die)
                        return -EINVAL;
                if (args[0] >= ic->nr_irq)
                        return -EINVAL;
                *hwirq = AIC_IRQ_HWIRQ(die, args[0]);
                break;
        case AIC_FIQ:
                if (die != 0)
                        return -EINVAL;
                if (args[0] >= AIC_NR_FIQ)
                        return -EINVAL;
                *hwirq = AIC_FIQ_HWIRQ(args[0]);

                /*
                 * In EL1 the non-redirected registers are the guest's,
                 * not EL2's, so remap the hwirqs to match.
                 */
                if (!is_kernel_in_hyp_mode()) {
                        switch (args[0]) {
                        case AIC_TMR_GUEST_PHYS:
                                *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_PHYS);
                                break;
                        case AIC_TMR_GUEST_VIRT:
                                *hwirq = AIC_FIQ_HWIRQ(AIC_TMR_EL0_VIRT);
                                break;
                        case AIC_TMR_HV_PHYS:
                        case AIC_TMR_HV_VIRT:
                                return -ENOENT;
                        default:
                                break;
                        }
                }
                break;
        default:
                return -EINVAL;
        }

        *type = args[1] & IRQ_TYPE_SENSE_MASK;

        return 0;
}

static int aic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs, void *arg)
{
        unsigned int type = IRQ_TYPE_NONE;
        struct irq_fwspec *fwspec = arg;
        irq_hw_number_t hwirq;
        int i, ret;

        ret = aic_irq_domain_translate(domain, fwspec, &hwirq, &type);
        if (ret)
                return ret;

        for (i = 0; i < nr_irqs; i++) {
                ret = aic_irq_domain_map(domain, virq + i, hwirq + i);
                if (ret)
                        return ret;
        }

        return 0;
}

static void aic_irq_domain_free(struct irq_domain *domain, unsigned int virq,
                                unsigned int nr_irqs)
{
        int i;

        for (i = 0; i < nr_irqs; i++) {
                struct irq_data *d = irq_domain_get_irq_data(domain, virq + i);

                irq_set_handler(virq + i, NULL);
                irq_domain_reset_irq_data(d);
        }
}

static const struct irq_domain_ops aic_irq_domain_ops = {
        .translate = aic_irq_domain_translate,
        .alloc = aic_irq_domain_alloc,
        .free = aic_irq_domain_free,
};

/*
 * IPI irqchip
 */

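/*
 * Worked example (values hypothetical): a fast IPI to CPU 2 in remote
 * cluster 1 is requested by writing FIELD_PREP(IPI_RR_CPU, 2) |
 * FIELD_PREP(IPI_RR_CLUSTER, 1) == 0x00010002 to the GLOBAL register; a
 * target in the local cluster is kicked through the LOCAL register instead,
 * which does not take a cluster field.
 */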
static void aic_ipi_send_fast(int cpu)
{
        u64 mpidr = cpu_logical_map(cpu);
        u64 my_mpidr = read_cpuid_mpidr();
        u64 cluster = MPIDR_CLUSTER(mpidr);
        u64 idx = MPIDR_CPU(mpidr);

        if (MPIDR_CLUSTER(my_mpidr) == cluster)
                write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx),
                               SYS_IMP_APL_IPI_RR_LOCAL_EL1);
        else
                write_sysreg_s(FIELD_PREP(IPI_RR_CPU, idx) | FIELD_PREP(IPI_RR_CLUSTER, cluster),
                               SYS_IMP_APL_IPI_RR_GLOBAL_EL1);
        isb();
}

static void aic_handle_ipi(struct pt_regs *regs)
{
        /*
         * Ack the IPI. We need to order this after the AIC event read, but
         * that is enforced by normal MMIO ordering guarantees.
         *
         * For the Fast IPI case, this needs to be ordered before the vIPI
         * handling below, so we need to isb();
         */
        if (static_branch_likely(&use_fast_ipi)) {
                write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);
                isb();
        } else {
                aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_OTHER);
        }

        ipi_mux_process();

        /*
         * No ordering needed here; at worst this just changes the timing of
         * when the next IPI will be delivered.
         */
        if (!static_branch_likely(&use_fast_ipi))
                aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
}

static void aic_ipi_send_single(unsigned int cpu)
{
        if (static_branch_likely(&use_fast_ipi))
                aic_ipi_send_fast(cpu);
        else
                aic_ic_write(aic_irqc, AIC_IPI_SEND, AIC_IPI_SEND_CPU(cpu));
}

static int __init aic_init_smp(struct aic_irq_chip *irqc, struct device_node *node)
{
        int base_ipi;

        base_ipi = ipi_mux_create(AIC_NR_SWIPI, aic_ipi_send_single);
        if (WARN_ON(base_ipi <= 0))
                return -ENODEV;

        set_smp_ipi_range(base_ipi, AIC_NR_SWIPI);

        return 0;
}

static int aic_init_cpu(unsigned int cpu)
{
        /* Mask all hard-wired per-CPU IRQ/FIQ sources */

        /* Pending Fast IPI FIQs */
        write_sysreg_s(IPI_SR_PENDING, SYS_IMP_APL_IPI_SR_EL1);

        /* Timer FIQs */
        sysreg_clear_set(cntp_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);
        sysreg_clear_set(cntv_ctl_el0, 0, ARCH_TIMER_CTRL_IT_MASK);

        /* EL2-only (VHE mode) IRQ sources */
        if (is_kernel_in_hyp_mode()) {
                /* Guest timers */
                sysreg_clear_set_s(SYS_IMP_APL_VM_TMR_FIQ_ENA_EL2,
                                   VM_TMR_FIQ_ENABLE_V | VM_TMR_FIQ_ENABLE_P, 0);

                /* vGIC maintenance IRQ */
                sysreg_clear_set_s(SYS_ICH_HCR_EL2, ICH_HCR_EN, 0);
        }

        /* PMC FIQ */
        sysreg_clear_set_s(SYS_IMP_APL_PMCR0_EL1, PMCR0_IMODE | PMCR0_IACT,
                           FIELD_PREP(PMCR0_IMODE, PMCR0_IMODE_OFF));

        /* Uncore PMC FIQ */
        sysreg_clear_set_s(SYS_IMP_APL_UPMCR0_EL1, UPMCR0_IMODE,
                           FIELD_PREP(UPMCR0_IMODE, UPMCR0_IMODE_OFF));

        /* Commit all of the above */
        isb();

        if (aic_irqc->info.version == 1) {
                /*
                 * Make sure the kernel's idea of logical CPU order is the same
                 * as AIC's. If we ever end up with a mismatch here, we will
                 * have to introduce a mapping table similar to what other
                 * irqchip drivers do.
                 */
                WARN_ON(aic_ic_read(aic_irqc, AIC_WHOAMI) != smp_processor_id());

                /*
                 * Always keep IPIs unmasked at the hardware level (except
                 * auto-masking by AIC during processing). We manage masks at
                 * the vIPI level. These registers only exist on AICv1; AICv2
                 * always uses fast IPIs.
                 */
                aic_ic_write(aic_irqc, AIC_IPI_ACK, AIC_IPI_SELF | AIC_IPI_OTHER);
                if (static_branch_likely(&use_fast_ipi)) {
                        aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF | AIC_IPI_OTHER);
                } else {
                        aic_ic_write(aic_irqc, AIC_IPI_MASK_SET, AIC_IPI_SELF);
                        aic_ic_write(aic_irqc, AIC_IPI_MASK_CLR, AIC_IPI_OTHER);
                }
        }

        /* Initialize the local mask state */
        __this_cpu_write(aic_fiq_unmasked, 0);

        return 0;
}

static struct gic_kvm_info vgic_info __initdata = {
        .type = GIC_V3,
        .no_maint_irq_mask = true,
        .no_hw_deactivation = true,
};

static void build_fiq_affinity(struct aic_irq_chip *ic, struct device_node *aff)
{
        int i, n;
        u32 fiq;

        if (of_property_read_u32(aff, "apple,fiq-index", &fiq) ||
            WARN_ON(fiq >= AIC_NR_FIQ) || ic->fiq_aff[fiq])
                return;

        n = of_property_count_elems_of_size(aff, "cpus", sizeof(u32));
        if (WARN_ON(n < 0))
                return;

        ic->fiq_aff[fiq] = kzalloc(sizeof(*ic->fiq_aff[fiq]), GFP_KERNEL);
        if (!ic->fiq_aff[fiq])
                return;

        for (i = 0; i < n; i++) {
                struct device_node *cpu_node;
                u32 cpu_phandle;
                int cpu;

                if (of_property_read_u32_index(aff, "cpus", i, &cpu_phandle))
                        continue;

                cpu_node = of_find_node_by_phandle(cpu_phandle);
                if (WARN_ON(!cpu_node))
                        continue;

                cpu = of_cpu_node_to_id(cpu_node);
                of_node_put(cpu_node);
                if (WARN_ON(cpu < 0))
                        continue;

                cpumask_set_cpu(cpu, &ic->fiq_aff[fiq]->aff);
        }
}

static int __init aic_of_ic_init(struct device_node *node, struct device_node *parent)
{
        int i, die;
        u32 off, start_off;
        void __iomem *regs;
        struct aic_irq_chip *irqc;
        struct device_node *affs;
        const struct of_device_id *match;

        regs = of_iomap(node, 0);
        if (WARN_ON(!regs))
                return -EIO;

        irqc = kzalloc(sizeof(*irqc), GFP_KERNEL);
        if (!irqc) {
                iounmap(regs);
                return -ENOMEM;
        }

        irqc->base = regs;

        match = of_match_node(aic_info_match, node);
        if (!match)
                goto err_unmap;

        irqc->info = *(struct aic_info *)match->data;

        aic_irqc = irqc;

        switch (irqc->info.version) {
        case 1: {
                u32 info;

                info = aic_ic_read(irqc, AIC_INFO);
                irqc->nr_irq = FIELD_GET(AIC_INFO_NR_IRQ, info);
                irqc->max_irq = AIC_MAX_IRQ;
                irqc->nr_die = irqc->max_die = 1;

                off = start_off = irqc->info.target_cpu;
                off += sizeof(u32) * irqc->max_irq; /* TARGET_CPU */

                irqc->event = irqc->base;

                break;
        }
        case 2: {
                u32 info1, info3;

                info1 = aic_ic_read(irqc, AIC2_INFO1);
                info3 = aic_ic_read(irqc, AIC2_INFO3);

                irqc->nr_irq = FIELD_GET(AIC2_INFO1_NR_IRQ, info1);
                irqc->max_irq = FIELD_GET(AIC2_INFO3_MAX_IRQ, info3);
                irqc->nr_die = FIELD_GET(AIC2_INFO1_LAST_DIE, info1) + 1;
                irqc->max_die = FIELD_GET(AIC2_INFO3_MAX_DIE, info3);

                off = start_off = irqc->info.irq_cfg;
                off += sizeof(u32) * irqc->max_irq; /* IRQ_CFG */

                irqc->event = of_iomap(node, 1);
                if (WARN_ON(!irqc->event))
                        goto err_unmap;

                break;
        }
        }

        irqc->info.sw_set = off;
        off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_SET */
        irqc->info.sw_clr = off;
        off += sizeof(u32) * (irqc->max_irq >> 5); /* SW_CLR */
        irqc->info.mask_set = off;
        off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_SET */
        irqc->info.mask_clr = off;
        off += sizeof(u32) * (irqc->max_irq >> 5); /* MASK_CLR */
        off += sizeof(u32) * (irqc->max_irq >> 5); /* HW_STATE */

        if (irqc->info.fast_ipi)
                static_branch_enable(&use_fast_ipi);
        else
                static_branch_disable(&use_fast_ipi);

        irqc->info.die_stride = off - start_off;

        irqc->hw_domain = irq_domain_create_tree(of_node_to_fwnode(node),
                                                 &aic_irq_domain_ops, irqc);
        if (WARN_ON(!irqc->hw_domain))
                goto err_unmap;

        irq_domain_update_bus_token(irqc->hw_domain, DOMAIN_BUS_WIRED);

        if (aic_init_smp(irqc, node))
                goto err_remove_domain;

        affs = of_get_child_by_name(node, "affinities");
        if (affs) {
                struct device_node *chld;

                for_each_child_of_node(affs, chld)
                        build_fiq_affinity(irqc, chld);
        }
        of_node_put(affs);

        set_handle_irq(aic_handle_irq);
        set_handle_fiq(aic_handle_fiq);

        off = 0;
        for (die = 0; die < irqc->nr_die; die++) {
                for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
                        aic_ic_write(irqc, irqc->info.mask_set + off + i * 4, U32_MAX);
                for (i = 0; i < BITS_TO_U32(irqc->nr_irq); i++)
                        aic_ic_write(irqc, irqc->info.sw_clr + off + i * 4, U32_MAX);
                if (irqc->info.target_cpu)
                        for (i = 0; i < irqc->nr_irq; i++)
                                aic_ic_write(irqc, irqc->info.target_cpu + off + i * 4, 1);
                off += irqc->info.die_stride;
        }

        if (irqc->info.version == 2) {
                u32 config = aic_ic_read(irqc, AIC2_CONFIG);

                config |= AIC2_CONFIG_ENABLE;
                aic_ic_write(irqc, AIC2_CONFIG, config);
        }

        if (!is_kernel_in_hyp_mode())
                pr_info("Kernel running in EL1, mapping interrupts");

        if (static_branch_likely(&use_fast_ipi))
                pr_info("Using Fast IPIs");

        cpuhp_setup_state(CPUHP_AP_IRQ_APPLE_AIC_STARTING,
                          "irqchip/apple-aic/ipi:starting",
                          aic_init_cpu, NULL);

        if (is_kernel_in_hyp_mode()) {
                struct irq_fwspec mi = {
                        .fwnode = of_node_to_fwnode(node),
                        .param_count = 3,
                        .param = {
                                [0] = AIC_FIQ, /* This is a lie */
                                [1] = AIC_VGIC_MI,
                                [2] = IRQ_TYPE_LEVEL_HIGH,
                        },
                };

                vgic_info.maint_irq = irq_create_fwspec_mapping(&mi);
                WARN_ON(!vgic_info.maint_irq);
        }

        vgic_set_kvm_info(&vgic_info);

        pr_info("Initialized with %d/%d IRQs * %d/%d die(s), %d FIQs, %d vIPIs",
                irqc->nr_irq, irqc->max_irq, irqc->nr_die, irqc->max_die, AIC_NR_FIQ, AIC_NR_SWIPI);

        return 0;

err_remove_domain:
        irq_domain_remove(irqc->hw_domain);
err_unmap:
        if (irqc->event && irqc->event != irqc->base)
                iounmap(irqc->event);
        iounmap(irqc->base);
        kfree(irqc);
        return -ENODEV;
}

IRQCHIP_DECLARE(apple_aic, "apple,aic", aic_of_ic_init);
IRQCHIP_DECLARE(apple_aic2, "apple,aic2", aic_of_ic_init);