1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
5 | */ |
6 | |
7 | #define pr_fmt(fmt) "GICv3: " fmt |
8 | |
9 | #include <linux/acpi.h> |
10 | #include <linux/cpu.h> |
11 | #include <linux/cpu_pm.h> |
12 | #include <linux/delay.h> |
13 | #include <linux/interrupt.h> |
14 | #include <linux/irqdomain.h> |
15 | #include <linux/kernel.h> |
16 | #include <linux/kstrtox.h> |
17 | #include <linux/of.h> |
18 | #include <linux/of_address.h> |
19 | #include <linux/of_irq.h> |
20 | #include <linux/percpu.h> |
21 | #include <linux/refcount.h> |
22 | #include <linux/slab.h> |
23 | #include <linux/iopoll.h> |
24 | |
25 | #include <linux/irqchip.h> |
26 | #include <linux/irqchip/arm-gic-common.h> |
27 | #include <linux/irqchip/arm-gic-v3.h> |
28 | #include <linux/irqchip/arm-gic-v3-prio.h> |
29 | #include <linux/irqchip/irq-partition-percpu.h> |
30 | #include <linux/bitfield.h> |
31 | #include <linux/bits.h> |
32 | #include <linux/arm-smccc.h> |
33 | |
34 | #include <asm/cputype.h> |
35 | #include <asm/exception.h> |
36 | #include <asm/smp_plat.h> |
37 | #include <asm/virt.h> |
38 | |
39 | #include "irq-gic-common.h" |
40 | |
41 | static u8 dist_prio_irq __ro_after_init = GICV3_PRIO_IRQ; |
42 | static u8 dist_prio_nmi __ro_after_init = GICV3_PRIO_NMI; |
43 | |
44 | #define FLAGS_WORKAROUND_GICR_WAKER_MSM8996 (1ULL << 0) |
45 | #define FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539 (1ULL << 1) |
46 | #define FLAGS_WORKAROUND_ASR_ERRATUM_8601001 (1ULL << 2) |
47 | #define FLAGS_WORKAROUND_INSECURE (1ULL << 3) |
48 | |
49 | #define GIC_IRQ_TYPE_PARTITION (GIC_IRQ_TYPE_LPI + 1) |
50 | |
51 | static struct cpumask broken_rdists __read_mostly __maybe_unused; |
52 | |
53 | struct redist_region { |
54 | void __iomem *redist_base; |
55 | phys_addr_t phys_base; |
56 | bool single_redist; |
57 | }; |
58 | |
59 | struct gic_chip_data { |
60 | struct fwnode_handle *fwnode; |
61 | phys_addr_t dist_phys_base; |
62 | void __iomem *dist_base; |
63 | struct redist_region *redist_regions; |
64 | struct rdists rdists; |
65 | struct irq_domain *domain; |
66 | u64 redist_stride; |
67 | u32 nr_redist_regions; |
68 | u64 flags; |
69 | bool has_rss; |
70 | unsigned int ppi_nr; |
71 | struct partition_desc **ppi_descs; |
72 | }; |
73 | |
74 | #define T241_CHIPS_MAX 4 |
75 | static void __iomem *t241_dist_base_alias[T241_CHIPS_MAX] __read_mostly; |
76 | static DEFINE_STATIC_KEY_FALSE(gic_nvidia_t241_erratum); |
77 | |
78 | static DEFINE_STATIC_KEY_FALSE(gic_arm64_2941627_erratum); |
79 | |
80 | static struct gic_chip_data gic_data __read_mostly; |
81 | static DEFINE_STATIC_KEY_TRUE(supports_deactivate_key); |
82 | |
83 | #define GIC_ID_NR (1U << GICD_TYPER_ID_BITS(gic_data.rdists.gicd_typer)) |
84 | #define GIC_LINE_NR min(GICD_TYPER_SPIS(gic_data.rdists.gicd_typer), 1020U) |
85 | #define GIC_ESPI_NR GICD_TYPER_ESPIS(gic_data.rdists.gicd_typer) |
86 | |
87 | static bool nmi_support_forbidden; |
88 | |
89 | /* |
90 | * There are 16 SGIs, though we only actually use 8 in Linux. The other 8 SGIs |
91 | * are potentially stolen by the secure side. Some code, especially code dealing |
92 | * with hwirq IDs, is simplified by accounting for all 16. |
93 | */ |
94 | #define SGI_NR 16 |
95 | |
96 | /* |
97 | * The behaviours of RPR and PMR registers differ depending on the value of |
98 | * SCR_EL3.FIQ, and the behaviour of non-secure priority registers of the |
99 | * distributor and redistributors depends on whether security is enabled in the |
100 | * GIC. |
101 | * |
102 | * When security is enabled, non-secure priority values from the (re)distributor |
103 | * are presented to the GIC CPUIF as follows: |
104 | * (GIC_(R)DIST_PRI[irq] >> 1) | 0x80; |
105 | * |
106 | * If SCR_EL3.FIQ == 1, the values written to/read from PMR and RPR at non-secure |
107 | * EL1 are subject to a similar operation thus matching the priorities presented |
108 | * from the (re)distributor when security is enabled. When SCR_EL3.FIQ == 0, |
109 | * these values are unchanged by the GIC. |
110 | * |
111 | * see GICv3/GICv4 Architecture Specification (IHI0069D): |
112 | * - section 4.8.1 Non-secure accesses to register fields for Secure interrupt |
113 | * priorities. |
114 | * - Figure 4-7 Secure read of the priority field for a Non-secure Group 1 |
115 | * interrupt. |
116 | */ |
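/*
 * Illustrative example of the transformation described above (values are
 * made up, not taken from this driver): a Group-1 priority of 0xa0 written
 * to GICD_IPRIORITYR<n> on a GIC with two security states is presented to
 * the CPU interface as (0xa0 >> 1) | 0x80 == 0xd0, i.e. it is squeezed
 * into the non-secure half of the priority range.
 */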
117 | static DEFINE_STATIC_KEY_FALSE(supports_pseudo_nmis); |
118 | |
119 | static u32 gic_get_pribits(void) |
120 | { |
121 | u32 pribits; |
122 | |
123 | pribits = gic_read_ctlr(); |
124 | pribits &= ICC_CTLR_EL1_PRI_BITS_MASK; |
125 | pribits >>= ICC_CTLR_EL1_PRI_BITS_SHIFT; |
126 | pribits++; |
127 | |
128 | return pribits; |
129 | } |
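/*
 * Example (illustrative): ICC_CTLR_EL1.PRIbits encodes the number of
 * implemented priority bits minus one, so a field value of 4 makes
 * gic_get_pribits() return 5, i.e. 32 distinct priority levels.
 */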
130 | |
131 | static bool gic_has_group0(void) |
132 | { |
133 | u32 val; |
134 | u32 old_pmr; |
135 | |
136 | old_pmr = gic_read_pmr(); |
137 | |
138 | /* |
139 | * Let's find out if Group0 is under control of EL3 or not by |
140 | * setting the highest possible, non-zero priority in PMR. |
141 | * |
142 | * If SCR_EL3.FIQ is set, the priority gets shifted down in |
143 | * order for the CPU interface to set bit 7, and keep the |
144 | * actual priority in the non-secure range. In the process, it |
145 | * loses the least significant bit and the actual priority |
146 | * becomes 0x80. Reading it back returns 0, indicating that |
147 | * we don't have access to Group0. |
148 | */ |
149 | gic_write_pmr(BIT(8 - gic_get_pribits())); |
150 | val = gic_read_pmr(); |
151 | |
152 | gic_write_pmr(old_pmr); |
153 | |
154 | return val != 0; |
155 | } |
156 | |
157 | static inline bool gic_dist_security_disabled(void) |
158 | { |
159 | return readl_relaxed(gic_data.dist_base + GICD_CTLR) & GICD_CTLR_DS; |
160 | } |
161 | |
162 | static bool cpus_have_security_disabled __ro_after_init; |
163 | static bool cpus_have_group0 __ro_after_init; |
164 | |
165 | static void __init gic_prio_init(void) |
166 | { |
167 | bool ds; |
168 | |
169 | cpus_have_group0 = gic_has_group0(); |
170 | |
171 | ds = gic_dist_security_disabled(); |
172 | if ((gic_data.flags & FLAGS_WORKAROUND_INSECURE) && !ds) { |
173 | if (cpus_have_group0) { |
174 | u32 val; |
175 | |
176 | val = readl_relaxed(gic_data.dist_base + GICD_CTLR); |
177 | val |= GICD_CTLR_DS; |
178 | writel_relaxed(val, gic_data.dist_base + GICD_CTLR); |
179 | |
180 | ds = gic_dist_security_disabled(); |
181 | if (ds) |
182 | pr_warn("Broken GIC integration, security disabled\n"); |
183 | } else { |
184 | pr_warn("Broken GIC integration, pNMI forbidden\n"); |
185 | nmi_support_forbidden = true; |
186 | } |
187 | } |
188 | |
189 | cpus_have_security_disabled = ds; |
190 | |
191 | /* |
192 | * How priority values are used by the GIC depends on two things: |
193 | * the security state of the GIC (controlled by the GICD_CTRL.DS bit) |
194 | * and if Group 0 interrupts can be delivered to Linux in the non-secure |
195 | * world as FIQs (controlled by the SCR_EL3.FIQ bit). These affect the |
196 | * way priorities are presented in ICC_PMR_EL1 and in the distributor: |
197 | * |
198 | * GICD_CTRL.DS | SCR_EL3.FIQ | ICC_PMR_EL1 | Distributor |
199 | * ------------------------------------------------------- |
200 | * 1 | - | unchanged | unchanged |
201 | * ------------------------------------------------------- |
202 | * 0 | 1 | non-secure | non-secure |
203 | * ------------------------------------------------------- |
204 | * 0 | 0 | unchanged | non-secure |
205 | * |
206 | * In the non-secure view reads and writes are modified: |
207 | * |
208 | * - A value written is right-shifted by one and the MSB is set, |
209 | * forcing the priority into the non-secure range. |
210 | * |
211 | * - A value read is left-shifted by one. |
212 | * |
213 | * In the first two cases, where ICC_PMR_EL1 and the interrupt priority |
214 | * are both either modified or unchanged, we can use the same set of |
215 | * priorities. |
216 | * |
217 | * In the last case, where only the interrupt priorities are modified to |
218 | * be in the non-secure range, we program the non-secure values into |
219 | * the distributor to match the PMR values we want. |
220 | */ |
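/*
 * Worked example for the last case above (illustrative numbers, assuming
 * __gicv3_prio_to_ns() mirrors the distributor view described above): for
 * the GIC to present a priority of 0xc0, the value programmed into the
 * distributor must be 0x80, since (0x80 >> 1) | 0x80 == 0xc0.
 */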
221 | if (cpus_have_group0 && !cpus_have_security_disabled) { |
222 | dist_prio_irq = __gicv3_prio_to_ns(dist_prio_irq); |
223 | dist_prio_nmi = __gicv3_prio_to_ns(dist_prio_nmi); |
224 | } |
225 | |
226 | pr_info("GICD_CTRL.DS=%d, SCR_EL3.FIQ=%d\n", |
227 | cpus_have_security_disabled, |
228 | !cpus_have_group0); |
229 | } |
230 | |
231 | /* rdist_nmi_refs[n] == number of cpus having the rdist interrupt n set as NMI */ |
232 | static refcount_t *rdist_nmi_refs; |
233 | |
234 | static struct gic_kvm_info gic_v3_kvm_info __initdata; |
235 | static DEFINE_PER_CPU(bool, has_rss); |
236 | |
237 | #define MPIDR_RS(mpidr) (((mpidr) & 0xF0UL) >> 4) |
238 | #define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) |
239 | #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) |
240 | #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) |
241 | |
242 | /* Our default, arbitrary priority value. Linux only uses one anyway. */ |
243 | #define DEFAULT_PMR_VALUE 0xf0 |
244 | |
245 | enum gic_intid_range { |
246 | SGI_RANGE, |
247 | PPI_RANGE, |
248 | SPI_RANGE, |
249 | EPPI_RANGE, |
250 | ESPI_RANGE, |
251 | LPI_RANGE, |
252 | __INVALID_RANGE__ |
253 | }; |
254 | |
255 | static enum gic_intid_range __get_intid_range(irq_hw_number_t hwirq) |
256 | { |
257 | switch (hwirq) { |
258 | case 0 ... 15: |
259 | return SGI_RANGE; |
260 | case 16 ... 31: |
261 | return PPI_RANGE; |
262 | case 32 ... 1019: |
263 | return SPI_RANGE; |
264 | case EPPI_BASE_INTID ... (EPPI_BASE_INTID + 63): |
265 | return EPPI_RANGE; |
266 | case ESPI_BASE_INTID ... (ESPI_BASE_INTID + 1023): |
267 | return ESPI_RANGE; |
268 | case 8192 ... GENMASK(23, 0): |
269 | return LPI_RANGE; |
270 | default: |
271 | return __INVALID_RANGE__; |
272 | } |
273 | } |
274 | |
275 | static enum gic_intid_range get_intid_range(struct irq_data *d) |
276 | { |
277 | return __get_intid_range(d->hwirq); |
278 | } |
279 | |
280 | static inline bool gic_irq_in_rdist(struct irq_data *d) |
281 | { |
282 | switch (get_intid_range(d)) { |
283 | case SGI_RANGE: |
284 | case PPI_RANGE: |
285 | case EPPI_RANGE: |
286 | return true; |
287 | default: |
288 | return false; |
289 | } |
290 | } |
291 | |
292 | static inline void __iomem *gic_dist_base_alias(struct irq_data *d) |
293 | { |
294 | if (static_branch_unlikely(&gic_nvidia_t241_erratum)) { |
295 | irq_hw_number_t hwirq = irqd_to_hwirq(d); |
296 | u32 chip; |
297 | |
298 | /* |
299 | * For the erratum T241-FABRIC-4, read accesses to GICD_In{E} |
300 | * registers are directed to the chip that owns the SPI. The |
301 | * alias region can also be used for writes to the |
302 | * GICD_In{E} except GICD_ICENABLERn. Each chip has support |
303 | * for 320 {E}SPIs. Mappings for all 4 chips: |
304 | * Chip0 = 32-351 |
305 | * Chip1 = 352-671 |
306 | * Chip2 = 672-991 |
307 | * Chip3 = 4096-4415 |
308 | */ |
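/*
 * Example of the mapping above (arithmetic only, value illustrative):
 * SPI INTID 700 belongs to Chip2, since (700 - 32) / 320 == 2, matching
 * the computation in the SPI_RANGE case below.
 */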
309 | switch (__get_intid_range(hwirq)) { |
310 | case SPI_RANGE: |
311 | chip = (hwirq - 32) / 320; |
312 | break; |
313 | case ESPI_RANGE: |
314 | chip = 3; |
315 | break; |
316 | default: |
317 | unreachable(); |
318 | } |
319 | return t241_dist_base_alias[chip]; |
320 | } |
321 | |
322 | return gic_data.dist_base; |
323 | } |
324 | |
325 | static inline void __iomem *gic_dist_base(struct irq_data *d) |
326 | { |
327 | switch (get_intid_range(d)) { |
328 | case SGI_RANGE: |
329 | case PPI_RANGE: |
330 | case EPPI_RANGE: |
331 | /* SGI+PPI -> SGI_base for this CPU */ |
332 | return gic_data_rdist_sgi_base(); |
333 | |
334 | case SPI_RANGE: |
335 | case ESPI_RANGE: |
336 | /* SPI -> dist_base */ |
337 | return gic_data.dist_base; |
338 | |
339 | default: |
340 | return NULL; |
341 | } |
342 | } |
343 | |
344 | static void gic_do_wait_for_rwp(void __iomem *base, u32 bit) |
345 | { |
346 | u32 val; |
347 | int ret; |
348 | |
349 | ret = readl_relaxed_poll_timeout_atomic(base + GICD_CTLR, val, !(val & bit), |
350 | 1, USEC_PER_SEC); |
351 | if (ret == -ETIMEDOUT) |
352 | pr_err_ratelimited("RWP timeout, gone fishing\n"); |
353 | } |
354 | |
355 | /* Wait for completion of a distributor change */ |
356 | static void gic_dist_wait_for_rwp(void) |
357 | { |
358 | gic_do_wait_for_rwp(gic_data.dist_base, GICD_CTLR_RWP); |
359 | } |
360 | |
361 | /* Wait for completion of a redistributor change */ |
362 | static void gic_redist_wait_for_rwp(void) |
363 | { |
364 | gic_do_wait_for_rwp(gic_data_rdist_rd_base(), GICR_CTLR_RWP); |
365 | } |
366 | |
367 | static void gic_enable_redist(bool enable) |
368 | { |
369 | void __iomem *rbase; |
370 | u32 val; |
371 | int ret; |
372 | |
373 | if (gic_data.flags & FLAGS_WORKAROUND_GICR_WAKER_MSM8996) |
374 | return; |
375 | |
376 | rbase = gic_data_rdist_rd_base(); |
377 | |
378 | val = readl_relaxed(rbase + GICR_WAKER); |
379 | if (enable) |
380 | /* Wake up this CPU redistributor */ |
381 | val &= ~GICR_WAKER_ProcessorSleep; |
382 | else |
383 | val |= GICR_WAKER_ProcessorSleep; |
384 | writel_relaxed(val, rbase + GICR_WAKER); |
385 | |
386 | if (!enable) { /* Check that GICR_WAKER is writeable */ |
387 | val = readl_relaxed(rbase + GICR_WAKER); |
388 | if (!(val & GICR_WAKER_ProcessorSleep)) |
389 | return; /* No PM support in this redistributor */ |
390 | } |
391 | |
392 | ret = readl_relaxed_poll_timeout_atomic(rbase + GICR_WAKER, val, |
393 | enable ^ (bool)(val & GICR_WAKER_ChildrenAsleep), |
394 | 1, USEC_PER_SEC); |
395 | if (ret == -ETIMEDOUT) { |
396 | pr_err_ratelimited("redistributor failed to %s...\n", |
397 | enable ? "wakeup": "sleep"); |
398 | } |
399 | } |
400 | |
401 | /* |
402 | * Routines to disable, enable, EOI and route interrupts |
403 | */ |
404 | static u32 convert_offset_index(struct irq_data *d, u32 offset, u32 *index) |
405 | { |
406 | switch (get_intid_range(d)) { |
407 | case SGI_RANGE: |
408 | case PPI_RANGE: |
409 | case SPI_RANGE: |
410 | *index = d->hwirq; |
411 | return offset; |
412 | case EPPI_RANGE: |
413 | /* |
414 | * Contrary to the ESPI range, the EPPI range is contiguous |
415 | * to the PPI range in the registers, so let's adjust the |
416 | * displacement accordingly. Consistency is overrated. |
417 | */ |
418 | *index = d->hwirq - EPPI_BASE_INTID + 32; |
419 | return offset; |
420 | case ESPI_RANGE: |
421 | *index = d->hwirq - ESPI_BASE_INTID; |
422 | switch (offset) { |
423 | case GICD_ISENABLER: |
424 | return GICD_ISENABLERnE; |
425 | case GICD_ICENABLER: |
426 | return GICD_ICENABLERnE; |
427 | case GICD_ISPENDR: |
428 | return GICD_ISPENDRnE; |
429 | case GICD_ICPENDR: |
430 | return GICD_ICPENDRnE; |
431 | case GICD_ISACTIVER: |
432 | return GICD_ISACTIVERnE; |
433 | case GICD_ICACTIVER: |
434 | return GICD_ICACTIVERnE; |
435 | case GICD_IPRIORITYR: |
436 | return GICD_IPRIORITYRnE; |
437 | case GICD_ICFGR: |
438 | return GICD_ICFGRnE; |
439 | case GICD_IROUTER: |
440 | return GICD_IROUTERnE; |
441 | default: |
442 | break; |
443 | } |
444 | break; |
445 | default: |
446 | break; |
447 | } |
448 | |
449 | WARN_ON(1); |
450 | *index = d->hwirq; |
451 | return offset; |
452 | } |
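/*
 * Usage sketch (illustrative): for an extended SPI with hwirq
 * ESPI_BASE_INTID + 5, convert_offset_index(d, GICD_ISENABLER, &index)
 * returns GICD_ISENABLERnE and sets index to 5, so callers address the
 * "nE" register banks with a zero-based index.
 */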
453 | |
454 | static int gic_peek_irq(struct irq_data *d, u32 offset) |
455 | { |
456 | void __iomem *base; |
457 | u32 index, mask; |
458 | |
459 | offset = convert_offset_index(d, offset, &index); |
460 | mask = 1 << (index % 32); |
461 | |
462 | if (gic_irq_in_rdist(d)) |
463 | base = gic_data_rdist_sgi_base(); |
464 | else |
465 | base = gic_dist_base_alias(d); |
466 | |
467 | return !!(readl_relaxed(base + offset + (index / 32) * 4) & mask); |
468 | } |
469 | |
470 | static void gic_poke_irq(struct irq_data *d, u32 offset) |
471 | { |
472 | void __iomem *base; |
473 | u32 index, mask; |
474 | |
475 | offset = convert_offset_index(d, offset, &index); |
476 | mask = 1 << (index % 32); |
477 | |
478 | if (gic_irq_in_rdist(d)) |
479 | base = gic_data_rdist_sgi_base(); |
480 | else |
481 | base = gic_data.dist_base; |
482 | |
483 | writel_relaxed(mask, base + offset + (index / 32) * 4); |
484 | } |
485 | |
486 | static void gic_mask_irq(struct irq_data *d) |
487 | { |
488 | gic_poke_irq(d, GICD_ICENABLER); |
489 | if (gic_irq_in_rdist(d)) |
490 | gic_redist_wait_for_rwp(); |
491 | else |
492 | gic_dist_wait_for_rwp(); |
493 | } |
494 | |
495 | static void gic_eoimode1_mask_irq(struct irq_data *d) |
496 | { |
497 | gic_mask_irq(d); |
498 | /* |
499 | * When masking a forwarded interrupt, make sure it is |
500 | * deactivated as well. |
501 | * |
502 | * This ensures that an interrupt that is getting |
503 | * disabled/masked will not get "stuck", because there is |
504 | * no one to deactivate it (guest is being terminated). |
505 | */ |
506 | if (irqd_is_forwarded_to_vcpu(d)) |
507 | gic_poke_irq(d, GICD_ICACTIVER); |
508 | } |
509 | |
510 | static void gic_unmask_irq(struct irq_data *d) |
511 | { |
512 | gic_poke_irq(d, GICD_ISENABLER); |
513 | } |
514 | |
515 | static inline bool gic_supports_nmi(void) |
516 | { |
517 | return IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && |
518 | static_branch_likely(&supports_pseudo_nmis); |
519 | } |
520 | |
521 | static int gic_irq_set_irqchip_state(struct irq_data *d, |
522 | enum irqchip_irq_state which, bool val) |
523 | { |
524 | u32 reg; |
525 | |
526 | if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ |
527 | return -EINVAL; |
528 | |
529 | switch (which) { |
530 | case IRQCHIP_STATE_PENDING: |
531 | reg = val ? GICD_ISPENDR : GICD_ICPENDR; |
532 | break; |
533 | |
534 | case IRQCHIP_STATE_ACTIVE: |
535 | reg = val ? GICD_ISACTIVER : GICD_ICACTIVER; |
536 | break; |
537 | |
538 | case IRQCHIP_STATE_MASKED: |
539 | if (val) { |
540 | gic_mask_irq(d); |
541 | return 0; |
542 | } |
543 | reg = GICD_ISENABLER; |
544 | break; |
545 | |
546 | default: |
547 | return -EINVAL; |
548 | } |
549 | |
550 | gic_poke_irq(d, reg); |
551 | |
552 | /* |
553 | * Force read-back to guarantee that the active state has taken |
554 | * effect, and won't race with a guest-driven deactivation. |
555 | */ |
556 | if (reg == GICD_ISACTIVER) |
557 | gic_peek_irq(d, reg); |
558 | return 0; |
559 | } |
560 | |
561 | static int gic_irq_get_irqchip_state(struct irq_data *d, |
562 | enum irqchip_irq_state which, bool *val) |
563 | { |
564 | if (d->hwirq >= 8192) /* SGI/PPI/SPI only */ |
565 | return -EINVAL; |
566 | |
567 | switch (which) { |
568 | case IRQCHIP_STATE_PENDING: |
569 | *val = gic_peek_irq(d, GICD_ISPENDR); |
570 | break; |
571 | |
572 | case IRQCHIP_STATE_ACTIVE: |
573 | *val = gic_peek_irq(d, GICD_ISACTIVER); |
574 | break; |
575 | |
576 | case IRQCHIP_STATE_MASKED: |
577 | *val = !gic_peek_irq(d, GICD_ISENABLER); |
578 | break; |
579 | |
580 | default: |
581 | return -EINVAL; |
582 | } |
583 | |
584 | return 0; |
585 | } |
586 | |
587 | static void gic_irq_set_prio(struct irq_data *d, u8 prio) |
588 | { |
589 | void __iomem *base = gic_dist_base(d); |
590 | u32 offset, index; |
591 | |
592 | offset = convert_offset_index(d, GICD_IPRIORITYR, &index); |
593 | |
594 | writeb_relaxed(prio, base + offset + index); |
595 | } |
596 | |
597 | static u32 __gic_get_ppi_index(irq_hw_number_t hwirq) |
598 | { |
599 | switch (__get_intid_range(hwirq)) { |
600 | case PPI_RANGE: |
601 | return hwirq - 16; |
602 | case EPPI_RANGE: |
603 | return hwirq - EPPI_BASE_INTID + 16; |
604 | default: |
605 | unreachable(); |
606 | } |
607 | } |
608 | |
609 | static u32 __gic_get_rdist_index(irq_hw_number_t hwirq) |
610 | { |
611 | switch (__get_intid_range(hwirq)) { |
612 | case SGI_RANGE: |
613 | case PPI_RANGE: |
614 | return hwirq; |
615 | case EPPI_RANGE: |
616 | return hwirq - EPPI_BASE_INTID + 32; |
617 | default: |
618 | unreachable(); |
619 | } |
620 | } |
621 | |
622 | static u32 gic_get_rdist_index(struct irq_data *d) |
623 | { |
624 | return __gic_get_rdist_index(d->hwirq); |
625 | } |
626 | |
627 | static int gic_irq_nmi_setup(struct irq_data *d) |
628 | { |
629 | struct irq_desc *desc = irq_to_desc(d->irq); |
630 | |
631 | if (!gic_supports_nmi()) |
632 | return -EINVAL; |
633 | |
634 | if (gic_peek_irq(d, GICD_ISENABLER)) { |
635 | pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); |
636 | return -EINVAL; |
637 | } |
638 | |
639 | /* |
640 | * A secondary irq_chip should be in charge of LPI request, |
641 | * it should not be possible to get there |
642 | */ |
643 | if (WARN_ON(irqd_to_hwirq(d) >= 8192)) |
644 | return -EINVAL; |
645 | |
646 | /* desc lock should already be held */ |
647 | if (gic_irq_in_rdist(d)) { |
648 | u32 idx = gic_get_rdist_index(d); |
649 | |
650 | /* |
651 | * Setting up a percpu interrupt as NMI, only switch handler |
652 | * for first NMI |
653 | */ |
654 | if (!refcount_inc_not_zero(&rdist_nmi_refs[idx])) { |
655 | refcount_set(&rdist_nmi_refs[idx], 1); |
656 | desc->handle_irq = handle_percpu_devid_fasteoi_nmi; |
657 | } |
658 | } else { |
659 | desc->handle_irq = handle_fasteoi_nmi; |
660 | } |
661 | |
662 | gic_irq_set_prio(d, dist_prio_nmi); |
663 | |
664 | return 0; |
665 | } |
666 | |
667 | static void gic_irq_nmi_teardown(struct irq_data *d) |
668 | { |
669 | struct irq_desc *desc = irq_to_desc(d->irq); |
670 | |
671 | if (WARN_ON(!gic_supports_nmi())) |
672 | return; |
673 | |
674 | if (gic_peek_irq(d, GICD_ISENABLER)) { |
675 | pr_err("Cannot set NMI property of enabled IRQ %u\n", d->irq); |
676 | return; |
677 | } |
678 | |
679 | /* |
680 | * A secondary irq_chip should be in charge of LPI request, |
681 | * it should not be possible to get there |
682 | */ |
683 | if (WARN_ON(irqd_to_hwirq(d) >= 8192)) |
684 | return; |
685 | |
686 | /* desc lock should already be held */ |
687 | if (gic_irq_in_rdist(d)) { |
688 | u32 idx = gic_get_rdist_index(d); |
689 | |
690 | /* Tearing down NMI, only switch handler for last NMI */ |
691 | if (refcount_dec_and_test(&rdist_nmi_refs[idx])) |
692 | desc->handle_irq = handle_percpu_devid_irq; |
693 | } else { |
694 | desc->handle_irq = handle_fasteoi_irq; |
695 | } |
696 | |
697 | gic_irq_set_prio(d, dist_prio_irq); |
698 | } |
699 | |
700 | static bool gic_arm64_erratum_2941627_needed(struct irq_data *d) |
701 | { |
702 | enum gic_intid_range range; |
703 | |
704 | if (!static_branch_unlikely(&gic_arm64_2941627_erratum)) |
705 | return false; |
706 | |
707 | range = get_intid_range(d); |
708 | |
709 | /* |
710 | * The workaround is needed if the IRQ is an SPI and |
711 | * the target cpu is different from the one we are |
712 | * executing on. |
713 | */ |
714 | return (range == SPI_RANGE || range == ESPI_RANGE) && |
715 | !cpumask_test_cpu(raw_smp_processor_id(), |
716 | irq_data_get_effective_affinity_mask(d)); |
717 | } |
718 | |
719 | static void gic_eoi_irq(struct irq_data *d) |
720 | { |
721 | write_gicreg(irqd_to_hwirq(d), ICC_EOIR1_EL1); |
722 | isb(); |
723 | |
724 | if (gic_arm64_erratum_2941627_needed(d)) { |
725 | /* |
726 | * Make sure the GIC stream deactivate packet |
727 | * issued by ICC_EOIR1_EL1 has completed before |
728 | * deactivating through GICD_IACTIVER. |
729 | */ |
730 | dsb(sy); |
731 | gic_poke_irq(d, GICD_ICACTIVER); |
732 | } |
733 | } |
734 | |
735 | static void gic_eoimode1_eoi_irq(struct irq_data *d) |
736 | { |
737 | /* |
738 | * No need to deactivate an LPI, or an interrupt that |
739 | * is getting forwarded to a vcpu. |
740 | */ |
741 | if (irqd_to_hwirq(d) >= 8192 || irqd_is_forwarded_to_vcpu(d)) |
742 | return; |
743 | |
744 | if (!gic_arm64_erratum_2941627_needed(d)) |
745 | gic_write_dir(irqd_to_hwirq(d)); |
746 | else |
747 | gic_poke_irq(d, GICD_ICACTIVER); |
748 | } |
749 | |
750 | static int gic_set_type(struct irq_data *d, unsigned int type) |
751 | { |
752 | irq_hw_number_t irq = irqd_to_hwirq(d); |
753 | enum gic_intid_range range; |
754 | void __iomem *base; |
755 | u32 offset, index; |
756 | int ret; |
757 | |
758 | range = get_intid_range(d); |
759 | |
760 | /* Interrupt configuration for SGIs can't be changed */ |
761 | if (range == SGI_RANGE) |
762 | return type != IRQ_TYPE_EDGE_RISING ? -EINVAL : 0; |
763 | |
764 | /* SPIs have restrictions on the supported types */ |
765 | if ((range == SPI_RANGE || range == ESPI_RANGE) && |
766 | type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) |
767 | return -EINVAL; |
768 | |
769 | if (gic_irq_in_rdist(d)) |
770 | base = gic_data_rdist_sgi_base(); |
771 | else |
772 | base = gic_dist_base_alias(d); |
773 | |
774 | offset = convert_offset_index(d, GICD_ICFGR, &index); |
775 | |
776 | ret = gic_configure_irq(index, type, base + offset); |
777 | if (ret && (range == PPI_RANGE || range == EPPI_RANGE)) { |
778 | /* Misconfigured PPIs are usually not fatal */ |
779 | pr_warn("GIC: PPI INTID%ld is secure or misconfigured\n", irq); |
780 | ret = 0; |
781 | } |
782 | |
783 | return ret; |
784 | } |
785 | |
786 | static int gic_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu) |
787 | { |
788 | if (get_intid_range(d) == SGI_RANGE) |
789 | return -EINVAL; |
790 | |
791 | if (vcpu) |
792 | irqd_set_forwarded_to_vcpu(d); |
793 | else |
794 | irqd_clr_forwarded_to_vcpu(d); |
795 | return 0; |
796 | } |
797 | |
798 | static u64 gic_cpu_to_affinity(int cpu) |
799 | { |
800 | u64 mpidr = cpu_logical_map(cpu); |
801 | u64 aff; |
802 | |
803 | /* ASR8601 needs to have its affinities shifted down... */ |
804 | if (unlikely(gic_data.flags & FLAGS_WORKAROUND_ASR_ERRATUM_8601001)) |
805 | mpidr = (MPIDR_AFFINITY_LEVEL(mpidr, 1) | |
806 | (MPIDR_AFFINITY_LEVEL(mpidr, 2) << 8)); |
807 | |
808 | aff = ((u64)MPIDR_AFFINITY_LEVEL(mpidr, 3) << 32 | |
809 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | |
810 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | |
811 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); |
812 | |
813 | return aff; |
814 | } |
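/*
 * Example of the packing above (illustrative MPIDR): affinity levels
 * Aff3/Aff2/Aff1/Aff0 of 0/1/2/3 yield aff == (1 << 16) | (2 << 8) | 3
 * == 0x010203, which is the layout written to GICD_IROUTERn by
 * gic_dist_init() and gic_set_affinity().
 */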
815 | |
816 | static void gic_deactivate_unhandled(u32 irqnr) |
817 | { |
818 | if (static_branch_likely(&supports_deactivate_key)) { |
819 | if (irqnr < 8192) |
820 | gic_write_dir(irqnr); |
821 | } else { |
822 | write_gicreg(irqnr, ICC_EOIR1_EL1); |
823 | isb(); |
824 | } |
825 | } |
826 | |
827 | /* |
828 | * Follow a read of the IAR with any HW maintenance that needs to happen prior |
829 | * to invoking the relevant IRQ handler. We must do two things: |
830 | * |
831 | * (1) Ensure instruction ordering between a read of IAR and subsequent |
832 | * instructions in the IRQ handler using an ISB. |
833 | * |
834 | * It is possible for the IAR to report an IRQ which was signalled *after* |
835 | * the CPU took an IRQ exception as multiple interrupts can race to be |
836 | * recognized by the GIC, earlier interrupts could be withdrawn, and/or |
837 | * later interrupts could be prioritized by the GIC. |
838 | * |
839 | * For devices which are tightly coupled to the CPU, such as PMUs, a |
840 | * context synchronization event is necessary to ensure that system |
841 | * register state is not stale, as these may have been indirectly written |
842 | * *after* exception entry. |
843 | * |
844 | * (2) Execute an interrupt priority drop when EOI mode 1 is in use. |
845 | */ |
846 | static inline void gic_complete_ack(u32 irqnr) |
847 | { |
848 | if (static_branch_likely(&supports_deactivate_key)) |
849 | write_gicreg(irqnr, ICC_EOIR1_EL1); |
850 | |
851 | isb(); |
852 | } |
853 | |
854 | static bool gic_rpr_is_nmi_prio(void) |
855 | { |
856 | if (!gic_supports_nmi()) |
857 | return false; |
858 | |
859 | return unlikely(gic_read_rpr() == GICV3_PRIO_NMI); |
860 | } |
861 | |
862 | static bool gic_irqnr_is_special(u32 irqnr) |
863 | { |
864 | return irqnr >= 1020 && irqnr <= 1023; |
865 | } |
866 | |
867 | static void __gic_handle_irq(u32 irqnr, struct pt_regs *regs) |
868 | { |
869 | if (gic_irqnr_is_special(irqnr)) |
870 | return; |
871 | |
872 | gic_complete_ack(irqnr); |
873 | |
874 | if (generic_handle_domain_irq(gic_data.domain, irqnr)) { |
875 | WARN_ONCE(true, "Unexpected interrupt (irqnr %u)\n", irqnr); |
876 | gic_deactivate_unhandled(irqnr); |
877 | } |
878 | } |
879 | |
880 | static void __gic_handle_nmi(u32 irqnr, struct pt_regs *regs) |
881 | { |
882 | if (gic_irqnr_is_special(irqnr)) |
883 | return; |
884 | |
885 | gic_complete_ack(irqnr); |
886 | |
887 | if (generic_handle_domain_nmi(gic_data.domain, irqnr)) { |
888 | WARN_ONCE(true, "Unexpected pseudo-NMI (irqnr %u)\n", irqnr); |
889 | gic_deactivate_unhandled(irqnr); |
890 | } |
891 | } |
892 | |
893 | /* |
894 | * An exception has been taken from a context with IRQs enabled, and this could |
895 | * be an IRQ or an NMI. |
896 | * |
897 | * The entry code called us with DAIF.IF set to keep NMIs masked. We must clear |
898 | * DAIF.IF (and update ICC_PMR_EL1 to mask regular IRQs) prior to returning, |
899 | * after handling any NMI but before handling any IRQ. |
900 | * |
901 | * The entry code has performed IRQ entry, and if an NMI is detected we must |
902 | * perform NMI entry/exit around invoking the handler. |
903 | */ |
904 | static void __gic_handle_irq_from_irqson(struct pt_regs *regs) |
905 | { |
906 | bool is_nmi; |
907 | u32 irqnr; |
908 | |
909 | irqnr = gic_read_iar(); |
910 | |
911 | is_nmi = gic_rpr_is_nmi_prio(); |
912 | |
913 | if (is_nmi) { |
914 | nmi_enter(); |
915 | __gic_handle_nmi(irqnr, regs); |
916 | nmi_exit(); |
917 | } |
918 | |
919 | if (gic_prio_masking_enabled()) { |
920 | gic_pmr_mask_irqs(); |
921 | gic_arch_enable_irqs(); |
922 | } |
923 | |
924 | if (!is_nmi) |
925 | __gic_handle_irq(irqnr, regs); |
926 | } |
927 | |
928 | /* |
929 | * An exception has been taken from a context with IRQs disabled, which can only |
930 | * be an NMI. |
931 | * |
932 | * The entry code called us with DAIF.IF set to keep NMIs masked. We must leave |
933 | * DAIF.IF (and ICC_PMR_EL1) unchanged. |
934 | * |
935 | * The entry code has performed NMI entry. |
936 | */ |
937 | static void __gic_handle_irq_from_irqsoff(struct pt_regs *regs) |
938 | { |
939 | u64 pmr; |
940 | u32 irqnr; |
941 | |
942 | /* |
943 | * We were in a context with IRQs disabled. However, the |
944 | * entry code has set PMR to a value that allows any |
945 | * interrupt to be acknowledged, and not just NMIs. This can |
946 | * lead to surprising effects if the NMI has been retired in |
947 | * the meantime and an IRQ is pending. The IRQ |
948 | * would then be taken in NMI context, something that nobody |
949 | * wants to debug twice. |
950 | * |
951 | * Until we sort this, drop PMR again to a level that will |
952 | * actually only allow NMIs before reading IAR, and then |
953 | * restore it to what it was. |
954 | */ |
955 | pmr = gic_read_pmr(); |
956 | gic_pmr_mask_irqs(); |
957 | isb(); |
958 | irqnr = gic_read_iar(); |
959 | gic_write_pmr(pmr); |
960 | |
961 | __gic_handle_nmi(irqnr, regs); |
962 | } |
963 | |
964 | static void __exception_irq_entry gic_handle_irq(struct pt_regs *regs) |
965 | { |
966 | if (unlikely(gic_supports_nmi() && !interrupts_enabled(regs))) |
967 | __gic_handle_irq_from_irqsoff(regs); |
968 | else |
969 | __gic_handle_irq_from_irqson(regs); |
970 | } |
971 | |
972 | static void __init gic_dist_init(void) |
973 | { |
974 | unsigned int i; |
975 | u64 affinity; |
976 | void __iomem *base = gic_data.dist_base; |
977 | u32 val; |
978 | |
979 | /* Disable the distributor */ |
980 | writel_relaxed(0, base + GICD_CTLR); |
981 | gic_dist_wait_for_rwp(); |
982 | |
983 | /* |
984 | * Configure SPIs as non-secure Group-1. This will only matter |
985 | * if the GIC only has a single security state. This will not |
986 | * do the right thing if the kernel is running in secure mode, |
987 | * but that's not the intended use case anyway. |
988 | */ |
989 | for (i = 32; i < GIC_LINE_NR; i += 32) |
990 | writel_relaxed(~0, base + GICD_IGROUPR + i / 8); |
991 | |
992 | /* Extended SPI range, not handled by the GICv2/GICv3 common code */ |
993 | for (i = 0; i < GIC_ESPI_NR; i += 32) { |
994 | writel_relaxed(~0U, base + GICD_ICENABLERnE + i / 8); |
995 | writel_relaxed(~0U, base + GICD_ICACTIVERnE + i / 8); |
996 | } |
997 | |
998 | for (i = 0; i < GIC_ESPI_NR; i += 32) |
999 | writel_relaxed(~0U, base + GICD_IGROUPRnE + i / 8); |
1000 | |
1001 | for (i = 0; i < GIC_ESPI_NR; i += 16) |
1002 | writel_relaxed(0, base + GICD_ICFGRnE + i / 4); |
1003 | |
1004 | for (i = 0; i < GIC_ESPI_NR; i += 4) |
1005 | writel_relaxed(REPEAT_BYTE_U32(dist_prio_irq), |
1006 | base + GICD_IPRIORITYRnE + i); |
1007 | |
1008 | /* Now do the common stuff */ |
1009 | gic_dist_config(base, GIC_LINE_NR, dist_prio_irq); |
1010 | |
1011 | val = GICD_CTLR_ARE_NS | GICD_CTLR_ENABLE_G1A | GICD_CTLR_ENABLE_G1; |
1012 | if (gic_data.rdists.gicd_typer2 & GICD_TYPER2_nASSGIcap) { |
1013 | pr_info("Enabling SGIs without active state\n"); |
1014 | val |= GICD_CTLR_nASSGIreq; |
1015 | } |
1016 | |
1017 | /* Enable distributor with ARE, Group1, and wait for it to drain */ |
1018 | writel_relaxed(val, base + GICD_CTLR); |
1019 | gic_dist_wait_for_rwp(); |
1020 | |
1021 | /* |
1022 | * Set all global interrupts to the boot CPU only. ARE must be |
1023 | * enabled. |
1024 | */ |
1025 | affinity = gic_cpu_to_affinity(smp_processor_id()); |
1026 | for (i = 32; i < GIC_LINE_NR; i++) |
1027 | gic_write_irouter(affinity, base + GICD_IROUTER + i * 8); |
1028 | |
1029 | for (i = 0; i < GIC_ESPI_NR; i++) |
1030 | gic_write_irouter(affinity, base + GICD_IROUTERnE + i * 8); |
1031 | } |
1032 | |
1033 | static int gic_iterate_rdists(int (*fn)(struct redist_region *, void __iomem *)) |
1034 | { |
1035 | int ret = -ENODEV; |
1036 | int i; |
1037 | |
1038 | for (i = 0; i < gic_data.nr_redist_regions; i++) { |
1039 | void __iomem *ptr = gic_data.redist_regions[i].redist_base; |
1040 | u64 typer; |
1041 | u32 reg; |
1042 | |
1043 | reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; |
1044 | if (reg != GIC_PIDR2_ARCH_GICv3 && |
1045 | reg != GIC_PIDR2_ARCH_GICv4) { /* We're in trouble... */ |
1046 | pr_warn("No redistributor present @%p\n", ptr); |
1047 | break; |
1048 | } |
1049 | |
1050 | do { |
1051 | typer = gic_read_typer(ptr + GICR_TYPER); |
1052 | ret = fn(gic_data.redist_regions + i, ptr); |
1053 | if (!ret) |
1054 | return 0; |
1055 | |
1056 | if (gic_data.redist_regions[i].single_redist) |
1057 | break; |
1058 | |
1059 | if (gic_data.redist_stride) { |
1060 | ptr += gic_data.redist_stride; |
1061 | } else { |
1062 | ptr += SZ_64K * 2; /* Skip RD_base + SGI_base */ |
1063 | if (typer & GICR_TYPER_VLPIS) |
1064 | ptr += SZ_64K * 2; /* Skip VLPI_base + reserved page */ |
1065 | } |
1066 | } while (!(typer & GICR_TYPER_LAST)); |
1067 | } |
1068 | |
1069 | return ret ? -ENODEV : 0; |
1070 | } |
1071 | |
1072 | static int __gic_populate_rdist(struct redist_region *region, void __iomem *ptr) |
1073 | { |
1074 | unsigned long mpidr; |
1075 | u64 typer; |
1076 | u32 aff; |
1077 | |
1078 | /* |
1079 | * Convert affinity to a 32bit value that can be matched to |
1080 | * GICR_TYPER bits [63:32]. |
1081 | */ |
1082 | mpidr = gic_cpu_to_affinity(smp_processor_id()); |
1083 | |
1084 | aff = (MPIDR_AFFINITY_LEVEL(mpidr, 3) << 24 | |
1085 | MPIDR_AFFINITY_LEVEL(mpidr, 2) << 16 | |
1086 | MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | |
1087 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); |
1088 | |
1089 | typer = gic_read_typer(ptr + GICR_TYPER); |
1090 | if ((typer >> 32) == aff) { |
1091 | u64 offset = ptr - region->redist_base; |
1092 | raw_spin_lock_init(&gic_data_rdist()->rd_lock); |
1093 | gic_data_rdist_rd_base() = ptr; |
1094 | gic_data_rdist()->phys_base = region->phys_base + offset; |
1095 | |
1096 | pr_info("CPU%d: found redistributor %lx region %d:%pa\n", |
1097 | smp_processor_id(), mpidr, |
1098 | (int)(region - gic_data.redist_regions), |
1099 | &gic_data_rdist()->phys_base); |
1100 | return 0; |
1101 | } |
1102 | |
1103 | /* Try next one */ |
1104 | return 1; |
1105 | } |
1106 | |
1107 | static int gic_populate_rdist(void) |
1108 | { |
1109 | if (gic_iterate_rdists(__gic_populate_rdist) == 0) |
1110 | return 0; |
1111 | |
1112 | /* We couldn't even deal with ourselves... */ |
1113 | WARN(true, "CPU%d: mpidr %lx has no re-distributor!\n", |
1114 | smp_processor_id(), |
1115 | (unsigned long)cpu_logical_map(smp_processor_id())); |
1116 | return -ENODEV; |
1117 | } |
1118 | |
1119 | static int __gic_update_rdist_properties(struct redist_region *region, |
1120 | void __iomem *ptr) |
1121 | { |
1122 | u64 typer = gic_read_typer(ptr + GICR_TYPER); |
1123 | u32 ctlr = readl_relaxed(ptr + GICR_CTLR); |
1124 | |
1125 | /* Boot-time cleanup */ |
1126 | if ((typer & GICR_TYPER_VLPIS) && (typer & GICR_TYPER_RVPEID)) { |
1127 | u64 val; |
1128 | |
1129 | /* Deactivate any present vPE */ |
1130 | val = gicr_read_vpendbaser(ptr + SZ_128K + GICR_VPENDBASER); |
1131 | if (val & GICR_VPENDBASER_Valid) |
1132 | gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, |
1133 | ptr + SZ_128K + GICR_VPENDBASER); |
1134 | |
1135 | /* Mark the VPE table as invalid */ |
1136 | val = gicr_read_vpropbaser(ptr + SZ_128K + GICR_VPROPBASER); |
1137 | val &= ~GICR_VPROPBASER_4_1_VALID; |
1138 | gicr_write_vpropbaser(val, ptr + SZ_128K + GICR_VPROPBASER); |
1139 | } |
1140 | |
1141 | gic_data.rdists.has_vlpis &= !!(typer & GICR_TYPER_VLPIS); |
1142 | |
1143 | /* |
1144 | * TYPER.RVPEID implies some form of DirectLPI, no matter what the |
1145 | * doc says... :-/ And CTLR.IR implies another subset of DirectLPI |
1146 | * that the ITS driver can make use of for LPIs (and not VLPIs). |
1147 | * |
1148 | * These are 3 different ways to express the same thing, depending |
1149 | * on the revision of the architecture and its relaxations over |
1150 | * time. Just group them under the 'direct_lpi' banner. |
1151 | */ |
1152 | gic_data.rdists.has_rvpeid &= !!(typer & GICR_TYPER_RVPEID); |
1153 | gic_data.rdists.has_direct_lpi &= (!!(typer & GICR_TYPER_DirectLPIS) | |
1154 | !!(ctlr & GICR_CTLR_IR) | |
1155 | gic_data.rdists.has_rvpeid); |
1156 | gic_data.rdists.has_vpend_valid_dirty &= !!(typer & GICR_TYPER_DIRTY); |
1157 | |
1158 | /* Detect non-sensical configurations */ |
1159 | if (WARN_ON_ONCE(gic_data.rdists.has_rvpeid && !gic_data.rdists.has_vlpis)) { |
1160 | gic_data.rdists.has_direct_lpi = false; |
1161 | gic_data.rdists.has_vlpis = false; |
1162 | gic_data.rdists.has_rvpeid = false; |
1163 | } |
1164 | |
1165 | gic_data.ppi_nr = min(GICR_TYPER_NR_PPIS(typer), gic_data.ppi_nr); |
1166 | |
1167 | return 1; |
1168 | } |
1169 | |
1170 | static void gic_update_rdist_properties(void) |
1171 | { |
1172 | gic_data.ppi_nr = UINT_MAX; |
1173 | gic_iterate_rdists(__gic_update_rdist_properties); |
1174 | if (WARN_ON(gic_data.ppi_nr == UINT_MAX)) |
1175 | gic_data.ppi_nr = 0; |
1176 | pr_info("GICv3 features: %d PPIs%s%s\n", |
1177 | gic_data.ppi_nr, |
1178 | gic_data.has_rss ? ", RSS": "", |
1179 | gic_data.rdists.has_direct_lpi ? ", DirectLPI": ""); |
1180 | |
1181 | if (gic_data.rdists.has_vlpis) |
1182 | pr_info("GICv4 features: %s%s%s\n", |
1183 | gic_data.rdists.has_direct_lpi ? "DirectLPI ": "", |
1184 | gic_data.rdists.has_rvpeid ? "RVPEID ": "", |
1185 | gic_data.rdists.has_vpend_valid_dirty ? "Valid+Dirty ": ""); |
1186 | } |
1187 | |
1188 | static void gic_cpu_sys_reg_enable(void) |
1189 | { |
1190 | /* |
1191 | * Need to check that the SRE bit has actually been set. If |
1192 | * not, it means that SRE is disabled at EL2. We're going to |
1193 | * die painfully, and there is nothing we can do about it. |
1194 | * |
1195 | * Kindly inform the luser. |
1196 | */ |
1197 | if (!gic_enable_sre()) |
1198 | pr_err("GIC: unable to set SRE (disabled at EL2), panic ahead\n"); |
1199 | |
1200 | } |
1201 | |
1202 | static void gic_cpu_sys_reg_init(void) |
1203 | { |
1204 | int i, cpu = smp_processor_id(); |
1205 | u64 mpidr = gic_cpu_to_affinity(cpu); |
1206 | u64 need_rss = MPIDR_RS(mpidr); |
1207 | bool group0; |
1208 | u32 pribits; |
1209 | |
1210 | pribits = gic_get_pribits(); |
1211 | |
1212 | group0 = gic_has_group0(); |
1213 | |
1214 | /* Set priority mask register */ |
1215 | if (!gic_prio_masking_enabled()) { |
1216 | write_gicreg(DEFAULT_PMR_VALUE, ICC_PMR_EL1); |
1217 | } else if (gic_supports_nmi()) { |
1218 | /* |
1219 | * Check that all CPUs use the same priority space. |
1220 | * |
1221 | * If there's a mismatch with the boot CPU, the system is |
1222 | * likely to die as interrupt masking will not work properly on |
1223 | * all CPUs. |
1224 | */ |
1225 | WARN_ON(group0 != cpus_have_group0); |
1226 | WARN_ON(gic_dist_security_disabled() != cpus_have_security_disabled); |
1227 | } |
1228 | |
1229 | /* |
1230 | * Some firmwares hand over to the kernel with the BPR changed from |
1231 | * its reset value (and with a value large enough to prevent |
1232 | * any pre-emptive interrupts from working at all). Writing a zero |
1233 | * to BPR restores its reset value. |
1234 | */ |
1235 | gic_write_bpr1(0); |
1236 | |
1237 | if (static_branch_likely(&supports_deactivate_key)) { |
1238 | /* EOI drops priority only (mode 1) */ |
1239 | gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop); |
1240 | } else { |
1241 | /* EOI deactivates interrupt too (mode 0) */ |
1242 | gic_write_ctlr(ICC_CTLR_EL1_EOImode_drop_dir); |
1243 | } |
1244 | |
1245 | /* Always whack Group0 before Group1 */ |
1246 | if (group0) { |
1247 | switch(pribits) { |
1248 | case 8: |
1249 | case 7: |
1250 | write_gicreg(0, ICC_AP0R3_EL1); |
1251 | write_gicreg(0, ICC_AP0R2_EL1); |
1252 | fallthrough; |
1253 | case 6: |
1254 | write_gicreg(0, ICC_AP0R1_EL1); |
1255 | fallthrough; |
1256 | case 5: |
1257 | case 4: |
1258 | write_gicreg(0, ICC_AP0R0_EL1); |
1259 | } |
1260 | |
1261 | isb(); |
1262 | } |
1263 | |
1264 | switch(pribits) { |
1265 | case 8: |
1266 | case 7: |
1267 | write_gicreg(0, ICC_AP1R3_EL1); |
1268 | write_gicreg(0, ICC_AP1R2_EL1); |
1269 | fallthrough; |
1270 | case 6: |
1271 | write_gicreg(0, ICC_AP1R1_EL1); |
1272 | fallthrough; |
1273 | case 5: |
1274 | case 4: |
1275 | write_gicreg(0, ICC_AP1R0_EL1); |
1276 | } |
1277 | |
1278 | isb(); |
1279 | |
1280 | /* ... and let's hit the road... */ |
1281 | gic_write_grpen1(1); |
1282 | |
1283 | /* Keep the RSS capability status in per_cpu variable */ |
1284 | per_cpu(has_rss, cpu) = !!(gic_read_ctlr() & ICC_CTLR_EL1_RSS); |
1285 | |
1286 | /* Check that all CPUs are capable of sending SGIs to other CPUs */ |
1287 | for_each_online_cpu(i) { |
1288 | bool have_rss = per_cpu(has_rss, i) && per_cpu(has_rss, cpu); |
1289 | |
1290 | need_rss |= MPIDR_RS(gic_cpu_to_affinity(i)); |
1291 | if (need_rss && (!have_rss)) |
1292 | pr_crit("CPU%d (%lx) can't SGI CPU%d (%lx), no RSS\n", |
1293 | cpu, (unsigned long)mpidr, |
1294 | i, (unsigned long)gic_cpu_to_affinity(i)); |
1295 | } |
1296 | |
1297 | /* |
1298 | * GIC spec says, when ICC_CTLR_EL1.RSS==1 and GICD_TYPER.RSS==0, |
1299 | * writing ICC_ASGI1R_EL1 register with RS != 0 is a CONSTRAINED |
1300 | * UNPREDICTABLE choice of : |
1301 | * - The write is ignored. |
1302 | * - The RS field is treated as 0. |
1303 | */ |
1304 | if (need_rss && (!gic_data.has_rss)) |
1305 | pr_crit_once("RSS is required but GICD doesn't support it\n"); |
1306 | } |
1307 | |
1308 | static bool gicv3_nolpi; |
1309 | |
1310 | static int __init gicv3_nolpi_cfg(char *buf) |
1311 | { |
1312 | return kstrtobool(buf, &gicv3_nolpi); |
1313 | } |
1314 | early_param("irqchip.gicv3_nolpi", gicv3_nolpi_cfg); |
1315 | |
1316 | static int gic_dist_supports_lpis(void) |
1317 | { |
1318 | return (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && |
1319 | !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS) && |
1320 | !gicv3_nolpi); |
1321 | } |
1322 | |
1323 | static void gic_cpu_init(void) |
1324 | { |
1325 | void __iomem *rbase; |
1326 | int i; |
1327 | |
1328 | /* Register ourselves with the rest of the world */ |
1329 | if (gic_populate_rdist()) |
1330 | return; |
1331 | |
1332 | gic_enable_redist(true); |
1333 | |
1334 | WARN((gic_data.ppi_nr > 16 || GIC_ESPI_NR != 0) && |
1335 | !(gic_read_ctlr() & ICC_CTLR_EL1_ExtRange), |
1336 | "Distributor has extended ranges, but CPU%d doesn't\n", |
1337 | smp_processor_id()); |
1338 | |
1339 | rbase = gic_data_rdist_sgi_base(); |
1340 | |
1341 | /* Configure SGIs/PPIs as non-secure Group-1 */ |
1342 | for (i = 0; i < gic_data.ppi_nr + SGI_NR; i += 32) |
1343 | writel_relaxed(~0, rbase + GICR_IGROUPR0 + i / 8); |
1344 | |
1345 | gic_cpu_config(rbase, gic_data.ppi_nr + SGI_NR, dist_prio_irq); |
1346 | gic_redist_wait_for_rwp(); |
1347 | |
1348 | /* initialise system registers */ |
1349 | gic_cpu_sys_reg_init(); |
1350 | } |
1351 | |
1352 | #ifdef CONFIG_SMP |
1353 | |
1354 | #define MPIDR_TO_SGI_RS(mpidr) (MPIDR_RS(mpidr) << ICC_SGI1R_RS_SHIFT) |
1355 | #define MPIDR_TO_SGI_CLUSTER_ID(mpidr) ((mpidr) & ~0xFUL) |
1356 | |
1357 | /* |
1358 | * gic_starting_cpu() is called after the last point where cpuhp is allowed |
1359 | * to fail. So pre check for problems earlier. |
1360 | */ |
1361 | static int gic_check_rdist(unsigned int cpu) |
1362 | { |
1363 | if (cpumask_test_cpu(cpu, &broken_rdists)) |
1364 | return -EINVAL; |
1365 | |
1366 | return 0; |
1367 | } |
1368 | |
1369 | static int gic_starting_cpu(unsigned int cpu) |
1370 | { |
1371 | gic_cpu_sys_reg_enable(); |
1372 | gic_cpu_init(); |
1373 | |
1374 | if (gic_dist_supports_lpis()) |
1375 | its_cpu_init(); |
1376 | |
1377 | return 0; |
1378 | } |
1379 | |
1380 | static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, |
1381 | unsigned long cluster_id) |
1382 | { |
1383 | int next_cpu, cpu = *base_cpu; |
1384 | unsigned long mpidr; |
1385 | u16 tlist = 0; |
1386 | |
1387 | mpidr = gic_cpu_to_affinity(cpu); |
1388 | |
1389 | while (cpu < nr_cpu_ids) { |
1390 | tlist |= 1 << (mpidr & 0xf); |
1391 | |
1392 | next_cpu = cpumask_next(cpu, mask); |
1393 | if (next_cpu >= nr_cpu_ids) |
1394 | goto out; |
1395 | cpu = next_cpu; |
1396 | |
1397 | mpidr = gic_cpu_to_affinity(cpu); |
1398 | |
1399 | if (cluster_id != MPIDR_TO_SGI_CLUSTER_ID(mpidr)) { |
1400 | cpu--; |
1401 | goto out; |
1402 | } |
1403 | } |
1404 | out: |
1405 | *base_cpu = cpu; |
1406 | return tlist; |
1407 | } |
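/*
 * Example (illustrative): if the cpumask covers three CPUs of the same
 * cluster whose MPIDR Aff0 values are 0, 1 and 3, the returned target
 * list is 0b1011, and *base_cpu is left on the last CPU consumed so the
 * caller's for_each_cpu() loop continues with the next cluster.
 */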
1408 | |
1409 | #define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ |
1410 | (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ |
1411 | << ICC_SGI1R_AFFINITY_## level ##_SHIFT) |
1412 | |
1413 | static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) |
1414 | { |
1415 | u64 val; |
1416 | |
1417 | val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | |
1418 | MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | |
1419 | irq << ICC_SGI1R_SGI_ID_SHIFT | |
1420 | MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | |
1421 | MPIDR_TO_SGI_RS(cluster_id) | |
1422 | tlist << ICC_SGI1R_TARGET_LIST_SHIFT); |
1423 | |
1424 | pr_devel("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); |
1425 | gic_write_sgi1r(val); |
1426 | } |
1427 | |
1428 | static void gic_ipi_send_mask(struct irq_data *d, const struct cpumask *mask) |
1429 | { |
1430 | int cpu; |
1431 | |
1432 | if (WARN_ON(d->hwirq >= 16)) |
1433 | return; |
1434 | |
1435 | /* |
1436 | * Ensure that stores to Normal memory are visible to the |
1437 | * other CPUs before issuing the IPI. |
1438 | */ |
1439 | dsb(ishst); |
1440 | |
1441 | for_each_cpu(cpu, mask) { |
1442 | u64 cluster_id = MPIDR_TO_SGI_CLUSTER_ID(gic_cpu_to_affinity(cpu)); |
1443 | u16 tlist; |
1444 | |
1445 | tlist = gic_compute_target_list(&cpu, mask, cluster_id); |
1446 | gic_send_sgi(cluster_id, tlist, d->hwirq); |
1447 | } |
1448 | |
1449 | /* Force the above writes to ICC_SGI1R_EL1 to be executed */ |
1450 | isb(); |
1451 | } |
1452 | |
1453 | static void __init gic_smp_init(void) |
1454 | { |
1455 | struct irq_fwspec sgi_fwspec = { |
1456 | .fwnode = gic_data.fwnode, |
1457 | .param_count = 1, |
1458 | }; |
1459 | int base_sgi; |
1460 | |
1461 | cpuhp_setup_state_nocalls(CPUHP_BP_PREPARE_DYN, |
1462 | "irqchip/arm/gicv3:checkrdist", |
1463 | gic_check_rdist, NULL); |
1464 | |
1465 | cpuhp_setup_state_nocalls(CPUHP_AP_IRQ_GIC_STARTING, |
1466 | "irqchip/arm/gicv3:starting", |
1467 | gic_starting_cpu, NULL); |
1468 | |
1469 | /* Register all 8 non-secure SGIs */ |
1470 | base_sgi = irq_domain_alloc_irqs(gic_data.domain, 8, NUMA_NO_NODE, &sgi_fwspec); |
1471 | if (WARN_ON(base_sgi <= 0)) |
1472 | return; |
1473 | |
1474 | set_smp_ipi_range(base_sgi, 8); |
1475 | } |
1476 | |
1477 | static int gic_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
1478 | bool force) |
1479 | { |
1480 | unsigned int cpu; |
1481 | u32 offset, index; |
1482 | void __iomem *reg; |
1483 | int enabled; |
1484 | u64 val; |
1485 | |
1486 | if (force) |
1487 | cpu = cpumask_first(mask_val); |
1488 | else |
1489 | cpu = cpumask_any_and(mask_val, cpu_online_mask); |
1490 | |
1491 | if (cpu >= nr_cpu_ids) |
1492 | return -EINVAL; |
1493 | |
1494 | if (gic_irq_in_rdist(d)) |
1495 | return -EINVAL; |
1496 | |
1497 | /* If interrupt was enabled, disable it first */ |
1498 | enabled = gic_peek_irq(d, GICD_ISENABLER); |
1499 | if (enabled) |
1500 | gic_mask_irq(d); |
1501 | |
1502 | offset = convert_offset_index(d, GICD_IROUTER, &index); |
1503 | reg = gic_dist_base(d) + offset + (index * 8); |
1504 | val = gic_cpu_to_affinity(cpu); |
1505 | |
1506 | gic_write_irouter(val, reg); |
1507 | |
1508 | /* |
1509 | * If the interrupt was enabled, enable it again. Otherwise, |
1510 | * just wait for the distributor to have digested our changes. |
1511 | */ |
1512 | if (enabled) |
1513 | gic_unmask_irq(d); |
1514 | |
1515 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
1516 | |
1517 | return IRQ_SET_MASK_OK_DONE; |
1518 | } |
1519 | #else |
1520 | #define gic_set_affinity NULL |
1521 | #define gic_ipi_send_mask NULL |
1522 | #define gic_smp_init() do { } while(0) |
1523 | #endif |
1524 | |
1525 | static int gic_retrigger(struct irq_data *data) |
1526 | { |
1527 | return !gic_irq_set_irqchip_state(data, IRQCHIP_STATE_PENDING, true); |
1528 | } |
1529 | |
1530 | #ifdef CONFIG_CPU_PM |
1531 | static int gic_cpu_pm_notifier(struct notifier_block *self, |
1532 | unsigned long cmd, void *v) |
1533 | { |
1534 | if (cmd == CPU_PM_EXIT || cmd == CPU_PM_ENTER_FAILED) { |
1535 | if (gic_dist_security_disabled()) |
1536 | gic_enable_redist(true); |
1537 | gic_cpu_sys_reg_enable(); |
1538 | gic_cpu_sys_reg_init(); |
1539 | } else if (cmd == CPU_PM_ENTER && gic_dist_security_disabled()) { |
1540 | gic_write_grpen1(0); |
1541 | gic_enable_redist(false); |
1542 | } |
1543 | return NOTIFY_OK; |
1544 | } |
1545 | |
1546 | static struct notifier_block gic_cpu_pm_notifier_block = { |
1547 | .notifier_call = gic_cpu_pm_notifier, |
1548 | }; |
1549 | |
1550 | static void gic_cpu_pm_init(void) |
1551 | { |
1552 | cpu_pm_register_notifier(&gic_cpu_pm_notifier_block); |
1553 | } |
1554 | |
1555 | #else |
1556 | static inline void gic_cpu_pm_init(void) { } |
1557 | #endif /* CONFIG_CPU_PM */ |
1558 | |
1559 | static struct irq_chip gic_chip = { |
1560 | .name = "GICv3", |
1561 | .irq_mask = gic_mask_irq, |
1562 | .irq_unmask = gic_unmask_irq, |
1563 | .irq_eoi = gic_eoi_irq, |
1564 | .irq_set_type = gic_set_type, |
1565 | .irq_set_affinity = gic_set_affinity, |
1566 | .irq_retrigger = gic_retrigger, |
1567 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, |
1568 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, |
1569 | .irq_nmi_setup = gic_irq_nmi_setup, |
1570 | .irq_nmi_teardown = gic_irq_nmi_teardown, |
1571 | .ipi_send_mask = gic_ipi_send_mask, |
1572 | .flags = IRQCHIP_SET_TYPE_MASKED | |
1573 | IRQCHIP_SKIP_SET_WAKE | |
1574 | IRQCHIP_MASK_ON_SUSPEND, |
1575 | }; |
1576 | |
1577 | static struct irq_chip gic_eoimode1_chip = { |
1578 | .name = "GICv3", |
1579 | .irq_mask = gic_eoimode1_mask_irq, |
1580 | .irq_unmask = gic_unmask_irq, |
1581 | .irq_eoi = gic_eoimode1_eoi_irq, |
1582 | .irq_set_type = gic_set_type, |
1583 | .irq_set_affinity = gic_set_affinity, |
1584 | .irq_retrigger = gic_retrigger, |
1585 | .irq_get_irqchip_state = gic_irq_get_irqchip_state, |
1586 | .irq_set_irqchip_state = gic_irq_set_irqchip_state, |
1587 | .irq_set_vcpu_affinity = gic_irq_set_vcpu_affinity, |
1588 | .irq_nmi_setup = gic_irq_nmi_setup, |
1589 | .irq_nmi_teardown = gic_irq_nmi_teardown, |
1590 | .ipi_send_mask = gic_ipi_send_mask, |
1591 | .flags = IRQCHIP_SET_TYPE_MASKED | |
1592 | IRQCHIP_SKIP_SET_WAKE | |
1593 | IRQCHIP_MASK_ON_SUSPEND, |
1594 | }; |
1595 | |
1596 | static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, |
1597 | irq_hw_number_t hw) |
1598 | { |
1599 | struct irq_chip *chip = &gic_chip; |
1600 | struct irq_data *irqd = irq_desc_get_irq_data(irq_to_desc(irq)); |
1601 | |
1602 | if (static_branch_likely(&supports_deactivate_key)) |
1603 | chip = &gic_eoimode1_chip; |
1604 | |
1605 | switch (__get_intid_range(hw)) { |
1606 | case SGI_RANGE: |
1607 | case PPI_RANGE: |
1608 | case EPPI_RANGE: |
1609 | irq_set_percpu_devid(irq); |
1610 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
1611 | handle_percpu_devid_irq, NULL, NULL); |
1612 | break; |
1613 | |
1614 | case SPI_RANGE: |
1615 | case ESPI_RANGE: |
1616 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
1617 | handle_fasteoi_irq, NULL, NULL); |
1618 | irq_set_probe(irq); |
1619 | irqd_set_single_target(irqd); |
1620 | break; |
1621 | |
1622 | case LPI_RANGE: |
1623 | if (!gic_dist_supports_lpis()) |
1624 | return -EPERM; |
1625 | irq_domain_set_info(d, irq, hw, chip, d->host_data, |
1626 | handle_fasteoi_irq, NULL, NULL); |
1627 | break; |
1628 | |
1629 | default: |
1630 | return -EPERM; |
1631 | } |
1632 | |
1633 | /* Prevents SW retriggers which mess up the ACK/EOI ordering */ |
1634 | irqd_set_handle_enforce_irqctx(irqd); |
1635 | return 0; |
1636 | } |
1637 | |
1638 | static int gic_irq_domain_translate(struct irq_domain *d, |
1639 | struct irq_fwspec *fwspec, |
1640 | unsigned long *hwirq, |
1641 | unsigned int *type) |
1642 | { |
1643 | if (fwspec->param_count == 1 && fwspec->param[0] < 16) { |
1644 | *hwirq = fwspec->param[0]; |
1645 | *type = IRQ_TYPE_EDGE_RISING; |
1646 | return 0; |
1647 | } |
1648 | |
1649 | if (is_of_node(fwspec->fwnode)) { |
1650 | if (fwspec->param_count < 3) |
1651 | return -EINVAL; |
1652 | |
1653 | switch (fwspec->param[0]) { |
1654 | case 0: /* SPI */ |
1655 | *hwirq = fwspec->param[1] + 32; |
1656 | break; |
1657 | case 1: /* PPI */ |
1658 | *hwirq = fwspec->param[1] + 16; |
1659 | break; |
1660 | case 2: /* ESPI */ |
1661 | *hwirq = fwspec->param[1] + ESPI_BASE_INTID; |
1662 | break; |
1663 | case 3: /* EPPI */ |
1664 | *hwirq = fwspec->param[1] + EPPI_BASE_INTID; |
1665 | break; |
1666 | case GIC_IRQ_TYPE_LPI: /* LPI */ |
1667 | *hwirq = fwspec->param[1]; |
1668 | break; |
1669 | case GIC_IRQ_TYPE_PARTITION: |
1670 | *hwirq = fwspec->param[1]; |
1671 | if (fwspec->param[1] >= 16) |
1672 | *hwirq += EPPI_BASE_INTID - 16; |
1673 | else |
1674 | *hwirq += 16; |
1675 | break; |
1676 | default: |
1677 | return -EINVAL; |
1678 | } |
1679 | |
1680 | *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; |
1681 | |
1682 | /* |
1683 | * Make it clear that broken DTs are... broken. |
1684 | * Partitioned PPIs are an unfortunate exception. |
1685 | */ |
1686 | WARN_ON(*type == IRQ_TYPE_NONE && |
1687 | fwspec->param[0] != GIC_IRQ_TYPE_PARTITION); |
1688 | return 0; |
1689 | } |
1690 | |
1691 | if (is_fwnode_irqchip(fwspec->fwnode)) { |
1692 | if (fwspec->param_count != 2) |
1693 | return -EINVAL; |
1694 | |
1695 | if (fwspec->param[0] < 16) { |
1696 | pr_err(FW_BUG "Illegal GSI%d translation request\n", |
1697 | fwspec->param[0]); |
1698 | return -EINVAL; |
1699 | } |
1700 | |
1701 | *hwirq = fwspec->param[0]; |
1702 | *type = fwspec->param[1]; |
1703 | |
1704 | WARN_ON(*type == IRQ_TYPE_NONE); |
1705 | return 0; |
1706 | } |
1707 | |
1708 | return -EINVAL; |
1709 | } |
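/*
 * DT translation example (illustrative, assuming the usual dt-bindings
 * macros where GIC_SPI is 0 and GIC_PPI is 1): an "interrupts" entry of
 * <GIC_SPI 23 IRQ_TYPE_LEVEL_HIGH> is translated above to hwirq 55
 * (23 + 32) with IRQ_TYPE_LEVEL_HIGH as the trigger type.
 */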
1710 | |
1711 | static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
1712 | unsigned int nr_irqs, void *arg) |
1713 | { |
1714 | int i, ret; |
1715 | irq_hw_number_t hwirq; |
1716 | unsigned int type = IRQ_TYPE_NONE; |
1717 | struct irq_fwspec *fwspec = arg; |
1718 | |
1719 | ret = gic_irq_domain_translate(domain, fwspec, &hwirq, &type); |
1720 | if (ret) |
1721 | return ret; |
1722 | |
1723 | for (i = 0; i < nr_irqs; i++) { |
1724 | ret = gic_irq_domain_map(domain, virq + i, hwirq + i); |
1725 | if (ret) |
1726 | return ret; |
1727 | } |
1728 | |
1729 | return 0; |
1730 | } |
1731 | |
1732 | static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
1733 | unsigned int nr_irqs) |
1734 | { |
1735 | int i; |
1736 | |
1737 | for (i = 0; i < nr_irqs; i++) { |
1738 | struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); |
1739 | irq_set_handler(virq + i, NULL); |
1740 | irq_domain_reset_irq_data(d); |
1741 | } |
1742 | } |
1743 | |
1744 | static bool fwspec_is_partitioned_ppi(struct irq_fwspec *fwspec, |
1745 | irq_hw_number_t hwirq) |
1746 | { |
1747 | enum gic_intid_range range; |
1748 | |
1749 | if (!gic_data.ppi_descs) |
1750 | return false; |
1751 | |
1752 | if (!is_of_node(fwspec->fwnode)) |
1753 | return false; |
1754 | |
1755 | if (fwspec->param_count < 4 || !fwspec->param[3]) |
1756 | return false; |
1757 | |
1758 | range = __get_intid_range(hwirq); |
1759 | if (range != PPI_RANGE && range != EPPI_RANGE) |
1760 | return false; |
1761 | |
1762 | return true; |
1763 | } |
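| /* |
|  * Illustrative DT fragment (assumed, loosely based on the GICv3 binding |
|  * example): a partitioned PPI carries a fourth, non-zero cell that is a |
|  * phandle to one of the ppi-partitions children, e.g. |
|  * |
|  *     interrupts = <GIC_PPI 9 IRQ_TYPE_LEVEL_LOW &part0>; |
|  * |
|  * Only such specifiers are steered towards the per-partition domains. |
|  */ |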
1764 | |
1765 | static int gic_irq_domain_select(struct irq_domain *d, |
1766 | struct irq_fwspec *fwspec, |
1767 | enum irq_domain_bus_token bus_token) |
1768 | { |
1769 | unsigned int type, ret, ppi_idx; |
1770 | irq_hw_number_t hwirq; |
1771 | |
1772 | /* Not for us */ |
1773 | if (fwspec->fwnode != d->fwnode) |
1774 | return 0; |
1775 | |
1776 | /* Handle pure domain searches */ |
1777 | if (!fwspec->param_count) |
1778 | return d->bus_token == bus_token; |
1779 | |
1780 | /* If this is not DT, then we have a single domain */ |
1781 | if (!is_of_node(fwspec->fwnode)) |
1782 | return 1; |
1783 | |
1784 | ret = gic_irq_domain_translate(d, fwspec, &hwirq, &type); |
1785 | if (WARN_ON_ONCE(ret)) |
1786 | return 0; |
1787 | |
1788 | if (!fwspec_is_partitioned_ppi(fwspec, hwirq)) |
1789 | return d == gic_data.domain; |
1790 | |
1791 | /* |
1792 | * If this is a PPI and we have a 4th (non-null) parameter, |
1793 | * then we need to match the partition domain. |
1794 | */ |
1795 | ppi_idx = __gic_get_ppi_index(hwirq); |
1796 | return d == partition_get_domain(gic_data.ppi_descs[ppi_idx]); |
1797 | } |
1798 | |
1799 | static const struct irq_domain_ops gic_irq_domain_ops = { |
1800 | .translate = gic_irq_domain_translate, |
1801 | .alloc = gic_irq_domain_alloc, |
1802 | .free = gic_irq_domain_free, |
1803 | .select = gic_irq_domain_select, |
1804 | }; |
1805 | |
1806 | static int partition_domain_translate(struct irq_domain *d, |
1807 | struct irq_fwspec *fwspec, |
1808 | unsigned long *hwirq, |
1809 | unsigned int *type) |
1810 | { |
1811 | unsigned long ppi_intid; |
1812 | struct device_node *np; |
1813 | unsigned int ppi_idx; |
1814 | int ret; |
1815 | |
1816 | if (!gic_data.ppi_descs) |
1817 | return -ENOMEM; |
1818 | |
1819 | np = of_find_node_by_phandle(fwspec->param[3]); |
1820 | if (WARN_ON(!np)) |
1821 | return -EINVAL; |
1822 | |
1823 | ret = gic_irq_domain_translate(d, fwspec, &ppi_intid, type); |
1824 | if (WARN_ON_ONCE(ret)) |
1825 | return 0; |
1826 | |
1827 | ppi_idx = __gic_get_ppi_index(ppi_intid); |
1828 | ret = partition_translate_id(gic_data.ppi_descs[ppi_idx], |
1829 | of_fwnode_handle(np)); |
1830 | if (ret < 0) |
1831 | return ret; |
1832 | |
1833 | *hwirq = ret; |
1834 | *type = fwspec->param[2] & IRQ_TYPE_SENSE_MASK; |
1835 | |
1836 | return 0; |
1837 | } |
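| /* |
|  * Note (assumed from the partition helper's behaviour): the phandle in |
|  * cell 4 is resolved to a device node and matched against the partition |
|  * descriptors built by gic_populate_ppi_partitions(); the resulting |
|  * partition index, not a GIC INTID, becomes the hwirq in the partition |
|  * domain, while the trigger type still comes from cell 3. |
|  */ |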
1838 | |
1839 | static const struct irq_domain_ops partition_domain_ops = { |
1840 | .translate = partition_domain_translate, |
1841 | .select = gic_irq_domain_select, |
1842 | }; |
1843 | |
1844 | static bool gic_enable_quirk_msm8996(void *data) |
1845 | { |
1846 | struct gic_chip_data *d = data; |
1847 | |
1848 | d->flags |= FLAGS_WORKAROUND_GICR_WAKER_MSM8996; |
1849 | |
1850 | return true; |
1851 | } |
1852 | |
1853 | static bool gic_enable_quirk_cavium_38539(void *data) |
1854 | { |
1855 | struct gic_chip_data *d = data; |
1856 | |
1857 | d->flags |= FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539; |
1858 | |
1859 | return true; |
1860 | } |
1861 | |
1862 | static bool gic_enable_quirk_hip06_07(void *data) |
1863 | { |
1864 | struct gic_chip_data *d = data; |
1865 | |
1866 | /* |
1867 | * HIP06 GICD_IIDR clashes with GIC-600 product number (despite |
1868 | * not being an actual ARM implementation). The saving grace is |
1869 | * that GIC-600 doesn't have ESPI, so nothing to do in that case. |
1870 | * HIP07 doesn't even have a proper IIDR, and still pretends to |
1871 | * have ESPI. In both cases, put them right. |
1872 | */ |
1873 | if (d->rdists.gicd_typer & GICD_TYPER_ESPI) { |
1874 | /* Zero both ESPI and the RES0 field next to it... */ |
1875 | d->rdists.gicd_typer &= ~GENMASK(9, 8); |
1876 | return true; |
1877 | } |
1878 | |
1879 | return false; |
1880 | } |
1881 | |
1882 | #define T241_CHIPN_MASK GENMASK_ULL(45, 44) |
1883 | #define T241_CHIP_GICDA_OFFSET 0x1580000 |
1884 | #define SMCCC_SOC_ID_T241 0x036b0241 |
1885 | |
1886 | static bool gic_enable_quirk_nvidia_t241(void *data) |
1887 | { |
1888 | s32 soc_id = arm_smccc_get_soc_id_version(); |
1889 | unsigned long chip_bmask = 0; |
1890 | phys_addr_t phys; |
1891 | u32 i; |
1892 | |
1893 | /* Check JEP106 code for NVIDIA T241 chip (036b:0241) */ |
1894 | if ((soc_id < 0) || (soc_id != SMCCC_SOC_ID_T241)) |
1895 | return false; |
1896 | |
1897 | /* Find the chips based on GICR regions PHYS addr */ |
1898 | for (i = 0; i < gic_data.nr_redist_regions; i++) { |
1899 | chip_bmask |= BIT(FIELD_GET(T241_CHIPN_MASK, |
1900 | (u64)gic_data.redist_regions[i].phys_base)); |
1901 | } |
1902 | |
1903 | if (hweight32(chip_bmask) < 3) |
1904 | return false; |
1905 | |
1906 | /* Setup GICD alias regions */ |
1907 | for (i = 0; i < ARRAY_SIZE(t241_dist_base_alias); i++) { |
1908 | if (chip_bmask & BIT(i)) { |
1909 | phys = gic_data.dist_phys_base + T241_CHIP_GICDA_OFFSET; |
1910 | phys |= FIELD_PREP(T241_CHIPN_MASK, i); |
1911 | t241_dist_base_alias[i] = ioremap(phys, SZ_64K); |
1912 | WARN_ON_ONCE(!t241_dist_base_alias[i]); |
1913 | } |
1914 | } |
1915 | static_branch_enable(&gic_nvidia_t241_erratum); |
1916 | return true; |
1917 | } |
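| /* |
|  * Worked example (illustrative): with T241_CHIPN_MASK covering physical |
|  * address bits [45:44], a GICR region based at 0x1000_0000_0000 has bit |
|  * 44 set, so FIELD_GET() yields chip 1 and bit 1 is set in chip_bmask. |
|  * The quirk only engages when at least three chips are seen. |
|  */ |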
1918 | |
1919 | static bool gic_enable_quirk_asr8601(void *data) |
1920 | { |
1921 | struct gic_chip_data *d = data; |
1922 | |
1923 | d->flags |= FLAGS_WORKAROUND_ASR_ERRATUM_8601001; |
1924 | |
1925 | return true; |
1926 | } |
1927 | |
1928 | static bool gic_enable_quirk_arm64_2941627(void *data) |
1929 | { |
1930 | static_branch_enable(&gic_arm64_2941627_erratum); |
1931 | return true; |
1932 | } |
1933 | |
1934 | static bool gic_enable_quirk_rk3399(void *data) |
1935 | { |
1936 | struct gic_chip_data *d = data; |
1937 | |
1938 | if (of_machine_is_compatible("rockchip,rk3399")) { |
1939 | d->flags |= FLAGS_WORKAROUND_INSECURE; |
1940 | return true; |
1941 | } |
1942 | |
1943 | return false; |
1944 | } |
1945 | |
1946 | static bool rd_set_non_coherent(void *data) |
1947 | { |
1948 | struct gic_chip_data *d = data; |
1949 | |
1950 | d->rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; |
1951 | return true; |
1952 | } |
1953 | |
1954 | static const struct gic_quirk gic_quirks[] = { |
1955 | { |
1956 | .desc = "GICv3: Qualcomm MSM8996 broken firmware", |
1957 | .compatible = "qcom,msm8996-gic-v3", |
1958 | .init = gic_enable_quirk_msm8996, |
1959 | }, |
1960 | { |
1961 | .desc = "GICv3: ASR erratum 8601001", |
1962 | .compatible = "asr,asr8601-gic-v3", |
1963 | .init = gic_enable_quirk_asr8601, |
1964 | }, |
1965 | { |
1966 | .desc = "GICv3: HIP06 erratum 161010803", |
1967 | .iidr = 0x0204043b, |
1968 | .mask = 0xffffffff, |
1969 | .init = gic_enable_quirk_hip06_07, |
1970 | }, |
1971 | { |
1972 | .desc = "GICv3: HIP07 erratum 161010803", |
1973 | .iidr = 0x00000000, |
1974 | .mask = 0xffffffff, |
1975 | .init = gic_enable_quirk_hip06_07, |
1976 | }, |
1977 | { |
1978 | /* |
1979 | * Reserved register accesses generate a Synchronous |
1980 | * External Abort. This erratum applies to: |
1981 | * - ThunderX: CN88xx |
1982 | * - OCTEON TX: CN83xx, CN81xx |
1983 | * - OCTEON TX2: CN93xx, CN96xx, CN98xx, CNF95xx* |
1984 | */ |
1985 | .desc = "GICv3: Cavium erratum 38539", |
1986 | .iidr = 0xa000034c, |
1987 | .mask = 0xe8f00fff, |
1988 | .init = gic_enable_quirk_cavium_38539, |
1989 | }, |
1990 | { |
1991 | .desc = "GICv3: NVIDIA erratum T241-FABRIC-4", |
1992 | .iidr = 0x0402043b, |
1993 | .mask = 0xffffffff, |
1994 | .init = gic_enable_quirk_nvidia_t241, |
1995 | }, |
1996 | { |
1997 | /* |
1998 | * GIC-700: 2941627 workaround - IP variant [0,1] |
1999 | * |
2000 | */ |
2001 | .desc = "GICv3: ARM64 erratum 2941627", |
2002 | .iidr = 0x0400043b, |
2003 | .mask = 0xff0e0fff, |
2004 | .init = gic_enable_quirk_arm64_2941627, |
2005 | }, |
2006 | { |
2007 | /* |
2008 | * GIC-700: 2941627 workaround - IP variant [2] |
2009 | */ |
2010 | .desc = "GICv3: ARM64 erratum 2941627", |
2011 | .iidr = 0x0402043b, |
2012 | .mask = 0xff0f0fff, |
2013 | .init = gic_enable_quirk_arm64_2941627, |
2014 | }, |
2015 | { |
2016 | .desc = "GICv3: non-coherent attribute", |
2017 | .property = "dma-noncoherent", |
2018 | .init = rd_set_non_coherent, |
2019 | }, |
2020 | { |
2021 | .desc = "GICv3: Insecure RK3399 integration", |
2022 | .iidr = 0x0000043b, |
2023 | .mask = 0xff000fff, |
2024 | .init = gic_enable_quirk_rk3399, |
2025 | }, |
2026 | { |
2027 | } |
2028 | }; |
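| /* |
|  * IIDR-based entries above match when (GICD_IIDR & mask) == iidr; for |
|  * example (illustrative), HIP06's GICD_IIDR of 0x0204043b matches its |
|  * entry exactly because the mask is 0xffffffff. Entries carrying a |
|  * "compatible" or "property" string are matched against the DT instead. |
|  */ |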
2029 | |
2030 | static void gic_enable_nmi_support(void) |
2031 | { |
2032 | int i; |
2033 | |
2034 | if (!gic_prio_masking_enabled() || nmi_support_forbidden) |
2035 | return; |
2036 | |
2037 | rdist_nmi_refs = kcalloc(gic_data.ppi_nr + SGI_NR, |
2038 | sizeof(*rdist_nmi_refs), GFP_KERNEL); |
2039 | if (!rdist_nmi_refs) |
2040 | return; |
2041 | |
2042 | for (i = 0; i < gic_data.ppi_nr + SGI_NR; i++) |
2043 | refcount_set(&rdist_nmi_refs[i], 0); |
2044 | |
2045 | pr_info("Pseudo-NMIs enabled using %s ICC_PMR_EL1 synchronisation\n", |
2046 | gic_has_relaxed_pmr_sync() ? "relaxed": "forced"); |
2047 | |
2048 | static_branch_enable(&supports_pseudo_nmis); |
2049 | |
2050 | if (static_branch_likely(&supports_deactivate_key)) |
2051 | gic_eoimode1_chip.flags |= IRQCHIP_SUPPORTS_NMI; |
2052 | else |
2053 | gic_chip.flags |= IRQCHIP_SUPPORTS_NMI; |
2054 | } |
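| /* |
|  * Background note (assumption based on the GICv3 priority scheme): |
|  * pseudo-NMIs rely on dist_prio_nmi being numerically lower (i.e. a |
|  * higher priority) than dist_prio_irq, so that masking normal IRQs via |
|  * ICC_PMR_EL1 still lets NMI-configured interrupts through. |
|  */ |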
2055 | |
2056 | static int __init gic_init_bases(phys_addr_t dist_phys_base, |
2057 | void __iomem *dist_base, |
2058 | struct redist_region *rdist_regs, |
2059 | u32 nr_redist_regions, |
2060 | u64 redist_stride, |
2061 | struct fwnode_handle *handle) |
2062 | { |
2063 | u32 typer; |
2064 | int err; |
2065 | |
2066 | if (!is_hyp_mode_available()) |
2067 | static_branch_disable(&supports_deactivate_key); |
2068 | |
2069 | if (static_branch_likely(&supports_deactivate_key)) |
2070 | pr_info("GIC: Using split EOI/Deactivate mode\n"); |
2071 | |
2072 | gic_data.fwnode = handle; |
2073 | gic_data.dist_phys_base = dist_phys_base; |
2074 | gic_data.dist_base = dist_base; |
2075 | gic_data.redist_regions = rdist_regs; |
2076 | gic_data.nr_redist_regions = nr_redist_regions; |
2077 | gic_data.redist_stride = redist_stride; |
2078 | |
2079 | /* |
2080 | * Find out how many interrupts are supported. |
2081 | */ |
2082 | typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); |
2083 | gic_data.rdists.gicd_typer = typer; |
2084 | |
2085 | gic_enable_quirks(readl_relaxed(gic_data.dist_base + GICD_IIDR), |
2086 | gic_quirks, &gic_data); |
2087 | |
2088 | pr_info("%d SPIs implemented\n", GIC_LINE_NR - 32); |
2089 | pr_info("%d Extended SPIs implemented\n", GIC_ESPI_NR); |
2090 | |
2091 | /* |
2092 | * ThunderX1 explodes on reading GICD_TYPER2, in violation of the |
2093 | * architecture spec (which says that reserved registers are RES0). |
2094 | */ |
2095 | if (!(gic_data.flags & FLAGS_WORKAROUND_CAVIUM_ERRATUM_38539)) |
2096 | gic_data.rdists.gicd_typer2 = readl_relaxed(gic_data.dist_base + GICD_TYPER2); |
2097 | |
2098 | gic_data.domain = irq_domain_create_tree(handle, &gic_irq_domain_ops, |
2099 | &gic_data); |
2100 | gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); |
2101 | if (!static_branch_unlikely(&gic_nvidia_t241_erratum)) { |
2102 | /* GICv4.x features are only advertised when the T241-FABRIC-4 erratum is absent */ |
2103 | gic_data.rdists.has_rvpeid = true; |
2104 | gic_data.rdists.has_vlpis = true; |
2105 | gic_data.rdists.has_direct_lpi = true; |
2106 | gic_data.rdists.has_vpend_valid_dirty = true; |
2107 | } |
2108 | |
2109 | if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { |
2110 | err = -ENOMEM; |
2111 | goto out_free; |
2112 | } |
2113 | |
2114 | irq_domain_update_bus_token(gic_data.domain, DOMAIN_BUS_WIRED); |
2115 | |
2116 | gic_data.has_rss = !!(typer & GICD_TYPER_RSS); |
2117 | |
2118 | if (typer & GICD_TYPER_MBIS) { |
2119 | err = mbi_init(handle, gic_data.domain); |
2120 | if (err) |
2121 | pr_err("Failed to initialize MBIs\n"); |
2122 | } |
2123 | |
2124 | set_handle_irq(gic_handle_irq); |
2125 | |
2126 | gic_update_rdist_properties(); |
2127 | |
2128 | gic_cpu_sys_reg_enable(); |
2129 | gic_prio_init(); |
2130 | gic_dist_init(); |
2131 | gic_cpu_init(); |
2132 | gic_enable_nmi_support(); |
2133 | gic_smp_init(); |
2134 | gic_cpu_pm_init(); |
2135 | |
2136 | if (gic_dist_supports_lpis()) { |
2137 | its_init(handle, &gic_data.rdists, gic_data.domain, dist_prio_irq); |
2138 | its_cpu_init(); |
2139 | its_lpi_memreserve_init(); |
2140 | } else { |
2141 | if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) |
2142 | gicv2m_init(handle, gic_data.domain); |
2143 | } |
2144 | |
2145 | return 0; |
2146 | |
2147 | out_free: |
2148 | if (gic_data.domain) |
2149 | irq_domain_remove(gic_data.domain); |
2150 | free_percpu(gic_data.rdists.rdist); |
2151 | return err; |
2152 | } |
2153 | |
2154 | static int __init gic_validate_dist_version(void __iomem *dist_base) |
2155 | { |
2156 | u32 reg = readl_relaxed(dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; |
2157 | |
2158 | if (reg != GIC_PIDR2_ARCH_GICv3 && reg != GIC_PIDR2_ARCH_GICv4) |
2159 | return -ENODEV; |
2160 | |
2161 | return 0; |
2162 | } |
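| /* |
|  * GICD_PIDR2[7:4] encodes the architecture revision: 0x3 for GICv3 and |
|  * 0x4 for GICv4, which is what the check above accepts. |
|  */ |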
2163 | |
2164 | /* Create all possible partitions at boot time */ |
2165 | static void __init gic_populate_ppi_partitions(struct device_node *gic_node) |
2166 | { |
2167 | struct device_node *parts_node, *child_part; |
2168 | int part_idx = 0, i; |
2169 | int nr_parts; |
2170 | struct partition_affinity *parts; |
2171 | |
2172 | parts_node = of_get_child_by_name(gic_node, "ppi-partitions"); |
2173 | if (!parts_node) |
2174 | return; |
2175 | |
2176 | gic_data.ppi_descs = kcalloc(gic_data.ppi_nr, sizeof(*gic_data.ppi_descs), GFP_KERNEL); |
2177 | if (!gic_data.ppi_descs) |
2178 | goto out_put_node; |
2179 | |
2180 | nr_parts = of_get_child_count(parts_node); |
2181 | |
2182 | if (!nr_parts) |
2183 | goto out_put_node; |
2184 | |
2185 | parts = kcalloc(nr_parts, sizeof(*parts), GFP_KERNEL); |
2186 | if (WARN_ON(!parts)) |
2187 | goto out_put_node; |
2188 | |
2189 | for_each_child_of_node(parts_node, child_part) { |
2190 | struct partition_affinity *part; |
2191 | int n; |
2192 | |
2193 | part = &parts[part_idx]; |
2194 | |
2195 | part->partition_id = of_fwnode_handle(child_part); |
2196 | |
2197 | pr_info("GIC: PPI partition %pOFn[%d] { ", |
2198 | child_part, part_idx); |
2199 | |
2200 | n = of_property_count_elems_of_size(child_part, "affinity", |
2201 | sizeof(u32)); |
2202 | WARN_ON(n <= 0); |
2203 | |
2204 | for (i = 0; i < n; i++) { |
2205 | int err, cpu; |
2206 | u32 cpu_phandle; |
2207 | struct device_node *cpu_node; |
2208 | |
2209 | err = of_property_read_u32_index(child_part, "affinity", |
2210 | i, &cpu_phandle); |
2211 | if (WARN_ON(err)) |
2212 | continue; |
2213 | |
2214 | cpu_node = of_find_node_by_phandle(cpu_phandle); |
2215 | if (WARN_ON(!cpu_node)) |
2216 | continue; |
2217 | |
2218 | cpu = of_cpu_node_to_id(cpu_node); |
2219 | if (WARN_ON(cpu < 0)) { |
2220 | of_node_put(cpu_node); |
2221 | continue; |
2222 | } |
2223 | |
2224 | pr_cont("%pOF[%d] ", cpu_node, cpu); |
2225 | |
2226 | cpumask_set_cpu(cpu, &part->mask); |
2227 | of_node_put(cpu_node); |
2228 | } |
2229 | |
2230 | pr_cont("}\n"); |
2231 | part_idx++; |
2232 | } |
2233 | |
2234 | for (i = 0; i < gic_data.ppi_nr; i++) { |
2235 | unsigned int irq; |
2236 | struct partition_desc *desc; |
2237 | struct irq_fwspec ppi_fwspec = { |
2238 | .fwnode = gic_data.fwnode, |
2239 | .param_count = 3, |
2240 | .param = { |
2241 | [0] = GIC_IRQ_TYPE_PARTITION, |
2242 | [1] = i, |
2243 | [2] = IRQ_TYPE_NONE, |
2244 | }, |
2245 | }; |
2246 | |
2247 | irq = irq_create_fwspec_mapping(&ppi_fwspec); |
2248 | if (WARN_ON(!irq)) |
2249 | continue; |
2250 | desc = partition_create_desc(gic_data.fwnode, parts, nr_parts, |
2251 | irq, &partition_domain_ops); |
2252 | if (WARN_ON(!desc)) |
2253 | continue; |
2254 | |
2255 | gic_data.ppi_descs[i] = desc; |
2256 | } |
2257 | |
2258 | out_put_node: |
2259 | of_node_put(parts_node); |
2260 | } |
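| /* |
|  * Illustrative DT layout consumed above (assumed, loosely based on the |
|  * GICv3 binding example, not taken from this file): |
|  * |
|  *     ppi-partitions { |
|  *             part0: interrupt-partition-0 { |
|  *                     affinity = <&cpu0 &cpu2>; |
|  *             }; |
|  *     }; |
|  * |
|  * Each child becomes a struct partition_affinity whose mask covers the |
|  * CPUs listed in "affinity". |
|  */ |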
2261 | |
2262 | static void __init gic_of_setup_kvm_info(struct device_node *node, u32 nr_redist_regions) |
2263 | { |
2264 | int ret; |
2265 | struct resource r; |
2266 | |
2267 | gic_v3_kvm_info.type = GIC_V3; |
2268 | |
2269 | gic_v3_kvm_info.maint_irq = irq_of_parse_and_map(node, 0); |
2270 | if (!gic_v3_kvm_info.maint_irq) |
2271 | return; |
2272 | |
2273 | /* Also skip GICD, GICC, GICH */ |
2274 | ret = of_address_to_resource(node, nr_redist_regions + 3, &r); |
2275 | if (!ret) |
2276 | gic_v3_kvm_info.vcpu = r; |
2277 | |
2278 | gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; |
2279 | gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; |
2280 | vgic_set_kvm_info(&gic_v3_kvm_info); |
2281 | } |
2282 | |
2283 | static void gic_request_region(resource_size_t base, resource_size_t size, |
2284 | const char *name) |
2285 | { |
2286 | if (!request_mem_region(base, size, name)) |
2287 | pr_warn_once(FW_BUG "%s region %pa has overlapping address\n", |
2288 | name, &base); |
2289 | } |
2290 | |
2291 | static void __iomem *gic_of_iomap(struct device_node *node, int idx, |
2292 | const char *name, struct resource *res) |
2293 | { |
2294 | void __iomem *base; |
2295 | int ret; |
2296 | |
2297 | ret = of_address_to_resource(node, idx, res); |
2298 | if (ret) |
2299 | return IOMEM_ERR_PTR(ret); |
2300 | |
2301 | gic_request_region(res->start, resource_size(res), name); |
2302 | base = of_iomap(node, idx); |
2303 | |
2304 | return base ?: IOMEM_ERR_PTR(-ENOMEM); |
2305 | } |
2306 | |
2307 | static int __init gic_of_init(struct device_node *node, struct device_node *parent) |
2308 | { |
2309 | phys_addr_t dist_phys_base; |
2310 | void __iomem *dist_base; |
2311 | struct redist_region *rdist_regs; |
2312 | struct resource res; |
2313 | u64 redist_stride; |
2314 | u32 nr_redist_regions; |
2315 | int err, i; |
2316 | |
2317 | dist_base = gic_of_iomap(node, 0, "GICD", &res); |
2318 | if (IS_ERR(dist_base)) { |
2319 | pr_err("%pOF: unable to map gic dist registers\n", node); |
2320 | return PTR_ERR(dist_base); |
2321 | } |
2322 | |
2323 | dist_phys_base = res.start; |
2324 | |
2325 | err = gic_validate_dist_version(dist_base); |
2326 | if (err) { |
2327 | pr_err("%pOF: no distributor detected, giving up\n", node); |
2328 | goto out_unmap_dist; |
2329 | } |
2330 | |
2331 | if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) |
2332 | nr_redist_regions = 1; |
2333 | |
2334 | rdist_regs = kcalloc(nr_redist_regions, sizeof(*rdist_regs), |
2335 | GFP_KERNEL); |
2336 | if (!rdist_regs) { |
2337 | err = -ENOMEM; |
2338 | goto out_unmap_dist; |
2339 | } |
2340 | |
2341 | for (i = 0; i < nr_redist_regions; i++) { |
2342 | rdist_regs[i].redist_base = gic_of_iomap(node, 1 + i, "GICR", &res); |
2343 | if (IS_ERR(rdist_regs[i].redist_base)) { |
2344 | pr_err("%pOF: couldn't map region %d\n", node, i); |
2345 | err = -ENODEV; |
2346 | goto out_unmap_rdist; |
2347 | } |
2348 | rdist_regs[i].phys_base = res.start; |
2349 | } |
2350 | |
2351 | if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) |
2352 | redist_stride = 0; |
2353 | |
2354 | gic_enable_of_quirks(node, gic_quirks, &gic_data); |
2355 | |
2356 | err = gic_init_bases(dist_phys_base, dist_base, rdist_regs, |
2357 | nr_redist_regions, redist_stride, &node->fwnode); |
2358 | if (err) |
2359 | goto out_unmap_rdist; |
2360 | |
2361 | gic_populate_ppi_partitions(node); |
2362 | |
2363 | if (static_branch_likely(&supports_deactivate_key)) |
2364 | gic_of_setup_kvm_info(node, nr_redist_regions); |
2365 | return 0; |
2366 | |
2367 | out_unmap_rdist: |
2368 | for (i = 0; i < nr_redist_regions; i++) |
2369 | if (rdist_regs[i].redist_base && !IS_ERR(rdist_regs[i].redist_base)) |
2370 | iounmap(rdist_regs[i].redist_base); |
2371 | kfree(rdist_regs); |
2372 | out_unmap_dist: |
2373 | iounmap(dist_base); |
2374 | return err; |
2375 | } |
2376 | |
2377 | IRQCHIP_DECLARE(gic_v3, "arm,gic-v3", gic_of_init); |
2378 | |
2379 | #ifdef CONFIG_ACPI |
2380 | static struct |
2381 | { |
2382 | void __iomem *dist_base; |
2383 | struct redist_region *redist_regs; |
2384 | u32 nr_redist_regions; |
2385 | bool single_redist; |
2386 | int enabled_rdists; |
2387 | u32 maint_irq; |
2388 | int maint_irq_mode; |
2389 | phys_addr_t vcpu_base; |
2390 | } acpi_data __initdata; |
2391 | |
2392 | static void __init |
2393 | gic_acpi_register_redist(phys_addr_t phys_base, void __iomem *redist_base) |
2394 | { |
2395 | static int count = 0; |
2396 | |
2397 | acpi_data.redist_regs[count].phys_base = phys_base; |
2398 | acpi_data.redist_regs[count].redist_base = redist_base; |
2399 | acpi_data.redist_regs[count].single_redist = acpi_data.single_redist; |
2400 | count++; |
2401 | } |
2402 | |
2403 | static int __init |
2404 | gic_acpi_parse_madt_redist(union acpi_subtable_headers *header, |
2405 | const unsigned long end) |
2406 | { |
2407 | struct acpi_madt_generic_redistributor *redist = |
2408 | (struct acpi_madt_generic_redistributor *)header; |
2409 | void __iomem *redist_base; |
2410 | |
2411 | redist_base = ioremap(redist->base_address, redist->length); |
2412 | if (!redist_base) { |
2413 | pr_err("Couldn't map GICR region @%llx\n", redist->base_address); |
2414 | return -ENOMEM; |
2415 | } |
2416 | |
2417 | if (acpi_get_madt_revision() >= 7 && |
2418 | (redist->flags & ACPI_MADT_GICR_NON_COHERENT)) |
2419 | gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; |
2420 | |
2421 | gic_request_region(redist->base_address, redist->length, "GICR"); |
2422 | |
2423 | gic_acpi_register_redist(redist->base_address, redist_base); |
2424 | return 0; |
2425 | } |
2426 | |
2427 | static int __init |
2428 | gic_acpi_parse_madt_gicc(union acpi_subtable_headers *header, |
2429 | const unsigned long end) |
2430 | { |
2431 | struct acpi_madt_generic_interrupt *gicc = |
2432 | (struct acpi_madt_generic_interrupt *)header; |
2433 | u32 reg = readl_relaxed(acpi_data.dist_base + GICD_PIDR2) & GIC_PIDR2_ARCH_MASK; |
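| /* |
|  * Background note (per the GICv3/v4 architecture, not stated in this |
|  * file): each redistributor spans two 64K frames (RD_base and SGI_base); |
|  * GICv4 adds two further frames (VLPI_base plus a reserved frame), hence |
|  * the 4 * SZ_64K mapping size below. |
|  */ |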
2434 | u32 size = reg == GIC_PIDR2_ARCH_GICv4 ? SZ_64K * 4 : SZ_64K * 2; |
2435 | void __iomem *redist_base; |
2436 | |
2437 | /* Neither enabled nor online capable means it doesn't exist; skip it */ |
2438 | if (!(gicc->flags & (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) |
2439 | return 0; |
2440 | |
2441 | /* |
2442 | * Capable but disabled CPUs can be brought online later. What about |
2443 | * the redistributor? ACPI doesn't want to say! |
2444 | * Virtual hotplug systems can use the MADT's "always-on" GICR entries. |
2445 | * Otherwise, prevent such CPUs from being brought online. |
2446 | */ |
2447 | if (!(gicc->flags & ACPI_MADT_ENABLED)) { |
2448 | int cpu = get_cpu_for_acpi_id(gicc->uid); |
2449 | |
2450 | pr_warn("CPU %u's redistributor is inaccessible: this CPU can't be brought online\n", cpu); |
2451 | if (cpu >= 0) |
2452 | cpumask_set_cpu(cpu, &broken_rdists); |
2453 | return 0; |
2454 | } |
2455 | |
2456 | redist_base = ioremap(gicc->gicr_base_address, size); |
2457 | if (!redist_base) |
2458 | return -ENOMEM; |
2459 | gic_request_region(gicc->gicr_base_address, size, "GICR"); |
2460 | |
2461 | if (acpi_get_madt_revision() >= 7 && |
2462 | (gicc->flags & ACPI_MADT_GICC_NON_COHERENT)) |
2463 | gic_data.rdists.flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; |
2464 | |
2465 | gic_acpi_register_redist(gicc->gicr_base_address, redist_base); |
2466 | return 0; |
2467 | } |
2468 | |
2469 | static int __init gic_acpi_collect_gicr_base(void) |
2470 | { |
2471 | acpi_tbl_entry_handler redist_parser; |
2472 | enum acpi_madt_type type; |
2473 | |
2474 | if (acpi_data.single_redist) { |
2475 | type = ACPI_MADT_TYPE_GENERIC_INTERRUPT; |
2476 | redist_parser = gic_acpi_parse_madt_gicc; |
2477 | } else { |
2478 | type = ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR; |
2479 | redist_parser = gic_acpi_parse_madt_redist; |
2480 | } |
2481 | |
2482 | /* Collect redistributor base addresses in GICR entries */ |
2483 | if (acpi_table_parse_madt(type, redist_parser, 0) > 0) |
2484 | return 0; |
2485 | |
2486 | pr_info("No valid GICR entries exist\n"); |
2487 | return -ENODEV; |
2488 | } |
2489 | |
2490 | static int __init gic_acpi_match_gicr(union acpi_subtable_headers *header, |
2491 | const unsigned long end) |
2492 | { |
2493 | /* Subtable presence means that redist exists, that's it */ |
2494 | return 0; |
2495 | } |
2496 | |
2497 | static int __init gic_acpi_match_gicc(union acpi_subtable_headers *header, |
2498 | const unsigned long end) |
2499 | { |
2500 | struct acpi_madt_generic_interrupt *gicc = |
2501 | (struct acpi_madt_generic_interrupt *)header; |
2502 | |
2503 | /* |
2504 | * If GICC is enabled and has valid gicr base address, then it means |
2505 | * GICR base is presented via GICC. The redistributor is only known to |
2506 | * be accessible if the GICC is marked as enabled. If this bit is not |
2507 | * set, we'd need to add the redistributor at runtime, which isn't |
2508 | * supported. |
2509 | */ |
2510 | if (gicc->flags & ACPI_MADT_ENABLED && gicc->gicr_base_address) |
2511 | acpi_data.enabled_rdists++; |
2512 | |
2513 | return 0; |
2514 | } |
2515 | |
2516 | static int __init gic_acpi_count_gicr_regions(void) |
2517 | { |
2518 | int count; |
2519 | |
2520 | /* |
2521 | * Count how many redistributor regions we have. It is not allowed |
2522 | * to mix redistributor description, GICR and GICC subtables have to be |
2523 | * mutually exclusive. |
2524 | */ |
2525 | count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_REDISTRIBUTOR, |
2526 | gic_acpi_match_gicr, 0); |
2527 | if (count > 0) { |
2528 | acpi_data.single_redist = false; |
2529 | return count; |
2530 | } |
2531 | |
2532 | count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, |
2533 | gic_acpi_match_gicc, 0); |
2534 | if (count > 0) { |
2535 | acpi_data.single_redist = true; |
2536 | count = acpi_data.enabled_rdists; |
2537 | } |
2538 | |
2539 | return count; |
2540 | } |
2541 | |
2542 | static bool __init acpi_validate_gic_table(struct acpi_subtable_header *header, |
2543 | struct acpi_probe_entry *ape) |
2544 | { |
2545 | struct acpi_madt_generic_distributor *dist; |
2546 | int count; |
2547 | |
2548 | dist = (struct acpi_madt_generic_distributor *)header; |
2549 | if (dist->version != ape->driver_data) |
2550 | return false; |
2551 | |
2552 | /* We need to do that exercise anyway, the sooner the better */ |
2553 | count = gic_acpi_count_gicr_regions(); |
2554 | if (count <= 0) |
2555 | return false; |
2556 | |
2557 | acpi_data.nr_redist_regions = count; |
2558 | return true; |
2559 | } |
2560 | |
2561 | static int __init gic_acpi_parse_virt_madt_gicc(union acpi_subtable_headers *header, |
2562 | const unsigned long end) |
2563 | { |
2564 | struct acpi_madt_generic_interrupt *gicc = |
2565 | (struct acpi_madt_generic_interrupt *)header; |
2566 | int maint_irq_mode; |
2567 | static int first_madt = true; |
2568 | |
2569 | if (!(gicc->flags & |
2570 | (ACPI_MADT_ENABLED | ACPI_MADT_GICC_ONLINE_CAPABLE))) |
2571 | return 0; |
2572 | |
2573 | maint_irq_mode = (gicc->flags & ACPI_MADT_VGIC_IRQ_MODE) ? |
2574 | ACPI_EDGE_SENSITIVE : ACPI_LEVEL_SENSITIVE; |
2575 | |
2576 | if (first_madt) { |
2577 | first_madt = false; |
2578 | |
2579 | acpi_data.maint_irq = gicc->vgic_interrupt; |
2580 | acpi_data.maint_irq_mode = maint_irq_mode; |
2581 | acpi_data.vcpu_base = gicc->gicv_base_address; |
2582 | |
2583 | return 0; |
2584 | } |
2585 | |
2586 | /* |
2587 | * The maintenance interrupt and GICV should be the same for every CPU |
2588 | */ |
2589 | if ((acpi_data.maint_irq != gicc->vgic_interrupt) || |
2590 | (acpi_data.maint_irq_mode != maint_irq_mode) || |
2591 | (acpi_data.vcpu_base != gicc->gicv_base_address)) |
2592 | return -EINVAL; |
2593 | |
2594 | return 0; |
2595 | } |
2596 | |
2597 | static bool __init gic_acpi_collect_virt_info(void) |
2598 | { |
2599 | int count; |
2600 | |
2601 | count = acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, |
2602 | gic_acpi_parse_virt_madt_gicc, 0); |
2603 | |
2604 | return (count > 0); |
2605 | } |
2606 | |
2607 | #define ACPI_GICV3_DIST_MEM_SIZE (SZ_64K) |
2608 | #define ACPI_GICV2_VCTRL_MEM_SIZE (SZ_4K) |
2609 | #define ACPI_GICV2_VCPU_MEM_SIZE (SZ_8K) |
2610 | |
2611 | static void __init gic_acpi_setup_kvm_info(void) |
2612 | { |
2613 | int irq; |
2614 | |
2615 | if (!gic_acpi_collect_virt_info()) { |
2616 | pr_warn("Unable to get hardware information used for virtualization\n"); |
2617 | return; |
2618 | } |
2619 | |
2620 | gic_v3_kvm_info.type = GIC_V3; |
2621 | |
2622 | irq = acpi_register_gsi(NULL, acpi_data.maint_irq, |
2623 | acpi_data.maint_irq_mode, |
2624 | ACPI_ACTIVE_HIGH); |
2625 | if (irq <= 0) |
2626 | return; |
2627 | |
2628 | gic_v3_kvm_info.maint_irq = irq; |
2629 | |
2630 | if (acpi_data.vcpu_base) { |
2631 | struct resource *vcpu = &gic_v3_kvm_info.vcpu; |
2632 | |
2633 | vcpu->flags = IORESOURCE_MEM; |
2634 | vcpu->start = acpi_data.vcpu_base; |
2635 | vcpu->end = vcpu->start + ACPI_GICV2_VCPU_MEM_SIZE - 1; |
2636 | } |
2637 | |
2638 | gic_v3_kvm_info.has_v4 = gic_data.rdists.has_vlpis; |
2639 | gic_v3_kvm_info.has_v4_1 = gic_data.rdists.has_rvpeid; |
2640 | vgic_set_kvm_info(info: &gic_v3_kvm_info); |
2641 | } |
2642 | |
2643 | static struct fwnode_handle *gsi_domain_handle; |
2644 | |
2645 | static struct fwnode_handle *gic_v3_get_gsi_domain_id(u32 gsi) |
2646 | { |
2647 | return gsi_domain_handle; |
2648 | } |
2649 | |
2650 | static int __init |
2651 | gic_acpi_init(union acpi_subtable_headers *header, const unsigned long end) |
2652 | { |
2653 | struct acpi_madt_generic_distributor *dist; |
2654 | size_t size; |
2655 | int i, err; |
2656 | |
2657 | /* Get distributor base address */ |
2658 | dist = (struct acpi_madt_generic_distributor *)header; |
2659 | acpi_data.dist_base = ioremap(dist->base_address, |
2660 | ACPI_GICV3_DIST_MEM_SIZE); |
2661 | if (!acpi_data.dist_base) { |
2662 | pr_err("Unable to map GICD registers\n"); |
2663 | return -ENOMEM; |
2664 | } |
2665 | gic_request_region(dist->base_address, ACPI_GICV3_DIST_MEM_SIZE, "GICD"); |
2666 | |
2667 | err = gic_validate_dist_version(acpi_data.dist_base); |
2668 | if (err) { |
2669 | pr_err("No distributor detected at @%p, giving up\n", |
2670 | acpi_data.dist_base); |
2671 | goto out_dist_unmap; |
2672 | } |
2673 | |
2674 | size = sizeof(*acpi_data.redist_regs) * acpi_data.nr_redist_regions; |
2675 | acpi_data.redist_regs = kzalloc(size, GFP_KERNEL); |
2676 | if (!acpi_data.redist_regs) { |
2677 | err = -ENOMEM; |
2678 | goto out_dist_unmap; |
2679 | } |
2680 | |
2681 | err = gic_acpi_collect_gicr_base(); |
2682 | if (err) |
2683 | goto out_redist_unmap; |
2684 | |
2685 | gsi_domain_handle = irq_domain_alloc_fwnode(&dist->base_address); |
2686 | if (!gsi_domain_handle) { |
2687 | err = -ENOMEM; |
2688 | goto out_redist_unmap; |
2689 | } |
2690 | |
2691 | err = gic_init_bases(dist->base_address, acpi_data.dist_base, |
2692 | acpi_data.redist_regs, acpi_data.nr_redist_regions, |
2693 | 0, gsi_domain_handle); |
2694 | if (err) |
2695 | goto out_fwhandle_free; |
2696 | |
2697 | acpi_set_irq_model(ACPI_IRQ_MODEL_GIC, gic_v3_get_gsi_domain_id); |
2698 | |
2699 | if (static_branch_likely(&supports_deactivate_key)) |
2700 | gic_acpi_setup_kvm_info(); |
2701 | |
2702 | return 0; |
2703 | |
2704 | out_fwhandle_free: |
2705 | irq_domain_free_fwnode(gsi_domain_handle); |
2706 | out_redist_unmap: |
2707 | for (i = 0; i < acpi_data.nr_redist_regions; i++) |
2708 | if (acpi_data.redist_regs[i].redist_base) |
2709 | iounmap(acpi_data.redist_regs[i].redist_base); |
2710 | kfree(acpi_data.redist_regs); |
2711 | out_dist_unmap: |
2712 | iounmap(acpi_data.dist_base); |
2713 | return err; |
2714 | } |
2715 | IRQCHIP_ACPI_DECLARE(gic_v3, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, |
2716 | acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V3, |
2717 | gic_acpi_init); |
2718 | IRQCHIP_ACPI_DECLARE(gic_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, |
2719 | acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_V4, |
2720 | gic_acpi_init); |
2721 | IRQCHIP_ACPI_DECLARE(gic_v3_or_v4, ACPI_MADT_TYPE_GENERIC_DISTRIBUTOR, |
2722 | acpi_validate_gic_table, ACPI_MADT_GIC_VERSION_NONE, |
2723 | gic_acpi_init); |
2724 | #endif |
2725 |