1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved. |
4 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
5 | */ |
6 | |
7 | #include <linux/acpi.h> |
8 | #include <linux/acpi_iort.h> |
9 | #include <linux/bitfield.h> |
10 | #include <linux/bitmap.h> |
11 | #include <linux/cpu.h> |
12 | #include <linux/crash_dump.h> |
13 | #include <linux/delay.h> |
14 | #include <linux/efi.h> |
15 | #include <linux/interrupt.h> |
16 | #include <linux/iommu.h> |
17 | #include <linux/iopoll.h> |
18 | #include <linux/irqdomain.h> |
19 | #include <linux/list.h> |
20 | #include <linux/log2.h> |
21 | #include <linux/memblock.h> |
22 | #include <linux/mm.h> |
23 | #include <linux/msi.h> |
24 | #include <linux/of.h> |
25 | #include <linux/of_address.h> |
26 | #include <linux/of_irq.h> |
27 | #include <linux/of_pci.h> |
28 | #include <linux/of_platform.h> |
29 | #include <linux/percpu.h> |
30 | #include <linux/slab.h> |
31 | #include <linux/syscore_ops.h> |
32 | |
33 | #include <linux/irqchip.h> |
34 | #include <linux/irqchip/arm-gic-v3.h> |
35 | #include <linux/irqchip/arm-gic-v4.h> |
36 | |
37 | #include <asm/cputype.h> |
38 | #include <asm/exception.h> |
39 | |
40 | #include "irq-gic-common.h" |
41 | |
42 | #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0) |
43 | #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1) |
44 | #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2) |
45 | #define ITS_FLAGS_FORCE_NON_SHAREABLE (1ULL << 3) |
46 | |
47 | #define RD_LOCAL_LPI_ENABLED BIT(0) |
48 | #define RD_LOCAL_PENDTABLE_PREALLOCATED BIT(1) |
49 | #define RD_LOCAL_MEMRESERVE_DONE BIT(2) |
50 | |
51 | static u32 lpi_id_bits; |
52 | |
53 | /* |
 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs
 * (one configuration byte per interrupt). PENDBASE has to
56 | * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). |
57 | */ |
58 | #define LPI_NRBITS lpi_id_bits |
59 | #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K) |
60 | #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K) |
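
/*
 * A worked example of the sizing above (assuming lpi_id_bits = 16):
 * LPI_PROPBASE_SZ covers 2^16 one-byte entries, i.e. 64kB, and
 * LPI_PENDBASE_SZ needs 2^16 / 8 = 8kB of pending bits (including the
 * 8192 bits set aside for IDs below 8192), rounded up to 64kB by the
 * ALIGN.
 */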
61 | |
62 | #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI |
63 | |
64 | /* |
65 | * Collection structure - just an ID, and a redistributor address to |
66 | * ping. We use one per CPU as a bag of interrupts assigned to this |
67 | * CPU. |
68 | */ |
69 | struct its_collection { |
70 | u64 target_address; |
71 | u16 col_id; |
72 | }; |
73 | |
74 | /* |
75 | * The ITS_BASER structure - contains memory information, cached |
76 | * value of BASER register configuration and ITS page size. |
77 | */ |
78 | struct its_baser { |
79 | void *base; |
80 | u64 val; |
81 | u32 order; |
82 | u32 psz; |
83 | }; |
84 | |
85 | struct its_device; |
86 | |
87 | /* |
88 | * The ITS structure - contains most of the infrastructure, with the |
89 | * top-level MSI domain, the command queue, the collections, and the |
90 | * list of devices writing to it. |
91 | * |
92 | * dev_alloc_lock has to be taken for device allocations, while the |
93 | * spinlock must be taken to parse data structures such as the device |
94 | * list. |
95 | */ |
96 | struct its_node { |
97 | raw_spinlock_t lock; |
98 | struct mutex dev_alloc_lock; |
99 | struct list_head entry; |
100 | void __iomem *base; |
101 | void __iomem *sgir_base; |
102 | phys_addr_t phys_base; |
103 | struct its_cmd_block *cmd_base; |
104 | struct its_cmd_block *cmd_write; |
105 | struct its_baser tables[GITS_BASER_NR_REGS]; |
106 | struct its_collection *collections; |
107 | struct fwnode_handle *fwnode_handle; |
108 | u64 (*get_msi_base)(struct its_device *its_dev); |
109 | u64 typer; |
110 | u64 cbaser_save; |
111 | u32 ctlr_save; |
112 | u32 mpidr; |
113 | struct list_head its_device_list; |
114 | u64 flags; |
115 | unsigned long list_nr; |
116 | int numa_node; |
117 | unsigned int msi_domain_flags; |
118 | u32 pre_its_base; /* for Socionext Synquacer */ |
119 | int vlpi_redist_offset; |
120 | }; |
121 | |
122 | #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS)) |
123 | #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP)) |
124 | #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1) |
125 | |
126 | #define ITS_ITT_ALIGN SZ_256 |
127 | |
128 | /* The maximum number of VPEID bits supported by VLPI commands */ |
129 | #define ITS_MAX_VPEID_BITS \ |
130 | ({ \ |
131 | int nvpeid = 16; \ |
132 | if (gic_rdists->has_rvpeid && \ |
133 | gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \ |
134 | nvpeid = 1 + (gic_rdists->gicd_typer2 & \ |
135 | GICD_TYPER2_VID); \ |
136 | \ |
137 | nvpeid; \ |
138 | }) |
139 | #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS)) |
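
/*
 * As an illustration of the macro above: without RVPEID the ITS is
 * limited to 16 bits of vPEID (65536 vPEs); with RVPEID and
 * GICD_TYPER2.VIL set, the width is GICD_TYPER2.VID + 1 bits instead,
 * e.g. a VID field of 15 also yields 16 bits.
 */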
140 | |
141 | /* Convert page order to size in bytes */ |
142 | #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o)) |
143 | |
144 | struct event_lpi_map { |
145 | unsigned long *lpi_map; |
146 | u16 *col_map; |
147 | irq_hw_number_t lpi_base; |
148 | int nr_lpis; |
149 | raw_spinlock_t vlpi_lock; |
150 | struct its_vm *vm; |
151 | struct its_vlpi_map *vlpi_maps; |
152 | int nr_vlpis; |
153 | }; |
154 | |
155 | /* |
156 | * The ITS view of a device - belongs to an ITS, owns an interrupt |
 * translation table, and a list of interrupts. If some of its
158 | * LPIs are injected into a guest (GICv4), the event_map.vm field |
159 | * indicates which one. |
160 | */ |
161 | struct its_device { |
162 | struct list_head entry; |
163 | struct its_node *its; |
164 | struct event_lpi_map event_map; |
165 | void *itt; |
166 | u32 nr_ites; |
167 | u32 device_id; |
168 | bool shared; |
169 | }; |
170 | |
171 | static struct { |
172 | raw_spinlock_t lock; |
173 | struct its_device *dev; |
174 | struct its_vpe **vpes; |
175 | int next_victim; |
176 | } vpe_proxy; |
177 | |
178 | struct cpu_lpi_count { |
179 | atomic_t managed; |
180 | atomic_t unmanaged; |
181 | }; |
182 | |
183 | static DEFINE_PER_CPU(struct cpu_lpi_count, cpu_lpi_count); |
184 | |
185 | static LIST_HEAD(its_nodes); |
186 | static DEFINE_RAW_SPINLOCK(its_lock); |
187 | static struct rdists *gic_rdists; |
188 | static struct irq_domain *its_parent; |
189 | |
190 | static unsigned long its_list_map; |
191 | static u16 vmovp_seq_num; |
192 | static DEFINE_RAW_SPINLOCK(vmovp_lock); |
193 | |
194 | static DEFINE_IDA(its_vpeid_ida); |
195 | |
196 | #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) |
197 | #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu)) |
198 | #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) |
199 | #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K) |
200 | |
201 | /* |
202 | * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we |
203 | * always have vSGIs mapped. |
204 | */ |
205 | static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its) |
206 | { |
207 | return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]); |
208 | } |
209 | |
210 | static bool rdists_support_shareable(void) |
211 | { |
212 | return !(gic_rdists->flags & RDIST_FLAGS_FORCE_NON_SHAREABLE); |
213 | } |
214 | |
215 | static u16 get_its_list(struct its_vm *vm) |
216 | { |
217 | struct its_node *its; |
218 | unsigned long its_list = 0; |
219 | |
220 | list_for_each_entry(its, &its_nodes, entry) { |
221 | if (!is_v4(its)) |
222 | continue; |
223 | |
224 | if (require_its_list_vmovp(vm, its)) |
225 | __set_bit(its->list_nr, &its_list); |
226 | } |
227 | |
228 | return (u16)its_list; |
229 | } |
230 | |
231 | static inline u32 its_get_event_id(struct irq_data *d) |
232 | { |
233 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
234 | return d->hwirq - its_dev->event_map.lpi_base; |
235 | } |
236 | |
237 | static struct its_collection *dev_event_to_col(struct its_device *its_dev, |
238 | u32 event) |
239 | { |
240 | struct its_node *its = its_dev->its; |
241 | |
242 | return its->collections + its_dev->event_map.col_map[event]; |
243 | } |
244 | |
245 | static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev, |
246 | u32 event) |
247 | { |
248 | if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis)) |
249 | return NULL; |
250 | |
251 | return &its_dev->event_map.vlpi_maps[event]; |
252 | } |
253 | |
254 | static struct its_vlpi_map *get_vlpi_map(struct irq_data *d) |
255 | { |
256 | if (irqd_is_forwarded_to_vcpu(d)) { |
257 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
258 | u32 event = its_get_event_id(d); |
259 | |
260 | return dev_event_to_vlpi_map(its_dev, event); |
261 | } |
262 | |
263 | return NULL; |
264 | } |
265 | |
266 | static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags) |
267 | { |
268 | raw_spin_lock_irqsave(&vpe->vpe_lock, *flags); |
269 | return vpe->col_idx; |
270 | } |
271 | |
272 | static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags) |
273 | { |
274 | raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); |
275 | } |
276 | |
277 | static struct irq_chip its_vpe_irq_chip; |
278 | |
279 | static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags) |
280 | { |
281 | struct its_vpe *vpe = NULL; |
282 | int cpu; |
283 | |
284 | if (d->chip == &its_vpe_irq_chip) { |
285 | vpe = irq_data_get_irq_chip_data(d); |
286 | } else { |
287 | struct its_vlpi_map *map = get_vlpi_map(d); |
288 | if (map) |
289 | vpe = map->vpe; |
290 | } |
291 | |
292 | if (vpe) { |
293 | cpu = vpe_to_cpuid_lock(vpe, flags); |
294 | } else { |
295 | /* Physical LPIs are already locked via the irq_desc lock */ |
296 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
297 | cpu = its_dev->event_map.col_map[its_get_event_id(d)]; |
298 | /* Keep GCC quiet... */ |
299 | *flags = 0; |
300 | } |
301 | |
302 | return cpu; |
303 | } |
304 | |
305 | static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags) |
306 | { |
307 | struct its_vpe *vpe = NULL; |
308 | |
309 | if (d->chip == &its_vpe_irq_chip) { |
310 | vpe = irq_data_get_irq_chip_data(d); |
311 | } else { |
312 | struct its_vlpi_map *map = get_vlpi_map(d); |
313 | if (map) |
314 | vpe = map->vpe; |
315 | } |
316 | |
317 | if (vpe) |
318 | vpe_to_cpuid_unlock(vpe, flags); |
319 | } |
320 | |
321 | static struct its_collection *valid_col(struct its_collection *col) |
322 | { |
323 | if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0))) |
324 | return NULL; |
325 | |
326 | return col; |
327 | } |
328 | |
329 | static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe) |
330 | { |
	if (valid_col(its->collections + vpe->col_idx))
332 | return vpe; |
333 | |
334 | return NULL; |
335 | } |
336 | |
337 | /* |
338 | * ITS command descriptors - parameters to be encoded in a command |
339 | * block. |
340 | */ |
341 | struct its_cmd_desc { |
342 | union { |
343 | struct { |
344 | struct its_device *dev; |
345 | u32 event_id; |
346 | } its_inv_cmd; |
347 | |
348 | struct { |
349 | struct its_device *dev; |
350 | u32 event_id; |
351 | } its_clear_cmd; |
352 | |
353 | struct { |
354 | struct its_device *dev; |
355 | u32 event_id; |
356 | } its_int_cmd; |
357 | |
358 | struct { |
359 | struct its_device *dev; |
360 | int valid; |
361 | } its_mapd_cmd; |
362 | |
363 | struct { |
364 | struct its_collection *col; |
365 | int valid; |
366 | } its_mapc_cmd; |
367 | |
368 | struct { |
369 | struct its_device *dev; |
370 | u32 phys_id; |
371 | u32 event_id; |
372 | } its_mapti_cmd; |
373 | |
374 | struct { |
375 | struct its_device *dev; |
376 | struct its_collection *col; |
377 | u32 event_id; |
378 | } its_movi_cmd; |
379 | |
380 | struct { |
381 | struct its_device *dev; |
382 | u32 event_id; |
383 | } its_discard_cmd; |
384 | |
385 | struct { |
386 | struct its_collection *col; |
387 | } its_invall_cmd; |
388 | |
389 | struct { |
390 | struct its_vpe *vpe; |
391 | } its_vinvall_cmd; |
392 | |
393 | struct { |
394 | struct its_vpe *vpe; |
395 | struct its_collection *col; |
396 | bool valid; |
397 | } its_vmapp_cmd; |
398 | |
399 | struct { |
400 | struct its_vpe *vpe; |
401 | struct its_device *dev; |
402 | u32 virt_id; |
403 | u32 event_id; |
404 | bool db_enabled; |
405 | } its_vmapti_cmd; |
406 | |
407 | struct { |
408 | struct its_vpe *vpe; |
409 | struct its_device *dev; |
410 | u32 event_id; |
411 | bool db_enabled; |
412 | } its_vmovi_cmd; |
413 | |
414 | struct { |
415 | struct its_vpe *vpe; |
416 | struct its_collection *col; |
417 | u16 seq_num; |
418 | u16 its_list; |
419 | } its_vmovp_cmd; |
420 | |
421 | struct { |
422 | struct its_vpe *vpe; |
423 | } its_invdb_cmd; |
424 | |
425 | struct { |
426 | struct its_vpe *vpe; |
427 | u8 sgi; |
428 | u8 priority; |
429 | bool enable; |
430 | bool group; |
431 | bool clear; |
432 | } its_vsgi_cmd; |
433 | }; |
434 | }; |
435 | |
436 | /* |
437 | * The ITS command block, which is what the ITS actually parses. |
438 | */ |
439 | struct its_cmd_block { |
440 | union { |
441 | u64 raw_cmd[4]; |
442 | __le64 raw_cmd_le[4]; |
443 | }; |
444 | }; |
445 | |
446 | #define ITS_CMD_QUEUE_SZ SZ_64K |
447 | #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) |
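
/*
 * A command block is four 64-bit words (32 bytes), so the 64kB command
 * queue above holds 2048 entries.
 */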
448 | |
449 | typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *, |
450 | struct its_cmd_block *, |
451 | struct its_cmd_desc *); |
452 | |
453 | typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *, |
454 | struct its_cmd_block *, |
455 | struct its_cmd_desc *); |
456 | |
457 | static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l) |
458 | { |
459 | u64 mask = GENMASK_ULL(h, l); |
460 | *raw_cmd &= ~mask; |
461 | *raw_cmd |= (val << l) & mask; |
462 | } |
463 | |
static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
{
	its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
}

static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
{
	its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
}

static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
{
	its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
}

static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
{
	its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
}

static void its_encode_size(struct its_cmd_block *cmd, u8 size)
{
	its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
}

static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
}

static void its_encode_valid(struct its_cmd_block *cmd, int valid)
{
	its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
}

static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
{
	its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
}

static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
{
	its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
}

static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
{
	its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
}

static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
{
	its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
}

static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
{
	its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
}

static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
{
	its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
}

static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
{
	its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
}

static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
{
	its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
}

static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
}

static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
{
	its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
}

static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
{
	its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
}

static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
{
	its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
}

static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
{
	its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
}

static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
}

static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
					u32 vpe_db_lpi)
{
	its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
}

static void its_encode_db(struct its_cmd_block *cmd, bool db)
{
	its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
}

static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
{
	its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
}

static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
{
	its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
}

static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
{
	its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
}

static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
{
	its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
}

static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
{
	its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
}
605 | |
606 | static inline void its_fixup_cmd(struct its_cmd_block *cmd) |
607 | { |
608 | /* Let's fixup BE commands */ |
609 | cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]); |
610 | cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]); |
611 | cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]); |
612 | cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]); |
613 | } |
614 | |
615 | static struct its_collection *its_build_mapd_cmd(struct its_node *its, |
616 | struct its_cmd_block *cmd, |
617 | struct its_cmd_desc *desc) |
618 | { |
619 | unsigned long itt_addr; |
620 | u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); |
621 | |
	itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
	itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);

	its_encode_cmd(cmd, GITS_CMD_MAPD);
	its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
	its_encode_size(cmd, size - 1);
	its_encode_itt(cmd, itt_addr);
	its_encode_valid(cmd, desc->its_mapd_cmd.valid);
630 | |
631 | its_fixup_cmd(cmd); |
632 | |
633 | return NULL; |
634 | } |
635 | |
636 | static struct its_collection *its_build_mapc_cmd(struct its_node *its, |
637 | struct its_cmd_block *cmd, |
638 | struct its_cmd_desc *desc) |
639 | { |
640 | its_encode_cmd(cmd, GITS_CMD_MAPC); |
	its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
	its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
	its_encode_valid(cmd, desc->its_mapc_cmd.valid);
644 | |
645 | its_fixup_cmd(cmd); |
646 | |
647 | return desc->its_mapc_cmd.col; |
648 | } |
649 | |
650 | static struct its_collection *its_build_mapti_cmd(struct its_node *its, |
651 | struct its_cmd_block *cmd, |
652 | struct its_cmd_desc *desc) |
653 | { |
654 | struct its_collection *col; |
655 | |
	col = dev_event_to_col(desc->its_mapti_cmd.dev,
			       desc->its_mapti_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MAPTI);
	its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
	its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
	its_encode_collection(cmd, col->col_id);
664 | |
665 | its_fixup_cmd(cmd); |
666 | |
667 | return valid_col(col); |
668 | } |
669 | |
670 | static struct its_collection *its_build_movi_cmd(struct its_node *its, |
671 | struct its_cmd_block *cmd, |
672 | struct its_cmd_desc *desc) |
673 | { |
674 | struct its_collection *col; |
675 | |
	col = dev_event_to_col(desc->its_movi_cmd.dev,
			       desc->its_movi_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_MOVI);
	its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
	its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
683 | |
684 | its_fixup_cmd(cmd); |
685 | |
686 | return valid_col(col); |
687 | } |
688 | |
689 | static struct its_collection *its_build_discard_cmd(struct its_node *its, |
690 | struct its_cmd_block *cmd, |
691 | struct its_cmd_desc *desc) |
692 | { |
693 | struct its_collection *col; |
694 | |
	col = dev_event_to_col(desc->its_discard_cmd.dev,
			       desc->its_discard_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_DISCARD);
	its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
701 | |
702 | its_fixup_cmd(cmd); |
703 | |
704 | return valid_col(col); |
705 | } |
706 | |
707 | static struct its_collection *its_build_inv_cmd(struct its_node *its, |
708 | struct its_cmd_block *cmd, |
709 | struct its_cmd_desc *desc) |
710 | { |
711 | struct its_collection *col; |
712 | |
	col = dev_event_to_col(desc->its_inv_cmd.dev,
			       desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
719 | |
720 | its_fixup_cmd(cmd); |
721 | |
722 | return valid_col(col); |
723 | } |
724 | |
725 | static struct its_collection *its_build_int_cmd(struct its_node *its, |
726 | struct its_cmd_block *cmd, |
727 | struct its_cmd_desc *desc) |
728 | { |
729 | struct its_collection *col; |
730 | |
	col = dev_event_to_col(desc->its_int_cmd.dev,
			       desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);
737 | |
738 | its_fixup_cmd(cmd); |
739 | |
740 | return valid_col(col); |
741 | } |
742 | |
743 | static struct its_collection *its_build_clear_cmd(struct its_node *its, |
744 | struct its_cmd_block *cmd, |
745 | struct its_cmd_desc *desc) |
746 | { |
747 | struct its_collection *col; |
748 | |
	col = dev_event_to_col(desc->its_clear_cmd.dev,
			       desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
755 | |
756 | its_fixup_cmd(cmd); |
757 | |
758 | return valid_col(col); |
759 | } |
760 | |
761 | static struct its_collection *its_build_invall_cmd(struct its_node *its, |
762 | struct its_cmd_block *cmd, |
763 | struct its_cmd_desc *desc) |
764 | { |
765 | its_encode_cmd(cmd, GITS_CMD_INVALL); |
	its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
767 | |
768 | its_fixup_cmd(cmd); |
769 | |
770 | return desc->its_invall_cmd.col; |
771 | } |
772 | |
773 | static struct its_vpe *its_build_vinvall_cmd(struct its_node *its, |
774 | struct its_cmd_block *cmd, |
775 | struct its_cmd_desc *desc) |
776 | { |
777 | its_encode_cmd(cmd, GITS_CMD_VINVALL); |
	its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vinvall_cmd.vpe);
783 | } |
784 | |
785 | static struct its_vpe *its_build_vmapp_cmd(struct its_node *its, |
786 | struct its_cmd_block *cmd, |
787 | struct its_cmd_desc *desc) |
788 | { |
	struct its_vpe *vpe = valid_vpe(its, desc->its_vmapp_cmd.vpe);
790 | unsigned long vpt_addr, vconf_addr; |
791 | u64 target; |
792 | bool alloc; |
793 | |
794 | its_encode_cmd(cmd, GITS_CMD_VMAPP); |
	its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
	its_encode_valid(cmd, desc->its_vmapp_cmd.valid);

	if (!desc->its_vmapp_cmd.valid) {
		if (is_v4_1(its)) {
			alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
801 | its_encode_alloc(cmd, alloc); |
802 | /* |
803 | * Unmapping a VPE is self-synchronizing on GICv4.1, |
804 | * no need to issue a VSYNC. |
805 | */ |
806 | vpe = NULL; |
807 | } |
808 | |
809 | goto out; |
810 | } |
811 | |
812 | vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page)); |
813 | target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset; |
814 | |
	its_encode_target(cmd, target);
	its_encode_vpt_addr(cmd, vpt_addr);
817 | its_encode_vpt_size(cmd, LPI_NRBITS - 1); |
818 | |
819 | if (!is_v4_1(its)) |
820 | goto out; |
821 | |
822 | vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page)); |
823 | |
	alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
825 | |
826 | its_encode_alloc(cmd, alloc); |
827 | |
828 | /* |
829 | * GICv4.1 provides a way to get the VLPI state, which needs the vPE |
830 | * to be unmapped first, and in this case, we may remap the vPE |
831 | * back while the VPT is not empty. So we can't assume that the |
832 | * VPT is empty on map. This is why we never advertise PTZ. |
833 | */ |
	its_encode_ptz(cmd, false);
	its_encode_vconf_addr(cmd, vconf_addr);
	its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
837 | |
838 | out: |
839 | its_fixup_cmd(cmd); |
840 | |
841 | return vpe; |
842 | } |
843 | |
844 | static struct its_vpe *its_build_vmapti_cmd(struct its_node *its, |
845 | struct its_cmd_block *cmd, |
846 | struct its_cmd_desc *desc) |
847 | { |
848 | u32 db; |
849 | |
850 | if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled) |
851 | db = desc->its_vmapti_cmd.vpe->vpe_db_lpi; |
852 | else |
853 | db = 1023; |
854 | |
855 | its_encode_cmd(cmd, GITS_CMD_VMAPTI); |
	its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmapti_cmd.vpe);
865 | } |
866 | |
867 | static struct its_vpe *its_build_vmovi_cmd(struct its_node *its, |
868 | struct its_cmd_block *cmd, |
869 | struct its_cmd_desc *desc) |
870 | { |
871 | u32 db; |
872 | |
873 | if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled) |
874 | db = desc->its_vmovi_cmd.vpe->vpe_db_lpi; |
875 | else |
876 | db = 1023; |
877 | |
878 | its_encode_cmd(cmd, GITS_CMD_VMOVI); |
	its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
	its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
	its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
	its_encode_db_phys_id(cmd, db);
	its_encode_db_valid(cmd, true);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovi_cmd.vpe);
888 | } |
889 | |
890 | static struct its_vpe *its_build_vmovp_cmd(struct its_node *its, |
891 | struct its_cmd_block *cmd, |
892 | struct its_cmd_desc *desc) |
893 | { |
894 | u64 target; |
895 | |
896 | target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset; |
897 | its_encode_cmd(cmd, GITS_CMD_VMOVP); |
	its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
	its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
	its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
	its_encode_target(cmd, target);

	if (is_v4_1(its)) {
		its_encode_db(cmd, true);
		its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
	}

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vmovp_cmd.vpe);
911 | } |
912 | |
913 | static struct its_vpe *its_build_vinv_cmd(struct its_node *its, |
914 | struct its_cmd_block *cmd, |
915 | struct its_cmd_desc *desc) |
916 | { |
917 | struct its_vlpi_map *map; |
918 | |
	map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
				    desc->its_inv_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INV);
	its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_inv_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
929 | } |
930 | |
931 | static struct its_vpe *its_build_vint_cmd(struct its_node *its, |
932 | struct its_cmd_block *cmd, |
933 | struct its_cmd_desc *desc) |
934 | { |
935 | struct its_vlpi_map *map; |
936 | |
	map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
				    desc->its_int_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_INT);
	its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_int_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
947 | } |
948 | |
949 | static struct its_vpe *its_build_vclear_cmd(struct its_node *its, |
950 | struct its_cmd_block *cmd, |
951 | struct its_cmd_desc *desc) |
952 | { |
953 | struct its_vlpi_map *map; |
954 | |
	map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
				    desc->its_clear_cmd.event_id);

	its_encode_cmd(cmd, GITS_CMD_CLEAR);
	its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
	its_encode_event_id(cmd, desc->its_clear_cmd.event_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, map->vpe);
965 | } |
966 | |
967 | static struct its_vpe *its_build_invdb_cmd(struct its_node *its, |
968 | struct its_cmd_block *cmd, |
969 | struct its_cmd_desc *desc) |
970 | { |
971 | if (WARN_ON(!is_v4_1(its))) |
972 | return NULL; |
973 | |
974 | its_encode_cmd(cmd, GITS_CMD_INVDB); |
	its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_invdb_cmd.vpe);
980 | } |
981 | |
982 | static struct its_vpe *its_build_vsgi_cmd(struct its_node *its, |
983 | struct its_cmd_block *cmd, |
984 | struct its_cmd_desc *desc) |
985 | { |
986 | if (WARN_ON(!is_v4_1(its))) |
987 | return NULL; |
988 | |
989 | its_encode_cmd(cmd, GITS_CMD_VSGI); |
	its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
	its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
	its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
	its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
	its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
	its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);

	its_fixup_cmd(cmd);

	return valid_vpe(its, desc->its_vsgi_cmd.vpe);
1000 | } |
1001 | |
1002 | static u64 its_cmd_ptr_to_offset(struct its_node *its, |
1003 | struct its_cmd_block *ptr) |
1004 | { |
1005 | return (ptr - its->cmd_base) * sizeof(*ptr); |
1006 | } |
1007 | |
1008 | static int its_queue_full(struct its_node *its) |
1009 | { |
1010 | int widx; |
1011 | int ridx; |
1012 | |
1013 | widx = its->cmd_write - its->cmd_base; |
1014 | ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); |
1015 | |
1016 | /* This is incredibly unlikely to happen, unless the ITS locks up. */ |
1017 | if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) |
1018 | return 1; |
1019 | |
1020 | return 0; |
1021 | } |
1022 | |
1023 | static struct its_cmd_block *its_allocate_entry(struct its_node *its) |
1024 | { |
1025 | struct its_cmd_block *cmd; |
1026 | u32 count = 1000000; /* 1s! */ |
1027 | |
1028 | while (its_queue_full(its)) { |
1029 | count--; |
1030 | if (!count) { |
			pr_err_ratelimited("ITS queue not draining\n");
1032 | return NULL; |
1033 | } |
1034 | cpu_relax(); |
1035 | udelay(1); |
1036 | } |
1037 | |
1038 | cmd = its->cmd_write++; |
1039 | |
1040 | /* Handle queue wrapping */ |
1041 | if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) |
1042 | its->cmd_write = its->cmd_base; |
1043 | |
1044 | /* Clear command */ |
1045 | cmd->raw_cmd[0] = 0; |
1046 | cmd->raw_cmd[1] = 0; |
1047 | cmd->raw_cmd[2] = 0; |
1048 | cmd->raw_cmd[3] = 0; |
1049 | |
1050 | return cmd; |
1051 | } |
1052 | |
1053 | static struct its_cmd_block *its_post_commands(struct its_node *its) |
1054 | { |
	u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1056 | |
1057 | writel_relaxed(wr, its->base + GITS_CWRITER); |
1058 | |
1059 | return its->cmd_write; |
1060 | } |
1061 | |
1062 | static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) |
1063 | { |
1064 | /* |
1065 | * Make sure the commands written to memory are observable by |
1066 | * the ITS. |
1067 | */ |
1068 | if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) |
1069 | gic_flush_dcache_to_poc(cmd, sizeof(*cmd)); |
1070 | else |
1071 | dsb(ishst); |
1072 | } |
1073 | |
1074 | static int its_wait_for_range_completion(struct its_node *its, |
1075 | u64 prev_idx, |
1076 | struct its_cmd_block *to) |
1077 | { |
1078 | u64 rd_idx, to_idx, linear_idx; |
1079 | u32 count = 1000000; /* 1s! */ |
1080 | |
	/* Linearize to_idx if the command queue has wrapped around */
	to_idx = its_cmd_ptr_to_offset(its, to);
1083 | if (to_idx < prev_idx) |
1084 | to_idx += ITS_CMD_QUEUE_SZ; |
1085 | |
1086 | linear_idx = prev_idx; |
1087 | |
1088 | while (1) { |
1089 | s64 delta; |
1090 | |
1091 | rd_idx = readl_relaxed(its->base + GITS_CREADR); |
1092 | |
1093 | /* |
1094 | * Compute the read pointer progress, taking the |
1095 | * potential wrap-around into account. |
1096 | */ |
1097 | delta = rd_idx - prev_idx; |
1098 | if (rd_idx < prev_idx) |
1099 | delta += ITS_CMD_QUEUE_SZ; |
1100 | |
1101 | linear_idx += delta; |
1102 | if (linear_idx >= to_idx) |
1103 | break; |
1104 | |
1105 | count--; |
1106 | if (!count) { |
			pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1108 | to_idx, linear_idx); |
1109 | return -1; |
1110 | } |
1111 | prev_idx = rd_idx; |
1112 | cpu_relax(); |
1113 | udelay(1); |
1114 | } |
1115 | |
1116 | return 0; |
1117 | } |
1118 | |
1119 | /* Warning, macro hell follows */ |
1120 | #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \ |
1121 | void name(struct its_node *its, \ |
1122 | buildtype builder, \ |
1123 | struct its_cmd_desc *desc) \ |
1124 | { \ |
1125 | struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \ |
1126 | synctype *sync_obj; \ |
1127 | unsigned long flags; \ |
1128 | u64 rd_idx; \ |
1129 | \ |
1130 | raw_spin_lock_irqsave(&its->lock, flags); \ |
1131 | \ |
1132 | cmd = its_allocate_entry(its); \ |
	if (!cmd) {		/* We're soooooo screwed... */	\
1134 | raw_spin_unlock_irqrestore(&its->lock, flags); \ |
1135 | return; \ |
1136 | } \ |
1137 | sync_obj = builder(its, cmd, desc); \ |
1138 | its_flush_cmd(its, cmd); \ |
1139 | \ |
1140 | if (sync_obj) { \ |
1141 | sync_cmd = its_allocate_entry(its); \ |
1142 | if (!sync_cmd) \ |
1143 | goto post; \ |
1144 | \ |
1145 | buildfn(its, sync_cmd, sync_obj); \ |
1146 | its_flush_cmd(its, sync_cmd); \ |
1147 | } \ |
1148 | \ |
1149 | post: \ |
1150 | rd_idx = readl_relaxed(its->base + GITS_CREADR); \ |
1151 | next_cmd = its_post_commands(its); \ |
1152 | raw_spin_unlock_irqrestore(&its->lock, flags); \ |
1153 | \ |
1154 | if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \ |
1155 | pr_err_ratelimited("ITS cmd %ps failed\n", builder); \ |
1156 | } |
1157 | |
1158 | static void its_build_sync_cmd(struct its_node *its, |
1159 | struct its_cmd_block *sync_cmd, |
1160 | struct its_collection *sync_col) |
1161 | { |
	its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
	its_encode_target(sync_cmd, sync_col->target_address);

	its_fixup_cmd(sync_cmd);
1166 | } |
1167 | |
1168 | static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t, |
1169 | struct its_collection, its_build_sync_cmd) |
1170 | |
1171 | static void its_build_vsync_cmd(struct its_node *its, |
1172 | struct its_cmd_block *sync_cmd, |
1173 | struct its_vpe *sync_vpe) |
1174 | { |
	its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
	its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);

	its_fixup_cmd(sync_cmd);
1179 | } |
1180 | |
1181 | static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t, |
1182 | struct its_vpe, its_build_vsync_cmd) |
1183 | |
1184 | static void its_send_int(struct its_device *dev, u32 event_id) |
1185 | { |
1186 | struct its_cmd_desc desc; |
1187 | |
1188 | desc.its_int_cmd.dev = dev; |
1189 | desc.its_int_cmd.event_id = event_id; |
1190 | |
	its_send_single_command(dev->its, its_build_int_cmd, &desc);
1192 | } |
1193 | |
1194 | static void its_send_clear(struct its_device *dev, u32 event_id) |
1195 | { |
1196 | struct its_cmd_desc desc; |
1197 | |
1198 | desc.its_clear_cmd.dev = dev; |
1199 | desc.its_clear_cmd.event_id = event_id; |
1200 | |
	its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1202 | } |
1203 | |
1204 | static void its_send_inv(struct its_device *dev, u32 event_id) |
1205 | { |
1206 | struct its_cmd_desc desc; |
1207 | |
1208 | desc.its_inv_cmd.dev = dev; |
1209 | desc.its_inv_cmd.event_id = event_id; |
1210 | |
	its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1212 | } |
1213 | |
1214 | static void its_send_mapd(struct its_device *dev, int valid) |
1215 | { |
1216 | struct its_cmd_desc desc; |
1217 | |
1218 | desc.its_mapd_cmd.dev = dev; |
1219 | desc.its_mapd_cmd.valid = !!valid; |
1220 | |
	its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1222 | } |
1223 | |
1224 | static void its_send_mapc(struct its_node *its, struct its_collection *col, |
1225 | int valid) |
1226 | { |
1227 | struct its_cmd_desc desc; |
1228 | |
1229 | desc.its_mapc_cmd.col = col; |
1230 | desc.its_mapc_cmd.valid = !!valid; |
1231 | |
	its_send_single_command(its, its_build_mapc_cmd, &desc);
1233 | } |
1234 | |
1235 | static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id) |
1236 | { |
1237 | struct its_cmd_desc desc; |
1238 | |
1239 | desc.its_mapti_cmd.dev = dev; |
1240 | desc.its_mapti_cmd.phys_id = irq_id; |
1241 | desc.its_mapti_cmd.event_id = id; |
1242 | |
	its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1244 | } |
1245 | |
1246 | static void its_send_movi(struct its_device *dev, |
1247 | struct its_collection *col, u32 id) |
1248 | { |
1249 | struct its_cmd_desc desc; |
1250 | |
1251 | desc.its_movi_cmd.dev = dev; |
1252 | desc.its_movi_cmd.col = col; |
1253 | desc.its_movi_cmd.event_id = id; |
1254 | |
	its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1256 | } |
1257 | |
1258 | static void its_send_discard(struct its_device *dev, u32 id) |
1259 | { |
1260 | struct its_cmd_desc desc; |
1261 | |
1262 | desc.its_discard_cmd.dev = dev; |
1263 | desc.its_discard_cmd.event_id = id; |
1264 | |
	its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1266 | } |
1267 | |
1268 | static void its_send_invall(struct its_node *its, struct its_collection *col) |
1269 | { |
1270 | struct its_cmd_desc desc; |
1271 | |
1272 | desc.its_invall_cmd.col = col; |
1273 | |
	its_send_single_command(its, its_build_invall_cmd, &desc);
1275 | } |
1276 | |
1277 | static void its_send_vmapti(struct its_device *dev, u32 id) |
1278 | { |
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1280 | struct its_cmd_desc desc; |
1281 | |
1282 | desc.its_vmapti_cmd.vpe = map->vpe; |
1283 | desc.its_vmapti_cmd.dev = dev; |
1284 | desc.its_vmapti_cmd.virt_id = map->vintid; |
1285 | desc.its_vmapti_cmd.event_id = id; |
1286 | desc.its_vmapti_cmd.db_enabled = map->db_enabled; |
1287 | |
	its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1289 | } |
1290 | |
1291 | static void its_send_vmovi(struct its_device *dev, u32 id) |
1292 | { |
	struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1294 | struct its_cmd_desc desc; |
1295 | |
1296 | desc.its_vmovi_cmd.vpe = map->vpe; |
1297 | desc.its_vmovi_cmd.dev = dev; |
1298 | desc.its_vmovi_cmd.event_id = id; |
1299 | desc.its_vmovi_cmd.db_enabled = map->db_enabled; |
1300 | |
	its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1302 | } |
1303 | |
1304 | static void its_send_vmapp(struct its_node *its, |
1305 | struct its_vpe *vpe, bool valid) |
1306 | { |
1307 | struct its_cmd_desc desc; |
1308 | |
1309 | desc.its_vmapp_cmd.vpe = vpe; |
1310 | desc.its_vmapp_cmd.valid = valid; |
1311 | desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx]; |
1312 | |
	its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1314 | } |
1315 | |
1316 | static void its_send_vmovp(struct its_vpe *vpe) |
1317 | { |
1318 | struct its_cmd_desc desc = {}; |
1319 | struct its_node *its; |
1320 | unsigned long flags; |
1321 | int col_id = vpe->col_idx; |
1322 | |
1323 | desc.its_vmovp_cmd.vpe = vpe; |
1324 | |
1325 | if (!its_list_map) { |
1326 | its = list_first_entry(&its_nodes, struct its_node, entry); |
1327 | desc.its_vmovp_cmd.col = &its->collections[col_id]; |
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1329 | return; |
1330 | } |
1331 | |
1332 | /* |
1333 | * Yet another marvel of the architecture. If using the |
1334 | * its_list "feature", we need to make sure that all ITSs |
1335 | * receive all VMOVP commands in the same order. The only way |
1336 | * to guarantee this is to make vmovp a serialization point. |
1337 | * |
1338 | * Wall <-- Head. |
1339 | */ |
1340 | raw_spin_lock_irqsave(&vmovp_lock, flags); |
1341 | |
1342 | desc.its_vmovp_cmd.seq_num = vmovp_seq_num++; |
	desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1344 | |
1345 | /* Emit VMOVPs */ |
1346 | list_for_each_entry(its, &its_nodes, entry) { |
1347 | if (!is_v4(its)) |
1348 | continue; |
1349 | |
		if (!require_its_list_vmovp(vpe->its_vm, its))
1351 | continue; |
1352 | |
1353 | desc.its_vmovp_cmd.col = &its->collections[col_id]; |
		its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1355 | } |
1356 | |
1357 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); |
1358 | } |
1359 | |
1360 | static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe) |
1361 | { |
1362 | struct its_cmd_desc desc; |
1363 | |
1364 | desc.its_vinvall_cmd.vpe = vpe; |
	its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1366 | } |
1367 | |
1368 | static void its_send_vinv(struct its_device *dev, u32 event_id) |
1369 | { |
1370 | struct its_cmd_desc desc; |
1371 | |
1372 | /* |
1373 | * There is no real VINV command. This is just a normal INV, |
1374 | * with a VSYNC instead of a SYNC. |
1375 | */ |
1376 | desc.its_inv_cmd.dev = dev; |
1377 | desc.its_inv_cmd.event_id = event_id; |
1378 | |
	its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1380 | } |
1381 | |
1382 | static void its_send_vint(struct its_device *dev, u32 event_id) |
1383 | { |
1384 | struct its_cmd_desc desc; |
1385 | |
1386 | /* |
1387 | * There is no real VINT command. This is just a normal INT, |
1388 | * with a VSYNC instead of a SYNC. |
1389 | */ |
1390 | desc.its_int_cmd.dev = dev; |
1391 | desc.its_int_cmd.event_id = event_id; |
1392 | |
	its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1394 | } |
1395 | |
1396 | static void its_send_vclear(struct its_device *dev, u32 event_id) |
1397 | { |
1398 | struct its_cmd_desc desc; |
1399 | |
1400 | /* |
1401 | * There is no real VCLEAR command. This is just a normal CLEAR, |
1402 | * with a VSYNC instead of a SYNC. |
1403 | */ |
1404 | desc.its_clear_cmd.dev = dev; |
1405 | desc.its_clear_cmd.event_id = event_id; |
1406 | |
	its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1408 | } |
1409 | |
1410 | static void its_send_invdb(struct its_node *its, struct its_vpe *vpe) |
1411 | { |
1412 | struct its_cmd_desc desc; |
1413 | |
1414 | desc.its_invdb_cmd.vpe = vpe; |
	its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1416 | } |
1417 | |
1418 | /* |
1419 | * irqchip functions - assumes MSI, mostly. |
1420 | */ |
1421 | static void lpi_write_config(struct irq_data *d, u8 clr, u8 set) |
1422 | { |
1423 | struct its_vlpi_map *map = get_vlpi_map(d); |
1424 | irq_hw_number_t hwirq; |
1425 | void *va; |
1426 | u8 *cfg; |
1427 | |
1428 | if (map) { |
1429 | va = page_address(map->vm->vprop_page); |
1430 | hwirq = map->vintid; |
1431 | |
1432 | /* Remember the updated property */ |
1433 | map->properties &= ~clr; |
1434 | map->properties |= set | LPI_PROP_GROUP1; |
1435 | } else { |
1436 | va = gic_rdists->prop_table_va; |
1437 | hwirq = d->hwirq; |
1438 | } |
1439 | |
1440 | cfg = va + hwirq - 8192; |
1441 | *cfg &= ~clr; |
1442 | *cfg |= set | LPI_PROP_GROUP1; |
1443 | |
1444 | /* |
1445 | * Make the above write visible to the redistributors. |
1446 | * And yes, we're flushing exactly: One. Single. Byte. |
1447 | * Humpf... |
1448 | */ |
1449 | if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) |
1450 | gic_flush_dcache_to_poc(cfg, sizeof(*cfg)); |
1451 | else |
1452 | dsb(ishst); |
1453 | } |
1454 | |
1455 | static void wait_for_syncr(void __iomem *rdbase) |
1456 | { |
1457 | while (readl_relaxed(rdbase + GICR_SYNCR) & 1) |
1458 | cpu_relax(); |
1459 | } |
1460 | |
1461 | static void __direct_lpi_inv(struct irq_data *d, u64 val) |
1462 | { |
1463 | void __iomem *rdbase; |
1464 | unsigned long flags; |
1465 | int cpu; |
1466 | |
1467 | /* Target the redistributor this LPI is currently routed to */ |
	cpu = irq_to_cpuid_lock(d, &flags);
1469 | raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); |
1470 | |
1471 | rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; |
1472 | gic_write_lpir(val, rdbase + GICR_INVLPIR); |
1473 | wait_for_syncr(rdbase); |
1474 | |
1475 | raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); |
1476 | irq_to_cpuid_unlock(d, flags); |
1477 | } |
1478 | |
1479 | static void direct_lpi_inv(struct irq_data *d) |
1480 | { |
1481 | struct its_vlpi_map *map = get_vlpi_map(d); |
1482 | u64 val; |
1483 | |
1484 | if (map) { |
1485 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1486 | |
1487 | WARN_ON(!is_v4_1(its_dev->its)); |
1488 | |
1489 | val = GICR_INVLPIR_V; |
1490 | val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id); |
1491 | val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid); |
1492 | } else { |
1493 | val = d->hwirq; |
1494 | } |
1495 | |
1496 | __direct_lpi_inv(d, val); |
1497 | } |
1498 | |
1499 | static void lpi_update_config(struct irq_data *d, u8 clr, u8 set) |
1500 | { |
1501 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1502 | |
1503 | lpi_write_config(d, clr, set); |
1504 | if (gic_rdists->has_direct_lpi && |
1505 | (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d))) |
1506 | direct_lpi_inv(d); |
1507 | else if (!irqd_is_forwarded_to_vcpu(d)) |
		its_send_inv(its_dev, its_get_event_id(d));
	else
		its_send_vinv(its_dev, its_get_event_id(d));
1511 | } |
1512 | |
1513 | static void its_vlpi_set_doorbell(struct irq_data *d, bool enable) |
1514 | { |
1515 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1516 | u32 event = its_get_event_id(d); |
1517 | struct its_vlpi_map *map; |
1518 | |
1519 | /* |
1520 | * GICv4.1 does away with the per-LPI nonsense, nothing to do |
1521 | * here. |
1522 | */ |
1523 | if (is_v4_1(its_dev->its)) |
1524 | return; |
1525 | |
1526 | map = dev_event_to_vlpi_map(its_dev, event); |
1527 | |
1528 | if (map->db_enabled == enable) |
1529 | return; |
1530 | |
1531 | map->db_enabled = enable; |
1532 | |
1533 | /* |
1534 | * More fun with the architecture: |
1535 | * |
1536 | * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI |
1537 | * value or to 1023, depending on the enable bit. But that |
1538 | * would be issuing a mapping for an /existing/ DevID+EventID |
1539 | * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI |
1540 | * to the /same/ vPE, using this opportunity to adjust the |
1541 | * doorbell. Mouahahahaha. We loves it, Precious. |
1542 | */ |
	its_send_vmovi(its_dev, event);
1544 | } |
1545 | |
1546 | static void its_mask_irq(struct irq_data *d) |
1547 | { |
1548 | if (irqd_is_forwarded_to_vcpu(d)) |
		its_vlpi_set_doorbell(d, false);

	lpi_update_config(d, LPI_PROP_ENABLED, 0);
1552 | } |
1553 | |
1554 | static void its_unmask_irq(struct irq_data *d) |
1555 | { |
1556 | if (irqd_is_forwarded_to_vcpu(d)) |
		its_vlpi_set_doorbell(d, true);

	lpi_update_config(d, 0, LPI_PROP_ENABLED);
1560 | } |
1561 | |
1562 | static __maybe_unused u32 its_read_lpi_count(struct irq_data *d, int cpu) |
1563 | { |
1564 | if (irqd_affinity_is_managed(d)) |
		return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);

	return atomic_read(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1568 | } |
1569 | |
1570 | static void its_inc_lpi_count(struct irq_data *d, int cpu) |
1571 | { |
1572 | if (irqd_affinity_is_managed(d)) |
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
	else
		atomic_inc(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1576 | } |
1577 | |
1578 | static void its_dec_lpi_count(struct irq_data *d, int cpu) |
1579 | { |
1580 | if (irqd_affinity_is_managed(d)) |
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->managed);
	else
		atomic_dec(&per_cpu_ptr(&cpu_lpi_count, cpu)->unmanaged);
1584 | } |
1585 | |
1586 | static unsigned int cpumask_pick_least_loaded(struct irq_data *d, |
1587 | const struct cpumask *cpu_mask) |
1588 | { |
1589 | unsigned int cpu = nr_cpu_ids, tmp; |
1590 | int count = S32_MAX; |
1591 | |
1592 | for_each_cpu(tmp, cpu_mask) { |
		int this_count = its_read_lpi_count(d, tmp);
1594 | if (this_count < count) { |
1595 | cpu = tmp; |
1596 | count = this_count; |
1597 | } |
1598 | } |
1599 | |
1600 | return cpu; |
1601 | } |
1602 | |
1603 | /* |
1604 | * As suggested by Thomas Gleixner in: |
1605 | * https://lore.kernel.org/r/87h80q2aoc.fsf@nanos.tec.linutronix.de |
1606 | */ |
1607 | static int its_select_cpu(struct irq_data *d, |
1608 | const struct cpumask *aff_mask) |
1609 | { |
1610 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1611 | static DEFINE_RAW_SPINLOCK(tmpmask_lock); |
1612 | static struct cpumask __tmpmask; |
1613 | struct cpumask *tmpmask; |
1614 | unsigned long flags; |
	int cpu, node;

	node = its_dev->its->numa_node;
1617 | tmpmask = &__tmpmask; |
1618 | |
1619 | raw_spin_lock_irqsave(&tmpmask_lock, flags); |
1620 | |
1621 | if (!irqd_affinity_is_managed(d)) { |
1622 | /* First try the NUMA node */ |
1623 | if (node != NUMA_NO_NODE) { |
1624 | /* |
1625 | * Try the intersection of the affinity mask and the |
1626 | * node mask (and the online mask, just to be safe). |
1627 | */ |
			cpumask_and(tmpmask, cpumask_of_node(node), aff_mask);
			cpumask_and(tmpmask, tmpmask, cpu_online_mask);
1630 | |
1631 | /* |
1632 | * Ideally, we would check if the mask is empty, and |
1633 | * try again on the full node here. |
1634 | * |
1635 | * But it turns out that the way ACPI describes the |
			 * affinity for ITSs only deals with memory, and
1637 | * not target CPUs, so it cannot describe a single |
1638 | * ITS placed next to two NUMA nodes. |
1639 | * |
			 * Instead, just fall back on the online mask. This
1641 | * diverges from Thomas' suggestion above. |
1642 | */ |
			cpu = cpumask_pick_least_loaded(d, tmpmask);
1644 | if (cpu < nr_cpu_ids) |
1645 | goto out; |
1646 | |
1647 | /* If we can't cross sockets, give up */ |
1648 | if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)) |
1649 | goto out; |
1650 | |
1651 | /* If the above failed, expand the search */ |
1652 | } |
1653 | |
1654 | /* Try the intersection of the affinity and online masks */ |
		cpumask_and(tmpmask, aff_mask, cpu_online_mask);
1656 | |
1657 | /* If that doesn't fly, the online mask is the last resort */ |
		if (cpumask_empty(tmpmask))
			cpumask_copy(tmpmask, cpu_online_mask);
1660 | |
		cpu = cpumask_pick_least_loaded(d, tmpmask);
1662 | } else { |
		cpumask_copy(tmpmask, aff_mask);
1664 | |
1665 | /* If we cannot cross sockets, limit the search to that node */ |
1666 | if ((its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) && |
1667 | node != NUMA_NO_NODE) |
			cpumask_and(tmpmask, tmpmask, cpumask_of_node(node));
1669 | |
		cpu = cpumask_pick_least_loaded(d, tmpmask);
1671 | } |
1672 | out: |
1673 | raw_spin_unlock_irqrestore(&tmpmask_lock, flags); |
1674 | |
	pr_debug("IRQ%d -> %*pbl CPU%d\n", d->irq, cpumask_pr_args(aff_mask), cpu);
1676 | return cpu; |
1677 | } |
1678 | |
1679 | static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, |
1680 | bool force) |
1681 | { |
1682 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1683 | struct its_collection *target_col; |
1684 | u32 id = its_get_event_id(d); |
1685 | int cpu, prev_cpu; |
1686 | |
1687 | /* A forwarded interrupt should use irq_set_vcpu_affinity */ |
1688 | if (irqd_is_forwarded_to_vcpu(d)) |
1689 | return -EINVAL; |
1690 | |
1691 | prev_cpu = its_dev->event_map.col_map[id]; |
	its_dec_lpi_count(d, prev_cpu);
1693 | |
1694 | if (!force) |
		cpu = its_select_cpu(d, mask_val);
	else
		cpu = cpumask_pick_least_loaded(d, mask_val);
1698 | |
1699 | if (cpu < 0 || cpu >= nr_cpu_ids) |
1700 | goto err; |
1701 | |
	/* don't set the affinity when the target cpu is the same as the current one */
1703 | if (cpu != prev_cpu) { |
1704 | target_col = &its_dev->its->collections[cpu]; |
		its_send_movi(its_dev, target_col, id);
1706 | its_dev->event_map.col_map[id] = cpu; |
1707 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
1708 | } |
1709 | |
1710 | its_inc_lpi_count(d, cpu); |
1711 | |
1712 | return IRQ_SET_MASK_OK_DONE; |
1713 | |
1714 | err: |
	its_inc_lpi_count(d, prev_cpu);
1716 | return -EINVAL; |
1717 | } |
1718 | |
1719 | static u64 its_irq_get_msi_base(struct its_device *its_dev) |
1720 | { |
1721 | struct its_node *its = its_dev->its; |
1722 | |
1723 | return its->phys_base + GITS_TRANSLATER; |
1724 | } |
1725 | |
1726 | static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) |
1727 | { |
1728 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1729 | struct its_node *its; |
1730 | u64 addr; |
1731 | |
1732 | its = its_dev->its; |
1733 | addr = its->get_msi_base(its_dev); |
1734 | |
1735 | msg->address_lo = lower_32_bits(addr); |
1736 | msg->address_hi = upper_32_bits(addr); |
1737 | msg->data = its_get_event_id(d); |
1738 | |
	iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1740 | } |
1741 | |
1742 | static int its_irq_set_irqchip_state(struct irq_data *d, |
1743 | enum irqchip_irq_state which, |
1744 | bool state) |
1745 | { |
1746 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1747 | u32 event = its_get_event_id(d); |
1748 | |
1749 | if (which != IRQCHIP_STATE_PENDING) |
1750 | return -EINVAL; |
1751 | |
1752 | if (irqd_is_forwarded_to_vcpu(d)) { |
1753 | if (state) |
			its_send_vint(its_dev, event);
		else
			its_send_vclear(its_dev, event);
	} else {
		if (state)
			its_send_int(its_dev, event);
		else
			its_send_clear(its_dev, event);
1762 | } |
1763 | |
1764 | return 0; |
1765 | } |
1766 | |
1767 | static int its_irq_retrigger(struct irq_data *d) |
1768 | { |
	return !its_irq_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
1770 | } |
1771 | |
1772 | /* |
1773 | * Two favourable cases: |
1774 | * |
1775 | * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times |
1776 | * for vSGI delivery |
1777 | * |
1778 | * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough |
1779 | * and we're better off mapping all VPEs always |
1780 | * |
1781 | * If neither (a) nor (b) is true, then we map vPEs on demand. |
1782 | * |
1783 | */ |
1784 | static bool gic_requires_eager_mapping(void) |
1785 | { |
1786 | if (!its_list_map || gic_rdists->has_rvpeid) |
1787 | return true; |
1788 | |
1789 | return false; |
1790 | } |
1791 | |
1792 | static void its_map_vm(struct its_node *its, struct its_vm *vm) |
1793 | { |
1794 | unsigned long flags; |
1795 | |
1796 | if (gic_requires_eager_mapping()) |
1797 | return; |
1798 | |
1799 | raw_spin_lock_irqsave(&vmovp_lock, flags); |
1800 | |
1801 | /* |
1802 | * If the VM wasn't mapped yet, iterate over the vpes and get |
1803 | * them mapped now. |
1804 | */ |
1805 | vm->vlpi_count[its->list_nr]++; |
1806 | |
1807 | if (vm->vlpi_count[its->list_nr] == 1) { |
1808 | int i; |
1809 | |
1810 | for (i = 0; i < vm->nr_vpes; i++) { |
1811 | struct its_vpe *vpe = vm->vpes[i]; |
			struct irq_data *d = irq_get_irq_data(vpe->irq);

			/* Map the VPE to the first possible CPU */
			vpe->col_idx = cpumask_first(cpu_online_mask);
			its_send_vmapp(its, vpe, true);
1817 | its_send_vinvall(its, vpe); |
1818 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); |
1819 | } |
1820 | } |
1821 | |
1822 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); |
1823 | } |
1824 | |
1825 | static void its_unmap_vm(struct its_node *its, struct its_vm *vm) |
1826 | { |
1827 | unsigned long flags; |
1828 | |
1829 | /* Not using the ITS list? Everything is always mapped. */ |
1830 | if (gic_requires_eager_mapping()) |
1831 | return; |
1832 | |
1833 | raw_spin_lock_irqsave(&vmovp_lock, flags); |
1834 | |
1835 | if (!--vm->vlpi_count[its->list_nr]) { |
1836 | int i; |
1837 | |
1838 | for (i = 0; i < vm->nr_vpes; i++) |
			its_send_vmapp(its, vm->vpes[i], false);
1840 | } |
1841 | |
1842 | raw_spin_unlock_irqrestore(&vmovp_lock, flags); |
1843 | } |
1844 | |
1845 | static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info) |
1846 | { |
1847 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1848 | u32 event = its_get_event_id(d); |
1849 | int ret = 0; |
1850 | |
1851 | if (!info->map) |
1852 | return -EINVAL; |
1853 | |
1854 | raw_spin_lock(&its_dev->event_map.vlpi_lock); |
1855 | |
1856 | if (!its_dev->event_map.vm) { |
1857 | struct its_vlpi_map *maps; |
1858 | |
		maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1860 | GFP_ATOMIC); |
1861 | if (!maps) { |
1862 | ret = -ENOMEM; |
1863 | goto out; |
1864 | } |
1865 | |
1866 | its_dev->event_map.vm = info->map->vm; |
1867 | its_dev->event_map.vlpi_maps = maps; |
1868 | } else if (its_dev->event_map.vm != info->map->vm) { |
1869 | ret = -EINVAL; |
1870 | goto out; |
1871 | } |
1872 | |
1873 | /* Get our private copy of the mapping information */ |
1874 | its_dev->event_map.vlpi_maps[event] = *info->map; |
1875 | |
1876 | if (irqd_is_forwarded_to_vcpu(d)) { |
1877 | /* Already mapped, move it around */ |
		its_send_vmovi(its_dev, event);
	} else {
		/* Ensure all the VPEs are mapped on this ITS */
		its_map_vm(its_dev->its, info->map->vm);
1882 | |
1883 | /* |
1884 | * Flag the interrupt as forwarded so that we can |
1885 | * start poking the virtual property table. |
1886 | */ |
1887 | irqd_set_forwarded_to_vcpu(d); |
1888 | |
1889 | /* Write out the property to the prop table */ |
		lpi_write_config(d, 0xff, info->map->properties);

		/* Drop the physical mapping */
		its_send_discard(its_dev, event);

		/* and install the virtual one */
		its_send_vmapti(its_dev, event);
1897 | |
1898 | /* Increment the number of VLPIs */ |
1899 | its_dev->event_map.nr_vlpis++; |
1900 | } |
1901 | |
1902 | out: |
1903 | raw_spin_unlock(&its_dev->event_map.vlpi_lock); |
1904 | return ret; |
1905 | } |
1906 | |
1907 | static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info) |
1908 | { |
1909 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1910 | struct its_vlpi_map *map; |
1911 | int ret = 0; |
1912 | |
1913 | raw_spin_lock(&its_dev->event_map.vlpi_lock); |
1914 | |
1915 | map = get_vlpi_map(d); |
1916 | |
1917 | if (!its_dev->event_map.vm || !map) { |
1918 | ret = -EINVAL; |
1919 | goto out; |
1920 | } |
1921 | |
1922 | /* Copy our mapping information to the incoming request */ |
1923 | *info->map = *map; |
1924 | |
1925 | out: |
1926 | raw_spin_unlock(&its_dev->event_map.vlpi_lock); |
1927 | return ret; |
1928 | } |
1929 | |
1930 | static int its_vlpi_unmap(struct irq_data *d) |
1931 | { |
1932 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1933 | u32 event = its_get_event_id(d); |
1934 | int ret = 0; |
1935 | |
1936 | raw_spin_lock(&its_dev->event_map.vlpi_lock); |
1937 | |
1938 | if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) { |
1939 | ret = -EINVAL; |
1940 | goto out; |
1941 | } |
1942 | |
1943 | /* Drop the virtual mapping */ |
	its_send_discard(its_dev, event);

	/* and restore the physical one */
	irqd_clr_forwarded_to_vcpu(d);
	its_send_mapti(its_dev, d->hwirq, event);
	lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1950 | LPI_PROP_ENABLED | |
1951 | LPI_PROP_GROUP1)); |
1952 | |
1953 | /* Potentially unmap the VM from this ITS */ |
	its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1955 | |
1956 | /* |
1957 | * Drop the refcount and make the device available again if |
1958 | * this was the last VLPI. |
1959 | */ |
1960 | if (!--its_dev->event_map.nr_vlpis) { |
1961 | its_dev->event_map.vm = NULL; |
		kfree(its_dev->event_map.vlpi_maps);
1963 | } |
1964 | |
1965 | out: |
1966 | raw_spin_unlock(&its_dev->event_map.vlpi_lock); |
1967 | return ret; |
1968 | } |
1969 | |
1970 | static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info) |
1971 | { |
1972 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1973 | |
1974 | if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) |
1975 | return -EINVAL; |
1976 | |
1977 | if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI) |
		lpi_update_config(d, 0xff, info->config);
	else
		lpi_write_config(d, 0xff, info->config);
	its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1982 | |
1983 | return 0; |
1984 | } |
1985 | |
1986 | static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
1987 | { |
1988 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
1989 | struct its_cmd_info *info = vcpu_info; |
1990 | |
1991 | /* Need a v4 ITS */ |
1992 | if (!is_v4(its_dev->its)) |
1993 | return -EINVAL; |
1994 | |
1995 | /* Unmap request? */ |
1996 | if (!info) |
1997 | return its_vlpi_unmap(d); |
1998 | |
1999 | switch (info->cmd_type) { |
2000 | case MAP_VLPI: |
2001 | return its_vlpi_map(d, info); |
2002 | |
2003 | case GET_VLPI: |
2004 | return its_vlpi_get(d, info); |
2005 | |
2006 | case PROP_UPDATE_VLPI: |
2007 | case PROP_UPDATE_AND_INV_VLPI: |
2008 | return its_vlpi_prop_update(d, info); |
2009 | |
2010 | default: |
2011 | return -EINVAL; |
2012 | } |
2013 | } |
2014 | |
2015 | static struct irq_chip its_irq_chip = { |
2016 | .name = "ITS" , |
2017 | .irq_mask = its_mask_irq, |
2018 | .irq_unmask = its_unmask_irq, |
2019 | .irq_eoi = irq_chip_eoi_parent, |
2020 | .irq_set_affinity = its_set_affinity, |
2021 | .irq_compose_msi_msg = its_irq_compose_msi_msg, |
2022 | .irq_set_irqchip_state = its_irq_set_irqchip_state, |
2023 | .irq_retrigger = its_irq_retrigger, |
2024 | .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity, |
2025 | }; |
2026 | |
2027 | |
2028 | /* |
2029 | * How we allocate LPIs: |
2030 | * |
 * lpi_range_list contains ranges of LPIs that are available to
2032 | * allocate from. To allocate LPIs, just pick the first range that |
2033 | * fits the required allocation, and reduce it by the required |
2034 | * amount. Once empty, remove the range from the list. |
2035 | * |
2036 | * To free a range of LPIs, add a free range to the list, sort it and |
2037 | * merge the result if the new range happens to be adjacent to an |
2038 | * already free block. |
2039 | * |
 * The consequence of the above is that allocation cost is low, but
 * freeing is expensive. We assume that freeing rarely occurs.
2042 | */ |
2043 | #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */ |
2044 | |
2045 | static DEFINE_MUTEX(lpi_range_lock); |
2046 | static LIST_HEAD(lpi_range_list); |
2047 | |
2048 | struct lpi_range { |
2049 | struct list_head entry; |
2050 | u32 base_id; |
2051 | u32 span; |
2052 | }; |
2053 | |
2054 | static struct lpi_range *mk_lpi_range(u32 base, u32 span) |
2055 | { |
2056 | struct lpi_range *range; |
2057 | |
	range = kmalloc(sizeof(*range), GFP_KERNEL);
2059 | if (range) { |
2060 | range->base_id = base; |
2061 | range->span = span; |
2062 | } |
2063 | |
2064 | return range; |
2065 | } |
2066 | |
2067 | static int alloc_lpi_range(u32 nr_lpis, u32 *base) |
2068 | { |
2069 | struct lpi_range *range, *tmp; |
2070 | int err = -ENOSPC; |
2071 | |
2072 | mutex_lock(&lpi_range_lock); |
2073 | |
2074 | list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) { |
2075 | if (range->span >= nr_lpis) { |
2076 | *base = range->base_id; |
2077 | range->base_id += nr_lpis; |
2078 | range->span -= nr_lpis; |
2079 | |
2080 | if (range->span == 0) { |
				list_del(&range->entry);
				kfree(range);
2083 | } |
2084 | |
2085 | err = 0; |
2086 | break; |
2087 | } |
2088 | } |
2089 | |
	mutex_unlock(&lpi_range_lock);

	pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
2093 | return err; |
2094 | } |
2095 | |
2096 | static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b) |
2097 | { |
2098 | if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list) |
2099 | return; |
2100 | if (a->base_id + a->span != b->base_id) |
2101 | return; |
2102 | b->base_id = a->base_id; |
2103 | b->span += a->span; |
	list_del(&a->entry);
	kfree(a);
2106 | } |
2107 | |
2108 | static int free_lpi_range(u32 base, u32 nr_lpis) |
2109 | { |
2110 | struct lpi_range *new, *old; |
2111 | |
	new = mk_lpi_range(base, nr_lpis);
2113 | if (!new) |
2114 | return -ENOMEM; |
2115 | |
2116 | mutex_lock(&lpi_range_lock); |
2117 | |
2118 | list_for_each_entry_reverse(old, &lpi_range_list, entry) { |
2119 | if (old->base_id < base) |
2120 | break; |
2121 | } |
2122 | /* |
2123 | * old is the last element with ->base_id smaller than base, |
2124 | * so new goes right after it. If there are no elements with |
2125 | * ->base_id smaller than base, &old->entry ends up pointing |
2126 | * at the head of the list, and inserting new it the start of |
2127 | * the list is the right thing to do in that case as well. |
2128 | */ |
2129 | list_add(new: &new->entry, head: &old->entry); |
2130 | /* |
2131 | * Now check if we can merge with the preceding and/or |
2132 | * following ranges. |
2133 | */ |
2134 | merge_lpi_ranges(a: old, b: new); |
2135 | merge_lpi_ranges(a: new, list_next_entry(new, entry)); |
2136 | |
2137 | mutex_unlock(lock: &lpi_range_lock); |
2138 | return 0; |
2139 | } |
2140 | |
2141 | static int __init its_lpi_init(u32 id_bits) |
2142 | { |
2143 | u32 lpis = (1UL << id_bits) - 8192; |
2144 | u32 numlpis; |
2145 | int err; |
2146 | |
2147 | numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer); |
2148 | |
2149 | if (numlpis > 2 && !WARN_ON(numlpis > lpis)) { |
2150 | lpis = numlpis; |
2151 | pr_info("ITS: Using hypervisor restricted LPI range [%u]\n" , |
2152 | lpis); |
2153 | } |
2154 | |
2155 | /* |
2156 | * Initializing the allocator is just the same as freeing the |
2157 | * full range of LPIs. |
2158 | */ |
	err = free_lpi_range(8192, lpis);
	pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
2161 | return err; |
2162 | } |
2163 | |
2164 | static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids) |
2165 | { |
2166 | unsigned long *bitmap = NULL; |
2167 | int err = 0; |
2168 | |
2169 | do { |
		err = alloc_lpi_range(nr_irqs, base);
2171 | if (!err) |
2172 | break; |
2173 | |
2174 | nr_irqs /= 2; |
2175 | } while (nr_irqs > 0); |
2176 | |
2177 | if (!nr_irqs) |
2178 | err = -ENOSPC; |
2179 | |
2180 | if (err) |
2181 | goto out; |
2182 | |
	bitmap = bitmap_zalloc(nr_irqs, GFP_ATOMIC);
2184 | if (!bitmap) |
2185 | goto out; |
2186 | |
2187 | *nr_ids = nr_irqs; |
2188 | |
2189 | out: |
2190 | if (!bitmap) |
2191 | *base = *nr_ids = 0; |
2192 | |
2193 | return bitmap; |
2194 | } |
2195 | |
2196 | static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids) |
2197 | { |
2198 | WARN_ON(free_lpi_range(base, nr_ids)); |
2199 | bitmap_free(bitmap); |
2200 | } |
2201 | |
2202 | static void gic_reset_prop_table(void *va) |
2203 | { |
2204 | /* Priority 0xa0, Group-1, disabled */ |
2205 | memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ); |
2206 | |
2207 | /* Make sure the GIC will observe the written configuration */ |
2208 | gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ); |
2209 | } |
2210 | |
2211 | static struct page *its_allocate_prop_table(gfp_t gfp_flags) |
2212 | { |
2213 | struct page *prop_page; |
2214 | |
	prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2216 | if (!prop_page) |
2217 | return NULL; |
2218 | |
2219 | gic_reset_prop_table(page_address(prop_page)); |
2220 | |
2221 | return prop_page; |
2222 | } |
2223 | |
2224 | static void its_free_prop_table(struct page *prop_page) |
2225 | { |
	free_pages((unsigned long)page_address(prop_page),
		   get_order(LPI_PROPBASE_SZ));
2228 | } |
2229 | |
2230 | static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size) |
2231 | { |
2232 | phys_addr_t start, end, addr_end; |
2233 | u64 i; |
2234 | |
2235 | /* |
2236 | * We don't bother checking for a kdump kernel as by |
2237 | * construction, the LPI tables are out of this kernel's |
2238 | * memory map. |
2239 | */ |
2240 | if (is_kdump_kernel()) |
2241 | return true; |
2242 | |
2243 | addr_end = addr + size - 1; |
2244 | |
2245 | for_each_reserved_mem_range(i, &start, &end) { |
2246 | if (addr >= start && addr_end <= end) |
2247 | return true; |
2248 | } |
2249 | |
2250 | /* Not found, not a good sign... */ |
2251 | pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n" , |
2252 | &addr, &addr_end); |
2253 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); |
2254 | return false; |
2255 | } |
2256 | |
2257 | static int gic_reserve_range(phys_addr_t addr, unsigned long size) |
2258 | { |
2259 | if (efi_enabled(EFI_CONFIG_TABLES)) |
2260 | return efi_mem_reserve_persistent(addr, size); |
2261 | |
2262 | return 0; |
2263 | } |
2264 | |
2265 | static int __init its_setup_lpi_prop_table(void) |
2266 | { |
2267 | if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) { |
2268 | u64 val; |
2269 | |
2270 | val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); |
2271 | lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1; |
2272 | |
2273 | gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12); |
		gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
						     LPI_PROPBASE_SZ,
						     MEMREMAP_WB);
		gic_reset_prop_table(gic_rdists->prop_table_va);
2278 | } else { |
2279 | struct page *page; |
2280 | |
2281 | lpi_id_bits = min_t(u32, |
2282 | GICD_TYPER_ID_BITS(gic_rdists->gicd_typer), |
2283 | ITS_MAX_LPI_NRBITS); |
2284 | page = its_allocate_prop_table(GFP_NOWAIT); |
2285 | if (!page) { |
2286 | pr_err("Failed to allocate PROPBASE\n" ); |
2287 | return -ENOMEM; |
2288 | } |
2289 | |
2290 | gic_rdists->prop_table_pa = page_to_phys(page); |
2291 | gic_rdists->prop_table_va = page_address(page); |
2292 | WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa, |
2293 | LPI_PROPBASE_SZ)); |
2294 | } |
2295 | |
2296 | pr_info("GICv3: using LPI property table @%pa\n" , |
2297 | &gic_rdists->prop_table_pa); |
2298 | |
	return its_lpi_init(lpi_id_bits);
2300 | } |
2301 | |
2302 | static const char *its_base_type_string[] = { |
2303 | [GITS_BASER_TYPE_DEVICE] = "Devices" , |
2304 | [GITS_BASER_TYPE_VCPU] = "Virtual CPUs" , |
2305 | [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)" , |
2306 | [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections" , |
2307 | [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)" , |
2308 | [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)" , |
2309 | [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)" , |
2310 | }; |
2311 | |
2312 | static u64 its_read_baser(struct its_node *its, struct its_baser *baser) |
2313 | { |
2314 | u32 idx = baser - its->tables; |
2315 | |
2316 | return gits_read_baser(its->base + GITS_BASER + (idx << 3)); |
2317 | } |
2318 | |
2319 | static void its_write_baser(struct its_node *its, struct its_baser *baser, |
2320 | u64 val) |
2321 | { |
2322 | u32 idx = baser - its->tables; |
2323 | |
2324 | gits_write_baser(val, its->base + GITS_BASER + (idx << 3)); |
2325 | baser->val = its_read_baser(its, baser); |
2326 | } |
2327 | |
2328 | static int its_setup_baser(struct its_node *its, struct its_baser *baser, |
2329 | u64 cache, u64 shr, u32 order, bool indirect) |
2330 | { |
2331 | u64 val = its_read_baser(its, baser); |
2332 | u64 esz = GITS_BASER_ENTRY_SIZE(val); |
2333 | u64 type = GITS_BASER_TYPE(val); |
2334 | u64 baser_phys, tmp; |
2335 | u32 alloc_pages, psz; |
2336 | struct page *page; |
2337 | void *base; |
2338 | |
2339 | psz = baser->psz; |
2340 | alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz); |
2341 | if (alloc_pages > GITS_BASER_PAGES_MAX) { |
2342 | pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n" , |
2343 | &its->phys_base, its_base_type_string[type], |
2344 | alloc_pages, GITS_BASER_PAGES_MAX); |
2345 | alloc_pages = GITS_BASER_PAGES_MAX; |
2346 | order = get_order(GITS_BASER_PAGES_MAX * psz); |
2347 | } |
2348 | |
	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2350 | if (!page) |
2351 | return -ENOMEM; |
2352 | |
2353 | base = (void *)page_address(page); |
	baser_phys = virt_to_phys(base);
2355 | |
2356 | /* Check if the physical address of the memory is above 48bits */ |
2357 | if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) { |
2358 | |
2359 | /* 52bit PA is supported only when PageSize=64K */ |
2360 | if (psz != SZ_64K) { |
2361 | pr_err("ITS: no 52bit PA support when psz=%d\n" , psz); |
2362 | free_pages(addr: (unsigned long)base, order); |
2363 | return -ENXIO; |
2364 | } |
2365 | |
2366 | /* Convert 52bit PA to 48bit field */ |
2367 | baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys); |
2368 | } |
2369 | |
2370 | retry_baser: |
2371 | val = (baser_phys | |
2372 | (type << GITS_BASER_TYPE_SHIFT) | |
2373 | ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | |
2374 | ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) | |
2375 | cache | |
2376 | shr | |
2377 | GITS_BASER_VALID); |
2378 | |
2379 | val |= indirect ? GITS_BASER_INDIRECT : 0x0; |
2380 | |
2381 | switch (psz) { |
2382 | case SZ_4K: |
2383 | val |= GITS_BASER_PAGE_SIZE_4K; |
2384 | break; |
2385 | case SZ_16K: |
2386 | val |= GITS_BASER_PAGE_SIZE_16K; |
2387 | break; |
2388 | case SZ_64K: |
2389 | val |= GITS_BASER_PAGE_SIZE_64K; |
2390 | break; |
2391 | } |
2392 | |
2393 | if (!shr) |
2394 | gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order)); |
2395 | |
2396 | its_write_baser(its, baser, val); |
2397 | tmp = baser->val; |
2398 | |
2399 | if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { |
2400 | /* |
2401 | * Shareability didn't stick. Just use |
		 * whatever the read reported, which is likely
		 * to be the only thing this ITS
		 * supports. If that's zero, make it
2405 | * non-cacheable as well. |
2406 | */ |
2407 | shr = tmp & GITS_BASER_SHAREABILITY_MASK; |
2408 | if (!shr) |
2409 | cache = GITS_BASER_nC; |
2410 | |
2411 | goto retry_baser; |
2412 | } |
2413 | |
2414 | if (val != tmp) { |
2415 | pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n" , |
2416 | &its->phys_base, its_base_type_string[type], |
2417 | val, tmp); |
		free_pages((unsigned long)base, order);
2419 | return -ENXIO; |
2420 | } |
2421 | |
2422 | baser->order = order; |
2423 | baser->base = base; |
2424 | baser->psz = psz; |
2425 | tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz; |
2426 | |
2427 | pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n" , |
2428 | &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp), |
2429 | its_base_type_string[type], |
2430 | (unsigned long)virt_to_phys(base), |
2431 | indirect ? "indirect" : "flat" , (int)esz, |
2432 | psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); |
2433 | |
2434 | return 0; |
2435 | } |
2436 | |
2437 | static bool its_parse_indirect_baser(struct its_node *its, |
2438 | struct its_baser *baser, |
2439 | u32 *order, u32 ids) |
2440 | { |
2441 | u64 tmp = its_read_baser(its, baser); |
2442 | u64 type = GITS_BASER_TYPE(tmp); |
2443 | u64 esz = GITS_BASER_ENTRY_SIZE(tmp); |
2444 | u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb; |
2445 | u32 new_order = *order; |
2446 | u32 psz = baser->psz; |
2447 | bool indirect = false; |
2448 | |
2449 | /* No need to enable Indirection if memory requirement < (psz*2)bytes */ |
2450 | if ((esz << ids) > (psz * 2)) { |
2451 | /* |
2452 | * Find out whether hw supports a single or two-level table by |
2453 | * table by reading bit at offset '62' after writing '1' to it. |
2454 | */ |
2455 | its_write_baser(its, baser, val: val | GITS_BASER_INDIRECT); |
2456 | indirect = !!(baser->val & GITS_BASER_INDIRECT); |
2457 | |
2458 | if (indirect) { |
2459 | /* |
2460 | * The size of the lvl2 table is equal to ITS page size |
2461 | * which is 'psz'. For computing lvl1 table size, |
2462 | * subtract ID bits that sparse lvl2 table from 'ids' |
2463 | * which is reported by ITS hardware times lvl1 table |
2464 | * entry size. |
2465 | */ |
2466 | ids -= ilog2(psz / (int)esz); |
2467 | esz = GITS_LVL1_ENTRY_SIZE; |
2468 | } |
2469 | } |
2470 | |
2471 | /* |
2472 | * Allocate as many entries as required to fit the |
2473 | * range of device IDs that the ITS can grok... The ID |
2474 | * space being incredibly sparse, this results in a |
2475 | * massive waste of memory if two-level device table |
2476 | * feature is not supported by hardware. |
2477 | */ |
2478 | new_order = max_t(u32, get_order(esz << ids), new_order); |
2479 | if (new_order > MAX_PAGE_ORDER) { |
2480 | new_order = MAX_PAGE_ORDER; |
2481 | ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz); |
2482 | pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n" , |
2483 | &its->phys_base, its_base_type_string[type], |
2484 | device_ids(its), ids); |
2485 | } |
2486 | |
2487 | *order = new_order; |
2488 | |
2489 | return indirect; |
2490 | } |
2491 | |
2492 | static u32 compute_common_aff(u64 val) |
2493 | { |
2494 | u32 aff, clpiaff; |
2495 | |
2496 | aff = FIELD_GET(GICR_TYPER_AFFINITY, val); |
2497 | clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val); |
2498 | |
2499 | return aff & ~(GENMASK(31, 0) >> (clpiaff * 8)); |
2500 | } |
2501 | |
2502 | static u32 compute_its_aff(struct its_node *its) |
2503 | { |
2504 | u64 val; |
2505 | u32 svpet; |
2506 | |
2507 | /* |
2508 | * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute |
	 * the resulting affinity. We then use that to see if this matches
	 * our own affinity.
2511 | */ |
2512 | svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); |
2513 | val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet); |
2514 | val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr); |
2515 | return compute_common_aff(val); |
2516 | } |
2517 | |
2518 | static struct its_node *find_sibling_its(struct its_node *cur_its) |
2519 | { |
2520 | struct its_node *its; |
2521 | u32 aff; |
2522 | |
2523 | if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer)) |
2524 | return NULL; |
2525 | |
	aff = compute_its_aff(cur_its);
2527 | |
2528 | list_for_each_entry(its, &its_nodes, entry) { |
2529 | u64 baser; |
2530 | |
2531 | if (!is_v4_1(its) || its == cur_its) |
2532 | continue; |
2533 | |
2534 | if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) |
2535 | continue; |
2536 | |
2537 | if (aff != compute_its_aff(its)) |
2538 | continue; |
2539 | |
2540 | /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ |
2541 | baser = its->tables[2].val; |
2542 | if (!(baser & GITS_BASER_VALID)) |
2543 | continue; |
2544 | |
2545 | return its; |
2546 | } |
2547 | |
2548 | return NULL; |
2549 | } |
2550 | |
2551 | static void its_free_tables(struct its_node *its) |
2552 | { |
2553 | int i; |
2554 | |
2555 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
2556 | if (its->tables[i].base) { |
			free_pages((unsigned long)its->tables[i].base,
				   its->tables[i].order);
2559 | its->tables[i].base = NULL; |
2560 | } |
2561 | } |
2562 | } |
2563 | |
2564 | static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser) |
2565 | { |
2566 | u64 psz = SZ_64K; |
2567 | |
2568 | while (psz) { |
2569 | u64 val, gpsz; |
2570 | |
2571 | val = its_read_baser(its, baser); |
2572 | val &= ~GITS_BASER_PAGE_SIZE_MASK; |
2573 | |
2574 | switch (psz) { |
2575 | case SZ_64K: |
2576 | gpsz = GITS_BASER_PAGE_SIZE_64K; |
2577 | break; |
2578 | case SZ_16K: |
2579 | gpsz = GITS_BASER_PAGE_SIZE_16K; |
2580 | break; |
2581 | case SZ_4K: |
2582 | default: |
2583 | gpsz = GITS_BASER_PAGE_SIZE_4K; |
2584 | break; |
2585 | } |
2586 | |
2587 | gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT; |
2588 | |
2589 | val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz); |
2590 | its_write_baser(its, baser, val); |
2591 | |
2592 | if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz) |
2593 | break; |
2594 | |
2595 | switch (psz) { |
2596 | case SZ_64K: |
2597 | psz = SZ_16K; |
2598 | break; |
2599 | case SZ_16K: |
2600 | psz = SZ_4K; |
2601 | break; |
2602 | case SZ_4K: |
2603 | default: |
2604 | return -1; |
2605 | } |
2606 | } |
2607 | |
2608 | baser->psz = psz; |
2609 | return 0; |
2610 | } |
2611 | |
2612 | static int its_alloc_tables(struct its_node *its) |
2613 | { |
2614 | u64 shr = GITS_BASER_InnerShareable; |
2615 | u64 cache = GITS_BASER_RaWaWb; |
2616 | int err, i; |
2617 | |
2618 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375) |
2619 | /* erratum 24313: ignore memory access type */ |
2620 | cache = GITS_BASER_nCnB; |
2621 | |
2622 | if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) { |
2623 | cache = GITS_BASER_nC; |
2624 | shr = 0; |
2625 | } |
2626 | |
2627 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
2628 | struct its_baser *baser = its->tables + i; |
2629 | u64 val = its_read_baser(its, baser); |
2630 | u64 type = GITS_BASER_TYPE(val); |
2631 | bool indirect = false; |
2632 | u32 order; |
2633 | |
2634 | if (type == GITS_BASER_TYPE_NONE) |
2635 | continue; |
2636 | |
2637 | if (its_probe_baser_psz(its, baser)) { |
2638 | its_free_tables(its); |
2639 | return -ENXIO; |
2640 | } |
2641 | |
		order = get_order(baser->psz);
2643 | |
2644 | switch (type) { |
2645 | case GITS_BASER_TYPE_DEVICE: |
			indirect = its_parse_indirect_baser(its, baser, &order,
2647 | device_ids(its)); |
2648 | break; |
2649 | |
2650 | case GITS_BASER_TYPE_VCPU: |
2651 | if (is_v4_1(its)) { |
2652 | struct its_node *sibling; |
2653 | |
2654 | WARN_ON(i != 2); |
				if ((sibling = find_sibling_its(its))) {
					*baser = sibling->tables[2];
					its_write_baser(its, baser, baser->val);
2658 | continue; |
2659 | } |
2660 | } |
2661 | |
			indirect = its_parse_indirect_baser(its, baser, &order,
2663 | ITS_MAX_VPEID_BITS); |
2664 | break; |
2665 | } |
2666 | |
2667 | err = its_setup_baser(its, baser, cache, shr, order, indirect); |
2668 | if (err < 0) { |
2669 | its_free_tables(its); |
2670 | return err; |
2671 | } |
2672 | |
2673 | /* Update settings which will be used for next BASERn */ |
2674 | cache = baser->val & GITS_BASER_CACHEABILITY_MASK; |
2675 | shr = baser->val & GITS_BASER_SHAREABILITY_MASK; |
2676 | } |
2677 | |
2678 | return 0; |
2679 | } |
2680 | |
2681 | static u64 inherit_vpe_l1_table_from_its(void) |
2682 | { |
2683 | struct its_node *its; |
2684 | u64 val; |
2685 | u32 aff; |
2686 | |
2687 | val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); |
2688 | aff = compute_common_aff(val); |
2689 | |
2690 | list_for_each_entry(its, &its_nodes, entry) { |
2691 | u64 baser, addr; |
2692 | |
2693 | if (!is_v4_1(its)) |
2694 | continue; |
2695 | |
2696 | if (!FIELD_GET(GITS_TYPER_SVPET, its->typer)) |
2697 | continue; |
2698 | |
2699 | if (aff != compute_its_aff(its)) |
2700 | continue; |
2701 | |
2702 | /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */ |
2703 | baser = its->tables[2].val; |
2704 | if (!(baser & GITS_BASER_VALID)) |
2705 | continue; |
2706 | |
2707 | /* We have a winner! */ |
2708 | gic_data_rdist()->vpe_l1_base = its->tables[2].base; |
2709 | |
2710 | val = GICR_VPROPBASER_4_1_VALID; |
2711 | if (baser & GITS_BASER_INDIRECT) |
2712 | val |= GICR_VPROPBASER_4_1_INDIRECT; |
2713 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, |
2714 | FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)); |
2715 | switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) { |
2716 | case GIC_PAGE_SIZE_64K: |
2717 | addr = GITS_BASER_ADDR_48_to_52(baser); |
2718 | break; |
2719 | default: |
2720 | addr = baser & GENMASK_ULL(47, 12); |
2721 | break; |
2722 | } |
2723 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12); |
2724 | if (rdists_support_shareable()) { |
2725 | val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK, |
2726 | FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser)); |
2727 | val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK, |
2728 | FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser)); |
2729 | } |
2730 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1); |
2731 | |
2732 | return val; |
2733 | } |
2734 | |
2735 | return 0; |
2736 | } |
2737 | |
2738 | static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask) |
2739 | { |
2740 | u32 aff; |
2741 | u64 val; |
2742 | int cpu; |
2743 | |
2744 | val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); |
2745 | aff = compute_common_aff(val); |
2746 | |
2747 | for_each_possible_cpu(cpu) { |
2748 | void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; |
2749 | |
2750 | if (!base || cpu == smp_processor_id()) |
2751 | continue; |
2752 | |
2753 | val = gic_read_typer(base + GICR_TYPER); |
2754 | if (aff != compute_common_aff(val)) |
2755 | continue; |
2756 | |
2757 | /* |
2758 | * At this point, we have a victim. This particular CPU |
2759 | * has already booted, and has an affinity that matches |
2760 | * ours wrt CommonLPIAff. Let's use its own VPROPBASER. |
2761 | * Make sure we don't write the Z bit in that case. |
2762 | */ |
2763 | val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); |
2764 | val &= ~GICR_VPROPBASER_4_1_Z; |
2765 | |
2766 | gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base; |
2767 | *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask; |
2768 | |
2769 | return val; |
2770 | } |
2771 | |
2772 | return 0; |
2773 | } |
2774 | |
2775 | static bool allocate_vpe_l2_table(int cpu, u32 id) |
2776 | { |
2777 | void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base; |
2778 | unsigned int psz, esz, idx, npg, gpsz; |
2779 | u64 val; |
2780 | struct page *page; |
2781 | __le64 *table; |
2782 | |
2783 | if (!gic_rdists->has_rvpeid) |
2784 | return true; |
2785 | |
2786 | /* Skip non-present CPUs */ |
2787 | if (!base) |
2788 | return true; |
2789 | |
2790 | val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER); |
2791 | |
2792 | esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1; |
2793 | gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); |
2794 | npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1; |
2795 | |
2796 | switch (gpsz) { |
2797 | default: |
2798 | WARN_ON(1); |
2799 | fallthrough; |
2800 | case GIC_PAGE_SIZE_4K: |
2801 | psz = SZ_4K; |
2802 | break; |
2803 | case GIC_PAGE_SIZE_16K: |
2804 | psz = SZ_16K; |
2805 | break; |
2806 | case GIC_PAGE_SIZE_64K: |
2807 | psz = SZ_64K; |
2808 | break; |
2809 | } |
2810 | |
2811 | /* Don't allow vpe_id that exceeds single, flat table limit */ |
2812 | if (!(val & GICR_VPROPBASER_4_1_INDIRECT)) |
2813 | return (id < (npg * psz / (esz * SZ_8))); |
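	/*
	 * Example (hypothetical figures, for illustration only): a flat
	 * table of npg = 2 64K pages with 16-byte entries (esz = 2) can
	 * hold 2 * 65536 / 16 = 8192 vPE IDs.
	 */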
2814 | |
2815 | /* Compute 1st level table index & check if that exceeds table limit */ |
2816 | idx = id >> ilog2(psz / (esz * SZ_8)); |
2817 | if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE)) |
2818 | return false; |
2819 | |
2820 | table = gic_data_rdist_cpu(cpu)->vpe_l1_base; |
2821 | |
2822 | /* Allocate memory for 2nd level table */ |
2823 | if (!table[idx]) { |
		page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2825 | if (!page) |
2826 | return false; |
2827 | |
2828 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ |
2829 | if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) |
2830 | gic_flush_dcache_to_poc(page_address(page), psz); |
2831 | |
2832 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); |
2833 | |
2834 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ |
2835 | if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK)) |
2836 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); |
2837 | |
2838 | /* Ensure updated table contents are visible to RD hardware */ |
2839 | dsb(sy); |
2840 | } |
2841 | |
2842 | return true; |
2843 | } |
2844 | |
2845 | static int allocate_vpe_l1_table(void) |
2846 | { |
2847 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
2848 | u64 val, gpsz, npg, pa; |
2849 | unsigned int psz = SZ_64K; |
2850 | unsigned int np, epp, esz; |
2851 | struct page *page; |
2852 | |
2853 | if (!gic_rdists->has_rvpeid) |
2854 | return 0; |
2855 | |
2856 | /* |
2857 | * if VPENDBASER.Valid is set, disable any previously programmed |
2858 | * VPE by setting PendingLast while clearing Valid. This has the |
2859 | * effect of making sure no doorbell will be generated and we can |
2860 | * then safely clear VPROPBASER.Valid. |
2861 | */ |
2862 | if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid) |
2863 | gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast, |
2864 | vlpi_base + GICR_VPENDBASER); |
2865 | |
2866 | /* |
2867 | * If we can inherit the configuration from another RD, let's do |
2868 | * so. Otherwise, we have to go through the allocation process. We |
2869 | * assume that all RDs have the exact same requirements, as |
2870 | * nothing will work otherwise. |
2871 | */ |
	val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2873 | if (val & GICR_VPROPBASER_4_1_VALID) |
2874 | goto out; |
2875 | |
	gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_ATOMIC);
2877 | if (!gic_data_rdist()->vpe_table_mask) |
2878 | return -ENOMEM; |
2879 | |
2880 | val = inherit_vpe_l1_table_from_its(); |
2881 | if (val & GICR_VPROPBASER_4_1_VALID) |
2882 | goto out; |
2883 | |
2884 | /* First probe the page size */ |
2885 | val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K); |
2886 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
2887 | val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER); |
2888 | gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val); |
2889 | esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val); |
2890 | |
2891 | switch (gpsz) { |
2892 | default: |
2893 | gpsz = GIC_PAGE_SIZE_4K; |
2894 | fallthrough; |
2895 | case GIC_PAGE_SIZE_4K: |
2896 | psz = SZ_4K; |
2897 | break; |
2898 | case GIC_PAGE_SIZE_16K: |
2899 | psz = SZ_16K; |
2900 | break; |
2901 | case GIC_PAGE_SIZE_64K: |
2902 | psz = SZ_64K; |
2903 | break; |
2904 | } |
2905 | |
2906 | /* |
2907 | * Start populating the register from scratch, including RO fields |
2908 | * (which we want to print in debug cases...) |
2909 | */ |
2910 | val = 0; |
2911 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz); |
2912 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz); |
2913 | |
2914 | /* How many entries per GIC page? */ |
2915 | esz++; |
2916 | epp = psz / (esz * SZ_8); |
2917 | |
2918 | /* |
2919 | * If we need more than just a single L1 page, flag the table |
2920 | * as indirect and compute the number of required L1 pages. |
2921 | */ |
2922 | if (epp < ITS_MAX_VPEID) { |
2923 | int nl2; |
2924 | |
2925 | val |= GICR_VPROPBASER_4_1_INDIRECT; |
2926 | |
2927 | /* Number of L2 pages required to cover the VPEID space */ |
2928 | nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp); |
2929 | |
2930 | /* Number of L1 pages to point to the L2 pages */ |
2931 | npg = DIV_ROUND_UP(nl2 * SZ_8, psz); |
2932 | } else { |
2933 | npg = 1; |
2934 | } |
2935 | |
2936 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1); |
2937 | |
2938 | /* Right, that's the number of CPU pages we need for L1 */ |
2939 | np = DIV_ROUND_UP(npg * psz, PAGE_SIZE); |
2940 | |
2941 | pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n" , |
2942 | np, npg, psz, epp, esz); |
2943 | page = alloc_pages(GFP_ATOMIC | __GFP_ZERO, order: get_order(size: np * PAGE_SIZE)); |
2944 | if (!page) |
2945 | return -ENOMEM; |
2946 | |
2947 | gic_data_rdist()->vpe_l1_base = page_address(page); |
2948 | pa = virt_to_phys(page_address(page)); |
2949 | WARN_ON(!IS_ALIGNED(pa, psz)); |
2950 | |
2951 | val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12); |
2952 | if (rdists_support_shareable()) { |
2953 | val |= GICR_VPROPBASER_RaWb; |
2954 | val |= GICR_VPROPBASER_InnerShareable; |
2955 | } |
2956 | val |= GICR_VPROPBASER_4_1_Z; |
2957 | val |= GICR_VPROPBASER_4_1_VALID; |
2958 | |
2959 | out: |
2960 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
2961 | cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask); |
2962 | |
2963 | pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n" , |
2964 | smp_processor_id(), val, |
2965 | cpumask_pr_args(gic_data_rdist()->vpe_table_mask)); |
2966 | |
2967 | return 0; |
2968 | } |
2969 | |
2970 | static int its_alloc_collections(struct its_node *its) |
2971 | { |
2972 | int i; |
2973 | |
	its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2975 | GFP_KERNEL); |
2976 | if (!its->collections) |
2977 | return -ENOMEM; |
2978 | |
2979 | for (i = 0; i < nr_cpu_ids; i++) |
2980 | its->collections[i].target_address = ~0ULL; |
2981 | |
2982 | return 0; |
2983 | } |
2984 | |
2985 | static struct page *its_allocate_pending_table(gfp_t gfp_flags) |
2986 | { |
2987 | struct page *pend_page; |
2988 | |
	pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
				get_order(LPI_PENDBASE_SZ));
2991 | if (!pend_page) |
2992 | return NULL; |
2993 | |
2994 | /* Make sure the GIC will observe the zero-ed page */ |
2995 | gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ); |
2996 | |
2997 | return pend_page; |
2998 | } |
2999 | |
3000 | static void its_free_pending_table(struct page *pt) |
3001 | { |
	free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
3003 | } |
3004 | |
3005 | /* |
3006 | * Booting with kdump and LPIs enabled is generally fine. Any other |
3007 | * case is wrong in the absence of firmware/EFI support. |
3008 | */ |
3009 | static bool enabled_lpis_allowed(void) |
3010 | { |
3011 | phys_addr_t addr; |
3012 | u64 val; |
3013 | |
3014 | /* Check whether the property table is in a reserved region */ |
3015 | val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER); |
3016 | addr = val & GENMASK_ULL(51, 12); |
3017 | |
3018 | return gic_check_reserved_range(addr, LPI_PROPBASE_SZ); |
3019 | } |
3020 | |
3021 | static int __init allocate_lpi_tables(void) |
3022 | { |
3023 | u64 val; |
3024 | int err, cpu; |
3025 | |
3026 | /* |
3027 | * If LPIs are enabled while we run this from the boot CPU, |
3028 | * flag the RD tables as pre-allocated if the stars do align. |
3029 | */ |
3030 | val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR); |
3031 | if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) { |
3032 | gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED | |
3033 | RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING); |
3034 | pr_info("GICv3: Using preallocated redistributor tables\n" ); |
3035 | } |
3036 | |
3037 | err = its_setup_lpi_prop_table(); |
3038 | if (err) |
3039 | return err; |
3040 | |
3041 | /* |
3042 | * We allocate all the pending tables anyway, as we may have a |
3043 | * mix of RDs that have had LPIs enabled, and some that |
3044 | * don't. We'll free the unused ones as each CPU comes online. |
3045 | */ |
3046 | for_each_possible_cpu(cpu) { |
3047 | struct page *pend_page; |
3048 | |
3049 | pend_page = its_allocate_pending_table(GFP_NOWAIT); |
3050 | if (!pend_page) { |
3051 | pr_err("Failed to allocate PENDBASE for CPU%d\n" , cpu); |
3052 | return -ENOMEM; |
3053 | } |
3054 | |
3055 | gic_data_rdist_cpu(cpu)->pend_page = pend_page; |
3056 | } |
3057 | |
3058 | return 0; |
3059 | } |
3060 | |
3061 | static u64 read_vpend_dirty_clear(void __iomem *vlpi_base) |
3062 | { |
3063 | u32 count = 1000000; /* 1s! */ |
3064 | bool clean; |
3065 | u64 val; |
3066 | |
3067 | do { |
3068 | val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER); |
3069 | clean = !(val & GICR_VPENDBASER_Dirty); |
3070 | if (!clean) { |
3071 | count--; |
3072 | cpu_relax(); |
3073 | udelay(1); |
3074 | } |
3075 | } while (!clean && count); |
3076 | |
3077 | if (unlikely(!clean)) |
3078 | pr_err_ratelimited("ITS virtual pending table not cleaning\n" ); |
3079 | |
3080 | return val; |
3081 | } |
3082 | |
3083 | static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set) |
3084 | { |
3085 | u64 val; |
3086 | |
3087 | /* Make sure we wait until the RD is done with the initial scan */ |
3088 | val = read_vpend_dirty_clear(vlpi_base); |
3089 | val &= ~GICR_VPENDBASER_Valid; |
3090 | val &= ~clr; |
3091 | val |= set; |
3092 | gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); |
3093 | |
3094 | val = read_vpend_dirty_clear(vlpi_base); |
3095 | if (unlikely(val & GICR_VPENDBASER_Dirty)) |
3096 | val |= GICR_VPENDBASER_PendingLast; |
3097 | |
3098 | return val; |
3099 | } |
3100 | |
3101 | static void its_cpu_init_lpis(void) |
3102 | { |
3103 | void __iomem *rbase = gic_data_rdist_rd_base(); |
3104 | struct page *pend_page; |
3105 | phys_addr_t paddr; |
3106 | u64 val, tmp; |
3107 | |
3108 | if (gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) |
3109 | return; |
3110 | |
3111 | val = readl_relaxed(rbase + GICR_CTLR); |
3112 | if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) && |
3113 | (val & GICR_CTLR_ENABLE_LPIS)) { |
3114 | /* |
3115 | * Check that we get the same property table on all |
3116 | * RDs. If we don't, this is hopeless. |
3117 | */ |
3118 | paddr = gicr_read_propbaser(rbase + GICR_PROPBASER); |
3119 | paddr &= GENMASK_ULL(51, 12); |
3120 | if (WARN_ON(gic_rdists->prop_table_pa != paddr)) |
3121 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); |
3122 | |
3123 | paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER); |
3124 | paddr &= GENMASK_ULL(51, 16); |
3125 | |
3126 | WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ)); |
3127 | gic_data_rdist()->flags |= RD_LOCAL_PENDTABLE_PREALLOCATED; |
3128 | |
3129 | goto out; |
3130 | } |
3131 | |
3132 | pend_page = gic_data_rdist()->pend_page; |
3133 | paddr = page_to_phys(pend_page); |
3134 | |
3135 | /* set PROPBASE */ |
3136 | val = (gic_rdists->prop_table_pa | |
3137 | GICR_PROPBASER_InnerShareable | |
3138 | GICR_PROPBASER_RaWaWb | |
3139 | ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); |
3140 | |
3141 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
3142 | tmp = gicr_read_propbaser(rbase + GICR_PROPBASER); |
3143 | |
3144 | if (!rdists_support_shareable()) |
3145 | tmp &= ~GICR_PROPBASER_SHAREABILITY_MASK; |
3146 | |
3147 | if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { |
3148 | if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { |
3149 | /* |
3150 | * The HW reports non-shareable, we must |
3151 | * remove the cacheability attributes as |
3152 | * well. |
3153 | */ |
3154 | val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | |
3155 | GICR_PROPBASER_CACHEABILITY_MASK); |
3156 | val |= GICR_PROPBASER_nC; |
3157 | gicr_write_propbaser(val, rbase + GICR_PROPBASER); |
3158 | } |
3159 | pr_info_once("GIC: using cache flushing for LPI property table\n" ); |
3160 | gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; |
3161 | } |
3162 | |
3163 | /* set PENDBASE */ |
3164 | val = (page_to_phys(pend_page) | |
3165 | GICR_PENDBASER_InnerShareable | |
3166 | GICR_PENDBASER_RaWaWb); |
3167 | |
3168 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
3169 | tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER); |
3170 | |
3171 | if (!rdists_support_shareable()) |
3172 | tmp &= ~GICR_PENDBASER_SHAREABILITY_MASK; |
3173 | |
3174 | if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { |
3175 | /* |
3176 | * The HW reports non-shareable, we must remove the |
3177 | * cacheability attributes as well. |
3178 | */ |
3179 | val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | |
3180 | GICR_PENDBASER_CACHEABILITY_MASK); |
3181 | val |= GICR_PENDBASER_nC; |
3182 | gicr_write_pendbaser(val, rbase + GICR_PENDBASER); |
3183 | } |
3184 | |
3185 | /* Enable LPIs */ |
3186 | val = readl_relaxed(rbase + GICR_CTLR); |
3187 | val |= GICR_CTLR_ENABLE_LPIS; |
3188 | writel_relaxed(val, rbase + GICR_CTLR); |
3189 | |
3190 | out: |
3191 | if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) { |
3192 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
3193 | |
3194 | /* |
3195 | * It's possible for CPU to receive VLPIs before it is |
3196 | * scheduled as a vPE, especially for the first CPU, and the |
3197 | * VLPI with INTID larger than 2^(IDbits+1) will be considered |
3198 | * as out of range and dropped by GIC. |
3199 | * So we initialize IDbits to known value to avoid VLPI drop. |
3200 | */ |
3201 | val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; |
3202 | pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n" , |
3203 | smp_processor_id(), val); |
3204 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
3205 | |
3206 | /* |
3207 | * Also clear Valid bit of GICR_VPENDBASER, in case some |
3208 | * ancient programming gets left in and has possibility of |
3209 | * corrupting memory. |
3210 | */ |
		val = its_clear_vpend_valid(vlpi_base, 0, 0);
3212 | } |
3213 | |
3214 | if (allocate_vpe_l1_table()) { |
3215 | /* |
3216 | * If the allocation has failed, we're in massive trouble. |
3217 | * Disable direct injection, and pray that no VM was |
3218 | * already running... |
3219 | */ |
3220 | gic_rdists->has_rvpeid = false; |
3221 | gic_rdists->has_vlpis = false; |
3222 | } |
3223 | |
3224 | /* Make sure the GIC has seen the above */ |
3225 | dsb(sy); |
3226 | gic_data_rdist()->flags |= RD_LOCAL_LPI_ENABLED; |
3227 | pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n" , |
3228 | smp_processor_id(), |
3229 | gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED ? |
3230 | "reserved" : "allocated" , |
3231 | &paddr); |
3232 | } |
3233 | |
3234 | static void its_cpu_init_collection(struct its_node *its) |
3235 | { |
3236 | int cpu = smp_processor_id(); |
3237 | u64 target; |
3238 | |
3239 | /* avoid cross node collections and its mapping */ |
3240 | if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) { |
3241 | struct device_node *cpu_node; |
3242 | |
3243 | cpu_node = of_get_cpu_node(cpu, NULL); |
3244 | if (its->numa_node != NUMA_NO_NODE && |
		    its->numa_node != of_node_to_nid(cpu_node))
3246 | return; |
3247 | } |
3248 | |
3249 | /* |
3250 | * We now have to bind each collection to its target |
3251 | * redistributor. |
3252 | */ |
3253 | if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) { |
3254 | /* |
3255 | * This ITS wants the physical address of the |
3256 | * redistributor. |
3257 | */ |
3258 | target = gic_data_rdist()->phys_base; |
3259 | } else { |
3260 | /* This ITS wants a linear CPU number. */ |
3261 | target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER); |
3262 | target = GICR_TYPER_CPU_NUMBER(target) << 16; |
3263 | } |
3264 | |
3265 | /* Perform collection mapping */ |
3266 | its->collections[cpu].target_address = target; |
3267 | its->collections[cpu].col_id = cpu; |
3268 | |
	its_send_mapc(its, &its->collections[cpu], 1);
	its_send_invall(its, &its->collections[cpu]);
3271 | } |
3272 | |
3273 | static void its_cpu_init_collections(void) |
3274 | { |
3275 | struct its_node *its; |
3276 | |
3277 | raw_spin_lock(&its_lock); |
3278 | |
3279 | list_for_each_entry(its, &its_nodes, entry) |
3280 | its_cpu_init_collection(its); |
3281 | |
3282 | raw_spin_unlock(&its_lock); |
3283 | } |
3284 | |
3285 | static struct its_device *its_find_device(struct its_node *its, u32 dev_id) |
3286 | { |
3287 | struct its_device *its_dev = NULL, *tmp; |
3288 | unsigned long flags; |
3289 | |
3290 | raw_spin_lock_irqsave(&its->lock, flags); |
3291 | |
3292 | list_for_each_entry(tmp, &its->its_device_list, entry) { |
3293 | if (tmp->device_id == dev_id) { |
3294 | its_dev = tmp; |
3295 | break; |
3296 | } |
3297 | } |
3298 | |
3299 | raw_spin_unlock_irqrestore(&its->lock, flags); |
3300 | |
3301 | return its_dev; |
3302 | } |
3303 | |
3304 | static struct its_baser *its_get_baser(struct its_node *its, u32 type) |
3305 | { |
3306 | int i; |
3307 | |
3308 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
3309 | if (GITS_BASER_TYPE(its->tables[i].val) == type) |
3310 | return &its->tables[i]; |
3311 | } |
3312 | |
3313 | return NULL; |
3314 | } |
3315 | |
3316 | static bool its_alloc_table_entry(struct its_node *its, |
3317 | struct its_baser *baser, u32 id) |
3318 | { |
3319 | struct page *page; |
3320 | u32 esz, idx; |
3321 | __le64 *table; |
3322 | |
3323 | /* Don't allow device id that exceeds single, flat table limit */ |
3324 | esz = GITS_BASER_ENTRY_SIZE(baser->val); |
3325 | if (!(baser->val & GITS_BASER_INDIRECT)) |
3326 | return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz)); |
3327 | |
3328 | /* Compute 1st level table index & check if that exceeds table limit */ |
3329 | idx = id >> ilog2(baser->psz / esz); |
3330 | if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE)) |
3331 | return false; |
3332 | |
3333 | table = baser->base; |
3334 | |
3335 | /* Allocate memory for 2nd level table */ |
3336 | if (!table[idx]) { |
		page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
					get_order(baser->psz));
3339 | if (!page) |
3340 | return false; |
3341 | |
3342 | /* Flush Lvl2 table to PoC if hw doesn't support coherency */ |
3343 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) |
3344 | gic_flush_dcache_to_poc(page_address(page), baser->psz); |
3345 | |
3346 | table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID); |
3347 | |
3348 | /* Flush Lvl1 entry to PoC if hw doesn't support coherency */ |
3349 | if (!(baser->val & GITS_BASER_SHAREABILITY_MASK)) |
3350 | gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE); |
3351 | |
3352 | /* Ensure updated table contents are visible to ITS hardware */ |
3353 | dsb(sy); |
3354 | } |
3355 | |
3356 | return true; |
3357 | } |
3358 | |
3359 | static bool its_alloc_device_table(struct its_node *its, u32 dev_id) |
3360 | { |
3361 | struct its_baser *baser; |
3362 | |
3363 | baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE); |
3364 | |
3365 | /* Don't allow device id that exceeds ITS hardware limit */ |
3366 | if (!baser) |
3367 | return (ilog2(dev_id) < device_ids(its)); |
3368 | |
	return its_alloc_table_entry(its, baser, dev_id);
3370 | } |
3371 | |
3372 | static bool its_alloc_vpe_table(u32 vpe_id) |
3373 | { |
3374 | struct its_node *its; |
3375 | int cpu; |
3376 | |
3377 | /* |
3378 | * Make sure the L2 tables are allocated on *all* v4 ITSs. We |
3379 | * could try and only do it on ITSs corresponding to devices |
3380 | * that have interrupts targeted at this VPE, but the |
3381 | * complexity becomes crazy (and you have tons of memory |
3382 | * anyway, right?). |
3383 | */ |
3384 | list_for_each_entry(its, &its_nodes, entry) { |
3385 | struct its_baser *baser; |
3386 | |
3387 | if (!is_v4(its)) |
3388 | continue; |
3389 | |
3390 | baser = its_get_baser(its, GITS_BASER_TYPE_VCPU); |
3391 | if (!baser) |
3392 | return false; |
3393 | |
		if (!its_alloc_table_entry(its, baser, vpe_id))
3395 | return false; |
3396 | } |
3397 | |
3398 | /* Non v4.1? No need to iterate RDs and go back early. */ |
3399 | if (!gic_rdists->has_rvpeid) |
3400 | return true; |
3401 | |
3402 | /* |
3403 | * Make sure the L2 tables are allocated for all copies of |
3404 | * the L1 table on *all* v4.1 RDs. |
3405 | */ |
3406 | for_each_possible_cpu(cpu) { |
		if (!allocate_vpe_l2_table(cpu, vpe_id))
3408 | return false; |
3409 | } |
3410 | |
3411 | return true; |
3412 | } |
3413 | |
3414 | static struct its_device *its_create_device(struct its_node *its, u32 dev_id, |
3415 | int nvecs, bool alloc_lpis) |
3416 | { |
3417 | struct its_device *dev; |
3418 | unsigned long *lpi_map = NULL; |
3419 | unsigned long flags; |
3420 | u16 *col_map = NULL; |
3421 | void *itt; |
3422 | int lpi_base; |
3423 | int nr_lpis; |
3424 | int nr_ites; |
3425 | int sz; |
3426 | |
3427 | if (!its_alloc_device_table(its, dev_id)) |
3428 | return NULL; |
3429 | |
3430 | if (WARN_ON(!is_power_of_2(nvecs))) |
3431 | nvecs = roundup_pow_of_two(nvecs); |
3432 | |
	dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3434 | /* |
3435 | * Even if the device wants a single LPI, the ITT must be |
3436 | * sized as a power of two (and you need at least one bit...). |
3437 | */ |
3438 | nr_ites = max(2, nvecs); |
3439 | sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1); |
3440 | sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; |
	itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
	if (alloc_lpis) {
		lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
		if (lpi_map)
			col_map = kcalloc(nr_lpis, sizeof(*col_map),
					  GFP_KERNEL);
	} else {
		col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3449 | nr_lpis = 0; |
3450 | lpi_base = 0; |
3451 | } |
3452 | |
3453 | if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) { |
		kfree(dev);
		kfree(itt);
		bitmap_free(lpi_map);
		kfree(col_map);
3458 | return NULL; |
3459 | } |
3460 | |
3461 | gic_flush_dcache_to_poc(itt, sz); |
3462 | |
3463 | dev->its = its; |
3464 | dev->itt = itt; |
3465 | dev->nr_ites = nr_ites; |
3466 | dev->event_map.lpi_map = lpi_map; |
3467 | dev->event_map.col_map = col_map; |
3468 | dev->event_map.lpi_base = lpi_base; |
3469 | dev->event_map.nr_lpis = nr_lpis; |
3470 | raw_spin_lock_init(&dev->event_map.vlpi_lock); |
3471 | dev->device_id = dev_id; |
3472 | INIT_LIST_HEAD(&dev->entry); |
3473 | |
3474 | raw_spin_lock_irqsave(&its->lock, flags); |
3475 | list_add(&dev->entry, &its->its_device_list); |
3476 | raw_spin_unlock_irqrestore(&its->lock, flags); |
3477 | |
3478 | /* Map device to its ITT */ |
3479 | its_send_mapd(dev, 1); |
3480 | |
3481 | return dev; |
3482 | } |
3483 | |
3484 | static void its_free_device(struct its_device *its_dev) |
3485 | { |
3486 | unsigned long flags; |
3487 | |
3488 | raw_spin_lock_irqsave(&its_dev->its->lock, flags); |
3489 | list_del(&its_dev->entry); |
3490 | raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); |
3491 | kfree(its_dev->event_map.col_map); |
3492 | kfree(its_dev->itt); |
3493 | kfree(its_dev); |
3494 | } |
3495 | |
3496 | static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq) |
3497 | { |
3498 | int idx; |
3499 | |
3500 | /* Find a free LPI region in lpi_map and allocate them. */ |
3501 | idx = bitmap_find_free_region(dev->event_map.lpi_map, |
3502 | dev->event_map.nr_lpis, |
3503 | get_count_order(nvecs)); |
3504 | if (idx < 0) |
3505 | return -ENOSPC; |
3506 | |
3507 | *hwirq = dev->event_map.lpi_base + idx; |
3508 | |
3509 | return 0; |
3510 | } |
3511 | |
3512 | static int its_msi_prepare(struct irq_domain *domain, struct device *dev, |
3513 | int nvec, msi_alloc_info_t *info) |
3514 | { |
3515 | struct its_node *its; |
3516 | struct its_device *its_dev; |
3517 | struct msi_domain_info *msi_info; |
3518 | u32 dev_id; |
3519 | int err = 0; |
3520 | |
3521 | /* |
3522 | * We ignore "dev" entirely, and rely on the dev_id that has |
3523 | * been passed via the scratchpad. This limits this domain's |
3524 | * usefulness to upper layers that definitely know that they |
3525 | * are built on top of the ITS. |
3526 | */ |
3527 | dev_id = info->scratchpad[0].ul; |
3528 | |
3529 | msi_info = msi_get_domain_info(domain); |
3530 | its = msi_info->data; |
3531 | |
3532 | if (!gic_rdists->has_direct_lpi && |
3533 | vpe_proxy.dev && |
3534 | vpe_proxy.dev->its == its && |
3535 | dev_id == vpe_proxy.dev->device_id) { |
3536 | /* Bad luck. Get yourself a better implementation */ |
3537 | WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n", |
3538 | dev_id); |
3539 | return -EINVAL; |
3540 | } |
3541 | |
3542 | mutex_lock(&its->dev_alloc_lock); |
3543 | its_dev = its_find_device(its, dev_id); |
3544 | if (its_dev) { |
3545 | /* |
3546 | * We already have seen this ID, probably through |
3547 | * another alias (PCI bridge of some sort). No need to |
3548 | * create the device. |
3549 | */ |
3550 | its_dev->shared = true; |
3551 | pr_debug("Reusing ITT for devID %x\n", dev_id); |
3552 | goto out; |
3553 | } |
3554 | |
3555 | its_dev = its_create_device(its, dev_id, nvec, true); |
3556 | if (!its_dev) { |
3557 | err = -ENOMEM; |
3558 | goto out; |
3559 | } |
3560 | |
3561 | if (info->flags & MSI_ALLOC_FLAGS_PROXY_DEVICE) |
3562 | its_dev->shared = true; |
3563 | |
3564 | pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec)); |
3565 | out: |
3566 | mutex_unlock(&its->dev_alloc_lock); |
3567 | info->scratchpad[0].ptr = its_dev; |
3568 | return err; |
3569 | } |
3570 | |
3571 | static struct msi_domain_ops its_msi_domain_ops = { |
3572 | .msi_prepare = its_msi_prepare, |
3573 | }; |
3574 | |
3575 | static int its_irq_gic_domain_alloc(struct irq_domain *domain, |
3576 | unsigned int virq, |
3577 | irq_hw_number_t hwirq) |
3578 | { |
3579 | struct irq_fwspec fwspec; |
3580 | |
3581 | if (irq_domain_get_of_node(domain->parent)) { |
3582 | fwspec.fwnode = domain->parent->fwnode; |
3583 | fwspec.param_count = 3; |
3584 | fwspec.param[0] = GIC_IRQ_TYPE_LPI; |
3585 | fwspec.param[1] = hwirq; |
3586 | fwspec.param[2] = IRQ_TYPE_EDGE_RISING; |
3587 | } else if (is_fwnode_irqchip(domain->parent->fwnode)) { |
3588 | fwspec.fwnode = domain->parent->fwnode; |
3589 | fwspec.param_count = 2; |
3590 | fwspec.param[0] = hwirq; |
3591 | fwspec.param[1] = IRQ_TYPE_EDGE_RISING; |
3592 | } else { |
3593 | return -EINVAL; |
3594 | } |
3595 | |
3596 | return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec); |
3597 | } |
3598 | |
3599 | static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
3600 | unsigned int nr_irqs, void *args) |
3601 | { |
3602 | msi_alloc_info_t *info = args; |
3603 | struct its_device *its_dev = info->scratchpad[0].ptr; |
3604 | struct its_node *its = its_dev->its; |
3605 | struct irq_data *irqd; |
3606 | irq_hw_number_t hwirq; |
3607 | int err; |
3608 | int i; |
3609 | |
3610 | err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq); |
3611 | if (err) |
3612 | return err; |
3613 | |
3614 | err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev)); |
3615 | if (err) |
3616 | return err; |
3617 | |
3618 | for (i = 0; i < nr_irqs; i++) { |
3619 | err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i); |
3620 | if (err) |
3621 | return err; |
3622 | |
3623 | irq_domain_set_hwirq_and_chip(domain, virq + i, |
3624 | hwirq + i, &its_irq_chip, its_dev); |
3625 | irqd = irq_get_irq_data(virq + i); |
3626 | irqd_set_single_target(irqd); |
3627 | irqd_set_affinity_on_activate(irqd); |
3628 | irqd_set_resend_when_in_progress(irqd); |
3629 | pr_debug("ID:%d pID:%d vID:%d\n", |
3630 | (int)(hwirq + i - its_dev->event_map.lpi_base), |
3631 | (int)(hwirq + i), virq + i); |
3632 | } |
3633 | |
3634 | return 0; |
3635 | } |
3636 | |
3637 | static int its_irq_domain_activate(struct irq_domain *domain, |
3638 | struct irq_data *d, bool reserve) |
3639 | { |
3640 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
3641 | u32 event = its_get_event_id(d); |
3642 | int cpu; |
3643 | |
3644 | cpu = its_select_cpu(d, cpu_online_mask); |
3645 | if (cpu < 0 || cpu >= nr_cpu_ids) |
3646 | return -EINVAL; |
3647 | |
3648 | its_inc_lpi_count(d, cpu); |
3649 | its_dev->event_map.col_map[event] = cpu; |
3650 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
3651 | |
3652 | /* Map the GIC IRQ and event to the device */ |
3653 | its_send_mapti(its_dev, d->hwirq, event); |
3654 | return 0; |
3655 | } |
3656 | |
3657 | static void its_irq_domain_deactivate(struct irq_domain *domain, |
3658 | struct irq_data *d) |
3659 | { |
3660 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
3661 | u32 event = its_get_event_id(d); |
3662 | |
3663 | its_dec_lpi_count(d, its_dev->event_map.col_map[event]); |
3664 | /* Stop the delivery of interrupts */ |
3665 | its_send_discard(its_dev, event); |
3666 | } |
3667 | |
3668 | static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, |
3669 | unsigned int nr_irqs) |
3670 | { |
3671 | struct irq_data *d = irq_domain_get_irq_data(domain, virq); |
3672 | struct its_device *its_dev = irq_data_get_irq_chip_data(d); |
3673 | struct its_node *its = its_dev->its; |
3674 | int i; |
3675 | |
3676 | bitmap_release_region(its_dev->event_map.lpi_map, |
3677 | its_get_event_id(irq_domain_get_irq_data(domain, virq)), |
3678 | get_count_order(nr_irqs)); |
3679 | |
3680 | for (i = 0; i < nr_irqs; i++) { |
3681 | struct irq_data *data = irq_domain_get_irq_data(domain, |
3682 | virq + i); |
3683 | /* Nuke the entry in the domain */ |
3684 | irq_domain_reset_irq_data(data); |
3685 | } |
3686 | |
3687 | mutex_lock(&its->dev_alloc_lock); |
3688 | |
3689 | /* |
3690 | * If all interrupts have been freed, start mopping the |
3691 | * floor. This is conditioned on the device not being shared. |
3692 | */ |
3693 | if (!its_dev->shared && |
3694 | bitmap_empty(its_dev->event_map.lpi_map, |
3695 | its_dev->event_map.nr_lpis)) { |
3696 | its_lpi_free(its_dev->event_map.lpi_map, |
3697 | its_dev->event_map.lpi_base, |
3698 | its_dev->event_map.nr_lpis); |
3699 | |
3700 | /* Unmap device/itt */ |
3701 | its_send_mapd(its_dev, 0); |
3702 | its_free_device(its_dev); |
3703 | } |
3704 | |
3705 | mutex_unlock(&its->dev_alloc_lock); |
3706 | |
3707 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
3708 | } |
3709 | |
3710 | static const struct irq_domain_ops its_domain_ops = { |
3711 | .alloc = its_irq_domain_alloc, |
3712 | .free = its_irq_domain_free, |
3713 | .activate = its_irq_domain_activate, |
3714 | .deactivate = its_irq_domain_deactivate, |
3715 | }; |
3716 | |
3717 | /* |
3718 | * This is insane. |
3719 | * |
3720 | * If a GICv4.0 doesn't implement Direct LPIs (which is extremely |
3721 | * likely), the only way to perform an invalidate is to use a fake |
3722 | * device to issue an INV command, implying that the LPI has first |
3723 | * been mapped to some event on that device. Since this is not exactly |
3724 | * cheap, we try to keep that mapping around as long as possible, and |
3725 | * only issue an UNMAP if we're short on available slots. |
3726 | * |
3727 | * Broken by design(tm). |
3728 | * |
3729 | * GICv4.1, on the other hand, mandates that we're able to invalidate |
3730 | * by writing to a MMIO register. It doesn't implement the whole of |
3731 | * DirectLPI, but that's good enough. And most of the time, we don't |
3732 | * even have to invalidate anything, as the redistributor can be told |
3733 | * whether to generate a doorbell or not (we thus leave it enabled, |
3734 | * always). |
3735 | */ |
3736 | static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe) |
3737 | { |
3738 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3739 | if (gic_rdists->has_rvpeid) |
3740 | return; |
3741 | |
3742 | /* Already unmapped? */ |
3743 | if (vpe->vpe_proxy_event == -1) |
3744 | return; |
3745 | |
3746 | its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event); |
3747 | vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL; |
3748 | |
3749 | /* |
3750 | * We don't track empty slots at all, so let's move the |
3751 | * next_victim pointer if we can quickly reuse that slot |
3752 | * instead of nuking an existing entry. Not clear that this is |
3753 | * always a win though, and this might just generate a ripple |
3754 | * effect... Let's just hope VPEs don't migrate too often. |
3755 | */ |
3756 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) |
3757 | vpe_proxy.next_victim = vpe->vpe_proxy_event; |
3758 | |
3759 | vpe->vpe_proxy_event = -1; |
3760 | } |
3761 | |
3762 | static void its_vpe_db_proxy_unmap(struct its_vpe *vpe) |
3763 | { |
3764 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3765 | if (gic_rdists->has_rvpeid) |
3766 | return; |
3767 | |
3768 | if (!gic_rdists->has_direct_lpi) { |
3769 | unsigned long flags; |
3770 | |
3771 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); |
3772 | its_vpe_db_proxy_unmap_locked(vpe); |
3773 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); |
3774 | } |
3775 | } |
3776 | |
3777 | static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe) |
3778 | { |
3779 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3780 | if (gic_rdists->has_rvpeid) |
3781 | return; |
3782 | |
3783 | /* Already mapped? */ |
3784 | if (vpe->vpe_proxy_event != -1) |
3785 | return; |
3786 | |
3787 | /* This slot was already allocated. Kick the other VPE out. */ |
3788 | if (vpe_proxy.vpes[vpe_proxy.next_victim]) |
3789 | its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]); |
3790 | |
3791 | /* Map the new VPE instead */ |
3792 | vpe_proxy.vpes[vpe_proxy.next_victim] = vpe; |
3793 | vpe->vpe_proxy_event = vpe_proxy.next_victim; |
3794 | vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites; |
3795 | |
3796 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx; |
3797 | its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event); |
3798 | } |
3799 | |
3800 | static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to) |
3801 | { |
3802 | unsigned long flags; |
3803 | struct its_collection *target_col; |
3804 | |
3805 | /* GICv4.1 doesn't use a proxy, so nothing to do here */ |
3806 | if (gic_rdists->has_rvpeid) |
3807 | return; |
3808 | |
3809 | if (gic_rdists->has_direct_lpi) { |
3810 | void __iomem *rdbase; |
3811 | |
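/* With DirectLPI, simply clear any pending doorbell on the source redistributor. */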
3812 | rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base; |
3813 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); |
3814 | wait_for_syncr(rdbase); |
3815 | |
3816 | return; |
3817 | } |
3818 | |
3819 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); |
3820 | |
3821 | its_vpe_db_proxy_map_locked(vpe); |
3822 | |
3823 | target_col = &vpe_proxy.dev->its->collections[to]; |
3824 | its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event); |
3825 | vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to; |
3826 | |
3827 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); |
3828 | } |
3829 | |
3830 | static int its_vpe_set_affinity(struct irq_data *d, |
3831 | const struct cpumask *mask_val, |
3832 | bool force) |
3833 | { |
3834 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
3835 | struct cpumask common, *table_mask; |
3836 | unsigned long flags; |
3837 | int from, cpu; |
3838 | |
3839 | /* |
3840 | * Changing affinity is mega expensive, so let's be as lazy as |
3841 | * we can and only do it if we really have to. Also, if mapped |
3842 | * into the proxy device, we need to move the doorbell |
3843 | * interrupt to its new location. |
3844 | * |
3845 | * Another thing is that changing the affinity of a vPE affects |
3846 | * *other interrupts* such as all the vLPIs that are routed to |
3847 | * this vPE. This means that the irq_desc lock is not enough to |
3848 | * protect us, and that we must ensure nobody samples vpe->col_idx |
3849 | * during the update, hence the lock below which must also be |
3850 | * taken on any vLPI handling path that evaluates vpe->col_idx. |
3851 | */ |
3852 | from = vpe_to_cpuid_lock(vpe, &flags); |
3853 | table_mask = gic_data_rdist_cpu(from)->vpe_table_mask; |
3854 | |
3855 | /* |
3856 | * If we are offered another CPU in the same GICv4.1 ITS |
3857 | * affinity, pick this one. Otherwise, any CPU will do. |
3858 | */ |
3859 | if (table_mask && cpumask_and(&common, mask_val, table_mask)) |
3860 | cpu = cpumask_test_cpu(from, &common) ? from : cpumask_first(&common); |
3861 | else |
3862 | cpu = cpumask_first(mask_val); |
3863 | |
3864 | if (from == cpu) |
3865 | goto out; |
3866 | |
3867 | vpe->col_idx = cpu; |
3868 | |
3869 | its_send_vmovp(vpe); |
3870 | its_vpe_db_proxy_move(vpe, from, cpu); |
3871 | |
3872 | out: |
3873 | irq_data_update_effective_affinity(d, cpumask_of(cpu)); |
3874 | vpe_to_cpuid_unlock(vpe, flags); |
3875 | |
3876 | return IRQ_SET_MASK_OK_DONE; |
3877 | } |
3878 | |
3879 | static void its_wait_vpt_parse_complete(void) |
3880 | { |
3881 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
3882 | u64 val; |
3883 | |
3884 | if (!gic_rdists->has_vpend_valid_dirty) |
3885 | return; |
3886 | |
3887 | WARN_ON_ONCE(readq_relaxed_poll_timeout_atomic(vlpi_base + GICR_VPENDBASER, |
3888 | val, |
3889 | !(val & GICR_VPENDBASER_Dirty), |
3890 | 1, 500)); |
3891 | } |
3892 | |
3893 | static void its_vpe_schedule(struct its_vpe *vpe) |
3894 | { |
3895 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
3896 | u64 val; |
3897 | |
3898 | /* Schedule the VPE */ |
3899 | val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) & |
3900 | GENMASK_ULL(51, 12); |
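/* GICR_VPROPBASER.IDbits is programmed as (number of ID bits - 1). */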
3901 | val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK; |
3902 | if (rdists_support_shareable()) { |
3903 | val |= GICR_VPROPBASER_RaWb; |
3904 | val |= GICR_VPROPBASER_InnerShareable; |
3905 | } |
3906 | gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER); |
3907 | |
3908 | val = virt_to_phys(page_address(vpe->vpt_page)) & |
3909 | GENMASK_ULL(51, 16); |
3910 | if (rdists_support_shareable()) { |
3911 | val |= GICR_VPENDBASER_RaWaWb; |
3912 | val |= GICR_VPENDBASER_InnerShareable; |
3913 | } |
3914 | /* |
3915 | * There is no good way of finding out if the pending table is |
3916 | * empty as we can race against the doorbell interrupt very |
3917 | * easily. So in the end, vpe->pending_last is only an |
3918 | * indication that the vcpu has something pending, not one |
3919 | * that the pending table is empty. A good implementation |
3920 | * would be able to read its coarse map pretty quickly anyway, |
3921 | * making this a tolerable issue. |
3922 | */ |
3923 | val |= GICR_VPENDBASER_PendingLast; |
3924 | val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0; |
3925 | val |= GICR_VPENDBASER_Valid; |
3926 | gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); |
3927 | } |
3928 | |
3929 | static void its_vpe_deschedule(struct its_vpe *vpe) |
3930 | { |
3931 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
3932 | u64 val; |
3933 | |
3934 | val = its_clear_vpend_valid(vlpi_base, 0, 0); |
3935 | |
3936 | vpe->idai = !!(val & GICR_VPENDBASER_IDAI); |
3937 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); |
3938 | } |
3939 | |
3940 | static void its_vpe_invall(struct its_vpe *vpe) |
3941 | { |
3942 | struct its_node *its; |
3943 | |
3944 | list_for_each_entry(its, &its_nodes, entry) { |
3945 | if (!is_v4(its)) |
3946 | continue; |
3947 | |
3948 | if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr]) |
3949 | continue; |
3950 | |
3951 | /* |
3952 | * Sending a VINVALL to a single ITS is enough, as all |
3953 | * we need is to reach the redistributors. |
3954 | */ |
3955 | its_send_vinvall(its, vpe); |
3956 | return; |
3957 | } |
3958 | } |
3959 | |
3960 | static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
3961 | { |
3962 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
3963 | struct its_cmd_info *info = vcpu_info; |
3964 | |
3965 | switch (info->cmd_type) { |
3966 | case SCHEDULE_VPE: |
3967 | its_vpe_schedule(vpe); |
3968 | return 0; |
3969 | |
3970 | case DESCHEDULE_VPE: |
3971 | its_vpe_deschedule(vpe); |
3972 | return 0; |
3973 | |
3974 | case COMMIT_VPE: |
3975 | its_wait_vpt_parse_complete(); |
3976 | return 0; |
3977 | |
3978 | case INVALL_VPE: |
3979 | its_vpe_invall(vpe); |
3980 | return 0; |
3981 | |
3982 | default: |
3983 | return -EINVAL; |
3984 | } |
3985 | } |
3986 | |
3987 | static void its_vpe_send_cmd(struct its_vpe *vpe, |
3988 | void (*cmd)(struct its_device *, u32)) |
3989 | { |
3990 | unsigned long flags; |
3991 | |
3992 | raw_spin_lock_irqsave(&vpe_proxy.lock, flags); |
3993 | |
3994 | its_vpe_db_proxy_map_locked(vpe); |
3995 | cmd(vpe_proxy.dev, vpe->vpe_proxy_event); |
3996 | |
3997 | raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags); |
3998 | } |
3999 | |
4000 | static void its_vpe_send_inv(struct irq_data *d) |
4001 | { |
4002 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4003 | |
4004 | if (gic_rdists->has_direct_lpi) |
4005 | __direct_lpi_inv(d, d->parent_data->hwirq); |
4006 | else |
4007 | its_vpe_send_cmd(vpe, its_send_inv); |
4008 | } |
4009 | |
4010 | static void its_vpe_mask_irq(struct irq_data *d) |
4011 | { |
4012 | /* |
4013 | * We need to mask the LPI, which is described by the parent |
4014 | * irq_data. Instead of calling into the parent (which won't |
4015 | * exactly do the right thing), let's simply use the |
4016 | * parent_data pointer. Yes, I'm naughty. |
4017 | */ |
4018 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); |
4019 | its_vpe_send_inv(d); |
4020 | } |
4021 | |
4022 | static void its_vpe_unmask_irq(struct irq_data *d) |
4023 | { |
4024 | /* Same hack as above... */ |
4025 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); |
4026 | its_vpe_send_inv(d); |
4027 | } |
4028 | |
4029 | static int its_vpe_set_irqchip_state(struct irq_data *d, |
4030 | enum irqchip_irq_state which, |
4031 | bool state) |
4032 | { |
4033 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4034 | |
4035 | if (which != IRQCHIP_STATE_PENDING) |
4036 | return -EINVAL; |
4037 | |
4038 | if (gic_rdists->has_direct_lpi) { |
4039 | void __iomem *rdbase; |
4040 | |
4041 | rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base; |
4042 | if (state) { |
4043 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR); |
4044 | } else { |
4045 | gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR); |
4046 | wait_for_syncr(rdbase); |
4047 | } |
4048 | } else { |
4049 | if (state) |
4050 | its_vpe_send_cmd(vpe, its_send_int); |
4051 | else |
4052 | its_vpe_send_cmd(vpe, its_send_clear); |
4053 | } |
4054 | |
4055 | return 0; |
4056 | } |
4057 | |
4058 | static int its_vpe_retrigger(struct irq_data *d) |
4059 | { |
4060 | return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true); |
4061 | } |
4062 | |
4063 | static struct irq_chip its_vpe_irq_chip = { |
4064 | .name = "GICv4-vpe", |
4065 | .irq_mask = its_vpe_mask_irq, |
4066 | .irq_unmask = its_vpe_unmask_irq, |
4067 | .irq_eoi = irq_chip_eoi_parent, |
4068 | .irq_set_affinity = its_vpe_set_affinity, |
4069 | .irq_retrigger = its_vpe_retrigger, |
4070 | .irq_set_irqchip_state = its_vpe_set_irqchip_state, |
4071 | .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity, |
4072 | }; |
4073 | |
4074 | static struct its_node *find_4_1_its(void) |
4075 | { |
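/* Cache the lookup: any v4.1 ITS will do, and the answer doesn't change at runtime. */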
4076 | static struct its_node *its = NULL; |
4077 | |
4078 | if (!its) { |
4079 | list_for_each_entry(its, &its_nodes, entry) { |
4080 | if (is_v4_1(its)) |
4081 | return its; |
4082 | } |
4083 | |
4084 | /* Oops? */ |
4085 | its = NULL; |
4086 | } |
4087 | |
4088 | return its; |
4089 | } |
4090 | |
4091 | static void its_vpe_4_1_send_inv(struct irq_data *d) |
4092 | { |
4093 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4094 | struct its_node *its; |
4095 | |
4096 | /* |
4097 | * GICv4.1 wants doorbells to be invalidated using the |
4098 | * INVDB command in order to be broadcast to all RDs. Send |
4099 | * it to the first valid ITS, and let the HW do its magic. |
4100 | */ |
4101 | its = find_4_1_its(); |
4102 | if (its) |
4103 | its_send_invdb(its, vpe); |
4104 | } |
4105 | |
4106 | static void its_vpe_4_1_mask_irq(struct irq_data *d) |
4107 | { |
4108 | lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0); |
4109 | its_vpe_4_1_send_inv(d); |
4110 | } |
4111 | |
4112 | static void its_vpe_4_1_unmask_irq(struct irq_data *d) |
4113 | { |
4114 | lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED); |
4115 | its_vpe_4_1_send_inv(d); |
4116 | } |
4117 | |
4118 | static void its_vpe_4_1_schedule(struct its_vpe *vpe, |
4119 | struct its_cmd_info *info) |
4120 | { |
4121 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
4122 | u64 val = 0; |
4123 | |
4124 | /* Schedule the VPE */ |
4125 | val |= GICR_VPENDBASER_Valid; |
4126 | val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0; |
4127 | val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0; |
4128 | val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id); |
4129 | |
4130 | gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER); |
4131 | } |
4132 | |
4133 | static void its_vpe_4_1_deschedule(struct its_vpe *vpe, |
4134 | struct its_cmd_info *info) |
4135 | { |
4136 | void __iomem *vlpi_base = gic_data_rdist_vlpi_base(); |
4137 | u64 val; |
4138 | |
4139 | if (info->req_db) { |
4140 | unsigned long flags; |
4141 | |
4142 | /* |
4143 | * vPE is going to block: make the vPE non-resident with |
4144 | * PendingLast clear and DB set. The GIC guarantees that if |
4145 | * we read-back PendingLast clear, then a doorbell will be |
4146 | * delivered when an interrupt comes. |
4147 | * |
4148 | * Note the locking to deal with the update of pending_last |
4149 | * from the doorbell interrupt handler, which can run |
4150 | * concurrently with this code. |
4151 | */ |
4152 | raw_spin_lock_irqsave(&vpe->vpe_lock, flags); |
4153 | val = its_clear_vpend_valid(vlpi_base, |
4154 | GICR_VPENDBASER_PendingLast, |
4155 | GICR_VPENDBASER_4_1_DB); |
4156 | vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast); |
4157 | raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags); |
4158 | } else { |
4159 | /* |
4160 | * We're not blocking, so just make the vPE non-resident |
4161 | * with PendingLast set, indicating that we'll be back. |
4162 | */ |
4163 | val = its_clear_vpend_valid(vlpi_base, |
4164 | 0, |
4165 | GICR_VPENDBASER_PendingLast); |
4166 | vpe->pending_last = true; |
4167 | } |
4168 | } |
4169 | |
4170 | static void its_vpe_4_1_invall(struct its_vpe *vpe) |
4171 | { |
4172 | void __iomem *rdbase; |
4173 | unsigned long flags; |
4174 | u64 val; |
4175 | int cpu; |
4176 | |
4177 | val = GICR_INVALLR_V; |
4178 | val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id); |
4179 | |
4180 | /* Target the redistributor this vPE is currently known on */ |
4181 | cpu = vpe_to_cpuid_lock(vpe, &flags); |
4182 | raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); |
4183 | rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base; |
4184 | gic_write_lpir(val, rdbase + GICR_INVALLR); |
4185 | |
4186 | wait_for_syncr(rdbase); |
4187 | raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); |
4188 | vpe_to_cpuid_unlock(vpe, flags); |
4189 | } |
4190 | |
4191 | static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
4192 | { |
4193 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4194 | struct its_cmd_info *info = vcpu_info; |
4195 | |
4196 | switch (info->cmd_type) { |
4197 | case SCHEDULE_VPE: |
4198 | its_vpe_4_1_schedule(vpe, info); |
4199 | return 0; |
4200 | |
4201 | case DESCHEDULE_VPE: |
4202 | its_vpe_4_1_deschedule(vpe, info); |
4203 | return 0; |
4204 | |
4205 | case COMMIT_VPE: |
4206 | its_wait_vpt_parse_complete(); |
4207 | return 0; |
4208 | |
4209 | case INVALL_VPE: |
4210 | its_vpe_4_1_invall(vpe); |
4211 | return 0; |
4212 | |
4213 | default: |
4214 | return -EINVAL; |
4215 | } |
4216 | } |
4217 | |
4218 | static struct irq_chip its_vpe_4_1_irq_chip = { |
4219 | .name = "GICv4.1-vpe", |
4220 | .irq_mask = its_vpe_4_1_mask_irq, |
4221 | .irq_unmask = its_vpe_4_1_unmask_irq, |
4222 | .irq_eoi = irq_chip_eoi_parent, |
4223 | .irq_set_affinity = its_vpe_set_affinity, |
4224 | .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity, |
4225 | }; |
4226 | |
4227 | static void its_configure_sgi(struct irq_data *d, bool clear) |
4228 | { |
4229 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4230 | struct its_cmd_desc desc; |
4231 | |
4232 | desc.its_vsgi_cmd.vpe = vpe; |
4233 | desc.its_vsgi_cmd.sgi = d->hwirq; |
4234 | desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority; |
4235 | desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled; |
4236 | desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group; |
4237 | desc.its_vsgi_cmd.clear = clear; |
4238 | |
4239 | /* |
4240 | * GICv4.1 allows us to send VSGI commands to any ITS as long as the |
4241 | * destination VPE is mapped there. Since we map them eagerly at |
4242 | * activation time, we're pretty sure the first GICv4.1 ITS will do. |
4243 | */ |
4244 | its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc); |
4245 | } |
4246 | |
4247 | static void its_sgi_mask_irq(struct irq_data *d) |
4248 | { |
4249 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4250 | |
4251 | vpe->sgi_config[d->hwirq].enabled = false; |
4252 | its_configure_sgi(d, false); |
4253 | } |
4254 | |
4255 | static void its_sgi_unmask_irq(struct irq_data *d) |
4256 | { |
4257 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4258 | |
4259 | vpe->sgi_config[d->hwirq].enabled = true; |
4260 | its_configure_sgi(d, false); |
4261 | } |
4262 | |
4263 | static int its_sgi_set_affinity(struct irq_data *d, |
4264 | const struct cpumask *mask_val, |
4265 | bool force) |
4266 | { |
4267 | /* |
4268 | * There is no notion of affinity for virtual SGIs, at least |
4269 | * not on the host (since they can only be targeting a vPE). |
4270 | * Tell the kernel we've done whatever it asked for. |
4271 | */ |
4272 | irq_data_update_effective_affinity(d, mask_val); |
4273 | return IRQ_SET_MASK_OK; |
4274 | } |
4275 | |
4276 | static int its_sgi_set_irqchip_state(struct irq_data *d, |
4277 | enum irqchip_irq_state which, |
4278 | bool state) |
4279 | { |
4280 | if (which != IRQCHIP_STATE_PENDING) |
4281 | return -EINVAL; |
4282 | |
4283 | if (state) { |
4284 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4285 | struct its_node *its = find_4_1_its(); |
4286 | u64 val; |
4287 | |
4288 | val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id); |
4289 | val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq); |
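/* sgir_base maps the 64kB frame at ITS + 128kB, so rebase the GITS_SGIR offset into it. */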
4290 | writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K); |
4291 | } else { |
4292 | its_configure_sgi(d, true); |
4293 | } |
4294 | |
4295 | return 0; |
4296 | } |
4297 | |
4298 | static int its_sgi_get_irqchip_state(struct irq_data *d, |
4299 | enum irqchip_irq_state which, bool *val) |
4300 | { |
4301 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4302 | void __iomem *base; |
4303 | unsigned long flags; |
4304 | u32 count = 1000000; /* 1s! */ |
4305 | u32 status; |
4306 | int cpu; |
4307 | |
4308 | if (which != IRQCHIP_STATE_PENDING) |
4309 | return -EINVAL; |
4310 | |
4311 | /* |
4312 | * Locking galore! We can race against two different events: |
4313 | * |
4314 | * - Concurrent vPE affinity change: we must make sure it cannot |
4315 | * happen, or we'll talk to the wrong redistributor. This is |
4316 | * identical to what happens with vLPIs. |
4317 | * |
4318 | * - Concurrent VSGIPENDR access: As it involves accessing two |
4319 | * MMIO registers, this must be made atomic one way or another. |
4320 | */ |
4321 | cpu = vpe_to_cpuid_lock(vpe, &flags); |
4322 | raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock); |
4323 | base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K; |
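/* Ask the RD to sample this vPE's SGI pending state, then poll GICR_VSGIPENDR until the Busy bit clears. */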
4324 | writel_relaxed(vpe->vpe_id, base + GICR_VSGIR); |
4325 | do { |
4326 | status = readl_relaxed(base + GICR_VSGIPENDR); |
4327 | if (!(status & GICR_VSGIPENDR_BUSY)) |
4328 | goto out; |
4329 | |
4330 | count--; |
4331 | if (!count) { |
4332 | pr_err_ratelimited("Unable to get SGI status\n"); |
4333 | goto out; |
4334 | } |
4335 | cpu_relax(); |
4336 | udelay(1); |
4337 | } while (count); |
4338 | |
4339 | out: |
4340 | raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock); |
4341 | vpe_to_cpuid_unlock(vpe, flags); |
4342 | |
4343 | if (!count) |
4344 | return -ENXIO; |
4345 | |
4346 | *val = !!(status & (1 << d->hwirq)); |
4347 | |
4348 | return 0; |
4349 | } |
4350 | |
4351 | static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info) |
4352 | { |
4353 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4354 | struct its_cmd_info *info = vcpu_info; |
4355 | |
4356 | switch (info->cmd_type) { |
4357 | case PROP_UPDATE_VSGI: |
4358 | vpe->sgi_config[d->hwirq].priority = info->priority; |
4359 | vpe->sgi_config[d->hwirq].group = info->group; |
4360 | its_configure_sgi(d, false); |
4361 | return 0; |
4362 | |
4363 | default: |
4364 | return -EINVAL; |
4365 | } |
4366 | } |
4367 | |
4368 | static struct irq_chip its_sgi_irq_chip = { |
4369 | .name = "GICv4.1-sgi", |
4370 | .irq_mask = its_sgi_mask_irq, |
4371 | .irq_unmask = its_sgi_unmask_irq, |
4372 | .irq_set_affinity = its_sgi_set_affinity, |
4373 | .irq_set_irqchip_state = its_sgi_set_irqchip_state, |
4374 | .irq_get_irqchip_state = its_sgi_get_irqchip_state, |
4375 | .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity, |
4376 | }; |
4377 | |
4378 | static int its_sgi_irq_domain_alloc(struct irq_domain *domain, |
4379 | unsigned int virq, unsigned int nr_irqs, |
4380 | void *args) |
4381 | { |
4382 | struct its_vpe *vpe = args; |
4383 | int i; |
4384 | |
4385 | /* Yes, we do want 16 SGIs */ |
4386 | WARN_ON(nr_irqs != 16); |
4387 | |
4388 | for (i = 0; i < 16; i++) { |
4389 | vpe->sgi_config[i].priority = 0; |
4390 | vpe->sgi_config[i].enabled = false; |
4391 | vpe->sgi_config[i].group = false; |
4392 | |
4393 | irq_domain_set_hwirq_and_chip(domain, virq + i, i, |
4394 | &its_sgi_irq_chip, vpe); |
4395 | irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY); |
4396 | } |
4397 | |
4398 | return 0; |
4399 | } |
4400 | |
4401 | static void its_sgi_irq_domain_free(struct irq_domain *domain, |
4402 | unsigned int virq, |
4403 | unsigned int nr_irqs) |
4404 | { |
4405 | /* Nothing to do */ |
4406 | } |
4407 | |
4408 | static int its_sgi_irq_domain_activate(struct irq_domain *domain, |
4409 | struct irq_data *d, bool reserve) |
4410 | { |
4411 | /* Write out the initial SGI configuration */ |
4412 | its_configure_sgi(d, false); |
4413 | return 0; |
4414 | } |
4415 | |
4416 | static void its_sgi_irq_domain_deactivate(struct irq_domain *domain, |
4417 | struct irq_data *d) |
4418 | { |
4419 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4420 | |
4421 | /* |
4422 | * The VSGI command is awkward: |
4423 | * |
4424 | * - To change the configuration, CLEAR must be set to false, |
4425 | * leaving the pending bit unchanged. |
4426 | * - To clear the pending bit, CLEAR must be set to true, leaving |
4427 | * the configuration unchanged. |
4428 | * |
4429 | * You just can't do both at once, hence the two commands below. |
4430 | */ |
4431 | vpe->sgi_config[d->hwirq].enabled = false; |
4432 | its_configure_sgi(d, false); |
4433 | its_configure_sgi(d, true); |
4434 | } |
4435 | |
4436 | static const struct irq_domain_ops its_sgi_domain_ops = { |
4437 | .alloc = its_sgi_irq_domain_alloc, |
4438 | .free = its_sgi_irq_domain_free, |
4439 | .activate = its_sgi_irq_domain_activate, |
4440 | .deactivate = its_sgi_irq_domain_deactivate, |
4441 | }; |
4442 | |
4443 | static int its_vpe_id_alloc(void) |
4444 | { |
4445 | return ida_alloc_max(&its_vpeid_ida, ITS_MAX_VPEID - 1, GFP_KERNEL); |
4446 | } |
4447 | |
4448 | static void its_vpe_id_free(u16 id) |
4449 | { |
4450 | ida_free(&its_vpeid_ida, id); |
4451 | } |
4452 | |
4453 | static int its_vpe_init(struct its_vpe *vpe) |
4454 | { |
4455 | struct page *vpt_page; |
4456 | int vpe_id; |
4457 | |
4458 | /* Allocate vpe_id */ |
4459 | vpe_id = its_vpe_id_alloc(); |
4460 | if (vpe_id < 0) |
4461 | return vpe_id; |
4462 | |
4463 | /* Allocate VPT */ |
4464 | vpt_page = its_allocate_pending_table(GFP_KERNEL); |
4465 | if (!vpt_page) { |
4466 | its_vpe_id_free(vpe_id); |
4467 | return -ENOMEM; |
4468 | } |
4469 | |
4470 | if (!its_alloc_vpe_table(vpe_id)) { |
4471 | its_vpe_id_free(vpe_id); |
4472 | its_free_pending_table(vpt_page); |
4473 | return -ENOMEM; |
4474 | } |
4475 | |
4476 | raw_spin_lock_init(&vpe->vpe_lock); |
4477 | vpe->vpe_id = vpe_id; |
4478 | vpe->vpt_page = vpt_page; |
4479 | if (gic_rdists->has_rvpeid) |
4480 | atomic_set(&vpe->vmapp_count, 0); |
4481 | else |
4482 | vpe->vpe_proxy_event = -1; |
4483 | |
4484 | return 0; |
4485 | } |
4486 | |
4487 | static void its_vpe_teardown(struct its_vpe *vpe) |
4488 | { |
4489 | its_vpe_db_proxy_unmap(vpe); |
4490 | its_vpe_id_free(vpe->vpe_id); |
4491 | its_free_pending_table(vpe->vpt_page); |
4492 | } |
4493 | |
4494 | static void its_vpe_irq_domain_free(struct irq_domain *domain, |
4495 | unsigned int virq, |
4496 | unsigned int nr_irqs) |
4497 | { |
4498 | struct its_vm *vm = domain->host_data; |
4499 | int i; |
4500 | |
4501 | irq_domain_free_irqs_parent(domain, virq, nr_irqs); |
4502 | |
4503 | for (i = 0; i < nr_irqs; i++) { |
4504 | struct irq_data *data = irq_domain_get_irq_data(domain, |
4505 | virq + i); |
4506 | struct its_vpe *vpe = irq_data_get_irq_chip_data(data); |
4507 | |
4508 | BUG_ON(vm != vpe->its_vm); |
4509 | |
4510 | clear_bit(data->hwirq, vm->db_bitmap); |
4511 | its_vpe_teardown(vpe); |
4512 | irq_domain_reset_irq_data(data); |
4513 | } |
4514 | |
4515 | if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) { |
4516 | its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis); |
4517 | its_free_prop_table(vm->vprop_page); |
4518 | } |
4519 | } |
4520 | |
4521 | static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, |
4522 | unsigned int nr_irqs, void *args) |
4523 | { |
4524 | struct irq_chip *irqchip = &its_vpe_irq_chip; |
4525 | struct its_vm *vm = args; |
4526 | unsigned long *bitmap; |
4527 | struct page *vprop_page; |
4528 | int base, nr_ids, i, err = 0; |
4529 | |
4530 | BUG_ON(!vm); |
4531 | |
4532 | bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids); |
4533 | if (!bitmap) |
4534 | return -ENOMEM; |
4535 | |
4536 | if (nr_ids < nr_irqs) { |
4537 | its_lpi_free(bitmap, base, nr_ids); |
4538 | return -ENOMEM; |
4539 | } |
4540 | |
4541 | vprop_page = its_allocate_prop_table(GFP_KERNEL); |
4542 | if (!vprop_page) { |
4543 | its_lpi_free(bitmap, base, nr_ids); |
4544 | return -ENOMEM; |
4545 | } |
4546 | |
4547 | vm->db_bitmap = bitmap; |
4548 | vm->db_lpi_base = base; |
4549 | vm->nr_db_lpis = nr_ids; |
4550 | vm->vprop_page = vprop_page; |
4551 | |
4552 | if (gic_rdists->has_rvpeid) |
4553 | irqchip = &its_vpe_4_1_irq_chip; |
4554 | |
4555 | for (i = 0; i < nr_irqs; i++) { |
4556 | vm->vpes[i]->vpe_db_lpi = base + i; |
4557 | err = its_vpe_init(vm->vpes[i]); |
4558 | if (err) |
4559 | break; |
4560 | err = its_irq_gic_domain_alloc(domain, virq + i, |
4561 | vm->vpes[i]->vpe_db_lpi); |
4562 | if (err) |
4563 | break; |
4564 | irq_domain_set_hwirq_and_chip(domain, virq + i, i, |
4565 | irqchip, vm->vpes[i]); |
4566 | set_bit(i, bitmap); |
4567 | irqd_set_resend_when_in_progress(irq_get_irq_data(virq + i)); |
4568 | } |
4569 | |
4570 | if (err) { |
4571 | if (i > 0) |
4572 | its_vpe_irq_domain_free(domain, virq, i); |
4573 | |
4574 | its_lpi_free(bitmap, base, nr_ids); |
4575 | its_free_prop_table(vprop_page); |
4576 | } |
4577 | |
4578 | return err; |
4579 | } |
4580 | |
4581 | static int its_vpe_irq_domain_activate(struct irq_domain *domain, |
4582 | struct irq_data *d, bool reserve) |
4583 | { |
4584 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4585 | struct its_node *its; |
4586 | |
4587 | /* |
4588 | * If we use the list map, we issue VMAPP on demand... Unless |
4589 | * we're on a GICv4.1 and we eagerly map the VPE on all ITSs |
4590 | * so that VSGIs can work. |
4591 | */ |
4592 | if (!gic_requires_eager_mapping()) |
4593 | return 0; |
4594 | |
4595 | /* Map the VPE to the first possible CPU */ |
4596 | vpe->col_idx = cpumask_first(cpu_online_mask); |
4597 | |
4598 | list_for_each_entry(its, &its_nodes, entry) { |
4599 | if (!is_v4(its)) |
4600 | continue; |
4601 | |
4602 | its_send_vmapp(its, vpe, true); |
4603 | its_send_vinvall(its, vpe); |
4604 | } |
4605 | |
4606 | irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx)); |
4607 | |
4608 | return 0; |
4609 | } |
4610 | |
4611 | static void its_vpe_irq_domain_deactivate(struct irq_domain *domain, |
4612 | struct irq_data *d) |
4613 | { |
4614 | struct its_vpe *vpe = irq_data_get_irq_chip_data(d); |
4615 | struct its_node *its; |
4616 | |
4617 | /* |
4618 | * If we use the list map on GICv4.0, we unmap the VPE once no |
4619 | * VLPIs are associated with the VM. |
4620 | */ |
4621 | if (!gic_requires_eager_mapping()) |
4622 | return; |
4623 | |
4624 | list_for_each_entry(its, &its_nodes, entry) { |
4625 | if (!is_v4(its)) |
4626 | continue; |
4627 | |
4628 | its_send_vmapp(its, vpe, false); |
4629 | } |
4630 | |
4631 | /* |
4632 | * There may be a direct read to the VPT after unmapping the |
4633 | * vPE; to guarantee that such a read sees valid data, make |
4634 | * the VPT memory coherent with the CPU caches here. |
4635 | */ |
4636 | if (find_4_1_its() && !atomic_read(&vpe->vmapp_count)) |
4637 | gic_flush_dcache_to_poc(page_address(vpe->vpt_page), |
4638 | LPI_PENDBASE_SZ); |
4639 | } |
4640 | |
4641 | static const struct irq_domain_ops its_vpe_domain_ops = { |
4642 | .alloc = its_vpe_irq_domain_alloc, |
4643 | .free = its_vpe_irq_domain_free, |
4644 | .activate = its_vpe_irq_domain_activate, |
4645 | .deactivate = its_vpe_irq_domain_deactivate, |
4646 | }; |
4647 | |
4648 | static int its_force_quiescent(void __iomem *base) |
4649 | { |
4650 | u32 count = 1000000; /* 1s */ |
4651 | u32 val; |
4652 | |
4653 | val = readl_relaxed(base + GITS_CTLR); |
4654 | /* |
4655 | * GIC architecture specification requires the ITS to be both |
4656 | * disabled and quiescent for writes to GITS_BASER<n> or |
4657 | * GITS_CBASER to not have UNPREDICTABLE results. |
4658 | */ |
4659 | if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE)) |
4660 | return 0; |
4661 | |
4662 | /* Disable the generation of all interrupts to this ITS */ |
4663 | val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe); |
4664 | writel_relaxed(val, base + GITS_CTLR); |
4665 | |
4666 | /* Poll GITS_CTLR and wait until ITS becomes quiescent */ |
4667 | while (1) { |
4668 | val = readl_relaxed(base + GITS_CTLR); |
4669 | if (val & GITS_CTLR_QUIESCENT) |
4670 | return 0; |
4671 | |
4672 | count--; |
4673 | if (!count) |
4674 | return -EBUSY; |
4675 | |
4676 | cpu_relax(); |
4677 | udelay(1); |
4678 | } |
4679 | } |
4680 | |
4681 | static bool __maybe_unused its_enable_quirk_cavium_22375(void *data) |
4682 | { |
4683 | struct its_node *its = data; |
4684 | |
4685 | /* erratum 22375: only alloc 8MB table size (20 bits) */ |
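/* GITS_TYPER.Devbits holds the number of bits minus one, hence the "20 - 1" below. */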
4686 | its->typer &= ~GITS_TYPER_DEVBITS; |
4687 | its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1); |
4688 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375; |
4689 | |
4690 | return true; |
4691 | } |
4692 | |
4693 | static bool __maybe_unused its_enable_quirk_cavium_23144(void *data) |
4694 | { |
4695 | struct its_node *its = data; |
4696 | |
4697 | its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144; |
4698 | |
4699 | return true; |
4700 | } |
4701 | |
4702 | static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data) |
4703 | { |
4704 | struct its_node *its = data; |
4705 | |
4706 | /* On QDF2400, the size of the ITE is 16Bytes */ |
4707 | its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE; |
4708 | its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1); |
4709 | |
4710 | return true; |
4711 | } |
4712 | |
4713 | static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev) |
4714 | { |
4715 | struct its_node *its = its_dev->its; |
4716 | |
4717 | /* |
4718 | * The Socionext Synquacer SoC has a so-called 'pre-ITS', |
4719 | * which maps 32-bit writes targeted at a separate window of |
4720 | * size '4 << device_id_bits' onto writes to GITS_TRANSLATER |
4721 | * with device ID taken from bits [device_id_bits + 1:2] of |
4722 | * the window offset. |
4723 | */ |
4724 | return its->pre_its_base + (its_dev->device_id << 2); |
4725 | } |
4726 | |
4727 | static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data) |
4728 | { |
4729 | struct its_node *its = data; |
4730 | u32 pre_its_window[2]; |
4731 | u32 ids; |
4732 | |
4733 | if (!fwnode_property_read_u32_array(its->fwnode_handle, |
4734 | "socionext,synquacer-pre-its", |
4735 | pre_its_window, |
4736 | ARRAY_SIZE(pre_its_window))) { |
4737 | |
4738 | its->pre_its_base = pre_its_window[0]; |
4739 | its->get_msi_base = its_irq_get_msi_base_pre_its; |
4740 | |
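/* Recover the number of usable device ID bits from the pre-ITS window size ('4 << device_id_bits' bytes, see above). */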
4741 | ids = ilog2(pre_its_window[1]) - 2; |
4742 | if (device_ids(its) > ids) { |
4743 | its->typer &= ~GITS_TYPER_DEVBITS; |
4744 | its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1); |
4745 | } |
4746 | |
4747 | /* the pre-ITS breaks isolation, so disable MSI remapping */ |
4748 | its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_ISOLATED_MSI; |
4749 | return true; |
4750 | } |
4751 | return false; |
4752 | } |
4753 | |
4754 | static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data) |
4755 | { |
4756 | struct its_node *its = data; |
4757 | |
4758 | /* |
4759 | * Hip07 insists on using the wrong address for the VLPI |
4760 | * page. Trick it into doing the right thing... |
4761 | */ |
4762 | its->vlpi_redist_offset = SZ_128K; |
4763 | return true; |
4764 | } |
4765 | |
4766 | static bool __maybe_unused its_enable_rk3588001(void *data) |
4767 | { |
4768 | struct its_node *its = data; |
4769 | |
4770 | if (!of_machine_is_compatible("rockchip,rk3588") && |
4771 | !of_machine_is_compatible("rockchip,rk3588s")) |
4772 | return false; |
4773 | |
4774 | its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; |
4775 | gic_rdists->flags |= RDIST_FLAGS_FORCE_NON_SHAREABLE; |
4776 | |
4777 | return true; |
4778 | } |
4779 | |
4780 | static bool its_set_non_coherent(void *data) |
4781 | { |
4782 | struct its_node *its = data; |
4783 | |
4784 | its->flags |= ITS_FLAGS_FORCE_NON_SHAREABLE; |
4785 | return true; |
4786 | } |
4787 | |
4788 | static const struct gic_quirk its_quirks[] = { |
4789 | #ifdef CONFIG_CAVIUM_ERRATUM_22375 |
4790 | { |
4791 | .desc = "ITS: Cavium errata 22375, 24313", |
4792 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ |
4793 | .mask = 0xffff0fff, |
4794 | .init = its_enable_quirk_cavium_22375, |
4795 | }, |
4796 | #endif |
4797 | #ifdef CONFIG_CAVIUM_ERRATUM_23144 |
4798 | { |
4799 | .desc = "ITS: Cavium erratum 23144", |
4800 | .iidr = 0xa100034c, /* ThunderX pass 1.x */ |
4801 | .mask = 0xffff0fff, |
4802 | .init = its_enable_quirk_cavium_23144, |
4803 | }, |
4804 | #endif |
4805 | #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065 |
4806 | { |
4807 | .desc = "ITS: QDF2400 erratum 0065", |
4808 | .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */ |
4809 | .mask = 0xffffffff, |
4810 | .init = its_enable_quirk_qdf2400_e0065, |
4811 | }, |
4812 | #endif |
4813 | #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS |
4814 | { |
4815 | /* |
4816 | * The Socionext Synquacer SoC incorporates ARM's own GIC-500 |
4817 | * implementation, but with a 'pre-ITS' added that requires |
4818 | * special handling in software. |
4819 | */ |
4820 | .desc = "ITS: Socionext Synquacer pre-ITS", |
4821 | .iidr = 0x0001143b, |
4822 | .mask = 0xffffffff, |
4823 | .init = its_enable_quirk_socionext_synquacer, |
4824 | }, |
4825 | #endif |
4826 | #ifdef CONFIG_HISILICON_ERRATUM_161600802 |
4827 | { |
4828 | .desc = "ITS: Hip07 erratum 161600802", |
4829 | .iidr = 0x00000004, |
4830 | .mask = 0xffffffff, |
4831 | .init = its_enable_quirk_hip07_161600802, |
4832 | }, |
4833 | #endif |
4834 | #ifdef CONFIG_ROCKCHIP_ERRATUM_3588001 |
4835 | { |
4836 | .desc = "ITS: Rockchip erratum RK3588001", |
4837 | .iidr = 0x0201743b, |
4838 | .mask = 0xffffffff, |
4839 | .init = its_enable_rk3588001, |
4840 | }, |
4841 | #endif |
4842 | { |
4843 | .desc = "ITS: non-coherent attribute", |
4844 | .property = "dma-noncoherent", |
4845 | .init = its_set_non_coherent, |
4846 | }, |
4847 | { |
4848 | } |
4849 | }; |
4850 | |
4851 | static void its_enable_quirks(struct its_node *its) |
4852 | { |
4853 | u32 iidr = readl_relaxed(its->base + GITS_IIDR); |
4854 | |
4855 | gic_enable_quirks(iidr, its_quirks, its); |
4856 | |
4857 | if (is_of_node(its->fwnode_handle)) |
4858 | gic_enable_of_quirks(to_of_node(its->fwnode_handle), |
4859 | its_quirks, its); |
4860 | } |
4861 | |
4862 | static int its_save_disable(void) |
4863 | { |
4864 | struct its_node *its; |
4865 | int err = 0; |
4866 | |
4867 | raw_spin_lock(&its_lock); |
4868 | list_for_each_entry(its, &its_nodes, entry) { |
4869 | void __iomem *base; |
4870 | |
4871 | base = its->base; |
4872 | its->ctlr_save = readl_relaxed(base + GITS_CTLR); |
4873 | err = its_force_quiescent(base); |
4874 | if (err) { |
4875 | pr_err("ITS@%pa: failed to quiesce: %d\n", |
4876 | &its->phys_base, err); |
4877 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); |
4878 | goto err; |
4879 | } |
4880 | |
4881 | its->cbaser_save = gits_read_cbaser(base + GITS_CBASER); |
4882 | } |
4883 | |
4884 | err: |
4885 | if (err) { |
4886 | list_for_each_entry_continue_reverse(its, &its_nodes, entry) { |
4887 | void __iomem *base; |
4888 | |
4889 | base = its->base; |
4890 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); |
4891 | } |
4892 | } |
4893 | raw_spin_unlock(&its_lock); |
4894 | |
4895 | return err; |
4896 | } |
4897 | |
4898 | static void its_restore_enable(void) |
4899 | { |
4900 | struct its_node *its; |
4901 | int ret; |
4902 | |
4903 | raw_spin_lock(&its_lock); |
4904 | list_for_each_entry(its, &its_nodes, entry) { |
4905 | void __iomem *base; |
4906 | int i; |
4907 | |
4908 | base = its->base; |
4909 | |
4910 | /* |
4911 | * Make sure that the ITS is disabled. If it fails to quiesce, |
4912 | * don't restore it since writing to CBASER or BASER<n> |
4913 | * registers is undefined according to the GIC v3 ITS |
4914 | * Specification. |
4915 | * |
4916 | * Firmware resuming with the ITS enabled is terminally broken. |
4917 | */ |
4918 | WARN_ON(readl_relaxed(base + GITS_CTLR) & GITS_CTLR_ENABLE); |
4919 | ret = its_force_quiescent(base); |
4920 | if (ret) { |
4921 | pr_err("ITS@%pa: failed to quiesce on resume: %d\n", |
4922 | &its->phys_base, ret); |
4923 | continue; |
4924 | } |
4925 | |
4926 | gits_write_cbaser(its->cbaser_save, base + GITS_CBASER); |
4927 | |
4928 | /* |
4929 | * Writing CBASER resets CREADR to 0, so make CWRITER and |
4930 | * cmd_write line up with it. |
4931 | */ |
4932 | its->cmd_write = its->cmd_base; |
4933 | gits_write_cwriter(0, base + GITS_CWRITER); |
4934 | |
4935 | /* Restore GITS_BASER from the value cache. */ |
4936 | for (i = 0; i < GITS_BASER_NR_REGS; i++) { |
4937 | struct its_baser *baser = &its->tables[i]; |
4938 | |
4939 | if (!(baser->val & GITS_BASER_VALID)) |
4940 | continue; |
4941 | |
4942 | its_write_baser(its, baser, baser->val); |
4943 | } |
4944 | writel_relaxed(its->ctlr_save, base + GITS_CTLR); |
4945 | |
4946 | /* |
4947 | * Reinit the collection if it's stored in the ITS. This is |
4948 | * indicated by the col_id being less than the HCC field. |
4949 | * CID < HCC as specified in the GIC v3 Documentation. |
4950 | */ |
4951 | if (its->collections[smp_processor_id()].col_id < |
4952 | GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER))) |
4953 | its_cpu_init_collection(its); |
4954 | } |
4955 | raw_spin_unlock(&its_lock); |
4956 | } |
4957 | |
4958 | static struct syscore_ops its_syscore_ops = { |
4959 | .suspend = its_save_disable, |
4960 | .resume = its_restore_enable, |
4961 | }; |
4962 | |
4963 | static void __init __iomem *its_map_one(struct resource *res, int *err) |
4964 | { |
4965 | void __iomem *its_base; |
4966 | u32 val; |
4967 | |
4968 | its_base = ioremap(res->start, SZ_64K); |
4969 | if (!its_base) { |
4970 | pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start); |
4971 | *err = -ENOMEM; |
4972 | return NULL; |
4973 | } |
4974 | |
4975 | val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; |
4976 | if (val != 0x30 && val != 0x40) { |
4977 | pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start); |
4978 | *err = -ENODEV; |
4979 | goto out_unmap; |
4980 | } |
4981 | |
4982 | *err = its_force_quiescent(its_base); |
4983 | if (*err) { |
4984 | pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start); |
4985 | goto out_unmap; |
4986 | } |
4987 | |
4988 | return its_base; |
4989 | |
4990 | out_unmap: |
4991 | iounmap(its_base); |
4992 | return NULL; |
4993 | } |
4994 | |
4995 | static int its_init_domain(struct its_node *its) |
4996 | { |
4997 | struct irq_domain *inner_domain; |
4998 | struct msi_domain_info *info; |
4999 | |
5000 | info = kzalloc(sizeof(*info), GFP_KERNEL); |
5001 | if (!info) |
5002 | return -ENOMEM; |
5003 | |
5004 | info->ops = &its_msi_domain_ops; |
5005 | info->data = its; |
5006 | |
5007 | inner_domain = irq_domain_create_hierarchy(its_parent, |
5008 | its->msi_domain_flags, 0, |
5009 | its->fwnode_handle, &its_domain_ops, |
5010 | info); |
5011 | if (!inner_domain) { |
5012 | kfree(info); |
5013 | return -ENOMEM; |
5014 | } |
5015 | |
5016 | irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS); |
5017 | |
5018 | return 0; |
5019 | } |
5020 | |
5021 | static int its_init_vpe_domain(void) |
5022 | { |
5023 | struct its_node *its; |
5024 | u32 devid; |
5025 | int entries; |
5026 | |
5027 | if (gic_rdists->has_direct_lpi) { |
5028 | pr_info("ITS: Using DirectLPI for VPE invalidation\n"); |
5029 | return 0; |
5030 | } |
5031 | |
5032 | /* Any ITS will do, even if not v4 */ |
5033 | its = list_first_entry(&its_nodes, struct its_node, entry); |
5034 | |
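/* At most one vPE can be resident on each CPU, so one proxy slot per possible CPU (rounded up to a power of two) is enough. */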
5035 | entries = roundup_pow_of_two(nr_cpu_ids); |
5036 | vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes), |
5037 | GFP_KERNEL); |
5038 | if (!vpe_proxy.vpes) |
5039 | return -ENOMEM; |
5040 | |
5041 | /* Use the last possible DevID */ |
5042 | devid = GENMASK(device_ids(its) - 1, 0); |
5043 | vpe_proxy.dev = its_create_device(its, devid, entries, false); |
5044 | if (!vpe_proxy.dev) { |
5045 | kfree(vpe_proxy.vpes); |
5046 | pr_err("ITS: Can't allocate GICv4 proxy device\n"); |
5047 | return -ENOMEM; |
5048 | } |
5049 | |
5050 | BUG_ON(entries > vpe_proxy.dev->nr_ites); |
5051 | |
5052 | raw_spin_lock_init(&vpe_proxy.lock); |
5053 | vpe_proxy.next_victim = 0; |
5054 | pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n", |
5055 | devid, vpe_proxy.dev->nr_ites); |
5056 | |
5057 | return 0; |
5058 | } |
5059 | |
5060 | static int __init its_compute_its_list_map(struct its_node *its) |
5061 | { |
5062 | int its_number; |
5063 | u32 ctlr; |
5064 | |
5065 | /* |
5066 | * This is assumed to be done early enough that we're |
5067 | * guaranteed to be single-threaded, hence no |
5068 | * locking. Should this change, we should address |
5069 | * this. |
5070 | */ |
5071 | its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX); |
5072 | if (its_number >= GICv4_ITS_LIST_MAX) { |
5073 | pr_err("ITS@%pa: No ITSList entry available!\n", |
5074 | &its->phys_base); |
5075 | return -EINVAL; |
5076 | } |
5077 | |
5078 | ctlr = readl_relaxed(its->base + GITS_CTLR); |
5079 | ctlr &= ~GITS_CTLR_ITS_NUMBER; |
5080 | ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT; |
5081 | writel_relaxed(ctlr, its->base + GITS_CTLR); |
5082 | ctlr = readl_relaxed(its->base + GITS_CTLR); |
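/* Some implementations hardwire the ITS number; go with whatever value the register reports back. */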
5083 | if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) { |
5084 | its_number = ctlr & GITS_CTLR_ITS_NUMBER; |
5085 | its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT; |
5086 | } |
5087 | |
5088 | if (test_and_set_bit(its_number, &its_list_map)) { |
5089 | pr_err("ITS@%pa: Duplicate ITSList entry %d\n", |
5090 | &its->phys_base, its_number); |
5091 | return -EINVAL; |
5092 | } |
5093 | |
5094 | return its_number; |
5095 | } |
5096 | |
5097 | static int __init its_probe_one(struct its_node *its) |
5098 | { |
5099 | u64 baser, tmp; |
5100 | struct page *page; |
5101 | u32 ctlr; |
5102 | int err; |
5103 | |
5104 | its_enable_quirks(its); |
5105 | |
5106 | if (is_v4(its)) { |
5107 | if (!(its->typer & GITS_TYPER_VMOVP)) { |
5108 | err = its_compute_its_list_map(its); |
5109 | if (err < 0) |
5110 | goto out; |
5111 | |
5112 | its->list_nr = err; |
5113 | |
5114 | pr_info("ITS@%pa: Using ITS number %d\n", |
5115 | &its->phys_base, err); |
5116 | } else { |
5117 | pr_info("ITS@%pa: Single VMOVP capable\n", &its->phys_base); |
5118 | } |
5119 | |
5120 | if (is_v4_1(its)) { |
5121 | u32 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer); |
5122 | |
5123 | its->sgir_base = ioremap(its->phys_base + SZ_128K, SZ_64K); |
5124 | if (!its->sgir_base) { |
5125 | err = -ENOMEM; |
5126 | goto out; |
5127 | } |
5128 | |
5129 | its->mpidr = readl_relaxed(its->base + GITS_MPIDR); |
5130 | |
5131 | pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n", |
5132 | &its->phys_base, its->mpidr, svpet); |
5133 | } |
5134 | } |
5135 | |
5136 | page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, |
5137 | get_order(ITS_CMD_QUEUE_SZ)); |
5138 | if (!page) { |
5139 | err = -ENOMEM; |
5140 | goto out_unmap_sgir; |
5141 | } |
5142 | its->cmd_base = (void *)page_address(page); |
5143 | its->cmd_write = its->cmd_base; |
5144 | |
5145 | err = its_alloc_tables(its); |
5146 | if (err) |
5147 | goto out_free_cmd; |
5148 | |
5149 | err = its_alloc_collections(its); |
5150 | if (err) |
5151 | goto out_free_tables; |
5152 | |
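/* Program CBASER: command queue base and attributes, plus the queue size in 4kB pages (encoded as n - 1). */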
5153 | baser = (virt_to_phys(its->cmd_base) | |
5154 | GITS_CBASER_RaWaWb | |
5155 | GITS_CBASER_InnerShareable | |
5156 | (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | |
5157 | GITS_CBASER_VALID); |
5158 | |
5159 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
5160 | tmp = gits_read_cbaser(its->base + GITS_CBASER); |
5161 | |
5162 | if (its->flags & ITS_FLAGS_FORCE_NON_SHAREABLE) |
5163 | tmp &= ~GITS_CBASER_SHAREABILITY_MASK; |
5164 | |
5165 | if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { |
5166 | if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { |
5167 | /* |
5168 | * The HW reports non-shareable, we must |
5169 | * remove the cacheability attributes as |
5170 | * well. |
5171 | */ |
5172 | baser &= ~(GITS_CBASER_SHAREABILITY_MASK | |
5173 | GITS_CBASER_CACHEABILITY_MASK); |
5174 | baser |= GITS_CBASER_nC; |
5175 | gits_write_cbaser(baser, its->base + GITS_CBASER); |
5176 | } |
pr_info("ITS: using cache flushing for cmd queue\n");
5178 | its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; |
5179 | } |
5180 | |
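/*
 * Reset the command queue write pointer and enable the ITS. On
 * GICv4-capable implementations the implementation-defined ImDe bit
 * is set as well.
 */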
5181 | gits_write_cwriter(0, its->base + GITS_CWRITER); |
5182 | ctlr = readl_relaxed(its->base + GITS_CTLR); |
5183 | ctlr |= GITS_CTLR_ENABLE; |
5184 | if (is_v4(its)) |
5185 | ctlr |= GITS_CTLR_ImDe; |
5186 | writel_relaxed(ctlr, its->base + GITS_CTLR); |
5187 | |
5188 | err = its_init_domain(its); |
5189 | if (err) |
5190 | goto out_free_tables; |
5191 | |
5192 | raw_spin_lock(&its_lock); |
list_add(&its->entry, &its_nodes);
5194 | raw_spin_unlock(&its_lock); |
5195 | |
5196 | return 0; |
5197 | |
5198 | out_free_tables: |
5199 | its_free_tables(its); |
5200 | out_free_cmd: |
free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
if (its->sgir_base)
iounmap(its->sgir_base);
out:
pr_err("ITS@%pa: failed probing (%d)\n", &its->phys_base, err);
5207 | return err; |
5208 | } |
5209 | |
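/* GICR_TYPER.PLPIS tells us whether this redistributor supports physical LPIs. */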
5210 | static bool gic_rdists_supports_plpis(void) |
5211 | { |
5212 | return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); |
5213 | } |
5214 | |
5215 | static int redist_disable_lpis(void) |
5216 | { |
5217 | void __iomem *rbase = gic_data_rdist_rd_base(); |
5218 | u64 timeout = USEC_PER_SEC; |
5219 | u64 val; |
5220 | |
5221 | if (!gic_rdists_supports_plpis()) { |
pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
5223 | return -ENXIO; |
5224 | } |
5225 | |
5226 | val = readl_relaxed(rbase + GICR_CTLR); |
5227 | if (!(val & GICR_CTLR_ENABLE_LPIS)) |
5228 | return 0; |
5229 | |
5230 | /* |
5231 | * If coming via a CPU hotplug event, we don't need to disable |
5232 | * LPIs before trying to re-enable them. They are already |
5233 | * configured and all is well in the world. |
5234 | * |
5235 | * If running with preallocated tables, there is nothing to do. |
5236 | */ |
5237 | if ((gic_data_rdist()->flags & RD_LOCAL_LPI_ENABLED) || |
5238 | (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED)) |
5239 | return 0; |
5240 | |
5241 | /* |
5242 | * From that point on, we only try to do some damage control. |
5243 | */ |
pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
5245 | smp_processor_id()); |
5246 | add_taint(TAINT_CRAP, LOCKDEP_STILL_OK); |
5247 | |
5248 | /* Disable LPIs */ |
5249 | val &= ~GICR_CTLR_ENABLE_LPIS; |
5250 | writel_relaxed(val, rbase + GICR_CTLR); |
5251 | |
5252 | /* Make sure any change to GICR_CTLR is observable by the GIC */ |
5253 | dsb(sy); |
5254 | |
5255 | /* |
5256 | * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs |
5257 | * from 1 to 0 before programming GICR_PEND{PROP}BASER registers. |
5258 | * Error out if we time out waiting for RWP to clear. |
5259 | */ |
5260 | while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) { |
5261 | if (!timeout) { |
pr_err("CPU%d: Timeout while disabling LPIs\n",
5263 | smp_processor_id()); |
5264 | return -ETIMEDOUT; |
5265 | } |
5266 | udelay(1); |
5267 | timeout--; |
5268 | } |
5269 | |
5270 | /* |
5271 | * After it has been written to 1, it is IMPLEMENTATION |
5272 | * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be |
5273 | * cleared to 0. Error out if clearing the bit failed. |
5274 | */ |
5275 | if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) { |
pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
5277 | return -EBUSY; |
5278 | } |
5279 | |
5280 | return 0; |
5281 | } |
5282 | |
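/*
 * Per-CPU ITS setup, called for each CPU that comes online: make sure
 * LPIs are disabled (or sanely inherited from a previous kernel), then
 * program the redistributor tables and map this CPU's collection.
 */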
5283 | int its_cpu_init(void) |
5284 | { |
if (!list_empty(&its_nodes)) {
5286 | int ret; |
5287 | |
5288 | ret = redist_disable_lpis(); |
5289 | if (ret) |
5290 | return ret; |
5291 | |
5292 | its_cpu_init_lpis(); |
5293 | its_cpu_init_collections(); |
5294 | } |
5295 | |
5296 | return 0; |
5297 | } |
5298 | |
5299 | static void rdist_memreserve_cpuhp_cleanup_workfn(struct work_struct *work) |
5300 | { |
cpuhp_remove_state_nocalls(gic_rdists->cpuhp_memreserve_state);
5302 | gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; |
5303 | } |
5304 | |
5305 | static DECLARE_WORK(rdist_memreserve_cpuhp_cleanup_work, |
5306 | rdist_memreserve_cpuhp_cleanup_workfn); |
5307 | |
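/*
 * CPU hotplug callback: either free the pending table we allocated but
 * never used (the redistributor came up with one already programmed),
 * or reserve its memory so that a subsequent kexec can inherit it.
 */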
5308 | static int its_cpu_memreserve_lpi(unsigned int cpu) |
5309 | { |
5310 | struct page *pend_page; |
5311 | int ret = 0; |
5312 | |
5313 | /* This gets to run exactly once per CPU */ |
5314 | if (gic_data_rdist()->flags & RD_LOCAL_MEMRESERVE_DONE) |
5315 | return 0; |
5316 | |
5317 | pend_page = gic_data_rdist()->pend_page; |
5318 | if (WARN_ON(!pend_page)) { |
5319 | ret = -ENOMEM; |
5320 | goto out; |
5321 | } |
5322 | /* |
5323 | * If the pending table was pre-programmed, free the memory we |
5324 | * preemptively allocated. Otherwise, reserve that memory for |
5325 | * later kexecs. |
5326 | */ |
5327 | if (gic_data_rdist()->flags & RD_LOCAL_PENDTABLE_PREALLOCATED) { |
its_free_pending_table(pend_page);
5329 | gic_data_rdist()->pend_page = NULL; |
5330 | } else { |
5331 | phys_addr_t paddr = page_to_phys(pend_page); |
5332 | WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ)); |
5333 | } |
5334 | |
5335 | out: |
5336 | /* Last CPU being brought up gets to issue the cleanup */ |
5337 | if (!IS_ENABLED(CONFIG_SMP) || |
cpumask_equal(&cpus_booted_once_mask, cpu_possible_mask))
schedule_work(&rdist_memreserve_cpuhp_cleanup_work);
5340 | |
5341 | gic_data_rdist()->flags |= RD_LOCAL_MEMRESERVE_DONE; |
5342 | return ret; |
5343 | } |
5344 | |
5345 | /* Mark all the BASER registers as invalid before they get reprogrammed */ |
5346 | static int __init its_reset_one(struct resource *res) |
5347 | { |
5348 | void __iomem *its_base; |
5349 | int err, i; |
5350 | |
its_base = its_map_one(res, &err);
5352 | if (!its_base) |
5353 | return err; |
5354 | |
5355 | for (i = 0; i < GITS_BASER_NR_REGS; i++) |
5356 | gits_write_baser(0, its_base + GITS_BASER + (i << 3)); |
5357 | |
iounmap(its_base);
5359 | return 0; |
5360 | } |
5361 | |
5362 | static const struct of_device_id its_device_id[] = { |
{ .compatible = "arm,gic-v3-its", },
5364 | {}, |
5365 | }; |
5366 | |
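/*
 * Common allocation/initialisation path shared by the DT and ACPI
 * probes: map the register frame, allocate the its_node and populate
 * the fields that don't depend on the firmware flavour.
 */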
5367 | static struct its_node __init *its_node_init(struct resource *res, |
5368 | struct fwnode_handle *handle, int numa_node) |
5369 | { |
5370 | void __iomem *its_base; |
5371 | struct its_node *its; |
5372 | int err; |
5373 | |
its_base = its_map_one(res, &err);
5375 | if (!its_base) |
5376 | return NULL; |
5377 | |
pr_info("ITS %pR\n", res);

its = kzalloc(sizeof(*its), GFP_KERNEL);
5381 | if (!its) |
5382 | goto out_unmap; |
5383 | |
5384 | raw_spin_lock_init(&its->lock); |
5385 | mutex_init(&its->dev_alloc_lock); |
INIT_LIST_HEAD(&its->entry);
INIT_LIST_HEAD(&its->its_device_list);
5388 | |
5389 | its->typer = gic_read_typer(its_base + GITS_TYPER); |
5390 | its->base = its_base; |
5391 | its->phys_base = res->start; |
5392 | its->get_msi_base = its_irq_get_msi_base; |
5393 | its->msi_domain_flags = IRQ_DOMAIN_FLAG_ISOLATED_MSI; |
5394 | |
5395 | its->numa_node = numa_node; |
5396 | its->fwnode_handle = handle; |
5397 | |
5398 | return its; |
5399 | |
5400 | out_unmap: |
iounmap(its_base);
5402 | return NULL; |
5403 | } |
5404 | |
5405 | static void its_node_destroy(struct its_node *its) |
5406 | { |
iounmap(its->base);
kfree(its);
5409 | } |
5410 | |
5411 | static int __init its_of_probe(struct device_node *node) |
5412 | { |
5413 | struct device_node *np; |
5414 | struct resource res; |
5415 | int err; |
5416 | |
5417 | /* |
5418 | * Make sure *all* the ITS are reset before we probe any, as |
5419 | * they may be sharing memory. If any of the ITS fails to |
5420 | * reset, don't even try to go any further, as this could |
5421 | * result in something even worse. |
5422 | */ |
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
if (!of_device_is_available(np) ||
!of_property_read_bool(np, "msi-controller") ||
of_address_to_resource(np, 0, &res))
5428 | continue; |
5429 | |
err = its_reset_one(&res);
5431 | if (err) |
5432 | return err; |
5433 | } |
5434 | |
for (np = of_find_matching_node(node, its_device_id); np;
np = of_find_matching_node(np, its_device_id)) {
struct its_node *its;

if (!of_device_is_available(np))
continue;
if (!of_property_read_bool(np, "msi-controller")) {
pr_warn("%pOF: no msi-controller property, ITS ignored\n",
np);
continue;
}

if (of_address_to_resource(np, 0, &res)) {
pr_warn("%pOF: no regs?\n", np);
continue;
}

its = its_node_init(&res, &np->fwnode, of_node_to_nid(np));
5454 | if (!its) |
5455 | return -ENOMEM; |
5456 | |
5457 | err = its_probe_one(its); |
5458 | if (err) { |
5459 | its_node_destroy(its); |
5460 | return err; |
5461 | } |
5462 | } |
5463 | return 0; |
5464 | } |
5465 | |
5466 | #ifdef CONFIG_ACPI |
5467 | |
5468 | #define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K) |
5469 | |
5470 | #ifdef CONFIG_ACPI_NUMA |
5471 | struct its_srat_map { |
5472 | /* numa node id */ |
5473 | u32 numa_node; |
5474 | /* GIC ITS ID */ |
5475 | u32 its_id; |
5476 | }; |
5477 | |
5478 | static struct its_srat_map *its_srat_maps __initdata; |
5479 | static int its_in_srat __initdata; |
5480 | |
5481 | static int __init acpi_get_its_numa_node(u32 its_id) |
5482 | { |
5483 | int i; |
5484 | |
5485 | for (i = 0; i < its_in_srat; i++) { |
5486 | if (its_id == its_srat_maps[i].its_id) |
5487 | return its_srat_maps[i].numa_node; |
5488 | } |
5489 | return NUMA_NO_NODE; |
5490 | } |
5491 | |
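/*
 * First SRAT pass: we only need the number of GIC ITS affinity
 * entries, so this handler deliberately does nothing and lets
 * acpi_table_parse_entries() count the matches.
 */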
static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
const unsigned long end)
5494 | { |
5495 | return 0; |
5496 | } |
5497 | |
static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
const unsigned long end)
5500 | { |
5501 | int node; |
5502 | struct acpi_srat_gic_its_affinity *its_affinity; |
5503 | |
5504 | its_affinity = (struct acpi_srat_gic_its_affinity *)header; |
5505 | if (!its_affinity) |
5506 | return -EINVAL; |
5507 | |
5508 | if (its_affinity->header.length < sizeof(*its_affinity)) { |
pr_err("SRAT: Invalid header length %d in ITS affinity\n",
5510 | its_affinity->header.length); |
5511 | return -EINVAL; |
5512 | } |
5513 | |
5514 | /* |
5515 | * Note that in theory a new proximity node could be created by this |
5516 | * entry as it is an SRAT resource allocation structure. |
5517 | * We do not currently support doing so. |
5518 | */ |
5519 | node = pxm_to_node(its_affinity->proximity_domain); |
5520 | |
5521 | if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) { |
pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
5523 | return 0; |
5524 | } |
5525 | |
5526 | its_srat_maps[its_in_srat].numa_node = node; |
5527 | its_srat_maps[its_in_srat].its_id = its_affinity->its_id; |
5528 | its_in_srat++; |
pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
5530 | its_affinity->proximity_domain, its_affinity->its_id, node); |
5531 | |
5532 | return 0; |
5533 | } |
5534 | |
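/*
 * Two-pass SRAT parse: the first pass (gic_acpi_match_srat_its) merely
 * counts the ITS affinity entries so that its_srat_maps can be sized,
 * the second pass (gic_acpi_parse_srat_its) fills in the ITS ID to
 * NUMA node mapping.
 */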
5535 | static void __init acpi_table_parse_srat_its(void) |
5536 | { |
5537 | int count; |
5538 | |
5539 | count = acpi_table_parse_entries(ACPI_SIG_SRAT, |
sizeof(struct acpi_table_srat),
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
gic_acpi_match_srat_its, 0);
5543 | if (count <= 0) |
5544 | return; |
5545 | |
its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
5547 | GFP_KERNEL); |
5548 | if (!its_srat_maps) |
5549 | return; |
5550 | |
5551 | acpi_table_parse_entries(ACPI_SIG_SRAT, |
sizeof(struct acpi_table_srat),
ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
gic_acpi_parse_srat_its, 0);
5555 | } |
5556 | |
5557 | /* free the its_srat_maps after ITS probing */ |
5558 | static void __init acpi_its_srat_maps_free(void) |
5559 | { |
kfree(its_srat_maps);
5561 | } |
5562 | #else |
5563 | static void __init acpi_table_parse_srat_its(void) { } |
5564 | static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; } |
5565 | static void __init acpi_its_srat_maps_free(void) { } |
5566 | #endif |
5567 | |
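/*
 * For each GIC ITS entry in the MADT: build a resource covering the
 * 128K register frame, allocate a fwnode token for the MSI domain,
 * register it with IORT so that devices can be routed to this ITS,
 * and finally probe the ITS itself.
 */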
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
const unsigned long end)
5570 | { |
5571 | struct acpi_madt_generic_translator *its_entry; |
5572 | struct fwnode_handle *dom_handle; |
5573 | struct its_node *its; |
5574 | struct resource res; |
5575 | int err; |
5576 | |
5577 | its_entry = (struct acpi_madt_generic_translator *)header; |
5578 | memset(&res, 0, sizeof(res)); |
5579 | res.start = its_entry->base_address; |
5580 | res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1; |
5581 | res.flags = IORESOURCE_MEM; |
5582 | |
dom_handle = irq_domain_alloc_fwnode(&res.start);
if (!dom_handle) {
pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
5586 | &res.start); |
5587 | return -ENOMEM; |
5588 | } |
5589 | |
err = iort_register_domain_token(its_entry->translation_id, res.start,
dom_handle);
5592 | if (err) { |
pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
5594 | &res.start, its_entry->translation_id); |
5595 | goto dom_err; |
5596 | } |
5597 | |
its = its_node_init(&res, dom_handle,
acpi_get_its_numa_node(its_entry->translation_id));
5600 | if (!its) { |
5601 | err = -ENOMEM; |
5602 | goto node_err; |
5603 | } |
5604 | |
5605 | err = its_probe_one(its); |
5606 | if (!err) |
5607 | return 0; |
5608 | |
5609 | node_err: |
iort_deregister_domain_token(its_entry->translation_id);
dom_err:
irq_domain_free_fwnode(dom_handle);
5613 | return err; |
5614 | } |
5615 | |
static int __init its_acpi_reset(union acpi_subtable_headers *header,
const unsigned long end)
5618 | { |
5619 | struct acpi_madt_generic_translator *its_entry; |
5620 | struct resource res; |
5621 | |
5622 | its_entry = (struct acpi_madt_generic_translator *)header; |
5623 | res = (struct resource) { |
5624 | .start = its_entry->base_address, |
5625 | .end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1, |
5626 | .flags = IORESOURCE_MEM, |
5627 | }; |
5628 | |
return its_reset_one(&res);
5630 | } |
5631 | |
5632 | static void __init its_acpi_probe(void) |
5633 | { |
5634 | acpi_table_parse_srat_its(); |
5635 | /* |
5636 | * Make sure *all* the ITS are reset before we probe any, as |
5637 | * they may be sharing memory. If any of the ITS fails to |
5638 | * reset, don't even try to go any further, as this could |
5639 | * result in something even worse. |
5640 | */ |
if (acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
its_acpi_reset, 0) > 0)
acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
gic_acpi_parse_madt_its, 0);
5645 | acpi_its_srat_maps_free(); |
5646 | } |
5647 | #else |
5648 | static void __init its_acpi_probe(void) { } |
5649 | #endif |
5650 | |
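/*
 * Register a dynamic CPU hotplug state so that each CPU, as it comes
 * online, reserves (or frees) its LPI pending table. This is only
 * relevant when booted via EFI, as the reservation is recorded through
 * the EFI memreserve infrastructure for the benefit of kexec'd kernels.
 */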
5651 | int __init its_lpi_memreserve_init(void) |
5652 | { |
5653 | int state; |
5654 | |
5655 | if (!efi_enabled(EFI_CONFIG_TABLES)) |
5656 | return 0; |
5657 | |
if (list_empty(&its_nodes))
5659 | return 0; |
5660 | |
5661 | gic_rdists->cpuhp_memreserve_state = CPUHP_INVALID; |
state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
"irqchip/arm/gicv3/memreserve:online",
its_cpu_memreserve_lpi,
NULL);
5666 | if (state < 0) |
5667 | return state; |
5668 | |
5669 | gic_rdists->cpuhp_memreserve_state = state; |
5670 | |
5671 | return 0; |
5672 | } |
5673 | |
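/*
 * Main entry point, called from the GICv3 driver: discover all ITSes
 * (DT or ACPI), allocate the global LPI tables, and set up GICv4
 * (vLPI/vSGI) support when both the ITSes and the redistributors
 * advertise it.
 */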
5674 | int __init its_init(struct fwnode_handle *handle, struct rdists *rdists, |
5675 | struct irq_domain *parent_domain) |
5676 | { |
5677 | struct device_node *of_node; |
5678 | struct its_node *its; |
5679 | bool has_v4 = false; |
5680 | bool has_v4_1 = false; |
5681 | int err; |
5682 | |
5683 | gic_rdists = rdists; |
5684 | |
5685 | its_parent = parent_domain; |
5686 | of_node = to_of_node(handle); |
5687 | if (of_node) |
its_of_probe(of_node);
5689 | else |
5690 | its_acpi_probe(); |
5691 | |
if (list_empty(&its_nodes)) {
pr_warn("ITS: No ITS available, not enabling LPIs\n");
5694 | return -ENXIO; |
5695 | } |
5696 | |
5697 | err = allocate_lpi_tables(); |
5698 | if (err) |
5699 | return err; |
5700 | |
5701 | list_for_each_entry(its, &its_nodes, entry) { |
5702 | has_v4 |= is_v4(its); |
5703 | has_v4_1 |= is_v4_1(its); |
5704 | } |
5705 | |
5706 | /* Don't bother with inconsistent systems */ |
5707 | if (WARN_ON(!has_v4_1 && rdists->has_rvpeid)) |
5708 | rdists->has_rvpeid = false; |
5709 | |
5710 | if (has_v4 & rdists->has_vlpis) { |
5711 | const struct irq_domain_ops *sgi_ops; |
5712 | |
5713 | if (has_v4_1) |
5714 | sgi_ops = &its_sgi_domain_ops; |
5715 | else |
5716 | sgi_ops = NULL; |
5717 | |
5718 | if (its_init_vpe_domain() || |
its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
5720 | rdists->has_vlpis = false; |
pr_err("ITS: Disabling GICv4 support\n");
5722 | } |
5723 | } |
5724 | |
register_syscore_ops(&its_syscore_ops);
5726 | |
5727 | return 0; |
5728 | } |
5729 | |