1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Local APIC related interfaces to support IOAPIC, MSI, etc. |
4 | * |
5 | * Copyright (C) 1997, 1998, 1999, 2000, 2009 Ingo Molnar, Hajnalka Szabo |
6 | * Moved from arch/x86/kernel/apic/io_apic.c. |
7 | * Jiang Liu <jiang.liu@linux.intel.com> |
8 | * Enable support of hierarchical irqdomains |
9 | */ |
10 | #include <linux/interrupt.h> |
11 | #include <linux/irq.h> |
12 | #include <linux/seq_file.h> |
13 | #include <linux/init.h> |
14 | #include <linux/compiler.h> |
15 | #include <linux/slab.h> |
16 | #include <asm/irqdomain.h> |
17 | #include <asm/hw_irq.h> |
18 | #include <asm/traps.h> |
19 | #include <asm/apic.h> |
20 | #include <asm/i8259.h> |
21 | #include <asm/desc.h> |
22 | #include <asm/irq_remapping.h> |
23 | |
24 | #include <asm/trace/irq_vectors.h> |
25 | |
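/*
 * Per-interrupt state maintained by the vector domain:
 *
 *  - hw_irq_cfg:  vector/destination APIC ID pair which is programmed
 *                 into the hardware (IOAPIC RTE, MSI message, ...)
 *  - vector/cpu:  currently assigned vector and target CPU
 *  - prev_vector/prev_cpu: previous assignment, kept around until the
 *                 cleanup after a vector move has run
 *  - irq:         Linux interrupt number
 *  - clist:       entry in the per-CPU cleanup list of moved vectors
 *  - flag bits:   track pending moves, managed interrupts and the
 *                 reservation mode handling below
 */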
26 | struct apic_chip_data { |
27 | struct irq_cfg hw_irq_cfg; |
28 | unsigned int vector; |
29 | unsigned int prev_vector; |
30 | unsigned int cpu; |
31 | unsigned int prev_cpu; |
32 | unsigned int irq; |
33 | struct hlist_node clist; |
34 | unsigned int move_in_progress : 1, |
35 | is_managed : 1, |
36 | can_reserve : 1, |
37 | has_reserved : 1; |
38 | }; |
39 | |
40 | struct irq_domain *x86_vector_domain; |
41 | EXPORT_SYMBOL_GPL(x86_vector_domain); |
42 | static DEFINE_RAW_SPINLOCK(vector_lock); |
43 | static cpumask_var_t vector_searchmask; |
44 | static struct irq_chip lapic_controller; |
45 | static struct irq_matrix *vector_matrix; |
46 | #ifdef CONFIG_SMP |
47 | |
48 | static void vector_cleanup_callback(struct timer_list *tmr); |
49 | |
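/*
 * Per-CPU list of interrupts whose previous vector on this CPU still has
 * to be released after a move, plus the timer which scans that list.
 * See __vector_schedule_cleanup() and __vector_cleanup() below.
 */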
50 | struct vector_cleanup { |
51 | struct hlist_head head; |
52 | struct timer_list timer; |
53 | }; |
54 | |
55 | static DEFINE_PER_CPU(struct vector_cleanup, vector_cleanup) = { |
56 | .head = HLIST_HEAD_INIT, |
57 | .timer = __TIMER_INITIALIZER(vector_cleanup_callback, TIMER_PINNED), |
58 | }; |
59 | #endif |
60 | |
61 | void lock_vector_lock(void) |
62 | { |
63 | /* Used so that the online set of CPUs does not change |
64 | * during assign_irq_vector(). |
65 | */ |
66 | raw_spin_lock(&vector_lock); |
67 | } |
68 | |
69 | void unlock_vector_lock(void) |
70 | { |
71 | raw_spin_unlock(&vector_lock); |
72 | } |
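/*
 * Illustrative only (the callers live outside this file): the CPU hotplug
 * code brackets the online/offline transition with this lock, roughly:
 *
 *	lock_vector_lock();
 *	set_cpu_online(smp_processor_id(), true);
 *	lapic_online();
 *	unlock_vector_lock();
 *
 * so that vector assignment never observes a half-updated online mask.
 */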
73 | |
74 | void init_irq_alloc_info(struct irq_alloc_info *info, |
75 | const struct cpumask *mask) |
76 | { |
77 | memset(info, 0, sizeof(*info)); |
78 | info->mask = mask; |
79 | } |
80 | |
81 | void copy_irq_alloc_info(struct irq_alloc_info *dst, struct irq_alloc_info *src) |
82 | { |
83 | if (src) |
84 | *dst = *src; |
85 | else |
86 | memset(dst, 0, sizeof(*dst)); |
87 | } |
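/*
 * Illustrative use (not from this file): callers allocating interrupts
 * through the x86 irqdomain hierarchy typically prepare the allocation
 * info first, e.g.:
 *
 *	struct irq_alloc_info info;
 *
 *	init_irq_alloc_info(&info, NULL);
 *	info.flags = X86_IRQ_ALLOC_LEGACY;
 *	irq_domain_alloc_irqs(domain, nr_irqs, node, &info);
 *
 * The info structure is handed down the hierarchy to
 * x86_vector_alloc_irqs() below.
 */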
88 | |
89 | static struct apic_chip_data *apic_chip_data(struct irq_data *irqd) |
90 | { |
91 | if (!irqd) |
92 | return NULL; |
93 | |
94 | while (irqd->parent_data) |
95 | irqd = irqd->parent_data; |
96 | |
97 | return irqd->chip_data; |
98 | } |
99 | |
100 | struct irq_cfg *irqd_cfg(struct irq_data *irqd) |
101 | { |
102 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
103 | |
104 | return apicd ? &apicd->hw_irq_cfg : NULL; |
105 | } |
106 | EXPORT_SYMBOL_GPL(irqd_cfg); |
107 | |
108 | struct irq_cfg *irq_cfg(unsigned int irq) |
109 | { |
110 | return irqd_cfg(irq_get_irq_data(irq)); |
111 | } |
112 | |
113 | static struct apic_chip_data *alloc_apic_chip_data(int node) |
114 | { |
115 | struct apic_chip_data *apicd; |
116 | |
117 | apicd = kzalloc_node(size: sizeof(*apicd), GFP_KERNEL, node); |
118 | if (apicd) |
119 | INIT_HLIST_NODE(h: &apicd->clist); |
120 | return apicd; |
121 | } |
122 | |
123 | static void free_apic_chip_data(struct apic_chip_data *apicd) |
124 | { |
125 | kfree(objp: apicd); |
126 | } |
127 | |
128 | static void apic_update_irq_cfg(struct irq_data *irqd, unsigned int vector, |
129 | unsigned int cpu) |
130 | { |
131 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
132 | |
133 | lockdep_assert_held(&vector_lock); |
134 | |
135 | apicd->hw_irq_cfg.vector = vector; |
136 | apicd->hw_irq_cfg.dest_apicid = apic->calc_dest_apicid(cpu); |
137 | irq_data_update_effective_affinity(d: irqd, cpumask_of(cpu)); |
138 | trace_vector_config(irq: irqd->irq, vector, cpu, |
139 | apicdest: apicd->hw_irq_cfg.dest_apicid); |
140 | } |
141 | |
142 | static void apic_update_vector(struct irq_data *irqd, unsigned int newvec, |
143 | unsigned int newcpu) |
144 | { |
145 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
146 | struct irq_desc *desc = irq_data_to_desc(data: irqd); |
147 | bool managed = irqd_affinity_is_managed(d: irqd); |
148 | |
149 | lockdep_assert_held(&vector_lock); |
150 | |
151 | trace_vector_update(irq: irqd->irq, vector: newvec, cpu: newcpu, prev_vector: apicd->vector, |
152 | prev_cpu: apicd->cpu); |
153 | |
154 | /* |
155 | * If there is no vector associated or if the associated vector is |
156 | * the shutdown vector, which is associated to make PCI/MSI |
157 | * shutdown mode work, then there is nothing to release. Clear out |
158 | * prev_vector for this and the offlined target case. |
159 | */ |
160 | apicd->prev_vector = 0; |
161 | if (!apicd->vector || apicd->vector == MANAGED_IRQ_SHUTDOWN_VECTOR) |
162 | goto setnew; |
163 | /* |
164 | * If the target CPU of the previous vector is online, then mark |
165 | * the vector as move in progress and store it for cleanup when the |
166 | * first interrupt on the new vector arrives. If the target CPU is |
167 | * offline then the regular release mechanism via the cleanup |
168 | * vector is not possible and the vector can be immediately freed |
169 | * in the underlying matrix allocator. |
170 | */ |
171 | if (cpu_online(cpu: apicd->cpu)) { |
172 | apicd->move_in_progress = true; |
173 | apicd->prev_vector = apicd->vector; |
174 | apicd->prev_cpu = apicd->cpu; |
175 | WARN_ON_ONCE(apicd->cpu == newcpu); |
176 | } else { |
177 | irq_matrix_free(m: vector_matrix, cpu: apicd->cpu, bit: apicd->vector, |
178 | managed); |
179 | } |
180 | |
181 | setnew: |
182 | apicd->vector = newvec; |
183 | apicd->cpu = newcpu; |
184 | BUG_ON(!IS_ERR_OR_NULL(per_cpu(vector_irq, newcpu)[newvec])); |
185 | per_cpu(vector_irq, newcpu)[newvec] = desc; |
186 | } |
187 | |
188 | static void vector_assign_managed_shutdown(struct irq_data *irqd) |
189 | { |
190 | unsigned int cpu = cpumask_first(cpu_online_mask); |
191 | |
192 | apic_update_irq_cfg(irqd, MANAGED_IRQ_SHUTDOWN_VECTOR, cpu); |
193 | } |
194 | |
195 | static int reserve_managed_vector(struct irq_data *irqd) |
196 | { |
197 | const struct cpumask *affmsk = irq_data_get_affinity_mask(d: irqd); |
198 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
199 | unsigned long flags; |
200 | int ret; |
201 | |
202 | raw_spin_lock_irqsave(&vector_lock, flags); |
203 | apicd->is_managed = true; |
204 | ret = irq_matrix_reserve_managed(m: vector_matrix, msk: affmsk); |
205 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
206 | trace_vector_reserve_managed(irq: irqd->irq, ret); |
207 | return ret; |
208 | } |
209 | |
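/*
 * Reservation mode: instead of handing out a real vector at allocation
 * time, only a global reservation is made in the matrix and the interrupt
 * is parked on MANAGED_IRQ_SHUTDOWN_VECTOR. A real vector is assigned in
 * activate_reserved() when the interrupt is actually started up. This
 * avoids exhausting the vector space with interrupts which are allocated
 * but never requested (e.g. unused MSI-X entries).
 */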
210 | static void reserve_irq_vector_locked(struct irq_data *irqd) |
211 | { |
212 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
213 | |
214 | irq_matrix_reserve(m: vector_matrix); |
215 | apicd->can_reserve = true; |
216 | apicd->has_reserved = true; |
217 | irqd_set_can_reserve(d: irqd); |
218 | trace_vector_reserve(irq: irqd->irq, ret: 0); |
219 | vector_assign_managed_shutdown(irqd); |
220 | } |
221 | |
222 | static int reserve_irq_vector(struct irq_data *irqd) |
223 | { |
224 | unsigned long flags; |
225 | |
226 | raw_spin_lock_irqsave(&vector_lock, flags); |
227 | reserve_irq_vector_locked(irqd); |
228 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
229 | return 0; |
230 | } |
231 | |
232 | static int |
233 | assign_vector_locked(struct irq_data *irqd, const struct cpumask *dest) |
234 | { |
235 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
236 | bool resvd = apicd->has_reserved; |
237 | unsigned int cpu = apicd->cpu; |
238 | int vector = apicd->vector; |
239 | |
240 | lockdep_assert_held(&vector_lock); |
241 | |
242 | /* |
243 | * If the current target CPU is online and in the new requested |
244 | * affinity mask, there is no point in moving the interrupt from |
245 | * one CPU to another. |
246 | */ |
247 | if (vector && cpu_online(cpu) && cpumask_test_cpu(cpu, cpumask: dest)) |
248 | return 0; |
249 | |
250 | /* |
251 | * Careful here. @apicd might either have move_in_progress set or |
252 | * be enqueued for cleanup. Assigning a new vector would either |
253 | * leave a stale vector on some CPU around or in case of a pending |
254 | * cleanup corrupt the hlist. |
255 | */ |
256 | if (apicd->move_in_progress || !hlist_unhashed(h: &apicd->clist)) |
257 | return -EBUSY; |
258 | |
259 | vector = irq_matrix_alloc(m: vector_matrix, msk: dest, reserved: resvd, mapped_cpu: &cpu); |
260 | trace_vector_alloc(irq: irqd->irq, vector, reserved: resvd, ret: vector); |
261 | if (vector < 0) |
262 | return vector; |
263 | apic_update_vector(irqd, newvec: vector, newcpu: cpu); |
264 | apic_update_irq_cfg(irqd, vector, cpu); |
265 | |
266 | return 0; |
267 | } |
268 | |
269 | static int assign_irq_vector(struct irq_data *irqd, const struct cpumask *dest) |
270 | { |
271 | unsigned long flags; |
272 | int ret; |
273 | |
274 | raw_spin_lock_irqsave(&vector_lock, flags); |
275 | cpumask_and(dstp: vector_searchmask, src1p: dest, cpu_online_mask); |
276 | ret = assign_vector_locked(irqd, dest: vector_searchmask); |
277 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
278 | return ret; |
279 | } |
280 | |
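/*
 * Fallback order when no specific target was requested: the intersection
 * of the affinity mask and the NUMA node mask first, then the full
 * affinity mask, then the node mask, and finally any online CPU.
 */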
281 | static int assign_irq_vector_any_locked(struct irq_data *irqd) |
282 | { |
283 | /* Get the affinity mask - either irq_default_affinity or (user) set */ |
284 | const struct cpumask *affmsk = irq_data_get_affinity_mask(d: irqd); |
285 | int node = irq_data_get_node(d: irqd); |
286 | |
287 | if (node != NUMA_NO_NODE) { |
288 | /* Try the intersection of @affmsk and node mask */ |
289 | cpumask_and(dstp: vector_searchmask, src1p: cpumask_of_node(node), src2p: affmsk); |
290 | if (!assign_vector_locked(irqd, dest: vector_searchmask)) |
291 | return 0; |
292 | } |
293 | |
294 | /* Try the full affinity mask */ |
295 | cpumask_and(dstp: vector_searchmask, src1p: affmsk, cpu_online_mask); |
296 | if (!assign_vector_locked(irqd, dest: vector_searchmask)) |
297 | return 0; |
298 | |
299 | if (node != NUMA_NO_NODE) { |
300 | /* Try the node mask */ |
301 | if (!assign_vector_locked(irqd, dest: cpumask_of_node(node))) |
302 | return 0; |
303 | } |
304 | |
305 | /* Try the full online mask */ |
306 | return assign_vector_locked(irqd, cpu_online_mask); |
307 | } |
308 | |
309 | static int |
310 | assign_irq_vector_policy(struct irq_data *irqd, struct irq_alloc_info *info) |
311 | { |
312 | if (irqd_affinity_is_managed(d: irqd)) |
313 | return reserve_managed_vector(irqd); |
314 | if (info->mask) |
315 | return assign_irq_vector(irqd, dest: info->mask); |
316 | /* |
317 | * Make only a global reservation with no guarantee. A real vector |
318 | * is associated at activation time. |
319 | */ |
320 | return reserve_irq_vector(irqd); |
321 | } |
322 | |
323 | static int |
324 | assign_managed_vector(struct irq_data *irqd, const struct cpumask *dest) |
325 | { |
326 | const struct cpumask *affmsk = irq_data_get_affinity_mask(d: irqd); |
327 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
328 | int vector, cpu; |
329 | |
330 | cpumask_and(dstp: vector_searchmask, src1p: dest, src2p: affmsk); |
331 | |
332 | /* set_affinity might end up here with nothing to do */ |
333 | if (apicd->vector && cpumask_test_cpu(cpu: apicd->cpu, cpumask: vector_searchmask)) |
334 | return 0; |
335 | vector = irq_matrix_alloc_managed(m: vector_matrix, msk: vector_searchmask, |
336 | mapped_cpu: &cpu); |
337 | trace_vector_alloc_managed(irq: irqd->irq, vector, ret: vector); |
338 | if (vector < 0) |
339 | return vector; |
340 | apic_update_vector(irqd, newvec: vector, newcpu: cpu); |
341 | apic_update_irq_cfg(irqd, vector, cpu); |
342 | return 0; |
343 | } |
344 | |
345 | static void clear_irq_vector(struct irq_data *irqd) |
346 | { |
347 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
348 | bool managed = irqd_affinity_is_managed(d: irqd); |
349 | unsigned int vector = apicd->vector; |
350 | |
351 | lockdep_assert_held(&vector_lock); |
352 | |
353 | if (!vector) |
354 | return; |
355 | |
356 | trace_vector_clear(irq: irqd->irq, vector, cpu: apicd->cpu, prev_vector: apicd->prev_vector, |
357 | prev_cpu: apicd->prev_cpu); |
358 | |
359 | per_cpu(vector_irq, apicd->cpu)[vector] = VECTOR_SHUTDOWN; |
360 | irq_matrix_free(m: vector_matrix, cpu: apicd->cpu, bit: vector, managed); |
361 | apicd->vector = 0; |
362 | |
363 | /* Clean up move in progress */ |
364 | vector = apicd->prev_vector; |
365 | if (!vector) |
366 | return; |
367 | |
368 | per_cpu(vector_irq, apicd->prev_cpu)[vector] = VECTOR_SHUTDOWN; |
369 | irq_matrix_free(m: vector_matrix, cpu: apicd->prev_cpu, bit: vector, managed); |
370 | apicd->prev_vector = 0; |
371 | apicd->move_in_progress = 0; |
372 | hlist_del_init(n: &apicd->clist); |
373 | } |
374 | |
375 | static void x86_vector_deactivate(struct irq_domain *dom, struct irq_data *irqd) |
376 | { |
377 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
378 | unsigned long flags; |
379 | |
380 | trace_vector_deactivate(irq: irqd->irq, is_managed: apicd->is_managed, |
381 | can_reserve: apicd->can_reserve, reserve: false); |
382 | |
383 | /* Regular fixed assigned interrupt */ |
384 | if (!apicd->is_managed && !apicd->can_reserve) |
385 | return; |
386 | /* If the interrupt has a global reservation, nothing to do */ |
387 | if (apicd->has_reserved) |
388 | return; |
389 | |
390 | raw_spin_lock_irqsave(&vector_lock, flags); |
391 | clear_irq_vector(irqd); |
392 | if (apicd->can_reserve) |
393 | reserve_irq_vector_locked(irqd); |
394 | else |
395 | vector_assign_managed_shutdown(irqd); |
396 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
397 | } |
398 | |
399 | static int activate_reserved(struct irq_data *irqd) |
400 | { |
401 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
402 | int ret; |
403 | |
404 | ret = assign_irq_vector_any_locked(irqd); |
405 | if (!ret) { |
406 | apicd->has_reserved = false; |
407 | /* |
408 | * Core might have disabled reservation mode after |
409 | * allocating the irq descriptor. Ideally this should |
410 | * happen before allocation time, but that would require |
411 | * completely convoluted ways of transporting that |
412 | * information. |
413 | */ |
414 | if (!irqd_can_reserve(d: irqd)) |
415 | apicd->can_reserve = false; |
416 | } |
417 | |
418 | /* |
419 | * Check to ensure that the effective affinity mask is a subset of |
420 | * the user supplied affinity mask, and warn the user if it is not. |
421 | */ |
422 | if (!cpumask_subset(src1p: irq_data_get_effective_affinity_mask(d: irqd), |
423 | src2p: irq_data_get_affinity_mask(d: irqd))) { |
424 | pr_warn("irq %u: Affinity broken due to vector space exhaustion.\n", |
425 | irqd->irq); |
426 | } |
427 | |
428 | return ret; |
429 | } |
430 | |
431 | static int activate_managed(struct irq_data *irqd) |
432 | { |
433 | const struct cpumask *dest = irq_data_get_affinity_mask(d: irqd); |
434 | int ret; |
435 | |
436 | cpumask_and(dstp: vector_searchmask, src1p: dest, cpu_online_mask); |
437 | if (WARN_ON_ONCE(cpumask_empty(vector_searchmask))) { |
438 | /* Something in the core code broke! Survive gracefully */ |
439 | pr_err("Managed startup for irq %u, but no CPU\n", irqd->irq); |
440 | return -EINVAL; |
441 | } |
442 | |
443 | ret = assign_managed_vector(irqd, dest: vector_searchmask); |
444 | /* |
445 | * This should not happen. The vector reservation got buggered. Handle |
446 | * it gracefully. |
447 | */ |
448 | if (WARN_ON_ONCE(ret < 0)) { |
449 | pr_err("Managed startup irq %u, no vector available\n", |
450 | irqd->irq); |
451 | } |
452 | return ret; |
453 | } |
454 | |
455 | static int x86_vector_activate(struct irq_domain *dom, struct irq_data *irqd, |
456 | bool reserve) |
457 | { |
458 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
459 | unsigned long flags; |
460 | int ret = 0; |
461 | |
462 | trace_vector_activate(irq: irqd->irq, is_managed: apicd->is_managed, |
463 | can_reserve: apicd->can_reserve, reserve); |
464 | |
465 | raw_spin_lock_irqsave(&vector_lock, flags); |
466 | if (!apicd->can_reserve && !apicd->is_managed) |
467 | assign_irq_vector_any_locked(irqd); |
468 | else if (reserve || irqd_is_managed_and_shutdown(d: irqd)) |
469 | vector_assign_managed_shutdown(irqd); |
470 | else if (apicd->is_managed) |
471 | ret = activate_managed(irqd); |
472 | else if (apicd->has_reserved) |
473 | ret = activate_reserved(irqd); |
474 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
475 | return ret; |
476 | } |
477 | |
478 | static void vector_free_reserved_and_managed(struct irq_data *irqd) |
479 | { |
480 | const struct cpumask *dest = irq_data_get_affinity_mask(d: irqd); |
481 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
482 | |
483 | trace_vector_teardown(irq: irqd->irq, is_managed: apicd->is_managed, |
484 | has_reserved: apicd->has_reserved); |
485 | |
486 | if (apicd->has_reserved) |
487 | irq_matrix_remove_reserved(m: vector_matrix); |
488 | if (apicd->is_managed) |
489 | irq_matrix_remove_managed(m: vector_matrix, msk: dest); |
490 | } |
491 | |
492 | static void x86_vector_free_irqs(struct irq_domain *domain, |
493 | unsigned int virq, unsigned int nr_irqs) |
494 | { |
495 | struct apic_chip_data *apicd; |
496 | struct irq_data *irqd; |
497 | unsigned long flags; |
498 | int i; |
499 | |
500 | for (i = 0; i < nr_irqs; i++) { |
501 | irqd = irq_domain_get_irq_data(domain: x86_vector_domain, virq: virq + i); |
502 | if (irqd && irqd->chip_data) { |
503 | raw_spin_lock_irqsave(&vector_lock, flags); |
504 | clear_irq_vector(irqd); |
505 | vector_free_reserved_and_managed(irqd); |
506 | apicd = irqd->chip_data; |
507 | irq_domain_reset_irq_data(irq_data: irqd); |
508 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
509 | free_apic_chip_data(apicd); |
510 | } |
511 | } |
512 | } |
513 | |
514 | static bool vector_configure_legacy(unsigned int virq, struct irq_data *irqd, |
515 | struct apic_chip_data *apicd) |
516 | { |
517 | unsigned long flags; |
518 | bool realloc = false; |
519 | |
520 | apicd->vector = ISA_IRQ_VECTOR(virq); |
521 | apicd->cpu = 0; |
522 | |
523 | raw_spin_lock_irqsave(&vector_lock, flags); |
524 | /* |
525 | * If the interrupt is activated, then it must stay at this vector |
526 | * position. That's usually the timer interrupt (0). |
527 | */ |
528 | if (irqd_is_activated(d: irqd)) { |
529 | trace_vector_setup(irq: virq, is_legacy: true, ret: 0); |
530 | apic_update_irq_cfg(irqd, vector: apicd->vector, cpu: apicd->cpu); |
531 | } else { |
532 | /* Release the vector */ |
533 | apicd->can_reserve = true; |
534 | irqd_set_can_reserve(d: irqd); |
535 | clear_irq_vector(irqd); |
536 | realloc = true; |
537 | } |
538 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
539 | return realloc; |
540 | } |
541 | |
542 | static int x86_vector_alloc_irqs(struct irq_domain *domain, unsigned int virq, |
543 | unsigned int nr_irqs, void *arg) |
544 | { |
545 | struct irq_alloc_info *info = arg; |
546 | struct apic_chip_data *apicd; |
547 | struct irq_data *irqd; |
548 | int i, err, node; |
549 | |
550 | if (apic_is_disabled) |
551 | return -ENXIO; |
552 | |
553 | /* |
554 | * Catch any attempt to touch the cascade interrupt on a PIC |
555 | * equipped system. |
556 | */ |
557 | if (WARN_ON_ONCE(info->flags & X86_IRQ_ALLOC_LEGACY && |
558 | virq == PIC_CASCADE_IR)) |
559 | return -EINVAL; |
560 | |
561 | for (i = 0; i < nr_irqs; i++) { |
562 | irqd = irq_domain_get_irq_data(domain, virq: virq + i); |
563 | BUG_ON(!irqd); |
564 | node = irq_data_get_node(d: irqd); |
565 | WARN_ON_ONCE(irqd->chip_data); |
566 | apicd = alloc_apic_chip_data(node); |
567 | if (!apicd) { |
568 | err = -ENOMEM; |
569 | goto error; |
570 | } |
571 | |
572 | apicd->irq = virq + i; |
573 | irqd->chip = &lapic_controller; |
574 | irqd->chip_data = apicd; |
575 | irqd->hwirq = virq + i; |
576 | irqd_set_single_target(d: irqd); |
577 | /* |
578 | * Prevent that any of these interrupts is invoked in |
579 | * non interrupt context via e.g. generic_handle_irq() |
580 | * as that can corrupt the affinity move state. |
581 | */ |
582 | irqd_set_handle_enforce_irqctx(d: irqd); |
583 | |
584 | /* Don't invoke affinity setter on deactivated interrupts */ |
585 | irqd_set_affinity_on_activate(d: irqd); |
586 | |
587 | /* |
588 | * Legacy vectors are already assigned when the IOAPIC |
589 | * takes them over. They stay on the same vector. This is |
590 | * required for check_timer() to work correctly as it might |
591 | * switch back to legacy mode. Only update the hardware |
592 | * config. |
593 | */ |
594 | if (info->flags & X86_IRQ_ALLOC_LEGACY) { |
595 | if (!vector_configure_legacy(virq: virq + i, irqd, apicd)) |
596 | continue; |
597 | } |
598 | |
599 | err = assign_irq_vector_policy(irqd, info); |
600 | trace_vector_setup(irq: virq + i, is_legacy: false, ret: err); |
601 | if (err) { |
602 | irqd->chip_data = NULL; |
603 | free_apic_chip_data(apicd); |
604 | goto error; |
605 | } |
606 | } |
607 | |
608 | return 0; |
609 | |
610 | error: |
611 | x86_vector_free_irqs(domain, virq, nr_irqs: i); |
612 | return err; |
613 | } |
614 | |
615 | #ifdef CONFIG_GENERIC_IRQ_DEBUGFS |
616 | static void x86_vector_debug_show(struct seq_file *m, struct irq_domain *d, |
617 | struct irq_data *irqd, int ind) |
618 | { |
619 | struct apic_chip_data apicd; |
620 | unsigned long flags; |
621 | int irq; |
622 | |
623 | if (!irqd) { |
624 | irq_matrix_debug_show(sf: m, m: vector_matrix, ind); |
625 | return; |
626 | } |
627 | |
628 | irq = irqd->irq; |
629 | if (irq < nr_legacy_irqs() && !test_bit(irq, &io_apic_irqs)) { |
630 | seq_printf(m, fmt: "%*sVector: %5d\n", ind, "", ISA_IRQ_VECTOR(irq)); |
631 | seq_printf(m, fmt: "%*sTarget: Legacy PIC all CPUs\n", ind, ""); |
632 | return; |
633 | } |
634 | |
635 | if (!irqd->chip_data) { |
636 | seq_printf(m, fmt: "%*sVector: Not assigned\n", ind, ""); |
637 | return; |
638 | } |
639 | |
640 | raw_spin_lock_irqsave(&vector_lock, flags); |
641 | memcpy(&apicd, irqd->chip_data, sizeof(apicd)); |
642 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
643 | |
644 | seq_printf(m, fmt: "%*sVector: %5u\n", ind, "", apicd.vector); |
645 | seq_printf(m, fmt: "%*sTarget: %5u\n", ind, "", apicd.cpu); |
646 | if (apicd.prev_vector) { |
647 | seq_printf(m, fmt: "%*sPrevious vector: %5u\n", ind, "", apicd.prev_vector); |
648 | seq_printf(m, fmt: "%*sPrevious target: %5u\n", ind, "", apicd.prev_cpu); |
649 | } |
650 | seq_printf(m, fmt: "%*smove_in_progress: %u\n", ind, "", apicd.move_in_progress ? 1 : 0); |
651 | seq_printf(m, fmt: "%*sis_managed: %u\n", ind, "", apicd.is_managed ? 1 : 0); |
652 | seq_printf(m, fmt: "%*scan_reserve: %u\n", ind, "", apicd.can_reserve ? 1 : 0); |
653 | seq_printf(m, fmt: "%*shas_reserved: %u\n", ind, "", apicd.has_reserved ? 1 : 0); |
654 | seq_printf(m, fmt: "%*scleanup_pending: %u\n", ind, "", !hlist_unhashed(h: &apicd.clist)); |
655 | } |
656 | #endif |
657 | |
658 | int x86_fwspec_is_ioapic(struct irq_fwspec *fwspec) |
659 | { |
660 | if (fwspec->param_count != 1) |
661 | return 0; |
662 | |
663 | if (is_fwnode_irqchip(fwnode: fwspec->fwnode)) { |
664 | const char *fwname = fwnode_get_name(fwnode: fwspec->fwnode); |
665 | return fwname && !strncmp(fwname, "IO-APIC-", 8) && |
666 | simple_strtol(fwname+8, NULL, 10) == fwspec->param[0]; |
667 | } |
668 | return to_of_node(fwspec->fwnode) && |
669 | of_device_is_compatible(to_of_node(fwspec->fwnode), |
670 | "intel,ce4100-ioapic"); |
671 | } |
672 | |
673 | int x86_fwspec_is_hpet(struct irq_fwspec *fwspec) |
674 | { |
675 | if (fwspec->param_count != 1) |
676 | return 0; |
677 | |
678 | if (is_fwnode_irqchip(fwnode: fwspec->fwnode)) { |
679 | const char *fwname = fwnode_get_name(fwnode: fwspec->fwnode); |
680 | return fwname && !strncmp(fwname, "HPET-MSI-", 9) && |
681 | simple_strtol(fwname+9, NULL, 10) == fwspec->param[0]; |
682 | } |
683 | return 0; |
684 | } |
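/*
 * Both helpers above match on the fwnode name registered by the
 * respective parent domain ("IO-APIC-<index>" / "HPET-MSI-<index>") and
 * compare the trailing index against fwspec->param[0].
 */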
685 | |
686 | static int x86_vector_select(struct irq_domain *d, struct irq_fwspec *fwspec, |
687 | enum irq_domain_bus_token bus_token) |
688 | { |
689 | /* |
690 | * HPET and I/OAPIC cannot be parented in the vector domain |
691 | * if IRQ remapping is enabled. APIC IDs above 15 bits are |
692 | * only permitted if IRQ remapping is enabled, so check that. |
693 | */ |
694 | if (apic_id_valid(apic_id: 32768)) |
695 | return 0; |
696 | |
697 | return x86_fwspec_is_ioapic(fwspec) || x86_fwspec_is_hpet(fwspec); |
698 | } |
699 | |
700 | static const struct irq_domain_ops x86_vector_domain_ops = { |
701 | .select = x86_vector_select, |
702 | .alloc = x86_vector_alloc_irqs, |
703 | .free = x86_vector_free_irqs, |
704 | .activate = x86_vector_activate, |
705 | .deactivate = x86_vector_deactivate, |
706 | #ifdef CONFIG_GENERIC_IRQ_DEBUGFS |
707 | .debug_show = x86_vector_debug_show, |
708 | #endif |
709 | }; |
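/*
 * x86_vector_domain is the root of the hierarchical irqdomain stack. The
 * IOAPIC, HPET and PCI/MSI domains are created as children, so their
 * irq_data chains terminate in the apic_chip_data managed here. When
 * interrupt remapping is active, the remapping domain is interposed
 * instead, which is why x86_vector_select() rejects IOAPIC/HPET fwspecs
 * in that case.
 */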
710 | |
711 | int __init arch_probe_nr_irqs(void) |
712 | { |
713 | int nr; |
714 | |
715 | if (nr_irqs > (NR_VECTORS * nr_cpu_ids)) |
716 | nr_irqs = NR_VECTORS * nr_cpu_ids; |
717 | |
718 | nr = (gsi_top + nr_legacy_irqs()) + 8 * nr_cpu_ids; |
719 | #if defined(CONFIG_PCI_MSI) |
720 | /* |
721 | * for MSI and HT dyn irq |
722 | */ |
723 | if (gsi_top <= NR_IRQS_LEGACY) |
724 | nr += 8 * nr_cpu_ids; |
725 | else |
726 | nr += gsi_top * 16; |
727 | #endif |
728 | if (nr < nr_irqs) |
729 | nr_irqs = nr; |
730 | |
731 | /* |
732 | * We don't know if PIC is present at this point so we need to do |
733 | * probe() to get the right number of legacy IRQs. |
734 | */ |
735 | return legacy_pic->probe(); |
736 | } |
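/*
 * Worked example with made-up numbers: assuming gsi_top = 40,
 * nr_legacy_irqs() = 16, nr_cpu_ids = 8 and CONFIG_PCI_MSI=y, the
 * estimate above is nr = (40 + 16) + 8 * 8 = 120; since gsi_top exceeds
 * NR_IRQS_LEGACY, another 40 * 16 = 640 is added, giving 760. nr_irqs is
 * only ever lowered to this estimate, never raised.
 */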
737 | |
738 | void lapic_assign_legacy_vector(unsigned int irq, bool replace) |
739 | { |
740 | /* |
741 | * Use assign system here so it won't get accounted as allocated |
742 | * and movable in the cpu hotplug check and it prevents managed |
743 | * irq reservation from touching it. |
744 | */ |
745 | irq_matrix_assign_system(m: vector_matrix, ISA_IRQ_VECTOR(irq), replace); |
746 | } |
747 | |
748 | void __init lapic_update_legacy_vectors(void) |
749 | { |
750 | unsigned int i; |
751 | |
752 | if (IS_ENABLED(CONFIG_X86_IO_APIC) && nr_ioapics > 0) |
753 | return; |
754 | |
755 | /* |
756 | * If the IO/APIC is disabled via config, kernel command line or |
757 | * lack of enumeration then all legacy interrupts are routed |
758 | * through the PIC. Make sure that they are marked as legacy |
759 | * vectors. PIC_CASCADE_IRQ has already been marked in |
760 | * lapic_assign_system_vectors(). |
761 | */ |
762 | for (i = 0; i < nr_legacy_irqs(); i++) { |
763 | if (i != PIC_CASCADE_IR) |
764 | lapic_assign_legacy_vector(irq: i, replace: true); |
765 | } |
766 | } |
767 | |
768 | void __init lapic_assign_system_vectors(void) |
769 | { |
770 | unsigned int i, vector; |
771 | |
772 | for_each_set_bit(vector, system_vectors, NR_VECTORS) |
773 | irq_matrix_assign_system(m: vector_matrix, bit: vector, replace: false); |
774 | |
775 | if (nr_legacy_irqs() > 1) |
776 | lapic_assign_legacy_vector(PIC_CASCADE_IR, replace: false); |
777 | |
778 | /* System vectors are reserved, now bring the vector matrix online */ |
779 | irq_matrix_online(m: vector_matrix); |
780 | |
781 | /* Mark the preallocated legacy interrupts */ |
782 | for (i = 0; i < nr_legacy_irqs(); i++) { |
783 | /* |
784 | * Don't touch the cascade interrupt. It's unusable |
785 | * on PIC equipped machines. See the large comment |
786 | * in the IO/APIC code. |
787 | */ |
788 | if (i != PIC_CASCADE_IR) |
789 | irq_matrix_assign(m: vector_matrix, ISA_IRQ_VECTOR(i)); |
790 | } |
791 | } |
792 | |
793 | int __init arch_early_irq_init(void) |
794 | { |
795 | struct fwnode_handle *fn; |
796 | |
797 | fn = irq_domain_alloc_named_fwnode(name: "VECTOR"); |
798 | BUG_ON(!fn); |
799 | x86_vector_domain = irq_domain_create_tree(fwnode: fn, ops: &x86_vector_domain_ops, |
800 | NULL); |
801 | BUG_ON(x86_vector_domain == NULL); |
802 | irq_set_default_host(host: x86_vector_domain); |
803 | |
804 | BUG_ON(!alloc_cpumask_var(&vector_searchmask, GFP_KERNEL)); |
805 | |
806 | /* |
807 | * Allocate the vector matrix allocator data structure and limit the |
808 | * search area. |
809 | */ |
810 | vector_matrix = irq_alloc_matrix(NR_VECTORS, FIRST_EXTERNAL_VECTOR, |
811 | FIRST_SYSTEM_VECTOR); |
812 | BUG_ON(!vector_matrix); |
813 | |
814 | return arch_early_ioapic_init(); |
815 | } |
816 | |
817 | #ifdef CONFIG_SMP |
818 | |
819 | static struct irq_desc *__setup_vector_irq(int vector) |
820 | { |
821 | int isairq = vector - ISA_IRQ_VECTOR(0); |
822 | |
823 | /* Check whether the irq is in the legacy space */ |
824 | if (isairq < 0 || isairq >= nr_legacy_irqs()) |
825 | return VECTOR_UNUSED; |
826 | /* Check whether the irq is handled by the IOAPIC */ |
827 | if (test_bit(isairq, &io_apic_irqs)) |
828 | return VECTOR_UNUSED; |
829 | return irq_to_desc(irq: isairq); |
830 | } |
831 | |
832 | /* Online the local APIC infrastructure and initialize the vectors */ |
833 | void lapic_online(void) |
834 | { |
835 | unsigned int vector; |
836 | |
837 | lockdep_assert_held(&vector_lock); |
838 | |
839 | /* Online the vector matrix array for this CPU */ |
840 | irq_matrix_online(m: vector_matrix); |
841 | |
842 | /* |
843 | * The interrupt affinity logic never targets interrupts to offline |
844 | * CPUs. The exceptions are the legacy PIC interrupts. In general |
845 | * they are only targeted to CPU0, but depending on the platform |
846 | * they can be distributed to any online CPU in hardware. The |
847 | * kernel has no influence on that. So all active legacy vectors |
848 | * must be installed on all CPUs. All non legacy interrupts can be |
849 | * cleared. |
850 | */ |
851 | for (vector = 0; vector < NR_VECTORS; vector++) |
852 | this_cpu_write(vector_irq[vector], __setup_vector_irq(vector)); |
853 | } |
854 | |
855 | static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr); |
856 | |
857 | void lapic_offline(void) |
858 | { |
859 | struct vector_cleanup *cl = this_cpu_ptr(&vector_cleanup); |
860 | |
861 | lock_vector_lock(); |
862 | |
863 | /* In case the vector cleanup timer has not expired */ |
864 | __vector_cleanup(cl, check_irr: false); |
865 | |
866 | irq_matrix_offline(m: vector_matrix); |
867 | WARN_ON_ONCE(try_to_del_timer_sync(&cl->timer) < 0); |
868 | WARN_ON_ONCE(!hlist_empty(&cl->head)); |
869 | |
870 | unlock_vector_lock(); |
871 | } |
872 | |
873 | static int apic_set_affinity(struct irq_data *irqd, |
874 | const struct cpumask *dest, bool force) |
875 | { |
876 | int err; |
877 | |
878 | if (WARN_ON_ONCE(!irqd_is_activated(irqd))) |
879 | return -EIO; |
880 | |
881 | raw_spin_lock(&vector_lock); |
882 | cpumask_and(dstp: vector_searchmask, src1p: dest, cpu_online_mask); |
883 | if (irqd_affinity_is_managed(d: irqd)) |
884 | err = assign_managed_vector(irqd, dest: vector_searchmask); |
885 | else |
886 | err = assign_vector_locked(irqd, dest: vector_searchmask); |
887 | raw_spin_unlock(&vector_lock); |
888 | return err ? err : IRQ_SET_MASK_OK; |
889 | } |
890 | |
891 | #else |
892 | # define apic_set_affinity NULL |
893 | #endif |
894 | |
895 | static int apic_retrigger_irq(struct irq_data *irqd) |
896 | { |
897 | struct apic_chip_data *apicd = apic_chip_data(irqd); |
898 | unsigned long flags; |
899 | |
900 | raw_spin_lock_irqsave(&vector_lock, flags); |
901 | __apic_send_IPI(cpu: apicd->cpu, vector: apicd->vector); |
902 | raw_spin_unlock_irqrestore(&vector_lock, flags); |
903 | |
904 | return 1; |
905 | } |
906 | |
907 | void apic_ack_irq(struct irq_data *irqd) |
908 | { |
909 | irq_move_irq(data: irqd); |
910 | apic_eoi(); |
911 | } |
912 | |
913 | void apic_ack_edge(struct irq_data *irqd) |
914 | { |
915 | irq_complete_move(cfg: irqd_cfg(irqd)); |
916 | apic_ack_irq(irqd); |
917 | } |
918 | |
919 | static void x86_vector_msi_compose_msg(struct irq_data *data, |
920 | struct msi_msg *msg) |
921 | { |
922 | __irq_msi_compose_msg(cfg: irqd_cfg(data), msg, dmar: false); |
923 | } |
924 | |
925 | static struct irq_chip lapic_controller = { |
926 | .name = "APIC", |
927 | .irq_ack = apic_ack_edge, |
928 | .irq_set_affinity = apic_set_affinity, |
929 | .irq_compose_msi_msg = x86_vector_msi_compose_msg, |
930 | .irq_retrigger = apic_retrigger_irq, |
931 | }; |
932 | |
933 | #ifdef CONFIG_SMP |
934 | |
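/*
 * Vector move cleanup: when an interrupt is moved to a new CPU/vector,
 * apic_update_vector() keeps the previous vector reserved until it is
 * safe to release. Once the first interrupt arrives on the new vector
 * (irq_complete_move()), the apic_chip_data is queued on the previous
 * CPU's cleanup list (__vector_schedule_cleanup()) and the per-CPU timer
 * releases the old vector after verifying that it is no longer pending
 * in the APIC's IRR.
 */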
935 | static void free_moved_vector(struct apic_chip_data *apicd) |
936 | { |
937 | unsigned int vector = apicd->prev_vector; |
938 | unsigned int cpu = apicd->prev_cpu; |
939 | bool managed = apicd->is_managed; |
940 | |
941 | /* |
942 | * Managed interrupts are usually not migrated away |
943 | * from an online CPU, but CPU isolation 'managed_irq' |
944 | * can make that happen. |
945 | * 1) Activation does not take the isolation into account |
946 | * to keep the code simple |
947 | * 2) Migration away from an isolated CPU can happen when |
948 | * a non-isolated CPU which is in the calculated |
949 | * affinity mask comes online. |
950 | */ |
951 | trace_vector_free_moved(irq: apicd->irq, cpu, vector, is_managed: managed); |
952 | irq_matrix_free(m: vector_matrix, cpu, bit: vector, managed); |
953 | per_cpu(vector_irq, cpu)[vector] = VECTOR_UNUSED; |
954 | hlist_del_init(n: &apicd->clist); |
955 | apicd->prev_vector = 0; |
956 | apicd->move_in_progress = 0; |
957 | } |
958 | |
959 | static void __vector_cleanup(struct vector_cleanup *cl, bool check_irr) |
960 | { |
961 | struct apic_chip_data *apicd; |
962 | struct hlist_node *tmp; |
963 | bool rearm = false; |
964 | |
965 | lockdep_assert_held(&vector_lock); |
966 | |
967 | hlist_for_each_entry_safe(apicd, tmp, &cl->head, clist) { |
968 | unsigned int irr, vector = apicd->prev_vector; |
969 | |
970 | /* |
971 | * Paranoia: Check if the vector that needs to be cleaned |
972 | * up is registered at the APICs IRR. That's clearly a |
973 | * hardware issue if the vector arrived on the old target |
974 | * _after_ interrupts were disabled above. Keep @apicd |
975 | * on the list and schedule the timer again to give the CPU |
976 | * a chance to handle the pending interrupt. |
977 | * |
978 | * Do not check IRR when called from lapic_offline(), because |
979 | * fixup_irqs() was just called to scan IRR for set bits and |
980 | * forward them to new destination CPUs via IPIs. |
981 | */ |
982 | irr = check_irr ? apic_read(APIC_IRR + (vector / 32 * 0x10)) : 0; |
983 | if (irr & (1U << (vector % 32))) { |
984 | pr_warn_once("Moved interrupt pending in old target APIC %u\n", apicd->irq); |
985 | rearm = true; |
986 | continue; |
987 | } |
988 | free_moved_vector(apicd); |
989 | } |
990 | |
991 | /* |
992 | * Must happen under vector_lock to make the timer_pending() check |
993 | * in __vector_schedule_cleanup() race free against the rearm here. |
994 | */ |
995 | if (rearm) |
996 | mod_timer(timer: &cl->timer, expires: jiffies + 1); |
997 | } |
998 | |
999 | static void vector_cleanup_callback(struct timer_list *tmr) |
1000 | { |
1001 | struct vector_cleanup *cl = container_of(tmr, typeof(*cl), timer); |
1002 | |
1003 | /* Prevent vectors vanishing under us */ |
1004 | raw_spin_lock_irq(&vector_lock); |
1005 | __vector_cleanup(cl, check_irr: true); |
1006 | raw_spin_unlock_irq(&vector_lock); |
1007 | } |
1008 | |
1009 | static void __vector_schedule_cleanup(struct apic_chip_data *apicd) |
1010 | { |
1011 | unsigned int cpu = apicd->prev_cpu; |
1012 | |
1013 | raw_spin_lock(&vector_lock); |
1014 | apicd->move_in_progress = 0; |
1015 | if (cpu_online(cpu)) { |
1016 | struct vector_cleanup *cl = per_cpu_ptr(&vector_cleanup, cpu); |
1017 | |
1018 | hlist_add_head(n: &apicd->clist, h: &cl->head); |
1019 | |
1020 | /* |
1021 | * The lockless timer_pending() check is safe here. If it |
1022 | * returns true, then the callback will observe this new |
1023 | * apic data in the hlist as everything is serialized by |
1024 | * vector lock. |
1025 | * |
1026 | * If it returns false then the timer is either not armed |
1027 | * or the other CPU executes the callback, which again |
1028 | * would be blocked on vector lock. Rearming it in the |
1029 | * latter case makes it fire for nothing. |
1030 | * |
1031 | * This is also safe against the callback rearming the timer |
1032 | * because that's serialized via vector lock too. |
1033 | */ |
1034 | if (!timer_pending(timer: &cl->timer)) { |
1035 | cl->timer.expires = jiffies + 1; |
1036 | add_timer_on(timer: &cl->timer, cpu); |
1037 | } |
1038 | } else { |
1039 | apicd->prev_vector = 0; |
1040 | } |
1041 | raw_spin_unlock(&vector_lock); |
1042 | } |
1043 | |
1044 | void vector_schedule_cleanup(struct irq_cfg *cfg) |
1045 | { |
1046 | struct apic_chip_data *apicd; |
1047 | |
1048 | apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); |
1049 | if (apicd->move_in_progress) |
1050 | __vector_schedule_cleanup(apicd); |
1051 | } |
1052 | |
1053 | void irq_complete_move(struct irq_cfg *cfg) |
1054 | { |
1055 | struct apic_chip_data *apicd; |
1056 | |
1057 | apicd = container_of(cfg, struct apic_chip_data, hw_irq_cfg); |
1058 | if (likely(!apicd->move_in_progress)) |
1059 | return; |
1060 | |
1061 | /* |
1062 | * If the interrupt arrived on the new target CPU, cleanup the |
1063 | * vector on the old target CPU. A vector check is not required |
1064 | * because an interrupt can never move from one vector to another |
1065 | * on the same CPU. |
1066 | */ |
1067 | if (apicd->cpu == smp_processor_id()) |
1068 | __vector_schedule_cleanup(apicd); |
1069 | } |
1070 | |
1071 | /* |
1072 | * Called from fixup_irqs() with @desc->lock held and interrupts disabled. |
1073 | */ |
1074 | void irq_force_complete_move(struct irq_desc *desc) |
1075 | { |
1076 | struct apic_chip_data *apicd; |
1077 | struct irq_data *irqd; |
1078 | unsigned int vector; |
1079 | |
1080 | /* |
1081 | * The function is called for all descriptors regardless of which |
1082 | * irqdomain they belong to. For example if an IRQ is provided by |
1083 | * an irq_chip as part of a GPIO driver, the chip data for that |
1084 | * descriptor is specific to the irq_chip in question. |
1085 | * |
1086 | * Check first that the chip_data is what we expect |
1087 | * (apic_chip_data) before touching it any further. |
1088 | */ |
1089 | irqd = irq_domain_get_irq_data(domain: x86_vector_domain, |
1090 | virq: irq_desc_get_irq(desc)); |
1091 | if (!irqd) |
1092 | return; |
1093 | |
1094 | raw_spin_lock(&vector_lock); |
1095 | apicd = apic_chip_data(irqd); |
1096 | if (!apicd) |
1097 | goto unlock; |
1098 | |
1099 | /* |
1100 | * If prev_vector is empty, no action required. |
1101 | */ |
1102 | vector = apicd->prev_vector; |
1103 | if (!vector) |
1104 | goto unlock; |
1105 | |
1106 | /* |
1107 | * This is tricky. If the cleanup of the old vector has not been |
1108 | * done yet, then the following setaffinity call will fail with |
1109 | * -EBUSY. This can leave the interrupt in a stale state. |
1110 | * |
1111 | * All CPUs are stuck in stop machine with interrupts disabled so |
1112 | * calling __irq_complete_move() would be completely pointless. |
1113 | * |
1114 | * 1) The interrupt is in move_in_progress state. That means that we |
1115 | * have not seen an interrupt since the io_apic was reprogrammed to |
1116 | * the new vector. |
1117 | * |
1118 | * 2) The interrupt has fired on the new vector, but the cleanup IPIs |
1119 | * have not been processed yet. |
1120 | */ |
1121 | if (apicd->move_in_progress) { |
1122 | /* |
1123 | * In theory there is a race: |
1124 | * |
1125 | * set_ioapic(new_vector) <-- Interrupt is raised before update |
1126 | * is effective, i.e. it's raised on |
1127 | * the old vector. |
1128 | * |
1129 | * So if the target cpu cannot handle that interrupt before |
1130 | * the old vector is cleaned up, we get a spurious interrupt |
1131 | * and in the worst case the ioapic irq line becomes stale. |
1132 | * |
1133 | * But in case of cpu hotplug this should be a non issue |
1134 | * because if the affinity update happens right before all |
1135 | * cpus rendezvous in stop machine, there is no way that the |
1136 | * interrupt can be blocked on the target cpu because all cpus |
1137 | * loop first with interrupts enabled in stop machine, so the |
1138 | * old vector is not yet cleaned up when the interrupt fires. |
1139 | * |
1140 | * So the only way to run into this issue is if the delivery |
1141 | * of the interrupt on the apic/system bus would be delayed |
1142 | * beyond the point where the target cpu disables interrupts |
1143 | * in stop machine. I doubt that it can happen, but at least |
1144 | * there is a theoretical chance. Virtualization might be |
1145 | * able to expose this, but AFAICT the IOAPIC emulation is not |
1146 | * as stupid as the real hardware. |
1147 | * |
1148 | * Anyway, there is nothing we can do about that at this point |
1149 | * w/o refactoring the whole fixup_irq() business completely. |
1150 | * We print at least the irq number and the old vector number, |
1151 | * so we have the necessary information when a problem in that |
1152 | * area arises. |
1153 | */ |
1154 | pr_warn("IRQ fixup: irq %d move in progress, old vector %d\n", |
1155 | irqd->irq, vector); |
1156 | } |
1157 | free_moved_vector(apicd); |
1158 | unlock: |
1159 | raw_spin_unlock(&vector_lock); |
1160 | } |
1161 | |
1162 | #ifdef CONFIG_HOTPLUG_CPU |
1163 | /* |
1164 | * Note, this is not accurate accounting, but at least good enough to |
1165 | * prevent the actual interrupt move from running out of vectors. |
1166 | */ |
1167 | int lapic_can_unplug_cpu(void) |
1168 | { |
1169 | unsigned int rsvd, avl, tomove, cpu = smp_processor_id(); |
1170 | int ret = 0; |
1171 | |
1172 | raw_spin_lock(&vector_lock); |
1173 | tomove = irq_matrix_allocated(m: vector_matrix); |
1174 | avl = irq_matrix_available(m: vector_matrix, cpudown: true); |
1175 | if (avl < tomove) { |
1176 | pr_warn("CPU %u has %u vectors, %u available. Cannot disable CPU\n", |
1177 | cpu, tomove, avl); |
1178 | ret = -ENOSPC; |
1179 | goto out; |
1180 | } |
1181 | rsvd = irq_matrix_reserved(m: vector_matrix); |
1182 | if (avl < rsvd) { |
1183 | pr_warn("Reserved vectors %u > available %u. IRQ request may fail\n", |
1184 | rsvd, avl); |
1185 | } |
1186 | out: |
1187 | raw_spin_unlock(&vector_lock); |
1188 | return ret; |
1189 | } |
1190 | #endif /* HOTPLUG_CPU */ |
1191 | #endif /* SMP */ |
1192 | |
1193 | static void __init print_APIC_field(int base) |
1194 | { |
1195 | int i; |
1196 | |
1197 | printk(KERN_DEBUG); |
1198 | |
1199 | for (i = 0; i < 8; i++) |
1200 | pr_cont("%08x", apic_read(base + i*0x10)); |
1201 | |
1202 | pr_cont("\n"); |
1203 | } |
1204 | |
1205 | static void __init print_local_APIC(void *dummy) |
1206 | { |
1207 | unsigned int i, v, ver, maxlvt; |
1208 | u64 icr; |
1209 | |
1210 | pr_debug("printing local APIC contents on CPU#%d/%d:\n", |
1211 | smp_processor_id(), read_apic_id()); |
1212 | v = apic_read(APIC_ID); |
1213 | pr_info("... APIC ID: %08x (%01x)\n", v, read_apic_id()); |
1214 | v = apic_read(APIC_LVR); |
1215 | pr_info("... APIC VERSION: %08x\n", v); |
1216 | ver = GET_APIC_VERSION(v); |
1217 | maxlvt = lapic_get_maxlvt(); |
1218 | |
1219 | v = apic_read(APIC_TASKPRI); |
1220 | pr_debug("... APIC TASKPRI: %08x (%02x)\n", v, v & APIC_TPRI_MASK); |
1221 | |
1222 | /* !82489DX */ |
1223 | if (APIC_INTEGRATED(ver)) { |
1224 | if (!APIC_XAPIC(ver)) { |
1225 | v = apic_read(APIC_ARBPRI); |
1226 | pr_debug("... APIC ARBPRI: %08x (%02x)\n", |
1227 | v, v & APIC_ARBPRI_MASK); |
1228 | } |
1229 | v = apic_read(APIC_PROCPRI); |
1230 | pr_debug("... APIC PROCPRI: %08x\n", v); |
1231 | } |
1232 | |
1233 | /* |
1234 | * Remote read supported only in the 82489DX and local APIC for |
1235 | * Pentium processors. |
1236 | */ |
1237 | if (!APIC_INTEGRATED(ver) || maxlvt == 3) { |
1238 | v = apic_read(APIC_RRR); |
1239 | pr_debug("... APIC RRR: %08x\n", v); |
1240 | } |
1241 | |
1242 | v = apic_read(APIC_LDR); |
1243 | pr_debug("... APIC LDR: %08x\n", v); |
1244 | if (!x2apic_enabled()) { |
1245 | v = apic_read(APIC_DFR); |
1246 | pr_debug("... APIC DFR: %08x\n", v); |
1247 | } |
1248 | v = apic_read(APIC_SPIV); |
1249 | pr_debug("... APIC SPIV: %08x\n", v); |
1250 | |
1251 | pr_debug("... APIC ISR field:\n"); |
1252 | print_APIC_field(APIC_ISR); |
1253 | pr_debug("... APIC TMR field:\n"); |
1254 | print_APIC_field(APIC_TMR); |
1255 | pr_debug("... APIC IRR field:\n"); |
1256 | print_APIC_field(APIC_IRR); |
1257 | |
1258 | /* !82489DX */ |
1259 | if (APIC_INTEGRATED(ver)) { |
1260 | /* Due to the Pentium erratum 3AP. */ |
1261 | if (maxlvt > 3) |
1262 | apic_write(APIC_ESR, val: 0); |
1263 | |
1264 | v = apic_read(APIC_ESR); |
1265 | pr_debug("... APIC ESR: %08x\n", v); |
1266 | } |
1267 | |
1268 | icr = apic_icr_read(); |
1269 | pr_debug("... APIC ICR: %08x\n", (u32)icr); |
1270 | pr_debug("... APIC ICR2: %08x\n", (u32)(icr >> 32)); |
1271 | |
1272 | v = apic_read(APIC_LVTT); |
1273 | pr_debug("... APIC LVTT: %08x\n", v); |
1274 | |
1275 | if (maxlvt > 3) { |
1276 | /* PC is LVT#4. */ |
1277 | v = apic_read(APIC_LVTPC); |
1278 | pr_debug("... APIC LVTPC: %08x\n", v); |
1279 | } |
1280 | v = apic_read(APIC_LVT0); |
1281 | pr_debug("... APIC LVT0: %08x\n", v); |
1282 | v = apic_read(APIC_LVT1); |
1283 | pr_debug("... APIC LVT1: %08x\n", v); |
1284 | |
1285 | if (maxlvt > 2) { |
1286 | /* ERR is LVT#3. */ |
1287 | v = apic_read(APIC_LVTERR); |
1288 | pr_debug("... APIC LVTERR: %08x\n", v); |
1289 | } |
1290 | |
1291 | v = apic_read(APIC_TMICT); |
1292 | pr_debug("... APIC TMICT: %08x\n", v); |
1293 | v = apic_read(APIC_TMCCT); |
1294 | pr_debug("... APIC TMCCT: %08x\n", v); |
1295 | v = apic_read(APIC_TDCR); |
1296 | pr_debug("... APIC TDCR: %08x\n", v); |
1297 | |
1298 | if (boot_cpu_has(X86_FEATURE_EXTAPIC)) { |
1299 | v = apic_read(APIC_EFEAT); |
1300 | maxlvt = (v >> 16) & 0xff; |
1301 | pr_debug("... APIC EFEAT: %08x\n", v); |
1302 | v = apic_read(APIC_ECTRL); |
1303 | pr_debug("... APIC ECTRL: %08x\n", v); |
1304 | for (i = 0; i < maxlvt; i++) { |
1305 | v = apic_read(APIC_EILVTn(i)); |
1306 | pr_debug("... APIC EILVT%d: %08x\n", i, v); |
1307 | } |
1308 | } |
1309 | pr_cont("\n"); |
1310 | } |
1311 | |
1312 | static void __init print_local_APICs(int maxcpu) |
1313 | { |
1314 | int cpu; |
1315 | |
1316 | if (!maxcpu) |
1317 | return; |
1318 | |
1319 | preempt_disable(); |
1320 | for_each_online_cpu(cpu) { |
1321 | if (cpu >= maxcpu) |
1322 | break; |
1323 | smp_call_function_single(cpuid: cpu, func: print_local_APIC, NULL, wait: 1); |
1324 | } |
1325 | preempt_enable(); |
1326 | } |
1327 | |
1328 | static void __init print_PIC(void) |
1329 | { |
1330 | unsigned int v; |
1331 | unsigned long flags; |
1332 | |
1333 | if (!nr_legacy_irqs()) |
1334 | return; |
1335 | |
1336 | pr_debug("\nprinting PIC contents\n"); |
1337 | |
1338 | raw_spin_lock_irqsave(&i8259A_lock, flags); |
1339 | |
1340 | v = inb(port: 0xa1) << 8 | inb(port: 0x21); |
1341 | pr_debug("... PIC IMR: %04x\n", v); |
1342 | |
1343 | v = inb(port: 0xa0) << 8 | inb(port: 0x20); |
1344 | pr_debug("... PIC IRR: %04x\n", v); |
1345 | |
1346 | outb(value: 0x0b, port: 0xa0); |
1347 | outb(value: 0x0b, port: 0x20); |
1348 | v = inb(port: 0xa0) << 8 | inb(port: 0x20); |
1349 | outb(value: 0x0a, port: 0xa0); |
1350 | outb(value: 0x0a, port: 0x20); |
1351 | |
1352 | raw_spin_unlock_irqrestore(&i8259A_lock, flags); |
1353 | |
1354 | pr_debug("... PIC ISR: %04x\n", v); |
1355 | |
1356 | v = inb(PIC_ELCR2) << 8 | inb(PIC_ELCR1); |
1357 | pr_debug("... PIC ELCR: %04x\n", v); |
1358 | } |
1359 | |
1360 | static int show_lapic __initdata = 1; |
1361 | static __init int setup_show_lapic(char *arg) |
1362 | { |
1363 | int num = -1; |
1364 | |
1365 | if (strcmp(arg, "all") == 0) { |
1366 | show_lapic = CONFIG_NR_CPUS; |
1367 | } else { |
1368 | get_option(str: &arg, pint: &num); |
1369 | if (num >= 0) |
1370 | show_lapic = num; |
1371 | } |
1372 | |
1373 | return 1; |
1374 | } |
1375 | __setup("show_lapic=", setup_show_lapic); |
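/*
 * Example (boot command line): "show_lapic=all" dumps the local APIC of
 * every online CPU, "show_lapic=2" limits the dump to the first two CPUs.
 * The dump is only emitted when the APIC verbosity is raised, see
 * print_ICs() below.
 */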
1376 | |
1377 | static int __init print_ICs(void) |
1378 | { |
1379 | if (apic_verbosity == APIC_QUIET) |
1380 | return 0; |
1381 | |
1382 | print_PIC(); |
1383 | |
1384 | /* don't print out if apic is not there */ |
1385 | if (!boot_cpu_has(X86_FEATURE_APIC) && !apic_from_smp_config()) |
1386 | return 0; |
1387 | |
1388 | print_local_APICs(maxcpu: show_lapic); |
1389 | print_IO_APICs(); |
1390 | |
1391 | return 0; |
1392 | } |
1393 | |
1394 | late_initcall(print_ICs); |
1395 |