// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2017 ARM Ltd.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kvm_host.h>
#include <linux/irqchip/arm-gic-v3.h>

#include "vgic.h"

/*
 * How KVM uses GICv4 (insert rude comments here):
 *
 * The vgic-v4 layer acts as a bridge between several entities:
 * - The GICv4 ITS representation offered by the ITS driver
 * - VFIO, which is in charge of the PCI endpoint
 * - The virtual ITS, which is the only thing the guest sees
 *
 * The configuration of VLPIs is triggered by a callback from VFIO,
 * instructing KVM that a PCI device has been configured to deliver
 * MSIs to a vITS.
 *
 * kvm_vgic_v4_set_forwarding() is thus called with the routing entry,
 * and this is used to find the corresponding vITS data structures
 * (ITS instance, device, event and irq) using a process that is
 * extremely similar to the injection of an MSI.
 *
 * At this stage, we can link the guest's view of an LPI (uniquely
 * identified by the routing entry) and the host irq, using the GICv4
 * driver mapping operation. Should the mapping succeed, we've then
 * successfully upgraded the guest's LPI to a VLPI. We can then start
 * with updating GICv4's view of the property table and generating an
 * INValidation in order to kickstart the delivery of this VLPI to the
 * guest directly, without software intervention. Well, almost.
 *
 * When the PCI endpoint is deconfigured, this operation is reversed
 * with VFIO calling kvm_vgic_v4_unset_forwarding().
 *
 * Once the VLPI has been mapped, it needs to follow any change the
 * guest performs on its LPI through the vITS. For that, a number of
 * command handlers have hooks to communicate these changes to the HW:
 * - Any invalidation triggers a call to its_prop_update_vlpi()
 * - The INT command results in an irq_set_irqchip_state(), which
 *   generates an INT on the corresponding VLPI.
 * - The CLEAR command results in an irq_set_irqchip_state(), which
 *   generates a CLEAR on the corresponding VLPI.
 * - DISCARD translates into an unmap, similar to a call to
 *   kvm_vgic_v4_unset_forwarding().
 * - MOVI is translated by an update of the existing mapping, changing
 *   the target vcpu, resulting in a VMOVI being generated.
 * - MOVALL is translated by a string of mapping updates (similar to
 *   the handling of MOVI). MOVALL is horrible.
 *
 * Note that a DISCARD/MAPTI sequence emitted from the guest without
 * reprogramming the PCI endpoint after MAPTI does not result in a
 * VLPI being mapped, as there is no callback from VFIO (the guest
 * will get the interrupt via the normal SW injection). Fixing this is
 * not trivial, and requires some horrible messing with the VFIO
 * internals. Not fun. Don't do that.
 *
 * Then there is the scheduling. Each time a vcpu is about to run on a
 * physical CPU, KVM must tell the corresponding redistributor about
 * it. And if we've migrated our vcpu from one CPU to another, we must
 * tell the ITS (so that the messages reach the right redistributor).
 * This is done in two steps: first issue an irq_set_affinity() on the
 * irq corresponding to the vcpu, then call its_make_vpe_resident().
 * You must be in a non-preemptible context. On exit, a call to
 * its_make_vpe_non_resident() tells the redistributor that we're done
 * with the vcpu.
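 *
 * As a rough sketch only (it simply mirrors vgic_v4_load() and
 * vgic_v4_put() below), the per-run sequence looks like this:
 *
 *      preempt_disable();
 *      irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
 *      its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
 *      ... run the guest ...
 *      its_make_vpe_non_resident(vpe, vgic_v4_want_doorbell(vcpu));
 *      preempt_enable();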
 *
 * Finally, the doorbell handling: Each vcpu is allocated an interrupt
 * which will fire each time a VLPI is made pending whilst the vcpu is
 * not running. Each time the vcpu gets blocked, the doorbell
 * interrupt gets enabled. When the vcpu is unblocked (for whatever
 * reason), the doorbell interrupt is disabled.
 */

#define DB_IRQ_FLAGS (IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY | IRQ_NO_BALANCING)

static irqreturn_t vgic_v4_doorbell_handler(int irq, void *info)
{
        struct kvm_vcpu *vcpu = info;

        /* We got the message, no need to fire again */
        if (!kvm_vgic_global_state.has_gicv4_1 &&
            !irqd_irq_disabled(&irq_to_desc(irq)->irq_data))
                disable_irq_nosync(irq);

        /*
         * The v4.1 doorbell can fire concurrently with the vPE being
         * made non-resident. Ensure we only update pending_last
         * *after* the non-residency sequence has completed.
         */
        raw_spin_lock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);
        vcpu->arch.vgic_cpu.vgic_v3.its_vpe.pending_last = true;
        raw_spin_unlock(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vpe_lock);

        kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
        kvm_vcpu_kick(vcpu);

        return IRQ_HANDLED;
}

static void vgic_v4_sync_sgi_config(struct its_vpe *vpe, struct vgic_irq *irq)
{
        vpe->sgi_config[irq->intid].enabled = irq->enabled;
        vpe->sgi_config[irq->intid].group = irq->group;
        vpe->sgi_config[irq->intid].priority = irq->priority;
}

static void vgic_v4_enable_vsgis(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int i;

        /*
         * With GICv4.1, every virtual SGI can be directly injected. So
         * let's pretend that they are HW interrupts, tied to a host
         * IRQ. The SGI code will do its magic.
         */
        for (i = 0; i < VGIC_NR_SGIS; i++) {
                struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
                struct irq_desc *desc;
                unsigned long flags;
                int ret;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (irq->hw)
                        goto unlock;

                irq->hw = true;
                irq->host_irq = irq_find_mapping(vpe->sgi_domain, i);

                /* Transfer the full irq state to the vPE */
                vgic_v4_sync_sgi_config(vpe, irq);
                desc = irq_to_desc(irq->host_irq);
                ret = irq_domain_activate_irq(irq_desc_get_irq_data(desc),
                                              false);
                if (!WARN_ON(ret)) {
                        /* Transfer pending state */
                        ret = irq_set_irqchip_state(irq->host_irq,
                                                    IRQCHIP_STATE_PENDING,
                                                    irq->pending_latch);
                        WARN_ON(ret);
                        irq->pending_latch = false;
                }
        unlock:
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

static void vgic_v4_disable_vsgis(struct kvm_vcpu *vcpu)
{
        int i;

        for (i = 0; i < VGIC_NR_SGIS; i++) {
                struct vgic_irq *irq = vgic_get_vcpu_irq(vcpu, i);
                struct irq_desc *desc;
                unsigned long flags;
                int ret;

                raw_spin_lock_irqsave(&irq->irq_lock, flags);

                if (!irq->hw)
                        goto unlock;

                irq->hw = false;
                ret = irq_get_irqchip_state(irq->host_irq,
                                            IRQCHIP_STATE_PENDING,
                                            &irq->pending_latch);
                WARN_ON(ret);

                desc = irq_to_desc(irq->host_irq);
                irq_domain_deactivate_irq(irq_desc_get_irq_data(desc));
        unlock:
                raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
                vgic_put_irq(vcpu->kvm, irq);
        }
}

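/**
 * vgic_v4_configure_vsgis - Switch vSGIs to or from direct injection
 * @kvm: Pointer to the VM
 *
 * Walk over all vcpus and either enable or disable HW-backed vSGIs,
 * depending on whether the guest has requested them (dist->nassgireq).
 * The guest is halted around the update so that pending state can be
 * transferred without racing against running vcpus.
 *
 * Must be called with kvm->arch.config_lock held.
 */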
void vgic_v4_configure_vsgis(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        unsigned long i;

        lockdep_assert_held(&kvm->arch.config_lock);

        kvm_arm_halt_guest(kvm);

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (dist->nassgireq)
                        vgic_v4_enable_vsgis(vcpu);
                else
                        vgic_v4_disable_vsgis(vcpu);
        }

        kvm_arm_resume_guest(kvm);
}

/*
 * Must be called with GICv4.1 and with the vPE unmapped, which
 * guarantees that any VPT caches associated with the vPE have been
 * invalidated, so that we can read the VLPI state directly by
 * peeking at the VPT.
 */
void vgic_v4_get_vlpi_state(struct vgic_irq *irq, bool *val)
{
        struct its_vpe *vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int mask = BIT(irq->intid % BITS_PER_BYTE);
        void *va;
        u8 *ptr;

        va = page_address(vpe->vpt_page);
        ptr = va + irq->intid / BITS_PER_BYTE;

        *val = !!(*ptr & mask);
}

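/*
 * Request the per-vPE doorbell interrupt for @vcpu, wiring it up to
 * vgic_v4_doorbell_handler() so that a pending VLPI can kick a
 * non-resident vcpu.
 */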
int vgic_v4_request_vpe_irq(struct kvm_vcpu *vcpu, int irq)
{
        return request_irq(irq, vgic_v4_doorbell_handler, 0, "vcpu", vcpu);
}

/**
 * vgic_v4_init - Initialize the GICv4 data structures
 * @kvm: Pointer to the VM being initialized
 *
 * We may be called each time a vITS is created, or when the
 * vgic is initialized. In both cases, the number of vcpus
 * should now be fixed.
 */
int vgic_v4_init(struct kvm *kvm)
{
        struct vgic_dist *dist = &kvm->arch.vgic;
        struct kvm_vcpu *vcpu;
        int nr_vcpus, ret;
        unsigned long i;

        lockdep_assert_held(&kvm->arch.config_lock);

        if (!kvm_vgic_global_state.has_gicv4)
                return 0; /* Nothing to see here... move along. */

        if (dist->its_vm.vpes)
                return 0;

        nr_vcpus = atomic_read(&kvm->online_vcpus);

        dist->its_vm.vpes = kcalloc(nr_vcpus, sizeof(*dist->its_vm.vpes),
                                    GFP_KERNEL_ACCOUNT);
        if (!dist->its_vm.vpes)
                return -ENOMEM;

        dist->its_vm.nr_vpes = nr_vcpus;

        kvm_for_each_vcpu(i, vcpu, kvm)
                dist->its_vm.vpes[i] = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

        ret = its_alloc_vcpu_irqs(&dist->its_vm);
        if (ret < 0) {
                kvm_err("VPE IRQ allocation failure\n");
                kfree(dist->its_vm.vpes);
                dist->its_vm.nr_vpes = 0;
                dist->its_vm.vpes = NULL;
                return ret;
        }

        kvm_for_each_vcpu(i, vcpu, kvm) {
                int irq = dist->its_vm.vpes[i]->irq;
                unsigned long irq_flags = DB_IRQ_FLAGS;

                /*
                 * Don't automatically enable the doorbell, as we're
                 * flipping it back and forth when the vcpu gets
                 * blocked. Also disable the lazy disabling, as the
                 * doorbell could kick us out of the guest too
                 * early...
                 *
                 * On GICv4.1, the doorbell is managed in HW and must
                 * be left enabled.
                 */
                if (kvm_vgic_global_state.has_gicv4_1)
                        irq_flags &= ~IRQ_NOAUTOEN;
                irq_set_status_flags(irq, irq_flags);

                ret = vgic_v4_request_vpe_irq(vcpu, irq);
                if (ret) {
                        kvm_err("failed to allocate vcpu IRQ%d\n", irq);
                        /*
                         * Trick: adjust the number of vpes so we know
                         * how many to nuke on teardown...
                         */
                        dist->its_vm.nr_vpes = i;
                        break;
                }
        }

        if (ret)
                vgic_v4_teardown(kvm);

        return ret;
}

/**
 * vgic_v4_teardown - Free the GICv4 data structures
 * @kvm: Pointer to the VM being destroyed
 */
void vgic_v4_teardown(struct kvm *kvm)
{
        struct its_vm *its_vm = &kvm->arch.vgic.its_vm;
        int i;

        lockdep_assert_held(&kvm->arch.config_lock);

        if (!its_vm->vpes)
                return;

        for (i = 0; i < its_vm->nr_vpes; i++) {
                struct kvm_vcpu *vcpu = kvm_get_vcpu(kvm, i);
                int irq = its_vm->vpes[i]->irq;

                irq_clear_status_flags(irq, DB_IRQ_FLAGS);
                free_irq(irq, vcpu);
        }

        its_free_vcpu_irqs(its_vm);
        kfree(its_vm->vpes);
        its_vm->nr_vpes = 0;
        its_vm->vpes = NULL;
}

static inline bool vgic_v4_want_doorbell(struct kvm_vcpu *vcpu)
{
        if (vcpu_get_flag(vcpu, IN_WFI))
                return true;

        if (likely(!vcpu_has_nv(vcpu)))
                return false;

        /*
         * GICv4 hardware is only ever used for the L1. Mark the vPE (i.e. the
         * L1 context) nonresident and request a doorbell to kick us out of the
         * L2 when an IRQ becomes pending.
         */
        return vcpu_get_flag(vcpu, IN_NESTED_ERET);
}

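/**
 * vgic_v4_put - Mark the vPE as non-resident
 * @vcpu: the vcpu being scheduled out
 *
 * Tell the redistributor that we're done with the vcpu for now,
 * requesting a doorbell if vgic_v4_want_doorbell() says we need one.
 * Does nothing if direct MSI injection is not supported or if the
 * vPE is not currently resident.
 */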
int vgic_v4_put(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

        if (!vgic_supports_direct_msis(vcpu->kvm) || !vpe->resident)
                return 0;

        return its_make_vpe_non_resident(vpe, vgic_v4_want_doorbell(vcpu));
}

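/**
 * vgic_v4_load - Make the vPE resident on the current CPU
 * @vcpu: the vcpu being scheduled in
 *
 * Point the doorbell irq at the current CPU (which the ITS driver
 * turns into a VMOVP) and make the vPE resident on the local
 * redistributor. Does nothing if direct MSI injection is not
 * supported, if the vPE is already resident, or if the vcpu is
 * sitting in WFI (in which case the doorbell stays armed).
 */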
int vgic_v4_load(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;
        int err;

        if (!vgic_supports_direct_msis(vcpu->kvm) || vpe->resident)
                return 0;

        if (vcpu_get_flag(vcpu, IN_WFI))
                return 0;

        /*
         * Before making the VPE resident, make sure the redistributor
         * corresponding to our current CPU expects us here. See the
         * doc in drivers/irqchip/irq-gic-v4.c to understand how this
         * turns into a VMOVP command at the ITS level.
         */
        err = irq_set_affinity(vpe->irq, cpumask_of(smp_processor_id()));
        if (err)
                return err;

        err = its_make_vpe_resident(vpe, false, vcpu->kvm->arch.vgic.enabled);
        if (err)
                return err;

        /*
         * Now that the VPE is resident, let's get rid of a potential
         * doorbell interrupt that would still be pending. This is a
         * GICv4.0 only "feature"...
         */
        if (!kvm_vgic_global_state.has_gicv4_1)
                err = irq_set_irqchip_state(vpe->irq, IRQCHIP_STATE_PENDING, false);

        return err;
}

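/**
 * vgic_v4_commit - Wait for the vPE to be ready before entering the guest
 * @vcpu: the vcpu about to run
 *
 * Only needed once after the vPE has been made resident; shallow
 * guest exits don't invalidate it, so the wait is skipped once the
 * vPE is marked ready.
 */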
void vgic_v4_commit(struct kvm_vcpu *vcpu)
{
        struct its_vpe *vpe = &vcpu->arch.vgic_cpu.vgic_v3.its_vpe;

        /*
         * No need to wait for the vPE to be ready across a shallow guest
         * exit, as only a vcpu_put will invalidate it.
         */
        if (!vpe->ready)
                its_commit_vpe(vpe);
}

static struct vgic_its *vgic_get_its(struct kvm *kvm,
                                     struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct kvm_msi msi = (struct kvm_msi) {
                .address_lo = irq_entry->msi.address_lo,
                .address_hi = irq_entry->msi.address_hi,
                .data = irq_entry->msi.data,
                .flags = irq_entry->msi.flags,
                .devid = irq_entry->msi.devid,
        };

        return vgic_msi_to_its(kvm, &msi);
}

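/**
 * kvm_vgic_v4_set_forwarding - Upgrade a guest LPI to a VLPI
 * @kvm: Pointer to the VM
 * @virq: The host Linux irq backing the forwarded MSI
 * @irq_entry: The routing entry describing the MSI
 *
 * Translate the routing entry into a (vITS, device, event) triplet,
 * map the resulting vLPI onto @virq at the ITS level, and transfer
 * any pending state to the HW. Returns 0 without mapping anything if
 * direct MSIs aren't supported, if the doorbell doesn't belong to any
 * of our vITSs, or if the translation fails (delivery then falls back
 * to SW injection).
 */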
int kvm_vgic_v4_set_forwarding(struct kvm *kvm, int virq,
                               struct kvm_kernel_irq_routing_entry *irq_entry)
{
        struct vgic_its *its;
        struct vgic_irq *irq;
        struct its_vlpi_map map;
        unsigned long flags;
        int ret = 0;

        if (!vgic_supports_direct_msis(kvm))
                return 0;

        /*
         * Get the ITS, and escape early on error (not a valid
         * doorbell for any of our vITSs).
         */
        its = vgic_get_its(kvm, irq_entry);
        if (IS_ERR(its))
                return 0;

        guard(mutex)(&its->its_lock);

        /*
         * Perform the actual DevID/EventID -> LPI translation.
         *
         * Silently exit if translation fails as the guest (or userspace!) has
         * managed to do something stupid. Emulated LPI injection will still
         * work if the guest figures itself out at a later time.
         */
        if (vgic_its_resolve_lpi(kvm, its, irq_entry->msi.devid,
                                 irq_entry->msi.data, &irq))
                return 0;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);

        /* Silently exit if the vLPI is already mapped */
        if (irq->hw)
                goto out_unlock_irq;

        /*
         * Emit the mapping request. If it fails, the ITS probably
         * isn't v4 compatible, so let's silently bail out. Holding
         * the ITS lock should ensure that nothing can modify the
         * target vcpu.
         */
        map = (struct its_vlpi_map) {
                .vm = &kvm->arch.vgic.its_vm,
                .vpe = &irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe,
                .vintid = irq->intid,
                .properties = ((irq->priority & 0xfc) |
                               (irq->enabled ? LPI_PROP_ENABLED : 0) |
                               LPI_PROP_GROUP1),
                .db_enabled = true,
        };

        ret = its_map_vlpi(virq, &map);
        if (ret)
                goto out_unlock_irq;

        irq->hw = true;
        irq->host_irq = virq;
        atomic_inc(&map.vpe->vlpi_count);

        /* Transfer pending state */
        if (!irq->pending_latch)
                goto out_unlock_irq;

        ret = irq_set_irqchip_state(irq->host_irq, IRQCHIP_STATE_PENDING,
                                    irq->pending_latch);
        WARN_RATELIMIT(ret, "IRQ %d", irq->host_irq);

        /*
         * Clear pending_latch and communicate this state
         * change via vgic_queue_irq_unlock.
         */
        irq->pending_latch = false;
        vgic_queue_irq_unlock(kvm, irq, flags);
        return ret;

out_unlock_irq:
        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        return ret;
}

static struct vgic_irq *__vgic_host_irq_get_vlpi(struct kvm *kvm, int host_irq)
{
        struct vgic_irq *irq;
        unsigned long idx;

        guard(rcu)();
        xa_for_each(&kvm->arch.vgic.lpi_xa, idx, irq) {
                if (!irq->hw || irq->host_irq != host_irq)
                        continue;

                if (!vgic_try_get_irq_kref(irq))
                        return NULL;

                return irq;
        }

        return NULL;
}

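/**
 * kvm_vgic_v4_unset_forwarding - Downgrade a VLPI back to a plain LPI
 * @kvm: Pointer to the VM
 * @host_irq: The host Linux irq that was forwarded
 *
 * Find the vgic_irq currently mapped onto @host_irq and, if it is
 * still HW-backed, unmap the VLPI at the ITS level so that delivery
 * reverts to SW injection.
 */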
int kvm_vgic_v4_unset_forwarding(struct kvm *kvm, int host_irq)
{
        struct vgic_irq *irq;
        unsigned long flags;
        int ret = 0;

        if (!vgic_supports_direct_msis(kvm))
                return 0;

        irq = __vgic_host_irq_get_vlpi(kvm, host_irq);
        if (!irq)
                return 0;

        raw_spin_lock_irqsave(&irq->irq_lock, flags);
        WARN_ON(irq->hw && irq->host_irq != host_irq);
        if (irq->hw) {
                atomic_dec(&irq->target_vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count);
                irq->hw = false;
                ret = its_unmap_vlpi(host_irq);
        }

        raw_spin_unlock_irqrestore(&irq->irq_lock, flags);
        vgic_put_irq(kvm, irq);
        return ret;
}