// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bug.h>
#include <linux/cpu_pm.h>
#include <linux/entry-kvm.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/mman.h>
#include <linux/sched.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/sched/stat.h>
#include <linux/psci.h>
#include <trace/events/kvm.h>

#define CREATE_TRACE_POINTS
#include "trace_arm.h"

#include <linux/uaccess.h>
#include <asm/ptrace.h>
#include <asm/mman.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/cpufeature.h>
#include <asm/virt.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/kvm_pkvm.h>
#include <asm/kvm_ptrauth.h>
#include <asm/sections.h>

#include <kvm/arm_hypercalls.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_psci.h>

#include "sys_regs.h"

static enum kvm_mode kvm_mode = KVM_MODE_DEFAULT;

enum kvm_wfx_trap_policy {
	KVM_WFX_NOTRAP_SINGLE_TASK, /* Default option */
	KVM_WFX_NOTRAP,
	KVM_WFX_TRAP,
};

static enum kvm_wfx_trap_policy kvm_wfi_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;
static enum kvm_wfx_trap_policy kvm_wfe_trap_policy __read_mostly = KVM_WFX_NOTRAP_SINGLE_TASK;

DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);

DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_base);
DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);

DECLARE_KVM_NVHE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool vgic_present, kvm_arm_initialised;

static DEFINE_PER_CPU(unsigned char, kvm_hyp_initialized);

bool is_kvm_arm_initialised(void)
{
	return kvm_arm_initialised;
}

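/*
 * Only kick (IPI) a vCPU that is still executing guest code; a vCPU that has
 * already started exiting will notice pending requests on its own.
 */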
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_exiting_guest_mode(vcpu) == IN_GUEST_MODE;
}

int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap)
{
	int r = -EINVAL;

	if (cap->flags)
		return -EINVAL;

	if (kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(cap->cap))
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_ARM_NISV_TO_USER:
		r = 0;
		set_bit(KVM_ARCH_FLAG_RETURN_NISV_IO_ABORT_TO_USER,
			&kvm->arch.flags);
		break;
	case KVM_CAP_ARM_MTE:
		mutex_lock(&kvm->lock);
		if (system_supports_mte() && !kvm->created_vcpus) {
			r = 0;
			set_bit(KVM_ARCH_FLAG_MTE_ENABLED, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
		r = 0;
		set_bit(KVM_ARCH_FLAG_SYSTEM_SUSPEND_ENABLED, &kvm->arch.flags);
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		mutex_lock(&kvm->slots_lock);
		/*
		 * To keep things simple, allow changing the chunk
		 * size only when no memory slots have been created.
		 */
		if (kvm_are_all_memslots_empty(kvm)) {
			u64 new_cap = cap->args[0];

			if (!new_cap || kvm_is_block_size_supported(new_cap)) {
				r = 0;
				kvm->arch.mmu.split_page_chunk_size = new_cap;
			}
		}
		mutex_unlock(&kvm->slots_lock);
		break;
	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
		mutex_lock(&kvm->lock);
		if (!kvm->created_vcpus) {
			r = 0;
			set_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags);
		}
		mutex_unlock(&kvm->lock);
		break;
	default:
		break;
	}

	return r;
}

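/*
 * Without an in-kernel GIC, fall back to the generic limit; otherwise the
 * host's GIC model dictates how many vCPUs can be supported.
 */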
static int kvm_arm_default_max_vcpus(void)
{
	return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
}

/**
 * kvm_arch_init_vm - initializes a VM data structure
 * @kvm:	pointer to the KVM struct
 * @type:	kvm device type
 */
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int ret;

	mutex_init(&kvm->arch.config_lock);

#ifdef CONFIG_LOCKDEP
	/* Clue in lockdep that the config_lock must be taken inside kvm->lock */
	mutex_lock(&kvm->lock);
	mutex_lock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->arch.config_lock);
	mutex_unlock(&kvm->lock);
#endif

	kvm_init_nested(kvm);

	ret = kvm_share_hyp(kvm, kvm + 1);
	if (ret)
		return ret;

	ret = pkvm_init_host_vm(kvm);
	if (ret)
		goto err_unshare_kvm;

	if (!zalloc_cpumask_var(&kvm->arch.supported_cpus, GFP_KERNEL_ACCOUNT)) {
		ret = -ENOMEM;
		goto err_unshare_kvm;
	}
	cpumask_copy(kvm->arch.supported_cpus, cpu_possible_mask);

	ret = kvm_init_stage2_mmu(kvm, &kvm->arch.mmu, type);
	if (ret)
		goto err_free_cpumask;

	kvm_vgic_early_init(kvm);

	kvm_timer_init_vm(kvm);

	/* The maximum number of VCPUs is limited by the host's GIC model */
	kvm->max_vcpus = kvm_arm_default_max_vcpus();

	kvm_arm_init_hypercalls(kvm);

	bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);

	return 0;

err_free_cpumask:
	free_cpumask_var(kvm->arch.supported_cpus);
err_unshare_kvm:
	kvm_unshare_hyp(kvm, kvm + 1);
	return ret;
}

vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

void kvm_arch_create_vm_debugfs(struct kvm *kvm)
{
	kvm_sys_regs_create_debugfs(kvm);
	kvm_s2_ptdump_create_debugfs(kvm);
}

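/*
 * Tear down the MPIDR-to-vCPU lookup table, making sure any concurrent RCU
 * readers are done with it before the memory is freed.
 */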
static void kvm_destroy_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data;

	mutex_lock(&kvm->arch.config_lock);

	data = rcu_dereference_protected(kvm->arch.mpidr_data,
					 lockdep_is_held(&kvm->arch.config_lock));
	if (data) {
		rcu_assign_pointer(kvm->arch.mpidr_data, NULL);
		synchronize_rcu();
		kfree(data);
	}

	mutex_unlock(&kvm->arch.config_lock);
}

/**
 * kvm_arch_destroy_vm - destroy the VM data structure
 * @kvm:	pointer to the KVM struct
 */
void kvm_arch_destroy_vm(struct kvm *kvm)
{
	bitmap_free(kvm->arch.pmu_filter);
	free_cpumask_var(kvm->arch.supported_cpus);

	kvm_vgic_destroy(kvm);

	if (is_protected_kvm_enabled())
		pkvm_destroy_hyp_vm(kvm);

	kvm_destroy_mpidr_data(kvm);

	kfree(kvm->arch.sysreg_masks);
	kvm_destroy_vcpus(kvm);

	kvm_unshare_hyp(kvm, kvm + 1);

	kvm_arm_teardown_hypercalls(kvm);
}

static bool kvm_has_full_ptr_auth(void)
{
	bool apa, gpa, api, gpi, apa3, gpa3;
	u64 isar1, isar2, val;

	/*
	 * Check that:
	 *
	 * - both Address and Generic auth are implemented for a given
	 *   algorithm (Q5, IMPDEF or Q3)
	 * - only a single algorithm is implemented.
	 */
	if (!system_has_full_ptr_auth())
		return false;

	isar1 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1);
	isar2 = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1);

	apa = !!FIELD_GET(ID_AA64ISAR1_EL1_APA_MASK, isar1);
	val = FIELD_GET(ID_AA64ISAR1_EL1_GPA_MASK, isar1);
	gpa = (val == ID_AA64ISAR1_EL1_GPA_IMP);

	api = !!FIELD_GET(ID_AA64ISAR1_EL1_API_MASK, isar1);
	val = FIELD_GET(ID_AA64ISAR1_EL1_GPI_MASK, isar1);
	gpi = (val == ID_AA64ISAR1_EL1_GPI_IMP);

	apa3 = !!FIELD_GET(ID_AA64ISAR2_EL1_APA3_MASK, isar2);
	val = FIELD_GET(ID_AA64ISAR2_EL1_GPA3_MASK, isar2);
	gpa3 = (val == ID_AA64ISAR2_EL1_GPA3_IMP);

	return (apa == gpa && api == gpi && apa3 == gpa3 &&
		(apa + api + apa3) == 1);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	if (kvm && kvm_vm_is_protected(kvm) && !kvm_pvm_ext_allowed(ext))
		return 0;

	switch (ext) {
	case KVM_CAP_IRQCHIP:
		r = vgic_present;
		break;
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_USER_MEMORY:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ARM_PSCI:
	case KVM_CAP_ARM_PSCI_0_2:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_IMMEDIATE_EXIT:
	case KVM_CAP_VCPU_EVENTS:
	case KVM_CAP_ARM_IRQ_LINE_LAYOUT_2:
	case KVM_CAP_ARM_NISV_TO_USER:
	case KVM_CAP_ARM_INJECT_EXT_DABT:
	case KVM_CAP_SET_GUEST_DEBUG:
	case KVM_CAP_VCPU_ATTRIBUTES:
	case KVM_CAP_PTP_KVM:
	case KVM_CAP_ARM_SYSTEM_SUSPEND:
	case KVM_CAP_IRQFD_RESAMPLE:
	case KVM_CAP_COUNTER_OFFSET:
	case KVM_CAP_ARM_WRITABLE_IMP_ID_REGS:
		r = 1;
		break;
	case KVM_CAP_SET_GUEST_DEBUG2:
		return KVM_GUESTDBG_VALID_MASK;
	case KVM_CAP_ARM_SET_DEVICE_ADDR:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * ARM64 treats KVM_CAP_NR_CPUS differently from all other
		 * architectures, as it does not always bound it to
		 * KVM_CAP_MAX_VCPUS. It should not matter much because
		 * this is just an advisory value.
		 */
		r = min_t(unsigned int, num_online_cpus(),
			  kvm_arm_default_max_vcpus());
		break;
	case KVM_CAP_MAX_VCPUS:
	case KVM_CAP_MAX_VCPU_ID:
		if (kvm)
			r = kvm->max_vcpus;
		else
			r = kvm_arm_default_max_vcpus();
		break;
	case KVM_CAP_MSI_DEVID:
		if (!kvm)
			r = -EINVAL;
		else
			r = kvm->arch.vgic.msis_require_devid;
		break;
	case KVM_CAP_ARM_USER_IRQ:
		/*
		 * 1: EL1_VTIMER, EL1_PTIMER, and PMU.
		 * (bump this number if adding more devices)
		 */
		r = 1;
		break;
	case KVM_CAP_ARM_MTE:
		r = system_supports_mte();
		break;
	case KVM_CAP_STEAL_TIME:
		r = kvm_arm_pvtime_supported();
		break;
	case KVM_CAP_ARM_EL1_32BIT:
		r = cpus_have_final_cap(ARM64_HAS_32BIT_EL1);
		break;
	case KVM_CAP_ARM_EL2:
		r = cpus_have_final_cap(ARM64_HAS_NESTED_VIRT);
		break;
	case KVM_CAP_ARM_EL2_E2H0:
		r = cpus_have_final_cap(ARM64_HAS_HCR_NV1);
		break;
	case KVM_CAP_GUEST_DEBUG_HW_BPS:
		r = get_num_brps();
		break;
	case KVM_CAP_GUEST_DEBUG_HW_WPS:
		r = get_num_wrps();
		break;
	case KVM_CAP_ARM_PMU_V3:
		r = kvm_supports_guest_pmuv3();
		break;
	case KVM_CAP_ARM_INJECT_SERROR_ESR:
		r = cpus_have_final_cap(ARM64_HAS_RAS_EXTN);
		break;
	case KVM_CAP_ARM_VM_IPA_SIZE:
		r = get_kvm_ipa_limit();
		break;
	case KVM_CAP_ARM_SVE:
		r = system_supports_sve();
		break;
	case KVM_CAP_ARM_PTRAUTH_ADDRESS:
	case KVM_CAP_ARM_PTRAUTH_GENERIC:
		r = kvm_has_full_ptr_auth();
		break;
	case KVM_CAP_ARM_EAGER_SPLIT_CHUNK_SIZE:
		if (kvm)
			r = kvm->arch.mmu.split_page_chunk_size;
		else
			r = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT;
		break;
	case KVM_CAP_ARM_SUPPORTED_BLOCK_SIZES:
		r = kvm_supported_block_sizes();
		break;
	case KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES:
		r = BIT(0);
		break;
	default:
		r = 0;
	}

	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

struct kvm *kvm_arch_alloc_vm(void)
{
	size_t sz = sizeof(struct kvm);

	if (!has_vhe())
		return kzalloc(sz, GFP_KERNEL_ACCOUNT);

	return __vmalloc(sz, GFP_KERNEL_ACCOUNT | __GFP_HIGHMEM | __GFP_ZERO);
}

int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
{
	if (irqchip_in_kernel(kvm) && vgic_initialized(kvm))
		return -EBUSY;

	if (id >= kvm->max_vcpus)
		return -EINVAL;

	return 0;
}

int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
{
	int err;

	spin_lock_init(&vcpu->arch.mp_state_lock);

#ifdef CONFIG_LOCKDEP
	/* Inform lockdep that the config_lock is acquired after vcpu->mutex */
	mutex_lock(&vcpu->mutex);
	mutex_lock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->kvm->arch.config_lock);
	mutex_unlock(&vcpu->mutex);
#endif

	/* Force users to call KVM_ARM_VCPU_INIT */
	vcpu_clear_flag(vcpu, VCPU_INITIALIZED);

	vcpu->arch.mmu_page_cache.gfp_zero = __GFP_ZERO;

	/* Set up the timer */
	kvm_timer_vcpu_init(vcpu);

	kvm_pmu_vcpu_init(vcpu);

	kvm_arm_pvtime_vcpu_init(&vcpu->arch);

	vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;

	/*
	 * This vCPU may have been created after mpidr_data was initialized.
	 * Throw out the pre-computed mappings if that is the case, which forces
	 * KVM to fall back to iteratively searching the vCPUs.
	 */
	kvm_destroy_mpidr_data(vcpu->kvm);

	err = kvm_vgic_vcpu_init(vcpu);
	if (err)
		return err;

	err = kvm_share_hyp(vcpu, vcpu + 1);
	if (err)
		kvm_vgic_vcpu_destroy(vcpu);

	return err;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	if (!is_protected_kvm_enabled())
		kvm_mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
	else
		free_hyp_memcache(&vcpu->arch.pkvm_memcache);
	kvm_timer_vcpu_terminate(vcpu);
	kvm_pmu_vcpu_destroy(vcpu);
	kvm_vgic_vcpu_destroy(vcpu);
	kvm_arm_vcpu_destroy(vcpu);
}

void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{

}

void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu)
{

}

static void vcpu_set_pauth_traps(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_ptrauth(vcpu) && !is_protected_kvm_enabled()) {
		/*
		 * Either we're running an L2 guest, and the API/APK bits come
		 * from L1's HCR_EL2, or API/APK are both set.
		 */
		if (unlikely(vcpu_has_nv(vcpu) && !is_hyp_ctxt(vcpu))) {
			u64 val;

			val = __vcpu_sys_reg(vcpu, HCR_EL2);
			val &= (HCR_API | HCR_APK);
			vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
			vcpu->arch.hcr_el2 |= val;
		} else {
			vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
		}

		/*
		 * Save the host keys if there is any chance for the guest
		 * to use pauth, as the entry code will reload the guest
		 * keys in that case.
		 */
		if (vcpu->arch.hcr_el2 & (HCR_API | HCR_APK)) {
			struct kvm_cpu_context *ctxt;

			ctxt = this_cpu_ptr_hyp_sym(kvm_hyp_ctxt);
			ptrauth_save_keys(ctxt);
		}
	}
}

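/*
 * With the default "notrap, single task" policy, WFI traps are only disabled
 * when this vCPU's thread is the sole runnable task on the CPU and direct
 * injection (GICv4 vLPIs or vSGIs) may deliver wake-up events without KVM's
 * involvement.
 */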
static bool kvm_vcpu_should_clear_twi(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_wfi_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
		return kvm_wfi_trap_policy == KVM_WFX_NOTRAP;

	return single_task_running() &&
	       (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
		vcpu->kvm->arch.vgic.nassgireq);
}

static bool kvm_vcpu_should_clear_twe(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_wfe_trap_policy != KVM_WFX_NOTRAP_SINGLE_TASK))
		return kvm_wfe_trap_policy == KVM_WFX_NOTRAP;

	return single_task_running();
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvm_s2_mmu *mmu;
	int *last_ran;

	if (is_protected_kvm_enabled())
		goto nommu;

	if (vcpu_has_nv(vcpu))
		kvm_vcpu_load_hw_mmu(vcpu);

	mmu = vcpu->arch.hw_mmu;
	last_ran = this_cpu_ptr(mmu->last_vcpu_ran);

	/*
	 * Ensure a VMID is allocated for the MMU before programming VTTBR_EL2,
	 * which happens eagerly in VHE.
	 *
	 * Also, the VMID allocator only preserves VMIDs that are active at the
	 * time of rollover, so KVM might need to grab a new VMID for the MMU if
	 * this is called from kvm_sched_in().
	 */
	kvm_arm_vmid_update(&mmu->vmid);

	/*
	 * We guarantee that both TLBs and I-cache are private to each
	 * vcpu. If detecting that a vcpu from the same VM has
	 * previously run on the same physical CPU, call into the
	 * hypervisor code to nuke the relevant contexts.
	 *
	 * We might get preempted before the vCPU actually runs, but
	 * over-invalidation doesn't affect correctness.
	 */
	if (*last_ran != vcpu->vcpu_idx) {
		kvm_call_hyp(__kvm_flush_cpu_context, mmu);
		*last_ran = vcpu->vcpu_idx;
	}

nommu:
	vcpu->cpu = cpu;

	/*
	 * The timer must be loaded before the vgic to correctly set up physical
	 * interrupt deactivation in nested state (e.g. timer interrupt).
	 */
	kvm_timer_vcpu_load(vcpu);
	kvm_vgic_load(vcpu);
	kvm_vcpu_load_debug(vcpu);
	if (has_vhe())
		kvm_vcpu_load_vhe(vcpu);
	kvm_arch_vcpu_load_fp(vcpu);
	kvm_vcpu_pmu_restore_guest(vcpu);
	if (kvm_arm_is_pvtime_enabled(&vcpu->arch))
		kvm_make_request(KVM_REQ_RECORD_STEAL, vcpu);

	if (kvm_vcpu_should_clear_twe(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_TWE;
	else
		vcpu->arch.hcr_el2 |= HCR_TWE;

	if (kvm_vcpu_should_clear_twi(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;

	vcpu_set_pauth_traps(vcpu);

	if (is_protected_kvm_enabled()) {
		kvm_call_hyp_nvhe(__pkvm_vcpu_load,
				  vcpu->kvm->arch.pkvm.handle,
				  vcpu->vcpu_idx, vcpu->arch.hcr_el2);
		kvm_call_hyp(__vgic_v3_restore_vmcr_aprs,
			     &vcpu->arch.vgic_cpu.vgic_v3);
	}

	if (!cpumask_test_cpu(cpu, vcpu->kvm->arch.supported_cpus))
		vcpu_set_on_unsupported_cpu(vcpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (is_protected_kvm_enabled()) {
		kvm_call_hyp(__vgic_v3_save_vmcr_aprs,
			     &vcpu->arch.vgic_cpu.vgic_v3);
		kvm_call_hyp_nvhe(__pkvm_vcpu_put);
	}

	kvm_vcpu_put_debug(vcpu);
	kvm_arch_vcpu_put_fp(vcpu);
	if (has_vhe())
		kvm_vcpu_put_vhe(vcpu);
	kvm_timer_vcpu_put(vcpu);
	kvm_vgic_put(vcpu);
	kvm_vcpu_pmu_restore_host(vcpu);
	if (vcpu_has_nv(vcpu))
		kvm_vcpu_put_hw_mmu(vcpu);
	kvm_arm_vmid_clear_active();

	vcpu_clear_on_unsupported_cpu(vcpu);
	vcpu->cpu = -1;
}

static void __kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
	kvm_make_request(KVM_REQ_SLEEP, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvm_arm_vcpu_power_off(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.mp_state_lock);
	__kvm_arm_vcpu_power_off(vcpu);
	spin_unlock(&vcpu->arch.mp_state_lock);
}

bool kvm_arm_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_STOPPED;
}

static void kvm_arm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_SUSPENDED);
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);
	kvm_vcpu_kick(vcpu);
}

static bool kvm_arm_vcpu_suspended(struct kvm_vcpu *vcpu)
{
	return READ_ONCE(vcpu->arch.mp_state.mp_state) == KVM_MP_STATE_SUSPENDED;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	*mp_state = READ_ONCE(vcpu->arch.mp_state);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int ret = 0;

	spin_lock(&vcpu->arch.mp_state_lock);

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_RUNNABLE:
		WRITE_ONCE(vcpu->arch.mp_state, *mp_state);
		break;
	case KVM_MP_STATE_STOPPED:
		__kvm_arm_vcpu_power_off(vcpu);
		break;
	case KVM_MP_STATE_SUSPENDED:
		kvm_arm_vcpu_suspend(vcpu);
		break;
	default:
		ret = -EINVAL;
	}

	spin_unlock(&vcpu->arch.mp_state_lock);

	return ret;
}

/**
 * kvm_arch_vcpu_runnable - determine if the vcpu can be scheduled
 * @v:		The VCPU pointer
 *
 * If the guest CPU is not waiting for interrupts or an interrupt line is
 * asserted, the CPU is by definition runnable.
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	bool irq_lines = *vcpu_hcr(v) & (HCR_VI | HCR_VF);
	return ((irq_lines || kvm_vgic_vcpu_pending_irq(v))
		&& !kvm_arm_vcpu_stopped(v) && !v->arch.pause);
}

bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
{
	return vcpu_mode_priv(vcpu);
}

#ifdef CONFIG_GUEST_PERF_EVENTS
unsigned long kvm_arch_vcpu_get_ip(struct kvm_vcpu *vcpu)
{
	return *vcpu_pc(vcpu);
}
#endif

static void kvm_init_mpidr_data(struct kvm *kvm)
{
	struct kvm_mpidr_data *data = NULL;
	unsigned long c, mask, nr_entries;
	u64 aff_set = 0, aff_clr = ~0UL;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->arch.config_lock);

	if (rcu_access_pointer(kvm->arch.mpidr_data) ||
	    atomic_read(&kvm->online_vcpus) == 1)
		goto out;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		aff_set |= aff;
		aff_clr &= aff;
	}

	/*
	 * A significant bit can be either 0 or 1, and will only appear in
	 * aff_set. Use aff_clr to weed out the useless stuff.
	 */
	mask = aff_set ^ aff_clr;
	nr_entries = BIT_ULL(hweight_long(mask));

	/*
	 * Don't let userspace fool us. If we need more than a single page
	 * to describe the compressed MPIDR array, just fall back to the
	 * iterative method. Single vcpu VMs do not need this either.
	 */
	if (struct_size(data, cmpidr_to_idx, nr_entries) <= PAGE_SIZE)
		data = kzalloc(struct_size(data, cmpidr_to_idx, nr_entries),
			       GFP_KERNEL_ACCOUNT);

	if (!data)
		goto out;

	data->mpidr_mask = mask;

	kvm_for_each_vcpu(c, vcpu, kvm) {
		u64 aff = kvm_vcpu_get_mpidr_aff(vcpu);
		u16 index = kvm_mpidr_index(data, aff);

		data->cmpidr_to_idx[index] = c;
	}

	rcu_assign_pointer(kvm->arch.mpidr_data, data);
out:
	mutex_unlock(&kvm->arch.config_lock);
}

/*
 * Handle both the initialisation that is being done when the vcpu is
 * run for the first time, as well as the updates that must be
 * performed each time we get a new thread dealing with this vcpu.
 */
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	int ret;

	if (!kvm_vcpu_initialized(vcpu))
		return -ENOEXEC;

	if (!kvm_arm_vcpu_is_finalized(vcpu))
		return -EPERM;

	ret = kvm_arch_vcpu_run_map_fp(vcpu);
	if (ret)
		return ret;

	if (likely(vcpu_has_run_once(vcpu)))
		return 0;

	kvm_init_mpidr_data(kvm);

	if (likely(irqchip_in_kernel(kvm))) {
		/*
		 * Map the VGIC hardware resources before running a vcpu the
		 * first time on this VM.
		 */
		ret = kvm_vgic_map_resources(kvm);
		if (ret)
			return ret;
	}

	ret = kvm_finalize_sys_regs(vcpu);
	if (ret)
		return ret;

	if (vcpu_has_nv(vcpu)) {
		ret = kvm_vcpu_allocate_vncr_tlb(vcpu);
		if (ret)
			return ret;

		ret = kvm_vgic_vcpu_nv_init(vcpu);
		if (ret)
			return ret;
	}

	/*
	 * This needs to happen after any restriction has been applied
	 * to the feature set.
	 */
	kvm_calculate_traps(vcpu);

	ret = kvm_timer_enable(vcpu);
	if (ret)
		return ret;

	if (kvm_vcpu_has_pmu(vcpu)) {
		ret = kvm_arm_pmu_v3_enable(vcpu);
		if (ret)
			return ret;
	}

	if (is_protected_kvm_enabled()) {
		ret = pkvm_create_hyp_vm(kvm);
		if (ret)
			return ret;

		ret = pkvm_create_hyp_vcpu(vcpu);
		if (ret)
			return ret;
	}

	mutex_lock(&kvm->arch.config_lock);
	set_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags);
	mutex_unlock(&kvm->arch.config_lock);

	return ret;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return vgic_initialized(kvm);
}

void kvm_arm_halt_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		vcpu->arch.pause = true;
	kvm_make_all_cpus_request(kvm, KVM_REQ_SLEEP);
}

void kvm_arm_resume_guest(struct kvm *kvm)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		vcpu->arch.pause = false;
		__kvm_vcpu_wake_up(vcpu);
	}
}

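/*
 * Put the vCPU thread into an interruptible wait until it is neither stopped
 * nor paused. If we wake up for any other reason (e.g. a signal), re-queue
 * the sleep request so it is honoured on the next entry attempt.
 */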
static void kvm_vcpu_sleep(struct kvm_vcpu *vcpu)
{
	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);

	rcuwait_wait_event(wait,
			   (!kvm_arm_vcpu_stopped(vcpu)) && (!vcpu->arch.pause),
			   TASK_INTERRUPTIBLE);

	if (kvm_arm_vcpu_stopped(vcpu) || vcpu->arch.pause) {
		/* Awaken to handle a signal, request we sleep again later. */
		kvm_make_request(KVM_REQ_SLEEP, vcpu);
	}

	/*
	 * Make sure we will observe a potential reset request if we've
	 * observed a change to the power state. Pairs with the smp_wmb() in
	 * kvm_psci_vcpu_on().
	 */
	smp_rmb();
}

/**
 * kvm_vcpu_wfi - emulate Wait-For-Interrupt behavior
 * @vcpu:	The VCPU pointer
 *
 * Suspend execution of a vCPU until a valid wake event is detected, i.e. until
 * the vCPU is runnable. The vCPU may or may not be scheduled out, depending
 * on when a wake event arrives, e.g. there may already be a pending wake event.
 */
void kvm_vcpu_wfi(struct kvm_vcpu *vcpu)
{
	/*
	 * Sync back the state of the GIC CPU interface so that we have
	 * the latest PMR and group enables. This ensures that
	 * kvm_arch_vcpu_runnable has up-to-date data to decide whether
	 * we have pending interrupts, e.g. when determining if the
	 * vCPU should block.
	 *
	 * For the same reason, we want to tell GICv4 that we need
	 * doorbells to be signalled, should an interrupt become pending.
	 */
	preempt_disable();
	vcpu_set_flag(vcpu, IN_WFI);
	kvm_vgic_put(vcpu);
	preempt_enable();

	kvm_vcpu_halt(vcpu);
	vcpu_clear_flag(vcpu, IN_WFIT);

	preempt_disable();
	vcpu_clear_flag(vcpu, IN_WFI);
	kvm_vgic_load(vcpu);
	preempt_enable();
}

static int kvm_vcpu_suspend(struct kvm_vcpu *vcpu)
{
	if (!kvm_arm_vcpu_suspended(vcpu))
		return 1;

	kvm_vcpu_wfi(vcpu);

	/*
	 * The suspend state is sticky; we do not leave it until userspace
	 * explicitly marks the vCPU as runnable. Request that we suspend again
	 * later.
	 */
	kvm_make_request(KVM_REQ_SUSPEND, vcpu);

	/*
	 * Check to make sure the vCPU is actually runnable. If so, exit to
	 * userspace informing it of the wakeup condition.
	 */
	if (kvm_arch_vcpu_runnable(vcpu)) {
		memset(&vcpu->run->system_event, 0, sizeof(vcpu->run->system_event));
		vcpu->run->system_event.type = KVM_SYSTEM_EVENT_WAKEUP;
		vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
		return 0;
	}

	/*
	 * Otherwise, we were unblocked to process a different event, such as a
	 * pending signal. Return 1 and allow kvm_arch_vcpu_ioctl_run() to
	 * process the event.
	 */
	return 1;
}

/**
 * check_vcpu_requests - check and handle pending vCPU requests
 * @vcpu:	the VCPU pointer
 *
 * Return: 1 if we should enter the guest
 *	   0 if we should exit to userspace
 *	   < 0 if we should exit to userspace, where the return value indicates
 *	   an error
 */
static int check_vcpu_requests(struct kvm_vcpu *vcpu)
{
	if (kvm_request_pending(vcpu)) {
		if (kvm_check_request(KVM_REQ_VM_DEAD, vcpu))
			return -EIO;

		if (kvm_check_request(KVM_REQ_SLEEP, vcpu))
			kvm_vcpu_sleep(vcpu);

		if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu))
			kvm_reset_vcpu(vcpu);

		/*
		 * Clear IRQ_PENDING requests that were made to guarantee
		 * that a VCPU sees new virtual interrupts.
		 */
		kvm_check_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (kvm_check_request(KVM_REQ_RECORD_STEAL, vcpu))
			kvm_update_stolen_time(vcpu);

		if (kvm_check_request(KVM_REQ_RELOAD_GICv4, vcpu)) {
			/* The distributor enable bits were changed */
			preempt_disable();
			vgic_v4_put(vcpu);
			vgic_v4_load(vcpu);
			preempt_enable();
		}

		if (kvm_check_request(KVM_REQ_RELOAD_PMU, vcpu))
			kvm_vcpu_reload_pmu(vcpu);

		if (kvm_check_request(KVM_REQ_RESYNC_PMU_EL0, vcpu))
			kvm_vcpu_pmu_restore_guest(vcpu);

		if (kvm_check_request(KVM_REQ_SUSPEND, vcpu))
			return kvm_vcpu_suspend(vcpu);

		if (kvm_dirty_ring_check_request(vcpu))
			return 0;

		check_nested_vcpu_requests(vcpu);
	}

	return 1;
}

static bool vcpu_mode_is_bad_32bit(struct kvm_vcpu *vcpu)
{
	if (likely(!vcpu_mode_is_32bit(vcpu)))
		return false;

	if (vcpu_has_nv(vcpu))
		return true;

	return !kvm_supports_32bit_el0();
}

/**
 * kvm_vcpu_exit_request - returns true if the VCPU should *not* enter the guest
 * @vcpu:	The VCPU pointer
 * @ret:	Pointer to write optional return code
 *
 * Returns: true if the VCPU needs to return to a preemptible + interruptible
 *	    kernel context and skip guest entry.
 *
 * This function disambiguates between two different types of exits: exits to a
 * preemptible + interruptible kernel context and exits to userspace. For an
 * exit to userspace, this function will write the return code to ret and return
 * true. For an exit to preemptible + interruptible kernel context (i.e. check
 * for pending work and re-enter), return true without writing to ret.
 */
static bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu, int *ret)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If we're using a userspace irqchip, then check if we need
	 * to tell a userspace irqchip about timer or PMU level
	 * changes and if so, exit to userspace (the actual level
	 * state gets updated in kvm_timer_update_run and
	 * kvm_pmu_update_run below).
	 */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		if (kvm_timer_should_notify_user(vcpu) ||
		    kvm_pmu_should_notify_user(vcpu)) {
			*ret = -EINTR;
			run->exit_reason = KVM_EXIT_INTR;
			return true;
		}
	}

	if (unlikely(vcpu_on_unsupported_cpu(vcpu))) {
		run->exit_reason = KVM_EXIT_FAIL_ENTRY;
		run->fail_entry.hardware_entry_failure_reason = KVM_EXIT_FAIL_ENTRY_CPU_UNSUPPORTED;
		run->fail_entry.cpu = smp_processor_id();
		*ret = 0;
		return true;
	}

	return kvm_request_pending(vcpu) ||
			xfer_to_guest_mode_work_pending();
}

/*
 * Actually run the vCPU, entering an RCU extended quiescent state (EQS) while
 * the vCPU is running.
 *
 * This must be noinstr as instrumentation may make use of RCU, and this is not
 * safe during the EQS.
 */
static int noinstr kvm_arm_vcpu_enter_exit(struct kvm_vcpu *vcpu)
{
	int ret;

	guest_state_enter_irqoff();
	ret = kvm_call_hyp_ret(__kvm_vcpu_run, vcpu);
	guest_state_exit_irqoff();

	return ret;
}

/**
 * kvm_arch_vcpu_ioctl_run - the main VCPU run function to execute guest code
 * @vcpu:	The VCPU pointer
 *
 * This function is called through the VCPU_RUN ioctl called from user space. It
 * will execute VM code in a loop until the time slice for the process is used
 * or some emulation is needed from user space in which case the function will
 * return with return value 0 and with the kvm_run structure filled in with the
 * required data for the requested emulation.
 */
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	int ret;

	if (run->exit_reason == KVM_EXIT_MMIO) {
		ret = kvm_handle_mmio_return(vcpu);
		if (ret <= 0)
			return ret;
	}

	vcpu_load(vcpu);

	if (!vcpu->wants_to_run) {
		ret = -EINTR;
		goto out;
	}

	kvm_sigset_activate(vcpu);

	ret = 1;
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->flags = 0;
	while (ret > 0) {
		/*
		 * Check conditions before entering the guest
		 */
		ret = xfer_to_guest_mode_handle_work(vcpu);
		if (!ret)
			ret = 1;

		if (ret > 0)
			ret = check_vcpu_requests(vcpu);

		/*
		 * Preparing the interrupts to be injected also
		 * involves poking the GIC, which must be done in a
		 * non-preemptible context.
		 */
		preempt_disable();

		if (kvm_vcpu_has_pmu(vcpu))
			kvm_pmu_flush_hwstate(vcpu);

		local_irq_disable();

		kvm_vgic_flush_hwstate(vcpu);

		kvm_pmu_update_vcpu_events(vcpu);

		/*
		 * Ensure we set mode to IN_GUEST_MODE after we disable
		 * interrupts and before the final VCPU requests check.
		 * See the comment in kvm_vcpu_exiting_guest_mode() and
		 * Documentation/virt/kvm/vcpu-requests.rst
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		if (ret <= 0 || kvm_vcpu_exit_request(vcpu, &ret)) {
			vcpu->mode = OUTSIDE_GUEST_MODE;
			isb(); /* Ensure work in x_flush_hwstate is committed */
			if (kvm_vcpu_has_pmu(vcpu))
				kvm_pmu_sync_hwstate(vcpu);
			if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
				kvm_timer_sync_user(vcpu);
			kvm_vgic_sync_hwstate(vcpu);
			local_irq_enable();
			preempt_enable();
			continue;
		}

		kvm_arch_vcpu_ctxflush_fp(vcpu);

		/**************************************************************
		 * Enter the guest
		 */
		trace_kvm_entry(*vcpu_pc(vcpu));
		guest_timing_enter_irqoff();

		ret = kvm_arm_vcpu_enter_exit(vcpu);

		vcpu->mode = OUTSIDE_GUEST_MODE;
		vcpu->stat.exits++;
		/*
		 * Back from guest
		 *************************************************************/

		/*
		 * We must sync the PMU state before the vgic state so
		 * that the vgic can properly sample the updated state of the
		 * interrupt line.
		 */
		if (kvm_vcpu_has_pmu(vcpu))
			kvm_pmu_sync_hwstate(vcpu);

		/*
		 * Sync the vgic state before syncing the timer state because
		 * the timer code needs to know if the virtual timer
		 * interrupts are active.
		 */
		kvm_vgic_sync_hwstate(vcpu);

		/*
		 * Sync the timer hardware state before enabling interrupts as
		 * we don't want vtimer interrupts to race with syncing the
		 * timer virtual interrupt state.
		 */
		if (unlikely(!irqchip_in_kernel(vcpu->kvm)))
			kvm_timer_sync_user(vcpu);

		if (is_hyp_ctxt(vcpu))
			kvm_timer_sync_nested(vcpu);

		kvm_arch_vcpu_ctxsync_fp(vcpu);

		/*
		 * We must ensure that any pending interrupts are taken before
		 * we exit guest timing so that timer ticks are accounted as
		 * guest time. Transiently unmask interrupts so that any
		 * pending interrupts are taken.
		 *
		 * Per ARM DDI 0487G.b section D1.13.4, an ISB (or other
		 * context synchronization event) is necessary to ensure that
		 * pending interrupts are taken.
		 */
		if (ARM_EXCEPTION_CODE(ret) == ARM_EXCEPTION_IRQ) {
			local_irq_enable();
			isb();
			local_irq_disable();
		}

		guest_timing_exit_irqoff();

		local_irq_enable();

		trace_kvm_exit(ret, kvm_vcpu_trap_get_class(vcpu), *vcpu_pc(vcpu));

		/* Exit types that need handling before we can be preempted */
		handle_exit_early(vcpu, ret);

		preempt_enable();

		/*
		 * The ARMv8 architecture doesn't give the hypervisor
		 * a mechanism to prevent a guest from dropping to AArch32 EL0
		 * if implemented by the CPU. If we spot the guest in such
		 * state and that we decided it wasn't supposed to do so (like
		 * with the asymmetric AArch32 case), return to userspace with
		 * a fatal error.
		 */
		if (vcpu_mode_is_bad_32bit(vcpu)) {
			/*
			 * As we have caught the guest red-handed, decide that
			 * it isn't fit for purpose anymore by making the vcpu
			 * invalid. The VMM can try and fix it by issuing a
			 * KVM_ARM_VCPU_INIT if it really wants to.
			 */
			vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
			ret = ARM_EXCEPTION_IL;
		}

		ret = handle_exit(vcpu, ret);
	}

	/* Tell userspace about in-kernel device output levels */
	if (unlikely(!irqchip_in_kernel(vcpu->kvm))) {
		kvm_timer_update_run(vcpu);
		kvm_pmu_update_run(vcpu);
	}

	kvm_sigset_deactivate(vcpu);

out:
	/*
	 * In the unlikely event that we are returning to userspace
	 * with pending exceptions or PC adjustment, commit these
	 * adjustments in order to give userspace a consistent view of
	 * the vcpu state. Note that this relies on __kvm_adjust_pc()
	 * being preempt-safe on VHE.
	 */
	if (unlikely(vcpu_get_flag(vcpu, PENDING_EXCEPTION) ||
		     vcpu_get_flag(vcpu, INCREMENT_PC)))
		kvm_call_hyp(__kvm_adjust_pc, vcpu);

	vcpu_put(vcpu);
	return ret;
}

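/*
 * Drive the virtual IRQ/FIQ input lines for a userspace irqchip by toggling
 * HCR_EL2.VI/VF, kicking the vCPU only when the line level actually changed.
 */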
static int vcpu_interrupt_line(struct kvm_vcpu *vcpu, int number, bool level)
{
	int bit_index;
	bool set;
	unsigned long *hcr;

	if (number == KVM_ARM_IRQ_CPU_IRQ)
		bit_index = __ffs(HCR_VI);
	else /* KVM_ARM_IRQ_CPU_FIQ */
		bit_index = __ffs(HCR_VF);

	hcr = vcpu_hcr(vcpu);
	if (level)
		set = test_and_set_bit(bit_index, hcr);
	else
		set = test_and_clear_bit(bit_index, hcr);

	/*
	 * If we didn't change anything, no need to wake up or kick other CPUs
	 */
	if (set == level)
		return 0;

	/*
	 * The vcpu irq_lines field was updated, wake up sleeping VCPUs and
	 * trigger a world-switch round on the running physical CPU to set the
	 * virtual IRQ/FIQ fields in the HCR appropriately.
	 */
	kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);
	kvm_vcpu_kick(vcpu);

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status)
{
	u32 irq = irq_level->irq;
	unsigned int irq_type, vcpu_id, irq_num;
	struct kvm_vcpu *vcpu = NULL;
	bool level = irq_level->level;

	irq_type = (irq >> KVM_ARM_IRQ_TYPE_SHIFT) & KVM_ARM_IRQ_TYPE_MASK;
	vcpu_id = (irq >> KVM_ARM_IRQ_VCPU_SHIFT) & KVM_ARM_IRQ_VCPU_MASK;
	vcpu_id += ((irq >> KVM_ARM_IRQ_VCPU2_SHIFT) & KVM_ARM_IRQ_VCPU2_MASK) * (KVM_ARM_IRQ_VCPU_MASK + 1);
	irq_num = (irq >> KVM_ARM_IRQ_NUM_SHIFT) & KVM_ARM_IRQ_NUM_MASK;

	trace_kvm_irq_line(irq_type, vcpu_id, irq_num, irq_level->level);

	switch (irq_type) {
	case KVM_ARM_IRQ_TYPE_CPU:
		if (irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num > KVM_ARM_IRQ_CPU_FIQ)
			return -EINVAL;

		return vcpu_interrupt_line(vcpu, irq_num, level);
	case KVM_ARM_IRQ_TYPE_PPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		if (!vcpu)
			return -EINVAL;

		if (irq_num < VGIC_NR_SGIS || irq_num >= VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, vcpu, irq_num, level, NULL);
	case KVM_ARM_IRQ_TYPE_SPI:
		if (!irqchip_in_kernel(kvm))
			return -ENXIO;

		if (irq_num < VGIC_NR_PRIVATE_IRQS)
			return -EINVAL;

		return kvm_vgic_inject_irq(kvm, NULL, irq_num, level, NULL);
	}

	return -EINVAL;
}

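/*
 * Start from the full set of vCPU features and mask out anything the host
 * cannot support (32-bit EL1, PMUv3, SVE, pointer auth, nested virt).
 */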
static unsigned long system_supported_vcpu_features(void)
{
	unsigned long features = KVM_VCPU_VALID_FEATURES;

	if (!cpus_have_final_cap(ARM64_HAS_32BIT_EL1))
		clear_bit(KVM_ARM_VCPU_EL1_32BIT, &features);

	if (!kvm_supports_guest_pmuv3())
		clear_bit(KVM_ARM_VCPU_PMU_V3, &features);

	if (!system_supports_sve())
		clear_bit(KVM_ARM_VCPU_SVE, &features);

	if (!kvm_has_full_ptr_auth()) {
		clear_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features);
		clear_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features);
	}

	if (!cpus_have_final_cap(ARM64_HAS_NESTED_VIRT))
		clear_bit(KVM_ARM_VCPU_HAS_EL2, &features);

	return features;
}

static int kvm_vcpu_init_check_features(struct kvm_vcpu *vcpu,
					const struct kvm_vcpu_init *init)
{
	unsigned long features = init->features[0];
	int i;

	if (features & ~KVM_VCPU_VALID_FEATURES)
		return -ENOENT;

	for (i = 1; i < ARRAY_SIZE(init->features); i++) {
		if (init->features[i])
			return -ENOENT;
	}

	if (features & ~system_supported_vcpu_features())
		return -EINVAL;

	/*
	 * For now make sure that both address/generic pointer authentication
	 * features are requested by the userspace together.
	 */
	if (test_bit(KVM_ARM_VCPU_PTRAUTH_ADDRESS, &features) !=
	    test_bit(KVM_ARM_VCPU_PTRAUTH_GENERIC, &features))
		return -EINVAL;

	if (!test_bit(KVM_ARM_VCPU_EL1_32BIT, &features))
		return 0;

	/* MTE is incompatible with AArch32 */
	if (kvm_has_mte(vcpu->kvm))
		return -EINVAL;

	/* NV is incompatible with AArch32 */
	if (test_bit(KVM_ARM_VCPU_HAS_EL2, &features))
		return -EINVAL;

	return 0;
}

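/*
 * Returns true if userspace requested a feature set different from the one
 * already recorded for this VM.
 */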
1487 | static bool kvm_vcpu_init_changed(struct kvm_vcpu *vcpu, |
1488 | const struct kvm_vcpu_init *init) |
1489 | { |
1490 | unsigned long features = init->features[0]; |
1491 | |
1492 | return !bitmap_equal(vcpu->kvm->arch.vcpu_features, &features, |
1493 | KVM_VCPU_MAX_FEATURES); |
1494 | } |
1495 | |
1496 | static int kvm_setup_vcpu(struct kvm_vcpu *vcpu) |
1497 | { |
1498 | struct kvm *kvm = vcpu->kvm; |
1499 | int ret = 0; |
1500 | |
1501 | /* |
1502 | * When the vCPU has a PMU, but no PMU is set for the guest |
1503 | * yet, set the default one. |
1504 | */ |
1505 | if (kvm_vcpu_has_pmu(vcpu) && !kvm->arch.arm_pmu) |
1506 | ret = kvm_arm_set_default_pmu(kvm); |
1507 | |
1508 | /* Prepare for nested if required */ |
1509 | if (!ret && vcpu_has_nv(vcpu)) |
1510 | ret = kvm_vcpu_init_nested(vcpu); |
1511 | |
1512 | return ret; |
1513 | } |
1514 | |
1515 | static int __kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
1516 | const struct kvm_vcpu_init *init) |
1517 | { |
1518 | unsigned long features = init->features[0]; |
1519 | struct kvm *kvm = vcpu->kvm; |
1520 | int ret = -EINVAL; |
1521 | |
1522 | mutex_lock(&kvm->arch.config_lock); |
1523 | |
1524 | if (test_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags) && |
1525 | kvm_vcpu_init_changed(vcpu, init)) |
1526 | goto out_unlock; |
1527 | |
1528 | bitmap_copy(kvm->arch.vcpu_features, &features, KVM_VCPU_MAX_FEATURES); |
1529 | |
1530 | ret = kvm_setup_vcpu(vcpu); |
1531 | if (ret) |
1532 | goto out_unlock; |
1533 | |
1534 | /* Now we know what it is, we can reset it. */ |
1535 | kvm_reset_vcpu(vcpu); |
1536 | |
1537 | set_bit(KVM_ARCH_FLAG_VCPU_FEATURES_CONFIGURED, &kvm->arch.flags); |
1538 | vcpu_set_flag(vcpu, VCPU_INITIALIZED); |
1539 | ret = 0; |
1540 | out_unlock: |
1541 | mutex_unlock(lock: &kvm->arch.config_lock); |
1542 | return ret; |
1543 | } |
1544 | |
1545 | static int kvm_vcpu_set_target(struct kvm_vcpu *vcpu, |
1546 | const struct kvm_vcpu_init *init) |
1547 | { |
1548 | int ret; |
1549 | |
1550 | if (init->target != KVM_ARM_TARGET_GENERIC_V8 && |
1551 | init->target != kvm_target_cpu()) |
1552 | return -EINVAL; |
1553 | |
1554 | ret = kvm_vcpu_init_check_features(vcpu, init); |
1555 | if (ret) |
1556 | return ret; |
1557 | |
1558 | if (!kvm_vcpu_initialized(vcpu)) |
1559 | return __kvm_vcpu_set_target(vcpu, init); |
1560 | |
1561 | if (kvm_vcpu_init_changed(vcpu, init)) |
1562 | return -EINVAL; |
1563 | |
1564 | kvm_reset_vcpu(vcpu); |
1565 | return 0; |
1566 | } |
1567 | |
1568 | static int kvm_arch_vcpu_ioctl_vcpu_init(struct kvm_vcpu *vcpu, |
1569 | struct kvm_vcpu_init *init) |
1570 | { |
1571 | bool power_off = false; |
1572 | int ret; |
1573 | |
1574 | /* |
1575 | * Treat the power-off vCPU feature as ephemeral. Clear the bit to avoid |
1576 | * reflecting it in the finalized feature set, thus limiting its scope |
1577 | * to a single KVM_ARM_VCPU_INIT call. |
1578 | */ |
1579 | if (init->features[0] & BIT(KVM_ARM_VCPU_POWER_OFF)) { |
1580 | init->features[0] &= ~BIT(KVM_ARM_VCPU_POWER_OFF); |
1581 | power_off = true; |
1582 | } |
1583 | |
1584 | ret = kvm_vcpu_set_target(vcpu, init); |
1585 | if (ret) |
1586 | return ret; |
1587 | |
1588 | /* |
1589 | * Ensure a rebooted VM will fault in RAM pages and detect if the |
1590 | * guest MMU is turned off and flush the caches as needed. |
1591 | * |
1592 | * S2FWB enforces all memory accesses to RAM being cacheable, |
1593 | * ensuring that the data side is always coherent. We still |
1594 | * need to invalidate the I-cache though, as FWB does *not* |
1595 | * imply CTR_EL0.DIC. |
1596 | */ |
1597 | if (vcpu_has_run_once(vcpu)) { |
1598 | if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) |
1599 | stage2_unmap_vm(vcpu->kvm); |
1600 | else |
1601 | icache_inval_all_pou(); |
1602 | } |
1603 | |
1604 | vcpu_reset_hcr(vcpu); |
1605 | |
1606 | /* |
1607 | * Handle the "start in power-off" case. |
1608 | */ |
1609 | spin_lock(lock: &vcpu->arch.mp_state_lock); |
1610 | |
1611 | if (power_off) |
1612 | __kvm_arm_vcpu_power_off(vcpu); |
1613 | else |
1614 | WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_RUNNABLE); |
1615 | |
1616 | spin_unlock(lock: &vcpu->arch.mp_state_lock); |
1617 | |
1618 | return 0; |
1619 | } |
1620 | |
1621 | static int kvm_arm_vcpu_set_attr(struct kvm_vcpu *vcpu, |
1622 | struct kvm_device_attr *attr) |
1623 | { |
1624 | int ret = -ENXIO; |
1625 | |
1626 | switch (attr->group) { |
1627 | default: |
1628 | ret = kvm_arm_vcpu_arch_set_attr(vcpu, attr); |
1629 | break; |
1630 | } |
1631 | |
1632 | return ret; |
1633 | } |
1634 | |
1635 | static int kvm_arm_vcpu_get_attr(struct kvm_vcpu *vcpu, |
1636 | struct kvm_device_attr *attr) |
1637 | { |
1638 | int ret = -ENXIO; |
1639 | |
1640 | switch (attr->group) { |
1641 | default: |
1642 | ret = kvm_arm_vcpu_arch_get_attr(vcpu, attr); |
1643 | break; |
1644 | } |
1645 | |
1646 | return ret; |
1647 | } |
1648 | |
1649 | static int kvm_arm_vcpu_has_attr(struct kvm_vcpu *vcpu, |
1650 | struct kvm_device_attr *attr) |
1651 | { |
1652 | int ret = -ENXIO; |
1653 | |
1654 | switch (attr->group) { |
1655 | default: |
1656 | ret = kvm_arm_vcpu_arch_has_attr(vcpu, attr); |
1657 | break; |
1658 | } |
1659 | |
1660 | return ret; |
1661 | } |
1662 | |
1663 | static int kvm_arm_vcpu_get_events(struct kvm_vcpu *vcpu, |
1664 | struct kvm_vcpu_events *events) |
1665 | { |
1666 | memset(events, 0, sizeof(*events)); |
1667 | |
1668 | return __kvm_arm_vcpu_get_events(vcpu, events); |
1669 | } |
1670 | |
1671 | static int kvm_arm_vcpu_set_events(struct kvm_vcpu *vcpu, |
1672 | struct kvm_vcpu_events *events) |
1673 | { |
1674 | int i; |
1675 | |
1676 | /* check whether the reserved field is zero */ |
1677 | for (i = 0; i < ARRAY_SIZE(events->reserved); i++) |
1678 | if (events->reserved[i]) |
1679 | return -EINVAL; |
1680 | |
1681 | /* check whether the pad field is zero */ |
1682 | for (i = 0; i < ARRAY_SIZE(events->exception.pad); i++) |
1683 | if (events->exception.pad[i]) |
1684 | return -EINVAL; |
1685 | |
1686 | return __kvm_arm_vcpu_set_events(vcpu, events); |
1687 | } |
1688 | |
1689 | long kvm_arch_vcpu_ioctl(struct file *filp, |
1690 | unsigned int ioctl, unsigned long arg) |
1691 | { |
1692 | struct kvm_vcpu *vcpu = filp->private_data; |
1693 | void __user *argp = (void __user *)arg; |
1694 | struct kvm_device_attr attr; |
1695 | long r; |
1696 | |
1697 | switch (ioctl) { |
1698 | case KVM_ARM_VCPU_INIT: { |
1699 | struct kvm_vcpu_init init; |
1700 | |
1701 | r = -EFAULT; |
1702 | if (copy_from_user(to: &init, from: argp, n: sizeof(init))) |
1703 | break; |
1704 | |
1705 | r = kvm_arch_vcpu_ioctl_vcpu_init(vcpu, init: &init); |
1706 | break; |
1707 | } |
1708 | case KVM_SET_ONE_REG: |
1709 | case KVM_GET_ONE_REG: { |
1710 | struct kvm_one_reg reg; |
1711 | |
1712 | r = -ENOEXEC; |
1713 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
1714 | break; |
1715 | |
1716 | r = -EFAULT; |
1717 | if (copy_from_user(to: ®, from: argp, n: sizeof(reg))) |
1718 | break; |
1719 | |
1720 | /* |
1721 | * We could owe a reset due to PSCI. Handle the pending reset |
1722 | * here to ensure userspace register accesses are ordered after |
1723 | * the reset. |
1724 | */ |
1725 | if (kvm_check_request(KVM_REQ_VCPU_RESET, vcpu)) |
1726 | kvm_reset_vcpu(vcpu); |
1727 | |
1728 | if (ioctl == KVM_SET_ONE_REG) |
1729 | r = kvm_arm_set_reg(vcpu, ®); |
1730 | else |
1731 | r = kvm_arm_get_reg(vcpu, ®); |
1732 | break; |
1733 | } |
1734 | case KVM_GET_REG_LIST: { |
1735 | struct kvm_reg_list __user *user_list = argp; |
1736 | struct kvm_reg_list reg_list; |
1737 | unsigned n; |
1738 | |
1739 | r = -ENOEXEC; |
1740 | if (unlikely(!kvm_vcpu_initialized(vcpu))) |
1741 | break; |
1742 | |
1743 | r = -EPERM; |
1744 | if (!kvm_arm_vcpu_is_finalized(vcpu)) |
1745 | break; |
1746 | |
1747 | r = -EFAULT; |
1748 | if (copy_from_user(to: ®_list, from: user_list, n: sizeof(reg_list))) |
1749 | break; |
1750 | n = reg_list.n; |
1751 | reg_list.n = kvm_arm_num_regs(vcpu); |
1752 | if (copy_to_user(to: user_list, from: ®_list, n: sizeof(reg_list))) |
1753 | break; |
1754 | r = -E2BIG; |
1755 | if (n < reg_list.n) |
1756 | break; |
1757 | r = kvm_arm_copy_reg_indices(vcpu, user_list->reg); |
1758 | break; |
1759 | } |
1760 | case KVM_SET_DEVICE_ATTR: { |
1761 | r = -EFAULT; |
1762 | if (copy_from_user(to: &attr, from: argp, n: sizeof(attr))) |
1763 | break; |
1764 | r = kvm_arm_vcpu_set_attr(vcpu, attr: &attr); |
1765 | break; |
1766 | } |
1767 | case KVM_GET_DEVICE_ATTR: { |
1768 | r = -EFAULT; |
1769 | if (copy_from_user(to: &attr, from: argp, n: sizeof(attr))) |
1770 | break; |
1771 | r = kvm_arm_vcpu_get_attr(vcpu, attr: &attr); |
1772 | break; |
1773 | } |
1774 | case KVM_HAS_DEVICE_ATTR: { |
1775 | r = -EFAULT; |
1776 | if (copy_from_user(to: &attr, from: argp, n: sizeof(attr))) |
1777 | break; |
1778 | r = kvm_arm_vcpu_has_attr(vcpu, attr: &attr); |
1779 | break; |
1780 | } |
1781 | case KVM_GET_VCPU_EVENTS: { |
1782 | struct kvm_vcpu_events events; |
1783 | |
1784 | if (kvm_arm_vcpu_get_events(vcpu, events: &events)) |
1785 | return -EINVAL; |
1786 | |
1787 | if (copy_to_user(to: argp, from: &events, n: sizeof(events))) |
1788 | return -EFAULT; |
1789 | |
1790 | return 0; |
1791 | } |
1792 | case KVM_SET_VCPU_EVENTS: { |
1793 | struct kvm_vcpu_events events; |
1794 | |
1795 | if (copy_from_user(to: &events, from: argp, n: sizeof(events))) |
1796 | return -EFAULT; |
1797 | |
1798 | return kvm_arm_vcpu_set_events(vcpu, events: &events); |
1799 | } |
1800 | case KVM_ARM_VCPU_FINALIZE: { |
1801 | int what; |
1802 | |
1803 | if (!kvm_vcpu_initialized(vcpu)) |
1804 | return -ENOEXEC; |
1805 | |
1806 | if (get_user(what, (const int __user *)argp)) |
1807 | return -EFAULT; |
1808 | |
1809 | return kvm_arm_vcpu_finalize(vcpu, what); |
1810 | } |
1811 | default: |
1812 | r = -EINVAL; |
1813 | } |
1814 | |
1815 | return r; |
1816 | } |
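| /* |
| * Example: the KVM_GET_REG_LIST case above implements a two-step sizing |
| * protocol: userspace passes its buffer capacity in reg_list.n, the kernel |
| * rewrites it with the real register count and returns -E2BIG if the |
| * buffer was too small. An illustrative userspace sketch of that protocol |
| * (not part of this file; assumes an initialised and finalized vcpu_fd, |
| * <linux/kvm.h>/<sys/ioctl.h>, and omits most error handling): |
| * |
| * struct kvm_reg_list probe = { .n = 0 }; |
| * struct kvm_reg_list *list; |
| * |
| * // First call: fails with E2BIG but reports the register count. |
| * if (ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe) < 0 && errno != E2BIG) |
| * err(1, "KVM_GET_REG_LIST"); |
| * |
| * list = calloc(1, sizeof(*list) + probe.n * sizeof(__u64)); |
| * list->n = probe.n; |
| * if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) |
| * err(1, "KVM_GET_REG_LIST"); |
| * // list->reg[0] .. list->reg[list->n - 1] now hold the register indices. |
| */ |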
1817 | |
1818 | void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot) |
1819 | { |
1820 | |
1821 | } |
1822 | |
1823 | static int kvm_vm_ioctl_set_device_addr(struct kvm *kvm, |
1824 | struct kvm_arm_device_addr *dev_addr) |
1825 | { |
1826 | switch (FIELD_GET(KVM_ARM_DEVICE_ID_MASK, dev_addr->id)) { |
1827 | case KVM_ARM_DEVICE_VGIC_V2: |
1828 | if (!vgic_present) |
1829 | return -ENXIO; |
1830 | return kvm_set_legacy_vgic_v2_addr(kvm, dev_addr); |
1831 | default: |
1832 | return -ENODEV; |
1833 | } |
1834 | } |
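| /* |
| * Example: for this legacy interface, userspace encodes the device ID and |
| * address-type ID into kvm_arm_device_addr::id. Illustrative sketch only, |
| * not part of this file; the distributor base address is board specific: |
| * |
| * struct kvm_arm_device_addr dev_addr = { |
| * .id = (KVM_ARM_DEVICE_VGIC_V2 << KVM_ARM_DEVICE_ID_SHIFT) | |
| * KVM_VGIC_V2_ADDR_TYPE_DIST, |
| * .addr = 0x08000000, // guest-physical distributor base (example) |
| * }; |
| * ioctl(vm_fd, KVM_ARM_SET_DEVICE_ADDR, &dev_addr); |
| */ |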
1835 | |
1836 | static int kvm_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1837 | { |
1838 | switch (attr->group) { |
1839 | case KVM_ARM_VM_SMCCC_CTRL: |
1840 | return kvm_vm_smccc_has_attr(kvm, attr); |
1841 | default: |
1842 | return -ENXIO; |
1843 | } |
1844 | } |
1845 | |
1846 | static int kvm_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr) |
1847 | { |
1848 | switch (attr->group) { |
1849 | case KVM_ARM_VM_SMCCC_CTRL: |
1850 | return kvm_vm_smccc_set_attr(kvm, attr); |
1851 | default: |
1852 | return -ENXIO; |
1853 | } |
1854 | } |
1855 | |
1856 | int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg) |
1857 | { |
1858 | struct kvm *kvm = filp->private_data; |
1859 | void __user *argp = (void __user *)arg; |
1860 | struct kvm_device_attr attr; |
1861 | |
1862 | switch (ioctl) { |
1863 | case KVM_CREATE_IRQCHIP: { |
1864 | int ret; |
1865 | if (!vgic_present) |
1866 | return -ENXIO; |
1867 | mutex_lock(&kvm->lock); |
1868 | ret = kvm_vgic_create(kvm, KVM_DEV_TYPE_ARM_VGIC_V2); |
1869 | mutex_unlock(&kvm->lock); |
1870 | return ret; |
1871 | } |
1872 | case KVM_ARM_SET_DEVICE_ADDR: { |
1873 | struct kvm_arm_device_addr dev_addr; |
1874 | |
1875 | if (copy_from_user(&dev_addr, argp, sizeof(dev_addr))) |
1876 | return -EFAULT; |
1877 | return kvm_vm_ioctl_set_device_addr(kvm, &dev_addr); |
1878 | } |
1879 | case KVM_ARM_PREFERRED_TARGET: { |
1880 | struct kvm_vcpu_init init = { |
1881 | .target = KVM_ARM_TARGET_GENERIC_V8, |
1882 | }; |
1883 | |
1884 | if (copy_to_user(argp, &init, sizeof(init))) |
1885 | return -EFAULT; |
1886 | |
1887 | return 0; |
1888 | } |
1889 | case KVM_ARM_MTE_COPY_TAGS: { |
1890 | struct kvm_arm_copy_mte_tags copy_tags; |
1891 | |
1892 | if (copy_from_user(&copy_tags, argp, sizeof(copy_tags))) |
1893 | return -EFAULT; |
1894 | return kvm_vm_ioctl_mte_copy_tags(kvm, &copy_tags); |
1895 | } |
1896 | case KVM_ARM_SET_COUNTER_OFFSET: { |
1897 | struct kvm_arm_counter_offset offset; |
1898 | |
1899 | if (copy_from_user(&offset, argp, sizeof(offset))) |
1900 | return -EFAULT; |
1901 | return kvm_vm_ioctl_set_counter_offset(kvm, &offset); |
1902 | } |
1903 | case KVM_HAS_DEVICE_ATTR: { |
1904 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1905 | return -EFAULT; |
1906 | |
1907 | return kvm_vm_has_attr(kvm, &attr); |
1908 | } |
1909 | case KVM_SET_DEVICE_ATTR: { |
1910 | if (copy_from_user(&attr, argp, sizeof(attr))) |
1911 | return -EFAULT; |
1912 | |
1913 | return kvm_vm_set_attr(kvm, &attr); |
1914 | } |
1915 | case KVM_ARM_GET_REG_WRITABLE_MASKS: { |
1916 | struct reg_mask_range range; |
1917 | |
1918 | if (copy_from_user(&range, argp, sizeof(range))) |
1919 | return -EFAULT; |
1920 | return kvm_vm_ioctl_get_reg_writable_masks(kvm, &range); |
1921 | } |
1922 | default: |
1923 | return -EINVAL; |
1924 | } |
1925 | } |
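| /* |
| * Example: the usual userspace bring-up sequence built on the ioctls |
| * handled above and in kvm_arch_vcpu_ioctl(): query the preferred target |
| * on the VM fd, then initialise each vCPU with it. Illustrative sketch |
| * only (error handling and fd setup omitted; the feature selection is |
| * just an example): |
| * |
| * struct kvm_vcpu_init init; |
| * |
| * ioctl(vm_fd, KVM_ARM_PREFERRED_TARGET, &init); |
| * init.features[0] |= 1UL << KVM_ARM_VCPU_PSCI_0_2; |
| * ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init); |
| */ |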
1926 | |
1927 | static unsigned long nvhe_percpu_size(void) |
1928 | { |
1929 | return (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_end) - |
1930 | (unsigned long)CHOOSE_NVHE_SYM(__per_cpu_start); |
1931 | } |
1932 | |
1933 | static unsigned long nvhe_percpu_order(void) |
1934 | { |
1935 | unsigned long size = nvhe_percpu_size(); |
1936 | |
1937 | return size ? get_order(size) : 0; |
1938 | } |
1939 | |
1940 | static size_t pkvm_host_sve_state_order(void) |
1941 | { |
1942 | return get_order(pkvm_host_sve_state_size()); |
1943 | } |
1944 | |
1945 | /* A lookup table holding the hypervisor VA for each vector slot */ |
1946 | static void *hyp_spectre_vector_selector[BP_HARDEN_EL2_SLOTS]; |
1947 | |
1948 | static void kvm_init_vector_slot(void *base, enum arm64_hyp_spectre_vector slot) |
1949 | { |
1950 | hyp_spectre_vector_selector[slot] = __kvm_vector_slot2addr(base, slot); |
1951 | } |
1952 | |
1953 | static int kvm_init_vector_slots(void) |
1954 | { |
1955 | int err; |
1956 | void *base; |
1957 | |
1958 | base = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector)); |
1959 | kvm_init_vector_slot(base, HYP_VECTOR_DIRECT); |
1960 | |
1961 | base = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs)); |
1962 | kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_DIRECT); |
1963 | |
1964 | if (kvm_system_needs_idmapped_vectors() && |
1965 | !is_protected_kvm_enabled()) { |
1966 | err = create_hyp_exec_mappings(__pa_symbol(__bp_harden_hyp_vecs), |
1967 | __BP_HARDEN_HYP_VECS_SZ, &base); |
1968 | if (err) |
1969 | return err; |
1970 | } |
1971 | |
1972 | kvm_init_vector_slot(base, HYP_VECTOR_INDIRECT); |
1973 | kvm_init_vector_slot(base, HYP_VECTOR_SPECTRE_INDIRECT); |
1974 | return 0; |
1975 | } |
1976 | |
1977 | static void __init cpu_prepare_hyp_mode(int cpu, u32 hyp_va_bits) |
1978 | { |
1979 | struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); |
1980 | unsigned long tcr; |
1981 | |
1982 | /* |
1983 | * Calculate the raw per-cpu offset without a translation from the |
1984 | * kernel's mapping to the linear mapping, and store it in tpidr_el2 |
1985 | * so that we can use adr_l to access per-cpu variables in EL2. |
1986 | * Also drop the KASAN tag which gets in the way... |
1987 | */ |
1988 | params->tpidr_el2 = (unsigned long)kasan_reset_tag(per_cpu_ptr_nvhe_sym(__per_cpu_start, cpu)) - |
1989 | (unsigned long)kvm_ksym_ref(CHOOSE_NVHE_SYM(__per_cpu_start)); |
1990 | |
1991 | params->mair_el2 = read_sysreg(mair_el1); |
1992 | |
1993 | tcr = read_sysreg(tcr_el1); |
1994 | if (cpus_have_final_cap(ARM64_KVM_HVHE)) { |
1995 | tcr &= ~(TCR_HD | TCR_HA | TCR_A1 | TCR_T0SZ_MASK); |
1996 | tcr |= TCR_EPD1_MASK; |
1997 | } else { |
1998 | unsigned long ips = FIELD_GET(TCR_IPS_MASK, tcr); |
1999 | |
2000 | tcr &= TCR_EL2_MASK; |
2001 | tcr |= TCR_EL2_RES1 | FIELD_PREP(TCR_EL2_PS_MASK, ips); |
2002 | if (lpa2_is_enabled()) |
2003 | tcr |= TCR_EL2_DS; |
2004 | } |
2005 | tcr |= TCR_T0SZ(hyp_va_bits); |
2006 | params->tcr_el2 = tcr; |
2007 | |
2008 | params->pgd_pa = kvm_mmu_get_httbr(); |
2009 | if (is_protected_kvm_enabled()) |
2010 | params->hcr_el2 = HCR_HOST_NVHE_PROTECTED_FLAGS; |
2011 | else |
2012 | params->hcr_el2 = HCR_HOST_NVHE_FLAGS; |
2013 | if (cpus_have_final_cap(ARM64_KVM_HVHE)) |
2014 | params->hcr_el2 |= HCR_E2H; |
2015 | params->vttbr = params->vtcr = 0; |
2016 | |
2017 | /* |
2018 | * Flush the init params from the data cache because the struct will |
2019 | * be read while the MMU is off. |
2020 | */ |
2021 | kvm_flush_dcache_to_poc(params, sizeof(*params)); |
2022 | } |
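| /* |
| * The TCR computation above transplants the IPS field of the host's |
| * TCR_EL1 into the PS field of the nVHE TCR_EL2 value using |
| * FIELD_GET()/FIELD_PREP(). A minimal, self-contained sketch of that |
| * bitfield idiom (the masks below are generic examples, not the |
| * architectural layout): |
| * |
| * #include <linux/bits.h> |
| * #include <linux/bitfield.h> |
| * |
| * #define SRC_FIELD GENMASK_ULL(34, 32) // field position in the source reg |
| * #define DST_FIELD GENMASK_ULL(18, 16) // field position in the destination |
| * |
| * static inline u64 move_field(u64 src) |
| * { |
| * return FIELD_PREP(DST_FIELD, FIELD_GET(SRC_FIELD, src)); |
| * } |
| */ |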
2023 | |
2024 | static void hyp_install_host_vector(void) |
2025 | { |
2026 | struct kvm_nvhe_init_params *params; |
2027 | struct arm_smccc_res res; |
2028 | |
2029 | /* Switch from the HYP stub to our own HYP init vector */ |
2030 | __hyp_set_vectors(kvm_get_idmap_vector()); |
2031 | |
2032 | /* |
2033 | * Call initialization code, and switch to the full blown HYP code. |
2034 | * If the cpucaps haven't been finalized yet, something has gone very |
2035 | * wrong, and hyp will crash and burn when it uses any |
2036 | * cpus_have_*_cap() wrapper. |
2037 | */ |
2038 | BUG_ON(!system_capabilities_finalized()); |
2039 | params = this_cpu_ptr_nvhe_sym(kvm_init_params); |
2040 | arm_smccc_1_1_hvc(KVM_HOST_SMCCC_FUNC(__kvm_hyp_init), virt_to_phys(params), &res); |
2041 | WARN_ON(res.a0 != SMCCC_RET_SUCCESS); |
2042 | } |
2043 | |
2044 | static void cpu_init_hyp_mode(void) |
2045 | { |
2046 | hyp_install_host_vector(); |
2047 | |
2048 | /* |
2049 | * Disabling SSBD on a non-VHE system requires us to enable SSBS |
2050 | * at EL2. |
2051 | */ |
2052 | if (this_cpu_has_cap(ARM64_SSBS) && |
2053 | arm64_get_spectre_v4_state() == SPECTRE_VULNERABLE) { |
2054 | kvm_call_hyp_nvhe(__kvm_enable_ssbs); |
2055 | } |
2056 | } |
2057 | |
2058 | static void cpu_hyp_reset(void) |
2059 | { |
2060 | if (!is_kernel_in_hyp_mode()) |
2061 | __hyp_reset_vectors(); |
2062 | } |
2063 | |
2064 | /* |
2065 | * EL2 vectors can be mapped and rerouted in a number of ways, |
2066 | * depending on the kernel configuration and CPU present: |
2067 | * |
2068 | * - If the CPU is affected by Spectre-v2, the hardening sequence is |
2069 | * placed in one of the vector slots, which is executed before jumping |
2070 | * to the real vectors. |
2071 | * |
2072 | * - If the CPU also has the ARM64_SPECTRE_V3A cap, the slot |
2073 | * containing the hardening sequence is mapped next to the idmap page, |
2074 | * and executed before jumping to the real vectors. |
2075 | * |
2076 | * - If the CPU only has the ARM64_SPECTRE_V3A cap, then an |
2077 | * empty slot is selected, mapped next to the idmap page, and |
2078 | * executed before jumping to the real vectors. |
2079 | * |
2080 | * Note that ARM64_SPECTRE_V3A is somewhat incompatible with |
2081 | * VHE, as we don't have hypervisor-specific mappings. If the system |
2082 | * is VHE and yet selects this capability, it will be ignored. |
2083 | */ |
2084 | static void cpu_set_hyp_vector(void) |
2085 | { |
2086 | struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data); |
2087 | void *vector = hyp_spectre_vector_selector[data->slot]; |
2088 | |
2089 | if (!is_protected_kvm_enabled()) |
2090 | *this_cpu_ptr_hyp_sym(kvm_hyp_vector) = (unsigned long)vector; |
2091 | else |
2092 | kvm_call_hyp_nvhe(__pkvm_cpu_set_vector, data->slot); |
2093 | } |
2094 | |
2095 | static void cpu_hyp_init_context(void) |
2096 | { |
2097 | kvm_init_host_cpu_context(host_data_ptr(host_ctxt)); |
2098 | kvm_init_host_debug_data(); |
2099 | |
2100 | if (!is_kernel_in_hyp_mode()) |
2101 | cpu_init_hyp_mode(); |
2102 | } |
2103 | |
2104 | static void cpu_hyp_init_features(void) |
2105 | { |
2106 | cpu_set_hyp_vector(); |
2107 | |
2108 | if (is_kernel_in_hyp_mode()) |
2109 | kvm_timer_init_vhe(); |
2110 | |
2111 | if (vgic_present) |
2112 | kvm_vgic_init_cpu_hardware(); |
2113 | } |
2114 | |
2115 | static void cpu_hyp_reinit(void) |
2116 | { |
2117 | cpu_hyp_reset(); |
2118 | cpu_hyp_init_context(); |
2119 | cpu_hyp_init_features(); |
2120 | } |
2121 | |
2122 | static void cpu_hyp_init(void *discard) |
2123 | { |
2124 | if (!__this_cpu_read(kvm_hyp_initialized)) { |
2125 | cpu_hyp_reinit(); |
2126 | __this_cpu_write(kvm_hyp_initialized, 1); |
2127 | } |
2128 | } |
2129 | |
2130 | static void cpu_hyp_uninit(void *discard) |
2131 | { |
2132 | if (__this_cpu_read(kvm_hyp_initialized)) { |
2133 | cpu_hyp_reset(); |
2134 | __this_cpu_write(kvm_hyp_initialized, 0); |
2135 | } |
2136 | } |
2137 | |
2138 | int kvm_arch_enable_virtualization_cpu(void) |
2139 | { |
2140 | /* |
2141 | * Most calls to this function are made with migration |
2142 | * disabled, but not with preemption disabled. The former is |
2143 | * enough to ensure correctness, but most of the helpers |
2144 | * expect the latter and will throw a tantrum otherwise. |
2145 | */ |
2146 | preempt_disable(); |
2147 | |
2148 | cpu_hyp_init(NULL); |
2149 | |
2150 | kvm_vgic_cpu_up(); |
2151 | kvm_timer_cpu_up(); |
2152 | |
2153 | preempt_enable(); |
2154 | |
2155 | return 0; |
2156 | } |
2157 | |
2158 | void kvm_arch_disable_virtualization_cpu(void) |
2159 | { |
2160 | kvm_timer_cpu_down(); |
2161 | kvm_vgic_cpu_down(); |
2162 | |
2163 | if (!is_protected_kvm_enabled()) |
2164 | cpu_hyp_uninit(NULL); |
2165 | } |
2166 | |
2167 | #ifdef CONFIG_CPU_PM |
2168 | static int hyp_init_cpu_pm_notifier(struct notifier_block *self, |
2169 | unsigned long cmd, |
2170 | void *v) |
2171 | { |
2172 | /* |
2173 | * kvm_hyp_initialized is left with its old value over |
2174 | * PM_ENTER->PM_EXIT. It is used to indicate PM_EXIT should |
2175 | * re-enable hyp. |
2176 | */ |
2177 | switch (cmd) { |
2178 | case CPU_PM_ENTER: |
2179 | if (__this_cpu_read(kvm_hyp_initialized)) |
2180 | /* |
2181 | * don't update kvm_hyp_initialized here |
2182 | * so that the hyp will be re-enabled |
2183 | * when we resume. See below. |
2184 | */ |
2185 | cpu_hyp_reset(); |
2186 | |
2187 | return NOTIFY_OK; |
2188 | case CPU_PM_ENTER_FAILED: |
2189 | case CPU_PM_EXIT: |
2190 | if (__this_cpu_read(kvm_hyp_initialized)) |
2191 | /* The hyp was enabled before suspend. */ |
2192 | cpu_hyp_reinit(); |
2193 | |
2194 | return NOTIFY_OK; |
2195 | |
2196 | default: |
2197 | return NOTIFY_DONE; |
2198 | } |
2199 | } |
2200 | |
2201 | static struct notifier_block hyp_init_cpu_pm_nb = { |
2202 | .notifier_call = hyp_init_cpu_pm_notifier, |
2203 | }; |
2204 | |
2205 | static void __init hyp_cpu_pm_init(void) |
2206 | { |
2207 | if (!is_protected_kvm_enabled()) |
2208 | cpu_pm_register_notifier(&hyp_init_cpu_pm_nb); |
2209 | } |
2210 | static void __init hyp_cpu_pm_exit(void) |
2211 | { |
2212 | if (!is_protected_kvm_enabled()) |
2213 | cpu_pm_unregister_notifier(&hyp_init_cpu_pm_nb); |
2214 | } |
2215 | #else |
2216 | static inline void __init hyp_cpu_pm_init(void) |
2217 | { |
2218 | } |
2219 | static inline void __init hyp_cpu_pm_exit(void) |
2220 | { |
2221 | } |
2222 | #endif |
2223 | |
2224 | static void __init init_cpu_logical_map(void) |
2225 | { |
2226 | unsigned int cpu; |
2227 | |
2228 | /* |
2229 | * Copy the MPIDR <-> logical CPU ID mapping to hyp. |
2230 | * Only copy the set of online CPUs whose features have been checked |
2231 | * against the finalized system capabilities. The hypervisor will not |
2232 | * allow any other CPUs from the `possible` set to boot. |
2233 | */ |
2234 | for_each_online_cpu(cpu) |
2235 | hyp_cpu_logical_map[cpu] = cpu_logical_map(cpu); |
2236 | } |
2237 | |
2238 | #define init_psci_0_1_impl_state(config, what) \ |
2239 | config.psci_0_1_ ## what ## _implemented = psci_ops.what |
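| /* |
| * For reference, one expansion of the helper macro above: |
| * |
| * init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend) |
| * |
| * becomes |
| * |
| * kvm_host_psci_config.psci_0_1_cpu_suspend_implemented = psci_ops.cpu_suspend |
| */ |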
2240 | |
2241 | static bool __init init_psci_relay(void) |
2242 | { |
2243 | /* |
2244 | * If PSCI has not been initialized, protected KVM cannot install |
2245 | * itself on newly booted CPUs. |
2246 | */ |
2247 | if (!psci_ops.get_version) { |
2248 | kvm_err("Cannot initialize protected mode without PSCI\n"); |
2249 | return false; |
2250 | } |
2251 | |
2252 | kvm_host_psci_config.version = psci_ops.get_version(); |
2253 | kvm_host_psci_config.smccc_version = arm_smccc_get_version(); |
2254 | |
2255 | if (kvm_host_psci_config.version == PSCI_VERSION(0, 1)) { |
2256 | kvm_host_psci_config.function_ids_0_1 = get_psci_0_1_function_ids(); |
2257 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_suspend); |
2258 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_on); |
2259 | init_psci_0_1_impl_state(kvm_host_psci_config, cpu_off); |
2260 | init_psci_0_1_impl_state(kvm_host_psci_config, migrate); |
2261 | } |
2262 | return true; |
2263 | } |
2264 | |
2265 | static int __init init_subsystems(void) |
2266 | { |
2267 | int err = 0; |
2268 | |
2269 | /* |
2270 | * Enable hardware so that subsystem initialisation can access EL2. |
2271 | */ |
2272 | on_each_cpu(cpu_hyp_init, NULL, 1); |
2273 | |
2274 | /* |
2275 | * Register CPU low-power notifier |
2276 | */ |
2277 | hyp_cpu_pm_init(); |
2278 | |
2279 | /* |
2280 | * Init HYP view of VGIC |
2281 | */ |
2282 | err = kvm_vgic_hyp_init(); |
2283 | switch (err) { |
2284 | case 0: |
2285 | vgic_present = true; |
2286 | break; |
2287 | case -ENODEV: |
2288 | case -ENXIO: |
2289 | /* |
2290 | * No VGIC? No pKVM for you. |
2291 | * |
2292 | * Protected mode assumes that VGICv3 is present, so no point |
2293 | * in trying to hobble along if vgic initialization fails. |
2294 | */ |
2295 | if (is_protected_kvm_enabled()) |
2296 | goto out; |
2297 | |
2298 | /* |
2299 | * Otherwise, userspace could choose to implement a GIC for its |
2300 | * guest on non-cooperative hardware. |
2301 | */ |
2302 | vgic_present = false; |
2303 | err = 0; |
2304 | break; |
2305 | default: |
2306 | goto out; |
2307 | } |
2308 | |
2309 | if (kvm_mode == KVM_MODE_NV && |
2310 | !(vgic_present && kvm_vgic_global_state.type == VGIC_V3)) { |
2311 | kvm_err("NV support requires GICv3, giving up\n"); |
2312 | err = -EINVAL; |
2313 | goto out; |
2314 | } |
2315 | |
2316 | /* |
2317 | * Init HYP architected timer support |
2318 | */ |
2319 | err = kvm_timer_hyp_init(vgic_present); |
2320 | if (err) |
2321 | goto out; |
2322 | |
2323 | kvm_register_perf_callbacks(NULL); |
2324 | |
2325 | out: |
2326 | if (err) |
2327 | hyp_cpu_pm_exit(); |
2328 | |
2329 | if (err || !is_protected_kvm_enabled()) |
2330 | on_each_cpu(cpu_hyp_uninit, NULL, 1); |
2331 | |
2332 | return err; |
2333 | } |
2334 | |
2335 | static void __init teardown_subsystems(void) |
2336 | { |
2337 | kvm_unregister_perf_callbacks(); |
2338 | hyp_cpu_pm_exit(); |
2339 | } |
2340 | |
2341 | static void __init teardown_hyp_mode(void) |
2342 | { |
2343 | bool free_sve = system_supports_sve() && is_protected_kvm_enabled(); |
2344 | int cpu; |
2345 | |
2346 | free_hyp_pgds(); |
2347 | for_each_possible_cpu(cpu) { |
2348 | free_pages(per_cpu(kvm_arm_hyp_stack_base, cpu), NVHE_STACK_SHIFT - PAGE_SHIFT); |
2349 | free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order()); |
2350 | |
2351 | if (free_sve) { |
2352 | struct cpu_sve_state *sve_state; |
2353 | |
2354 | sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; |
2355 | free_pages((unsigned long)sve_state, pkvm_host_sve_state_order()); |
2356 | } |
2357 | } |
2358 | } |
2359 | |
2360 | static int __init do_pkvm_init(u32 hyp_va_bits) |
2361 | { |
2362 | void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)); |
2363 | int ret; |
2364 | |
2365 | preempt_disable(); |
2366 | cpu_hyp_init_context(); |
2367 | ret = kvm_call_hyp_nvhe(__pkvm_init, hyp_mem_base, hyp_mem_size, |
2368 | num_possible_cpus(), kern_hyp_va(per_cpu_base), |
2369 | hyp_va_bits); |
2370 | cpu_hyp_init_features(); |
2371 | |
2372 | /* |
2373 | * The stub hypercalls are now disabled, so set our local flag to |
2374 | * prevent a later re-init attempt in kvm_arch_enable_virtualization_cpu(). |
2375 | */ |
2376 | __this_cpu_write(kvm_hyp_initialized, 1); |
2377 | preempt_enable(); |
2378 | |
2379 | return ret; |
2380 | } |
2381 | |
2382 | static u64 get_hyp_id_aa64pfr0_el1(void) |
2383 | { |
2384 | /* |
2385 | * Track whether the system isn't affected by spectre/meltdown in the |
2386 | * hypervisor's view of id_aa64pfr0_el1, used for protected VMs. |
2387 | * Although this is per-CPU, we make it global for simplicity, e.g., not |
2388 | * to have to worry about vcpu migration. |
2389 | * |
2390 | * Unlike for non-protected VMs, userspace cannot override this for |
2391 | * protected VMs. |
2392 | */ |
2393 | u64 val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1); |
2394 | |
2395 | val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) | |
2396 | ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3)); |
2397 | |
2398 | val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), |
2399 | arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED); |
2400 | val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), |
2401 | arm64_get_meltdown_state() == SPECTRE_UNAFFECTED); |
2402 | |
2403 | return val; |
2404 | } |
2405 | |
2406 | static void kvm_hyp_init_symbols(void) |
2407 | { |
2408 | kvm_nvhe_sym(id_aa64pfr0_el1_sys_val) = get_hyp_id_aa64pfr0_el1(); |
2409 | kvm_nvhe_sym(id_aa64pfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1); |
2410 | kvm_nvhe_sym(id_aa64isar0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR0_EL1); |
2411 | kvm_nvhe_sym(id_aa64isar1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR1_EL1); |
2412 | kvm_nvhe_sym(id_aa64isar2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64ISAR2_EL1); |
2413 | kvm_nvhe_sym(id_aa64mmfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
2414 | kvm_nvhe_sym(id_aa64mmfr1_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
2415 | kvm_nvhe_sym(id_aa64mmfr2_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1); |
2416 | kvm_nvhe_sym(id_aa64smfr0_el1_sys_val) = read_sanitised_ftr_reg(SYS_ID_AA64SMFR0_EL1); |
2417 | kvm_nvhe_sym(__icache_flags) = __icache_flags; |
2418 | kvm_nvhe_sym(kvm_arm_vmid_bits) = kvm_arm_vmid_bits; |
2419 | |
2420 | /* Propagate the FGT state to the nVHE side */ |
2421 | kvm_nvhe_sym(hfgrtr_masks) = hfgrtr_masks; |
2422 | kvm_nvhe_sym(hfgwtr_masks) = hfgwtr_masks; |
2423 | kvm_nvhe_sym(hfgitr_masks) = hfgitr_masks; |
2424 | kvm_nvhe_sym(hdfgrtr_masks) = hdfgrtr_masks; |
2425 | kvm_nvhe_sym(hdfgwtr_masks) = hdfgwtr_masks; |
2426 | kvm_nvhe_sym(hafgrtr_masks) = hafgrtr_masks; |
2427 | kvm_nvhe_sym(hfgrtr2_masks) = hfgrtr2_masks; |
2428 | kvm_nvhe_sym(hfgwtr2_masks) = hfgwtr2_masks; |
2429 | kvm_nvhe_sym(hfgitr2_masks) = hfgitr2_masks; |
2430 | kvm_nvhe_sym(hdfgrtr2_masks) = hdfgrtr2_masks; |
2431 | kvm_nvhe_sym(hdfgwtr2_masks) = hdfgwtr2_masks; |
2432 | |
2433 | /* |
2434 | * Flush entire BSS since part of its data containing init symbols is read |
2435 | * while the MMU is off. |
2436 | */ |
2437 | kvm_flush_dcache_to_poc(kvm_ksym_ref(__hyp_bss_start), |
2438 | kvm_ksym_ref(__hyp_bss_end) - kvm_ksym_ref(__hyp_bss_start)); |
2439 | } |
2440 | |
2441 | static int __init kvm_hyp_init_protection(u32 hyp_va_bits) |
2442 | { |
2443 | void *addr = phys_to_virt(hyp_mem_base); |
2444 | int ret; |
2445 | |
2446 | ret = create_hyp_mappings(addr, addr + hyp_mem_size, PAGE_HYP); |
2447 | if (ret) |
2448 | return ret; |
2449 | |
2450 | ret = do_pkvm_init(hyp_va_bits); |
2451 | if (ret) |
2452 | return ret; |
2453 | |
2454 | free_hyp_pgds(); |
2455 | |
2456 | return 0; |
2457 | } |
2458 | |
2459 | static int init_pkvm_host_sve_state(void) |
2460 | { |
2461 | int cpu; |
2462 | |
2463 | if (!system_supports_sve()) |
2464 | return 0; |
2465 | |
2466 | /* Allocate pages for host sve state in protected mode. */ |
2467 | for_each_possible_cpu(cpu) { |
2468 | struct page *page = alloc_pages(GFP_KERNEL, pkvm_host_sve_state_order()); |
2469 | |
2470 | if (!page) |
2471 | return -ENOMEM; |
2472 | |
2473 | per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = page_address(page); |
2474 | } |
2475 | |
2476 | /* |
2477 | * Don't map the pages in hyp since these are only used in protected |
2478 | * mode, which will (re)create its own mapping when initialized. |
2479 | */ |
2480 | |
2481 | return 0; |
2482 | } |
2483 | |
2484 | /* |
2485 | * Finalizes the initialization of hyp mode, once everything else is initialized |
2486 | * and the initialization process cannot fail. |
2487 | */ |
2488 | static void finalize_init_hyp_mode(void) |
2489 | { |
2490 | int cpu; |
2491 | |
2492 | if (system_supports_sve() && is_protected_kvm_enabled()) { |
2493 | for_each_possible_cpu(cpu) { |
2494 | struct cpu_sve_state *sve_state; |
2495 | |
2496 | sve_state = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state; |
2497 | per_cpu_ptr_nvhe_sym(kvm_host_data, cpu)->sve_state = |
2498 | kern_hyp_va(sve_state); |
2499 | } |
2500 | } |
2501 | } |
2502 | |
2503 | static void pkvm_hyp_init_ptrauth(void) |
2504 | { |
2505 | struct kvm_cpu_context *hyp_ctxt; |
2506 | int cpu; |
2507 | |
2508 | for_each_possible_cpu(cpu) { |
2509 | hyp_ctxt = per_cpu_ptr_nvhe_sym(kvm_hyp_ctxt, cpu); |
2510 | hyp_ctxt->sys_regs[APIAKEYLO_EL1] = get_random_long(); |
2511 | hyp_ctxt->sys_regs[APIAKEYHI_EL1] = get_random_long(); |
2512 | hyp_ctxt->sys_regs[APIBKEYLO_EL1] = get_random_long(); |
2513 | hyp_ctxt->sys_regs[APIBKEYHI_EL1] = get_random_long(); |
2514 | hyp_ctxt->sys_regs[APDAKEYLO_EL1] = get_random_long(); |
2515 | hyp_ctxt->sys_regs[APDAKEYHI_EL1] = get_random_long(); |
2516 | hyp_ctxt->sys_regs[APDBKEYLO_EL1] = get_random_long(); |
2517 | hyp_ctxt->sys_regs[APDBKEYHI_EL1] = get_random_long(); |
2518 | hyp_ctxt->sys_regs[APGAKEYLO_EL1] = get_random_long(); |
2519 | hyp_ctxt->sys_regs[APGAKEYHI_EL1] = get_random_long(); |
2520 | } |
2521 | } |
2522 | |
2523 | /* Inits Hyp-mode on all online CPUs */ |
2524 | static int __init init_hyp_mode(void) |
2525 | { |
2526 | u32 hyp_va_bits; |
2527 | int cpu; |
2528 | int err = -ENOMEM; |
2529 | |
2530 | /* |
2531 | * The protected Hyp-mode cannot be initialized if the memory pool |
2532 | * allocation has failed. |
2533 | */ |
2534 | if (is_protected_kvm_enabled() && !hyp_mem_base) |
2535 | goto out_err; |
2536 | |
2537 | /* |
2538 | * Allocate Hyp PGD and setup Hyp identity mapping |
2539 | */ |
2540 | err = kvm_mmu_init(&hyp_va_bits); |
2541 | if (err) |
2542 | goto out_err; |
2543 | |
2544 | /* |
2545 | * Allocate stack pages for Hypervisor-mode |
2546 | */ |
2547 | for_each_possible_cpu(cpu) { |
2548 | unsigned long stack_base; |
2549 | |
2550 | stack_base = __get_free_pages(GFP_KERNEL, NVHE_STACK_SHIFT - PAGE_SHIFT); |
2551 | if (!stack_base) { |
2552 | err = -ENOMEM; |
2553 | goto out_err; |
2554 | } |
2555 | |
2556 | per_cpu(kvm_arm_hyp_stack_base, cpu) = stack_base; |
2557 | } |
2558 | |
2559 | /* |
2560 | * Allocate and initialize pages for Hypervisor-mode percpu regions. |
2561 | */ |
2562 | for_each_possible_cpu(cpu) { |
2563 | struct page *page; |
2564 | void *page_addr; |
2565 | |
2566 | page = alloc_pages(GFP_KERNEL, nvhe_percpu_order()); |
2567 | if (!page) { |
2568 | err = -ENOMEM; |
2569 | goto out_err; |
2570 | } |
2571 | |
2572 | page_addr = page_address(page); |
2573 | memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size()); |
2574 | kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr; |
2575 | } |
2576 | |
2577 | /* |
2578 | * Map the Hyp-code called directly from the host |
2579 | */ |
2580 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_text_start), |
2581 | kvm_ksym_ref(__hyp_text_end), PAGE_HYP_EXEC); |
2582 | if (err) { |
2583 | kvm_err("Cannot map world-switch code\n"); |
2584 | goto out_err; |
2585 | } |
2586 | |
2587 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_data_start), |
2588 | kvm_ksym_ref(__hyp_data_end), PAGE_HYP); |
2589 | if (err) { |
2590 | kvm_err("Cannot map .hyp.data section\n"); |
2591 | goto out_err; |
2592 | } |
2593 | |
2594 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_rodata_start), |
2595 | kvm_ksym_ref(__hyp_rodata_end), PAGE_HYP_RO); |
2596 | if (err) { |
2597 | kvm_err("Cannot map .hyp.rodata section\n"); |
2598 | goto out_err; |
2599 | } |
2600 | |
2601 | err = create_hyp_mappings(kvm_ksym_ref(__start_rodata), |
2602 | kvm_ksym_ref(__end_rodata), PAGE_HYP_RO); |
2603 | if (err) { |
2604 | kvm_err("Cannot map rodata section\n"); |
2605 | goto out_err; |
2606 | } |
2607 | |
2608 | /* |
2609 | * .hyp.bss is guaranteed to be placed at the beginning of the .bss |
2610 | * section thanks to an assertion in the linker script. Map it RW and |
2611 | * the rest of .bss RO. |
2612 | */ |
2613 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_start), |
2614 | kvm_ksym_ref(__hyp_bss_end), PAGE_HYP); |
2615 | if (err) { |
2616 | kvm_err("Cannot map hyp bss section: %d\n", err); |
2617 | goto out_err; |
2618 | } |
2619 | |
2620 | err = create_hyp_mappings(kvm_ksym_ref(__hyp_bss_end), |
2621 | kvm_ksym_ref(__bss_stop), PAGE_HYP_RO); |
2622 | if (err) { |
2623 | kvm_err("Cannot map bss section\n"); |
2624 | goto out_err; |
2625 | } |
2626 | |
2627 | /* |
2628 | * Map the Hyp stack pages |
2629 | */ |
2630 | for_each_possible_cpu(cpu) { |
2631 | struct kvm_nvhe_init_params *params = per_cpu_ptr_nvhe_sym(kvm_init_params, cpu); |
2632 | char *stack_base = (char *)per_cpu(kvm_arm_hyp_stack_base, cpu); |
2633 | |
2634 | err = create_hyp_stack(__pa(stack_base), &params->stack_hyp_va); |
2635 | if (err) { |
2636 | kvm_err("Cannot map hyp stack\n"); |
2637 | goto out_err; |
2638 | } |
2639 | |
2640 | /* |
2641 | * Save the stack PA in nvhe_init_params. This will be needed |
2642 | * to recreate the stack mapping in protected nVHE mode. |
2643 | * __hyp_pa() won't do the right thing there, since the stack |
2644 | * has been mapped in the flexible private VA space. |
2645 | */ |
2646 | params->stack_pa = __pa(stack_base); |
2647 | } |
2648 | |
2649 | for_each_possible_cpu(cpu) { |
2650 | char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu]; |
2651 | char *percpu_end = percpu_begin + nvhe_percpu_size(); |
2652 | |
2653 | /* Map Hyp percpu pages */ |
2654 | err = create_hyp_mappings(percpu_begin, percpu_end, PAGE_HYP); |
2655 | if (err) { |
2656 | kvm_err("Cannot map hyp percpu region\n"); |
2657 | goto out_err; |
2658 | } |
2659 | |
2660 | /* Prepare the CPU initialization parameters */ |
2661 | cpu_prepare_hyp_mode(cpu, hyp_va_bits); |
2662 | } |
2663 | |
2664 | kvm_hyp_init_symbols(); |
2665 | |
2666 | if (is_protected_kvm_enabled()) { |
2667 | if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) && |
2668 | cpus_have_final_cap(ARM64_HAS_ADDRESS_AUTH)) |
2669 | pkvm_hyp_init_ptrauth(); |
2670 | |
2671 | init_cpu_logical_map(); |
2672 | |
2673 | if (!init_psci_relay()) { |
2674 | err = -ENODEV; |
2675 | goto out_err; |
2676 | } |
2677 | |
2678 | err = init_pkvm_host_sve_state(); |
2679 | if (err) |
2680 | goto out_err; |
2681 | |
2682 | err = kvm_hyp_init_protection(hyp_va_bits); |
2683 | if (err) { |
2684 | kvm_err("Failed to init hyp memory protection\n"); |
2685 | goto out_err; |
2686 | } |
2687 | } |
2688 | |
2689 | return 0; |
2690 | |
2691 | out_err: |
2692 | teardown_hyp_mode(); |
2693 | kvm_err("error initializing Hyp mode: %d\n", err); |
2694 | return err; |
2695 | } |
2696 | |
2697 | struct kvm_vcpu *kvm_mpidr_to_vcpu(struct kvm *kvm, unsigned long mpidr) |
2698 | { |
2699 | struct kvm_vcpu *vcpu = NULL; |
2700 | struct kvm_mpidr_data *data; |
2701 | unsigned long i; |
2702 | |
2703 | mpidr &= MPIDR_HWID_BITMASK; |
2704 | |
2705 | rcu_read_lock(); |
2706 | data = rcu_dereference(kvm->arch.mpidr_data); |
2707 | |
2708 | if (data) { |
2709 | u16 idx = kvm_mpidr_index(data, mpidr); |
2710 | |
2711 | vcpu = kvm_get_vcpu(kvm, data->cmpidr_to_idx[idx]); |
2712 | if (mpidr != kvm_vcpu_get_mpidr_aff(vcpu)) |
2713 | vcpu = NULL; |
2714 | } |
2715 | |
2716 | rcu_read_unlock(); |
2717 | |
2718 | if (vcpu) |
2719 | return vcpu; |
2720 | |
2721 | kvm_for_each_vcpu(i, vcpu, kvm) { |
2722 | if (mpidr == kvm_vcpu_get_mpidr_aff(vcpu)) |
2723 | return vcpu; |
2724 | } |
2725 | return NULL; |
2726 | } |
2727 | |
2728 | bool kvm_arch_irqchip_in_kernel(struct kvm *kvm) |
2729 | { |
2730 | return irqchip_in_kernel(kvm); |
2731 | } |
2732 | |
2733 | int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons, |
2734 | struct irq_bypass_producer *prod) |
2735 | { |
2736 | struct kvm_kernel_irqfd *irqfd = |
2737 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2738 | struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry; |
2739 | |
2740 | /* |
2741 | * The only thing we have a chance of directly-injecting is LPIs. Maybe |
2742 | * one day... |
2743 | */ |
2744 | if (irq_entry->type != KVM_IRQ_ROUTING_MSI) |
2745 | return 0; |
2746 | |
2747 | return kvm_vgic_v4_set_forwarding(irqfd->kvm, prod->irq, |
2748 | &irqfd->irq_entry); |
2749 | } |
2750 | |
2751 | void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons, |
2752 | struct irq_bypass_producer *prod) |
2753 | { |
2754 | struct kvm_kernel_irqfd *irqfd = |
2755 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2756 | struct kvm_kernel_irq_routing_entry *irq_entry = &irqfd->irq_entry; |
2757 | |
2758 | if (irq_entry->type != KVM_IRQ_ROUTING_MSI) |
2759 | return; |
2760 | |
2761 | kvm_vgic_v4_unset_forwarding(irqfd->kvm, prod->irq); |
2762 | } |
2763 | |
2764 | bool kvm_arch_irqfd_route_changed(struct kvm_kernel_irq_routing_entry *old, |
2765 | struct kvm_kernel_irq_routing_entry *new) |
2766 | { |
2767 | if (new->type != KVM_IRQ_ROUTING_MSI) |
2768 | return true; |
2769 | |
2770 | return memcmp(&old->msi, &new->msi, sizeof(new->msi)); |
2771 | } |
2772 | |
2773 | int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq, |
2774 | uint32_t guest_irq, bool set) |
2775 | { |
2776 | /* |
2777 | * Remapping the vLPI requires taking the its_lock mutex to resolve |
2778 | * the new translation. We're in spinlock land at this point, so no |
2779 | * chance of resolving the translation. |
2780 | * |
2781 | * Unmap the vLPI and fall back to software LPI injection. |
2782 | */ |
2783 | return kvm_vgic_v4_unset_forwarding(kvm, host_irq); |
2784 | } |
2785 | |
2786 | void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *cons) |
2787 | { |
2788 | struct kvm_kernel_irqfd *irqfd = |
2789 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2790 | |
2791 | kvm_arm_halt_guest(irqfd->kvm); |
2792 | } |
2793 | |
2794 | void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *cons) |
2795 | { |
2796 | struct kvm_kernel_irqfd *irqfd = |
2797 | container_of(cons, struct kvm_kernel_irqfd, consumer); |
2798 | |
2799 | kvm_arm_resume_guest(irqfd->kvm); |
2800 | } |
2801 | |
2802 | /* Initialize Hyp-mode and memory mappings on all CPUs */ |
2803 | static __init int kvm_arm_init(void) |
2804 | { |
2805 | int err; |
2806 | bool in_hyp_mode; |
2807 | |
2808 | if (!is_hyp_mode_available()) { |
2809 | kvm_info("HYP mode not available\n"); |
2810 | return -ENODEV; |
2811 | } |
2812 | |
2813 | if (kvm_get_mode() == KVM_MODE_NONE) { |
2814 | kvm_info("KVM disabled from command line\n"); |
2815 | return -ENODEV; |
2816 | } |
2817 | |
2818 | err = kvm_sys_reg_table_init(); |
2819 | if (err) { |
2820 | kvm_info("Error initializing system register tables"); |
2821 | return err; |
2822 | } |
2823 | |
2824 | in_hyp_mode = is_kernel_in_hyp_mode(); |
2825 | |
2826 | if (cpus_have_final_cap(ARM64_WORKAROUND_DEVICE_LOAD_ACQUIRE) || |
2827 | cpus_have_final_cap(ARM64_WORKAROUND_1508412)) |
2828 | kvm_info("Guests without required CPU erratum workarounds can deadlock system!\n"\ |
2829 | "Only trusted guests should be used on this system.\n"); |
2830 | |
2831 | err = kvm_set_ipa_limit(); |
2832 | if (err) |
2833 | return err; |
2834 | |
2835 | err = kvm_arm_init_sve(); |
2836 | if (err) |
2837 | return err; |
2838 | |
2839 | err = kvm_arm_vmid_alloc_init(); |
2840 | if (err) { |
2841 | kvm_err("Failed to initialize VMID allocator.\n"); |
2842 | return err; |
2843 | } |
2844 | |
2845 | if (!in_hyp_mode) { |
2846 | err = init_hyp_mode(); |
2847 | if (err) |
2848 | goto out_err; |
2849 | } |
2850 | |
2851 | err = kvm_init_vector_slots(); |
2852 | if (err) { |
2853 | kvm_err("Cannot initialise vector slots\n"); |
2854 | goto out_hyp; |
2855 | } |
2856 | |
2857 | err = init_subsystems(); |
2858 | if (err) |
2859 | goto out_hyp; |
2860 | |
2861 | kvm_info("%s%sVHE%s mode initialized successfully\n", |
2862 | in_hyp_mode ? "": (is_protected_kvm_enabled() ? |
2863 | "Protected ": "Hyp "), |
2864 | in_hyp_mode ? "": (cpus_have_final_cap(ARM64_KVM_HVHE) ? |
2865 | "h": "n"), |
2866 | cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) ? "+NV2": ""); |
2867 | |
2868 | /* |
2869 | * FIXME: Do something reasonable if kvm_init() fails after pKVM |
2870 | * hypervisor protection is finalized. |
2871 | */ |
2872 | err = kvm_init(sizeof(struct kvm_vcpu), 0, THIS_MODULE); |
2873 | if (err) |
2874 | goto out_subs; |
2875 | |
2876 | /* |
2877 | * This should be called after initialization is done and failure isn't |
2878 | * possible anymore. |
2879 | */ |
2880 | if (!in_hyp_mode) |
2881 | finalize_init_hyp_mode(); |
2882 | |
2883 | kvm_arm_initialised = true; |
2884 | |
2885 | return 0; |
2886 | |
2887 | out_subs: |
2888 | teardown_subsystems(); |
2889 | out_hyp: |
2890 | if (!in_hyp_mode) |
2891 | teardown_hyp_mode(); |
2892 | out_err: |
2893 | kvm_arm_vmid_alloc_free(); |
2894 | return err; |
2895 | } |
2896 | |
2897 | static int __init early_kvm_mode_cfg(char *arg) |
2898 | { |
2899 | if (!arg) |
2900 | return -EINVAL; |
2901 | |
2902 | if (strcmp(arg, "none") == 0) { |
2903 | kvm_mode = KVM_MODE_NONE; |
2904 | return 0; |
2905 | } |
2906 | |
2907 | if (!is_hyp_mode_available()) { |
2908 | pr_warn_once("KVM is not available. Ignoring kvm-arm.mode\n"); |
2909 | return 0; |
2910 | } |
2911 | |
2912 | if (strcmp(arg, "protected") == 0) { |
2913 | if (!is_kernel_in_hyp_mode()) |
2914 | kvm_mode = KVM_MODE_PROTECTED; |
2915 | else |
2916 | pr_warn_once("Protected KVM not available with VHE\n"); |
2917 | |
2918 | return 0; |
2919 | } |
2920 | |
2921 | if (strcmp(arg, "nvhe") == 0 && !WARN_ON(is_kernel_in_hyp_mode())) { |
2922 | kvm_mode = KVM_MODE_DEFAULT; |
2923 | return 0; |
2924 | } |
2925 | |
2926 | if (strcmp(arg, "nested") == 0 && !WARN_ON(!is_kernel_in_hyp_mode())) { |
2927 | kvm_mode = KVM_MODE_NV; |
2928 | return 0; |
2929 | } |
2930 | |
2931 | return -EINVAL; |
2932 | } |
2933 | early_param("kvm-arm.mode", early_kvm_mode_cfg); |
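| /* |
| * Example kernel command-line settings accepted by the parser above (see |
| * early_kvm_mode_cfg() for the exact constraints on each value): |
| * |
| * kvm-arm.mode=none # disable KVM entirely |
| * kvm-arm.mode=protected # protected (pKVM) mode, nVHE only |
| * kvm-arm.mode=nvhe # standard nVHE mode (non-VHE kernels only) |
| * kvm-arm.mode=nested # nested virtualisation (VHE kernels only) |
| */ |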
2934 | |
2935 | static int __init early_kvm_wfx_trap_policy_cfg(char *arg, enum kvm_wfx_trap_policy *p) |
2936 | { |
2937 | if (!arg) |
2938 | return -EINVAL; |
2939 | |
2940 | if (strcmp(arg, "trap") == 0) { |
2941 | *p = KVM_WFX_TRAP; |
2942 | return 0; |
2943 | } |
2944 | |
2945 | if (strcmp(arg, "notrap") == 0) { |
2946 | *p = KVM_WFX_NOTRAP; |
2947 | return 0; |
2948 | } |
2949 | |
2950 | return -EINVAL; |
2951 | } |
2952 | |
2953 | static int __init early_kvm_wfi_trap_policy_cfg(char *arg) |
2954 | { |
2955 | return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfi_trap_policy); |
2956 | } |
2957 | early_param("kvm-arm.wfi_trap_policy", early_kvm_wfi_trap_policy_cfg); |
2958 | |
2959 | static int __init early_kvm_wfe_trap_policy_cfg(char *arg) |
2960 | { |
2961 | return early_kvm_wfx_trap_policy_cfg(arg, &kvm_wfe_trap_policy); |
2962 | } |
2963 | early_param("kvm-arm.wfe_trap_policy", early_kvm_wfe_trap_policy_cfg); |
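| /* |
| * Example command-line usage for the two WFx trap policies parsed above: |
| * |
| * kvm-arm.wfi_trap_policy=trap |
| * kvm-arm.wfe_trap_policy=notrap |
| */ |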
2964 | |
2965 | enum kvm_mode kvm_get_mode(void) |
2966 | { |
2967 | return kvm_mode; |
2968 | } |
2969 | |
2970 | module_init(kvm_arm_init); |
2971 |