1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
4 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
5 | */ |
6 | |
7 | #include <linux/mman.h> |
8 | #include <linux/kvm_host.h> |
9 | #include <linux/io.h> |
10 | #include <linux/hugetlb.h> |
11 | #include <linux/sched/signal.h> |
12 | #include <trace/events/kvm.h> |
13 | #include <asm/pgalloc.h> |
14 | #include <asm/cacheflush.h> |
15 | #include <asm/kvm_arm.h> |
16 | #include <asm/kvm_mmu.h> |
17 | #include <asm/kvm_pgtable.h> |
18 | #include <asm/kvm_ras.h> |
19 | #include <asm/kvm_asm.h> |
20 | #include <asm/kvm_emulate.h> |
21 | #include <asm/virt.h> |
22 | |
23 | #include "trace.h" |
24 | |
25 | static struct kvm_pgtable *hyp_pgtable; |
26 | static DEFINE_MUTEX(kvm_hyp_pgd_mutex); |
27 | |
28 | static unsigned long __ro_after_init hyp_idmap_start; |
29 | static unsigned long __ro_after_init hyp_idmap_end; |
30 | static phys_addr_t __ro_after_init hyp_idmap_vector; |
31 | |
32 | static unsigned long __ro_after_init io_map_base; |
33 | |
34 | static phys_addr_t __stage2_range_addr_end(phys_addr_t addr, phys_addr_t end, |
35 | phys_addr_t size) |
36 | { |
37 | phys_addr_t boundary = ALIGN_DOWN(addr + size, size); |
38 | |
39 | return (boundary - 1 < end - 1) ? boundary : end; |
40 | } |
41 | |
42 | static phys_addr_t stage2_range_addr_end(phys_addr_t addr, phys_addr_t end) |
43 | { |
44 | phys_addr_t size = kvm_granule_size(KVM_PGTABLE_MIN_BLOCK_LEVEL); |
45 | |
46 | return __stage2_range_addr_end(addr, end, size); |
47 | } |
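
/*
 * Example (with a 2MiB step size): for addr = 0x40123000,
 * boundary = ALIGN_DOWN(0x40323000, 2MiB) = 0x40200000, so the walk
 * advances to the next 2MiB boundary or to @end, whichever comes first.
 * The "- 1" comparison keeps the result correct if addr + size wraps
 * around to 0.
 */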
48 | |
49 | /* |
50 | * Release kvm_mmu_lock periodically if the memory region is large. Otherwise, |
51 | * we may see kernel panics with CONFIG_DETECT_HUNG_TASK, |
52 | * CONFIG_LOCKUP_DETECTOR, CONFIG_LOCKDEP. Additionally, holding the lock too |
53 | * long will also starve other vCPUs. We also have to make sure that the page |
54 | * tables are not freed while we release the lock. |
55 | */ |
56 | static int stage2_apply_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, |
57 | phys_addr_t end, |
58 | int (*fn)(struct kvm_pgtable *, u64, u64), |
59 | bool resched) |
60 | { |
61 | struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); |
62 | int ret; |
63 | u64 next; |
64 | |
65 | do { |
66 | struct kvm_pgtable *pgt = mmu->pgt; |
67 | if (!pgt) |
68 | return -EINVAL; |
69 | |
70 | next = stage2_range_addr_end(addr, end); |
71 | ret = fn(pgt, addr, next - addr); |
72 | if (ret) |
73 | break; |
74 | |
75 | if (resched && next != end) |
76 | cond_resched_rwlock_write(&kvm->mmu_lock); |
77 | } while (addr = next, addr != end); |
78 | |
79 | return ret; |
80 | } |
81 | |
82 | #define stage2_apply_range_resched(mmu, addr, end, fn) \ |
83 | stage2_apply_range(mmu, addr, end, fn, true) |
84 | |
85 | /* |
86 | * Get the maximum number of page-table pages needed to split a range |
87 | * of blocks into PAGE_SIZE PTEs. It assumes the range is already |
88 | * mapped at level 2, or at level 1 if allowed. |
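 *
 * For example, with 4KiB pages a 1GiB range needs at most 1 + 512 = 513
 * pages: one level-2 table if the range is mapped as a level-1 block,
 * plus one level-3 table for each 2MiB block.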
89 | */ |
90 | static int kvm_mmu_split_nr_page_tables(u64 range) |
91 | { |
92 | int n = 0; |
93 | |
94 | if (KVM_PGTABLE_MIN_BLOCK_LEVEL < 2) |
95 | n += DIV_ROUND_UP(range, PUD_SIZE); |
96 | n += DIV_ROUND_UP(range, PMD_SIZE); |
97 | return n; |
98 | } |
99 | |
100 | static bool need_split_memcache_topup_or_resched(struct kvm *kvm) |
101 | { |
102 | struct kvm_mmu_memory_cache *cache; |
103 | u64 chunk_size, min; |
104 | |
105 | if (need_resched() || rwlock_needbreak(&kvm->mmu_lock)) |
106 | return true; |
107 | |
108 | chunk_size = kvm->arch.mmu.split_page_chunk_size; |
109 | min = kvm_mmu_split_nr_page_tables(chunk_size); |
110 | cache = &kvm->arch.mmu.split_page_cache; |
111 | return kvm_mmu_memory_cache_nr_free_objects(cache) < min; |
112 | } |
113 | |
114 | static int kvm_mmu_split_huge_pages(struct kvm *kvm, phys_addr_t addr, |
115 | phys_addr_t end) |
116 | { |
117 | struct kvm_mmu_memory_cache *cache; |
118 | struct kvm_pgtable *pgt; |
119 | int ret, cache_capacity; |
120 | u64 next, chunk_size; |
121 | |
122 | lockdep_assert_held_write(&kvm->mmu_lock); |
123 | |
124 | chunk_size = kvm->arch.mmu.split_page_chunk_size; |
125 | cache_capacity = kvm_mmu_split_nr_page_tables(chunk_size); |
126 | |
127 | if (chunk_size == 0) |
128 | return 0; |
129 | |
130 | cache = &kvm->arch.mmu.split_page_cache; |
131 | |
132 | do { |
133 | if (need_split_memcache_topup_or_resched(kvm)) { |
134 | write_unlock(&kvm->mmu_lock); |
135 | cond_resched(); |
136 | /* Eager page splitting is best-effort. */ |
137 | ret = __kvm_mmu_topup_memory_cache(cache, |
138 | cache_capacity, |
139 | cache_capacity); |
140 | write_lock(&kvm->mmu_lock); |
141 | if (ret) |
142 | break; |
143 | } |
144 | |
145 | pgt = kvm->arch.mmu.pgt; |
146 | if (!pgt) |
147 | return -EINVAL; |
148 | |
149 | next = __stage2_range_addr_end(addr, end, chunk_size); |
150 | ret = kvm_pgtable_stage2_split(pgt, addr, next - addr, cache); |
151 | if (ret) |
152 | break; |
153 | } while (addr = next, addr != end); |
154 | |
155 | return ret; |
156 | } |
157 | |
158 | static bool memslot_is_logging(struct kvm_memory_slot *memslot) |
159 | { |
160 | return memslot->dirty_bitmap && !(memslot->flags & KVM_MEM_READONLY); |
161 | } |
162 | |
163 | /** |
164 | * kvm_arch_flush_remote_tlbs() - flush all VM TLB entries for v7/8 |
165 | * @kvm: pointer to kvm structure. |
166 | * |
167 | * Interface to HYP function to flush all VM TLB entries |
168 | */ |
169 | int kvm_arch_flush_remote_tlbs(struct kvm *kvm) |
170 | { |
171 | kvm_call_hyp(__kvm_tlb_flush_vmid, &kvm->arch.mmu); |
172 | return 0; |
173 | } |
174 | |
175 | int kvm_arch_flush_remote_tlbs_range(struct kvm *kvm, |
176 | gfn_t gfn, u64 nr_pages) |
177 | { |
178 | kvm_tlb_flush_vmid_range(&kvm->arch.mmu, |
179 | gfn << PAGE_SHIFT, nr_pages << PAGE_SHIFT); |
180 | return 0; |
181 | } |
182 | |
183 | static bool kvm_is_device_pfn(unsigned long pfn) |
184 | { |
185 | return !pfn_is_map_memory(pfn); |
186 | } |
187 | |
188 | static void *stage2_memcache_zalloc_page(void *arg) |
189 | { |
190 | struct kvm_mmu_memory_cache *mc = arg; |
191 | void *virt; |
192 | |
193 | /* Allocated with __GFP_ZERO, so no need to zero */ |
194 | virt = kvm_mmu_memory_cache_alloc(mc); |
195 | if (virt) |
196 | kvm_account_pgtable_pages(virt, 1); |
197 | return virt; |
198 | } |
199 | |
200 | static void *kvm_host_zalloc_pages_exact(size_t size) |
201 | { |
202 | return alloc_pages_exact(size, GFP_KERNEL_ACCOUNT | __GFP_ZERO); |
203 | } |
204 | |
205 | static void *kvm_s2_zalloc_pages_exact(size_t size) |
206 | { |
207 | void *virt = kvm_host_zalloc_pages_exact(size); |
208 | |
209 | if (virt) |
210 | kvm_account_pgtable_pages(virt, (size >> PAGE_SHIFT)); |
211 | return virt; |
212 | } |
213 | |
214 | static void kvm_s2_free_pages_exact(void *virt, size_t size) |
215 | { |
216 | kvm_account_pgtable_pages(virt, -(size >> PAGE_SHIFT)); |
217 | free_pages_exact(virt, size); |
218 | } |
219 | |
220 | static struct kvm_pgtable_mm_ops kvm_s2_mm_ops; |
221 | |
222 | static void stage2_free_unlinked_table_rcu_cb(struct rcu_head *head) |
223 | { |
224 | struct page *page = container_of(head, struct page, rcu_head); |
225 | void *pgtable = page_to_virt(page); |
226 | s8 level = page_private(page); |
227 | |
228 | kvm_pgtable_stage2_free_unlinked(&kvm_s2_mm_ops, pgtable, level); |
229 | } |
230 | |
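/*
 * Freeing of an unlinked table is deferred until after an RCU grace
 * period, since concurrent (RCU-protected) table walkers may still be
 * traversing it. The table's level is stashed in page->private for the
 * RCU callback above.
 */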
231 | static void stage2_free_unlinked_table(void *addr, s8 level) |
232 | { |
233 | struct page *page = virt_to_page(addr); |
234 | |
235 | set_page_private(page, (unsigned long)level); |
236 | call_rcu(&page->rcu_head, stage2_free_unlinked_table_rcu_cb); |
237 | } |
238 | |
239 | static void kvm_host_get_page(void *addr) |
240 | { |
241 | get_page(virt_to_page(addr)); |
242 | } |
243 | |
244 | static void kvm_host_put_page(void *addr) |
245 | { |
246 | put_page(virt_to_page(addr)); |
247 | } |
248 | |
249 | static void kvm_s2_put_page(void *addr) |
250 | { |
251 | struct page *p = virt_to_page(addr); |
252 | /* Dropping last refcount, the page will be freed */ |
253 | if (page_count(p) == 1) |
254 | kvm_account_pgtable_pages(addr, -1); |
255 | put_page(p); |
256 | } |
257 | |
258 | static int kvm_host_page_count(void *addr) |
259 | { |
260 | return page_count(virt_to_page(addr)); |
261 | } |
262 | |
263 | static phys_addr_t kvm_host_pa(void *addr) |
264 | { |
265 | return __pa(addr); |
266 | } |
267 | |
268 | static void *kvm_host_va(phys_addr_t phys) |
269 | { |
270 | return __va(phys); |
271 | } |
272 | |
273 | static void clean_dcache_guest_page(void *va, size_t size) |
274 | { |
275 | __clean_dcache_guest_page(va, size); |
276 | } |
277 | |
278 | static void invalidate_icache_guest_page(void *va, size_t size) |
279 | { |
280 | __invalidate_icache_guest_page(va, size); |
281 | } |
282 | |
283 | /* |
284 | * Unmapping vs dcache management: |
285 | * |
286 | * If a guest maps certain memory pages as uncached, all writes will |
287 | * bypass the data cache and go directly to RAM. However, the CPUs |
288 | * can still speculate reads (not writes) and fill cache lines with |
289 | * data. |
290 | * |
291 | * Those cache lines will be *clean* cache lines though, so a |
292 | * clean+invalidate operation is equivalent to an invalidate |
293 | * operation, because no cache lines are marked dirty. |
294 | * |
295 | * Those clean cache lines could be filled prior to an uncached write |
296 | * by the guest, and the cache coherent IO subsystem would therefore |
297 | * end up writing old data to disk. |
298 | * |
299 | * This is why right after unmapping a page/section and invalidating |
300 | * the corresponding TLBs, we flush to make sure the IO subsystem will |
301 | * never hit in the cache. |
302 | * |
303 | * This is all avoided on systems that have ARM64_HAS_STAGE2_FWB, as |
304 | * we then fully enforce cacheability of RAM, no matter what the guest |
305 | * does. |
306 | */ |
307 | /** |
308 | * __unmap_stage2_range -- Clear stage2 page table entries to unmap a range |
309 | * @mmu: The KVM stage-2 MMU pointer |
310 | * @start: The intermediate physical base address of the range to unmap |
311 | * @size: The size of the area to unmap |
312 | * @may_block: Whether or not we are permitted to block |
313 | * |
314 | * Clear a range of stage-2 mappings, lowering the various ref-counts. Must |
315 | * be called while holding mmu_lock (unless for freeing the stage2 pgd before |
316 | * destroying the VM), otherwise another faulting VCPU may come in and mess |
317 | * with things behind our backs. |
318 | */ |
319 | static void __unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size, |
320 | bool may_block) |
321 | { |
322 | struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); |
323 | phys_addr_t end = start + size; |
324 | |
325 | lockdep_assert_held_write(&kvm->mmu_lock); |
326 | WARN_ON(size & ~PAGE_MASK); |
327 | WARN_ON(stage2_apply_range(mmu, start, end, kvm_pgtable_stage2_unmap, |
328 | may_block)); |
329 | } |
330 | |
331 | static void unmap_stage2_range(struct kvm_s2_mmu *mmu, phys_addr_t start, u64 size) |
332 | { |
333 | __unmap_stage2_range(mmu, start, size, true); |
334 | } |
335 | |
336 | static void stage2_flush_memslot(struct kvm *kvm, |
337 | struct kvm_memory_slot *memslot) |
338 | { |
339 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; |
340 | phys_addr_t end = addr + PAGE_SIZE * memslot->npages; |
341 | |
342 | stage2_apply_range_resched(&kvm->arch.mmu, addr, end, kvm_pgtable_stage2_flush); |
343 | } |
344 | |
345 | /** |
346 | * stage2_flush_vm - Invalidate cache for pages mapped in stage 2 |
347 | * @kvm: The struct kvm pointer |
348 | * |
349 | * Go through the stage 2 page tables and invalidate any cache lines |
350 | * backing memory already mapped to the VM. |
351 | */ |
352 | static void stage2_flush_vm(struct kvm *kvm) |
353 | { |
354 | struct kvm_memslots *slots; |
355 | struct kvm_memory_slot *memslot; |
356 | int idx, bkt; |
357 | |
358 | idx = srcu_read_lock(&kvm->srcu); |
359 | write_lock(&kvm->mmu_lock); |
360 | |
361 | slots = kvm_memslots(kvm); |
362 | kvm_for_each_memslot(memslot, bkt, slots) |
363 | stage2_flush_memslot(kvm, memslot); |
364 | |
365 | write_unlock(&kvm->mmu_lock); |
366 | srcu_read_unlock(&kvm->srcu, idx); |
367 | } |
368 | |
369 | /** |
370 | * free_hyp_pgds - free Hyp-mode page tables |
371 | */ |
372 | void __init free_hyp_pgds(void) |
373 | { |
374 | mutex_lock(&kvm_hyp_pgd_mutex); |
375 | if (hyp_pgtable) { |
376 | kvm_pgtable_hyp_destroy(hyp_pgtable); |
377 | kfree(hyp_pgtable); |
378 | hyp_pgtable = NULL; |
379 | } |
380 | mutex_unlock(&kvm_hyp_pgd_mutex); |
381 | } |
382 | |
383 | static bool kvm_host_owns_hyp_mappings(void) |
384 | { |
385 | if (is_kernel_in_hyp_mode()) |
386 | return false; |
387 | |
388 | if (static_branch_likely(&kvm_protected_mode_initialized)) |
389 | return false; |
390 | |
391 | /* |
392 | * This can happen at boot time when __create_hyp_mappings() is called |
393 | * after the hyp protection has been enabled, but the static key has |
394 | * not been flipped yet. |
395 | */ |
396 | if (!hyp_pgtable && is_protected_kvm_enabled()) |
397 | return false; |
398 | |
399 | WARN_ON(!hyp_pgtable); |
400 | |
401 | return true; |
402 | } |
403 | |
404 | int __create_hyp_mappings(unsigned long start, unsigned long size, |
405 | unsigned long phys, enum kvm_pgtable_prot prot) |
406 | { |
407 | int err; |
408 | |
409 | if (WARN_ON(!kvm_host_owns_hyp_mappings())) |
410 | return -EINVAL; |
411 | |
412 | mutex_lock(&kvm_hyp_pgd_mutex); |
413 | err = kvm_pgtable_hyp_map(hyp_pgtable, start, size, phys, prot); |
414 | mutex_unlock(&kvm_hyp_pgd_mutex); |
415 | |
416 | return err; |
417 | } |
418 | |
419 | static phys_addr_t kvm_kaddr_to_phys(void *kaddr) |
420 | { |
421 | if (!is_vmalloc_addr(kaddr)) { |
422 | BUG_ON(!virt_addr_valid(kaddr)); |
423 | return __pa(kaddr); |
424 | } else { |
425 | return page_to_phys(vmalloc_to_page(kaddr)) + |
426 | offset_in_page(kaddr); |
427 | } |
428 | } |
429 | |
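/*
 * Pages shared with the hypervisor are tracked in an rb-tree keyed by
 * pfn, with a per-pfn refcount so that a page is only unshared once its
 * last user goes away.
 */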
430 | struct hyp_shared_pfn { |
431 | u64 pfn; |
432 | int count; |
433 | struct rb_node node; |
434 | }; |
435 | |
436 | static DEFINE_MUTEX(hyp_shared_pfns_lock); |
437 | static struct rb_root hyp_shared_pfns = RB_ROOT; |
438 | |
439 | static struct hyp_shared_pfn *find_shared_pfn(u64 pfn, struct rb_node ***node, |
440 | struct rb_node **parent) |
441 | { |
442 | struct hyp_shared_pfn *this; |
443 | |
444 | *node = &hyp_shared_pfns.rb_node; |
445 | *parent = NULL; |
446 | while (**node) { |
447 | this = container_of(**node, struct hyp_shared_pfn, node); |
448 | *parent = **node; |
449 | if (this->pfn < pfn) |
450 | *node = &((**node)->rb_left); |
451 | else if (this->pfn > pfn) |
452 | *node = &((**node)->rb_right); |
453 | else |
454 | return this; |
455 | } |
456 | |
457 | return NULL; |
458 | } |
459 | |
460 | static int share_pfn_hyp(u64 pfn) |
461 | { |
462 | struct rb_node **node, *parent; |
463 | struct hyp_shared_pfn *this; |
464 | int ret = 0; |
465 | |
466 | mutex_lock(&hyp_shared_pfns_lock); |
467 | this = find_shared_pfn(pfn, &node, &parent); |
468 | if (this) { |
469 | this->count++; |
470 | goto unlock; |
471 | } |
472 | |
473 | this = kzalloc(sizeof(*this), GFP_KERNEL); |
474 | if (!this) { |
475 | ret = -ENOMEM; |
476 | goto unlock; |
477 | } |
478 | |
479 | this->pfn = pfn; |
480 | this->count = 1; |
481 | rb_link_node(&this->node, parent, node); |
482 | rb_insert_color(&this->node, &hyp_shared_pfns); |
483 | ret = kvm_call_hyp_nvhe(__pkvm_host_share_hyp, pfn, 1); |
484 | unlock: |
485 | mutex_unlock(&hyp_shared_pfns_lock); |
486 | |
487 | return ret; |
488 | } |
489 | |
490 | static int unshare_pfn_hyp(u64 pfn) |
491 | { |
492 | struct rb_node **node, *parent; |
493 | struct hyp_shared_pfn *this; |
494 | int ret = 0; |
495 | |
496 | mutex_lock(&hyp_shared_pfns_lock); |
497 | this = find_shared_pfn(pfn, &node, &parent); |
498 | if (WARN_ON(!this)) { |
499 | ret = -ENOENT; |
500 | goto unlock; |
501 | } |
502 | |
503 | this->count--; |
504 | if (this->count) |
505 | goto unlock; |
506 | |
507 | rb_erase(&this->node, &hyp_shared_pfns); |
508 | kfree(this); |
509 | ret = kvm_call_hyp_nvhe(__pkvm_host_unshare_hyp, pfn, 1); |
510 | unlock: |
511 | mutex_unlock(&hyp_shared_pfns_lock); |
512 | |
513 | return ret; |
514 | } |
515 | |
516 | int kvm_share_hyp(void *from, void *to) |
517 | { |
518 | phys_addr_t start, end, cur; |
519 | u64 pfn; |
520 | int ret; |
521 | |
522 | if (is_kernel_in_hyp_mode()) |
523 | return 0; |
524 | |
525 | /* |
526 | * The share hcall maps things in the 'fixed-offset' region of the hyp |
527 | * VA space, so we can only share physically contiguous data-structures |
528 | * for now. |
529 | */ |
530 | if (is_vmalloc_or_module_addr(from) || is_vmalloc_or_module_addr(to)) |
531 | return -EINVAL; |
532 | |
533 | if (kvm_host_owns_hyp_mappings()) |
534 | return create_hyp_mappings(from, to, PAGE_HYP); |
535 | |
536 | start = ALIGN_DOWN(__pa(from), PAGE_SIZE); |
537 | end = PAGE_ALIGN(__pa(to)); |
538 | for (cur = start; cur < end; cur += PAGE_SIZE) { |
539 | pfn = __phys_to_pfn(cur); |
540 | ret = share_pfn_hyp(pfn); |
541 | if (ret) |
542 | return ret; |
543 | } |
544 | |
545 | return 0; |
546 | } |
547 | |
548 | void kvm_unshare_hyp(void *from, void *to) |
549 | { |
550 | phys_addr_t start, end, cur; |
551 | u64 pfn; |
552 | |
553 | if (is_kernel_in_hyp_mode() || kvm_host_owns_hyp_mappings() || !from) |
554 | return; |
555 | |
556 | start = ALIGN_DOWN(__pa(from), PAGE_SIZE); |
557 | end = PAGE_ALIGN(__pa(to)); |
558 | for (cur = start; cur < end; cur += PAGE_SIZE) { |
559 | pfn = __phys_to_pfn(cur); |
560 | WARN_ON(unshare_pfn_hyp(pfn)); |
561 | } |
562 | } |
563 | |
564 | /** |
565 | * create_hyp_mappings - duplicate a kernel virtual address range in Hyp mode |
566 | * @from: The virtual kernel start address of the range |
567 | * @to: The virtual kernel end address of the range (exclusive) |
568 | * @prot: The protection to be applied to this range |
569 | * |
570 | * The same virtual address as the kernel virtual address is also used |
571 | * in Hyp-mode mapping (modulo HYP_PAGE_OFFSET) to the same underlying |
572 | * physical pages. |
573 | */ |
574 | int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot) |
575 | { |
576 | phys_addr_t phys_addr; |
577 | unsigned long virt_addr; |
578 | unsigned long start = kern_hyp_va((unsigned long)from); |
579 | unsigned long end = kern_hyp_va((unsigned long)to); |
580 | |
581 | if (is_kernel_in_hyp_mode()) |
582 | return 0; |
583 | |
584 | if (!kvm_host_owns_hyp_mappings()) |
585 | return -EPERM; |
586 | |
587 | start = start & PAGE_MASK; |
588 | end = PAGE_ALIGN(end); |
589 | |
590 | for (virt_addr = start; virt_addr < end; virt_addr += PAGE_SIZE) { |
591 | int err; |
592 | |
593 | phys_addr = kvm_kaddr_to_phys(from + virt_addr - start); |
594 | err = __create_hyp_mappings(virt_addr, PAGE_SIZE, phys_addr, |
595 | prot); |
596 | if (err) |
597 | return err; |
598 | } |
599 | |
600 | return 0; |
601 | } |
602 | |
603 | static int __hyp_alloc_private_va_range(unsigned long base) |
604 | { |
605 | lockdep_assert_held(&kvm_hyp_pgd_mutex); |
606 | |
607 | if (!PAGE_ALIGNED(base)) |
608 | return -EINVAL; |
609 | |
610 | /* |
611 | * Verify that BIT(VA_BITS - 1) hasn't been flipped by |
612 | * allocating the new area, as it would indicate we've |
613 | * overflowed the idmap/IO address range. |
614 | */ |
615 | if ((base ^ io_map_base) & BIT(VA_BITS - 1)) |
616 | return -ENOMEM; |
617 | |
618 | io_map_base = base; |
619 | |
620 | return 0; |
621 | } |
622 | |
623 | /** |
624 | * hyp_alloc_private_va_range - Allocates a private VA range. |
625 | * @size: The size of the VA range to reserve. |
626 | * @haddr: The hypervisor virtual start address of the allocation. |
627 | * |
628 | * The private virtual address (VA) range is allocated below io_map_base |
629 | * and aligned based on the order of @size. |
630 | * |
631 | * Return: 0 on success or negative error code on failure. |
632 | */ |
633 | int hyp_alloc_private_va_range(size_t size, unsigned long *haddr) |
634 | { |
635 | unsigned long base; |
636 | int ret = 0; |
637 | |
638 | mutex_lock(&kvm_hyp_pgd_mutex); |
639 | |
640 | /* |
641 | * This assumes that we have enough space below the idmap |
642 | * page to allocate our VAs. If not, the check in |
643 | * __hyp_alloc_private_va_range() will kick in. A potential |
644 | * alternative would be to detect that overflow and switch |
645 | * to an allocation above the idmap. |
646 | * |
647 | * The allocated size is always a multiple of PAGE_SIZE. |
648 | */ |
649 | size = PAGE_ALIGN(size); |
650 | base = io_map_base - size; |
651 | ret = __hyp_alloc_private_va_range(base); |
652 | |
653 | mutex_unlock(&kvm_hyp_pgd_mutex); |
654 | |
655 | if (!ret) |
656 | *haddr = base; |
657 | |
658 | return ret; |
659 | } |
660 | |
661 | static int __create_hyp_private_mapping(phys_addr_t phys_addr, size_t size, |
662 | unsigned long *haddr, |
663 | enum kvm_pgtable_prot prot) |
664 | { |
665 | unsigned long addr; |
666 | int ret = 0; |
667 | |
668 | if (!kvm_host_owns_hyp_mappings()) { |
669 | addr = kvm_call_hyp_nvhe(__pkvm_create_private_mapping, |
670 | phys_addr, size, prot); |
671 | if (IS_ERR_VALUE(addr)) |
672 | return addr; |
673 | *haddr = addr; |
674 | |
675 | return 0; |
676 | } |
677 | |
678 | size = PAGE_ALIGN(size + offset_in_page(phys_addr)); |
679 | ret = hyp_alloc_private_va_range(size, &addr); |
680 | if (ret) |
681 | return ret; |
682 | |
683 | ret = __create_hyp_mappings(addr, size, phys_addr, prot); |
684 | if (ret) |
685 | return ret; |
686 | |
687 | *haddr = addr + offset_in_page(phys_addr); |
688 | return ret; |
689 | } |
690 | |
691 | int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr) |
692 | { |
693 | unsigned long base; |
694 | size_t size; |
695 | int ret; |
696 | |
697 | mutex_lock(&kvm_hyp_pgd_mutex); |
698 | /* |
699 | * Efficient stack verification using the PAGE_SHIFT bit implies |
700 | * an alignment of our allocation on the order of the size. |
701 | */ |
702 | size = PAGE_SIZE * 2; |
703 | base = ALIGN_DOWN(io_map_base - size, size); |
704 | |
705 | ret = __hyp_alloc_private_va_range(base); |
706 | |
707 | mutex_unlock(&kvm_hyp_pgd_mutex); |
708 | |
709 | if (ret) { |
710 | kvm_err("Cannot allocate hyp stack guard page\n" ); |
711 | return ret; |
712 | } |
713 | |
714 | /* |
715 | * Since the stack grows downwards, map the stack to the page |
716 | * at the higher address and leave the lower guard page |
717 | * unbacked. |
718 | * |
719 | * Any valid stack address now has the PAGE_SHIFT bit as 1 |
720 | * and addresses corresponding to the guard page have the |
721 | * PAGE_SHIFT bit as 0 - this is used for overflow detection. |
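 *
 * For example, with 4KiB pages the allocation is two pages aligned to
 * 8KiB: the stack occupies [base + 0x1000, base + 0x2000), so bit 12 of
 * any valid hyp SP is set, while an overflow into the guard page at
 * [base, base + 0x1000) clears it.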
722 | */ |
723 | ret = __create_hyp_mappings(base + PAGE_SIZE, PAGE_SIZE, phys_addr, |
724 | PAGE_HYP); |
725 | if (ret) |
726 | kvm_err("Cannot map hyp stack\n" ); |
727 | |
728 | *haddr = base + size; |
729 | |
730 | return ret; |
731 | } |
732 | |
733 | /** |
734 | * create_hyp_io_mappings - Map IO into both kernel and HYP |
735 | * @phys_addr: The physical start address which gets mapped |
736 | * @size: Size of the region being mapped |
737 | * @kaddr: Kernel VA for this mapping |
738 | * @haddr: HYP VA for this mapping |
739 | */ |
740 | int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size, |
741 | void __iomem **kaddr, |
742 | void __iomem **haddr) |
743 | { |
744 | unsigned long addr; |
745 | int ret; |
746 | |
747 | if (is_protected_kvm_enabled()) |
748 | return -EPERM; |
749 | |
750 | *kaddr = ioremap(phys_addr, size); |
751 | if (!*kaddr) |
752 | return -ENOMEM; |
753 | |
754 | if (is_kernel_in_hyp_mode()) { |
755 | *haddr = *kaddr; |
756 | return 0; |
757 | } |
758 | |
759 | ret = __create_hyp_private_mapping(phys_addr, size, |
760 | &addr, PAGE_HYP_DEVICE); |
761 | if (ret) { |
762 | iounmap(*kaddr); |
763 | *kaddr = NULL; |
764 | *haddr = NULL; |
765 | return ret; |
766 | } |
767 | |
768 | *haddr = (void __iomem *)addr; |
769 | return 0; |
770 | } |
771 | |
772 | /** |
773 | * create_hyp_exec_mappings - Map an executable range into HYP |
774 | * @phys_addr: The physical start address which gets mapped |
775 | * @size: Size of the region being mapped |
776 | * @haddr: HYP VA for this mapping |
777 | */ |
778 | int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size, |
779 | void **haddr) |
780 | { |
781 | unsigned long addr; |
782 | int ret; |
783 | |
784 | BUG_ON(is_kernel_in_hyp_mode()); |
785 | |
786 | ret = __create_hyp_private_mapping(phys_addr, size, |
787 | &addr, PAGE_HYP_EXEC); |
788 | if (ret) { |
789 | *haddr = NULL; |
790 | return ret; |
791 | } |
792 | |
793 | *haddr = (void *)addr; |
794 | return 0; |
795 | } |
796 | |
797 | static struct kvm_pgtable_mm_ops kvm_user_mm_ops = { |
798 | /* We shouldn't need any other callback to walk the PT */ |
799 | .phys_to_virt = kvm_host_va, |
800 | }; |
801 | |
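/*
 * Build a temporary kvm_pgtable describing the current task's userspace
 * page tables so that the generic page-table walker can be reused to
 * find the size of the leaf mapping a given userspace address.
 */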
802 | static int get_user_mapping_size(struct kvm *kvm, u64 addr) |
803 | { |
804 | struct kvm_pgtable pgt = { |
805 | .pgd = (kvm_pteref_t)kvm->mm->pgd, |
806 | .ia_bits = vabits_actual, |
807 | .start_level = (KVM_PGTABLE_LAST_LEVEL - |
808 | ARM64_HW_PGTABLE_LEVELS(pgt.ia_bits) + 1), |
809 | .mm_ops = &kvm_user_mm_ops, |
810 | }; |
811 | unsigned long flags; |
812 | kvm_pte_t pte = 0; /* Keep GCC quiet... */ |
813 | s8 level = S8_MAX; |
814 | int ret; |
815 | |
816 | /* |
817 | * Disable IRQs so that we hazard against a concurrent |
818 | * teardown of the userspace page tables (which relies on |
819 | * IPI-ing threads). |
820 | */ |
821 | local_irq_save(flags); |
822 | ret = kvm_pgtable_get_leaf(&pgt, addr, &pte, &level); |
823 | local_irq_restore(flags); |
824 | |
825 | if (ret) |
826 | return ret; |
827 | |
828 | /* |
829 | * Not seeing an error, but not updating level? Something went |
830 | * deeply wrong... |
831 | */ |
832 | if (WARN_ON(level > KVM_PGTABLE_LAST_LEVEL)) |
833 | return -EFAULT; |
834 | if (WARN_ON(level < KVM_PGTABLE_FIRST_LEVEL)) |
835 | return -EFAULT; |
836 | |
837 | /* Oops, the userspace PTs are gone... Replay the fault */ |
838 | if (!kvm_pte_valid(pte)) |
839 | return -EAGAIN; |
840 | |
841 | return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(level)); |
842 | } |
843 | |
844 | static struct kvm_pgtable_mm_ops kvm_s2_mm_ops = { |
845 | .zalloc_page = stage2_memcache_zalloc_page, |
846 | .zalloc_pages_exact = kvm_s2_zalloc_pages_exact, |
847 | .free_pages_exact = kvm_s2_free_pages_exact, |
848 | .free_unlinked_table = stage2_free_unlinked_table, |
849 | .get_page = kvm_host_get_page, |
850 | .put_page = kvm_s2_put_page, |
851 | .page_count = kvm_host_page_count, |
852 | .phys_to_virt = kvm_host_va, |
853 | .virt_to_phys = kvm_host_pa, |
854 | .dcache_clean_inval_poc = clean_dcache_guest_page, |
855 | .icache_inval_pou = invalidate_icache_guest_page, |
856 | }; |
857 | |
858 | /** |
859 | * kvm_init_stage2_mmu - Initialise a S2 MMU structure |
860 | * @kvm: The pointer to the KVM structure |
861 | * @mmu: The pointer to the s2 MMU structure |
862 | * @type: The machine type of the virtual machine |
863 | * |
864 | * Allocates only the stage-2 HW PGD level table(s). |
865 | * Note we don't need locking here as this is only called when the VM is |
866 | * created, which can only be done once. |
867 | */ |
868 | int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type) |
869 | { |
870 | u32 kvm_ipa_limit = get_kvm_ipa_limit(); |
871 | int cpu, err; |
872 | struct kvm_pgtable *pgt; |
873 | u64 mmfr0, mmfr1; |
874 | u32 phys_shift; |
875 | |
876 | if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK) |
877 | return -EINVAL; |
878 | |
879 | phys_shift = KVM_VM_TYPE_ARM_IPA_SIZE(type); |
880 | if (is_protected_kvm_enabled()) { |
881 | phys_shift = kvm_ipa_limit; |
882 | } else if (phys_shift) { |
883 | if (phys_shift > kvm_ipa_limit || |
884 | phys_shift < ARM64_MIN_PARANGE_BITS) |
885 | return -EINVAL; |
886 | } else { |
887 | phys_shift = KVM_PHYS_SHIFT; |
888 | if (phys_shift > kvm_ipa_limit) { |
889 | pr_warn_once("%s using unsupported default IPA limit, upgrade your VMM\n" , |
890 | current->comm); |
891 | return -EINVAL; |
892 | } |
893 | } |
894 | |
895 | mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1); |
896 | mmfr1 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1); |
897 | mmu->vtcr = kvm_get_vtcr(mmfr0, mmfr1, phys_shift); |
898 | |
899 | if (mmu->pgt != NULL) { |
900 | kvm_err("kvm_arch already initialized?\n" ); |
901 | return -EINVAL; |
902 | } |
903 | |
904 | pgt = kzalloc(sizeof(*pgt), GFP_KERNEL_ACCOUNT); |
905 | if (!pgt) |
906 | return -ENOMEM; |
907 | |
908 | mmu->arch = &kvm->arch; |
909 | err = kvm_pgtable_stage2_init(pgt, mmu, &kvm_s2_mm_ops); |
910 | if (err) |
911 | goto out_free_pgtable; |
912 | |
913 | mmu->last_vcpu_ran = alloc_percpu(typeof(*mmu->last_vcpu_ran)); |
914 | if (!mmu->last_vcpu_ran) { |
915 | err = -ENOMEM; |
916 | goto out_destroy_pgtable; |
917 | } |
918 | |
919 | for_each_possible_cpu(cpu) |
920 | *per_cpu_ptr(mmu->last_vcpu_ran, cpu) = -1; |
921 | |
922 | /* The eager page splitting is disabled by default */ |
923 | mmu->split_page_chunk_size = KVM_ARM_EAGER_SPLIT_CHUNK_SIZE_DEFAULT; |
924 | mmu->split_page_cache.gfp_zero = __GFP_ZERO; |
925 | |
926 | mmu->pgt = pgt; |
927 | mmu->pgd_phys = __pa(pgt->pgd); |
928 | return 0; |
929 | |
930 | out_destroy_pgtable: |
931 | kvm_pgtable_stage2_destroy(pgt); |
932 | out_free_pgtable: |
933 | kfree(pgt); |
934 | return err; |
935 | } |
936 | |
937 | void kvm_uninit_stage2_mmu(struct kvm *kvm) |
938 | { |
939 | kvm_free_stage2_pgd(&kvm->arch.mmu); |
940 | kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache); |
941 | } |
942 | |
943 | static void stage2_unmap_memslot(struct kvm *kvm, |
944 | struct kvm_memory_slot *memslot) |
945 | { |
946 | hva_t hva = memslot->userspace_addr; |
947 | phys_addr_t addr = memslot->base_gfn << PAGE_SHIFT; |
948 | phys_addr_t size = PAGE_SIZE * memslot->npages; |
949 | hva_t reg_end = hva + size; |
950 | |
951 | /* |
952 | * A memory region could potentially cover multiple VMAs, and any holes |
953 | * between them, so iterate over all of them to find out if we should |
954 | * unmap any of them. |
955 | * |
956 | * +--------------------------------------------+ |
957 | * +---------------+----------------+ +----------------+ |
958 | * | : VMA 1 | VMA 2 | | VMA 3 : | |
959 | * +---------------+----------------+ +----------------+ |
960 | * | memory region | |
961 | * +--------------------------------------------+ |
962 | */ |
963 | do { |
964 | struct vm_area_struct *vma; |
965 | hva_t vm_start, vm_end; |
966 | |
967 | vma = find_vma_intersection(current->mm, hva, reg_end); |
968 | if (!vma) |
969 | break; |
970 | |
971 | /* |
972 | * Take the intersection of this VMA with the memory region |
973 | */ |
974 | vm_start = max(hva, vma->vm_start); |
975 | vm_end = min(reg_end, vma->vm_end); |
976 | |
977 | if (!(vma->vm_flags & VM_PFNMAP)) { |
978 | gpa_t gpa = addr + (vm_start - memslot->userspace_addr); |
979 | unmap_stage2_range(&kvm->arch.mmu, gpa, vm_end - vm_start); |
980 | } |
981 | hva = vm_end; |
982 | } while (hva < reg_end); |
983 | } |
984 | |
985 | /** |
986 | * stage2_unmap_vm - Unmap Stage-2 RAM mappings |
987 | * @kvm: The struct kvm pointer |
988 | * |
989 | * Go through the memregions and unmap any regular RAM |
990 | * backing memory already mapped to the VM. |
991 | */ |
992 | void stage2_unmap_vm(struct kvm *kvm) |
993 | { |
994 | struct kvm_memslots *slots; |
995 | struct kvm_memory_slot *memslot; |
996 | int idx, bkt; |
997 | |
998 | idx = srcu_read_lock(&kvm->srcu); |
999 | mmap_read_lock(current->mm); |
1000 | write_lock(&kvm->mmu_lock); |
1001 | |
1002 | slots = kvm_memslots(kvm); |
1003 | kvm_for_each_memslot(memslot, bkt, slots) |
1004 | stage2_unmap_memslot(kvm, memslot); |
1005 | |
1006 | write_unlock(&kvm->mmu_lock); |
1007 | mmap_read_unlock(current->mm); |
1008 | srcu_read_unlock(&kvm->srcu, idx); |
1009 | } |
1010 | |
1011 | void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu) |
1012 | { |
1013 | struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu); |
1014 | struct kvm_pgtable *pgt = NULL; |
1015 | |
1016 | write_lock(&kvm->mmu_lock); |
1017 | pgt = mmu->pgt; |
1018 | if (pgt) { |
1019 | mmu->pgd_phys = 0; |
1020 | mmu->pgt = NULL; |
1021 | free_percpu(mmu->last_vcpu_ran); |
1022 | } |
1023 | write_unlock(&kvm->mmu_lock); |
1024 | |
1025 | if (pgt) { |
1026 | kvm_pgtable_stage2_destroy(pgt); |
1027 | kfree(pgt); |
1028 | } |
1029 | } |
1030 | |
1031 | static void hyp_mc_free_fn(void *addr, void *unused) |
1032 | { |
1033 | free_page((unsigned long)addr); |
1034 | } |
1035 | |
1036 | static void *hyp_mc_alloc_fn(void *unused) |
1037 | { |
1038 | return (void *)__get_free_page(GFP_KERNEL_ACCOUNT); |
1039 | } |
1040 | |
1041 | void free_hyp_memcache(struct kvm_hyp_memcache *mc) |
1042 | { |
1043 | if (is_protected_kvm_enabled()) |
1044 | __free_hyp_memcache(mc, hyp_mc_free_fn, |
1045 | kvm_host_va, NULL); |
1046 | } |
1047 | |
1048 | int topup_hyp_memcache(struct kvm_hyp_memcache *mc, unsigned long min_pages) |
1049 | { |
1050 | if (!is_protected_kvm_enabled()) |
1051 | return 0; |
1052 | |
1053 | return __topup_hyp_memcache(mc, min_pages, hyp_mc_alloc_fn, |
1054 | kvm_host_pa, NULL); |
1055 | } |
1056 | |
1057 | /** |
1058 | * kvm_phys_addr_ioremap - map a device range to guest IPA |
1059 | * |
1060 | * @kvm: The KVM pointer |
1061 | * @guest_ipa: The IPA at which to insert the mapping |
1062 | * @pa: The physical address of the device |
1063 | * @size: The size of the mapping |
1064 | * @writable: Whether or not to create a writable mapping |
1065 | */ |
1066 | int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa, |
1067 | phys_addr_t pa, unsigned long size, bool writable) |
1068 | { |
1069 | phys_addr_t addr; |
1070 | int ret = 0; |
1071 | struct kvm_mmu_memory_cache cache = { .gfp_zero = __GFP_ZERO }; |
1072 | struct kvm_s2_mmu *mmu = &kvm->arch.mmu; |
1073 | struct kvm_pgtable *pgt = mmu->pgt; |
1074 | enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_DEVICE | |
1075 | KVM_PGTABLE_PROT_R | |
1076 | (writable ? KVM_PGTABLE_PROT_W : 0); |
1077 | |
1078 | if (is_protected_kvm_enabled()) |
1079 | return -EPERM; |
1080 | |
1081 | size += offset_in_page(guest_ipa); |
1082 | guest_ipa &= PAGE_MASK; |
1083 | |
1084 | for (addr = guest_ipa; addr < guest_ipa + size; addr += PAGE_SIZE) { |
1085 | ret = kvm_mmu_topup_memory_cache(&cache, |
1086 | kvm_mmu_cache_min_pages(mmu)); |
1087 | if (ret) |
1088 | break; |
1089 | |
1090 | write_lock(&kvm->mmu_lock); |
1091 | ret = kvm_pgtable_stage2_map(pgt, addr, PAGE_SIZE, pa, prot, |
1092 | &cache, 0); |
1093 | write_unlock(&kvm->mmu_lock); |
1094 | if (ret) |
1095 | break; |
1096 | |
1097 | pa += PAGE_SIZE; |
1098 | } |
1099 | |
1100 | kvm_mmu_free_memory_cache(&cache); |
1101 | return ret; |
1102 | } |
1103 | |
1104 | /** |
1105 | * stage2_wp_range() - write protect stage2 memory region range |
1106 | * @mmu: The KVM stage-2 MMU pointer |
1107 | * @addr: Start address of range |
1108 | * @end: End address of range |
1109 | */ |
1110 | static void stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end) |
1111 | { |
1112 | stage2_apply_range_resched(mmu, addr, end, kvm_pgtable_stage2_wrprotect); |
1113 | } |
1114 | |
1115 | /** |
1116 | * kvm_mmu_wp_memory_region() - write protect stage 2 entries for memory slot |
1117 | * @kvm: The KVM pointer |
1118 | * @slot: The memory slot to write protect |
1119 | * |
1120 | * Called to start logging dirty pages after memory region |
1121 | * KVM_MEM_LOG_DIRTY_PAGES operation is called. After this function returns |
1122 | * all present PUD, PMD and PTEs are write protected in the memory region. |
1123 | * Afterwards read of dirty page log can be called. |
1124 | * |
1125 | * Acquires kvm_mmu_lock. Called with kvm->slots_lock mutex acquired, |
1126 | * serializing operations for VM memory regions. |
1127 | */ |
1128 | static void kvm_mmu_wp_memory_region(struct kvm *kvm, int slot) |
1129 | { |
1130 | struct kvm_memslots *slots = kvm_memslots(kvm); |
1131 | struct kvm_memory_slot *memslot = id_to_memslot(slots, slot); |
1132 | phys_addr_t start, end; |
1133 | |
1134 | if (WARN_ON_ONCE(!memslot)) |
1135 | return; |
1136 | |
1137 | start = memslot->base_gfn << PAGE_SHIFT; |
1138 | end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; |
1139 | |
1140 | write_lock(&kvm->mmu_lock); |
1141 | stage2_wp_range(&kvm->arch.mmu, start, end); |
1142 | write_unlock(&kvm->mmu_lock); |
1143 | kvm_flush_remote_tlbs_memslot(kvm, memslot); |
1144 | } |
1145 | |
1146 | /** |
1147 | * kvm_mmu_split_memory_region() - split the stage 2 blocks into PAGE_SIZE |
1148 | * pages for memory slot |
1149 | * @kvm: The KVM pointer |
1150 | * @slot: The memory slot to split |
1151 | * |
1152 | * Acquires kvm->mmu_lock. Called with kvm->slots_lock mutex acquired, |
1153 | * serializing operations for VM memory regions. |
1154 | */ |
1155 | static void kvm_mmu_split_memory_region(struct kvm *kvm, int slot) |
1156 | { |
1157 | struct kvm_memslots *slots; |
1158 | struct kvm_memory_slot *memslot; |
1159 | phys_addr_t start, end; |
1160 | |
1161 | lockdep_assert_held(&kvm->slots_lock); |
1162 | |
1163 | slots = kvm_memslots(kvm); |
1164 | memslot = id_to_memslot(slots, slot); |
1165 | |
1166 | start = memslot->base_gfn << PAGE_SHIFT; |
1167 | end = (memslot->base_gfn + memslot->npages) << PAGE_SHIFT; |
1168 | |
1169 | write_lock(&kvm->mmu_lock); |
1170 | kvm_mmu_split_huge_pages(kvm, start, end); |
1171 | write_unlock(&kvm->mmu_lock); |
1172 | } |
1173 | |
1174 | /* |
1175 | * kvm_arch_mmu_enable_log_dirty_pt_masked() - enable dirty logging for selected pages. |
1176 | * @kvm: The KVM pointer |
1177 | * @slot: The memory slot associated with mask |
1178 | * @gfn_offset: The gfn offset in memory slot |
1179 | * @mask: The mask of pages at offset 'gfn_offset' in this memory |
1180 | * slot to enable dirty logging on |
1181 | * |
1182 | * Write-protects selected pages to enable dirty logging, and then |
1183 | * splits them to PAGE_SIZE. Caller must acquire kvm->mmu_lock. |
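 *
 * For example, mask == 0x6 (bits 1 and 2 set) covers gfns gfn_offset + 1
 * and gfn_offset + 2 relative to the slot's base gfn, since
 * __ffs(mask) == 1 and __fls(mask) == 2.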
1184 | */ |
1185 | void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm, |
1186 | struct kvm_memory_slot *slot, |
1187 | gfn_t gfn_offset, unsigned long mask) |
1188 | { |
1189 | phys_addr_t base_gfn = slot->base_gfn + gfn_offset; |
1190 | phys_addr_t start = (base_gfn + __ffs(mask)) << PAGE_SHIFT; |
1191 | phys_addr_t end = (base_gfn + __fls(mask) + 1) << PAGE_SHIFT; |
1192 | |
1193 | lockdep_assert_held_write(&kvm->mmu_lock); |
1194 | |
1195 | stage2_wp_range(&kvm->arch.mmu, start, end); |
1196 | |
1197 | /* |
1198 | * Eager-splitting is done when manual-protect is set. We |
1199 | * also check for initially-all-set because we can avoid |
1200 | * eager-splitting if initially-all-set is false. |
1201 | * Initially-all-set being false implies that huge-pages were |
1202 | * already split when enabling dirty logging: no need to do it |
1203 | * again. |
1204 | */ |
1205 | if (kvm_dirty_log_manual_protect_and_init_set(kvm)) |
1206 | kvm_mmu_split_huge_pages(kvm, start, end); |
1207 | } |
1208 | |
1209 | static void kvm_send_hwpoison_signal(unsigned long address, short lsb) |
1210 | { |
1211 | send_sig_mceerr(BUS_MCEERR_AR, (void __user *)address, lsb, current); |
1212 | } |
1213 | |
1214 | static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot, |
1215 | unsigned long hva, |
1216 | unsigned long map_size) |
1217 | { |
1218 | gpa_t gpa_start; |
1219 | hva_t uaddr_start, uaddr_end; |
1220 | size_t size; |
1221 | |
1222 | /* The memslot and the VMA are guaranteed to be aligned to PAGE_SIZE */ |
1223 | if (map_size == PAGE_SIZE) |
1224 | return true; |
1225 | |
1226 | size = memslot->npages * PAGE_SIZE; |
1227 | |
1228 | gpa_start = memslot->base_gfn << PAGE_SHIFT; |
1229 | |
1230 | uaddr_start = memslot->userspace_addr; |
1231 | uaddr_end = uaddr_start + size; |
1232 | |
1233 | /* |
1234 | * Pages belonging to memslots that don't have the same alignment |
1235 | * within a PMD/PUD for userspace and IPA cannot be mapped with stage-2 |
1236 | * PMD/PUD entries, because we'll end up mapping the wrong pages. |
1237 | * |
1238 | * Consider a layout like the following: |
1239 | * |
1240 | * memslot->userspace_addr: |
1241 | * +-----+--------------------+--------------------+---+ |
1242 | * |abcde|fgh Stage-1 block | Stage-1 block tv|xyz| |
1243 | * +-----+--------------------+--------------------+---+ |
1244 | * |
1245 | * memslot->base_gfn << PAGE_SHIFT: |
1246 | * +---+--------------------+--------------------+-----+ |
1247 | * |abc|def Stage-2 block | Stage-2 block |tvxyz| |
1248 | * +---+--------------------+--------------------+-----+ |
1249 | * |
1250 | * If we create those stage-2 blocks, we'll end up with this incorrect |
1251 | * mapping: |
1252 | * d -> f |
1253 | * e -> g |
1254 | * f -> h |
1255 | */ |
1256 | if ((gpa_start & (map_size - 1)) != (uaddr_start & (map_size - 1))) |
1257 | return false; |
1258 | |
1259 | /* |
1260 | * Next, let's make sure we're not trying to map anything not covered |
1261 | * by the memslot. This means we have to prohibit block size mappings |
1262 | * for the beginning and end of a non-block aligned and non-block sized |
1263 | * memory slot (illustrated by the head and tail parts of the |
1264 | * userspace view above containing pages 'abcde' and 'xyz', |
1265 | * respectively). |
1266 | * |
1267 | * Note that it doesn't matter if we do the check using the |
1268 | * userspace_addr or the base_gfn, as both are equally aligned (per |
1269 | * the check above) and equally sized. |
1270 | */ |
1271 | return (hva & ~(map_size - 1)) >= uaddr_start && |
1272 | (hva & ~(map_size - 1)) + map_size <= uaddr_end; |
1273 | } |
1274 | |
1275 | /* |
1276 | * Check if the given hva is backed by a transparent huge page (THP) and |
1277 | * whether it can be mapped using block mapping in stage2. If so, adjust |
1278 | * the stage2 PFN and IPA accordingly. Only PMD_SIZE THPs are currently |
1279 | * supported. This will need to be updated to support other THP sizes. |
1280 | * |
1281 | * Returns the size of the mapping. |
1282 | */ |
1283 | static long |
1284 | transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot, |
1285 | unsigned long hva, kvm_pfn_t *pfnp, |
1286 | phys_addr_t *ipap) |
1287 | { |
1288 | kvm_pfn_t pfn = *pfnp; |
1289 | |
1290 | /* |
1291 | * Make sure the adjustment is done only for THP pages. Also make |
1292 | * sure that the HVA and IPA are sufficiently aligned and that the |
1293 | * block map is contained within the memslot. |
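 *
 * When the backing is a PMD-sized THP, *ipap and *pfnp are aligned down
 * to the huge page boundary (2MiB with 4KiB pages) so that the whole
 * THP can be mapped with a single stage-2 block entry.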
1294 | */ |
1295 | if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) { |
1296 | int sz = get_user_mapping_size(kvm, hva); |
1297 | |
1298 | if (sz < 0) |
1299 | return sz; |
1300 | |
1301 | if (sz < PMD_SIZE) |
1302 | return PAGE_SIZE; |
1303 | |
1304 | *ipap &= PMD_MASK; |
1305 | pfn &= ~(PTRS_PER_PMD - 1); |
1306 | *pfnp = pfn; |
1307 | |
1308 | return PMD_SIZE; |
1309 | } |
1310 | |
1311 | /* Use page mapping if we cannot use block mapping. */ |
1312 | return PAGE_SIZE; |
1313 | } |
1314 | |
1315 | static int get_vma_page_shift(struct vm_area_struct *vma, unsigned long hva) |
1316 | { |
1317 | unsigned long pa; |
1318 | |
1319 | if (is_vm_hugetlb_page(vma) && !(vma->vm_flags & VM_PFNMAP)) |
1320 | return huge_page_shift(hstate_vma(vma)); |
1321 | |
1322 | if (!(vma->vm_flags & VM_PFNMAP)) |
1323 | return PAGE_SHIFT; |
1324 | |
1325 | VM_BUG_ON(is_vm_hugetlb_page(vma)); |
1326 | |
1327 | pa = (vma->vm_pgoff << PAGE_SHIFT) + (hva - vma->vm_start); |
1328 | |
1329 | #ifndef __PAGETABLE_PMD_FOLDED |
1330 | if ((hva & (PUD_SIZE - 1)) == (pa & (PUD_SIZE - 1)) && |
1331 | ALIGN_DOWN(hva, PUD_SIZE) >= vma->vm_start && |
1332 | ALIGN(hva, PUD_SIZE) <= vma->vm_end) |
1333 | return PUD_SHIFT; |
1334 | #endif |
1335 | |
1336 | if ((hva & (PMD_SIZE - 1)) == (pa & (PMD_SIZE - 1)) && |
1337 | ALIGN_DOWN(hva, PMD_SIZE) >= vma->vm_start && |
1338 | ALIGN(hva, PMD_SIZE) <= vma->vm_end) |
1339 | return PMD_SHIFT; |
1340 | |
1341 | return PAGE_SHIFT; |
1342 | } |
1343 | |
1344 | /* |
1345 | * The page will be mapped in stage 2 as Normal Cacheable, so the VM will be |
1346 | * able to see the page's tags and therefore they must be initialised first. If |
1347 | * PG_mte_tagged is set, tags have already been initialised. |
1348 | * |
1349 | * The race in the test/set of the PG_mte_tagged flag is handled by: |
1350 | * - preventing VM_SHARED mappings in a memslot with MTE preventing two VMs |
1351 | * racing to sanitise the same page |
1352 | * - mmap_lock protects between a VM faulting a page in and the VMM performing |
1353 | * an mprotect() to add VM_MTE |
1354 | */ |
1355 | static void sanitise_mte_tags(struct kvm *kvm, kvm_pfn_t pfn, |
1356 | unsigned long size) |
1357 | { |
1358 | unsigned long i, nr_pages = size >> PAGE_SHIFT; |
1359 | struct page *page = pfn_to_page(pfn); |
1360 | |
1361 | if (!kvm_has_mte(kvm)) |
1362 | return; |
1363 | |
1364 | for (i = 0; i < nr_pages; i++, page++) { |
1365 | if (try_page_mte_tagging(page)) { |
1366 | mte_clear_page_tags(page_address(page)); |
1367 | set_page_mte_tagged(page); |
1368 | } |
1369 | } |
1370 | } |
1371 | |
1372 | static bool kvm_vma_mte_allowed(struct vm_area_struct *vma) |
1373 | { |
1374 | return vma->vm_flags & VM_MTE_ALLOWED; |
1375 | } |
1376 | |
1377 | static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, |
1378 | struct kvm_memory_slot *memslot, unsigned long hva, |
1379 | bool fault_is_perm) |
1380 | { |
1381 | int ret = 0; |
1382 | bool write_fault, writable, force_pte = false; |
1383 | bool exec_fault, mte_allowed; |
1384 | bool device = false, vfio_allow_any_uc = false; |
1385 | unsigned long mmu_seq; |
1386 | struct kvm *kvm = vcpu->kvm; |
1387 | struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache; |
1388 | struct vm_area_struct *vma; |
1389 | short vma_shift; |
1390 | gfn_t gfn; |
1391 | kvm_pfn_t pfn; |
1392 | bool logging_active = memslot_is_logging(memslot); |
1393 | long vma_pagesize, fault_granule; |
1394 | enum kvm_pgtable_prot prot = KVM_PGTABLE_PROT_R; |
1395 | struct kvm_pgtable *pgt; |
1396 | |
1397 | if (fault_is_perm) |
1398 | fault_granule = kvm_vcpu_trap_get_perm_fault_granule(vcpu); |
1399 | write_fault = kvm_is_write_fault(vcpu); |
1400 | exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); |
1401 | VM_BUG_ON(write_fault && exec_fault); |
1402 | |
1403 | if (fault_is_perm && !write_fault && !exec_fault) { |
1404 | kvm_err("Unexpected L2 read permission error\n" ); |
1405 | return -EFAULT; |
1406 | } |
1407 | |
1408 | /* |
1409 | * Permission faults just need to update the existing leaf entry, |
1410 | * and so normally don't require allocations from the memcache. The |
1411 | * only exception to this is when dirty logging is enabled at runtime |
1412 | * and a write fault needs to collapse a block entry into a table. |
1413 | */ |
1414 | if (!fault_is_perm || (logging_active && write_fault)) { |
1415 | ret = kvm_mmu_topup_memory_cache(memcache, |
1416 | kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu)); |
1417 | if (ret) |
1418 | return ret; |
1419 | } |
1420 | |
1421 | /* |
1422 | * Let's check if we will get back a huge page backed by hugetlbfs, or |
1423 | * get block mapping for device MMIO region. |
1424 | */ |
1425 | mmap_read_lock(current->mm); |
1426 | vma = vma_lookup(current->mm, hva); |
1427 | if (unlikely(!vma)) { |
1428 | kvm_err("Failed to find VMA for hva 0x%lx\n" , hva); |
1429 | mmap_read_unlock(current->mm); |
1430 | return -EFAULT; |
1431 | } |
1432 | |
1433 | /* |
1434 | * logging_active is guaranteed to never be true for VM_PFNMAP |
1435 | * memslots. |
1436 | */ |
1437 | if (logging_active) { |
1438 | force_pte = true; |
1439 | vma_shift = PAGE_SHIFT; |
1440 | } else { |
1441 | vma_shift = get_vma_page_shift(vma, hva); |
1442 | } |
1443 | |
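/*
 * Fall back progressively if the preferred granule cannot be used: an
 * unusable PUD or a CONT_PMD hint degrades to PMD, and an unusable PMD
 * or a CONT_PTE hint degrades to a single page.
 */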
1444 | switch (vma_shift) { |
1445 | #ifndef __PAGETABLE_PMD_FOLDED |
1446 | case PUD_SHIFT: |
1447 | if (fault_supports_stage2_huge_mapping(memslot, hva, PUD_SIZE)) |
1448 | break; |
1449 | fallthrough; |
1450 | #endif |
1451 | case CONT_PMD_SHIFT: |
1452 | vma_shift = PMD_SHIFT; |
1453 | fallthrough; |
1454 | case PMD_SHIFT: |
1455 | if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE)) |
1456 | break; |
1457 | fallthrough; |
1458 | case CONT_PTE_SHIFT: |
1459 | vma_shift = PAGE_SHIFT; |
1460 | force_pte = true; |
1461 | fallthrough; |
1462 | case PAGE_SHIFT: |
1463 | break; |
1464 | default: |
1465 | WARN_ONCE(1, "Unknown vma_shift %d" , vma_shift); |
1466 | } |
1467 | |
1468 | vma_pagesize = 1UL << vma_shift; |
1469 | if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE) |
1470 | fault_ipa &= ~(vma_pagesize - 1); |
1471 | |
1472 | gfn = fault_ipa >> PAGE_SHIFT; |
1473 | mte_allowed = kvm_vma_mte_allowed(vma); |
1474 | |
1475 | vfio_allow_any_uc = vma->vm_flags & VM_ALLOW_ANY_UNCACHED; |
1476 | |
1477 | /* Don't use the VMA after the unlock -- it may have vanished */ |
1478 | vma = NULL; |
1479 | |
1480 | /* |
1481 | * Read mmu_invalidate_seq so that KVM can detect if the results of |
1482 | * vma_lookup() or __gfn_to_pfn_memslot() become stale prior to |
1483 | * acquiring kvm->mmu_lock. |
1484 | * |
1485 | * Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs |
1486 | * with the smp_wmb() in kvm_mmu_invalidate_end(). |
1487 | */ |
1488 | mmu_seq = vcpu->kvm->mmu_invalidate_seq; |
1489 | mmap_read_unlock(current->mm); |
1490 | |
1491 | pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL, |
1492 | write_fault, &writable, NULL); |
1493 | if (pfn == KVM_PFN_ERR_HWPOISON) { |
1494 | kvm_send_hwpoison_signal(hva, vma_shift); |
1495 | return 0; |
1496 | } |
1497 | if (is_error_noslot_pfn(pfn)) |
1498 | return -EFAULT; |
1499 | |
1500 | if (kvm_is_device_pfn(pfn)) { |
1501 | /* |
1502 | * If the page was identified as device early by looking at |
1503 | * the VMA flags, vma_pagesize is already representing the |
1504 | * largest quantity we can map. If instead it was mapped |
1505 | * via gfn_to_pfn_prot(), vma_pagesize is set to PAGE_SIZE |
1506 | * and must not be upgraded. |
1507 | * |
1508 | * In both cases, we don't let transparent_hugepage_adjust() |
1509 | * change things at the last minute. |
1510 | */ |
1511 | device = true; |
1512 | } else if (logging_active && !write_fault) { |
1513 | /* |
1514 | * Only actually map the page as writable if this was a write |
1515 | * fault. |
1516 | */ |
1517 | writable = false; |
1518 | } |
1519 | |
1520 | if (exec_fault && device) |
1521 | return -ENOEXEC; |
1522 | |
1523 | read_lock(&kvm->mmu_lock); |
1524 | pgt = vcpu->arch.hw_mmu->pgt; |
1525 | if (mmu_invalidate_retry(kvm, mmu_seq)) |
1526 | goto out_unlock; |
1527 | |
1528 | /* |
1529 | * If we are not forced to use page mapping, check if we are |
1530 | * backed by a THP and thus use block mapping if possible. |
1531 | */ |
1532 | if (vma_pagesize == PAGE_SIZE && !(force_pte || device)) { |
1533 | if (fault_is_perm && fault_granule > PAGE_SIZE) |
1534 | vma_pagesize = fault_granule; |
1535 | else |
1536 | vma_pagesize = transparent_hugepage_adjust(kvm, memslot, |
1537 | hva, &pfn, |
1538 | &fault_ipa); |
1539 | |
1540 | if (vma_pagesize < 0) { |
1541 | ret = vma_pagesize; |
1542 | goto out_unlock; |
1543 | } |
1544 | } |
1545 | |
1546 | if (!fault_is_perm && !device && kvm_has_mte(kvm)) { |
1547 | /* Check the VMM hasn't introduced a new disallowed VMA */ |
1548 | if (mte_allowed) { |
1549 | sanitise_mte_tags(kvm, pfn, vma_pagesize); |
1550 | } else { |
1551 | ret = -EFAULT; |
1552 | goto out_unlock; |
1553 | } |
1554 | } |
1555 | |
1556 | if (writable) |
1557 | prot |= KVM_PGTABLE_PROT_W; |
1558 | |
1559 | if (exec_fault) |
1560 | prot |= KVM_PGTABLE_PROT_X; |
1561 | |
1562 | if (device) { |
1563 | if (vfio_allow_any_uc) |
1564 | prot |= KVM_PGTABLE_PROT_NORMAL_NC; |
1565 | else |
1566 | prot |= KVM_PGTABLE_PROT_DEVICE; |
1567 | } else if (cpus_have_final_cap(ARM64_HAS_CACHE_DIC)) { |
1568 | prot |= KVM_PGTABLE_PROT_X; |
1569 | } |
1570 | |
1571 | /* |
1572 | * Under the premise of getting a FSC_PERM fault, we just need to relax |
1573 | * permissions only if vma_pagesize equals fault_granule. Otherwise, |
1574 | * kvm_pgtable_stage2_map() should be called to change block size. |
1575 | */ |
1576 | if (fault_is_perm && vma_pagesize == fault_granule) |
1577 | ret = kvm_pgtable_stage2_relax_perms(pgt, fault_ipa, prot); |
1578 | else |
1579 | ret = kvm_pgtable_stage2_map(pgt, fault_ipa, vma_pagesize, |
1580 | __pfn_to_phys(pfn), prot, |
1581 | memcache, |
1582 | KVM_PGTABLE_WALK_HANDLE_FAULT | |
1583 | KVM_PGTABLE_WALK_SHARED); |
1584 | |
1585 | /* Mark the page dirty only if the fault is handled successfully */ |
1586 | if (writable && !ret) { |
1587 | kvm_set_pfn_dirty(pfn); |
1588 | mark_page_dirty_in_slot(kvm, memslot, gfn); |
1589 | } |
1590 | |
1591 | out_unlock: |
1592 | read_unlock(&kvm->mmu_lock); |
1593 | kvm_release_pfn_clean(pfn); |
1594 | return ret != -EAGAIN ? ret : 0; |
1595 | } |
1596 | |
1597 | /* Resolve the access fault by making the page young again. */ |
1598 | static void handle_access_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa) |
1599 | { |
1600 | kvm_pte_t pte; |
1601 | struct kvm_s2_mmu *mmu; |
1602 | |
1603 | trace_kvm_access_fault(fault_ipa); |
1604 | |
1605 | read_lock(&vcpu->kvm->mmu_lock); |
1606 | mmu = vcpu->arch.hw_mmu; |
1607 | pte = kvm_pgtable_stage2_mkyoung(mmu->pgt, fault_ipa); |
1608 | read_unlock(&vcpu->kvm->mmu_lock); |
1609 | |
1610 | if (kvm_pte_valid(pte)) |
1611 | kvm_set_pfn_accessed(kvm_pte_to_pfn(pte)); |
1612 | } |
1613 | |
1614 | /** |
1615 | * kvm_handle_guest_abort - handles all 2nd stage aborts |
1616 | * @vcpu: the VCPU pointer |
1617 | * |
1618 | * Any abort that gets to the host is almost guaranteed to be caused by a |
1619 | * missing second stage translation table entry, which can mean that either the |
1620 | * guest simply needs more memory and we must allocate an appropriate page or it |
1621 | * can mean that the guest tried to access I/O memory, which is emulated by user |
1622 | * space. The distinction is based on the IPA causing the fault and whether this |
1623 | * memory region has been registered as standard RAM by user space. |
1624 | */ |
1625 | int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) |
1626 | { |
1627 | unsigned long esr; |
1628 | phys_addr_t fault_ipa; |
1629 | struct kvm_memory_slot *memslot; |
1630 | unsigned long hva; |
1631 | bool is_iabt, write_fault, writable; |
1632 | gfn_t gfn; |
1633 | int ret, idx; |
1634 | |
1635 | esr = kvm_vcpu_get_esr(vcpu); |
1636 | |
1637 | fault_ipa = kvm_vcpu_get_fault_ipa(vcpu); |
1638 | is_iabt = kvm_vcpu_trap_is_iabt(vcpu); |
1639 | |
1640 | if (esr_fsc_is_translation_fault(esr)) { |
1641 | /* Beyond sanitised PARange (which is the IPA limit) */ |
1642 | if (fault_ipa >= BIT_ULL(get_kvm_ipa_limit())) { |
1643 | kvm_inject_size_fault(vcpu); |
1644 | return 1; |
1645 | } |
1646 | |
1647 | /* Falls between the IPA range and the PARange? */ |
1648 | if (fault_ipa >= BIT_ULL(vcpu->arch.hw_mmu->pgt->ia_bits)) { |
1649 | fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0); |
1650 | |
1651 | if (is_iabt) |
1652 | kvm_inject_pabt(vcpu, fault_ipa); |
1653 | else |
1654 | kvm_inject_dabt(vcpu, fault_ipa); |
1655 | return 1; |
1656 | } |
1657 | } |
1658 | |
1659 | /* Synchronous External Abort? */ |
1660 | if (kvm_vcpu_abt_issea(vcpu)) { |
1661 | /* |
1662 | * For RAS the host kernel may handle this abort. |
1663 | * There is no need to pass the error into the guest. |
1664 | */ |
1665 | if (kvm_handle_guest_sea(fault_ipa, kvm_vcpu_get_esr(vcpu))) |
1666 | kvm_inject_vabt(vcpu); |
1667 | |
1668 | return 1; |
1669 | } |
1670 | |
1671 | trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_esr(vcpu), |
1672 | kvm_vcpu_get_hfar(vcpu), fault_ipa); |
1673 | |
1674 | /* Check the stage-2 fault is trans. fault or write fault */ |
1675 | if (!esr_fsc_is_translation_fault(esr) && |
1676 | !esr_fsc_is_permission_fault(esr) && |
1677 | !esr_fsc_is_access_flag_fault(esr)) { |
1678 | kvm_err("Unsupported FSC: EC=%#x xFSC=%#lx ESR_EL2=%#lx\n" , |
1679 | kvm_vcpu_trap_get_class(vcpu), |
1680 | (unsigned long)kvm_vcpu_trap_get_fault(vcpu), |
1681 | (unsigned long)kvm_vcpu_get_esr(vcpu)); |
1682 | return -EFAULT; |
1683 | } |
1684 | |
1685 | idx = srcu_read_lock(&vcpu->kvm->srcu); |
1686 | |
	gfn = fault_ipa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(vcpu->kvm, gfn);
	hva = gfn_to_hva_memslot_prot(memslot, gfn, &writable);
	write_fault = kvm_is_write_fault(vcpu);
	if (kvm_is_error_hva(hva) || (write_fault && !writable)) {
		/*
		 * The guest has put either its instructions or its page-tables
		 * somewhere it shouldn't have. Userspace won't be able to do
		 * anything about this (there's no syndrome for a start), so
		 * re-inject the abort back into the guest.
		 */
		if (is_iabt) {
			ret = -ENOEXEC;
			goto out;
		}

		if (kvm_vcpu_abt_iss1tw(vcpu)) {
			kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		/*
		 * Check for a cache maintenance operation. Since we
		 * ended-up here, we know it is outside of any memory
		 * slot. But we can't find out if that is for a device,
		 * or if the guest is just being stupid. The only thing
		 * we know for sure is that this range cannot be cached.
		 *
		 * So let's assume that the guest is just being
		 * cautious, and skip the instruction.
		 */
		if (kvm_is_error_hva(hva) && kvm_vcpu_dabt_is_cm(vcpu)) {
			kvm_incr_pc(vcpu);
			ret = 1;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, fault_ipa);
		goto out_unlock;
	}

	/* Userspace should not be able to register out-of-bounds IPAs */
	VM_BUG_ON(fault_ipa >= kvm_phys_size(vcpu->arch.hw_mmu));

	if (esr_fsc_is_access_flag_fault(esr)) {
		handle_access_fault(vcpu, fault_ipa);
		ret = 1;
		goto out_unlock;
	}

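	/*
	 * Everything else is a genuine stage-2 fault on memslot-backed
	 * memory; let user_mem_abort() resolve it.
	 */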
	ret = user_mem_abort(vcpu, fault_ipa, memslot, hva,
			     esr_fsc_is_permission_fault(esr));
	if (ret == 0)
		ret = 1;
out:
	if (ret == -ENOEXEC) {
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		ret = 1;
	}
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}

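/*
 * MMU notifier callback: tear down the stage-2 mappings covering the given
 * gfn range. Called with kvm->mmu_lock held for write.
 */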
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
{
	if (!kvm->arch.mmu.pgt)
		return false;

	__unmap_stage2_range(&kvm->arch.mmu, range->start << PAGE_SHIFT,
			     (range->end - range->start) << PAGE_SHIFT,
			     range->may_block);

	return false;
}

bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	kvm_pfn_t pfn = pte_pfn(range->arg.pte);

	if (!kvm->arch.mmu.pgt)
		return false;

	WARN_ON(range->end - range->start != 1);

	/*
	 * If the page isn't tagged, defer to user_mem_abort() for sanitising
	 * the MTE tags. The S2 pte should have been unmapped by
	 * mmu_notifier_invalidate_range_end().
	 */
	if (kvm_has_mte(kvm) && !page_mte_tagged(pfn_to_page(pfn)))
		return false;

	/*
	 * We've moved a page around, probably through CoW, so let's treat
	 * it just like a translation fault and the map handler will clean
	 * the cache to the PoC.
	 *
	 * The MMU notifiers will have unmapped a huge PMD before calling
	 * ->change_pte() (which in turn calls kvm_set_spte_gfn()) and
	 * therefore we never need to clear out a huge PMD through this
	 * calling path and a memcache is not required.
	 */
	kvm_pgtable_stage2_map(kvm->arch.mmu.pgt, range->start << PAGE_SHIFT,
			       PAGE_SIZE, __pfn_to_phys(pfn),
			       KVM_PGTABLE_PROT_R, NULL, 0);

	return false;
}

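/*
 * MMU notifier ageing callbacks: kvm_age_gfn() clears the stage-2 access
 * flag over the range and reports whether it was set, while
 * kvm_test_age_gfn() only tests it without clearing.
 */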
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.mmu.pgt)
		return false;

	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
						   range->start << PAGE_SHIFT,
						   size, true);
}

bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
{
	u64 size = (range->end - range->start) << PAGE_SHIFT;

	if (!kvm->arch.mmu.pgt)
		return false;

	return kvm_pgtable_stage2_test_clear_young(kvm->arch.mmu.pgt,
						   range->start << PAGE_SHIFT,
						   size, false);
}

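/* Physical address of the hyp page-table root, for programming the EL2 TTBR. */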
phys_addr_t kvm_mmu_get_httbr(void)
{
	return __pa(hyp_pgtable->pgd);
}

phys_addr_t kvm_get_idmap_vector(void)
{
	return hyp_idmap_vector;
}

static int kvm_map_idmap_text(void)
{
	unsigned long size = hyp_idmap_end - hyp_idmap_start;
	int err = __create_hyp_mappings(hyp_idmap_start, size, hyp_idmap_start,
					PAGE_HYP_EXEC);
	if (err)
		kvm_err("Failed to idmap %lx-%lx\n",
			hyp_idmap_start, hyp_idmap_end);

	return err;
}

static void *kvm_hyp_zalloc_page(void *arg)
{
	return (void *)get_zeroed_page(GFP_KERNEL);
}

static struct kvm_pgtable_mm_ops kvm_hyp_mm_ops = {
	.zalloc_page	= kvm_hyp_zalloc_page,
	.get_page	= kvm_host_get_page,
	.put_page	= kvm_host_put_page,
	.phys_to_virt	= kvm_host_va,
	.virt_to_phys	= kvm_host_pa,
};

int __init kvm_mmu_init(u32 *hyp_va_bits)
{
	int err;
	u32 idmap_bits;
	u32 kernel_bits;

	hyp_idmap_start = __pa_symbol(__hyp_idmap_text_start);
	hyp_idmap_start = ALIGN_DOWN(hyp_idmap_start, PAGE_SIZE);
	hyp_idmap_end = __pa_symbol(__hyp_idmap_text_end);
	hyp_idmap_end = ALIGN(hyp_idmap_end, PAGE_SIZE);
	hyp_idmap_vector = __pa_symbol(__kvm_hyp_init);

	/*
	 * We rely on the linker script to ensure at build time that the HYP
	 * init code does not cross a page boundary.
	 */
	BUG_ON((hyp_idmap_start ^ (hyp_idmap_end - 1)) & PAGE_MASK);

	/*
	 * The ID map is always configured for 48 bits of translation, which
	 * may be fewer than the number of VA bits used by the regular kernel
	 * stage 1, when VA_BITS=52.
	 *
	 * At EL2, there is only one TTBR register, and we can't switch between
	 * translation tables *and* update TCR_EL2.T0SZ at the same time. Bottom
	 * line: we need to use the extended range with *both* our translation
	 * tables.
	 *
	 * So use the maximum of the idmap VA bits and the regular kernel stage
	 * 1 VA bits to assure that the hypervisor can both ID map its code page
	 * and map any kernel memory.
	 */
	idmap_bits = IDMAP_VA_BITS;
	kernel_bits = vabits_actual;
	*hyp_va_bits = max(idmap_bits, kernel_bits);

	kvm_debug("Using %u-bit virtual addresses at EL2\n", *hyp_va_bits);
	kvm_debug("IDMAP page: %lx\n", hyp_idmap_start);
	kvm_debug("HYP VA range: %lx:%lx\n",
		  kern_hyp_va(PAGE_OFFSET),
		  kern_hyp_va((unsigned long)high_memory - 1));

	if (hyp_idmap_start >= kern_hyp_va(PAGE_OFFSET) &&
	    hyp_idmap_start < kern_hyp_va((unsigned long)high_memory - 1) &&
	    hyp_idmap_start != (unsigned long)__hyp_idmap_text_start) {
		/*
		 * The idmap page is intersecting with the VA space,
		 * it is not safe to continue further.
		 */
		kvm_err("IDMAP intersecting with HYP VA, unable to continue\n");
		err = -EINVAL;
		goto out;
	}

	hyp_pgtable = kzalloc(sizeof(*hyp_pgtable), GFP_KERNEL);
	if (!hyp_pgtable) {
		kvm_err("Hyp mode page-table not allocated\n");
		err = -ENOMEM;
		goto out;
	}

	err = kvm_pgtable_hyp_init(hyp_pgtable, *hyp_va_bits, &kvm_hyp_mm_ops);
	if (err)
		goto out_free_pgtable;

	err = kvm_map_idmap_text();
	if (err)
		goto out_destroy_pgtable;

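	/*
	 * io_map_base seeds the EL2 private VA allocator; run-time IO
	 * mappings are handed out below the idmap page.
	 */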
	io_map_base = hyp_idmap_start;
	return 0;

out_destroy_pgtable:
	kvm_pgtable_hyp_destroy(hyp_pgtable);
out_free_pgtable:
	kfree(hyp_pgtable);
	hyp_pgtable = NULL;
out:
	return err;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	bool log_dirty_pages = new && new->flags & KVM_MEM_LOG_DIRTY_PAGES;

	/*
	 * At this point memslot has been committed and there is an
	 * allocated dirty_bitmap[], dirty pages will be tracked while the
	 * memory slot is write protected.
	 */
	if (log_dirty_pages) {

		if (change == KVM_MR_DELETE)
			return;

		/*
		 * Huge and normal pages are write-protected and split
		 * on either of these two cases:
		 *
		 * 1. with initial-all-set: gradually with CLEAR ioctls,
		 */
		if (kvm_dirty_log_manual_protect_and_init_set(kvm))
			return;
		/*
		 * or
		 * 2. without initial-all-set: all in one shot when
		 *    enabling dirty logging.
		 */
		kvm_mmu_wp_memory_region(kvm, new->id);
		kvm_mmu_split_memory_region(kvm, new->id);
	} else {
		/*
		 * Free any leftovers from the eager page splitting cache. Do
		 * this when deleting, moving, disabling dirty logging, or
		 * creating the memslot (a nop). Doing it for deletes makes
		 * sure we don't leak memory, and there's no need to keep the
		 * cache around for any of the other cases.
		 */
		kvm_mmu_free_memory_cache(&kvm->arch.mmu.split_page_cache);
	}
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   const struct kvm_memory_slot *old,
				   struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	hva_t hva, reg_end;
	int ret = 0;

	if (change != KVM_MR_CREATE && change != KVM_MR_MOVE &&
	    change != KVM_MR_FLAGS_ONLY)
		return 0;

	/*
	 * Prevent userspace from creating a memory region outside of the IPA
	 * space addressable by the guest.
	 */
	if ((new->base_gfn + new->npages) > (kvm_phys_size(&kvm->arch.mmu) >> PAGE_SHIFT))
		return -EFAULT;

	hva = new->userspace_addr;
	reg_end = hva + (new->npages << PAGE_SHIFT);

	mmap_read_lock(current->mm);
	/*
	 * A memory region could potentially cover multiple VMAs, and any holes
	 * between them, so iterate over all of them.
	 *
	 *     +--------------------------------------------+
	 * +---------------+----------------+   +----------------+
	 * |   : VMA 1     |      VMA 2     |   |    VMA 3  :    |
	 * +---------------+----------------+   +----------------+
	 *     |               memory region                |
	 *     +--------------------------------------------+
	 */
	do {
		struct vm_area_struct *vma;

		vma = find_vma_intersection(current->mm, hva, reg_end);
		if (!vma)
			break;

		if (kvm_has_mte(kvm) && !kvm_vma_mte_allowed(vma)) {
			ret = -EINVAL;
			break;
		}

		if (vma->vm_flags & VM_PFNMAP) {
			/* IO region dirty page logging not allowed */
			if (new->flags & KVM_MEM_LOG_DIRTY_PAGES) {
				ret = -EINVAL;
				break;
			}
		}
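		/* Move on to the next VMA intersecting the region, if any. */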
		hva = min(reg_end, vma->vm_end);
	} while (hva < reg_end);

	mmap_read_unlock(current->mm);
	return ret;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot)
{
}

void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen)
{
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	kvm_uninit_stage2_mmu(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	gpa_t gpa = slot->base_gfn << PAGE_SHIFT;
	phys_addr_t size = slot->npages << PAGE_SHIFT;

	write_lock(&kvm->mmu_lock);
	unmap_stage2_range(&kvm->arch.mmu, gpa, size);
	write_unlock(&kvm->mmu_lock);
}

/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 *
 * Main problems:
 * - S/W ops are local to a CPU (not broadcast)
 * - We have line migration behind our back (speculation)
 * - System caches don't support S/W at all (damn!)
 *
 * In the face of the above, the best we can do is to try and convert
 * S/W ops to VA ops. Because the guest is not allowed to infer the
 * S/W to PA mapping, it can only use S/W to nuke the whole cache,
 * which is a rather good thing for us.
 *
 * Also, it is only used when turning caches on/off ("The expected
 * usage of the cache maintenance instructions that operate by set/way
 * is associated with the cache maintenance instructions associated
 * with the powerdown and powerup of caches, if this is required by
 * the implementation.").
 *
 * We use the following policy:
 *
 * - If we trap a S/W operation, we enable VM trapping to detect
 *   caches being turned on/off, and do a full clean.
 *
 * - We flush the caches on both caches being turned on and off.
 *
 * - Once the caches are enabled, we stop trapping VM ops.
 */
void kvm_set_way_flush(struct kvm_vcpu *vcpu)
{
	unsigned long hcr = *vcpu_hcr(vcpu);

	/*
	 * If this is the first time we do a S/W operation
	 * (i.e. HCR_TVM not set) flush the whole memory, and set the
	 * VM trapping.
	 *
	 * Otherwise, rely on the VM trapping to wait for the MMU +
	 * Caches to be turned off. At that point, we'll be able to
	 * clean the caches again.
	 */
	if (!(hcr & HCR_TVM)) {
		trace_kvm_set_way_flush(*vcpu_pc(vcpu),
					vcpu_has_cache_enabled(vcpu));
		stage2_flush_vm(vcpu->kvm);
		*vcpu_hcr(vcpu) = hcr | HCR_TVM;
	}
}

void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled)
{
	bool now_enabled = vcpu_has_cache_enabled(vcpu);

	/*
	 * If switching the MMU+caches on, need to invalidate the caches.
	 * If switching it off, need to clean the caches.
	 * Clean + invalidate does the trick always.
	 */
	if (now_enabled != was_enabled)
		stage2_flush_vm(vcpu->kvm);

	/* Caches are now on, stop trapping VM ops (until a S/W op) */
	if (now_enabled)
		*vcpu_hcr(vcpu) &= ~HCR_TVM;

	trace_kvm_toggle_cache(*vcpu_pc(vcpu), was_enabled, now_enabled);
}