1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | /* |
3 | * |
4 | * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com> |
5 | */ |
6 | |
7 | #include <linux/types.h> |
8 | #include <linux/string.h> |
9 | #include <linux/kvm.h> |
10 | #include <linux/kvm_host.h> |
11 | #include <linux/highmem.h> |
12 | #include <linux/gfp.h> |
13 | #include <linux/slab.h> |
14 | #include <linux/hugetlb.h> |
15 | #include <linux/vmalloc.h> |
16 | #include <linux/srcu.h> |
17 | #include <linux/anon_inodes.h> |
18 | #include <linux/file.h> |
19 | #include <linux/debugfs.h> |
20 | |
21 | #include <asm/kvm_ppc.h> |
22 | #include <asm/kvm_book3s.h> |
23 | #include <asm/book3s/64/mmu-hash.h> |
24 | #include <asm/hvcall.h> |
25 | #include <asm/synch.h> |
26 | #include <asm/ppc-opcode.h> |
27 | #include <asm/cputable.h> |
28 | #include <asm/pte-walk.h> |
29 | |
30 | #include "book3s.h" |
31 | #include "book3s_hv.h" |
32 | #include "trace_hv.h" |
33 | |
34 | //#define DEBUG_RESIZE_HPT 1 |
35 | |
36 | #ifdef DEBUG_RESIZE_HPT |
37 | #define resize_hpt_debug(resize, ...) \ |
38 | do { \ |
39 | printk(KERN_DEBUG "RESIZE HPT %p: ", resize); \ |
40 | printk(__VA_ARGS__); \ |
41 | } while (0) |
42 | #else |
43 | #define resize_hpt_debug(resize, ...) \ |
44 | do { } while (0) |
45 | #endif |
46 | |
47 | static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, |
48 | long pte_index, unsigned long pteh, |
49 | unsigned long ptel, unsigned long *pte_idx_ret); |
50 | |
51 | struct kvm_resize_hpt { |
	/* These fields are read-only after init */
53 | struct kvm *kvm; |
54 | struct work_struct work; |
55 | u32 order; |
56 | |
57 | /* These fields protected by kvm->arch.mmu_setup_lock */ |
58 | |
59 | /* Possible values and their usage: |
60 | * <0 an error occurred during allocation, |
	 *  -EBUSY allocation is in progress,
62 | * 0 allocation made successfully. |
63 | */ |
64 | int error; |
65 | |
66 | /* Private to the work thread, until error != -EBUSY, |
67 | * then protected by kvm->arch.mmu_setup_lock. |
68 | */ |
69 | struct kvm_hpt_info hpt; |
70 | }; |
71 | |
72 | int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order) |
73 | { |
74 | unsigned long hpt = 0; |
75 | int cma = 0; |
76 | struct page *page = NULL; |
77 | struct revmap_entry *rev; |
78 | unsigned long npte; |
79 | |
80 | if ((order < PPC_MIN_HPT_ORDER) || (order > PPC_MAX_HPT_ORDER)) |
81 | return -EINVAL; |
82 | |
83 | page = kvm_alloc_hpt_cma(1ul << (order - PAGE_SHIFT)); |
84 | if (page) { |
85 | hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page)); |
86 | memset((void *)hpt, 0, (1ul << order)); |
87 | cma = 1; |
88 | } |
89 | |
90 | if (!hpt) |
91 | hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_RETRY_MAYFAIL |
				       |__GFP_NOWARN, order - PAGE_SHIFT);
93 | |
94 | if (!hpt) |
95 | return -ENOMEM; |
96 | |
97 | /* HPTEs are 2**4 bytes long */ |
98 | npte = 1ul << (order - 4); |
99 | |
100 | /* Allocate reverse map array */ |
101 | rev = vmalloc(array_size(npte, sizeof(struct revmap_entry))); |
102 | if (!rev) { |
103 | if (cma) |
104 | kvm_free_hpt_cma(page, 1 << (order - PAGE_SHIFT)); |
105 | else |
			free_pages(hpt, order - PAGE_SHIFT);
107 | return -ENOMEM; |
108 | } |
109 | |
110 | info->order = order; |
111 | info->virt = hpt; |
112 | info->cma = cma; |
113 | info->rev = rev; |
114 | |
115 | return 0; |
116 | } |
117 | |
118 | void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info) |
119 | { |
	atomic64_set(&kvm->arch.mmio_update, 0);
121 | kvm->arch.hpt = *info; |
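	/*
	 * The low-order bits of SDR1 hold HTABSIZE, i.e. log2 of the HPT
	 * size in bytes minus 18 (the minimum HPT size is 2^18 = 256kB).
	 */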
122 | kvm->arch.sdr1 = __pa(info->virt) | (info->order - 18); |
123 | |
124 | pr_debug("KVM guest htab at %lx (order %ld), LPID %llx\n" , |
125 | info->virt, (long)info->order, kvm->arch.lpid); |
126 | } |
127 | |
128 | int kvmppc_alloc_reset_hpt(struct kvm *kvm, int order) |
129 | { |
130 | int err = -EBUSY; |
131 | struct kvm_hpt_info info; |
132 | |
133 | mutex_lock(&kvm->arch.mmu_setup_lock); |
134 | if (kvm->arch.mmu_ready) { |
135 | kvm->arch.mmu_ready = 0; |
136 | /* order mmu_ready vs. vcpus_running */ |
137 | smp_mb(); |
		if (atomic_read(&kvm->arch.vcpus_running)) {
139 | kvm->arch.mmu_ready = 1; |
140 | goto out; |
141 | } |
142 | } |
143 | if (kvm_is_radix(kvm)) { |
144 | err = kvmppc_switch_mmu_to_hpt(kvm); |
145 | if (err) |
146 | goto out; |
147 | } |
148 | |
149 | if (kvm->arch.hpt.order == order) { |
150 | /* We already have a suitable HPT */ |
151 | |
152 | /* Set the entire HPT to 0, i.e. invalid HPTEs */ |
153 | memset((void *)kvm->arch.hpt.virt, 0, 1ul << order); |
154 | /* |
155 | * Reset all the reverse-mapping chains for all memslots |
156 | */ |
157 | kvmppc_rmap_reset(kvm); |
158 | err = 0; |
159 | goto out; |
160 | } |
161 | |
162 | if (kvm->arch.hpt.virt) { |
163 | kvmppc_free_hpt(&kvm->arch.hpt); |
164 | kvmppc_rmap_reset(kvm); |
165 | } |
166 | |
	err = kvmppc_allocate_hpt(&info, order);
168 | if (err < 0) |
169 | goto out; |
	kvmppc_set_hpt(kvm, &info);
171 | |
172 | out: |
173 | if (err == 0) |
174 | /* Ensure that each vcpu will flush its TLB on next entry. */ |
		cpumask_setall(&kvm->arch.need_tlb_flush);
176 | |
	mutex_unlock(&kvm->arch.mmu_setup_lock);
178 | return err; |
179 | } |
180 | |
181 | void kvmppc_free_hpt(struct kvm_hpt_info *info) |
182 | { |
	vfree(info->rev);
184 | info->rev = NULL; |
185 | if (info->cma) |
186 | kvm_free_hpt_cma(virt_to_page((void *)info->virt), |
187 | 1 << (info->order - PAGE_SHIFT)); |
188 | else if (info->virt) |
		free_pages(info->virt, info->order - PAGE_SHIFT);
190 | info->virt = 0; |
191 | info->order = 0; |
192 | } |
193 | |
194 | /* Bits in first HPTE dword for pagesize 4k, 64k or 16M */ |
195 | static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize) |
196 | { |
197 | return (pgsize > 0x1000) ? HPTE_V_LARGE : 0; |
198 | } |
199 | |
200 | /* Bits in second HPTE dword for pagesize 4k, 64k or 16M */ |
201 | static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize) |
202 | { |
203 | return (pgsize == 0x10000) ? 0x1000 : 0; |
204 | } |
205 | |
206 | void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot, |
207 | unsigned long porder) |
208 | { |
209 | unsigned long i; |
210 | unsigned long npages; |
211 | unsigned long hp_v, hp_r; |
212 | unsigned long addr, hash; |
213 | unsigned long psize; |
214 | unsigned long hp0, hp1; |
215 | unsigned long idx_ret; |
216 | long ret; |
217 | struct kvm *kvm = vcpu->kvm; |
218 | |
219 | psize = 1ul << porder; |
220 | npages = memslot->npages >> (porder - PAGE_SHIFT); |
221 | |
222 | /* VRMA can't be > 1TB */ |
223 | if (npages > 1ul << (40 - porder)) |
224 | npages = 1ul << (40 - porder); |
225 | /* Can't use more than 1 HPTE per HPTEG */ |
226 | if (npages > kvmppc_hpt_mask(&kvm->arch.hpt) + 1) |
227 | npages = kvmppc_hpt_mask(&kvm->arch.hpt) + 1; |
228 | |
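	/*
	 * Template HPTE dwords: a bolted 1TB-segment entry with the VRMA
	 * VSID, full access permissions and the encodings for this page
	 * size; the per-page AVPN and RPN bits are filled in below.
	 */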
229 | hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) | |
230 | HPTE_V_BOLTED | hpte0_pgsize_encoding(psize); |
231 | hp1 = hpte1_pgsize_encoding(psize) | |
232 | HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX; |
233 | |
234 | for (i = 0; i < npages; ++i) { |
235 | addr = i << porder; |
236 | /* can't use hpt_hash since va > 64 bits */ |
237 | hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) |
238 | & kvmppc_hpt_mask(&kvm->arch.hpt); |
239 | /* |
240 | * We assume that the hash table is empty and no |
241 | * vcpus are using it at this stage. Since we create |
242 | * at most one HPTE per HPTEG, we just assume entry 7 |
243 | * is available and use it. |
244 | */ |
245 | hash = (hash << 3) + 7; |
246 | hp_v = hp0 | ((addr >> 16) & ~0x7fUL); |
247 | hp_r = hp1 | addr; |
248 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r, |
249 | &idx_ret); |
250 | if (ret != H_SUCCESS) { |
251 | pr_err("KVM: map_vrma at %lx failed, ret=%ld\n" , |
252 | addr, ret); |
253 | break; |
254 | } |
255 | } |
256 | } |
257 | |
258 | int kvmppc_mmu_hv_init(void) |
259 | { |
260 | unsigned long nr_lpids; |
261 | |
262 | if (!mmu_has_feature(MMU_FTR_LOCKLESS_TLBIE)) |
263 | return -EINVAL; |
264 | |
265 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
266 | if (WARN_ON(mfspr(SPRN_LPID) != 0)) |
267 | return -EINVAL; |
268 | nr_lpids = 1UL << mmu_lpid_bits; |
269 | } else { |
270 | nr_lpids = 1UL << KVM_MAX_NESTED_GUESTS_SHIFT; |
271 | } |
272 | |
273 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { |
274 | /* POWER7 has 10-bit LPIDs, POWER8 has 12-bit LPIDs */ |
275 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) |
276 | WARN_ON(nr_lpids != 1UL << 12); |
277 | else |
278 | WARN_ON(nr_lpids != 1UL << 10); |
279 | |
280 | /* |
		 * Reserve the last implemented LPID for use in partition
		 * switching on POWER7 and POWER8.
283 | */ |
284 | nr_lpids -= 1; |
285 | } |
286 | |
287 | kvmppc_init_lpid(nr_lpids); |
288 | |
289 | return 0; |
290 | } |
291 | |
292 | static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags, |
293 | long pte_index, unsigned long pteh, |
294 | unsigned long ptel, unsigned long *pte_idx_ret) |
295 | { |
296 | long ret; |
297 | |
298 | preempt_disable(); |
299 | ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel, |
300 | kvm->mm->pgd, false, pte_idx_ret); |
301 | preempt_enable(); |
302 | if (ret == H_TOO_HARD) { |
303 | /* this can't happen */ |
304 | pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n" ); |
305 | ret = H_RESOURCE; /* or something */ |
306 | } |
307 | return ret; |
308 | |
309 | } |
310 | |
311 | static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu, |
312 | gva_t eaddr) |
313 | { |
314 | u64 mask; |
315 | int i; |
316 | |
317 | for (i = 0; i < vcpu->arch.slb_nr; i++) { |
318 | if (!(vcpu->arch.slb[i].orige & SLB_ESID_V)) |
319 | continue; |
320 | |
321 | if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T) |
322 | mask = ESID_MASK_1T; |
323 | else |
324 | mask = ESID_MASK; |
325 | |
326 | if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0) |
327 | return &vcpu->arch.slb[i]; |
328 | } |
329 | return NULL; |
330 | } |
331 | |
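/*
 * Combine the real page number from the second HPTE dword with the byte
 * offset within the (possibly large) page taken from the effective address.
 */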
332 | static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r, |
333 | unsigned long ea) |
334 | { |
335 | unsigned long ra_mask; |
336 | |
337 | ra_mask = kvmppc_actual_pgsz(v, r) - 1; |
338 | return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask); |
339 | } |
340 | |
341 | static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, |
342 | struct kvmppc_pte *gpte, bool data, bool iswrite) |
343 | { |
344 | struct kvm *kvm = vcpu->kvm; |
345 | struct kvmppc_slb *slbe; |
346 | unsigned long slb_v; |
347 | unsigned long pp, key; |
348 | unsigned long v, orig_v, gr; |
349 | __be64 *hptep; |
350 | long int index; |
351 | int virtmode = __kvmppc_get_msr_hv(vcpu) & (data ? MSR_DR : MSR_IR); |
352 | |
353 | if (kvm_is_radix(vcpu->kvm)) |
354 | return kvmppc_mmu_radix_xlate(vcpu, eaddr, gpte, data, iswrite); |
355 | |
356 | /* Get SLB entry */ |
357 | if (virtmode) { |
358 | slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr); |
359 | if (!slbe) |
360 | return -EINVAL; |
361 | slb_v = slbe->origv; |
362 | } else { |
363 | /* real mode access */ |
364 | slb_v = vcpu->kvm->arch.vrma_slb_v; |
365 | } |
366 | |
367 | preempt_disable(); |
368 | /* Find the HPTE in the hash table */ |
369 | index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v, |
370 | HPTE_V_VALID | HPTE_V_ABSENT); |
371 | if (index < 0) { |
372 | preempt_enable(); |
373 | return -ENOENT; |
374 | } |
375 | hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); |
376 | v = orig_v = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; |
377 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
378 | v = hpte_new_to_old_v(v, be64_to_cpu(hptep[1])); |
379 | gr = kvm->arch.hpt.rev[index].guest_rpte; |
380 | |
381 | unlock_hpte(hptep, orig_v); |
382 | preempt_enable(); |
383 | |
384 | gpte->eaddr = eaddr; |
385 | gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff); |
386 | |
387 | /* Get PP bits and key for permission check */ |
388 | pp = gr & (HPTE_R_PP0 | HPTE_R_PP); |
389 | key = (__kvmppc_get_msr_hv(vcpu) & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS; |
390 | key &= slb_v; |
391 | |
392 | /* Calculate permissions */ |
393 | gpte->may_read = hpte_read_permission(pp, key); |
394 | gpte->may_write = hpte_write_permission(pp, key); |
395 | gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G)); |
396 | |
397 | /* Storage key permission check for POWER7 */ |
398 | if (data && virtmode) { |
399 | int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr); |
400 | if (amrfield & 1) |
401 | gpte->may_read = 0; |
402 | if (amrfield & 2) |
403 | gpte->may_write = 0; |
404 | } |
405 | |
406 | /* Get the guest physical address */ |
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
408 | return 0; |
409 | } |
410 | |
411 | /* |
412 | * Quick test for whether an instruction is a load or a store. |
413 | * If the instruction is a load or a store, then this will indicate |
414 | * which it is, at least on server processors. (Embedded processors |
415 | * have some external PID instructions that don't follow the rule |
416 | * embodied here.) If the instruction isn't a load or store, then |
417 | * this doesn't return anything useful. |
418 | */ |
419 | static int instruction_is_store(ppc_inst_t instr) |
420 | { |
421 | unsigned int mask; |
422 | unsigned int suffix; |
423 | |
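	/*
	 * For the D-form load/store opcodes the store variants differ from
	 * the loads by primary-opcode bit 0x10000000 (e.g. lwz=32 vs stw=36);
	 * for primary opcode 31 the analogous bit in the extended opcode
	 * field is 0x100 (e.g. lwzx vs stwx).
	 */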
424 | mask = 0x10000000; |
425 | suffix = ppc_inst_val(instr); |
426 | if (ppc_inst_prefixed(instr)) |
427 | suffix = ppc_inst_suffix(instr); |
428 | else if ((suffix & 0xfc000000) == 0x7c000000) |
429 | mask = 0x100; /* major opcode 31 */ |
430 | return (suffix & mask) != 0; |
431 | } |
432 | |
433 | int kvmppc_hv_emulate_mmio(struct kvm_vcpu *vcpu, |
434 | unsigned long gpa, gva_t ea, int is_store) |
435 | { |
436 | ppc_inst_t last_inst; |
437 | bool is_prefixed = !!(kvmppc_get_msr(vcpu) & SRR1_PREFIXED); |
438 | |
439 | /* |
440 | * Fast path - check if the guest physical address corresponds to a |
	 * device on the FAST_MMIO_BUS; if so, we can avoid loading the
	 * instruction altogether and just handle it and return.
443 | */ |
444 | if (is_store) { |
445 | int idx, ret; |
446 | |
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvm_io_bus_write(vcpu, KVM_FAST_MMIO_BUS, (gpa_t) gpa, 0,
				       NULL);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
451 | if (!ret) { |
452 | kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + (is_prefixed ? 8 : 4)); |
453 | return RESUME_GUEST; |
454 | } |
455 | } |
456 | |
457 | /* |
458 | * If we fail, we just return to the guest and try executing it again. |
459 | */ |
460 | if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) != |
461 | EMULATE_DONE) |
462 | return RESUME_GUEST; |
463 | |
464 | /* |
465 | * WARNING: We do not know for sure whether the instruction we just |
466 | * read from memory is the same that caused the fault in the first |
467 | * place. |
468 | * |
469 | * If the fault is prefixed but the instruction is not or vice |
470 | * versa, try again so that we don't advance pc the wrong amount. |
471 | */ |
472 | if (ppc_inst_prefixed(last_inst) != is_prefixed) |
473 | return RESUME_GUEST; |
474 | |
475 | /* |
	 * If the instruction we read is neither a load nor a store,
477 | * then it can't access memory, so we don't need to worry about |
478 | * enforcing access permissions. So, assuming it is a load or |
479 | * store, we just check that its direction (load or store) is |
480 | * consistent with the original fault, since that's what we |
481 | * checked the access permissions against. If there is a mismatch |
482 | * we just return and retry the instruction. |
483 | */ |
484 | |
485 | if (instruction_is_store(last_inst) != !!is_store) |
486 | return RESUME_GUEST; |
487 | |
488 | /* |
489 | * Emulated accesses are emulated by looking at the hash for |
490 | * translation once, then performing the access later. The |
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
493 | * physical address could possibly be a security hole for the |
494 | * guest (but not the host). |
495 | * |
496 | * This is less of an issue for MMIO stores since they aren't |
497 | * globally visible. It could be an issue for MMIO loads to |
498 | * a certain extent but we'll ignore it for now. |
499 | */ |
500 | |
501 | vcpu->arch.paddr_accessed = gpa; |
502 | vcpu->arch.vaddr_accessed = ea; |
503 | return kvmppc_emulate_mmio(vcpu); |
504 | } |
505 | |
506 | int kvmppc_book3s_hv_page_fault(struct kvm_vcpu *vcpu, |
507 | unsigned long ea, unsigned long dsisr) |
508 | { |
509 | struct kvm *kvm = vcpu->kvm; |
510 | unsigned long hpte[3], r; |
511 | unsigned long hnow_v, hnow_r; |
512 | __be64 *hptep; |
513 | unsigned long mmu_seq, psize, pte_size; |
514 | unsigned long gpa_base, gfn_base; |
515 | unsigned long gpa, gfn, hva, pfn, hpa; |
516 | struct kvm_memory_slot *memslot; |
517 | unsigned long *rmap; |
518 | struct revmap_entry *rev; |
519 | struct page *page; |
520 | long index, ret; |
521 | bool is_ci; |
522 | bool writing, write_ok; |
523 | unsigned int shift; |
524 | unsigned long rcbits; |
525 | long mmio_update; |
526 | pte_t pte, *ptep; |
527 | |
528 | if (kvm_is_radix(kvm)) |
529 | return kvmppc_book3s_radix_page_fault(vcpu, ea, dsisr); |
530 | |
531 | /* |
532 | * Real-mode code has already searched the HPT and found the |
533 | * entry we're interested in. Lock the entry and check that |
534 | * it hasn't changed. If it has, just return and re-execute the |
535 | * instruction. |
536 | */ |
537 | if (ea != vcpu->arch.pgfault_addr) |
538 | return RESUME_GUEST; |
539 | |
540 | if (vcpu->arch.pgfault_cache) { |
		mmio_update = atomic64_read(&kvm->arch.mmio_update);
542 | if (mmio_update == vcpu->arch.pgfault_cache->mmio_update) { |
543 | r = vcpu->arch.pgfault_cache->rpte; |
544 | psize = kvmppc_actual_pgsz(vcpu->arch.pgfault_hpte[0], |
545 | r); |
546 | gpa_base = r & HPTE_R_RPN & ~(psize - 1); |
547 | gfn_base = gpa_base >> PAGE_SHIFT; |
548 | gpa = gpa_base | (ea & (psize - 1)); |
549 | return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, |
550 | dsisr & DSISR_ISSTORE); |
551 | } |
552 | } |
553 | index = vcpu->arch.pgfault_index; |
554 | hptep = (__be64 *)(kvm->arch.hpt.virt + (index << 4)); |
555 | rev = &kvm->arch.hpt.rev[index]; |
556 | preempt_disable(); |
557 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
558 | cpu_relax(); |
559 | hpte[0] = be64_to_cpu(hptep[0]) & ~HPTE_V_HVLOCK; |
560 | hpte[1] = be64_to_cpu(hptep[1]); |
561 | hpte[2] = r = rev->guest_rpte; |
562 | unlock_hpte(hptep, hpte[0]); |
563 | preempt_enable(); |
564 | |
565 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
566 | hpte[0] = hpte_new_to_old_v(hpte[0], hpte[1]); |
567 | hpte[1] = hpte_new_to_old_r(hpte[1]); |
568 | } |
569 | if (hpte[0] != vcpu->arch.pgfault_hpte[0] || |
570 | hpte[1] != vcpu->arch.pgfault_hpte[1]) |
571 | return RESUME_GUEST; |
572 | |
573 | /* Translate the logical address and get the page */ |
574 | psize = kvmppc_actual_pgsz(hpte[0], r); |
575 | gpa_base = r & HPTE_R_RPN & ~(psize - 1); |
576 | gfn_base = gpa_base >> PAGE_SHIFT; |
577 | gpa = gpa_base | (ea & (psize - 1)); |
578 | gfn = gpa >> PAGE_SHIFT; |
579 | memslot = gfn_to_memslot(kvm, gfn); |
580 | |
	trace_kvm_page_fault_enter(vcpu, hpte, memslot, ea, dsisr);
582 | |
583 | /* No memslot means it's an emulated MMIO region */ |
584 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) |
585 | return kvmppc_hv_emulate_mmio(vcpu, gpa, ea, |
586 | dsisr & DSISR_ISSTORE); |
587 | |
588 | /* |
589 | * This should never happen, because of the slot_is_aligned() |
590 | * check in kvmppc_do_h_enter(). |
591 | */ |
592 | if (gfn_base < memslot->base_gfn) |
593 | return -EFAULT; |
594 | |
595 | /* used to check for invalidations in progress */ |
596 | mmu_seq = kvm->mmu_invalidate_seq; |
597 | smp_rmb(); |
598 | |
599 | ret = -EFAULT; |
600 | page = NULL; |
601 | writing = (dsisr & DSISR_ISSTORE) != 0; |
602 | /* If writing != 0, then the HPTE must allow writing, if we get here */ |
603 | write_ok = writing; |
	hva = gfn_to_hva_memslot(memslot, gfn);
605 | |
606 | /* |
607 | * Do a fast check first, since __gfn_to_pfn_memslot doesn't |
608 | * do it with !atomic && !async, which is how we call it. |
609 | * We always ask for write permission since the common case |
610 | * is that the page is writable. |
611 | */ |
	if (get_user_page_fast_only(hva, FOLL_WRITE, &page)) {
613 | write_ok = true; |
614 | } else { |
615 | /* Call KVM generic code to do the slow-path check */ |
		pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
					   writing, &write_ok, NULL);
618 | if (is_error_noslot_pfn(pfn)) |
619 | return -EFAULT; |
620 | page = NULL; |
621 | if (pfn_valid(pfn)) { |
622 | page = pfn_to_page(pfn); |
623 | if (PageReserved(page)) |
624 | page = NULL; |
625 | } |
626 | } |
627 | |
628 | /* |
629 | * Read the PTE from the process' radix tree and use that |
630 | * so we get the shift and attribute bits. |
631 | */ |
	spin_lock(&kvm->mmu_lock);
633 | ptep = find_kvm_host_pte(kvm, mmu_seq, hva, &shift); |
	pte = __pte(0);
635 | if (ptep) |
636 | pte = READ_ONCE(*ptep); |
	spin_unlock(&kvm->mmu_lock);
638 | /* |
639 | * If the PTE disappeared temporarily due to a THP |
640 | * collapse, just return and let the guest try again. |
641 | */ |
	if (!pte_present(pte)) {
643 | if (page) |
644 | put_page(page); |
645 | return RESUME_GUEST; |
646 | } |
647 | hpa = pte_pfn(pte) << PAGE_SHIFT; |
648 | pte_size = PAGE_SIZE; |
649 | if (shift) |
650 | pte_size = 1ul << shift; |
651 | is_ci = pte_ci(pte); |
652 | |
653 | if (psize > pte_size) |
654 | goto out_put; |
655 | if (pte_size > psize) |
656 | hpa |= hva & (pte_size - psize); |
657 | |
658 | /* Check WIMG vs. the actual page we're accessing */ |
659 | if (!hpte_cache_flags_ok(r, is_ci)) { |
660 | if (is_ci) |
661 | goto out_put; |
662 | /* |
663 | * Allow guest to map emulated device memory as |
664 | * uncacheable, but actually make it cacheable. |
665 | */ |
666 | r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M; |
667 | } |
668 | |
669 | /* |
670 | * Set the HPTE to point to hpa. |
671 | * Since the hpa is at PAGE_SIZE granularity, make sure we |
672 | * don't mask out lower-order bits if psize < PAGE_SIZE. |
673 | */ |
674 | if (psize < PAGE_SIZE) |
675 | psize = PAGE_SIZE; |
676 | r = (r & HPTE_R_KEY_HI) | (r & ~(HPTE_R_PP0 - psize)) | hpa; |
677 | if (hpte_is_writable(r) && !write_ok) |
678 | r = hpte_make_readonly(r); |
679 | ret = RESUME_GUEST; |
680 | preempt_disable(); |
681 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
682 | cpu_relax(); |
683 | hnow_v = be64_to_cpu(hptep[0]); |
684 | hnow_r = be64_to_cpu(hptep[1]); |
685 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
686 | hnow_v = hpte_new_to_old_v(hnow_v, hnow_r); |
687 | hnow_r = hpte_new_to_old_r(hnow_r); |
688 | } |
689 | |
690 | /* |
691 | * If the HPT is being resized, don't update the HPTE, |
692 | * instead let the guest retry after the resize operation is complete. |
693 | * The synchronization for mmu_ready test vs. set is provided |
694 | * by the HPTE lock. |
695 | */ |
696 | if (!kvm->arch.mmu_ready) |
697 | goto out_unlock; |
698 | |
699 | if ((hnow_v & ~HPTE_V_HVLOCK) != hpte[0] || hnow_r != hpte[1] || |
700 | rev->guest_rpte != hpte[2]) |
701 | /* HPTE has been changed under us; let the guest retry */ |
702 | goto out_unlock; |
703 | hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID; |
704 | |
705 | /* Always put the HPTE in the rmap chain for the page base address */ |
706 | rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn]; |
707 | lock_rmap(rmap); |
708 | |
709 | /* Check if we might have been invalidated; let the guest retry if so */ |
710 | ret = RESUME_GUEST; |
	if (mmu_invalidate_retry(vcpu->kvm, mmu_seq)) {
712 | unlock_rmap(rmap); |
713 | goto out_unlock; |
714 | } |
715 | |
716 | /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */ |
717 | rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT; |
718 | r &= rcbits | ~(HPTE_R_R | HPTE_R_C); |
719 | |
720 | if (be64_to_cpu(hptep[0]) & HPTE_V_VALID) { |
721 | /* HPTE was previously valid, so we need to invalidate it */ |
722 | unlock_rmap(rmap); |
723 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
724 | kvmppc_invalidate_hpte(kvm, hptep, index); |
725 | /* don't lose previous R and C bits */ |
726 | r |= be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); |
727 | } else { |
728 | kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0); |
729 | } |
730 | |
731 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
732 | r = hpte_old_to_new_r(hpte[0], r); |
733 | hpte[0] = hpte_old_to_new_v(hpte[0]); |
734 | } |
735 | hptep[1] = cpu_to_be64(r); |
736 | eieio(); |
737 | __unlock_hpte(hptep, hpte[0]); |
738 | asm volatile("ptesync" : : : "memory" ); |
739 | preempt_enable(); |
740 | if (page && hpte_is_writable(r)) |
741 | set_page_dirty_lock(page); |
742 | |
743 | out_put: |
	trace_kvm_page_fault_exit(vcpu, hpte, ret);
745 | |
746 | if (page) |
747 | put_page(page); |
748 | return ret; |
749 | |
750 | out_unlock: |
751 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
752 | preempt_enable(); |
753 | goto out_put; |
754 | } |
755 | |
756 | void kvmppc_rmap_reset(struct kvm *kvm) |
757 | { |
758 | struct kvm_memslots *slots; |
759 | struct kvm_memory_slot *memslot; |
760 | int srcu_idx, bkt; |
761 | |
	srcu_idx = srcu_read_lock(&kvm->srcu);
763 | slots = kvm_memslots(kvm); |
764 | kvm_for_each_memslot(memslot, bkt, slots) { |
765 | /* Mutual exclusion with kvm_unmap_hva_range etc. */ |
		spin_lock(&kvm->mmu_lock);
767 | /* |
768 | * This assumes it is acceptable to lose reference and |
769 | * change bits across a reset. |
770 | */ |
771 | memset(memslot->arch.rmap, 0, |
772 | memslot->npages * sizeof(*memslot->arch.rmap)); |
		spin_unlock(&kvm->mmu_lock);
774 | } |
	srcu_read_unlock(&kvm->srcu, srcu_idx);
776 | } |
777 | |
778 | /* Must be called with both HPTE and rmap locked */ |
779 | static void kvmppc_unmap_hpte(struct kvm *kvm, unsigned long i, |
780 | struct kvm_memory_slot *memslot, |
781 | unsigned long *rmapp, unsigned long gfn) |
782 | { |
783 | __be64 *hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
784 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
785 | unsigned long j, h; |
786 | unsigned long ptel, psize, rcbits; |
787 | |
788 | j = rev[i].forw; |
789 | if (j == i) { |
790 | /* chain is now empty */ |
791 | *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); |
792 | } else { |
793 | /* remove i from chain */ |
794 | h = rev[i].back; |
795 | rev[h].forw = j; |
796 | rev[j].back = h; |
797 | rev[i].forw = rev[i].back = i; |
798 | *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j; |
799 | } |
800 | |
801 | /* Now check and modify the HPTE */ |
802 | ptel = rev[i].guest_rpte; |
803 | psize = kvmppc_actual_pgsz(be64_to_cpu(hptep[0]), ptel); |
804 | if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && |
805 | hpte_rpn(ptel, psize) == gfn) { |
806 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
807 | kvmppc_invalidate_hpte(kvm, hptep, i); |
808 | hptep[1] &= ~cpu_to_be64(HPTE_R_KEY_HI | HPTE_R_KEY_LO); |
809 | /* Harvest R and C */ |
810 | rcbits = be64_to_cpu(hptep[1]) & (HPTE_R_R | HPTE_R_C); |
811 | *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; |
812 | if ((rcbits & HPTE_R_C) && memslot->dirty_bitmap) |
813 | kvmppc_update_dirty_map(memslot, gfn, psize); |
814 | if (rcbits & ~rev[i].guest_rpte) { |
815 | rev[i].guest_rpte = ptel | rcbits; |
816 | note_hpte_modification(kvm, &rev[i]); |
817 | } |
818 | } |
819 | } |
820 | |
821 | static void kvm_unmap_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, |
822 | unsigned long gfn) |
823 | { |
824 | unsigned long i; |
825 | __be64 *hptep; |
826 | unsigned long *rmapp; |
827 | |
828 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
829 | for (;;) { |
830 | lock_rmap(rmapp); |
831 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
832 | unlock_rmap(rmapp); |
833 | break; |
834 | } |
835 | |
836 | /* |
837 | * To avoid an ABBA deadlock with the HPTE lock bit, |
838 | * we can't spin on the HPTE lock while holding the |
839 | * rmap chain lock. |
840 | */ |
841 | i = *rmapp & KVMPPC_RMAP_INDEX; |
842 | hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
843 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
844 | /* unlock rmap before spinning on the HPTE lock */ |
845 | unlock_rmap(rmapp); |
846 | while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) |
847 | cpu_relax(); |
848 | continue; |
849 | } |
850 | |
851 | kvmppc_unmap_hpte(kvm, i, memslot, rmapp, gfn); |
852 | unlock_rmap(rmapp); |
853 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
854 | } |
855 | } |
856 | |
857 | bool kvm_unmap_gfn_range_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
858 | { |
859 | gfn_t gfn; |
860 | |
861 | if (kvm_is_radix(kvm)) { |
862 | for (gfn = range->start; gfn < range->end; gfn++) |
863 | kvm_unmap_radix(kvm, range->slot, gfn); |
864 | } else { |
865 | for (gfn = range->start; gfn < range->end; gfn++) |
			kvm_unmap_rmapp(kvm, range->slot, gfn);
867 | } |
868 | |
869 | return false; |
870 | } |
871 | |
872 | void kvmppc_core_flush_memslot_hv(struct kvm *kvm, |
873 | struct kvm_memory_slot *memslot) |
874 | { |
875 | unsigned long gfn; |
876 | unsigned long n; |
877 | unsigned long *rmapp; |
878 | |
879 | gfn = memslot->base_gfn; |
880 | rmapp = memslot->arch.rmap; |
881 | if (kvm_is_radix(kvm)) { |
882 | kvmppc_radix_flush_memslot(kvm, memslot); |
883 | return; |
884 | } |
885 | |
886 | for (n = memslot->npages; n; --n, ++gfn) { |
887 | /* |
888 | * Testing the present bit without locking is OK because |
889 | * the memslot has been marked invalid already, and hence |
890 | * no new HPTEs referencing this page can be created, |
891 | * thus the present bit can't go from 0 to 1. |
892 | */ |
893 | if (*rmapp & KVMPPC_RMAP_PRESENT) |
894 | kvm_unmap_rmapp(kvm, memslot, gfn); |
895 | ++rmapp; |
896 | } |
897 | } |
898 | |
899 | static bool kvm_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, |
900 | unsigned long gfn) |
901 | { |
902 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
903 | unsigned long head, i, j; |
904 | __be64 *hptep; |
905 | bool ret = false; |
906 | unsigned long *rmapp; |
907 | |
908 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
909 | retry: |
910 | lock_rmap(rmapp); |
911 | if (*rmapp & KVMPPC_RMAP_REFERENCED) { |
912 | *rmapp &= ~KVMPPC_RMAP_REFERENCED; |
913 | ret = true; |
914 | } |
915 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
916 | unlock_rmap(rmapp); |
917 | return ret; |
918 | } |
919 | |
920 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
921 | do { |
922 | hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
923 | j = rev[i].forw; |
924 | |
925 | /* If this HPTE isn't referenced, ignore it */ |
926 | if (!(be64_to_cpu(hptep[1]) & HPTE_R_R)) |
927 | continue; |
928 | |
929 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
930 | /* unlock rmap before spinning on the HPTE lock */ |
931 | unlock_rmap(rmapp); |
932 | while (be64_to_cpu(hptep[0]) & HPTE_V_HVLOCK) |
933 | cpu_relax(); |
934 | goto retry; |
935 | } |
936 | |
937 | /* Now check and modify the HPTE */ |
938 | if ((be64_to_cpu(hptep[0]) & HPTE_V_VALID) && |
939 | (be64_to_cpu(hptep[1]) & HPTE_R_R)) { |
940 | kvmppc_clear_ref_hpte(kvm, hptep, i); |
941 | if (!(rev[i].guest_rpte & HPTE_R_R)) { |
942 | rev[i].guest_rpte |= HPTE_R_R; |
943 | note_hpte_modification(kvm, &rev[i]); |
944 | } |
945 | ret = true; |
946 | } |
947 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
948 | } while ((i = j) != head); |
949 | |
950 | unlock_rmap(rmapp); |
951 | return ret; |
952 | } |
953 | |
954 | bool kvm_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
955 | { |
956 | gfn_t gfn; |
957 | bool ret = false; |
958 | |
959 | if (kvm_is_radix(kvm)) { |
960 | for (gfn = range->start; gfn < range->end; gfn++) |
961 | ret |= kvm_age_radix(kvm, range->slot, gfn); |
962 | } else { |
963 | for (gfn = range->start; gfn < range->end; gfn++) |
			ret |= kvm_age_rmapp(kvm, range->slot, gfn);
965 | } |
966 | |
967 | return ret; |
968 | } |
969 | |
970 | static bool kvm_test_age_rmapp(struct kvm *kvm, struct kvm_memory_slot *memslot, |
971 | unsigned long gfn) |
972 | { |
973 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
974 | unsigned long head, i, j; |
975 | unsigned long *hp; |
976 | bool ret = true; |
977 | unsigned long *rmapp; |
978 | |
979 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
980 | if (*rmapp & KVMPPC_RMAP_REFERENCED) |
981 | return true; |
982 | |
983 | lock_rmap(rmapp); |
984 | if (*rmapp & KVMPPC_RMAP_REFERENCED) |
985 | goto out; |
986 | |
987 | if (*rmapp & KVMPPC_RMAP_PRESENT) { |
988 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
989 | do { |
990 | hp = (unsigned long *)(kvm->arch.hpt.virt + (i << 4)); |
991 | j = rev[i].forw; |
992 | if (be64_to_cpu(hp[1]) & HPTE_R_R) |
993 | goto out; |
994 | } while ((i = j) != head); |
995 | } |
996 | ret = false; |
997 | |
998 | out: |
999 | unlock_rmap(rmapp); |
1000 | return ret; |
1001 | } |
1002 | |
1003 | bool kvm_test_age_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
1004 | { |
1005 | WARN_ON(range->start + 1 != range->end); |
1006 | |
1007 | if (kvm_is_radix(kvm)) |
1008 | return kvm_test_age_radix(kvm, range->slot, range->start); |
1009 | else |
		return kvm_test_age_rmapp(kvm, range->slot, range->start);
1011 | } |
1012 | |
1013 | bool kvm_set_spte_gfn_hv(struct kvm *kvm, struct kvm_gfn_range *range) |
1014 | { |
1015 | WARN_ON(range->start + 1 != range->end); |
1016 | |
1017 | if (kvm_is_radix(kvm)) |
1018 | kvm_unmap_radix(kvm, range->slot, range->start); |
1019 | else |
		kvm_unmap_rmapp(kvm, range->slot, range->start);
1021 | |
1022 | return false; |
1023 | } |
1024 | |
1025 | static int vcpus_running(struct kvm *kvm) |
1026 | { |
	return atomic_read(&kvm->arch.vcpus_running) != 0;
1028 | } |
1029 | |
1030 | /* |
1031 | * Returns the number of system pages that are dirty. |
1032 | * This can be more than 1 if we find a huge-page HPTE. |
1033 | */ |
1034 | static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp) |
1035 | { |
1036 | struct revmap_entry *rev = kvm->arch.hpt.rev; |
1037 | unsigned long head, i, j; |
1038 | unsigned long n; |
1039 | unsigned long v, r; |
1040 | __be64 *hptep; |
1041 | int npages_dirty = 0; |
1042 | |
1043 | retry: |
1044 | lock_rmap(rmapp); |
1045 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
1046 | unlock_rmap(rmapp); |
1047 | return npages_dirty; |
1048 | } |
1049 | |
1050 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
1051 | do { |
1052 | unsigned long hptep1; |
1053 | hptep = (__be64 *) (kvm->arch.hpt.virt + (i << 4)); |
1054 | j = rev[i].forw; |
1055 | |
1056 | /* |
1057 | * Checking the C (changed) bit here is racy since there |
1058 | * is no guarantee about when the hardware writes it back. |
1059 | * If the HPTE is not writable then it is stable since the |
1060 | * page can't be written to, and we would have done a tlbie |
1061 | * (which forces the hardware to complete any writeback) |
1062 | * when making the HPTE read-only. |
1063 | * If vcpus are running then this call is racy anyway |
1064 | * since the page could get dirtied subsequently, so we |
1065 | * expect there to be a further call which would pick up |
1066 | * any delayed C bit writeback. |
1067 | * Otherwise we need to do the tlbie even if C==0 in |
1068 | * order to pick up any delayed writeback of C. |
1069 | */ |
1070 | hptep1 = be64_to_cpu(hptep[1]); |
1071 | if (!(hptep1 & HPTE_R_C) && |
1072 | (!hpte_is_writable(hptep1) || vcpus_running(kvm))) |
1073 | continue; |
1074 | |
1075 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
1076 | /* unlock rmap before spinning on the HPTE lock */ |
1077 | unlock_rmap(rmapp); |
1078 | while (hptep[0] & cpu_to_be64(HPTE_V_HVLOCK)) |
1079 | cpu_relax(); |
1080 | goto retry; |
1081 | } |
1082 | |
1083 | /* Now check and modify the HPTE */ |
1084 | if (!(hptep[0] & cpu_to_be64(HPTE_V_VALID))) { |
1085 | __unlock_hpte(hptep, be64_to_cpu(hptep[0])); |
1086 | continue; |
1087 | } |
1088 | |
1089 | /* need to make it temporarily absent so C is stable */ |
1090 | hptep[0] |= cpu_to_be64(HPTE_V_ABSENT); |
1091 | kvmppc_invalidate_hpte(kvm, hptep, i); |
1092 | v = be64_to_cpu(hptep[0]); |
1093 | r = be64_to_cpu(hptep[1]); |
1094 | if (r & HPTE_R_C) { |
1095 | hptep[1] = cpu_to_be64(r & ~HPTE_R_C); |
1096 | if (!(rev[i].guest_rpte & HPTE_R_C)) { |
1097 | rev[i].guest_rpte |= HPTE_R_C; |
1098 | note_hpte_modification(kvm, &rev[i]); |
1099 | } |
1100 | n = kvmppc_actual_pgsz(v, r); |
1101 | n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT; |
1102 | if (n > npages_dirty) |
1103 | npages_dirty = n; |
1104 | eieio(); |
1105 | } |
1106 | v &= ~HPTE_V_ABSENT; |
1107 | v |= HPTE_V_VALID; |
1108 | __unlock_hpte(hptep, v); |
1109 | } while ((i = j) != head); |
1110 | |
1111 | unlock_rmap(rmapp); |
1112 | return npages_dirty; |
1113 | } |
1114 | |
1115 | void kvmppc_harvest_vpa_dirty(struct kvmppc_vpa *vpa, |
1116 | struct kvm_memory_slot *memslot, |
1117 | unsigned long *map) |
1118 | { |
1119 | unsigned long gfn; |
1120 | |
1121 | if (!vpa->dirty || !vpa->pinned_addr) |
1122 | return; |
1123 | gfn = vpa->gpa >> PAGE_SHIFT; |
1124 | if (gfn < memslot->base_gfn || |
1125 | gfn >= memslot->base_gfn + memslot->npages) |
1126 | return; |
1127 | |
1128 | vpa->dirty = false; |
1129 | if (map) |
		__set_bit_le(gfn - memslot->base_gfn, map);
1131 | } |
1132 | |
1133 | long kvmppc_hv_get_dirty_log_hpt(struct kvm *kvm, |
1134 | struct kvm_memory_slot *memslot, unsigned long *map) |
1135 | { |
1136 | unsigned long i; |
1137 | unsigned long *rmapp; |
1138 | |
1139 | preempt_disable(); |
1140 | rmapp = memslot->arch.rmap; |
1141 | for (i = 0; i < memslot->npages; ++i) { |
1142 | int npages = kvm_test_clear_dirty_npages(kvm, rmapp); |
1143 | /* |
1144 | * Note that if npages > 0 then i must be a multiple of npages, |
1145 | * since we always put huge-page HPTEs in the rmap chain |
1146 | * corresponding to their page base address. |
1147 | */ |
1148 | if (npages) |
1149 | set_dirty_bits(map, i, npages); |
1150 | ++rmapp; |
1151 | } |
1152 | preempt_enable(); |
1153 | return 0; |
1154 | } |
1155 | |
1156 | void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa, |
1157 | unsigned long *nb_ret) |
1158 | { |
1159 | struct kvm_memory_slot *memslot; |
1160 | unsigned long gfn = gpa >> PAGE_SHIFT; |
1161 | struct page *page, *pages[1]; |
1162 | int npages; |
1163 | unsigned long hva, offset; |
1164 | int srcu_idx; |
1165 | |
	srcu_idx = srcu_read_lock(&kvm->srcu);
1167 | memslot = gfn_to_memslot(kvm, gfn); |
1168 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) |
1169 | goto err; |
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, FOLL_WRITE, pages);
1172 | if (npages < 1) |
1173 | goto err; |
1174 | page = pages[0]; |
	srcu_read_unlock(&kvm->srcu, srcu_idx);
1176 | |
1177 | offset = gpa & (PAGE_SIZE - 1); |
1178 | if (nb_ret) |
1179 | *nb_ret = PAGE_SIZE - offset; |
1180 | return page_address(page) + offset; |
1181 | |
1182 | err: |
	srcu_read_unlock(&kvm->srcu, srcu_idx);
1184 | return NULL; |
1185 | } |
1186 | |
1187 | void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa, |
1188 | bool dirty) |
1189 | { |
1190 | struct page *page = virt_to_page(va); |
1191 | struct kvm_memory_slot *memslot; |
1192 | unsigned long gfn; |
1193 | int srcu_idx; |
1194 | |
1195 | put_page(page); |
1196 | |
1197 | if (!dirty) |
1198 | return; |
1199 | |
1200 | /* We need to mark this page dirty in the memslot dirty_bitmap, if any */ |
1201 | gfn = gpa >> PAGE_SHIFT; |
	srcu_idx = srcu_read_lock(&kvm->srcu);
1203 | memslot = gfn_to_memslot(kvm, gfn); |
1204 | if (memslot && memslot->dirty_bitmap) |
		set_bit_le(gfn - memslot->base_gfn, memslot->dirty_bitmap);
	srcu_read_unlock(&kvm->srcu, srcu_idx);
1207 | } |
1208 | |
1209 | /* |
1210 | * HPT resizing |
1211 | */ |
1212 | static int resize_hpt_allocate(struct kvm_resize_hpt *resize) |
1213 | { |
1214 | int rc; |
1215 | |
	rc = kvmppc_allocate_hpt(&resize->hpt, resize->order);
1217 | if (rc < 0) |
1218 | return rc; |
1219 | |
1220 | resize_hpt_debug(resize, "%s(): HPT @ 0x%lx\n" , __func__, |
1221 | resize->hpt.virt); |
1222 | |
1223 | return 0; |
1224 | } |
1225 | |
1226 | static unsigned long resize_hpt_rehash_hpte(struct kvm_resize_hpt *resize, |
1227 | unsigned long idx) |
1228 | { |
1229 | struct kvm *kvm = resize->kvm; |
1230 | struct kvm_hpt_info *old = &kvm->arch.hpt; |
1231 | struct kvm_hpt_info *new = &resize->hpt; |
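	/*
	 * Each HPTE group is 8 HPTEs of 16 bytes = 128 bytes, so an HPT of
	 * 2^order bytes contains 2^(order - 7) groups.
	 */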
1232 | unsigned long old_hash_mask = (1ULL << (old->order - 7)) - 1; |
1233 | unsigned long new_hash_mask = (1ULL << (new->order - 7)) - 1; |
1234 | __be64 *hptep, *new_hptep; |
1235 | unsigned long vpte, rpte, guest_rpte; |
1236 | int ret; |
1237 | struct revmap_entry *rev; |
1238 | unsigned long apsize, avpn, pteg, hash; |
1239 | unsigned long new_idx, new_pteg, replace_vpte; |
1240 | int pshift; |
1241 | |
1242 | hptep = (__be64 *)(old->virt + (idx << 4)); |
1243 | |
1244 | /* Guest is stopped, so new HPTEs can't be added or faulted |
1245 | * in, only unmapped or altered by host actions. So, it's |
1246 | * safe to check this before we take the HPTE lock */ |
1247 | vpte = be64_to_cpu(hptep[0]); |
1248 | if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT)) |
1249 | return 0; /* nothing to do */ |
1250 | |
1251 | while (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) |
1252 | cpu_relax(); |
1253 | |
1254 | vpte = be64_to_cpu(hptep[0]); |
1255 | |
1256 | ret = 0; |
1257 | if (!(vpte & HPTE_V_VALID) && !(vpte & HPTE_V_ABSENT)) |
1258 | /* Nothing to do */ |
1259 | goto out; |
1260 | |
1261 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
1262 | rpte = be64_to_cpu(hptep[1]); |
1263 | vpte = hpte_new_to_old_v(vpte, rpte); |
1264 | } |
1265 | |
1266 | /* Unmap */ |
1267 | rev = &old->rev[idx]; |
1268 | guest_rpte = rev->guest_rpte; |
1269 | |
1270 | ret = -EIO; |
1271 | apsize = kvmppc_actual_pgsz(vpte, guest_rpte); |
1272 | if (!apsize) |
1273 | goto out; |
1274 | |
1275 | if (vpte & HPTE_V_VALID) { |
1276 | unsigned long gfn = hpte_rpn(guest_rpte, apsize); |
		int srcu_idx = srcu_read_lock(&kvm->srcu);
1278 | struct kvm_memory_slot *memslot = |
			__gfn_to_memslot(kvm_memslots(kvm), gfn);
1280 | |
1281 | if (memslot) { |
1282 | unsigned long *rmapp; |
1283 | rmapp = &memslot->arch.rmap[gfn - memslot->base_gfn]; |
1284 | |
1285 | lock_rmap(rmapp); |
			kvmppc_unmap_hpte(kvm, idx, memslot, rmapp, gfn);
1287 | unlock_rmap(rmapp); |
1288 | } |
1289 | |
		srcu_read_unlock(&kvm->srcu, srcu_idx);
1291 | } |
1292 | |
1293 | /* Reload PTE after unmap */ |
1294 | vpte = be64_to_cpu(hptep[0]); |
1295 | BUG_ON(vpte & HPTE_V_VALID); |
1296 | BUG_ON(!(vpte & HPTE_V_ABSENT)); |
1297 | |
1298 | ret = 0; |
1299 | if (!(vpte & HPTE_V_BOLTED)) |
1300 | goto out; |
1301 | |
1302 | rpte = be64_to_cpu(hptep[1]); |
1303 | |
1304 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
1305 | vpte = hpte_new_to_old_v(vpte, rpte); |
1306 | rpte = hpte_new_to_old_r(rpte); |
1307 | } |
1308 | |
1309 | pshift = kvmppc_hpte_base_page_shift(vpte, rpte); |
1310 | avpn = HPTE_V_AVPN_VAL(vpte) & ~(((1ul << pshift) - 1) >> 23); |
1311 | pteg = idx / HPTES_PER_GROUP; |
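	/* A secondary-hash entry lives at the ones-complement of the primary hash */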
1312 | if (vpte & HPTE_V_SECONDARY) |
1313 | pteg = ~pteg; |
1314 | |
1315 | if (!(vpte & HPTE_V_1TB_SEG)) { |
1316 | unsigned long offset, vsid; |
1317 | |
1318 | /* We only have 28 - 23 bits of offset in avpn */ |
1319 | offset = (avpn & 0x1f) << 23; |
1320 | vsid = avpn >> 5; |
1321 | /* We can find more bits from the pteg value */ |
1322 | if (pshift < 23) |
1323 | offset |= ((vsid ^ pteg) & old_hash_mask) << pshift; |
1324 | |
1325 | hash = vsid ^ (offset >> pshift); |
1326 | } else { |
1327 | unsigned long offset, vsid; |
1328 | |
1329 | /* We only have 40 - 23 bits of seg_off in avpn */ |
1330 | offset = (avpn & 0x1ffff) << 23; |
1331 | vsid = avpn >> 17; |
1332 | if (pshift < 23) |
1333 | offset |= ((vsid ^ (vsid << 25) ^ pteg) & old_hash_mask) << pshift; |
1334 | |
1335 | hash = vsid ^ (vsid << 25) ^ (offset >> pshift); |
1336 | } |
1337 | |
1338 | new_pteg = hash & new_hash_mask; |
1339 | if (vpte & HPTE_V_SECONDARY) |
1340 | new_pteg = ~hash & new_hash_mask; |
1341 | |
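	/* Keep the same slot number within the group as the old entry used */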
1342 | new_idx = new_pteg * HPTES_PER_GROUP + (idx % HPTES_PER_GROUP); |
1343 | new_hptep = (__be64 *)(new->virt + (new_idx << 4)); |
1344 | |
1345 | replace_vpte = be64_to_cpu(new_hptep[0]); |
1346 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
1347 | unsigned long replace_rpte = be64_to_cpu(new_hptep[1]); |
1348 | replace_vpte = hpte_new_to_old_v(replace_vpte, replace_rpte); |
1349 | } |
1350 | |
1351 | if (replace_vpte & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
1352 | BUG_ON(new->order >= old->order); |
1353 | |
1354 | if (replace_vpte & HPTE_V_BOLTED) { |
1355 | if (vpte & HPTE_V_BOLTED) |
1356 | /* Bolted collision, nothing we can do */ |
1357 | ret = -ENOSPC; |
1358 | /* Discard the new HPTE */ |
1359 | goto out; |
1360 | } |
1361 | |
1362 | /* Discard the previous HPTE */ |
1363 | } |
1364 | |
1365 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
1366 | rpte = hpte_old_to_new_r(vpte, rpte); |
1367 | vpte = hpte_old_to_new_v(vpte); |
1368 | } |
1369 | |
1370 | new_hptep[1] = cpu_to_be64(rpte); |
1371 | new->rev[new_idx].guest_rpte = guest_rpte; |
1372 | /* No need for a barrier, since new HPT isn't active */ |
1373 | new_hptep[0] = cpu_to_be64(vpte); |
1374 | unlock_hpte(new_hptep, vpte); |
1375 | |
1376 | out: |
1377 | unlock_hpte(hptep, vpte); |
1378 | return ret; |
1379 | } |
1380 | |
1381 | static int resize_hpt_rehash(struct kvm_resize_hpt *resize) |
1382 | { |
1383 | struct kvm *kvm = resize->kvm; |
1384 | unsigned long i; |
1385 | int rc; |
1386 | |
1387 | for (i = 0; i < kvmppc_hpt_npte(&kvm->arch.hpt); i++) { |
		rc = resize_hpt_rehash_hpte(resize, i);
1389 | if (rc != 0) |
1390 | return rc; |
1391 | } |
1392 | |
1393 | return 0; |
1394 | } |
1395 | |
1396 | static void resize_hpt_pivot(struct kvm_resize_hpt *resize) |
1397 | { |
1398 | struct kvm *kvm = resize->kvm; |
1399 | struct kvm_hpt_info hpt_tmp; |
1400 | |
1401 | /* Exchange the pending tables in the resize structure with |
1402 | * the active tables */ |
1403 | |
1404 | resize_hpt_debug(resize, "resize_hpt_pivot()\n" ); |
1405 | |
	spin_lock(&kvm->mmu_lock);
	asm volatile("ptesync" : : : "memory");
1408 | |
1409 | hpt_tmp = kvm->arch.hpt; |
	kvmppc_set_hpt(kvm, &resize->hpt);
1411 | resize->hpt = hpt_tmp; |
1412 | |
	spin_unlock(&kvm->mmu_lock);
1414 | |
	synchronize_srcu_expedited(&kvm->srcu);
1416 | |
1417 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
1418 | kvmppc_setup_partition_table(kvm); |
1419 | |
1420 | resize_hpt_debug(resize, "resize_hpt_pivot() done\n" ); |
1421 | } |
1422 | |
1423 | static void resize_hpt_release(struct kvm *kvm, struct kvm_resize_hpt *resize) |
1424 | { |
1425 | if (WARN_ON(!mutex_is_locked(&kvm->arch.mmu_setup_lock))) |
1426 | return; |
1427 | |
1428 | if (!resize) |
1429 | return; |
1430 | |
1431 | if (resize->error != -EBUSY) { |
1432 | if (resize->hpt.virt) |
1433 | kvmppc_free_hpt(&resize->hpt); |
		kfree(resize);
1435 | } |
1436 | |
1437 | if (kvm->arch.resize_hpt == resize) |
1438 | kvm->arch.resize_hpt = NULL; |
1439 | } |
1440 | |
1441 | static void resize_hpt_prepare_work(struct work_struct *work) |
1442 | { |
1443 | struct kvm_resize_hpt *resize = container_of(work, |
1444 | struct kvm_resize_hpt, |
1445 | work); |
1446 | struct kvm *kvm = resize->kvm; |
1447 | int err = 0; |
1448 | |
1449 | if (WARN_ON(resize->error != -EBUSY)) |
1450 | return; |
1451 | |
1452 | mutex_lock(&kvm->arch.mmu_setup_lock); |
1453 | |
1454 | /* Request is still current? */ |
1455 | if (kvm->arch.resize_hpt == resize) { |
1456 | /* We may request large allocations here: |
		 * do not sleep for a long time with kvm->arch.mmu_setup_lock held.
1458 | */ |
		mutex_unlock(&kvm->arch.mmu_setup_lock);
1460 | |
1461 | resize_hpt_debug(resize, "%s(): order = %d\n" , __func__, |
1462 | resize->order); |
1463 | |
1464 | err = resize_hpt_allocate(resize); |
1465 | |
		/* We rely on -EBUSY strictly meaning "allocation still
		 * in progress" when preparing for HPT resize.
1468 | */ |
1469 | if (WARN_ON(err == -EBUSY)) |
1470 | err = -EINPROGRESS; |
1471 | |
1472 | mutex_lock(&kvm->arch.mmu_setup_lock); |
1473 | /* It is possible that kvm->arch.resize_hpt != resize |
1474 | * after we grab kvm->arch.mmu_setup_lock again. |
1475 | */ |
1476 | } |
1477 | |
1478 | resize->error = err; |
1479 | |
1480 | if (kvm->arch.resize_hpt != resize) |
1481 | resize_hpt_release(kvm, resize); |
1482 | |
	mutex_unlock(&kvm->arch.mmu_setup_lock);
1484 | } |
1485 | |
1486 | int kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm, |
1487 | struct kvm_ppc_resize_hpt *rhpt) |
1488 | { |
1489 | unsigned long flags = rhpt->flags; |
1490 | unsigned long shift = rhpt->shift; |
1491 | struct kvm_resize_hpt *resize; |
1492 | int ret; |
1493 | |
1494 | if (flags != 0 || kvm_is_radix(kvm)) |
1495 | return -EINVAL; |
1496 | |
1497 | if (shift && ((shift < 18) || (shift > 46))) |
1498 | return -EINVAL; |
1499 | |
1500 | mutex_lock(&kvm->arch.mmu_setup_lock); |
1501 | |
1502 | resize = kvm->arch.resize_hpt; |
1503 | |
1504 | if (resize) { |
1505 | if (resize->order == shift) { |
1506 | /* Suitable resize in progress? */ |
1507 | ret = resize->error; |
1508 | if (ret == -EBUSY) |
1509 | ret = 100; /* estimated time in ms */ |
1510 | else if (ret) |
1511 | resize_hpt_release(kvm, resize); |
1512 | |
1513 | goto out; |
1514 | } |
1515 | |
1516 | /* not suitable, cancel it */ |
1517 | resize_hpt_release(kvm, resize); |
1518 | } |
1519 | |
1520 | ret = 0; |
1521 | if (!shift) |
1522 | goto out; /* nothing to do */ |
1523 | |
1524 | /* start new resize */ |
1525 | |
	resize = kzalloc(sizeof(*resize), GFP_KERNEL);
1527 | if (!resize) { |
1528 | ret = -ENOMEM; |
1529 | goto out; |
1530 | } |
1531 | |
1532 | resize->error = -EBUSY; |
1533 | resize->order = shift; |
1534 | resize->kvm = kvm; |
1535 | INIT_WORK(&resize->work, resize_hpt_prepare_work); |
1536 | kvm->arch.resize_hpt = resize; |
1537 | |
	schedule_work(&resize->work);
1539 | |
1540 | ret = 100; /* estimated time in ms */ |
1541 | |
1542 | out: |
	mutex_unlock(&kvm->arch.mmu_setup_lock);
1544 | return ret; |
1545 | } |
1546 | |
1547 | static void resize_hpt_boot_vcpu(void *opaque) |
1548 | { |
1549 | /* Nothing to do, just force a KVM exit */ |
1550 | } |
1551 | |
1552 | int kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm, |
1553 | struct kvm_ppc_resize_hpt *rhpt) |
1554 | { |
1555 | unsigned long flags = rhpt->flags; |
1556 | unsigned long shift = rhpt->shift; |
1557 | struct kvm_resize_hpt *resize; |
1558 | int ret; |
1559 | |
1560 | if (flags != 0 || kvm_is_radix(kvm)) |
1561 | return -EINVAL; |
1562 | |
1563 | if (shift && ((shift < 18) || (shift > 46))) |
1564 | return -EINVAL; |
1565 | |
1566 | mutex_lock(&kvm->arch.mmu_setup_lock); |
1567 | |
1568 | resize = kvm->arch.resize_hpt; |
1569 | |
1570 | /* This shouldn't be possible */ |
1571 | ret = -EIO; |
1572 | if (WARN_ON(!kvm->arch.mmu_ready)) |
1573 | goto out_no_hpt; |
1574 | |
1575 | /* Stop VCPUs from running while we mess with the HPT */ |
1576 | kvm->arch.mmu_ready = 0; |
1577 | smp_mb(); |
1578 | |
1579 | /* Boot all CPUs out of the guest so they re-read |
1580 | * mmu_ready */ |
	on_each_cpu(resize_hpt_boot_vcpu, NULL, 1);
1582 | |
1583 | ret = -ENXIO; |
1584 | if (!resize || (resize->order != shift)) |
1585 | goto out; |
1586 | |
1587 | ret = resize->error; |
1588 | if (ret) |
1589 | goto out; |
1590 | |
1591 | ret = resize_hpt_rehash(resize); |
1592 | if (ret) |
1593 | goto out; |
1594 | |
1595 | resize_hpt_pivot(resize); |
1596 | |
1597 | out: |
1598 | /* Let VCPUs run again */ |
1599 | kvm->arch.mmu_ready = 1; |
1600 | smp_mb(); |
1601 | out_no_hpt: |
1602 | resize_hpt_release(kvm, resize); |
	mutex_unlock(&kvm->arch.mmu_setup_lock);
1604 | return ret; |
1605 | } |
1606 | |
1607 | /* |
1608 | * Functions for reading and writing the hash table via reads and |
1609 | * writes on a file descriptor. |
1610 | * |
1611 | * Reads return the guest view of the hash table, which has to be |
1612 | * pieced together from the real hash table and the guest_rpte |
1613 | * values in the revmap array. |
1614 | * |
1615 | * On writes, each HPTE written is considered in turn, and if it |
1616 | * is valid, it is written to the HPT as if an H_ENTER with the |
1617 | * exact flag set was done. When the invalid count is non-zero |
1618 | * in the header written to the stream, the kernel will make |
1619 | * sure that that many HPTEs are invalid, and invalidate them |
1620 | * if not. |
1621 | */ |
1622 | |
1623 | struct kvm_htab_ctx { |
1624 | unsigned long index; |
1625 | unsigned long flags; |
1626 | struct kvm *kvm; |
1627 | int first_pass; |
1628 | }; |
1629 | |
1630 | #define HPTE_SIZE (2 * sizeof(unsigned long)) |
1631 | |
1632 | /* |
1633 | * Returns 1 if this HPT entry has been modified or has pending |
1634 | * R/C bit changes. |
1635 | */ |
1636 | static int hpte_dirty(struct revmap_entry *revp, __be64 *hptp) |
1637 | { |
1638 | unsigned long rcbits_unset; |
1639 | |
1640 | if (revp->guest_rpte & HPTE_GR_MODIFIED) |
1641 | return 1; |
1642 | |
1643 | /* Also need to consider changes in reference and changed bits */ |
1644 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); |
1645 | if ((be64_to_cpu(hptp[0]) & HPTE_V_VALID) && |
1646 | (be64_to_cpu(hptp[1]) & rcbits_unset)) |
1647 | return 1; |
1648 | |
1649 | return 0; |
1650 | } |
1651 | |
1652 | static long record_hpte(unsigned long flags, __be64 *hptp, |
1653 | unsigned long *hpte, struct revmap_entry *revp, |
1654 | int want_valid, int first_pass) |
1655 | { |
1656 | unsigned long v, r, hr; |
1657 | unsigned long rcbits_unset; |
1658 | int ok = 1; |
1659 | int valid, dirty; |
1660 | |
1661 | /* Unmodified entries are uninteresting except on the first pass */ |
1662 | dirty = hpte_dirty(revp, hptp); |
1663 | if (!first_pass && !dirty) |
1664 | return 0; |
1665 | |
1666 | valid = 0; |
1667 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) { |
1668 | valid = 1; |
1669 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && |
1670 | !(be64_to_cpu(hptp[0]) & HPTE_V_BOLTED)) |
1671 | valid = 0; |
1672 | } |
1673 | if (valid != want_valid) |
1674 | return 0; |
1675 | |
1676 | v = r = 0; |
1677 | if (valid || dirty) { |
1678 | /* lock the HPTE so it's stable and read it */ |
1679 | preempt_disable(); |
1680 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) |
1681 | cpu_relax(); |
1682 | v = be64_to_cpu(hptp[0]); |
1683 | hr = be64_to_cpu(hptp[1]); |
1684 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
1685 | v = hpte_new_to_old_v(v, hr); |
1686 | hr = hpte_new_to_old_r(hr); |
1687 | } |
1688 | |
1689 | /* re-evaluate valid and dirty from synchronized HPTE value */ |
1690 | valid = !!(v & HPTE_V_VALID); |
1691 | dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED); |
1692 | |
1693 | /* Harvest R and C into guest view if necessary */ |
1694 | rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C); |
1695 | if (valid && (rcbits_unset & hr)) { |
1696 | revp->guest_rpte |= (hr & |
1697 | (HPTE_R_R | HPTE_R_C)) | HPTE_GR_MODIFIED; |
1698 | dirty = 1; |
1699 | } |
1700 | |
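		/*
		 * An ABSENT entry is still valid from the guest's point of
		 * view (the host has merely paged the HPTE out), so present
		 * it to userspace as valid.
		 */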
1701 | if (v & HPTE_V_ABSENT) { |
1702 | v &= ~HPTE_V_ABSENT; |
1703 | v |= HPTE_V_VALID; |
1704 | valid = 1; |
1705 | } |
1706 | if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED)) |
1707 | valid = 0; |
1708 | |
1709 | r = revp->guest_rpte; |
1710 | /* only clear modified if this is the right sort of entry */ |
1711 | if (valid == want_valid && dirty) { |
1712 | r &= ~HPTE_GR_MODIFIED; |
1713 | revp->guest_rpte = r; |
1714 | } |
1715 | unlock_hpte(hptp, be64_to_cpu(hptp[0])); |
1716 | preempt_enable(); |
1717 | if (!(valid == want_valid && (first_pass || dirty))) |
1718 | ok = 0; |
1719 | } |
1720 | hpte[0] = cpu_to_be64(v); |
1721 | hpte[1] = cpu_to_be64(r); |
1722 | return ok; |
1723 | } |
1724 | |
1725 | static ssize_t kvm_htab_read(struct file *file, char __user *buf, |
1726 | size_t count, loff_t *ppos) |
1727 | { |
1728 | struct kvm_htab_ctx *ctx = file->private_data; |
1729 | struct kvm *kvm = ctx->kvm; |
	struct kvm_get_htab_header hdr;
1731 | __be64 *hptp; |
1732 | struct revmap_entry *revp; |
1733 | unsigned long i, nb, nw; |
1734 | unsigned long __user *lbuf; |
1735 | struct kvm_get_htab_header __user *hptr; |
1736 | unsigned long flags; |
1737 | int first_pass; |
1738 | unsigned long hpte[2]; |
1739 | |
1740 | if (!access_ok(buf, count)) |
1741 | return -EFAULT; |
1742 | if (kvm_is_radix(kvm)) |
1743 | return 0; |
1744 | |
1745 | first_pass = ctx->first_pass; |
1746 | flags = ctx->flags; |
1747 | |
1748 | i = ctx->index; |
1749 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
1750 | revp = kvm->arch.hpt.rev + i; |
1751 | lbuf = (unsigned long __user *)buf; |
1752 | |
1753 | nb = 0; |
1754 | while (nb + sizeof(hdr) + HPTE_SIZE < count) { |
1755 | /* Initialize header */ |
1756 | hptr = (struct kvm_get_htab_header __user *)buf; |
1757 | hdr.n_valid = 0; |
1758 | hdr.n_invalid = 0; |
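/* remember the byte count before this block so an empty one can be dropped */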
1759 | nw = nb; |
1760 | nb += sizeof(hdr); |
1761 | lbuf = (unsigned long __user *)(buf + sizeof(hdr)); |
1762 | |
1763 | /* Skip uninteresting entries, i.e. clean on not-first pass */ |
1764 | if (!first_pass) { |
1765 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
1766 | !hpte_dirty(revp, hptp)) { |
1767 | ++i; |
1768 | hptp += 2; |
1769 | ++revp; |
1770 | } |
1771 | } |
1772 | hdr.index = i; |
1773 | |
1774 | /* Grab a series of valid entries */ |
1775 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
1776 | hdr.n_valid < 0xffff && |
1777 | nb + HPTE_SIZE < count && |
1778 | record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
1779 | /* valid entry, write it out */ |
1780 | ++hdr.n_valid; |
1781 | if (__put_user(hpte[0], lbuf) || |
1782 | __put_user(hpte[1], lbuf + 1)) |
1783 | return -EFAULT; |
1784 | nb += HPTE_SIZE; |
1785 | lbuf += 2; |
1786 | ++i; |
1787 | hptp += 2; |
1788 | ++revp; |
1789 | } |
1790 | /* Now skip invalid entries while we can */ |
1791 | while (i < kvmppc_hpt_npte(&kvm->arch.hpt) && |
1792 | hdr.n_invalid < 0xffff && |
1793 | record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
1794 | /* found an invalid entry */ |
1795 | ++hdr.n_invalid; |
1796 | ++i; |
1797 | hptp += 2; |
1798 | ++revp; |
1799 | } |
1800 | |
1801 | if (hdr.n_valid || hdr.n_invalid) { |
1802 | /* write back the header */ |
1803 | if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
1804 | return -EFAULT; |
1805 | nw = nb; |
1806 | buf = (char __user *)lbuf; |
1807 | } else { |
1808 | nb = nw; |
1809 | } |
1810 | |
1811 | /* Check if we've wrapped around the hash table */ |
1812 | if (i >= kvmppc_hpt_npte(&kvm->arch.hpt)) { |
1813 | i = 0; |
1814 | ctx->first_pass = 0; |
1815 | break; |
1816 | } |
1817 | } |
1818 | |
1819 | ctx->index = i; |
1820 | |
1821 | return nb; |
1822 | } |
1823 | |
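/*
 * Restore guest HPT contents from the stream format produced by
 * kvm_htab_read(): for each header, install hdr.n_valid entries at
 * hdr.index via the virtual-mode H_ENTER path, then invalidate the
 * following hdr.n_invalid entries.  The HPT must be quiescent: if the
 * MMU is already marked ready and vcpus are running, the write fails
 * with -EBUSY.
 */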
1824 | static ssize_t kvm_htab_write(struct file *file, const char __user *buf, |
1825 | size_t count, loff_t *ppos) |
1826 | { |
1827 | struct kvm_htab_ctx *ctx = file->private_data; |
1828 | struct kvm *kvm = ctx->kvm; |
1829 | struct kvm_get_htab_header hdr;
1830 | unsigned long i, j; |
1831 | unsigned long v, r; |
1832 | unsigned long __user *lbuf; |
1833 | __be64 *hptp; |
1834 | unsigned long tmp[2]; |
1835 | ssize_t nb; |
1836 | long int err, ret; |
1837 | int mmu_ready; |
1838 | int pshift; |
1839 | |
1840 | if (!access_ok(buf, count)) |
1841 | return -EFAULT; |
1842 | if (kvm_is_radix(kvm)) |
1843 | return -EINVAL; |
1844 | |
1845 | /* lock out vcpus from running while we're doing this */ |
1846 | mutex_lock(&kvm->arch.mmu_setup_lock); |
1847 | mmu_ready = kvm->arch.mmu_ready; |
1848 | if (mmu_ready) { |
1849 | kvm->arch.mmu_ready = 0; /* temporarily */ |
1850 | /* order mmu_ready vs. vcpus_running */ |
1851 | smp_mb(); |
1852 | if (atomic_read(&kvm->arch.vcpus_running)) {
1853 | kvm->arch.mmu_ready = 1;
1854 | mutex_unlock(&kvm->arch.mmu_setup_lock);
1855 | return -EBUSY; |
1856 | } |
1857 | } |
1858 | |
1859 | err = 0; |
1860 | for (nb = 0; nb + sizeof(hdr) <= count; ) { |
1861 | err = -EFAULT; |
1862 | if (__copy_from_user(&hdr, buf, sizeof(hdr)))
1863 | break; |
1864 | |
1865 | err = 0; |
1866 | if (nb + hdr.n_valid * HPTE_SIZE > count) |
1867 | break; |
1868 | |
1869 | nb += sizeof(hdr); |
1870 | buf += sizeof(hdr); |
1871 | |
1872 | err = -EINVAL; |
1873 | i = hdr.index; |
1874 | if (i >= kvmppc_hpt_npte(&kvm->arch.hpt) || |
1875 | i + hdr.n_valid + hdr.n_invalid > kvmppc_hpt_npte(&kvm->arch.hpt)) |
1876 | break; |
1877 | |
1878 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
1879 | lbuf = (unsigned long __user *)buf; |
1880 | for (j = 0; j < hdr.n_valid; ++j) { |
1881 | __be64 hpte_v; |
1882 | __be64 hpte_r; |
1883 | |
1884 | err = -EFAULT; |
1885 | if (__get_user(hpte_v, lbuf) || |
1886 | __get_user(hpte_r, lbuf + 1)) |
1887 | goto out; |
1888 | v = be64_to_cpu(hpte_v); |
1889 | r = be64_to_cpu(hpte_r); |
1890 | err = -EINVAL; |
1891 | if (!(v & HPTE_V_VALID)) |
1892 | goto out; |
1893 | pshift = kvmppc_hpte_base_page_shift(v, r); |
1894 | if (pshift <= 0) |
1895 | goto out; |
1896 | lbuf += 2; |
1897 | nb += HPTE_SIZE; |
1898 | |
1899 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
1900 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
1901 | err = -EIO; |
1902 | ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r, |
1903 | tmp); |
1904 | if (ret != H_SUCCESS) { |
1905 | pr_err("%s ret %ld i=%ld v=%lx r=%lx\n", __func__, ret, i, v, r);
1906 | goto out; |
1907 | } |
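/*
 * The first VRMA entry restored tells us the guest real-mode area's
 * page size; derive the SLB encoding for it and program LPCR[VRMASD]
 * (pre-ISA v3.00) or the partition table (ISA v3.00 and later).
 */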
1908 | if (!mmu_ready && is_vrma_hpte(v)) { |
1909 | unsigned long senc, lpcr; |
1910 | |
1911 | senc = slb_pgsize_encoding(1ul << pshift); |
1912 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | |
1913 | (VRMA_VSID << SLB_VSID_SHIFT_1T); |
1914 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { |
1915 | lpcr = senc << (LPCR_VRMASD_SH - 4); |
1916 | kvmppc_update_lpcr(kvm, lpcr, |
1917 | LPCR_VRMASD); |
1918 | } else { |
1919 | kvmppc_setup_partition_table(kvm); |
1920 | } |
1921 | mmu_ready = 1; |
1922 | } |
1923 | ++i; |
1924 | hptp += 2; |
1925 | } |
1926 | |
1927 | for (j = 0; j < hdr.n_invalid; ++j) { |
1928 | if (be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT)) |
1929 | kvmppc_do_h_remove(kvm, 0, i, 0, tmp); |
1930 | ++i; |
1931 | hptp += 2; |
1932 | } |
1933 | err = 0; |
1934 | } |
1935 | |
1936 | out: |
1937 | /* Order HPTE updates vs. mmu_ready */ |
1938 | smp_wmb(); |
1939 | kvm->arch.mmu_ready = mmu_ready; |
1940 | mutex_unlock(&kvm->arch.mmu_setup_lock);
1941 | |
1942 | if (err) |
1943 | return err; |
1944 | return nb; |
1945 | } |
1946 | |
1947 | static int kvm_htab_release(struct inode *inode, struct file *filp) |
1948 | { |
1949 | struct kvm_htab_ctx *ctx = filp->private_data; |
1950 | |
1951 | filp->private_data = NULL; |
1952 | if (!(ctx->flags & KVM_GET_HTAB_WRITE)) |
1953 | atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
1954 | kvm_put_kvm(ctx->kvm);
1955 | kfree(ctx);
1956 | return 0; |
1957 | } |
1958 | |
1959 | static const struct file_operations kvm_htab_fops = { |
1960 | .read = kvm_htab_read, |
1961 | .write = kvm_htab_write, |
1962 | .llseek = default_llseek, |
1963 | .release = kvm_htab_release, |
1964 | }; |
1965 | |
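/*
 * Create an anonymous file descriptor that userspace can use to read
 * out (or, with KVM_GET_HTAB_WRITE, write back) the guest HPT in the
 * stream format above.  A read-only fd also bumps hpte_mod_interest so
 * that the H_ENTER/H_REMOVE paths start tracking which entries have
 * been modified for subsequent passes.
 *
 * Illustrative userspace sketch (assumed usage, following the
 * KVM_PPC_GET_HTAB_FD API; not part of this file):
 *
 *     struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *     int fd = ioctl(vm_fd, KVM_PPC_GET_HTAB_FD, &ghf);
 *     // then read() the fd repeatedly to obtain headers + HPTEs
 */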
1966 | int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf) |
1967 | { |
1968 | int ret; |
1969 | struct kvm_htab_ctx *ctx; |
1970 | int rwflag; |
1971 | |
1972 | /* reject flags we don't recognize */ |
1973 | if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE)) |
1974 | return -EINVAL; |
1975 | ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
1976 | if (!ctx) |
1977 | return -ENOMEM; |
1978 | kvm_get_kvm(kvm); |
1979 | ctx->kvm = kvm; |
1980 | ctx->index = ghf->start_index; |
1981 | ctx->flags = ghf->flags; |
1982 | ctx->first_pass = 1; |
1983 | |
1984 | rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY; |
1985 | ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
1986 | if (ret < 0) { |
1987 | kfree(objp: ctx); |
1988 | kvm_put_kvm_no_destroy(kvm); |
1989 | return ret; |
1990 | } |
1991 | |
1992 | if (rwflag == O_RDONLY) { |
1993 | mutex_lock(&kvm->slots_lock); |
1994 | atomic_inc(&kvm->arch.hpte_mod_interest);
1995 | /* make sure kvmppc_do_h_enter etc. see the increment */
1996 | synchronize_srcu_expedited(&kvm->srcu);
1997 | mutex_unlock(&kvm->slots_lock);
1998 | } |
1999 | |
2000 | return ret; |
2001 | } |
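/*
 * Per-open state for the debugfs "htab" dumper; buf holds the tail of
 * a partially copied output line between reads.
 */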
2002 | |
2003 | struct debugfs_htab_state { |
2004 | struct kvm *kvm; |
2005 | struct mutex mutex; |
2006 | unsigned long hpt_index; |
2007 | int chars_left; |
2008 | int buf_index; |
2009 | char buf[64]; |
2010 | }; |
2011 | |
2012 | static int debugfs_htab_open(struct inode *inode, struct file *file) |
2013 | { |
2014 | struct kvm *kvm = inode->i_private; |
2015 | struct debugfs_htab_state *p; |
2016 | |
2017 | p = kzalloc(sizeof(*p), GFP_KERNEL);
2018 | if (!p) |
2019 | return -ENOMEM; |
2020 | |
2021 | kvm_get_kvm(kvm); |
2022 | p->kvm = kvm; |
2023 | mutex_init(&p->mutex); |
2024 | file->private_data = p; |
2025 | |
2026 | return nonseekable_open(inode, file);
2027 | } |
2028 | |
2029 | static int debugfs_htab_release(struct inode *inode, struct file *file) |
2030 | { |
2031 | struct debugfs_htab_state *p = file->private_data; |
2032 | |
2033 | kvm_put_kvm(p->kvm);
2034 | kfree(p);
2035 | return 0; |
2036 | } |
2037 | |
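/*
 * Dump the HPT through debugfs, one line per valid (or absent) entry:
 * the index, both HPTE doublewords and the guest view of the second
 * doubleword, all in hex.
 */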
2038 | static ssize_t debugfs_htab_read(struct file *file, char __user *buf, |
2039 | size_t len, loff_t *ppos) |
2040 | { |
2041 | struct debugfs_htab_state *p = file->private_data; |
2042 | ssize_t ret, r; |
2043 | unsigned long i, n; |
2044 | unsigned long v, hr, gr; |
2045 | struct kvm *kvm; |
2046 | __be64 *hptp; |
2047 | |
2048 | kvm = p->kvm; |
2049 | if (kvm_is_radix(kvm)) |
2050 | return 0; |
2051 | |
2052 | ret = mutex_lock_interruptible(&p->mutex); |
2053 | if (ret) |
2054 | return ret; |
2055 | |
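/* First drain any partial output line left over from the previous read */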
2056 | if (p->chars_left) { |
2057 | n = p->chars_left; |
2058 | if (n > len) |
2059 | n = len; |
2060 | r = copy_to_user(buf, p->buf + p->buf_index, n);
2061 | n -= r; |
2062 | p->chars_left -= n; |
2063 | p->buf_index += n; |
2064 | buf += n; |
2065 | len -= n; |
2066 | ret = n; |
2067 | if (r) { |
2068 | if (!n) |
2069 | ret = -EFAULT; |
2070 | goto out; |
2071 | } |
2072 | } |
2073 | |
2074 | i = p->hpt_index; |
2075 | hptp = (__be64 *)(kvm->arch.hpt.virt + (i * HPTE_SIZE)); |
2076 | for (; len != 0 && i < kvmppc_hpt_npte(&kvm->arch.hpt); |
2077 | ++i, hptp += 2) { |
2078 | if (!(be64_to_cpu(hptp[0]) & (HPTE_V_VALID | HPTE_V_ABSENT))) |
2079 | continue; |
2080 | |
2081 | /* lock the HPTE so it's stable and read it */ |
2082 | preempt_disable(); |
2083 | while (!try_lock_hpte(hptp, HPTE_V_HVLOCK)) |
2084 | cpu_relax(); |
2085 | v = be64_to_cpu(hptp[0]) & ~HPTE_V_HVLOCK; |
2086 | hr = be64_to_cpu(hptp[1]); |
2087 | gr = kvm->arch.hpt.rev[i].guest_rpte; |
2088 | unlock_hpte(hptp, v); |
2089 | preempt_enable(); |
2090 | |
2091 | if (!(v & (HPTE_V_VALID | HPTE_V_ABSENT))) |
2092 | continue; |
2093 | |
2094 | n = scnprintf(p->buf, sizeof(p->buf),
2095 | "%6lx %.16lx %.16lx %.16lx\n",
2096 | i, v, hr, gr); |
2097 | p->chars_left = n; |
2098 | if (n > len) |
2099 | n = len; |
2100 | r = copy_to_user(buf, p->buf, n);
2101 | n -= r; |
2102 | p->chars_left -= n; |
2103 | p->buf_index = n; |
2104 | buf += n; |
2105 | len -= n; |
2106 | ret += n; |
2107 | if (r) { |
2108 | if (!ret) |
2109 | ret = -EFAULT; |
2110 | goto out; |
2111 | } |
2112 | } |
2113 | p->hpt_index = i; |
2114 | |
2115 | out: |
2116 | mutex_unlock(&p->mutex);
2117 | return ret; |
2118 | } |
2119 | |
2120 | static ssize_t debugfs_htab_write(struct file *file, const char __user *buf, |
2121 | size_t len, loff_t *ppos) |
2122 | { |
2123 | return -EACCES; |
2124 | } |
2125 | |
2126 | static const struct file_operations debugfs_htab_fops = { |
2127 | .owner = THIS_MODULE, |
2128 | .open = debugfs_htab_open, |
2129 | .release = debugfs_htab_release, |
2130 | .read = debugfs_htab_read, |
2131 | .write = debugfs_htab_write, |
2132 | .llseek = generic_file_llseek, |
2133 | }; |
2134 | |
2135 | void kvmppc_mmu_debugfs_init(struct kvm *kvm) |
2136 | { |
2137 | debugfs_create_file("htab", 0400, kvm->debugfs_dentry, kvm,
2138 | &debugfs_htab_fops);
2139 | } |
2140 | |
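/*
 * Wire up the HV MMU callbacks for a vcpu: address translation goes
 * through kvmppc_mmu_book3s_64_hv_xlate and the guest is given a
 * 32-entry SLB.
 */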
2141 | void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu) |
2142 | { |
2143 | struct kvmppc_mmu *mmu = &vcpu->arch.mmu; |
2144 | |
2145 | vcpu->arch.slb_nr = 32; /* POWER7/POWER8 */ |
2146 | |
2147 | mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate; |
2148 | |
2149 | vcpu->arch.hflags |= BOOK3S_HFLAG_SLB; |
2150 | } |
2151 | |