// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

u8 __read_mostly shadow_phys_bits;

void __init kvm_mmu_spte_module_init(void)
{
	/*
	 * Snapshot userspace's desire to allow MMIO caching. Whether or not
	 * KVM can actually enable MMIO caching depends on vendor-specific
	 * hardware capabilities and other module params that can't be resolved
	 * until the vendor module is loaded, i.e. enable_mmio_caching can and
	 * will change when the vendor module is (re)loaded.
	 */
	allow_mmio_caching = enable_mmio_caching;
}

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}
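
/*
 * Illustrative note: the memslots generation does not fit in a single
 * contiguous run of software-available PTE bits, so it is split into a low
 * chunk and a high chunk, placed at MMIO_SPTE_GEN_LOW_SHIFT and
 * MMIO_SPTE_GEN_HIGH_SHIFT respectively (see the definitions in spte.h).
 * get_mmio_spte_generation() in spte.h performs the inverse operation and
 * reassembles the original generation value from the two chunks.
 */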

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}
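
/*
 * Illustrative note: the resulting MMIO SPTE packs the MMIO signature
 * (shadow_mmio_value), the allowed access bits, the target GPA, and the
 * memslots generation into a single not-present PTE. Any GPA bits that
 * collide with shadow_nonpresent_or_rsvd_mask (the L1TF mitigation bits)
 * are stashed SHADOW_NONPRESENT_OR_RSVD_MASK_LEN bits higher; the GFN is
 * later reassembled from both pieces by get_mmio_spte_gfn() in spte.h.
 */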

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the check above misidentifies those pages
			 * as MMIO, causing KVM to map them with the UC memory
			 * type, which hurts performance. Therefore, also
			 * check the host memory type and only treat
			 * UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{
	/*
	 * Always atomically update an SPTE that can be modified outside of
	 * mmu_lock. Doing so ensures the Dirty bit is not lost and keeps
	 * is_writable_pte() stable, so that a required TLB flush is not
	 * missed.
	 */
	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
		return true;

	if (is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if (!(spte & shadow_accessed_mask) ||
		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
			return true;
	}

	return false;
}
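
/*
 * Illustrative note (examples, not an exhaustive list): an SPTE that is
 * MMU-writable but currently !writable can be made writable by the fast
 * page fault path without mmu_lock, and hardware can set the Accessed and
 * Dirty bits at any time when A/D bits are enabled. Callers that clear or
 * modify such SPTEs are expected to use an atomic exchange so that the
 * stale value, including a just-set Dirty bit, is still observed.
 */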

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte)
{
	int level = sp->role.level;
	u64 spte = SPTE_MMU_PRESENT_MASK;
	bool wrprot = false;

	WARN_ON_ONCE(!pte_access && !shadow_present_mask);

	if (sp->role.ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED;
	else if (kvm_mmu_page_ad_need_write_protect(sp))
		spte |= SPTE_TDP_AD_WRPROT_ONLY;

	/*
	 * For the EPT case, shadow_present_mask is 0 if hardware
	 * supports exec-only page table entries. In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access. See FNAME(gpte_access) in paging_tmpl.h.
	 */
	spte |= shadow_present_mask;
	if (!prefetch)
		spte |= spte_shadow_accessed_mask(spte);

	/*
	 * For simplicity, enforce the NX huge page mitigation even if not
	 * strictly necessary. KVM could ignore the mitigation if paging is
	 * disabled in the guest, as the guest doesn't have any page tables to
	 * abuse. But to safely ignore the mitigation, KVM would have to
	 * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
	 * is toggled on, and that's a net negative for performance when TDP is
	 * enabled. When TDP is disabled, KVM will always switch to a new MMU
	 * when CR0.PG is toggled, but leveraging that to ignore the mitigation
	 * would tie make_spte() further to vCPU/MMU state, and add complexity
	 * just to optimize a mode that is anything but performance critical.
	 */
	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled(vcpu->kvm)) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;

	if (shadow_memtype_mask)
		spte |= static_call(kvm_x86_get_mt_mask)(vcpu, gfn,
							 kvm_is_mmio_pfn(pfn));
	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_value;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask;

		/*
		 * Optimization: for pte sync, if the SPTE was already writable
		 * the hash lookup is unnecessary (and expensive). Write
		 * protection is the responsibility of kvm_mmu_get_page /
		 * kvm_mmu_sync_roots. The same reasoning applies to dirty
		 * page accounting.
		 */
		if (is_writable_pte(old_spte))
			goto out;

		/*
		 * Unsync shadow pages that are reachable by the new, writable
		 * SPTE. Write-protect the SPTE if the page can't be unsync'd,
		 * e.g. it's write-tracked (upper-level SPs) or has one or more
		 * shadow pages and unsync'ing pages is not allowed.
		 */
		if (mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, can_unsync, prefetch)) {
			wrprot = true;
			pte_access &= ~ACC_WRITE_MASK;
			spte &= ~(PT_WRITABLE_MASK | shadow_mmu_writable_mask);
		}
	}

	if (pte_access & ACC_WRITE_MASK)
		spte |= spte_shadow_dirty_mask(spte);

out:
	if (prefetch)
		spte = mark_spte_for_access_track(spte);

	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
		/* Enforced by kvm_mmu_hugepage_adjust. */
		WARN_ON_ONCE(level > PG_LEVEL_4K);
		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
	}

	*new_spte = spte;
	return wrprot;
}
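
/*
 * Illustrative note on the return value: wrprot == true means the new SPTE
 * had to be write-protected because the gfn maps shadow pages that could
 * not be unsync'd. Callers (e.g. mmu_set_spte()) typically use this to
 * force emulation of the faulting write rather than retrying it.
 */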

static u64 make_spte_executable(u64 spte)
{
	bool is_access_track = is_access_track_spte(spte);

	if (is_access_track)
		spte = restore_acc_track_spte(spte);

	spte &= ~shadow_nx_mask;
	spte |= shadow_x_mask;

	if (is_access_track)
		spte = mark_spte_for_access_track(spte);

	return spte;
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up the
 * new page table.
 */
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte, union kvm_mmu_page_role role,
			      int index)
{
	u64 child_spte;

	if (WARN_ON_ONCE(!is_shadow_present_pte(huge_spte)))
		return 0;

	if (WARN_ON_ONCE(!is_large_pte(huge_spte)))
		return 0;

	child_spte = huge_spte;

	/*
	 * The child_spte already has the base address of the huge page being
	 * split, so only the offset of the page at the next lower level, for
	 * the given index, needs to be OR'd in.
	 */
	child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

	if (role.level == PG_LEVEL_4K) {
		child_spte &= ~PT_PAGE_SIZE_MASK;

		/*
		 * When splitting to a 4K page where execution is allowed, mark
		 * the page executable as the NX hugepage mitigation no longer
		 * applies.
		 */
		if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
			child_spte = make_spte_executable(child_spte);
	}

	return child_spte;
}
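
/*
 * Illustrative note: `index` selects the sub-page. E.g. when splitting a 2M
 * SPTE into 512 4K SPTEs (role.level == PG_LEVEL_4K), child i maps the huge
 * page's base address plus i * 4KiB; when splitting a 1G SPTE into 2M SPTEs,
 * child i maps the base address plus i * 2MiB.
 */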


u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_value;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn)
{
	u64 new_spte;

	new_spte = old_spte & ~SPTE_BASE_ADDR_MASK;
	new_spte |= (u64)new_pfn << PAGE_SHIFT;

	new_spte &= ~PT_WRITABLE_MASK;
	new_spte &= ~shadow_host_writable_mask;
	new_spte &= ~shadow_mmu_writable_mask;

	new_spte = mark_spte_for_access_track(new_spte);

	return new_spte;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	check_spte_writable_invariants(spte);

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~shadow_acc_track_mask;

	return spte;
}
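
/*
 * Illustrative note: for access-tracked SPTEs (EPT without A/D bits),
 * clearing shadow_acc_track_mask strips the RWX permissions so that the
 * next guest access faults, which is how KVM detects the access. The
 * original permission bits are parked in the "saved bits" region and are
 * put back by restore_acc_track_spte().
 */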

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	/*
	 * Reset to the original module param value to honor userspace's desire
	 * to (dis)allow MMIO caching. Update the param itself so that
	 * userspace can see whether or not KVM is actually using MMIO caching.
	 */
	enable_mmio_caching = allow_mmio_caching;
	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * The mask must contain only bits that are carved out specifically for
	 * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
	 * generation.
	 */
	if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled. This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	/*
	 * The masked MMIO value must obviously match itself and a removed SPTE
	 * must not get a false positive. Removed SPTEs and MMIO SPTEs should
	 * never collide as MMIO must set some RWX bits, and removed SPTEs must
	 * not set any RWX bits.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (REMOVED_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

	if (!mmio_value)
		enable_mmio_caching = false;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
	/* shadow_me_value must be a subset of shadow_me_mask */
	if (WARN_ON(me_value & ~me_mask))
		me_value = me_mask = 0;

	shadow_me_value = me_value;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	shadow_user_mask = VMX_EPT_READABLE_MASK;
	shadow_accessed_mask = has_ad_bits ? VMX_EPT_ACCESS_BIT : 0ull;
	shadow_dirty_mask = has_ad_bits ? VMX_EPT_DIRTY_BIT : 0ull;
	shadow_nx_mask = 0ull;
	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
	shadow_present_mask = has_exec_only ? 0ull : VMX_EPT_READABLE_MASK;
	/*
	 * EPT overrides the host MTRRs, and so KVM must program the desired
	 * memtype directly into the SPTEs. Note, this mask is just the mask
	 * of all bits that factor into the memtype, the actual memtype must be
	 * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
	 */
	shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	shadow_phys_bits = kvm_get_shadow_phys_bits();

	/*
	 * If the CPU has 46 or less physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks. Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID. Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}
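
	/*
	 * Illustrative example, assuming SHADOW_NONPRESENT_OR_RSVD_MASK_LEN
	 * is 5 (see spte.h) and x86_cache_bits is 46: low_phys_bits becomes
	 * 41 and the mask above covers PA bits 45:41, i.e. those bits are
	 * forced to 1 in non-present and MMIO SPTEs, and any GFN bits that
	 * would land there are relocated upward by make_mmio_spte().
	 */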

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	shadow_user_mask = PT_USER_MASK;
	shadow_accessed_mask = PT_ACCESSED_MASK;
	shadow_dirty_mask = PT_DIRTY_MASK;
	shadow_nx_mask = PT64_NX_MASK;
	shadow_x_mask = 0;
	shadow_present_mask = PT_PRESENT_MASK;

	/*
	 * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
	 * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
	 * correct memtype (WB is the "weakest" memtype).
	 */
	shadow_memtype_mask = 0;
	shadow_acc_track_mask = 0;
	shadow_me_mask = 0;
	shadow_me_value = 0;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (shadow_phys_bits < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}
518 | |