// SPDX-License-Identifier: GPL-2.0-only

#ifndef KVM_X86_MMU_SPTE_H
#define KVM_X86_MMU_SPTE_H

#include "mmu.h"
#include "mmu_internal.h"

/*
 * An MMU-present SPTE is backed by actual memory and may or may not be present
 * in hardware. E.g. MMIO SPTEs are not considered present. Use bit 11, as it
 * is ignored by all flavors of SPTEs and checking a low bit often generates
 * better code than for a high bit, e.g. 56+. MMU present checks are pervasive
 * enough that the improved code generation is noticeable in KVM's footprint.
 */
#define SPTE_MMU_PRESENT_MASK BIT_ULL(11)

/*
 * TDP SPTEs (more specifically, EPT SPTEs) may not have A/D bits, and may also
 * be restricted to using write-protection (for L2 when CPU dirty logging, i.e.
 * PML, is enabled). Use bits 52 and 53 to hold the type of A/D tracking that
 * must be employed for a given TDP SPTE.
 *
 * Note, the "enabled" mask must be '0', as bits 62:52 are _reserved_ for PAE
 * paging, including NPT PAE. This scheme works because legacy shadow paging
 * is guaranteed to have A/D bits and write-protection is forced only for
 * TDP with CPU dirty logging (PML). If NPT ever gains PML-like support, it
 * must be restricted to 64-bit KVM.
 */
#define SPTE_TDP_AD_SHIFT 52
#define SPTE_TDP_AD_MASK (3ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_ENABLED (0ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_DISABLED (1ULL << SPTE_TDP_AD_SHIFT)
#define SPTE_TDP_AD_WRPROT_ONLY (2ULL << SPTE_TDP_AD_SHIFT)
static_assert(SPTE_TDP_AD_ENABLED == 0);
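
/*
 * For illustration: the A/D type field occupies exactly bits 52 and 53, e.g.
 * a TDP SPTE using SPTE_TDP_AD_WRPROT_ONLY has bit 53 set and bit 52 clear.
 */
static_assert(SPTE_TDP_AD_MASK == GENMASK_ULL(53, 52));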

#ifdef CONFIG_DYNAMIC_PHYSICAL_MASK
#define SPTE_BASE_ADDR_MASK (physical_mask & ~(u64)(PAGE_SIZE-1))
#else
#define SPTE_BASE_ADDR_MASK (((1ULL << 52) - 1) & ~(u64)(PAGE_SIZE-1))
#endif

#define SPTE_PERM_MASK (PT_PRESENT_MASK | PT_WRITABLE_MASK | shadow_user_mask \
			| shadow_x_mask | shadow_nx_mask | shadow_me_mask)

#define ACC_EXEC_MASK  1
#define ACC_WRITE_MASK PT_WRITABLE_MASK
#define ACC_USER_MASK  PT_USER_MASK
#define ACC_ALL        (ACC_EXEC_MASK | ACC_WRITE_MASK | ACC_USER_MASK)

/* The mask for the R/X bits in EPT PTEs */
#define SPTE_EPT_READABLE_MASK   0x1ull
#define SPTE_EPT_EXECUTABLE_MASK 0x4ull

#define SPTE_LEVEL_BITS 9
#define SPTE_LEVEL_SHIFT(level) __PT_LEVEL_SHIFT(level, SPTE_LEVEL_BITS)
#define SPTE_INDEX(address, level) __PT_INDEX(address, level, SPTE_LEVEL_BITS)
#define SPTE_ENT_PER_PAGE __PT_ENT_PER_PAGE(SPTE_LEVEL_BITS)
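
/*
 * For example, a 2MiB mapping lives at level 2: SPTE_LEVEL_SHIFT(2) is
 * 12 + 9 = 21, so SPTE_INDEX(gpa, 2) selects bits 29:21 of the address and
 * picks one of the SPTE_ENT_PER_PAGE (512) entries in the page table.
 */
static_assert(SPTE_ENT_PER_PAGE == 512);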

/*
 * The mask/shift to use for saving the original R/X bits when marking the PTE
 * as not-present for access tracking purposes. We do not save the W bit as the
 * PTEs being access tracked also need to be dirty tracked, so the W bit will be
 * restored only when a write is attempted to the page. This mask obviously
 * must not overlap the A/D type mask.
 */
#define SHADOW_ACC_TRACK_SAVED_BITS_MASK (SPTE_EPT_READABLE_MASK | \
					  SPTE_EPT_EXECUTABLE_MASK)
#define SHADOW_ACC_TRACK_SAVED_BITS_SHIFT 54
#define SHADOW_ACC_TRACK_SAVED_MASK (SHADOW_ACC_TRACK_SAVED_BITS_MASK << \
				     SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
static_assert(!(SPTE_TDP_AD_MASK & SHADOW_ACC_TRACK_SAVED_MASK));
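
/*
 * Concretely, the saved copies of the EPT R (bit 0) and X (bit 2) permissions
 * land at bits 54 and 56, safely above the A/D type bits checked above.
 */
static_assert(SHADOW_ACC_TRACK_SAVED_MASK == (BIT_ULL(54) | BIT_ULL(56)));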

/*
 * {DEFAULT,EPT}_SPTE_{HOST,MMU}_WRITABLE are used to keep track of why a given
 * SPTE is write-protected. See is_writable_pte() for details.
 */

/* Bits 9 and 10 are ignored by all non-EPT PTEs. */
#define DEFAULT_SPTE_HOST_WRITABLE BIT_ULL(9)
#define DEFAULT_SPTE_MMU_WRITABLE  BIT_ULL(10)

/*
 * Low ignored bits are at a premium for EPT, use high ignored bits, taking care
 * to not overlap the A/D type mask or the saved access bits of access-tracked
 * SPTEs when A/D bits are disabled.
 */
#define EPT_SPTE_HOST_WRITABLE BIT_ULL(57)
#define EPT_SPTE_MMU_WRITABLE  BIT_ULL(58)

static_assert(!(EPT_SPTE_HOST_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SPTE_TDP_AD_MASK));
static_assert(!(EPT_SPTE_HOST_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));
static_assert(!(EPT_SPTE_MMU_WRITABLE & SHADOW_ACC_TRACK_SAVED_MASK));

/* Defined only to keep the above static asserts readable. */
#undef SHADOW_ACC_TRACK_SAVED_MASK

/*
 * Due to limited space in PTEs, the MMIO generation is a 19 bit subset of
 * the memslots generation and is derived as follows:
 *
 * Bits 0-7 of the MMIO generation are propagated to spte bits 3-10
 * Bits 8-18 of the MMIO generation are propagated to spte bits 52-62
 *
 * The KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS flag is intentionally not included in
 * the MMIO generation number, as doing so would require stealing a bit from
 * the "real" generation number and thus effectively halve the maximum number
 * of MMIO generations that can be handled before encountering a wrap (which
 * requires a full MMU zap). The flag is instead explicitly queried when
 * checking for MMIO spte cache hits.
 */

#define MMIO_SPTE_GEN_LOW_START 3
#define MMIO_SPTE_GEN_LOW_END   10

#define MMIO_SPTE_GEN_HIGH_START 52
#define MMIO_SPTE_GEN_HIGH_END   62

#define MMIO_SPTE_GEN_LOW_MASK  GENMASK_ULL(MMIO_SPTE_GEN_LOW_END, \
					    MMIO_SPTE_GEN_LOW_START)
#define MMIO_SPTE_GEN_HIGH_MASK GENMASK_ULL(MMIO_SPTE_GEN_HIGH_END, \
					    MMIO_SPTE_GEN_HIGH_START)
static_assert(!(SPTE_MMU_PRESENT_MASK &
		(MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

/*
 * The SPTE MMIO mask must NOT overlap the MMIO generation bits or the
 * MMU-present bit. The generation obviously co-exists with the magic MMIO
 * mask/value, and MMIO SPTEs are considered !MMU-present.
 *
 * The SPTE MMIO mask is allowed to use hardware "present" bits (i.e. all EPT
 * RWX bits), all physical address bits (legal PA bits are used for "fast" MMIO
 * and so they're off-limits for generation; additional checks ensure the mask
 * doesn't overlap legal PA bits), and bit 63 (carved out for future usage).
 */
#define SPTE_MMIO_ALLOWED_MASK (BIT_ULL(63) | GENMASK_ULL(51, 12) | GENMASK_ULL(2, 0))
static_assert(!(SPTE_MMIO_ALLOWED_MASK &
		(SPTE_MMU_PRESENT_MASK | MMIO_SPTE_GEN_LOW_MASK | MMIO_SPTE_GEN_HIGH_MASK)));

#define MMIO_SPTE_GEN_LOW_BITS  (MMIO_SPTE_GEN_LOW_END - MMIO_SPTE_GEN_LOW_START + 1)
#define MMIO_SPTE_GEN_HIGH_BITS (MMIO_SPTE_GEN_HIGH_END - MMIO_SPTE_GEN_HIGH_START + 1)

/* remember to adjust the comment above as well if you change these */
static_assert(MMIO_SPTE_GEN_LOW_BITS == 8 && MMIO_SPTE_GEN_HIGH_BITS == 11);

#define MMIO_SPTE_GEN_LOW_SHIFT  (MMIO_SPTE_GEN_LOW_START - 0)
#define MMIO_SPTE_GEN_HIGH_SHIFT (MMIO_SPTE_GEN_HIGH_START - MMIO_SPTE_GEN_LOW_BITS)

#define MMIO_SPTE_GEN_MASK GENMASK_ULL(MMIO_SPTE_GEN_LOW_BITS + MMIO_SPTE_GEN_HIGH_BITS - 1, 0)
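
/*
 * Worked example of the layout described above: for generation 0x1ff, bits
 * 0-7 (0xff) land at SPTE bits 3-10 and bits 8-18 (0x1) land at SPTE bits
 * 52-62, i.e. the high half is shifted by 52 - 8 = 44.
 */
static_assert(MMIO_SPTE_GEN_HIGH_SHIFT == 44);
static_assert(MMIO_SPTE_GEN_MASK == GENMASK_ULL(18, 0));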

extern u64 __read_mostly shadow_host_writable_mask;
extern u64 __read_mostly shadow_mmu_writable_mask;
extern u64 __read_mostly shadow_nx_mask;
extern u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
extern u64 __read_mostly shadow_user_mask;
extern u64 __read_mostly shadow_accessed_mask;
extern u64 __read_mostly shadow_dirty_mask;
extern u64 __read_mostly shadow_mmio_value;
extern u64 __read_mostly shadow_mmio_mask;
extern u64 __read_mostly shadow_mmio_access_mask;
extern u64 __read_mostly shadow_present_mask;
extern u64 __read_mostly shadow_memtype_mask;
extern u64 __read_mostly shadow_me_value;
extern u64 __read_mostly shadow_me_mask;

/*
 * SPTEs in MMUs without A/D bits are marked with SPTE_TDP_AD_DISABLED;
 * shadow_acc_track_mask is the set of bits to be cleared in non-accessed
 * pages.
 */
extern u64 __read_mostly shadow_acc_track_mask;

/*
 * This mask must be set on all non-zero Non-Present or Reserved SPTEs in order
 * to guard against L1TF attacks.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_mask;

/*
 * The number of high-order 1 bits to use in the mask above.
 */
#define SHADOW_NONPRESENT_OR_RSVD_MASK_LEN 5

/*
 * If a thread running without exclusive control of the MMU lock must perform a
 * multi-part operation on an SPTE, it can set the SPTE to REMOVED_SPTE as a
 * non-present intermediate value. Other threads which encounter this value
 * should not modify the SPTE.
 *
 * Use a semi-arbitrary value that doesn't set RWX bits, i.e. is not-present on
 * both AMD and Intel CPUs, and doesn't set PFN bits, i.e. doesn't create an
 * L1TF vulnerability. Use only low bits to avoid 64-bit immediates.
 *
 * Only used by the TDP MMU.
 */
#define REMOVED_SPTE 0x5a0ULL

/* Removed SPTEs must not be misconstrued as shadow present PTEs. */
static_assert(!(REMOVED_SPTE & SPTE_MMU_PRESENT_MASK));

static inline bool is_removed_spte(u64 spte)
{
	return spte == REMOVED_SPTE;
}
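
/*
 * Rough usage sketch (illustration only; the real handling lives in the TDP
 * MMU code): a lockless walker that observes the sentinel must back off and
 * retry rather than modify the entry, e.g.
 *
 *	old_spte = READ_ONCE(*sptep);
 *	if (is_removed_spte(old_spte))
 *		return RET_PF_RETRY;
 */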

/* Get an SPTE's index into its parent's page table (and the spt array). */
static inline int spte_index(u64 *sptep)
{
	return ((unsigned long)sptep / sizeof(*sptep)) & (SPTE_ENT_PER_PAGE - 1);
}
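
/*
 * For example, a sptep at page offset 0x38 (the eighth 8-byte entry in its
 * table) yields index 7, regardless of which page backs the table.
 */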

/*
 * In some cases, we need to preserve the GFN of a non-present or reserved
 * SPTE when we usurp the upper five bits of the physical address space to
 * defend against L1TF, e.g. for MMIO SPTEs. To preserve the GFN, we'll
 * shift bits of the GFN that overlap with shadow_nonpresent_or_rsvd_mask
 * left into the reserved bits, i.e. the GFN in the SPTE will be split into
 * high and low parts. This mask covers the lower bits of the GFN.
 */
extern u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static inline struct kvm_mmu_page *to_shadow_page(hpa_t shadow_page)
{
	struct page *page = pfn_to_page((shadow_page) >> PAGE_SHIFT);

	return (struct kvm_mmu_page *)page_private(page);
}

static inline struct kvm_mmu_page *spte_to_child_sp(u64 spte)
{
	return to_shadow_page(spte & SPTE_BASE_ADDR_MASK);
}

static inline struct kvm_mmu_page *sptep_to_sp(u64 *sptep)
{
	return to_shadow_page(__pa(sptep));
}

static inline struct kvm_mmu_page *root_to_sp(hpa_t root)
{
	if (kvm_mmu_is_dummy_root(root))
		return NULL;

	/*
	 * The "root" may be a special root, e.g. a PAE entry, treat it as a
	 * SPTE to ensure any non-PA bits are dropped.
	 */
	return spte_to_child_sp(root);
}

static inline bool is_mmio_spte(u64 spte)
{
	return (spte & shadow_mmio_mask) == shadow_mmio_value &&
	       likely(enable_mmio_caching);
}

static inline bool is_shadow_present_pte(u64 pte)
{
	return !!(pte & SPTE_MMU_PRESENT_MASK);
}

/*
 * Returns true if A/D bits are supported in hardware and are enabled by KVM.
 * When enabled, KVM uses A/D bits for all non-nested MMUs. Because L1 can
 * disable A/D bits in EPTP12, SP and SPTE variants are needed to handle the
 * scenario where KVM is using A/D bits for L1, but not L2.
 */
static inline bool kvm_ad_enabled(void)
{
	return !!shadow_accessed_mask;
}

static inline bool sp_ad_disabled(struct kvm_mmu_page *sp)
{
	return sp->role.ad_disabled;
}

static inline bool spte_ad_enabled(u64 spte)
{
	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_DISABLED;
}

static inline bool spte_ad_need_write_protect(u64 spte)
{
	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
	/*
	 * This is benign for non-TDP SPTEs as SPTE_TDP_AD_ENABLED is '0',
	 * and non-TDP SPTEs will never set these bits. Optimize for 64-bit
	 * TDP and do the A/D type check unconditionally.
	 */
	return (spte & SPTE_TDP_AD_MASK) != SPTE_TDP_AD_ENABLED;
}

static inline u64 spte_shadow_accessed_mask(u64 spte)
{
	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_accessed_mask : 0;
}

static inline u64 spte_shadow_dirty_mask(u64 spte)
{
	KVM_MMU_WARN_ON(!is_shadow_present_pte(spte));
	return spte_ad_enabled(spte) ? shadow_dirty_mask : 0;
}

static inline bool is_access_track_spte(u64 spte)
{
	return !spte_ad_enabled(spte) && (spte & shadow_acc_track_mask) == 0;
}

static inline bool is_large_pte(u64 pte)
{
	return pte & PT_PAGE_SIZE_MASK;
}

static inline bool is_last_spte(u64 pte, int level)
{
	return (level == PG_LEVEL_4K) || is_large_pte(pte);
}

static inline bool is_executable_pte(u64 spte)
{
	return (spte & (shadow_x_mask | shadow_nx_mask)) == shadow_x_mask;
}

static inline kvm_pfn_t spte_to_pfn(u64 pte)
{
	return (pte & SPTE_BASE_ADDR_MASK) >> PAGE_SHIFT;
}

static inline bool is_accessed_spte(u64 spte)
{
	u64 accessed_mask = spte_shadow_accessed_mask(spte);

	return accessed_mask ? spte & accessed_mask
			     : !is_access_track_spte(spte);
}

static inline bool is_dirty_spte(u64 spte)
{
	u64 dirty_mask = spte_shadow_dirty_mask(spte);

	return dirty_mask ? spte & dirty_mask : spte & PT_WRITABLE_MASK;
}

static inline u64 get_rsvd_bits(struct rsvd_bits_validate *rsvd_check, u64 pte,
				int level)
{
	int bit7 = (pte >> 7) & 1;

	return rsvd_check->rsvd_bits_mask[bit7][level-1];
}

static inline bool __is_rsvd_bits_set(struct rsvd_bits_validate *rsvd_check,
				      u64 pte, int level)
{
	return pte & get_rsvd_bits(rsvd_check, pte, level);
}

static inline bool __is_bad_mt_xwr(struct rsvd_bits_validate *rsvd_check,
				   u64 pte)
{
	return rsvd_check->bad_mt_xwr & BIT_ULL(pte & 0x3f);
}

static __always_inline bool is_rsvd_spte(struct rsvd_bits_validate *rsvd_check,
					 u64 spte, int level)
{
	return __is_bad_mt_xwr(rsvd_check, spte) ||
	       __is_rsvd_bits_set(rsvd_check, spte, level);
}

/*
 * A shadow-present leaf SPTE may be non-writable for 4 possible reasons:
 *
 *  1. To intercept writes for dirty logging. KVM write-protects huge pages
 *     so that they can be split down into the dirty logging
 *     granularity (4KiB) whenever the guest writes to them. KVM also
 *     write-protects 4KiB pages so that writes can be recorded in the dirty
 *     log (e.g. if not using PML). SPTEs are write-protected for dirty logging
 *     during the VM-ioctls that enable dirty logging.
 *
 *  2. To intercept writes to guest page tables that KVM is shadowing. When a
 *     guest writes to its page table the corresponding shadow page table will
 *     be marked "unsync". That way KVM knows which shadow page tables need to
 *     be updated on the next TLB flush, INVLPG, etc. and which do not.
 *
 *  3. To prevent guest writes to read-only memory, such as for memory in a
 *     read-only memslot or guest memory backed by a read-only VMA. Writes to
 *     such pages are disallowed entirely.
 *
 *  4. To emulate the Accessed bit for SPTEs without A/D bits. Note, in this
 *     case, the SPTE is access-protected, not just write-protected!
 *
 * For cases #1 and #4, KVM can safely make such SPTEs writable without taking
 * mmu_lock as capturing the Accessed/Dirty state doesn't require taking it.
 * To differentiate #1 and #4 from #2 and #3, KVM uses two software-only bits
 * in the SPTE:
 *
 *  shadow_mmu_writable_mask, aka MMU-writable -
 *    Cleared on SPTEs that KVM is currently write-protecting for shadow paging
 *    purposes (case 2 above).
 *
 *  shadow_host_writable_mask, aka Host-writable -
 *    Cleared on SPTEs that are not host-writable (case 3 above)
 *
 * Note, not all possible combinations of PT_WRITABLE_MASK,
 * shadow_mmu_writable_mask, and shadow_host_writable_mask are valid. A given
 * SPTE can be in only one of the following states, which map to the
 * aforementioned 3 cases:
 *
 *  shadow_host_writable_mask | shadow_mmu_writable_mask | PT_WRITABLE_MASK
 *  ------------------------- | ------------------------ | ----------------
 *  1                         | 1                        | 1       (writable)
 *  1                         | 1                        | 0       (case 1)
 *  1                         | 0                        | 0       (case 2)
 *  0                         | 0                        | 0       (case 3)
 *
 * The valid combinations of these bits are checked by
 * check_spte_writable_invariants() whenever an SPTE is modified.
 *
 * Clearing the MMU-writable bit is always done under the MMU lock and always
 * accompanied by a TLB flush before dropping the lock to avoid corrupting the
 * shadow page tables between vCPUs. Write-protecting an SPTE for dirty logging
 * (which does not clear the MMU-writable bit) does not flush TLBs before
 * dropping the lock, as it only needs to synchronize guest writes with the
 * dirty bitmap. Similarly, making the SPTE inaccessible (and non-writable) for
 * access-tracking via the clear_young() MMU notifier also does not flush TLBs.
 *
 * So, there is the problem: clearing the MMU-writable bit can encounter a
 * write-protected SPTE while CPUs still have writable mappings for that SPTE
 * cached in their TLB. To address this, KVM always flushes TLBs when
 * write-protecting SPTEs if the MMU-writable bit is set on the old SPTE.
 *
 * The Host-writable bit is not modified on present SPTEs, it is only set or
 * cleared when an SPTE is first faulted in from non-present and then remains
 * immutable.
 */
static inline bool is_writable_pte(unsigned long pte)
{
	return pte & PT_WRITABLE_MASK;
}

/* Note: spte must be a shadow-present leaf SPTE. */
static inline void check_spte_writable_invariants(u64 spte)
{
	if (spte & shadow_mmu_writable_mask)
		WARN_ONCE(!(spte & shadow_host_writable_mask),
			  KBUILD_MODNAME ": MMU-writable SPTE is not Host-writable: %llx",
			  spte);
	else
		WARN_ONCE(is_writable_pte(spte),
			  KBUILD_MODNAME ": Writable SPTE is not MMU-writable: %llx", spte);
}

static inline bool is_mmu_writable_spte(u64 spte)
{
	return spte & shadow_mmu_writable_mask;
}

static inline u64 get_mmio_spte_generation(u64 spte)
{
	u64 gen;

	gen = (spte & MMIO_SPTE_GEN_LOW_MASK) >> MMIO_SPTE_GEN_LOW_SHIFT;
	gen |= (spte & MMIO_SPTE_GEN_HIGH_MASK) >> MMIO_SPTE_GEN_HIGH_SHIFT;
	return gen;
}
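
/*
 * For example, an MMIO SPTE whose low generation field (bits 3-10) holds 0xa5
 * and whose high field (bits 52-62) holds 0x3 decodes to generation
 * (0x3 << 8) | 0xa5 = 0x3a5.
 */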

bool spte_has_volatile_bits(u64 spte);

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool can_unsync,
	       bool host_writable, u64 *new_spte);
u64 make_huge_page_split_spte(struct kvm *kvm, u64 huge_spte,
			      union kvm_mmu_page_role role, int index);
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled);
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access);
u64 mark_spte_for_access_track(u64 spte);

/* Restore an acc-track PTE back to a regular PTE */
static inline u64 restore_acc_track_spte(u64 spte)
{
	u64 saved_bits = (spte >> SHADOW_ACC_TRACK_SAVED_BITS_SHIFT)
			 & SHADOW_ACC_TRACK_SAVED_BITS_MASK;

	spte &= ~shadow_acc_track_mask;
	spte &= ~(SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
		  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT);
	spte |= saved_bits;

	return spte;
}
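
/*
 * E.g. an access-tracked EPT SPTE has its R (bit 0) and X (bit 2) permissions
 * stashed at bits 54 and 56 by mark_spte_for_access_track(); restoring shifts
 * them back down and clears both the saved copies and shadow_acc_track_mask.
 */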

u64 kvm_mmu_changed_pte_notifier_make_spte(u64 old_spte, kvm_pfn_t new_pfn);

void __init kvm_mmu_spte_module_init(void);
void kvm_mmu_reset_all_pte_masks(void);

#endif