/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGTABLE_DEFS_H
#define _ASM_X86_PGTABLE_DEFS_H

#include <linux/const.h>
#include <linux/mem_encrypt.h>

#include <asm/page_types.h>

#define _PAGE_BIT_PRESENT 0 /* is present */
#define _PAGE_BIT_RW 1 /* writeable */
#define _PAGE_BIT_USER 2 /* userspace addressable */
#define _PAGE_BIT_PWT 3 /* page write through */
#define _PAGE_BIT_PCD 4 /* page cache disabled */
#define _PAGE_BIT_ACCESSED 5 /* was accessed (raised by CPU) */
#define _PAGE_BIT_DIRTY 6 /* was written to (raised by CPU) */
#define _PAGE_BIT_PSE 7 /* 4 MB (or 2MB) page */
#define _PAGE_BIT_PAT 7 /* on 4KB pages */
#define _PAGE_BIT_GLOBAL 8 /* Global TLB entry PPro+ */
#define _PAGE_BIT_SOFTW1 9 /* available for programmer */
#define _PAGE_BIT_SOFTW2 10 /* " */
#define _PAGE_BIT_SOFTW3 11 /* " */
#define _PAGE_BIT_PAT_LARGE 12 /* On 2MB or 1GB pages */
#define _PAGE_BIT_SOFTW4 58 /* available for programmer */
#define _PAGE_BIT_PKEY_BIT0 59 /* Protection Keys, bit 1/4 */
#define _PAGE_BIT_PKEY_BIT1 60 /* Protection Keys, bit 2/4 */
#define _PAGE_BIT_PKEY_BIT2 61 /* Protection Keys, bit 3/4 */
#define _PAGE_BIT_PKEY_BIT3 62 /* Protection Keys, bit 4/4 */
#define _PAGE_BIT_NX 63 /* No execute: only valid after cpuid check */

#define _PAGE_BIT_SPECIAL _PAGE_BIT_SOFTW1
#define _PAGE_BIT_CPA_TEST _PAGE_BIT_SOFTW1
#define _PAGE_BIT_UFFD_WP _PAGE_BIT_SOFTW2 /* userfaultfd wrprotected */
#define _PAGE_BIT_SOFT_DIRTY _PAGE_BIT_SOFTW3 /* software dirty tracking */
#define _PAGE_BIT_DEVMAP _PAGE_BIT_SOFTW4

/* If _PAGE_BIT_PRESENT is clear, we use these: */
/* - if the user mapped it with PROT_NONE; pte_present gives true */
#define _PAGE_BIT_PROTNONE _PAGE_BIT_GLOBAL

#define _PAGE_PRESENT (_AT(pteval_t, 1) << _PAGE_BIT_PRESENT)
#define _PAGE_RW (_AT(pteval_t, 1) << _PAGE_BIT_RW)
#define _PAGE_USER (_AT(pteval_t, 1) << _PAGE_BIT_USER)
#define _PAGE_PWT (_AT(pteval_t, 1) << _PAGE_BIT_PWT)
#define _PAGE_PCD (_AT(pteval_t, 1) << _PAGE_BIT_PCD)
#define _PAGE_ACCESSED (_AT(pteval_t, 1) << _PAGE_BIT_ACCESSED)
#define _PAGE_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_DIRTY)
#define _PAGE_PSE (_AT(pteval_t, 1) << _PAGE_BIT_PSE)
#define _PAGE_GLOBAL (_AT(pteval_t, 1) << _PAGE_BIT_GLOBAL)
#define _PAGE_SOFTW1 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW1)
#define _PAGE_SOFTW2 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW2)
#define _PAGE_SOFTW3 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW3)
#define _PAGE_PAT (_AT(pteval_t, 1) << _PAGE_BIT_PAT)
#define _PAGE_PAT_LARGE (_AT(pteval_t, 1) << _PAGE_BIT_PAT_LARGE)
#define _PAGE_SPECIAL (_AT(pteval_t, 1) << _PAGE_BIT_SPECIAL)
#define _PAGE_CPA_TEST (_AT(pteval_t, 1) << _PAGE_BIT_CPA_TEST)
#ifdef CONFIG_X86_INTEL_MEMORY_PROTECTION_KEYS
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT0)
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT1)
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT2)
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 1) << _PAGE_BIT_PKEY_BIT3)
#else
#define _PAGE_PKEY_BIT0 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT1 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT2 (_AT(pteval_t, 0))
#define _PAGE_PKEY_BIT3 (_AT(pteval_t, 0))
#endif

#define _PAGE_PKEY_MASK (_PAGE_PKEY_BIT0 | \
			 _PAGE_PKEY_BIT1 | \
			 _PAGE_PKEY_BIT2 | \
			 _PAGE_PKEY_BIT3)
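/*
 * Illustration, not part of the upstream header: the four pkey bits above
 * hold a user page's protection key. A sketch of recovering it from the
 * pte flags (essentially what pte_flags_pkey() in <asm/pkeys.h> does when
 * protection keys are configured in):
 *
 *	pkey = (pte_flags(pte) & _PAGE_PKEY_MASK) >> _PAGE_BIT_PKEY_BIT0;
 */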

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_KNL_ERRATUM_MASK (_PAGE_DIRTY | _PAGE_ACCESSED)
#else
#define _PAGE_KNL_ERRATUM_MASK 0
#endif

#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 1) << _PAGE_BIT_SOFT_DIRTY)
#else
#define _PAGE_SOFT_DIRTY (_AT(pteval_t, 0))
#endif

/*
 * Tracking the soft dirty bit when a page goes to swap is tricky.
 * We need a bit which can be stored in the pte _and_ does not conflict
 * with the swap entry format. On x86, bits 1-4 are *not* involved
 * in swap entry computation, but bit 7 is used for THP migration,
 * so we borrow bit 1 for soft dirty tracking.
 *
 * Please note that this bit must be treated as the swap dirty page
 * mark if and only if the PTE/PMD has the present bit clear!
 */
#ifdef CONFIG_MEM_SOFT_DIRTY
#define _PAGE_SWP_SOFT_DIRTY _PAGE_RW
#else
#define _PAGE_SWP_SOFT_DIRTY (_AT(pteval_t, 0))
#endif
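/*
 * Illustration, not part of the upstream header: because bit 1 (_PAGE_RW)
 * is outside the swap entry layout, a non-present (swapped-out) entry can
 * carry the soft-dirty mark there. Roughly what the swap soft-dirty
 * helpers in <asm/pgtable.h> do:
 *
 *	swp_pte = __pte(pte_val(swp_pte) | _PAGE_SWP_SOFT_DIRTY);
 */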

#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_WP
#define _PAGE_UFFD_WP (_AT(pteval_t, 1) << _PAGE_BIT_UFFD_WP)
#define _PAGE_SWP_UFFD_WP _PAGE_USER
#else
#define _PAGE_UFFD_WP (_AT(pteval_t, 0))
#define _PAGE_SWP_UFFD_WP (_AT(pteval_t, 0))
#endif

#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
#define _PAGE_NX (_AT(pteval_t, 1) << _PAGE_BIT_NX)
#define _PAGE_DEVMAP (_AT(u64, 1) << _PAGE_BIT_DEVMAP)
#define _PAGE_SOFTW4 (_AT(pteval_t, 1) << _PAGE_BIT_SOFTW4)
#else
#define _PAGE_NX (_AT(pteval_t, 0))
#define _PAGE_DEVMAP (_AT(pteval_t, 0))
#define _PAGE_SOFTW4 (_AT(pteval_t, 0))
#endif

#define _PAGE_PROTNONE (_AT(pteval_t, 1) << _PAGE_BIT_PROTNONE)

/*
 * Set of bits not changed in pte_modify. The pte's
 * protection key is treated like _PAGE_RW, for
 * instance, and is *not* included in this mask since
 * pte_modify() does modify it.
 */
#define _PAGE_CHG_MASK (PTE_PFN_MASK | _PAGE_PCD | _PAGE_PWT | \
			_PAGE_SPECIAL | _PAGE_ACCESSED | _PAGE_DIRTY | \
			_PAGE_SOFT_DIRTY | _PAGE_DEVMAP | _PAGE_ENC | \
			_PAGE_UFFD_WP)
#define _HPAGE_CHG_MASK (_PAGE_CHG_MASK | _PAGE_PSE)
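/*
 * Illustration, not part of the upstream header: a simplified sketch of
 * how pte_modify() in <asm/pgtable.h> uses _PAGE_CHG_MASK when changing a
 * pte's protection (the real helper also filters the new pgprot and takes
 * care of the PROTNONE encoding):
 *
 *	val = (pte_val(pte) & _PAGE_CHG_MASK) |
 *	      (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
 */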

/*
 * The cache modes defined here are used to translate between pure SW usage
 * and the HW defined cache mode bits and/or PAT entries.
 *
 * The resulting bits for PWT, PCD and PAT should be chosen in a way
 * to have the WB mode at index 0 (all bits clear). This is the default
 * right now and likely would break too much if changed.
 */
#ifndef __ASSEMBLY__
enum page_cache_mode {
	_PAGE_CACHE_MODE_WB = 0,
	_PAGE_CACHE_MODE_WC = 1,
	_PAGE_CACHE_MODE_UC_MINUS = 2,
	_PAGE_CACHE_MODE_UC = 3,
	_PAGE_CACHE_MODE_WT = 4,
	_PAGE_CACHE_MODE_WP = 5,

	_PAGE_CACHE_MODE_NUM = 8
};
#endif

#define _PAGE_ENC (_AT(pteval_t, sme_me_mask))

#define _PAGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT)
#define _PAGE_LARGE_CACHE_MASK (_PAGE_PWT | _PAGE_PCD | _PAGE_PAT_LARGE)

#define _PAGE_NOCACHE (cachemode2protval(_PAGE_CACHE_MODE_UC))
#define _PAGE_CACHE_WP (cachemode2protval(_PAGE_CACHE_MODE_WP))

#define __PP _PAGE_PRESENT
#define __RW _PAGE_RW
#define _USR _PAGE_USER
#define ___A _PAGE_ACCESSED
#define ___D _PAGE_DIRTY
#define ___G _PAGE_GLOBAL
#define __NX _PAGE_NX

#define _ENC _PAGE_ENC
#define __WP _PAGE_CACHE_WP
#define __NC _PAGE_NOCACHE
#define _PSE _PAGE_PSE

#define pgprot_val(x) ((x).pgprot)
#define __pgprot(x) ((pgprot_t) { (x) } )
#define __pg(x) __pgprot(x)

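/*
 * Note added for readability, not part of the upstream header: in the
 * tables below each column is one flag, in the order
 * P | RW | USER | ACCESSED | NX | DIRTY | PSE | GLOBAL, with cache or
 * encryption bits appended where needed; a 0 keeps the column aligned
 * while leaving that flag clear.
 */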
#define PAGE_NONE __pg( 0| 0| 0|___A| 0| 0| 0|___G)
#define PAGE_SHARED __pg(__PP|__RW|_USR|___A|__NX| 0| 0| 0)
#define PAGE_SHARED_EXEC __pg(__PP|__RW|_USR|___A| 0| 0| 0| 0)
#define PAGE_COPY_NOEXEC __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_COPY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)
#define PAGE_COPY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY __pg(__PP| 0|_USR|___A|__NX| 0| 0| 0)
#define PAGE_READONLY_EXEC __pg(__PP| 0|_USR|___A| 0| 0| 0| 0)

#define __PAGE_KERNEL (__PP|__RW| 0|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_EXEC (__PP|__RW| 0|___A| 0|___D| 0|___G)
#define _KERNPG_TABLE_NOENC (__PP|__RW| 0|___A| 0|___D| 0| 0)
#define _KERNPG_TABLE (__PP|__RW| 0|___A| 0|___D| 0| 0| _ENC)
#define _PAGE_TABLE_NOENC (__PP|__RW|_USR|___A| 0|___D| 0| 0)
#define _PAGE_TABLE (__PP|__RW|_USR|___A| 0|___D| 0| 0| _ENC)
#define __PAGE_KERNEL_RO (__PP| 0| 0|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_ROX (__PP| 0| 0|___A| 0|___D| 0|___G)
#define __PAGE_KERNEL_NOCACHE (__PP|__RW| 0|___A|__NX|___D| 0|___G| __NC)
#define __PAGE_KERNEL_VVAR (__PP| 0|_USR|___A|__NX|___D| 0|___G)
#define __PAGE_KERNEL_LARGE (__PP|__RW| 0|___A|__NX|___D|_PSE|___G)
#define __PAGE_KERNEL_LARGE_EXEC (__PP|__RW| 0|___A| 0|___D|_PSE|___G)
#define __PAGE_KERNEL_WP (__PP|__RW| 0|___A|__NX|___D| 0|___G| __WP)


#define __PAGE_KERNEL_IO __PAGE_KERNEL
#define __PAGE_KERNEL_IO_NOCACHE __PAGE_KERNEL_NOCACHE


#ifndef __ASSEMBLY__

#define __PAGE_KERNEL_ENC (__PAGE_KERNEL | _ENC)
#define __PAGE_KERNEL_ENC_WP (__PAGE_KERNEL_WP | _ENC)
#define __PAGE_KERNEL_NOENC (__PAGE_KERNEL | 0)
#define __PAGE_KERNEL_NOENC_WP (__PAGE_KERNEL_WP | 0)

#define __pgprot_mask(x) __pgprot((x) & __default_kernel_pte_mask)
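/*
 * Note, not part of the upstream header: __default_kernel_pte_mask is
 * initialized from __supported_pte_mask, so filtering through
 * __pgprot_mask() drops flags the running kernel cannot or should not
 * use; for example, with page table isolation enabled _PAGE_GLOBAL is
 * cleared from the mask and therefore from PAGE_KERNEL and friends.
 */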

#define PAGE_KERNEL __pgprot_mask(__PAGE_KERNEL | _ENC)
#define PAGE_KERNEL_NOENC __pgprot_mask(__PAGE_KERNEL | 0)
#define PAGE_KERNEL_RO __pgprot_mask(__PAGE_KERNEL_RO | _ENC)
#define PAGE_KERNEL_EXEC __pgprot_mask(__PAGE_KERNEL_EXEC | _ENC)
#define PAGE_KERNEL_EXEC_NOENC __pgprot_mask(__PAGE_KERNEL_EXEC | 0)
#define PAGE_KERNEL_ROX __pgprot_mask(__PAGE_KERNEL_ROX | _ENC)
#define PAGE_KERNEL_NOCACHE __pgprot_mask(__PAGE_KERNEL_NOCACHE | _ENC)
#define PAGE_KERNEL_LARGE __pgprot_mask(__PAGE_KERNEL_LARGE | _ENC)
#define PAGE_KERNEL_LARGE_EXEC __pgprot_mask(__PAGE_KERNEL_LARGE_EXEC | _ENC)
#define PAGE_KERNEL_VVAR __pgprot_mask(__PAGE_KERNEL_VVAR | _ENC)

#define PAGE_KERNEL_IO __pgprot_mask(__PAGE_KERNEL_IO)
#define PAGE_KERNEL_IO_NOCACHE __pgprot_mask(__PAGE_KERNEL_IO_NOCACHE)

#endif /* __ASSEMBLY__ */

/*
 * early identity mapping pte attrib macros.
 */
#ifdef CONFIG_X86_64
#define __PAGE_KERNEL_IDENT_LARGE_EXEC __PAGE_KERNEL_LARGE_EXEC
#else
#define PTE_IDENT_ATTR 0x003 /* PRESENT+RW */
#define PDE_IDENT_ATTR 0x063 /* PRESENT+RW+DIRTY+ACCESSED */
#define PGD_IDENT_ATTR 0x001 /* PRESENT (no other attributes) */
#endif

#ifdef CONFIG_X86_32
# include <asm/pgtable_32_types.h>
#else
# include <asm/pgtable_64_types.h>
#endif

#ifndef __ASSEMBLY__

#include <linux/types.h>

/* Extracts the PFN from a (pte|pmd|pud|pgd)val_t of a 4KB page */
#define PTE_PFN_MASK ((pteval_t)PHYSICAL_PAGE_MASK)

/*
 * Extracts the flags from a (pte|pmd|pud|pgd)val_t
 * This includes the protection key value.
 */
#define PTE_FLAGS_MASK (~PTE_PFN_MASK)
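/*
 * Illustration, not part of the upstream header: a pte value splits
 * cleanly with the two masks above, e.g.
 *
 *	phys  = pte_val(pte) & PTE_PFN_MASK;    physical page address
 *	flags = pte_val(pte) & PTE_FLAGS_MASK;  flags, including the pkey
 */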

typedef struct pgprot { pgprotval_t pgprot; } pgprot_t;

typedef struct { pgdval_t pgd; } pgd_t;

static inline pgprot_t pgprot_nx(pgprot_t prot)
{
	return __pgprot(pgprot_val(prot) | _PAGE_NX);
}
#define pgprot_nx pgprot_nx

#ifdef CONFIG_X86_PAE

/*
 * PHYSICAL_PAGE_MASK might be non-constant when SME is compiled in, so we can't
 * use it here.
 */

#define PGD_PAE_PAGE_MASK ((signed long)PAGE_MASK)
#define PGD_PAE_PHYS_MASK (((1ULL << __PHYSICAL_MASK_SHIFT)-1) & PGD_PAE_PAGE_MASK)

/*
 * PAE allows Base Address, P, PWT, PCD and AVL bits to be set in PGD entries.
 * All other bits are Reserved MBZ
 */
#define PGD_ALLOWED_BITS (PGD_PAE_PHYS_MASK | _PAGE_PRESENT | \
			  _PAGE_PWT | _PAGE_PCD | \
			  _PAGE_SOFTW1 | _PAGE_SOFTW2 | _PAGE_SOFTW3)

#else
/* No need to mask any bits for !PAE */
#define PGD_ALLOWED_BITS (~0ULL)
#endif

static inline pgd_t native_make_pgd(pgdval_t val)
{
	return (pgd_t) { val & PGD_ALLOWED_BITS };
}

static inline pgdval_t native_pgd_val(pgd_t pgd)
{
	return pgd.pgd & PGD_ALLOWED_BITS;
}

static inline pgdval_t pgd_flags(pgd_t pgd)
{
	return native_pgd_val(pgd) & PTE_FLAGS_MASK;
}

#if CONFIG_PGTABLE_LEVELS > 4
typedef struct { p4dval_t p4d; } p4d_t;

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { val };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return p4d.p4d;
}
#else
#include <asm-generic/pgtable-nop4d.h>

static inline p4d_t native_make_p4d(pudval_t val)
{
	return (p4d_t) { .pgd = native_make_pgd((pgdval_t)val) };
}

static inline p4dval_t native_p4d_val(p4d_t p4d)
{
	return native_pgd_val(p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 3
typedef struct { pudval_t pud; } pud_t;

static inline pud_t native_make_pud(pmdval_t val)
{
	return (pud_t) { val };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return pud.pud;
}
#else
#include <asm-generic/pgtable-nopud.h>

static inline pud_t native_make_pud(pudval_t val)
{
	return (pud_t) { .p4d.pgd = native_make_pgd(val) };
}

static inline pudval_t native_pud_val(pud_t pud)
{
	return native_pgd_val(pud.p4d.pgd);
}
#endif

#if CONFIG_PGTABLE_LEVELS > 2
static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pmd = val };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return pmd.pmd;
}
#else
#include <asm-generic/pgtable-nopmd.h>

static inline pmd_t native_make_pmd(pmdval_t val)
{
	return (pmd_t) { .pud.p4d.pgd = native_make_pgd(val) };
}

static inline pmdval_t native_pmd_val(pmd_t pmd)
{
	return native_pgd_val(pmd.pud.p4d.pgd);
}
#endif

static inline p4dval_t p4d_pfn_mask(p4d_t p4d)
{
	/* No 512 GiB huge pages yet */
	return PTE_PFN_MASK;
}

static inline p4dval_t p4d_flags_mask(p4d_t p4d)
{
	return ~p4d_pfn_mask(p4d);
}

static inline p4dval_t p4d_flags(p4d_t p4d)
{
	return native_p4d_val(p4d) & p4d_flags_mask(p4d);
}

static inline pudval_t pud_pfn_mask(pud_t pud)
{
	if (native_pud_val(pud) & _PAGE_PSE)
		return PHYSICAL_PUD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pudval_t pud_flags_mask(pud_t pud)
{
	return ~pud_pfn_mask(pud);
}

static inline pudval_t pud_flags(pud_t pud)
{
	return native_pud_val(pud) & pud_flags_mask(pud);
}

static inline pmdval_t pmd_pfn_mask(pmd_t pmd)
{
	if (native_pmd_val(pmd) & _PAGE_PSE)
		return PHYSICAL_PMD_PAGE_MASK;
	else
		return PTE_PFN_MASK;
}

static inline pmdval_t pmd_flags_mask(pmd_t pmd)
{
	return ~pmd_pfn_mask(pmd);
}

static inline pmdval_t pmd_flags(pmd_t pmd)
{
	return native_pmd_val(pmd) & pmd_flags_mask(pmd);
}
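/*
 * Illustration, not part of the upstream header: pmd_pfn_mask() is needed
 * because a 2MB (PSE) pmd keeps PAT in bit 12, where a 4K entry stores
 * part of the pfn, so the pfn must be masked at 2MB granularity.
 * Extracting it, roughly as pmd_pfn() in <asm/pgtable.h> does:
 *
 *	pfn = (pmd_val(pmd) & pmd_pfn_mask(pmd)) >> PAGE_SHIFT;
 */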

static inline pte_t native_make_pte(pteval_t val)
{
	return (pte_t) { .pte = val };
}

static inline pteval_t native_pte_val(pte_t pte)
{
	return pte.pte;
}

static inline pteval_t pte_flags(pte_t pte)
{
	return native_pte_val(pte) & PTE_FLAGS_MASK;
}

#define __pte2cm_idx(cb) \
	((((cb) >> (_PAGE_BIT_PAT - 2)) & 4) | \
	 (((cb) >> (_PAGE_BIT_PCD - 1)) & 2) | \
	 (((cb) >> _PAGE_BIT_PWT) & 1))
#define __cm_idx2pte(i) \
	((((i) & 4) << (_PAGE_BIT_PAT - 2)) | \
	 (((i) & 2) << (_PAGE_BIT_PCD - 1)) | \
	 (((i) & 1) << _PAGE_BIT_PWT))
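/*
 * Illustration, not part of the upstream header: the two macros above
 * shuffle the PAT/PCD/PWT bits of a 4K pte to and from a 3-bit index
 * (PAT becomes index bit 2, PCD bit 1, PWT bit 0). Worked example:
 *
 *	__pte2cm_idx(_PAGE_PCD | _PAGE_PWT) == 3
 *	__cm_idx2pte(3) == (_PAGE_PCD | _PAGE_PWT)
 */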

unsigned long cachemode2protval(enum page_cache_mode pcm);

static inline pgprotval_t protval_4k_2_large(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT) << (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_4k_2_large(pgprot_t pgprot)
{
	return __pgprot(protval_4k_2_large(pgprot_val(pgprot)));
}
static inline pgprotval_t protval_large_2_4k(pgprotval_t val)
{
	return (val & ~(_PAGE_PAT | _PAGE_PAT_LARGE)) |
		((val & _PAGE_PAT_LARGE) >>
		 (_PAGE_BIT_PAT_LARGE - _PAGE_BIT_PAT));
}
static inline pgprot_t pgprot_large_2_4k(pgprot_t pgprot)
{
	return __pgprot(protval_large_2_4k(pgprot_val(pgprot)));
}
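/*
 * Illustration, not part of the upstream header: these helpers exist
 * because PAT sits at bit 7 in a 4K pte but bit 7 means PSE in a 2MB/1GB
 * entry, so PAT moves to bit 12 (_PAGE_PAT_LARGE) there. For example,
 * protval_4k_2_large(_PAGE_PAT) == _PAGE_PAT_LARGE, and
 * protval_large_2_4k() undoes the move.
 */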


typedef struct page *pgtable_t;

extern pteval_t __supported_pte_mask;
extern pteval_t __default_kernel_pte_mask;
extern void set_nx(void);
extern int nx_enabled;

#define pgprot_writecombine pgprot_writecombine
extern pgprot_t pgprot_writecombine(pgprot_t prot);

#define pgprot_writethrough pgprot_writethrough
extern pgprot_t pgprot_writethrough(pgprot_t prot);

/* Indicate that x86 has its own track and untrack pfn vma functions */
#define __HAVE_PFNMAP_TRACKING

#define __HAVE_PHYS_MEM_ACCESS_PROT
struct file;
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot);

/* Install a pte for a particular vaddr in kernel space. */
void set_pte_vaddr(unsigned long vaddr, pte_t pte);

#ifdef CONFIG_X86_32
extern void native_pagetable_init(void);
#else
#define native_pagetable_init paging_init
#endif

struct seq_file;
extern void arch_report_meminfo(struct seq_file *m);

enum pg_level {
	PG_LEVEL_NONE,
	PG_LEVEL_4K,
	PG_LEVEL_2M,
	PG_LEVEL_1G,
	PG_LEVEL_512G,
	PG_LEVEL_NUM
};

#ifdef CONFIG_PROC_FS
extern void update_page_count(int level, unsigned long pages);
#else
static inline void update_page_count(int level, unsigned long pages) { }
#endif

/*
 * Helper function that returns the kernel pagetable entry controlling
 * the virtual address 'address'. NULL means no pagetable entry present.
 * NOTE: the return type is pte_t but if the pmd is PSE then we return it
 * as a pte too.
 */
extern pte_t *lookup_address(unsigned long address, unsigned int *level);
extern pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
				    unsigned int *level);
extern pmd_t *lookup_pmd_address(unsigned long address);
extern phys_addr_t slow_virt_to_phys(void *__address);
extern int __init kernel_map_pages_in_pgd(pgd_t *pgd, u64 pfn,
					  unsigned long address,
					  unsigned numpages,
					  unsigned long page_flags);
extern int __init kernel_unmap_pages_in_pgd(pgd_t *pgd, unsigned long address,
					    unsigned long numpages);
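/*
 * Illustration, not part of the upstream header: a sketch of using
 * lookup_address() to check how a kernel virtual address (an unsigned
 * long) is mapped; per the comment above, the returned pointer may alias
 * a PSE pmd/pud:
 *
 *	unsigned int level;
 *	pte_t *ptep = lookup_address(addr, &level);
 *
 *	if (ptep && (pte_flags(*ptep) & _PAGE_PRESENT) &&
 *	    level == PG_LEVEL_2M)
 *		addr is covered by a 2MB mapping
 */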
#endif /* !__ASSEMBLY__ */

#endif /* _ASM_X86_PGTABLE_DEFS_H */

source code of linux/arch/x86/include/asm/pgtable_types.h