/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_HUGE_MM_H
#define _LINUX_HUGE_MM_H

#include <linux/sched/coredump.h>
#include <linux/mm_types.h>

#include <linux/fs.h> /* only for vma_is_dax() */

vm_fault_t do_huge_pmd_anonymous_page(struct vm_fault *vmf);
int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pmd_t *dst_pmd, pmd_t *src_pmd, unsigned long addr,
		  struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma);
void huge_pmd_set_accessed(struct vm_fault *vmf);
int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm,
		  pud_t *dst_pud, pud_t *src_pud, unsigned long addr,
		  struct vm_area_struct *vma);

#ifdef CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE_PUD
void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud);
#else
static inline void huge_pud_set_accessed(struct vm_fault *vmf, pud_t orig_pud)
{
}
#endif

vm_fault_t do_huge_pmd_wp_page(struct vm_fault *vmf);
struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
				   unsigned long addr, pmd_t *pmd,
				   unsigned int flags);
bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
			   pmd_t *pmd, unsigned long addr, unsigned long next);
int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma, pmd_t *pmd,
		 unsigned long addr);
int zap_huge_pud(struct mmu_gather *tlb, struct vm_area_struct *vma, pud_t *pud,
		 unsigned long addr);
bool move_huge_pmd(struct vm_area_struct *vma, unsigned long old_addr,
		   unsigned long new_addr, pmd_t *old_pmd, pmd_t *new_pmd);
int change_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
		    pmd_t *pmd, unsigned long addr, pgprot_t newprot,
		    unsigned long cp_flags);
vm_fault_t vmf_insert_pfn_pmd_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pmd - insert a pmd size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pmd size pfn using vmf->vma->vm_page_prot. See vmf_insert_pfn()
 * for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pmd(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pmd_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
vm_fault_t vmf_insert_pfn_pud_prot(struct vm_fault *vmf, pfn_t pfn,
				   pgprot_t pgprot, bool write);

/**
 * vmf_insert_pfn_pud - insert a pud size pfn
 * @vmf: Structure describing the fault
 * @pfn: pfn to insert
 * @write: whether it's a write fault
 *
 * Insert a pud size pfn using vmf->vma->vm_page_prot. See vmf_insert_pfn()
 * for additional info.
 *
 * Return: vm_fault_t value.
 */
static inline vm_fault_t vmf_insert_pfn_pud(struct vm_fault *vmf, pfn_t pfn,
					    bool write)
{
	return vmf_insert_pfn_pud_prot(vmf, pfn, vmf->vma->vm_page_prot, write);
}
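
/*
 * Illustrative sketch (not part of this header): how a DAX-style driver's
 * ->huge_fault() handler might use the wrappers above. The device lookup
 * helper my_dev_pfn_for() is hypothetical; the rest follows the in-kernel
 * huge_fault convention of this era.
 */
#if 0
static vm_fault_t my_huge_fault(struct vm_fault *vmf,
				enum page_entry_size pe_size)
{
	pfn_t pfn = my_dev_pfn_for(vmf);	/* hypothetical helper */
	bool write = vmf->flags & FAULT_FLAG_WRITE;

	switch (pe_size) {
	case PE_SIZE_PMD:
		/* Inserts with vmf->vma->vm_page_prot via the wrapper. */
		return vmf_insert_pfn_pmd(vmf, pfn, write);
	case PE_SIZE_PUD:
		return vmf_insert_pfn_pud(vmf, pfn, write);
	default:
		return VM_FAULT_FALLBACK;	/* retry with smaller pages */
	}
}
#endif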

enum transparent_hugepage_flag {
	TRANSPARENT_HUGEPAGE_NEVER_DAX,
	TRANSPARENT_HUGEPAGE_FLAG,
	TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_DIRECT_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KSWAPD_OR_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_REQ_MADV_FLAG,
	TRANSPARENT_HUGEPAGE_DEFRAG_KHUGEPAGED_FLAG,
	TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG,
};
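
/*
 * Editorial note: these enumerators are bit numbers, not masks; they are
 * consumed as (1 << flag) against transparent_hugepage_flags, as the
 * hugepage_flags_enabled()/hugepage_flags_always() macros below show.
 */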

struct kobject;
struct kobj_attribute;

ssize_t single_hugepage_flag_store(struct kobject *kobj,
				   struct kobj_attribute *attr,
				   const char *buf, size_t count,
				   enum transparent_hugepage_flag flag);
ssize_t single_hugepage_flag_show(struct kobject *kobj,
				  struct kobj_attribute *attr, char *buf,
				  enum transparent_hugepage_flag flag);
extern struct kobj_attribute shmem_enabled_attr;

#define HPAGE_PMD_ORDER (HPAGE_PMD_SHIFT-PAGE_SHIFT)
#define HPAGE_PMD_NR (1<<HPAGE_PMD_ORDER)
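
/*
 * Worked example (editorial note, assuming x86-64 with 4 KiB base pages):
 * PMD_SHIFT = 21 and PAGE_SHIFT = 12, so HPAGE_PMD_ORDER = 9 and
 * HPAGE_PMD_NR = 512, i.e. one PMD-sized huge page spans 512 base pages
 * (2 MiB). The macros expand lazily, so defining them before
 * HPAGE_PMD_SHIFT below is fine.
 */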

#ifdef CONFIG_TRANSPARENT_HUGEPAGE
#define HPAGE_PMD_SHIFT PMD_SHIFT
#define HPAGE_PMD_SIZE	((1UL) << HPAGE_PMD_SHIFT)
#define HPAGE_PMD_MASK	(~(HPAGE_PMD_SIZE - 1))

#define HPAGE_PUD_SHIFT PUD_SHIFT
#define HPAGE_PUD_SIZE	((1UL) << HPAGE_PUD_SHIFT)
#define HPAGE_PUD_MASK	(~(HPAGE_PUD_SIZE - 1))

extern unsigned long transparent_hugepage_flags;

#define hugepage_flags_enabled()				\
	(transparent_hugepage_flags &				\
	 ((1<<TRANSPARENT_HUGEPAGE_FLAG) |			\
	  (1<<TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG)))
#define hugepage_flags_always()					\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_FLAG))
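
/*
 * Editorial note: these two predicates mirror the sysfs knob
 * /sys/kernel/mm/transparent_hugepage/enabled -- writing "always" sets
 * TRANSPARENT_HUGEPAGE_FLAG, "madvise" sets
 * TRANSPARENT_HUGEPAGE_REQ_MADV_FLAG, and "never" clears both, so
 * hugepage_flags_enabled() is true unless the knob is "never".
 */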

/*
 * Perform the following checks:
 * - For a file-backed vma, check that the linear page offset of the vma is
 *   HPAGE_PMD_NR-aligned within the file. The hugepage is guaranteed to be
 *   hugepage-aligned within the file, but we must also check that the
 *   PMD-aligned addresses in the VMA map to PMD-aligned offsets within the
 *   file, otherwise the hugepage is not PMD-mappable.
 * - For all vmas, check that haddr falls within an aligned HPAGE_PMD_SIZE
 *   area inside the vma.
 */
static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long addr)
{
	unsigned long haddr;

	/* Don't have to check pgoff for anonymous vma */
	if (!vma_is_anonymous(vma)) {
		if (!IS_ALIGNED((vma->vm_start >> PAGE_SHIFT) - vma->vm_pgoff,
				HPAGE_PMD_NR))
			return false;
	}

	haddr = addr & HPAGE_PMD_MASK;

	if (haddr < vma->vm_start || haddr + HPAGE_PMD_SIZE > vma->vm_end)
		return false;
	return true;
}
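
/*
 * Worked example (editorial note, 2 MiB PMD huge pages): for a file vma with
 * vm_start = 0x201000 and vm_pgoff = 0x1, the check computes
 * (0x201000 >> 12) - 0x1 = 0x200, a multiple of HPAGE_PMD_NR (512), so
 * virtual address 0x400000 maps to file offset 0x200000 -- both PMD-aligned.
 * Had vm_pgoff been 0x2, virtual and file alignment would disagree and the
 * vma would be rejected.
 */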

static inline bool file_thp_enabled(struct vm_area_struct *vma)
{
	struct inode *inode;

	if (!vma->vm_file)
		return false;

	inode = vma->vm_file->f_inode;

	return (IS_ENABLED(CONFIG_READ_ONLY_THP_FOR_FS)) &&
	       (vma->vm_flags & VM_EXEC) &&
	       !inode_is_open_for_write(inode) && S_ISREG(inode->i_mode);
}

bool hugepage_vma_check(struct vm_area_struct *vma,
			unsigned long vm_flags,
			bool smaps, bool in_pf);

#define transparent_hugepage_use_zero_page()			\
	(transparent_hugepage_flags &				\
	 (1<<TRANSPARENT_HUGEPAGE_USE_ZERO_PAGE_FLAG))

unsigned long thp_get_unmapped_area(struct file *filp, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags);

void prep_transhuge_page(struct page *page);
void free_transhuge_page(struct page *page);

bool can_split_folio(struct folio *folio, int *pextra_pins);
int split_huge_page_to_list(struct page *page, struct list_head *list);
static inline int split_huge_page(struct page *page)
{
	return split_huge_page_to_list(page, NULL);
}
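
/*
 * Illustrative sketch (not part of this header): split_huge_page() must be
 * called with the page locked, and returns 0 on success or -EBUSY when the
 * page cannot be split (e.g. extra pins). A caller typically looks roughly
 * like this:
 */
#if 0
	lock_page(page);
	ret = split_huge_page(page);	/* page stays locked either way */
	unlock_page(page);
#endif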
void deferred_split_huge_page(struct page *page);

void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio);

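/*
 * Editorial note: the temporary ____pmd below makes the macro evaluate its
 * pmd argument only once, and __split_huge_pmd() is invoked only when the
 * entry is actually a huge, devmap, or swap/migration PMD; otherwise the
 * macro is a no-op.
 */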
#define split_huge_pmd(__vma, __pmd, __address)				\
	do {								\
		pmd_t *____pmd = (__pmd);				\
		if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd)	\
					|| pmd_devmap(*____pmd))	\
			__split_huge_pmd(__vma, __pmd, __address,	\
						false, NULL);		\
	} while (0)

void split_huge_pmd_address(struct vm_area_struct *vma, unsigned long address,
		bool freeze, struct folio *folio);

void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
		unsigned long address);

#define split_huge_pud(__vma, __pud, __address)				\
	do {								\
		pud_t *____pud = (__pud);				\
		if (pud_trans_huge(*____pud)				\
					|| pud_devmap(*____pud))	\
			__split_huge_pud(__vma, __pud, __address);	\
	} while (0)

int hugepage_madvise(struct vm_area_struct *vma, unsigned long *vm_flags,
		     int advice);
void vma_adjust_trans_huge(struct vm_area_struct *vma, unsigned long start,
			   unsigned long end, long adjust_next);
spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma);
spinlock_t *__pud_trans_huge_lock(pud_t *pud, struct vm_area_struct *vma);

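/*
 * Editorial note: a PMD entry that is neither none nor present carries a
 * software swp entry; at PMD level this is typically a THP migration entry,
 * which is why callers check is_swap_pmd() alongside pmd_trans_huge().
 */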
static inline int is_swap_pmd(pmd_t pmd)
{
	return !pmd_none(pmd) && !pmd_present(pmd);
}

/* mmap_lock must be held on entry */
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
		return __pmd_trans_huge_lock(pmd, vma);
	else
		return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	if (pud_trans_huge(*pud) || pud_devmap(*pud))
		return __pud_trans_huge_lock(pud, vma);
	else
		return NULL;
}
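
/*
 * Illustrative sketch (not part of this header): the usual calling pattern.
 * A non-NULL return means the entry was huge and the page-table lock is now
 * held, so the caller must drop it; NULL means "not huge, fall back to the
 * PTE-level path".
 */
#if 0
	ptl = pmd_trans_huge_lock(pmd, vma);
	if (ptl) {
		/* operate on the huge PMD under the lock */
		spin_unlock(ptl);
	} else {
		/* walk the PTEs instead */
	}
#endif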

/**
 * folio_test_pmd_mappable - Can we map this folio with a PMD?
 * @folio: The folio to test
 */
static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return folio_order(folio) >= HPAGE_PMD_ORDER;
}

struct page *follow_devmap_pmd(struct vm_area_struct *vma, unsigned long addr,
		pmd_t *pmd, int flags, struct dev_pagemap **pgmap);
struct page *follow_devmap_pud(struct vm_area_struct *vma, unsigned long addr,
		pud_t *pud, int flags, struct dev_pagemap **pgmap);

vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf);

extern struct page *huge_zero_page;
extern unsigned long huge_zero_pfn;

static inline bool is_huge_zero_page(struct page *page)
{
	return READ_ONCE(huge_zero_page) == page;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return pmd_present(pmd) && READ_ONCE(huge_zero_pfn) == pmd_pfn(pmd);
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

struct page *mm_get_huge_zero_page(struct mm_struct *mm);
void mm_put_huge_zero_page(struct mm_struct *mm);
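
/*
 * Editorial note: the huge zero page is allocated lazily and refcounted
 * per-mm. mm_get_huge_zero_page() takes a reference that is dropped via
 * mm_put_huge_zero_page() when the mm goes away, which lets a shrinker
 * free the page once no mm is using it.
 */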

#define mk_huge_pmd(page, prot) pmd_mkhuge(mk_pmd(page, prot))

static inline bool thp_migration_supported(void)
{
	return IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION);
}

static inline struct list_head *page_deferred_list(struct page *page)
{
	/*
	 * See organization of tail pages of compound page in
	 * "struct page" definition.
	 */
	return &page[2].deferred_list;
}

#else /* CONFIG_TRANSPARENT_HUGEPAGE */
#define HPAGE_PMD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PMD_SIZE ({ BUILD_BUG(); 0; })

#define HPAGE_PUD_SHIFT ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_MASK ({ BUILD_BUG(); 0; })
#define HPAGE_PUD_SIZE ({ BUILD_BUG(); 0; })

static inline bool folio_test_pmd_mappable(struct folio *folio)
{
	return false;
}

static inline bool transhuge_vma_suitable(struct vm_area_struct *vma,
					  unsigned long addr)
{
	return false;
}

static inline bool hugepage_vma_check(struct vm_area_struct *vma,
				      unsigned long vm_flags,
				      bool smaps, bool in_pf)
{
	return false;
}

static inline void prep_transhuge_page(struct page *page) {}

#define transparent_hugepage_flags 0UL

#define thp_get_unmapped_area	NULL

static inline bool
can_split_folio(struct folio *folio, int *pextra_pins)
{
	return false;
}
static inline int
split_huge_page_to_list(struct page *page, struct list_head *list)
{
	return 0;
}
static inline int split_huge_page(struct page *page)
{
	return 0;
}
static inline void deferred_split_huge_page(struct page *page) {}
#define split_huge_pmd(__vma, __pmd, __address)	\
	do { } while (0)

static inline void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
		unsigned long address, bool freeze, struct folio *folio) {}
static inline void split_huge_pmd_address(struct vm_area_struct *vma,
		unsigned long address, bool freeze, struct folio *folio) {}

#define split_huge_pud(__vma, __pud, __address)	\
	do { } while (0)

static inline int hugepage_madvise(struct vm_area_struct *vma,
				   unsigned long *vm_flags, int advice)
{
	BUG();
	return 0;
}
static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
					 unsigned long start,
					 unsigned long end,
					 long adjust_next)
{
}
static inline int is_swap_pmd(pmd_t pmd)
{
	return 0;
}
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
		struct vm_area_struct *vma)
{
	return NULL;
}
static inline spinlock_t *pud_trans_huge_lock(pud_t *pud,
		struct vm_area_struct *vma)
{
	return NULL;
}

static inline vm_fault_t do_huge_pmd_numa_page(struct vm_fault *vmf)
{
	return 0;
}

static inline bool is_huge_zero_page(struct page *page)
{
	return false;
}

static inline bool is_huge_zero_pmd(pmd_t pmd)
{
	return false;
}

static inline bool is_huge_zero_pud(pud_t pud)
{
	return false;
}

static inline void mm_put_huge_zero_page(struct mm_struct *mm)
{
	return;
}

static inline struct page *follow_devmap_pmd(struct vm_area_struct *vma,
	unsigned long addr, pmd_t *pmd, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline struct page *follow_devmap_pud(struct vm_area_struct *vma,
	unsigned long addr, pud_t *pud, int flags, struct dev_pagemap **pgmap)
{
	return NULL;
}

static inline bool thp_migration_supported(void)
{
	return false;
}
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

static inline int split_folio_to_list(struct folio *folio,
		struct list_head *list)
{
	return split_huge_page_to_list(&folio->page, list);
}

/*
 * Architectures that select ARCH_WANTS_THP_SWAP but cannot support THP swap
 * because of implementation limitations, such as MTE on arm64, can override
 * this to false.
 */
#ifndef arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	return true;
}
#endif
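
/*
 * Illustrative sketch (not part of this header): an architecture overrides
 * the default by defining the macro before providing its own inline,
 * roughly as arm64 does for the MTE case in its asm/pgtable.h:
 */
#if 0
#define arch_thp_swp_supported arch_thp_swp_supported
static inline bool arch_thp_swp_supported(void)
{
	/* MTE tags would be lost across THP swap-out, so opt out */
	return !system_supports_mte();
}
#endif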

#endif /* _LINUX_HUGE_MM_H */

Source: linux/include/linux/huge_mm.h