1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | #ifndef _LINUX_HUGETLB_H |
3 | #define _LINUX_HUGETLB_H |
4 | |
5 | #include <linux/mm.h> |
6 | #include <linux/mm_types.h> |
7 | #include <linux/mmdebug.h> |
8 | #include <linux/fs.h> |
9 | #include <linux/hugetlb_inline.h> |
10 | #include <linux/cgroup.h> |
11 | #include <linux/page_ref.h> |
12 | #include <linux/list.h> |
13 | #include <linux/kref.h> |
14 | #include <linux/pgtable.h> |
15 | #include <linux/gfp.h> |
16 | #include <linux/userfaultfd_k.h> |
17 | |
18 | struct ctl_table; |
19 | struct user_struct; |
20 | struct mmu_gather; |
21 | struct node; |
22 | |
23 | #ifndef CONFIG_ARCH_HAS_HUGEPD |
24 | typedef struct { unsigned long pd; } hugepd_t; |
25 | #define is_hugepd(hugepd) (0) |
26 | #define __hugepd(x) ((hugepd_t) { (x) }) |
27 | #endif |
28 | |
29 | void free_huge_folio(struct folio *folio); |
30 | |
31 | #ifdef CONFIG_HUGETLB_PAGE |
32 | |
33 | #include <linux/pagemap.h> |
34 | #include <linux/shm.h> |
35 | #include <asm/tlbflush.h> |
36 | |
37 | /* |
 * For a HugeTLB page, there is more metadata to save in the struct page. But
 * the head struct page cannot meet our needs, so we have to abuse other tail
 * struct pages to store the metadata.
41 | */ |
42 | #define __NR_USED_SUBPAGE 3 |
43 | |
44 | struct hugepage_subpool { |
45 | spinlock_t lock; |
46 | long count; |
47 | long max_hpages; /* Maximum huge pages or -1 if no maximum. */ |
48 | long used_hpages; /* Used count against maximum, includes */ |
49 | /* both allocated and reserved pages. */ |
50 | struct hstate *hstate; |
51 | long min_hpages; /* Minimum huge pages or -1 if no minimum. */ |
52 | long rsv_hpages; /* Pages reserved against global pool to */ |
53 | /* satisfy minimum size. */ |
54 | }; |
55 | |
56 | struct resv_map { |
57 | struct kref refs; |
58 | spinlock_t lock; |
59 | struct list_head regions; |
60 | long adds_in_progress; |
61 | struct list_head region_cache; |
62 | long region_cache_count; |
63 | struct rw_semaphore rw_sema; |
64 | #ifdef CONFIG_CGROUP_HUGETLB |
65 | /* |
66 | * On private mappings, the counter to uncharge reservations is stored |
67 | * here. If these fields are 0, then either the mapping is shared, or |
68 | * cgroup accounting is disabled for this resv_map. |
69 | */ |
70 | struct page_counter *reservation_counter; |
71 | unsigned long pages_per_hpage; |
72 | struct cgroup_subsys_state *css; |
73 | #endif |
74 | }; |
75 | |
76 | /* |
77 | * Region tracking -- allows tracking of reservations and instantiated pages |
78 | * across the pages in a mapping. |
79 | * |
80 | * The region data structures are embedded into a resv_map and protected |
81 | * by a resv_map's lock. The set of regions within the resv_map represent |
82 | * reservations for huge pages, or huge pages that have already been |
83 | * instantiated within the map. The from and to elements are huge page |
84 | * indices into the associated mapping. from indicates the starting index |
85 | * of the region. to represents the first index past the end of the region. |
86 | * |
87 | * For example, a file region structure with from == 0 and to == 4 represents |
88 | * four huge pages in a mapping. It is important to note that the to element |
89 | * represents the first element past the end of the region. This is used in |
90 | * arithmetic as 4(to) - 0(from) = 4 huge pages in the region. |
91 | * |
92 | * Interval notation of the form [from, to) will be used to indicate that |
93 | * the endpoint from is inclusive and to is exclusive. |
94 | */ |
95 | struct file_region { |
96 | struct list_head link; |
97 | long from; |
98 | long to; |
99 | #ifdef CONFIG_CGROUP_HUGETLB |
100 | /* |
101 | * On shared mappings, each reserved region appears as a struct |
102 | * file_region in resv_map. These fields hold the info needed to |
103 | * uncharge each reservation. |
104 | */ |
105 | struct page_counter *reservation_counter; |
106 | struct cgroup_subsys_state *css; |
107 | #endif |
108 | }; |
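/*
 * Illustrative example (not taken from any specific caller): if a task
 * reserves huge page indices 0-3 of a mapping and later faults in index 5,
 * the resv_map's region list could contain the two file_regions [0, 4) and
 * [5, 6), accounting for 4 + 1 = 5 huge pages in total.
 */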
109 | |
110 | struct hugetlb_vma_lock { |
111 | struct kref refs; |
112 | struct rw_semaphore rw_sema; |
113 | struct vm_area_struct *vma; |
114 | }; |
115 | |
116 | extern struct resv_map *resv_map_alloc(void); |
117 | void resv_map_release(struct kref *ref); |
118 | |
119 | extern spinlock_t hugetlb_lock; |
120 | extern int hugetlb_max_hstate __read_mostly; |
121 | #define for_each_hstate(h) \ |
122 | for ((h) = hstates; (h) < &hstates[hugetlb_max_hstate]; (h)++) |
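/*
 * Example (illustrative only): for_each_hstate() walks every registered
 * huge page size, e.g.:
 *
 *	struct hstate *h;
 *
 *	for_each_hstate(h)
 *		pr_info("%s: %lu free\n", h->name, h->free_huge_pages);
 */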
123 | |
124 | struct hugepage_subpool *hugepage_new_subpool(struct hstate *h, long max_hpages, |
125 | long min_hpages); |
126 | void hugepage_put_subpool(struct hugepage_subpool *spool); |
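/*
 * A minimal usage sketch (hugetlbfs is one such caller; the error handling
 * shown here is illustrative): a filesystem creates one subpool per mount
 * and drops it at unmount time:
 *
 *	spool = hugepage_new_subpool(h, max_hpages, min_hpages);
 *	if (!spool)
 *		return -ENOMEM;
 *	...
 *	hugepage_put_subpool(spool);
 *
 * max_hpages/min_hpages may be -1 when no maximum/minimum is requested.
 */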
127 | |
128 | void hugetlb_dup_vma_private(struct vm_area_struct *vma); |
129 | void clear_vma_resv_huge_pages(struct vm_area_struct *vma); |
130 | int move_hugetlb_page_tables(struct vm_area_struct *vma, |
131 | struct vm_area_struct *new_vma, |
132 | unsigned long old_addr, unsigned long new_addr, |
133 | unsigned long len); |
134 | int copy_hugetlb_page_range(struct mm_struct *, struct mm_struct *, |
135 | struct vm_area_struct *, struct vm_area_struct *); |
136 | struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma, |
137 | unsigned long address, unsigned int flags, |
138 | unsigned int *page_mask); |
139 | void unmap_hugepage_range(struct vm_area_struct *, |
140 | unsigned long, unsigned long, struct page *, |
141 | zap_flags_t); |
142 | void __unmap_hugepage_range(struct mmu_gather *tlb, |
143 | struct vm_area_struct *vma, |
144 | unsigned long start, unsigned long end, |
145 | struct page *ref_page, zap_flags_t zap_flags); |
146 | void hugetlb_report_meminfo(struct seq_file *); |
147 | int hugetlb_report_node_meminfo(char *buf, int len, int nid); |
148 | void hugetlb_show_meminfo_node(int nid); |
149 | unsigned long hugetlb_total_pages(void); |
150 | vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma, |
151 | unsigned long address, unsigned int flags); |
152 | #ifdef CONFIG_USERFAULTFD |
153 | int hugetlb_mfill_atomic_pte(pte_t *dst_pte, |
154 | struct vm_area_struct *dst_vma, |
155 | unsigned long dst_addr, |
156 | unsigned long src_addr, |
157 | uffd_flags_t flags, |
158 | struct folio **foliop); |
159 | #endif /* CONFIG_USERFAULTFD */ |
160 | bool hugetlb_reserve_pages(struct inode *inode, long from, long to, |
161 | struct vm_area_struct *vma, |
162 | vm_flags_t vm_flags); |
163 | long hugetlb_unreserve_pages(struct inode *inode, long start, long end, |
164 | long freed); |
165 | bool isolate_hugetlb(struct folio *folio, struct list_head *list); |
166 | int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison); |
167 | int get_huge_page_for_hwpoison(unsigned long pfn, int flags, |
168 | bool *migratable_cleared); |
169 | void folio_putback_active_hugetlb(struct folio *folio); |
170 | void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int reason); |
171 | void hugetlb_fix_reserve_counts(struct inode *inode); |
172 | extern struct mutex *hugetlb_fault_mutex_table; |
173 | u32 hugetlb_fault_mutex_hash(struct address_space *mapping, pgoff_t idx); |
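/*
 * Typical usage of the fault mutex table (a sketch of the pattern used by
 * the fault and hole-punch paths, not a complete example):
 *
 *	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
 *
 *	mutex_lock(&hugetlb_fault_mutex_table[hash]);
 *	... instantiate or truncate the page at idx ...
 *	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 */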
174 | |
175 | pte_t *huge_pmd_share(struct mm_struct *mm, struct vm_area_struct *vma, |
176 | unsigned long addr, pud_t *pud); |
177 | |
178 | struct address_space *hugetlb_page_mapping_lock_write(struct page *hpage); |
179 | |
180 | extern int sysctl_hugetlb_shm_group; |
181 | extern struct list_head huge_boot_pages[MAX_NUMNODES]; |
182 | |
183 | /* arch callbacks */ |
184 | |
185 | #ifndef CONFIG_HIGHPTE |
186 | /* |
187 | * pte_offset_huge() and pte_alloc_huge() are helpers for those architectures |
188 | * which may go down to the lowest PTE level in their huge_pte_offset() and |
189 | * huge_pte_alloc(): to avoid reliance on pte_offset_map() without pte_unmap(). |
190 | */ |
191 | static inline pte_t *pte_offset_huge(pmd_t *pmd, unsigned long address) |
192 | { |
193 | return pte_offset_kernel(pmd, address); |
194 | } |
195 | static inline pte_t *pte_alloc_huge(struct mm_struct *mm, pmd_t *pmd, |
196 | unsigned long address) |
197 | { |
198 | return pte_alloc(mm, pmd) ? NULL : pte_offset_huge(pmd, address); |
199 | } |
200 | #endif |
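/*
 * Illustrative sketch only (arch details vary): an architecture that maps
 * some huge page size with contiguous PTEs might end its huge_pte_offset()
 * and huge_pte_alloc() walks with, respectively:
 *
 *	return pte_offset_huge(pmd, addr);
 * and
 *	return pte_alloc_huge(mm, pmd, addr);
 *
 * instead of open-coding pte_offset_map() without a matching pte_unmap().
 */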
201 | |
202 | pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma, |
203 | unsigned long addr, unsigned long sz); |
204 | /* |
205 | * huge_pte_offset(): Walk the hugetlb pgtable until the last level PTE. |
206 | * Returns the pte_t* if found, or NULL if the address is not mapped. |
207 | * |
 * IMPORTANT: this function should normally not be called directly; it is
 * only a common interface used to implement arch-specific walkers. Please
 * use hugetlb_walk() instead, because that will attempt to
211 | * verify the locking for you. |
212 | * |
213 | * Since this function will walk all the pgtable pages (including not only |
 * the high-level pgtable pages, but also the PUD entries that can be unshared
 * concurrently for VM_SHARED), the caller of this function should be
 * responsible for its thread safety. One can follow this rule:
217 | * |
218 | * (1) For private mappings: pmd unsharing is not possible, so holding the |
219 | * mmap_lock for either read or write is sufficient. Most callers |
220 | * already hold the mmap_lock, so normally, no special action is |
221 | * required. |
222 | * |
223 | * (2) For shared mappings: pmd unsharing is possible (so the PUD-ranged |
224 | * pgtable page can go away from under us! It can be done by a pmd |
225 | * unshare with a follow up munmap() on the other process), then we |
226 | * need either: |
227 | * |
228 | * (2.1) hugetlb vma lock read or write held, to make sure pmd unshare |
229 | * won't happen upon the range (it also makes sure the pte_t we |
230 | * read is the right and stable one), or, |
231 | * |
232 | * (2.2) hugetlb mapping i_mmap_rwsem lock held read or write, to make |
233 | * sure even if unshare happened the racy unmap() will wait until |
234 | * i_mmap_rwsem is released. |
235 | * |
 * Option (2.1) is the safest, as it guarantees pte stability from the pmd
 * sharing point of view until the vma lock is released. Option (2.2) doesn't
 * protect against a concurrent pmd unshare, but it makes sure the pgtable
 * page is safe to access.
240 | */ |
241 | pte_t *huge_pte_offset(struct mm_struct *mm, |
242 | unsigned long addr, unsigned long sz); |
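/*
 * A minimal sketch of the locking rule above for a shared mapping (assuming
 * the caller already holds the mmap_lock; error handling omitted):
 *
 *	h = hstate_vma(vma);
 *	hugetlb_vma_lock_read(vma);
 *	ptep = hugetlb_walk(vma, addr & huge_page_mask(h), huge_page_size(h));
 *	if (ptep) {
 *		ptl = huge_pte_lock(h, vma->vm_mm, ptep);
 *		... examine or modify the pte under ptl ...
 *		spin_unlock(ptl);
 *	}
 *	hugetlb_vma_unlock_read(vma);
 */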
243 | unsigned long hugetlb_mask_last_page(struct hstate *h); |
244 | int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma, |
245 | unsigned long addr, pte_t *ptep); |
246 | void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma, |
247 | unsigned long *start, unsigned long *end); |
248 | |
249 | extern void __hugetlb_zap_begin(struct vm_area_struct *vma, |
250 | unsigned long *begin, unsigned long *end); |
251 | extern void __hugetlb_zap_end(struct vm_area_struct *vma, |
252 | struct zap_details *details); |
253 | |
254 | static inline void hugetlb_zap_begin(struct vm_area_struct *vma, |
255 | unsigned long *start, unsigned long *end) |
256 | { |
257 | if (is_vm_hugetlb_page(vma)) |
		__hugetlb_zap_begin(vma, start, end);
259 | } |
260 | |
261 | static inline void hugetlb_zap_end(struct vm_area_struct *vma, |
262 | struct zap_details *details) |
263 | { |
264 | if (is_vm_hugetlb_page(vma)) |
265 | __hugetlb_zap_end(vma, details); |
266 | } |
267 | |
268 | void hugetlb_vma_lock_read(struct vm_area_struct *vma); |
269 | void hugetlb_vma_unlock_read(struct vm_area_struct *vma); |
270 | void hugetlb_vma_lock_write(struct vm_area_struct *vma); |
271 | void hugetlb_vma_unlock_write(struct vm_area_struct *vma); |
272 | int hugetlb_vma_trylock_write(struct vm_area_struct *vma); |
273 | void hugetlb_vma_assert_locked(struct vm_area_struct *vma); |
274 | void hugetlb_vma_lock_release(struct kref *kref); |
275 | |
276 | int pmd_huge(pmd_t pmd); |
277 | int pud_huge(pud_t pud); |
278 | long hugetlb_change_protection(struct vm_area_struct *vma, |
279 | unsigned long address, unsigned long end, pgprot_t newprot, |
280 | unsigned long cp_flags); |
281 | |
282 | bool is_hugetlb_entry_migration(pte_t pte); |
283 | bool is_hugetlb_entry_hwpoisoned(pte_t pte); |
284 | void hugetlb_unshare_all_pmds(struct vm_area_struct *vma); |
285 | |
286 | #else /* !CONFIG_HUGETLB_PAGE */ |
287 | |
288 | static inline void hugetlb_dup_vma_private(struct vm_area_struct *vma) |
289 | { |
290 | } |
291 | |
292 | static inline void clear_vma_resv_huge_pages(struct vm_area_struct *vma) |
293 | { |
294 | } |
295 | |
296 | static inline unsigned long hugetlb_total_pages(void) |
297 | { |
298 | return 0; |
299 | } |
300 | |
301 | static inline struct address_space *hugetlb_page_mapping_lock_write( |
302 | struct page *hpage) |
303 | { |
304 | return NULL; |
305 | } |
306 | |
307 | static inline int huge_pmd_unshare(struct mm_struct *mm, |
308 | struct vm_area_struct *vma, |
309 | unsigned long addr, pte_t *ptep) |
310 | { |
311 | return 0; |
312 | } |
313 | |
314 | static inline void adjust_range_if_pmd_sharing_possible( |
315 | struct vm_area_struct *vma, |
316 | unsigned long *start, unsigned long *end) |
317 | { |
318 | } |
319 | |
320 | static inline void hugetlb_zap_begin( |
321 | struct vm_area_struct *vma, |
322 | unsigned long *start, unsigned long *end) |
323 | { |
324 | } |
325 | |
326 | static inline void hugetlb_zap_end( |
327 | struct vm_area_struct *vma, |
328 | struct zap_details *details) |
329 | { |
330 | } |
331 | |
332 | static inline struct page *hugetlb_follow_page_mask( |
333 | struct vm_area_struct *vma, unsigned long address, unsigned int flags, |
334 | unsigned int *page_mask) |
335 | { |
	BUILD_BUG(); /* should never be compiled in if !CONFIG_HUGETLB_PAGE */
337 | } |
338 | |
339 | static inline int copy_hugetlb_page_range(struct mm_struct *dst, |
340 | struct mm_struct *src, |
341 | struct vm_area_struct *dst_vma, |
342 | struct vm_area_struct *src_vma) |
343 | { |
344 | BUG(); |
345 | return 0; |
346 | } |
347 | |
348 | static inline int move_hugetlb_page_tables(struct vm_area_struct *vma, |
349 | struct vm_area_struct *new_vma, |
350 | unsigned long old_addr, |
351 | unsigned long new_addr, |
352 | unsigned long len) |
353 | { |
354 | BUG(); |
355 | return 0; |
356 | } |
357 | |
358 | static inline void hugetlb_report_meminfo(struct seq_file *m) |
359 | { |
360 | } |
361 | |
362 | static inline int hugetlb_report_node_meminfo(char *buf, int len, int nid) |
363 | { |
364 | return 0; |
365 | } |
366 | |
367 | static inline void hugetlb_show_meminfo_node(int nid) |
368 | { |
369 | } |
370 | |
371 | static inline int prepare_hugepage_range(struct file *file, |
372 | unsigned long addr, unsigned long len) |
373 | { |
374 | return -EINVAL; |
375 | } |
376 | |
377 | static inline void hugetlb_vma_lock_read(struct vm_area_struct *vma) |
378 | { |
379 | } |
380 | |
381 | static inline void hugetlb_vma_unlock_read(struct vm_area_struct *vma) |
382 | { |
383 | } |
384 | |
385 | static inline void hugetlb_vma_lock_write(struct vm_area_struct *vma) |
386 | { |
387 | } |
388 | |
389 | static inline void hugetlb_vma_unlock_write(struct vm_area_struct *vma) |
390 | { |
391 | } |
392 | |
393 | static inline int hugetlb_vma_trylock_write(struct vm_area_struct *vma) |
394 | { |
395 | return 1; |
396 | } |
397 | |
398 | static inline void hugetlb_vma_assert_locked(struct vm_area_struct *vma) |
399 | { |
400 | } |
401 | |
402 | static inline int pmd_huge(pmd_t pmd) |
403 | { |
404 | return 0; |
405 | } |
406 | |
407 | static inline int pud_huge(pud_t pud) |
408 | { |
409 | return 0; |
410 | } |
411 | |
412 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
413 | unsigned long addr, unsigned long len) |
414 | { |
415 | return 0; |
416 | } |
417 | |
418 | static inline void hugetlb_free_pgd_range(struct mmu_gather *tlb, |
419 | unsigned long addr, unsigned long end, |
420 | unsigned long floor, unsigned long ceiling) |
421 | { |
422 | BUG(); |
423 | } |
424 | |
425 | #ifdef CONFIG_USERFAULTFD |
426 | static inline int hugetlb_mfill_atomic_pte(pte_t *dst_pte, |
427 | struct vm_area_struct *dst_vma, |
428 | unsigned long dst_addr, |
429 | unsigned long src_addr, |
430 | uffd_flags_t flags, |
431 | struct folio **foliop) |
432 | { |
433 | BUG(); |
434 | return 0; |
435 | } |
436 | #endif /* CONFIG_USERFAULTFD */ |
437 | |
438 | static inline pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr, |
439 | unsigned long sz) |
440 | { |
441 | return NULL; |
442 | } |
443 | |
444 | static inline bool isolate_hugetlb(struct folio *folio, struct list_head *list) |
445 | { |
446 | return false; |
447 | } |
448 | |
449 | static inline int get_hwpoison_hugetlb_folio(struct folio *folio, bool *hugetlb, bool unpoison) |
450 | { |
451 | return 0; |
452 | } |
453 | |
454 | static inline int get_huge_page_for_hwpoison(unsigned long pfn, int flags, |
455 | bool *migratable_cleared) |
456 | { |
457 | return 0; |
458 | } |
459 | |
460 | static inline void folio_putback_active_hugetlb(struct folio *folio) |
461 | { |
462 | } |
463 | |
464 | static inline void move_hugetlb_state(struct folio *old_folio, |
465 | struct folio *new_folio, int reason) |
466 | { |
467 | } |
468 | |
469 | static inline long hugetlb_change_protection( |
470 | struct vm_area_struct *vma, unsigned long address, |
471 | unsigned long end, pgprot_t newprot, |
472 | unsigned long cp_flags) |
473 | { |
474 | return 0; |
475 | } |
476 | |
477 | static inline void __unmap_hugepage_range(struct mmu_gather *tlb, |
478 | struct vm_area_struct *vma, unsigned long start, |
479 | unsigned long end, struct page *ref_page, |
480 | zap_flags_t zap_flags) |
481 | { |
482 | BUG(); |
483 | } |
484 | |
485 | static inline vm_fault_t hugetlb_fault(struct mm_struct *mm, |
486 | struct vm_area_struct *vma, unsigned long address, |
487 | unsigned int flags) |
488 | { |
489 | BUG(); |
490 | return 0; |
491 | } |
492 | |
493 | static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { } |
494 | |
495 | #endif /* !CONFIG_HUGETLB_PAGE */ |
/*
 * hugepages at the page global directory. If an arch supports
 * hugepages at the pgd level, it needs to define this.
 */
500 | #ifndef pgd_huge |
501 | #define pgd_huge(x) 0 |
502 | #endif |
503 | #ifndef p4d_huge |
504 | #define p4d_huge(x) 0 |
505 | #endif |
506 | |
507 | #ifndef pgd_write |
508 | static inline int pgd_write(pgd_t pgd) |
509 | { |
510 | BUG(); |
511 | return 0; |
512 | } |
513 | #endif |
514 | |
515 | #define HUGETLB_ANON_FILE "anon_hugepage" |
516 | |
517 | enum { |
518 | /* |
	 * The file will be used as a shm file so shmfs accounting rules
	 * apply.
521 | */ |
522 | HUGETLB_SHMFS_INODE = 1, |
523 | /* |
524 | * The file is being created on the internal vfs mount and shmfs |
525 | * accounting rules do not apply |
526 | */ |
527 | HUGETLB_ANONHUGE_INODE = 2, |
528 | }; |
529 | |
530 | #ifdef CONFIG_HUGETLBFS |
531 | struct hugetlbfs_sb_info { |
532 | long max_inodes; /* inodes allowed */ |
533 | long free_inodes; /* inodes free */ |
534 | spinlock_t stat_lock; |
535 | struct hstate *hstate; |
536 | struct hugepage_subpool *spool; |
537 | kuid_t uid; |
538 | kgid_t gid; |
539 | umode_t mode; |
540 | }; |
541 | |
542 | static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb) |
543 | { |
544 | return sb->s_fs_info; |
545 | } |
546 | |
547 | struct hugetlbfs_inode_info { |
548 | struct inode vfs_inode; |
549 | unsigned int seals; |
550 | }; |
551 | |
552 | static inline struct hugetlbfs_inode_info *HUGETLBFS_I(struct inode *inode) |
553 | { |
554 | return container_of(inode, struct hugetlbfs_inode_info, vfs_inode); |
555 | } |
556 | |
557 | extern const struct file_operations hugetlbfs_file_operations; |
558 | extern const struct vm_operations_struct hugetlb_vm_ops; |
559 | struct file *hugetlb_file_setup(const char *name, size_t size, vm_flags_t acct, |
560 | int creat_flags, int page_size_log); |
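/*
 * Sketch of how an anonymous MAP_HUGETLB mapping might obtain its backing
 * file (the exact flag plumbing is the mmap() caller's business and is
 * only illustrative here):
 *
 *	file = hugetlb_file_setup(HUGETLB_ANON_FILE, len, VM_NORESERVE,
 *				  HUGETLB_ANONHUGE_INODE, page_size_log);
 *	if (IS_ERR(file))
 *		return PTR_ERR(file);
 */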
561 | |
562 | static inline bool is_file_hugepages(struct file *file) |
563 | { |
564 | if (file->f_op == &hugetlbfs_file_operations) |
565 | return true; |
566 | |
567 | return is_file_shm_hugepages(file); |
568 | } |
569 | |
570 | static inline struct hstate *hstate_inode(struct inode *i) |
571 | { |
	return HUGETLBFS_SB(i->i_sb)->hstate;
573 | } |
574 | #else /* !CONFIG_HUGETLBFS */ |
575 | |
576 | #define is_file_hugepages(file) false |
577 | static inline struct file * |
578 | hugetlb_file_setup(const char *name, size_t size, vm_flags_t acctflag, |
579 | int creat_flags, int page_size_log) |
580 | { |
581 | return ERR_PTR(-ENOSYS); |
582 | } |
583 | |
584 | static inline struct hstate *hstate_inode(struct inode *i) |
585 | { |
586 | return NULL; |
587 | } |
588 | #endif /* !CONFIG_HUGETLBFS */ |
589 | |
590 | #ifdef HAVE_ARCH_HUGETLB_UNMAPPED_AREA |
591 | unsigned long hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
592 | unsigned long len, unsigned long pgoff, |
593 | unsigned long flags); |
594 | #endif /* HAVE_ARCH_HUGETLB_UNMAPPED_AREA */ |
595 | |
596 | unsigned long |
597 | generic_hugetlb_get_unmapped_area(struct file *file, unsigned long addr, |
598 | unsigned long len, unsigned long pgoff, |
599 | unsigned long flags); |
600 | |
601 | /* |
 * hugetlb page specific state flags. These flags are located in page.private
603 | * of the hugetlb head page. Functions created via the below macros should be |
604 | * used to manipulate these flags. |
605 | * |
606 | * HPG_restore_reserve - Set when a hugetlb page consumes a reservation at |
607 | * allocation time. Cleared when page is fully instantiated. Free |
608 | * routine checks flag to restore a reservation on error paths. |
609 | * Synchronization: Examined or modified by code that knows it has |
610 | * the only reference to page. i.e. After allocation but before use |
611 | * or when the page is being freed. |
612 | * HPG_migratable - Set after a newly allocated page is added to the page |
613 | * cache and/or page tables. Indicates the page is a candidate for |
614 | * migration. |
615 | * Synchronization: Initially set after new page allocation with no |
616 | * locking. When examined and modified during migration processing |
617 | * (isolate, migrate, putback) the hugetlb_lock is held. |
618 | * HPG_temporary - Set on a page that is temporarily allocated from the buddy |
619 | * allocator. Typically used for migration target pages when no pages |
620 | * are available in the pool. The hugetlb free page path will |
621 | * immediately free pages with this flag set to the buddy allocator. |
622 | * Synchronization: Can be set after huge page allocation from buddy when |
 *		code knows it has the only reference. All other examinations and
624 | * modifications require hugetlb_lock. |
625 | * HPG_freed - Set when page is on the free lists. |
626 | * Synchronization: hugetlb_lock held for examination and modification. |
627 | * HPG_vmemmap_optimized - Set when the vmemmap pages of the page are freed. |
628 | * HPG_raw_hwp_unreliable - Set when the hugetlb page has a hwpoison sub-page |
629 | * that is not tracked by raw_hwp_page list. |
630 | */ |
631 | enum hugetlb_page_flags { |
632 | HPG_restore_reserve = 0, |
633 | HPG_migratable, |
634 | HPG_temporary, |
635 | HPG_freed, |
636 | HPG_vmemmap_optimized, |
637 | HPG_raw_hwp_unreliable, |
638 | __NR_HPAGEFLAGS, |
639 | }; |
640 | |
641 | /* |
642 | * Macros to create test, set and clear function definitions for |
643 | * hugetlb specific page flags. |
644 | */ |
645 | #ifdef CONFIG_HUGETLB_PAGE |
646 | #define TESTHPAGEFLAG(uname, flname) \ |
647 | static __always_inline \ |
648 | bool folio_test_hugetlb_##flname(struct folio *folio) \ |
649 | { void *private = &folio->private; \ |
650 | return test_bit(HPG_##flname, private); \ |
651 | } \ |
652 | static inline int HPage##uname(struct page *page) \ |
653 | { return test_bit(HPG_##flname, &(page->private)); } |
654 | |
655 | #define SETHPAGEFLAG(uname, flname) \ |
656 | static __always_inline \ |
657 | void folio_set_hugetlb_##flname(struct folio *folio) \ |
658 | { void *private = &folio->private; \ |
659 | set_bit(HPG_##flname, private); \ |
660 | } \ |
661 | static inline void SetHPage##uname(struct page *page) \ |
662 | { set_bit(HPG_##flname, &(page->private)); } |
663 | |
664 | #define CLEARHPAGEFLAG(uname, flname) \ |
665 | static __always_inline \ |
666 | void folio_clear_hugetlb_##flname(struct folio *folio) \ |
667 | { void *private = &folio->private; \ |
668 | clear_bit(HPG_##flname, private); \ |
669 | } \ |
670 | static inline void ClearHPage##uname(struct page *page) \ |
671 | { clear_bit(HPG_##flname, &(page->private)); } |
672 | #else |
673 | #define TESTHPAGEFLAG(uname, flname) \ |
674 | static inline bool \ |
675 | folio_test_hugetlb_##flname(struct folio *folio) \ |
676 | { return 0; } \ |
677 | static inline int HPage##uname(struct page *page) \ |
678 | { return 0; } |
679 | |
680 | #define SETHPAGEFLAG(uname, flname) \ |
681 | static inline void \ |
682 | folio_set_hugetlb_##flname(struct folio *folio) \ |
683 | { } \ |
684 | static inline void SetHPage##uname(struct page *page) \ |
685 | { } |
686 | |
687 | #define CLEARHPAGEFLAG(uname, flname) \ |
688 | static inline void \ |
689 | folio_clear_hugetlb_##flname(struct folio *folio) \ |
690 | { } \ |
691 | static inline void ClearHPage##uname(struct page *page) \ |
692 | { } |
693 | #endif |
694 | |
695 | #define HPAGEFLAG(uname, flname) \ |
696 | TESTHPAGEFLAG(uname, flname) \ |
697 | SETHPAGEFLAG(uname, flname) \ |
698 | CLEARHPAGEFLAG(uname, flname) \ |
699 | |
700 | /* |
701 | * Create functions associated with hugetlb page flags |
702 | */ |
703 | HPAGEFLAG(RestoreReserve, restore_reserve) |
704 | HPAGEFLAG(Migratable, migratable) |
705 | HPAGEFLAG(Temporary, temporary) |
706 | HPAGEFLAG(Freed, freed) |
707 | HPAGEFLAG(VmemmapOptimized, vmemmap_optimized) |
708 | HPAGEFLAG(RawHwpUnreliable, raw_hwp_unreliable) |
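/*
 * For example, HPAGEFLAG(Freed, freed) above generates
 * folio_test_hugetlb_freed(), folio_set_hugetlb_freed() and
 * folio_clear_hugetlb_freed() (plus the legacy HPageFreed(), SetHPageFreed()
 * and ClearHPageFreed() page variants), all operating on bit HPG_freed of
 * the head page's page.private field.
 */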
709 | |
710 | #ifdef CONFIG_HUGETLB_PAGE |
711 | |
712 | #define HSTATE_NAME_LEN 32 |
713 | /* Defines one hugetlb page size */ |
714 | struct hstate { |
715 | struct mutex resize_lock; |
716 | int next_nid_to_alloc; |
717 | int next_nid_to_free; |
718 | unsigned int order; |
719 | unsigned int demote_order; |
720 | unsigned long mask; |
721 | unsigned long max_huge_pages; |
722 | unsigned long nr_huge_pages; |
723 | unsigned long free_huge_pages; |
724 | unsigned long resv_huge_pages; |
725 | unsigned long surplus_huge_pages; |
726 | unsigned long nr_overcommit_huge_pages; |
727 | struct list_head hugepage_activelist; |
728 | struct list_head hugepage_freelists[MAX_NUMNODES]; |
729 | unsigned int max_huge_pages_node[MAX_NUMNODES]; |
730 | unsigned int nr_huge_pages_node[MAX_NUMNODES]; |
731 | unsigned int free_huge_pages_node[MAX_NUMNODES]; |
732 | unsigned int surplus_huge_pages_node[MAX_NUMNODES]; |
733 | #ifdef CONFIG_CGROUP_HUGETLB |
734 | /* cgroup control files */ |
735 | struct cftype cgroup_files_dfl[8]; |
736 | struct cftype cgroup_files_legacy[10]; |
737 | #endif |
738 | char name[HSTATE_NAME_LEN]; |
739 | }; |
740 | |
741 | struct huge_bootmem_page { |
742 | struct list_head list; |
743 | struct hstate *hstate; |
744 | }; |
745 | |
746 | int isolate_or_dissolve_huge_page(struct page *page, struct list_head *list); |
747 | struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, |
748 | unsigned long addr, int avoid_reserve); |
749 | struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, |
750 | nodemask_t *nmask, gfp_t gfp_mask); |
751 | int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping, |
752 | pgoff_t idx); |
753 | void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma, |
754 | unsigned long address, struct folio *folio); |
755 | |
756 | /* arch callback */ |
757 | int __init __alloc_bootmem_huge_page(struct hstate *h, int nid); |
758 | int __init alloc_bootmem_huge_page(struct hstate *h, int nid); |
759 | bool __init hugetlb_node_alloc_supported(void); |
760 | |
761 | void __init hugetlb_add_hstate(unsigned order); |
762 | bool __init arch_hugetlb_valid_size(unsigned long size); |
763 | struct hstate *size_to_hstate(unsigned long size); |
764 | |
765 | #ifndef HUGE_MAX_HSTATE |
766 | #define HUGE_MAX_HSTATE 1 |
767 | #endif |
768 | |
769 | extern struct hstate hstates[HUGE_MAX_HSTATE]; |
770 | extern unsigned int default_hstate_idx; |
771 | |
772 | #define default_hstate (hstates[default_hstate_idx]) |
773 | |
774 | static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) |
775 | { |
776 | return folio->_hugetlb_subpool; |
777 | } |
778 | |
779 | static inline void hugetlb_set_folio_subpool(struct folio *folio, |
780 | struct hugepage_subpool *subpool) |
781 | { |
782 | folio->_hugetlb_subpool = subpool; |
783 | } |
784 | |
785 | static inline struct hstate *hstate_file(struct file *f) |
786 | { |
	return hstate_inode(file_inode(f));
788 | } |
789 | |
790 | static inline struct hstate *hstate_sizelog(int page_size_log) |
791 | { |
792 | if (!page_size_log) |
793 | return &default_hstate; |
794 | |
795 | if (page_size_log < BITS_PER_LONG) |
		return size_to_hstate(1UL << page_size_log);
797 | |
798 | return NULL; |
799 | } |
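/*
 * For example, on a system that has registered 2 MB huge pages,
 * hstate_sizelog(21) would return that hstate, while an unregistered or
 * out-of-range log2 size yields NULL.
 */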
800 | |
801 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
802 | { |
	return hstate_file(vma->vm_file);
804 | } |
805 | |
806 | static inline unsigned long huge_page_size(const struct hstate *h) |
807 | { |
808 | return (unsigned long)PAGE_SIZE << h->order; |
809 | } |
810 | |
811 | extern unsigned long vma_kernel_pagesize(struct vm_area_struct *vma); |
812 | |
813 | extern unsigned long vma_mmu_pagesize(struct vm_area_struct *vma); |
814 | |
815 | static inline unsigned long huge_page_mask(struct hstate *h) |
816 | { |
817 | return h->mask; |
818 | } |
819 | |
820 | static inline unsigned int huge_page_order(struct hstate *h) |
821 | { |
822 | return h->order; |
823 | } |
824 | |
825 | static inline unsigned huge_page_shift(struct hstate *h) |
826 | { |
827 | return h->order + PAGE_SHIFT; |
828 | } |
829 | |
830 | static inline bool hstate_is_gigantic(struct hstate *h) |
831 | { |
832 | return huge_page_order(h) > MAX_PAGE_ORDER; |
833 | } |
834 | |
835 | static inline unsigned int pages_per_huge_page(const struct hstate *h) |
836 | { |
837 | return 1 << h->order; |
838 | } |
839 | |
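/* Number of 512-byte blocks in a huge page (used e.g. for i_blocks accounting) */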
840 | static inline unsigned int blocks_per_huge_page(struct hstate *h) |
841 | { |
842 | return huge_page_size(h) / 512; |
843 | } |
844 | |
845 | static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, |
846 | struct address_space *mapping, pgoff_t idx) |
847 | { |
	return filemap_lock_folio(mapping, idx << huge_page_order(h));
849 | } |
850 | |
851 | #include <asm/hugetlb.h> |
852 | |
853 | #ifndef is_hugepage_only_range |
854 | static inline int is_hugepage_only_range(struct mm_struct *mm, |
855 | unsigned long addr, unsigned long len) |
856 | { |
857 | return 0; |
858 | } |
859 | #define is_hugepage_only_range is_hugepage_only_range |
860 | #endif |
861 | |
862 | #ifndef arch_clear_hugepage_flags |
863 | static inline void arch_clear_hugepage_flags(struct page *page) { } |
864 | #define arch_clear_hugepage_flags arch_clear_hugepage_flags |
865 | #endif |
866 | |
867 | #ifndef arch_make_huge_pte |
868 | static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, |
869 | vm_flags_t flags) |
870 | { |
	return pte_mkhuge(entry);
872 | } |
873 | #endif |
874 | |
875 | static inline struct hstate *folio_hstate(struct folio *folio) |
876 | { |
877 | VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio); |
	return size_to_hstate(folio_size(folio));
879 | } |
880 | |
881 | static inline unsigned hstate_index_to_shift(unsigned index) |
882 | { |
883 | return hstates[index].order + PAGE_SHIFT; |
884 | } |
885 | |
886 | static inline int hstate_index(struct hstate *h) |
887 | { |
888 | return h - hstates; |
889 | } |
890 | |
891 | extern int dissolve_free_huge_page(struct page *page); |
892 | extern int dissolve_free_huge_pages(unsigned long start_pfn, |
893 | unsigned long end_pfn); |
894 | |
895 | #ifdef CONFIG_MEMORY_FAILURE |
896 | extern void folio_clear_hugetlb_hwpoison(struct folio *folio); |
897 | #else |
898 | static inline void folio_clear_hugetlb_hwpoison(struct folio *folio) |
899 | { |
900 | } |
901 | #endif |
902 | |
903 | #ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION |
904 | #ifndef arch_hugetlb_migration_supported |
905 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) |
906 | { |
907 | if ((huge_page_shift(h) == PMD_SHIFT) || |
908 | (huge_page_shift(h) == PUD_SHIFT) || |
909 | (huge_page_shift(h) == PGDIR_SHIFT)) |
910 | return true; |
911 | else |
912 | return false; |
913 | } |
914 | #endif |
915 | #else |
916 | static inline bool arch_hugetlb_migration_supported(struct hstate *h) |
917 | { |
918 | return false; |
919 | } |
920 | #endif |
921 | |
922 | static inline bool hugepage_migration_supported(struct hstate *h) |
923 | { |
924 | return arch_hugetlb_migration_supported(h); |
925 | } |
926 | |
/*
 * The movability check is different from the migration check: it determines
 * whether or not a huge page should be placed in a movable zone. Movability
 * makes sense only if the huge page size is supported for migration at all;
 * there is no reason for a huge page to be movable if it is not migratable
 * to start with. The huge page should also be small enough that migrating
 * it out of a movable zone remains feasible; just the presence in a movable
 * zone does not make the migration feasible.
 *
 * So even though large huge page sizes like the gigantic ones are
 * migratable, they should not be movable, because it is not feasible to
 * migrate them out of a movable zone.
 */
942 | static inline bool hugepage_movable_supported(struct hstate *h) |
943 | { |
944 | if (!hugepage_migration_supported(h)) |
945 | return false; |
946 | |
947 | if (hstate_is_gigantic(h)) |
948 | return false; |
949 | return true; |
950 | } |
951 | |
952 | /* Movability of hugepages depends on migration support. */ |
953 | static inline gfp_t htlb_alloc_mask(struct hstate *h) |
954 | { |
955 | if (hugepage_movable_supported(h)) |
956 | return GFP_HIGHUSER_MOVABLE; |
957 | else |
958 | return GFP_HIGHUSER; |
959 | } |
960 | |
961 | static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) |
962 | { |
963 | gfp_t modified_mask = htlb_alloc_mask(h); |
964 | |
965 | /* Some callers might want to enforce node */ |
966 | modified_mask |= (gfp_mask & __GFP_THISNODE); |
967 | |
968 | modified_mask |= (gfp_mask & __GFP_NOWARN); |
969 | |
970 | return modified_mask; |
971 | } |
972 | |
973 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
974 | struct mm_struct *mm, pte_t *pte) |
975 | { |
976 | if (huge_page_size(h) == PMD_SIZE) |
		return pmd_lockptr(mm, (pmd_t *) pte);
978 | VM_BUG_ON(huge_page_size(h) == PAGE_SIZE); |
979 | return &mm->page_table_lock; |
980 | } |
981 | |
982 | #ifndef hugepages_supported |
983 | /* |
 * Some platforms decide whether they support huge pages at boot
 * time. Some of them, such as powerpc, set HPAGE_SHIFT to 0
 * when there is no such support.
987 | */ |
988 | #define hugepages_supported() (HPAGE_SHIFT != 0) |
989 | #endif |
990 | |
991 | void hugetlb_report_usage(struct seq_file *m, struct mm_struct *mm); |
992 | |
993 | static inline void hugetlb_count_init(struct mm_struct *mm) |
994 | { |
	atomic_long_set(&mm->hugetlb_usage, 0);
996 | } |
997 | |
998 | static inline void hugetlb_count_add(long l, struct mm_struct *mm) |
999 | { |
	atomic_long_add(l, &mm->hugetlb_usage);
1001 | } |
1002 | |
1003 | static inline void hugetlb_count_sub(long l, struct mm_struct *mm) |
1004 | { |
	atomic_long_sub(l, &mm->hugetlb_usage);
1006 | } |
1007 | |
1008 | #ifndef huge_ptep_modify_prot_start |
1009 | #define huge_ptep_modify_prot_start huge_ptep_modify_prot_start |
1010 | static inline pte_t huge_ptep_modify_prot_start(struct vm_area_struct *vma, |
1011 | unsigned long addr, pte_t *ptep) |
1012 | { |
	return huge_ptep_get_and_clear(vma->vm_mm, addr, ptep);
1014 | } |
1015 | #endif |
1016 | |
1017 | #ifndef huge_ptep_modify_prot_commit |
1018 | #define huge_ptep_modify_prot_commit huge_ptep_modify_prot_commit |
1019 | static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma, |
1020 | unsigned long addr, pte_t *ptep, |
1021 | pte_t old_pte, pte_t pte) |
1022 | { |
	unsigned long psize = huge_page_size(hstate_vma(vma));

	set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
1026 | } |
1027 | #endif |
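/*
 * These two helpers are used as a pair: a sketch of the pattern (as in the
 * hugetlb protection-change path), with the pte lock held by the caller:
 *
 *	old_pte = huge_ptep_modify_prot_start(vma, addr, ptep);
 *	pte = huge_pte_modify(old_pte, newprot);
 *	huge_ptep_modify_prot_commit(vma, addr, ptep, old_pte, pte);
 */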
1028 | |
1029 | #ifdef CONFIG_NUMA |
1030 | void hugetlb_register_node(struct node *node); |
1031 | void hugetlb_unregister_node(struct node *node); |
1032 | #endif |
1033 | |
1034 | /* |
1035 | * Check if a given raw @page in a hugepage is HWPOISON. |
1036 | */ |
1037 | bool is_raw_hwpoison_page_in_hugepage(struct page *page); |
1038 | |
1039 | #else /* CONFIG_HUGETLB_PAGE */ |
1040 | struct hstate {}; |
1041 | |
1042 | static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio) |
1043 | { |
1044 | return NULL; |
1045 | } |
1046 | |
1047 | static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h, |
1048 | struct address_space *mapping, pgoff_t idx) |
1049 | { |
1050 | return NULL; |
1051 | } |
1052 | |
1053 | static inline int isolate_or_dissolve_huge_page(struct page *page, |
1054 | struct list_head *list) |
1055 | { |
1056 | return -ENOMEM; |
1057 | } |
1058 | |
1059 | static inline struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma, |
1060 | unsigned long addr, |
1061 | int avoid_reserve) |
1062 | { |
1063 | return NULL; |
1064 | } |
1065 | |
1066 | static inline struct folio * |
1067 | alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid, |
1068 | nodemask_t *nmask, gfp_t gfp_mask) |
1069 | { |
1070 | return NULL; |
1071 | } |
1072 | |
1073 | static inline int __alloc_bootmem_huge_page(struct hstate *h) |
1074 | { |
1075 | return 0; |
1076 | } |
1077 | |
1078 | static inline struct hstate *hstate_file(struct file *f) |
1079 | { |
1080 | return NULL; |
1081 | } |
1082 | |
1083 | static inline struct hstate *hstate_sizelog(int page_size_log) |
1084 | { |
1085 | return NULL; |
1086 | } |
1087 | |
1088 | static inline struct hstate *hstate_vma(struct vm_area_struct *vma) |
1089 | { |
1090 | return NULL; |
1091 | } |
1092 | |
1093 | static inline struct hstate *folio_hstate(struct folio *folio) |
1094 | { |
1095 | return NULL; |
1096 | } |
1097 | |
1098 | static inline struct hstate *size_to_hstate(unsigned long size) |
1099 | { |
1100 | return NULL; |
1101 | } |
1102 | |
1103 | static inline unsigned long huge_page_size(struct hstate *h) |
1104 | { |
1105 | return PAGE_SIZE; |
1106 | } |
1107 | |
1108 | static inline unsigned long huge_page_mask(struct hstate *h) |
1109 | { |
1110 | return PAGE_MASK; |
1111 | } |
1112 | |
1113 | static inline unsigned long vma_kernel_pagesize(struct vm_area_struct *vma) |
1114 | { |
1115 | return PAGE_SIZE; |
1116 | } |
1117 | |
1118 | static inline unsigned long vma_mmu_pagesize(struct vm_area_struct *vma) |
1119 | { |
1120 | return PAGE_SIZE; |
1121 | } |
1122 | |
1123 | static inline unsigned int huge_page_order(struct hstate *h) |
1124 | { |
1125 | return 0; |
1126 | } |
1127 | |
1128 | static inline unsigned int huge_page_shift(struct hstate *h) |
1129 | { |
1130 | return PAGE_SHIFT; |
1131 | } |
1132 | |
1133 | static inline bool hstate_is_gigantic(struct hstate *h) |
1134 | { |
1135 | return false; |
1136 | } |
1137 | |
1138 | static inline unsigned int pages_per_huge_page(struct hstate *h) |
1139 | { |
1140 | return 1; |
1141 | } |
1142 | |
1143 | static inline unsigned hstate_index_to_shift(unsigned index) |
1144 | { |
1145 | return 0; |
1146 | } |
1147 | |
1148 | static inline int hstate_index(struct hstate *h) |
1149 | { |
1150 | return 0; |
1151 | } |
1152 | |
1153 | static inline int dissolve_free_huge_page(struct page *page) |
1154 | { |
1155 | return 0; |
1156 | } |
1157 | |
1158 | static inline int dissolve_free_huge_pages(unsigned long start_pfn, |
1159 | unsigned long end_pfn) |
1160 | { |
1161 | return 0; |
1162 | } |
1163 | |
1164 | static inline bool hugepage_migration_supported(struct hstate *h) |
1165 | { |
1166 | return false; |
1167 | } |
1168 | |
1169 | static inline bool hugepage_movable_supported(struct hstate *h) |
1170 | { |
1171 | return false; |
1172 | } |
1173 | |
1174 | static inline gfp_t htlb_alloc_mask(struct hstate *h) |
1175 | { |
1176 | return 0; |
1177 | } |
1178 | |
1179 | static inline gfp_t htlb_modify_alloc_mask(struct hstate *h, gfp_t gfp_mask) |
1180 | { |
1181 | return 0; |
1182 | } |
1183 | |
1184 | static inline spinlock_t *huge_pte_lockptr(struct hstate *h, |
1185 | struct mm_struct *mm, pte_t *pte) |
1186 | { |
1187 | return &mm->page_table_lock; |
1188 | } |
1189 | |
1190 | static inline void hugetlb_count_init(struct mm_struct *mm) |
1191 | { |
1192 | } |
1193 | |
1194 | static inline void hugetlb_report_usage(struct seq_file *f, struct mm_struct *m) |
1195 | { |
1196 | } |
1197 | |
1198 | static inline void hugetlb_count_sub(long l, struct mm_struct *mm) |
1199 | { |
1200 | } |
1201 | |
1202 | static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma, |
1203 | unsigned long addr, pte_t *ptep) |
1204 | { |
1205 | #ifdef CONFIG_MMU |
1206 | return ptep_get(ptep); |
1207 | #else |
1208 | return *ptep; |
1209 | #endif |
1210 | } |
1211 | |
1212 | static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, |
1213 | pte_t *ptep, pte_t pte, unsigned long sz) |
1214 | { |
1215 | } |
1216 | |
1217 | static inline void hugetlb_register_node(struct node *node) |
1218 | { |
1219 | } |
1220 | |
1221 | static inline void hugetlb_unregister_node(struct node *node) |
1222 | { |
1223 | } |
1224 | #endif /* CONFIG_HUGETLB_PAGE */ |
1225 | |
1226 | static inline spinlock_t *huge_pte_lock(struct hstate *h, |
1227 | struct mm_struct *mm, pte_t *pte) |
1228 | { |
1229 | spinlock_t *ptl; |
1230 | |
1231 | ptl = huge_pte_lockptr(h, mm, pte); |
	spin_lock(ptl);
1233 | return ptl; |
1234 | } |
1235 | |
1236 | #if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA) |
1237 | extern void __init hugetlb_cma_reserve(int order); |
1238 | #else |
1239 | static inline __init void hugetlb_cma_reserve(int order) |
1240 | { |
1241 | } |
1242 | #endif |
1243 | |
1244 | #ifdef CONFIG_ARCH_WANT_HUGE_PMD_SHARE |
1245 | static inline bool hugetlb_pmd_shared(pte_t *pte) |
1246 | { |
1247 | return page_count(virt_to_page(pte)) > 1; |
1248 | } |
1249 | #else |
1250 | static inline bool hugetlb_pmd_shared(pte_t *pte) |
1251 | { |
1252 | return false; |
1253 | } |
1254 | #endif |
1255 | |
1256 | bool want_pmd_share(struct vm_area_struct *vma, unsigned long addr); |
1257 | |
1258 | #ifndef __HAVE_ARCH_FLUSH_HUGETLB_TLB_RANGE |
1259 | /* |
1260 | * ARCHes with special requirements for evicting HUGETLB backing TLB entries can |
1261 | * implement this. |
1262 | */ |
1263 | #define flush_hugetlb_tlb_range(vma, addr, end) flush_tlb_range(vma, addr, end) |
1264 | #endif |
1265 | |
1266 | static inline bool __vma_shareable_lock(struct vm_area_struct *vma) |
1267 | { |
1268 | return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data; |
1269 | } |
1270 | |
1271 | bool __vma_private_lock(struct vm_area_struct *vma); |
1272 | |
1273 | /* |
1274 | * Safe version of huge_pte_offset() to check the locks. See comments |
1275 | * above huge_pte_offset(). |
1276 | */ |
1277 | static inline pte_t * |
1278 | hugetlb_walk(struct vm_area_struct *vma, unsigned long addr, unsigned long sz) |
1279 | { |
1280 | #if defined(CONFIG_HUGETLB_PAGE) && \ |
1281 | defined(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && defined(CONFIG_LOCKDEP) |
1282 | struct hugetlb_vma_lock *vma_lock = vma->vm_private_data; |
1283 | |
1284 | /* |
	 * If pmd sharing is possible, locking is needed to safely walk the
	 * hugetlb pgtables.  More information can be found in the comment
1287 | * above huge_pte_offset() in the same file. |
1288 | * |
1289 | * NOTE: lockdep_is_held() is only defined with CONFIG_LOCKDEP. |
1290 | */ |
1291 | if (__vma_shareable_lock(vma)) |
1292 | WARN_ON_ONCE(!lockdep_is_held(&vma_lock->rw_sema) && |
1293 | !lockdep_is_held( |
1294 | &vma->vm_file->f_mapping->i_mmap_rwsem)); |
1295 | #endif |
	return huge_pte_offset(vma->vm_mm, addr, sz);
1297 | } |
1298 | |
1299 | #endif /* _LINUX_HUGETLB_H */ |
1300 | |