// SPDX-License-Identifier: GPL-2.0
/*
 * IA-32 Huge TLB Page Support for Kernel.
 *
 * Copyright (C) 2002, Rohit Seth <rohit.seth@intel.com>
 */

#include <linux/init.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include <linux/sched/mm.h>
#include <linux/hugetlb.h>
#include <linux/pagemap.h>
#include <linux/err.h>
#include <linux/sysctl.h>
#include <linux/compat.h>
#include <asm/mman.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
#include <asm/elf.h>
/*
 * pmd_huge() returns 1 if @pmd is a hugetlb related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry.  Otherwise, returns 0.
 */
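/*
 * Note on the check below: a huge (PSE) entry has both _PAGE_PRESENT and
 * _PAGE_PSE set, and a migration/hwpoison entry has _PAGE_PRESENT clear,
 * so both fail the "== _PAGE_PRESENT" comparison and return 1; only a
 * present non-PSE entry (a regular pointer to a page table) returns 0.
 */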
int pmd_huge(pmd_t pmd)
{
	return !pmd_none(pmd) &&
		(pmd_val(pmd) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
}

/*
 * pud_huge() returns 1 if @pud is a hugetlb related entry, that is, a
 * normal hugetlb entry or a non-present (migration or hwpoisoned) hugetlb
 * entry.  Otherwise, returns 0.
 */
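/*
 * With only two page table levels (e.g. 32-bit non-PAE), the pud level is
 * folded away and pud-sized huge pages cannot exist, hence the #if below.
 */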
int pud_huge(pud_t pud)
{
#if CONFIG_PGTABLE_LEVELS > 2
	return !pud_none(pud) &&
		(pud_val(pud) & (_PAGE_PRESENT|_PAGE_PSE)) != _PAGE_PRESENT;
#else
	return 0;
#endif
}

#ifdef CONFIG_HUGETLB_PAGE
static unsigned long hugetlb_get_unmapped_area_bottomup(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = 0;
	info.length = len;
	info.low_limit = get_mmap_base(1);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
	info.high_limit = in_32bit_syscall() ?
		task_size_32bit() : task_size_64bit(addr > DEFAULT_MAP_WINDOW);

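	/*
	 * The align_mask keeps exactly the offset bits between the base page
	 * size and the huge page size: e.g. for 2MB pages with a 4KB base,
	 * PAGE_MASK & ~huge_page_mask(h) is 0x1ff000, which makes
	 * vm_unmapped_area() return a 2MB-aligned address.
	 */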
	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	return vm_unmapped_area(&info);
}

static unsigned long hugetlb_get_unmapped_area_topdown(struct file *file,
		unsigned long addr, unsigned long len,
		unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct vm_unmapped_area_info info;

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = get_mmap_base(0);

	/*
	 * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
	 * in the full address space.
	 */
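	/*
	 * Addresses above the 47-bit DEFAULT_MAP_WINDOW are handed out only
	 * when userspace opts in by passing such a hint, so that legacy
	 * applications never see pointers above 47 bits.
	 */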
	if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
		info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;

	info.align_mask = PAGE_MASK & ~huge_page_mask(h);
	info.align_offset = 0;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (addr & ~PAGE_MASK) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE_LOW;
		addr = vm_unmapped_area(&info);
	}

	return addr;
}

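/*
 * x86 implementation of the hugetlb unmapped-area search.  Userspace
 * reaches it via hugetlbfs or MAP_HUGETLB; a minimal sketch (assuming a
 * 2MB default huge page size, error handling omitted):
 *
 *	ptr = mmap(NULL, 2UL << 20, PROT_READ | PROT_WRITE,
 *		   MAP_PRIVATE | MAP_ANONYMOUS | MAP_HUGETLB, -1, 0);
 */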
unsigned long
hugetlb_get_unmapped_area(struct file *file, unsigned long addr,
		unsigned long len, unsigned long pgoff, unsigned long flags)
{
	struct hstate *h = hstate_file(file);
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;

	if (len & ~huge_page_mask(h))
		return -EINVAL;

	if (len > TASK_SIZE)
		return -ENOMEM;

	/* No address checking. See comment at mmap_address_hint_valid() */
	if (flags & MAP_FIXED) {
		if (prepare_hugepage_range(file, addr, len))
			return -EINVAL;
		return addr;
	}

	if (addr) {
		addr &= huge_page_mask(h);
		if (!mmap_address_hint_valid(addr, len))
			goto get_unmapped_area;

		vma = find_vma(mm, addr);
		if (!vma || addr + len <= vm_start_gap(vma))
			return addr;
	}

get_unmapped_area:
	if (mm->get_unmapped_area == arch_get_unmapped_area)
		return hugetlb_get_unmapped_area_bottomup(file, addr, len,
				pgoff, flags);
	else
		return hugetlb_get_unmapped_area_topdown(file, addr, len,
				pgoff, flags);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_X86_64
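/*
 * Backs the hugepagesz= boot parameter: e.g. "hugepagesz=1G" is rejected
 * unless the CPU advertises 1GB pages (the "pdpe1gb" feature flag).
 */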
bool __init arch_hugetlb_valid_size(unsigned long size)
{
	if (size == PMD_SIZE)
		return true;
	else if (size == PUD_SIZE && boot_cpu_has(X86_FEATURE_GBPAGES))
		return true;
	else
		return false;
}

#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
	/* With compaction or CMA we can allocate gigantic pages at runtime */
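	/* PUD_SHIFT - PAGE_SHIFT is 30 - 12 = 18 here, i.e. the 1GB hstate */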
	if (boot_cpu_has(X86_FEATURE_GBPAGES))
		hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
	return 0;
}
arch_initcall(gigantic_pages_init);
#endif /* CONFIG_CONTIG_ALLOC */
#endif /* CONFIG_X86_64 */