// SPDX-License-Identifier: GPL-2.0+
/*
 * flexible mmap layout support
 *
 * Copyright 2003-2004 Red Hat Inc., Durham, North Carolina.
 * All Rights Reserved.
 *
 * Started by Ingo Molnar <mingo@elte.hu>
 */

#include <linux/elf-randomize.h>
#include <linux/personality.h>
#include <linux/mm.h>
#include <linux/mman.h>
#include <linux/sched/signal.h>
#include <linux/sched/mm.h>
#include <linux/random.h>
#include <linux/compat.h>
#include <linux/security.h>
#include <asm/elf.h>

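/*
 * Upper bound, in bytes, on the random shift applied below STACK_TOP
 * when placing the stack; zero when randomization is disabled for the
 * current task. As a sketch (the actual numbers depend on the
 * architecture's STACK_RND_MASK and PAGE_SHIFT, so treat these values
 * as an assumed example): with 4 KB pages and STACK_RND_MASK == 0x3ff,
 * the stack base may be shifted by up to 0x3ff << 12, just under 4 MB.
 */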
static unsigned long stack_maxrandom_size(void)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	return STACK_RND_MASK << PAGE_SHIFT;
}

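/*
 * A process gets the legacy bottom-up layout if the ADDR_COMPAT_LAYOUT
 * personality bit is set, if the stack rlimit is unlimited (no finite
 * gap below STACK_TOP can be derived from it), or if the
 * legacy_va_layout sysctl asks for it.
 */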
static inline int mmap_is_legacy(struct rlimit *rlim_stack)
{
	if (current->personality & ADDR_COMPAT_LAYOUT)
		return 1;
	if (rlim_stack->rlim_cur == RLIM_INFINITY)
		return 1;
	return sysctl_legacy_va_layout;
}

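/*
 * Per-process random factor added to the mmap base. MMAP_RND_MASK
 * limits the number of random bits; shifting by PAGE_SHIFT keeps the
 * result page aligned.
 */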
unsigned long arch_mmap_rnd(void)
{
	return (get_random_u32() & MMAP_RND_MASK) << PAGE_SHIFT;
}

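/*
 * Bottom-up layout: mappings start at TASK_UNMAPPED_BASE and grow
 * upwards, with the base shifted up by the random factor.
 */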
static unsigned long mmap_base_legacy(unsigned long rnd)
{
	return TASK_UNMAPPED_BASE + rnd;
}

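/*
 * Top-down layout: place the mmap base below STACK_TOP, leaving room
 * for the stack to grow. The gap is the stack rlimit padded by the
 * maximum stack randomization plus the stack guard gap, then clamped
 * to [128 MB, 5/6 of STACK_TOP]. Worked example with assumed values
 * (an 8 MB RLIMIT_STACK, randomization disabled, and the default
 * guard gap of 256 pages on 4 KB pages):
 *
 *	gap = 8 MB + (0 + 1 MB) = 9 MB
 *	mmap_base = PAGE_ALIGN(STACK_TOP - 9 MB)
 */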
static inline unsigned long mmap_base(unsigned long rnd,
				      struct rlimit *rlim_stack)
{
	unsigned long gap = rlim_stack->rlim_cur;
	unsigned long pad = stack_maxrandom_size() + stack_guard_gap;
	unsigned long gap_min, gap_max;

	/* Values close to RLIM_INFINITY can overflow. */
	if (gap + pad > gap)
		gap += pad;

	/*
	 * Top of mmap area (just below the process stack).
	 * Leave at least a ~128 MB hole.
	 */
	gap_min = SZ_128M;
	gap_max = (STACK_TOP / 6) * 5;

	if (gap < gap_min)
		gap = gap_min;
	else if (gap > gap_max)
		gap = gap_max;

	return PAGE_ALIGN(STACK_TOP - gap - rnd);
}

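/*
 * Extra alignment for randomized mappings: file-backed and shared
 * mappings are aligned to an MMAP_ALIGN_MASK page boundary so that
 * randomizing the base address does not defeat alignment-dependent
 * optimizations for such mappings. Private anonymous mappings need no
 * extra alignment, and nothing is aligned when randomization is off.
 */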
static unsigned long get_align_mask(struct file *filp, unsigned long flags)
{
	if (!(current->flags & PF_RANDOMIZE))
		return 0;
	if (filp || (flags & MAP_SHARED))
		return MMAP_ALIGN_MASK << PAGE_SHIFT;
	return 0;
}

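/*
 * Bottom-up search for a free range: MAP_FIXED requests and usable
 * address hints skip the search entirely; otherwise scan upwards from
 * mm->mmap_base towards TASK_SIZE. Any successful result still has to
 * pass check_asce_limit(), which upgrades the page table layout when a
 * mapping reaches beyond the current address space limit.
 */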
unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr,
				     unsigned long len, unsigned long pgoff,
				     unsigned long flags)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = 0;
	info.length = len;
	info.low_limit = mm->mmap_base;
	info.high_limit = TASK_SIZE;
	info.align_mask = get_align_mask(filp, flags);
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);
	if (offset_in_page(addr))
		return addr;

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

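/*
 * Top-down counterpart: scan downwards from mm->mmap_base towards
 * PAGE_SIZE. On failure, retry bottom-up over the whole range (see the
 * comment inside the function).
 */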
unsigned long arch_get_unmapped_area_topdown(struct file *filp, unsigned long addr,
					     unsigned long len, unsigned long pgoff,
					     unsigned long flags)
{
	struct vm_area_struct *vma;
	struct mm_struct *mm = current->mm;
	struct vm_unmapped_area_info info = {};

	/* requested length too big for entire address space */
	if (len > TASK_SIZE - mmap_min_addr)
		return -ENOMEM;

	if (flags & MAP_FIXED)
		goto check_asce_limit;

	/* requesting a specific address */
	if (addr) {
		addr = PAGE_ALIGN(addr);
		vma = find_vma(mm, addr);
		if (TASK_SIZE - len >= addr && addr >= mmap_min_addr &&
		    (!vma || addr + len <= vm_start_gap(vma)))
			goto check_asce_limit;
	}

	info.flags = VM_UNMAPPED_AREA_TOPDOWN;
	info.length = len;
	info.low_limit = PAGE_SIZE;
	info.high_limit = mm->mmap_base;
	info.align_mask = get_align_mask(filp, flags);
	info.align_offset = pgoff << PAGE_SHIFT;
	addr = vm_unmapped_area(&info);

	/*
	 * A failed mmap() very likely causes application failure,
	 * so fall back to the bottom-up function here. This scenario
	 * can happen with large stack limits and large mmap()
	 * allocations.
	 */
	if (offset_in_page(addr)) {
		VM_BUG_ON(addr != -ENOMEM);
		info.flags = 0;
		info.low_limit = TASK_UNMAPPED_BASE;
		info.high_limit = TASK_SIZE;
		addr = vm_unmapped_area(&info);
		if (offset_in_page(addr))
			return addr;
	}

check_asce_limit:
	return check_asce_limit(mm, addr, len);
}

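/*
 * The two resulting layouts, sketched for illustration (high addresses
 * at the top; rnd is the per-process random factor and may be zero):
 *
 *	bottom-up (legacy)		top-down (default)
 *
 *	STACK_TOP			STACK_TOP
 *	  stack				  stack
 *	  (unused gap)			  gap (>= 128 MB)
 *	  mmap grows upwards		mm->mmap_base
 *	TASK_UNMAPPED_BASE + rnd	  mmap grows downwards
 *	  heap, text			  heap, text
 */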
/*
 * This function, called very early during the creation of a new
 * process VM image, sets up which VM layout function to use:
 */
void arch_pick_mmap_layout(struct mm_struct *mm, struct rlimit *rlim_stack)
{
	unsigned long random_factor = 0UL;

	if (current->flags & PF_RANDOMIZE)
		random_factor = arch_mmap_rnd();

	/*
	 * Fall back to the standard layout if the personality
	 * bit is set, or if the expected stack growth is unlimited:
	 */
	if (mmap_is_legacy(rlim_stack)) {
		mm->mmap_base = mmap_base_legacy(random_factor);
		mm->get_unmapped_area = arch_get_unmapped_area;
	} else {
		mm->mmap_base = mmap_base(random_factor, rlim_stack);
		mm->get_unmapped_area = arch_get_unmapped_area_topdown;
	}
}

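/*
 * VM_READ/VM_WRITE/VM_EXEC/VM_SHARED combinations mapped to page
 * protections. Private writable mappings start out read-only (PAGE_RO
 * or PAGE_RX) so the first write faults and triggers copy-on-write;
 * only shared writable mappings get PAGE_RW/PAGE_RWX immediately.
 * VM_WRITE without VM_READ still yields a readable protection, since
 * the hardware provides no write-only permission.
 */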
static const pgprot_t protection_map[16] = {
	[VM_NONE]					= PAGE_NONE,
	[VM_READ]					= PAGE_RO,
	[VM_WRITE]					= PAGE_RO,
	[VM_WRITE | VM_READ]				= PAGE_RO,
	[VM_EXEC]					= PAGE_RX,
	[VM_EXEC | VM_READ]				= PAGE_RX,
	[VM_EXEC | VM_WRITE]				= PAGE_RX,
	[VM_EXEC | VM_WRITE | VM_READ]			= PAGE_RX,
	[VM_SHARED]					= PAGE_NONE,
	[VM_SHARED | VM_READ]				= PAGE_RO,
	[VM_SHARED | VM_WRITE]				= PAGE_RW,
	[VM_SHARED | VM_WRITE | VM_READ]		= PAGE_RW,
	[VM_SHARED | VM_EXEC]				= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_READ]			= PAGE_RX,
	[VM_SHARED | VM_EXEC | VM_WRITE]		= PAGE_RWX,
	[VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]	= PAGE_RWX
};
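/*
 * Emits the generic vm_get_page_prot() implementation, which indexes
 * protection_map[] with the VM_READ|VM_WRITE|VM_EXEC|VM_SHARED bits of
 * the passed-in vm_flags.
 */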
DECLARE_VM_GET_PAGE_PROT