// SPDX-License-Identifier: GPL-2.0
#include <linux/compat.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sched/mm.h>
#include <linux/syscalls.h>
#include <linux/mm.h>
#include <linux/fs.h>
#include <linux/smp.h>
#include <linux/sem.h>
#include <linux/msg.h>
#include <linux/shm.h>
#include <linux/stat.h>
#include <linux/mman.h>
#include <linux/file.h>
#include <linux/utsname.h>
#include <linux/personality.h>
#include <linux/random.h>
#include <linux/uaccess.h>
#include <linux/elf.h>

#include <asm/elf.h>
#include <asm/ia32.h>

/*
 * Align a virtual address to avoid aliasing in the I$ on AMD F15h.
 */
static unsigned long get_align_mask(void)
{
        /* handle 32- and 64-bit case with a single conditional */
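        /*
         * How the single conditional works (assuming ALIGN_VA_32 is bit 0 and
         * ALIGN_VA_64 is bit 1, and that va_align.flags stays negative on CPUs
         * that do not need the workaround): mmap_is_ia32() returns 1 for
         * 32-bit tasks, so (2 - mmap_is_ia32()) evaluates to 1 (ALIGN_VA_32)
         * for them and to 2 (ALIGN_VA_64) for 64-bit tasks, selecting the
         * right flag bit without a separate branch per ABI.
         */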
        if (va_align.flags < 0 || !(va_align.flags & (2 - mmap_is_ia32())))
                return 0;

        if (!(current->flags & PF_RANDOMIZE))
                return 0;

        return va_align.mask;
}

/*
 * To avoid aliasing in the I$ on AMD F15h, the bits defined by the
 * va_align.bits, [12:upper_bit), are set to a random value instead of
 * zeroing them. This random value is computed once per boot. This form
 * of ASLR is known as "per-boot ASLR".
 *
 * To achieve this, the random value is added to the info.align_offset
 * value before calling vm_unmapped_area() or ORed directly to the
 * address.
 */
static unsigned long get_align_bits(void)
{
        return va_align.bits & get_align_mask();
}
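
/*
 * Worked example (a sketch, assuming F15h aliases on the VA slice [12:15),
 * i.e. va_align.mask == 0x7000): with a per-boot random va_align.bits of,
 * say, 0x5000, get_align_bits() returns 0x5000 whenever get_align_mask()
 * returns the mask, so every eligible file mapping carries the same value in
 * VA bits 14:12 instead of zeroes.
 */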

static int __init control_va_addr_alignment(char *str)
{
        /* guard against enabling this on other CPU families */
        if (va_align.flags < 0)
                return 1;

        if (*str == 0)
                return 1;

        if (!strcmp(str, "32"))
                va_align.flags = ALIGN_VA_32;
        else if (!strcmp(str, "64"))
                va_align.flags = ALIGN_VA_64;
        else if (!strcmp(str, "off"))
                va_align.flags = 0;
        else if (!strcmp(str, "on"))
                va_align.flags = ALIGN_VA_32 | ALIGN_VA_64;
        else
                pr_warn("invalid option value: 'align_va_addr=%s'\n", str);

        return 1;
}
__setup("align_va_addr=", control_va_addr_alignment);
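
/*
 * Usage sketch, following the parsing above: booting with "align_va_addr=32"
 * keeps the alignment workaround for 32-bit mmap()s only, "align_va_addr=64"
 * for 64-bit ones, "align_va_addr=on" for both, and "align_va_addr=off"
 * disables it entirely; any other value is rejected with a warning.
 */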

SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
                unsigned long, prot, unsigned long, flags,
                unsigned long, fd, unsigned long, off)
{
        if (off & ~PAGE_MASK)
                return -EINVAL;

        return ksys_mmap_pgoff(addr, len, prot, flags, fd, off >> PAGE_SHIFT);
}
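
/*
 * The raw byte offset must be page-aligned; with 4 KiB pages, for example,
 * off == 0x3000 passes the check and reaches ksys_mmap_pgoff() as pgoff == 3,
 * while off == 0x3100 fails with -EINVAL.
 */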

static void find_start_end(unsigned long addr, unsigned long flags,
                unsigned long *begin, unsigned long *end)
{
        if (!in_32bit_syscall() && (flags & MAP_32BIT)) {
                /* This is usually needed to map code in the small code
                   model, so it needs to be in the first 31 bits. Limit
                   it to that. This means we need to move the
                   unmapped base down for this case. This can give
                   conflicts with the heap, but we assume that glibc
                   malloc knows how to fall back to mmap. Give it 1GB
                   of playground for now. -AK */
                *begin = 0x40000000;
                *end = 0x80000000;
                if (current->flags & PF_RANDOMIZE) {
                        *begin = randomize_page(*begin, 0x02000000);
                }
                return;
        }

        *begin = get_mmap_base(1);
        if (in_32bit_syscall())
                *end = task_size_32bit();
        else
                *end = task_size_64bit(addr > DEFAULT_MAP_WINDOW);
}
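
/*
 * In summary: MAP_32BIT requests from 64-bit tasks are confined to the
 * 1 GiB..2 GiB window, with the start randomized by up to 32 MiB when ASLR is
 * enabled; everything else searches from the legacy mmap base up to the
 * 32-bit or 64-bit task size, going past DEFAULT_MAP_WINDOW only when the
 * caller's hint already points above it.
 */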

unsigned long
arch_get_unmapped_area(struct file *filp, unsigned long addr,
                unsigned long len, unsigned long pgoff, unsigned long flags)
{
        struct mm_struct *mm = current->mm;
        struct vm_area_struct *vma;
        struct vm_unmapped_area_info info;
        unsigned long begin, end;

        if (flags & MAP_FIXED)
                return addr;

        find_start_end(addr, flags, &begin, &end);

        if (len > end)
                return -ENOMEM;

        if (addr) {
                addr = PAGE_ALIGN(addr);
                vma = find_vma(mm, addr);
                if (end - len >= addr &&
                    (!vma || addr + len <= vm_start_gap(vma)))
                        return addr;
        }

        info.flags = 0;
        info.length = len;
        info.low_limit = begin;
        info.high_limit = end;
        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
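        /*
         * For file mappings the search is additionally constrained (a sketch,
         * assuming the usual vm_unmapped_area() alignment semantics): the
         * returned address must match info.align_offset in the bits covered
         * by info.align_mask, so it keeps the cache colour implied by the
         * file offset plus the per-boot random bits from get_align_bits().
         */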
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        return vm_unmapped_area(&info);
}

unsigned long
arch_get_unmapped_area_topdown(struct file *filp, const unsigned long addr0,
                const unsigned long len, const unsigned long pgoff,
                const unsigned long flags)
{
        struct vm_area_struct *vma;
        struct mm_struct *mm = current->mm;
        unsigned long addr = addr0;
        struct vm_unmapped_area_info info;

        /* requested length too big for entire address space */
        if (len > TASK_SIZE)
                return -ENOMEM;

        /* No address checking. See comment at mmap_address_hint_valid() */
        if (flags & MAP_FIXED)
                return addr;

        /* for MAP_32BIT mappings we force the legacy mmap base */
        if (!in_32bit_syscall() && (flags & MAP_32BIT))
                goto bottomup;

        /* requesting a specific address */
        if (addr) {
                addr &= PAGE_MASK;
                if (!mmap_address_hint_valid(addr, len))
                        goto get_unmapped_area;

                vma = find_vma(mm, addr);
                if (!vma || addr + len <= vm_start_gap(vma))
                        return addr;
        }
get_unmapped_area:

        info.flags = VM_UNMAPPED_AREA_TOPDOWN;
        info.length = len;
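        /*
         * MAP_ABOVE4G keeps the whole mapping above 4 GiB; assuming this is
         * the flag introduced for x86 shadow-stack mappings, it avoids
         * addresses that a 32-bit-truncated pointer could alias. Without it
         * the search may go as low as PAGE_SIZE.
         */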
        if (!in_32bit_syscall() && (flags & MAP_ABOVE4G))
                info.low_limit = SZ_4G;
        else
                info.low_limit = PAGE_SIZE;

        info.high_limit = get_mmap_base(0);

        /*
         * If hint address is above DEFAULT_MAP_WINDOW, look for unmapped area
         * in the full address space.
         *
         * !in_32bit_syscall() check to avoid high addresses for x32
         * (and make it a no-op on native i386).
         */
        if (addr > DEFAULT_MAP_WINDOW && !in_32bit_syscall())
                info.high_limit += TASK_SIZE_MAX - DEFAULT_MAP_WINDOW;
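        /*
         * Numerically (assuming the usual x86-64 constants), DEFAULT_MAP_WINDOW
         * sits at the 47-bit boundary, so this only grows high_limit on
         * 5-level-paging systems where TASK_SIZE_MAX extends toward the 56-bit
         * boundary, and only for callers that explicitly hinted above the
         * default window.
         */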

        info.align_mask = 0;
        info.align_offset = pgoff << PAGE_SHIFT;
        if (filp) {
                info.align_mask = get_align_mask();
                info.align_offset += get_align_bits();
        }
        addr = vm_unmapped_area(&info);
        if (!(addr & ~PAGE_MASK))
                return addr;
        VM_BUG_ON(addr != -ENOMEM);

bottomup:
        /*
         * A failed mmap() very likely causes application failure,
         * so fall back to the bottom-up function here. This scenario
         * can happen with large stack limits and large mmap()
         * allocations.
         */
        return arch_get_unmapped_area(filp, addr0, len, pgoff, flags);
}