/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_MMAN_H
#define _LINUX_MMAN_H

#include <linux/mm.h>
#include <linux/percpu_counter.h>

#include <linux/atomic.h>
#include <uapi/linux/mman.h>

/*
 * Arrange for legacy / undefined architecture specific flags to be
 * ignored by mmap handling code.
 */
#ifndef MAP_32BIT
#define MAP_32BIT 0
#endif
#ifndef MAP_ABOVE4G
#define MAP_ABOVE4G 0
#endif
#ifndef MAP_HUGE_2MB
#define MAP_HUGE_2MB 0
#endif
#ifndef MAP_HUGE_1GB
#define MAP_HUGE_1GB 0
#endif
#ifndef MAP_UNINITIALIZED
#define MAP_UNINITIALIZED 0
#endif
#ifndef MAP_SYNC
#define MAP_SYNC 0
#endif
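
/*
 * Defining a missing flag to 0 makes it a no-op everywhere below: testing
 * it with "flags & MAP_32BIT" always yields 0, and OR-ing it into a mask
 * such as LEGACY_MAP_MASK contributes nothing.
 */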

/*
 * The historical set of flags that all mmap implementations implicitly
 * support when a ->mmap_validate() op is not provided in file_operations.
 *
 * MAP_EXECUTABLE and MAP_DENYWRITE are completely ignored throughout the
 * kernel.
 */
#define LEGACY_MAP_MASK (MAP_SHARED \
		| MAP_PRIVATE \
		| MAP_FIXED \
		| MAP_ANONYMOUS \
		| MAP_DENYWRITE \
		| MAP_EXECUTABLE \
		| MAP_UNINITIALIZED \
		| MAP_GROWSDOWN \
		| MAP_LOCKED \
		| MAP_NORESERVE \
		| MAP_POPULATE \
		| MAP_NONBLOCK \
		| MAP_STACK \
		| MAP_HUGETLB \
		| MAP_32BIT \
		| MAP_ABOVE4G \
		| MAP_HUGE_2MB \
		| MAP_HUGE_1GB)
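
/*
 * Usage sketch (hedged): mmap() callers that pass MAP_SHARED_VALIDATE are
 * checked against this mask plus the file's own supported flags, roughly
 * what do_mmap() does:
 *
 *	flags_mask = LEGACY_MAP_MASK | file->f_op->mmap_supported_flags;
 *	if (flags & ~flags_mask)
 *		return -EOPNOTSUPP;
 */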

extern int sysctl_overcommit_memory;
extern int sysctl_overcommit_ratio;
extern unsigned long sysctl_overcommit_kbytes;
extern struct percpu_counter vm_committed_as;

#ifdef CONFIG_SMP
extern s32 vm_committed_as_batch;
extern void mm_compute_batch(int overcommit_policy);
#else
#define vm_committed_as_batch 0
static inline void mm_compute_batch(int overcommit_policy)
{
}
#endif
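
/*
 * Note (hedged): mm_compute_batch() sizes the per-CPU batch from RAM and
 * CPU count, and uses a smaller batch under OVERCOMMIT_NEVER, where
 * vm_committed_as must be read with more precision.
 */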

unsigned long vm_memory_committed(void);

static inline void vm_acct_memory(long pages)
{
	percpu_counter_add_batch(&vm_committed_as, pages, vm_committed_as_batch);
}

static inline void vm_unacct_memory(long pages)
{
	vm_acct_memory(-pages);
}
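
/*
 * Typical accounting pattern, a sketch of what __vm_enough_memory() does:
 * charge the pages up front, then undo the charge if the commit-limit
 * check fails ("over_limit" is a pseudo-condition, not a real helper):
 *
 *	vm_acct_memory(pages);
 *	if (over_limit) {
 *		vm_unacct_memory(pages);
 *		return -ENOMEM;
 *	}
 *	return 0;
 */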

/*
 * Allow architectures to handle additional protection and flag bits. The
 * overriding macros must be defined in the arch-specific asm/mman.h file.
 */

#ifndef arch_calc_vm_prot_bits
#define arch_calc_vm_prot_bits(prot, pkey) 0
#endif

#ifndef arch_calc_vm_flag_bits
#define arch_calc_vm_flag_bits(flags) 0
#endif

#ifndef arch_validate_prot
/*
 * This is called from mprotect(). PROT_GROWSDOWN and PROT_GROWSUP have
 * already been masked out.
 *
 * Returns true if the prot flags are valid
 */
static inline bool arch_validate_prot(unsigned long prot, unsigned long addr)
{
	return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC | PROT_SEM)) == 0;
}
#define arch_validate_prot arch_validate_prot
#endif
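
/*
 * Arch override sketch (hedged; PROT_FOO is a hypothetical bit, not a
 * real flag): an architecture with an extra protection bit would widen
 * the accepted set in its asm/mman.h, e.g.:
 *
 *	static inline bool arch_validate_prot(unsigned long prot,
 *					      unsigned long addr)
 *	{
 *		return (prot & ~(PROT_READ | PROT_WRITE | PROT_EXEC |
 *				 PROT_SEM | PROT_FOO)) == 0;
 *	}
 *	#define arch_validate_prot arch_validate_prot
 *
 * powerpc has done this for PROT_SAO, for example.
 */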

#ifndef arch_validate_flags
/*
 * This is called from mmap() and mprotect() with the updated vma->vm_flags.
 *
 * Returns true if the VM_* flags are valid.
 */
static inline bool arch_validate_flags(unsigned long flags)
{
	return true;
}
#define arch_validate_flags arch_validate_flags
#endif
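
/*
 * Note (hedged): arm64 overrides arch_validate_flags() to reject VM_MTE
 * mappings on files that cannot support memory tagging. The default
 * accepts everything.
 */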

/*
 * Optimisation macro. It is equivalent to:
 *	(x & bit1) ? bit2 : 0
 * but this version is faster.
 * ("bit1" and "bit2" must be single bits)
 */
#define _calc_vm_trans(x, bit1, bit2) \
  ((!(bit1) || !(bit2)) ? 0 : \
  ((bit1) <= (bit2) ? ((x) & (bit1)) * ((bit2) / (bit1)) \
  : ((x) & (bit1)) / ((bit1) / (bit2))))
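
/*
 * Worked example: with PROT_READ == 0x1 and VM_READ == 0x1, the macro
 * reduces to (x & 0x1) * 1. If bit1 were 0x4 and bit2 0x1, it becomes
 * (x & 0x4) / 4, moving the tested bit into place without a branch.
 * When either bit is 0 (a flag compiled out above), the whole expression
 * is a constant 0.
 */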

/*
 * Combine the mmap "prot" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_prot_bits(unsigned long prot, unsigned long pkey)
{
	return _calc_vm_trans(prot, PROT_READ,  VM_READ ) |
	       _calc_vm_trans(prot, PROT_WRITE, VM_WRITE) |
	       _calc_vm_trans(prot, PROT_EXEC,  VM_EXEC) |
	       arch_calc_vm_prot_bits(prot, pkey);
}
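
/*
 * Combination sketch (hedged, modeled on do_mmap()): the core mmap path
 * builds the initial vm_flags roughly as
 *
 *	vm_flags = calc_vm_prot_bits(prot, pkey) | calc_vm_flag_bits(flags) |
 *		   mm->def_flags | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC;
 */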

/*
 * Combine the mmap "flags" argument into "vm_flags" used internally.
 */
static inline unsigned long
calc_vm_flag_bits(unsigned long flags)
{
	return _calc_vm_trans(flags, MAP_GROWSDOWN,  VM_GROWSDOWN ) |
	       _calc_vm_trans(flags, MAP_LOCKED,     VM_LOCKED    ) |
	       _calc_vm_trans(flags, MAP_SYNC,       VM_SYNC      ) |
	       _calc_vm_trans(flags, MAP_STACK,      VM_NOHUGEPAGE) |
	       arch_calc_vm_flag_bits(flags);
}

unsigned long vm_commit_limit(void);
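
/*
 * Note (hedged): the limit follows Documentation/mm/overcommit-accounting;
 * when sysctl_overcommit_kbytes is 0 it is roughly
 *
 *	(totalram_pages() - hugetlb pages) * sysctl_overcommit_ratio / 100
 *		+ total_swap_pages
 *
 * and otherwise overcommit_kbytes (converted to pages) + total_swap_pages.
 */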

#ifndef arch_memory_deny_write_exec_supported
static inline bool arch_memory_deny_write_exec_supported(void)
{
	return true;
}
#define arch_memory_deny_write_exec_supported arch_memory_deny_write_exec_supported
#endif

/*
 * Denies creating a writable executable mapping or gaining executable permissions.
 *
 * This denies the following:
 *
 *	a)	mmap(PROT_WRITE | PROT_EXEC)
 *
 *	b)	mmap(PROT_WRITE)
 *		mprotect(PROT_EXEC)
 *
 *	c)	mmap(PROT_WRITE)
 *		mprotect(PROT_READ)
 *		mprotect(PROT_EXEC)
 *
 * But allows the following:
 *
 *	d)	mmap(PROT_READ | PROT_EXEC)
 *		mmap(PROT_READ | PROT_EXEC | PROT_BTI)
 */
static inline bool map_deny_write_exec(struct vm_area_struct *vma, unsigned long vm_flags)
{
	if (!test_bit(MMF_HAS_MDWE, &current->mm->flags))
		return false;

	if ((vm_flags & VM_EXEC) && (vm_flags & VM_WRITE))
		return true;

	if (!(vma->vm_flags & VM_EXEC) && (vm_flags & VM_EXEC))
		return true;

	return false;
}
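
/*
 * Caller sketch (hedged): the mmap()/mprotect() paths treat a true return
 * as a hard failure, roughly:
 *
 *	if (map_deny_write_exec(vma, newflags))
 *		return -EACCES;
 */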

#endif /* _LINUX_MMAN_H */