/* SPDX-License-Identifier: GPL-2.0 */
/*
 * NUMA memory policies for Linux.
 * Copyright 2003,2004 Andi Kleen SuSE Labs
 */
#ifndef _LINUX_MEMPOLICY_H
#define _LINUX_MEMPOLICY_H 1

#include <linux/sched.h>
#include <linux/mmzone.h>
#include <linux/slab.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/nodemask.h>
#include <linux/pagemap.h>
#include <uapi/linux/mempolicy.h>

struct mm_struct;

#ifdef CONFIG_NUMA

/*
 * Describe a memory policy.
 *
 * A mempolicy can be either associated with a process or with a VMA.
 * For VMA related allocations the VMA policy is preferred, otherwise
 * the process policy is used. Interrupts ignore the memory policy
 * of the current process.
 *
 * Locking policy for interleave:
 * In process context there is no locking because only the process accesses
 * its own state. All vma manipulation is somewhat protected by a down_read on
 * mmap_lock.
 *
 * Freeing policy:
 * Mempolicy objects are reference counted. A mempolicy will be freed when
 * mpol_put() decrements the reference count to zero.
 *
 * Duplicating policy objects:
 * mpol_dup() allocates a new mempolicy and copies the specified mempolicy
 * to the new storage. The reference count of the new object is initialized
 * to 1, representing the caller of mpol_dup().
 */
struct mempolicy {
        atomic_t refcnt;
        unsigned short mode;    /* See MPOL_* above */
        unsigned short flags;   /* See set_mempolicy() MPOL_F_* above */
        nodemask_t nodes;       /* interleave/bind/prefer */
        int home_node;          /* Home node to use for MPOL_BIND and MPOL_PREFERRED_MANY */

        union {
                nodemask_t cpuset_mems_allowed; /* relative to these nodes */
                nodemask_t user_nodemask;       /* nodemask passed by user */
        } w;
};
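
/*
 * Illustrative sketch (not part of this header): the refcounting rules
 * above imply a duplicate-then-release pattern. use_policy() is a
 * hypothetical consumer; on allocation failure __mpol_dup() returns an
 * ERR_PTR:
 *
 *	struct mempolicy *pol = mpol_dup(old);	(new object, refcnt == 1)
 *	if (IS_ERR(pol))
 *		return PTR_ERR(pol);
 *	use_policy(pol);
 *	mpol_put(pol);		(drops the reference taken by mpol_dup())
 */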

/*
 * Support for managing mempolicy data objects (clone, copy, destroy)
 * The default fast path of a NULL MPOL_DEFAULT policy is always inlined.
 */

extern void __mpol_put(struct mempolicy *pol);
static inline void mpol_put(struct mempolicy *pol)
{
        if (pol)
                __mpol_put(pol);
}

/*
 * Does mempolicy pol need explicit unref after use?
 * Currently only needed for shared policies.
 */
static inline int mpol_needs_cond_ref(struct mempolicy *pol)
{
        return (pol && (pol->flags & MPOL_F_SHARED));
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
        if (mpol_needs_cond_ref(pol))
                __mpol_put(pol);
}
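
/*
 * Illustrative sketch (caller names assumed): lookup paths may return
 * either a shared policy, with an extra reference taken, or a borrowed
 * task/VMA policy. mpol_cond_put() releases only the shared case:
 *
 *	struct mempolicy *pol = lookup_policy(vma, addr);  (hypothetical)
 *	nid = pick_node(pol);				   (hypothetical)
 *	mpol_cond_put(pol);	(drops the ref only if MPOL_F_SHARED is set)
 */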

extern struct mempolicy *__mpol_dup(struct mempolicy *pol);
static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
{
        if (pol)
                pol = __mpol_dup(pol);
        return pol;
}

#define vma_policy(vma) ((vma)->vm_policy)

static inline void mpol_get(struct mempolicy *pol)
{
        if (pol)
                atomic_inc(&pol->refcnt);
}
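
/*
 * Illustrative sketch (holder assumed): every mpol_get() must be
 * balanced by an mpol_put() once the stashed pointer is dropped:
 *
 *	mpol_get(pol);			(take a ref before stashing)
 *	obj->policy = pol;		(hypothetical long-lived holder)
 *	...
 *	mpol_put(obj->policy);		(release when the holder goes away)
 */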

extern bool __mpol_equal(struct mempolicy *a, struct mempolicy *b);
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        if (a == b)
                return true;
        return __mpol_equal(a, b);
}

/*
 * Tree of shared policies for a shared memory region.
 * Maintain the policies in a pseudo mm that contains vmas. The vmas
 * carry the policy. As a special twist the pseudo mm is indexed in pages, not
 * bytes, so that we can work with shared memory segments bigger than
 * unsigned long.
 */

struct sp_node {
        struct rb_node nd;
        unsigned long start, end;
        struct mempolicy *policy;
};

struct shared_policy {
        struct rb_root root;
        rwlock_t lock;
};

int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
int mpol_set_shared_policy(struct shared_policy *info,
                                struct vm_area_struct *vma,
                                struct mempolicy *new);
void mpol_free_shared_policy(struct shared_policy *p);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
                                            unsigned long idx);
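
/*
 * Illustrative sketch (shmem-style consumer, names assumed): an object
 * embeds a struct shared_policy and indexes lookups by page offset,
 * matching the page-based indexing described above:
 *
 *	mpol_shared_policy_init(&info->policy, mpol);	(at creation)
 *	...
 *	pgoff_t idx = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
 *	pol = mpol_shared_policy_lookup(&info->policy, idx);
 *	...						(allocate with pol)
 *	mpol_cond_put(pol);	(the lookup took a ref on a shared policy)
 *	...
 *	mpol_free_shared_policy(&info->policy);		(at teardown)
 */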

struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
                unsigned long addr);
bool vma_policy_mof(struct vm_area_struct *vma);

extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);

extern int huge_node(struct vm_area_struct *vma,
                                unsigned long addr, gfp_t gfp_flags,
                                struct mempolicy **mpol, nodemask_t **nodemask);
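
/*
 * Illustrative sketch (hugetlb-style caller, allocator name assumed):
 * huge_node() returns the preferred node id and hands back the policy
 * and an optional nodemask; release the policy with mpol_cond_put():
 *
 *	struct mempolicy *mpol;
 *	nodemask_t *nodemask;
 *	int nid = huge_node(vma, addr, gfp_mask, &mpol, &nodemask);
 *	page = alloc_on_node(nid, nodemask);	(hypothetical allocator)
 *	mpol_cond_put(mpol);
 */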

extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
                                const nodemask_t *mask);
extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
        struct mempolicy *mpol = get_task_policy(current);

        return policy_nodemask(gfp, mpol);
}

extern unsigned int mempolicy_slab_node(void);

extern enum zone_type policy_zone;

static inline void check_highest_zone(enum zone_type k)
{
        if (k > policy_zone && k != ZONE_MOVABLE)
                policy_zone = k;
}

int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                     const nodemask_t *to, int flags);

#ifdef CONFIG_TMPFS
extern int mpol_parse_str(char *str, struct mempolicy **mpol);
#endif

extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
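
/*
 * Illustrative round-trip sketch: mpol_parse_str() consumes the tmpfs
 * mount-option format, e.g. "bind:0-2" or "interleave:0-3" (see
 * Documentation/filesystems/tmpfs.rst), and mpol_to_str() emits the
 * same form. The buffer must be writable; a return of 0 means success:
 *
 *	char buf[64] = "interleave:0-3";
 *	struct mempolicy *mpol;
 *	if (!mpol_parse_str(buf, &mpol)) {
 *		mpol_to_str(buf, sizeof(buf), mpol);
 *		mpol_put(mpol);
 *	}
 */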

/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);

extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
        return (pol->mode == MPOL_PREFERRED_MANY);
}

#else

struct mempolicy {};

static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
        return true;
}

static inline void mpol_put(struct mempolicy *p)
{
}

static inline void mpol_cond_put(struct mempolicy *pol)
{
}

static inline void mpol_get(struct mempolicy *pol)
{
}

struct shared_policy {};

static inline void mpol_shared_policy_init(struct shared_policy *sp,
                                           struct mempolicy *mpol)
{
}

static inline void mpol_free_shared_policy(struct shared_policy *p)
{
}

static inline struct mempolicy *
mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
{
        return NULL;
}

#define vma_policy(vma) NULL

static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
{
        return 0;
}

static inline void numa_policy_init(void)
{
}

static inline void numa_default_policy(void)
{
}

static inline void mpol_rebind_task(struct task_struct *tsk,
                                    const nodemask_t *new)
{
}

static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
}

static inline int huge_node(struct vm_area_struct *vma,
                            unsigned long addr, gfp_t gfp_flags,
                            struct mempolicy **mpol, nodemask_t **nodemask)
{
        *mpol = NULL;
        *nodemask = NULL;
        return 0;
}

static inline bool init_nodemask_of_mempolicy(nodemask_t *m)
{
        return false;
}

static inline int do_migrate_pages(struct mm_struct *mm, const nodemask_t *from,
                                   const nodemask_t *to, int flags)
{
        return 0;
}

static inline void check_highest_zone(int k)
{
}

#ifdef CONFIG_TMPFS
static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
{
        return 1;       /* error */
}
#endif

static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
                                 unsigned long address)
{
        return -1;      /* no node preference */
}

static inline void mpol_put_task_policy(struct task_struct *task)
{
}

static inline nodemask_t *policy_nodemask_current(gfp_t gfp)
{
        return NULL;
}

static inline bool mpol_is_preferred_many(struct mempolicy *pol)
{
        return false;
}

#endif /* CONFIG_NUMA */
#endif