/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */
void __init kmem_cache_init(void);

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together; this avoids the typical
 * ABA problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
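
/*
 * Illustrative sketch only (not part of this header's API): because the
 * pointer and the counter are updated as one double-word cmpxchg, a freelist
 * head that was popped and re-pushed between the read and the cmpxchg is
 * still detected via the changed counter, even though the pointer compares
 * equal again. Assuming a caller that has read "old.full" and wants to pop
 * the first object (get_freepointer() names the SLUB next-object helper and
 * is used here purely for illustration):
 *
 *	freelist_aba_t old, new;
 *
 *	do {
 *		new.freelist = get_freepointer(s, old.freelist);
 *		new.counter = old.counter + 1;
 *	} while (!try_cmpxchg_freelist(&slab->freelist_counter.full,
 *				       &old.full, new.full));
 */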

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			struct list_head slab_list;
			void *freelist;	/* array of free object indexes */
			void *s_mem;	/* first object */
		};
		struct rcu_head rcu_head;
	};
	unsigned int active;

#elif defined(CONFIG_SLUB)

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};
	unsigned int __unused;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl) \
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba) && defined(CONFIG_SLUB)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif

/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio) (_Generic((folio),				\
	const struct folio *: (const struct slab *)(folio),		\
	struct folio *: (struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and use
 * some fields in the first struct page of the folio - those fields are now
 * accessed by struct slab. It is occasionally necessary to convert back to a
 * folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s) (_Generic((s),					\
	const struct slab *: (const struct folio *)s,			\
	struct slab *: (struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is either the head of a compound page or a single
 * order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p) (_Generic((p),					\
	const struct page *: (const struct slab *)(p),			\
	struct page *: (struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)
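
/*
 * Illustrative example of how the conversion helpers above fit together;
 * "object" stands for any pointer into a slab allocation and nothing here is
 * new API, it only strings the existing helpers together:
 *
 *	struct folio *folio = virt_to_folio(object);
 *	struct slab *slab = folio_slab(folio);	// only if folio_test_slab()
 *	struct page *page = slab_page(slab);	// back to the head page
 *	struct folio *back = slab_folio(slab);	// and back to the folio
 *
 * The _Generic() based macros preserve constness: passing a const struct
 * folio * yields a const struct slab *, and vice versa.
 */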

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
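
/*
 * Example: with 4 KiB pages, an order-1 slab spans two pages, so slab_size()
 * returns PAGE_SIZE << 1 = 8192 bytes.
 */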

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>
#include <linux/list_lru.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a given size */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags, unsigned long caller);

void *__kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags,
			      int node, size_t orig_size,
			      unsigned long caller);
void __kmem_cache_free(struct kmem_cache *s, void *x, unsigned long caller);

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

void __init new_kmalloc_cache(int idx, enum kmalloc_cache_type type,
			      slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS	(SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
				 SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS	(0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT | SLAB_NO_MERGE)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT | \
			  SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT | \
			      SLAB_KMALLOC | \
			      SLAB_NO_MERGE | \
			      SLAB_NO_USER_FLAGS)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
			     gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}
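
/*
 * Example: on a 64-bit kernel, a cache with s->size == 256 is charged
 * 256 + sizeof(struct obj_cgroup *) = 264 bytes per accounted object.
 */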

/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_online())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	/*
	 * The obtained objcg pointer is safe to use within the current scope,
	 * defined by current task or set_active_memcg() pair.
	 * obj_cgroup_get() is used to get a permanent reference.
	 */
	objcg = current_obj_cgroup();
	if (!objcg)
		return true;

	if (lru) {
		int ret;
		struct mem_cgroup *memcg;

		memcg = get_mem_cgroup_from_objcg(objcg);
		ret = memcg_list_lru_alloc(memcg, lru, flags);
		css_put(&memcg->css);

		if (ret)
			return false;
	}

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s)))
		return false;

	*objcgp = objcg;
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_online() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags, false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
	struct obj_cgroup **objcgs;
	int i;

	if (!memcg_kmem_online())
		return;

	objcgs = slab_objcgs(slab);
	if (!objcgs)
		return;

	for (i = 0; i < objects; i++) {
		struct obj_cgroup *objcg;
		unsigned int off;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}
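
/*
 * Sketch of how these hooks pair up around an allocation (illustrative only;
 * the real callers are the slab_pre_alloc_hook()/slab_post_alloc_hook()
 * wrappers further down and the allocators' free paths):
 *
 *	struct obj_cgroup *objcg = NULL;
 *
 *	if (!memcg_slab_pre_alloc_hook(s, NULL, &objcg, 1, gfp))
 *		return NULL;		// charge failed, fail the allocation
 *	// ... allocate the object into p[0] ...
 *	memcg_slab_post_alloc_hook(s, objcg, gfp, 1, p);
 *
 *	// and later, on free:
 *	memcg_slab_free_hook(s, virt_to_slab(p[0]), p, 1);
 *
 * Pre-alloc charges the objcg for "objects" full-size objects, post-alloc
 * records the objcg per object (or uncharges on failure), and the free hook
 * uncharges and drops the per-object reference.
 */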

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					   struct kmem_cache *s, gfp_t gfp,
					   bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct list_lru *lru,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
		      __func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_online() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_online())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		 "%s: Wrong slab cache. %s but object is from %s\n",
		 __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}

void free_large_kmalloc(struct folio *folio, void *object);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}
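
/*
 * Example (sizes illustrative): for a plain SLUB cache with object_size 100
 * and no debugging or KASAN metadata, slab_ksize() returns s->size, i.e. the
 * object size rounded up for alignment (say 104 with 8-byte alignment), so
 * the caller may use the alignment padding as well. With SLAB_RED_ZONE or
 * SLAB_POISON set, only the original 100 bytes are reported because the
 * padding is owned by the debugging code.
 */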

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct list_lru *lru,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, lru, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init,
					unsigned int orig_size)
{
	unsigned int zero_size = s->object_size;
	bool kasan_init = init;
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * For kmalloc objects, the allocated memory size (object_size) is
	 * likely larger than the requested size (orig_size). If redzone
	 * checking is enabled for the extra space, don't zero it, as it will
	 * be redzoned soon. The redzone operation for this extra space could
	 * be seen as a replacement of the current poisoning under certain
	 * debug options, and won't break other sanity checks.
	 */
	if (kmem_cache_debug_flags(s, SLAB_STORE_USER | SLAB_RED_ZONE) &&
	    (s->flags & SLAB_KMALLOC))
		zero_size = orig_size;

	/*
	 * When slub_debug is enabled, avoid memory initialization integrated
	 * into KASAN and instead zero out the memory via the memset below with
	 * the proper size. Otherwise, KASAN might overwrite SLUB redzones and
	 * cause false-positive reports. This does not lead to a performance
	 * penalty on production builds, as slub_debug is not intended to be
	 * enabled there.
	 */
	if (__slub_debug_enabled())
		kasan_init = false;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, kasan_init);
		if (p[i] && init && (!kasan_init || !kasan_has_integrated_init()))
			memset(p[i], 0, zero_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
		kmsan_slab_alloc(s, p[i], flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}
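
/*
 * Rough shape of an allocation fast path built on these hooks (a sketch, not
 * the actual SLAB/SLUB code; "do_allocation" is a hypothetical stand-in for
 * the allocator's internal object pick):
 *
 *	struct obj_cgroup *objcg = NULL;
 *	void *object;
 *
 *	s = slab_pre_alloc_hook(s, NULL, &objcg, 1, gfpflags);
 *	if (!s)
 *		return NULL;		// fault injection or memcg refusal
 *	object = do_allocation(s, gfpflags);
 *	slab_post_alloc_hook(s, objcg, gfpflags, 1, &object,
 *			     slab_want_init_on_alloc(gfpflags, s), orig_size);
 *	return object;
 *
 * The post hook is where KASAN tagging, optional zero-initialization,
 * kmemleak/KMSAN registration and memcg object accounting happen, in that
 * order.
 */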

/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
#ifdef CONFIG_SLAB
	raw_spinlock_t list_lock;
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	spinlock_t list_lock;
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
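
/*
 * Example use of the iterator (illustrative; the locking shown is only what a
 * typical statistics or shrink walk would take for the SLUB list_lock):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n) {
 *		spin_lock_irq(&n->list_lock);
 *		// ... walk n->partial ...
 *		spin_unlock_irq(&n->list_lock);
 *	}
 */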


#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}
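
/*
 * Example: with init_on_alloc enabled, a cache without a constructor and
 * without SLAB_TYPESAFE_BY_RCU/SLAB_POISON always has its objects zeroed on
 * allocation; a SLAB_TYPESAFE_BY_RCU cache is only zeroed when the caller
 * passes __GFP_ZERO, since unconditional zeroing could wipe state that
 * concurrent RCU-protected readers may still be examining.
 */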

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */