/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SHRINKER_H
#define _LINUX_SHRINKER_H

#include <linux/atomic.h>
#include <linux/types.h>
#include <linux/refcount.h>
#include <linux/completion.h>

#define SHRINKER_UNIT_BITS	BITS_PER_LONG

/*
 * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
 * shrinkers, which have elements charged to the memcg.
 */
struct shrinker_info_unit {
	atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
	DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
};

struct shrinker_info {
	struct rcu_head rcu;
	int map_nr_max;
	struct shrinker_info_unit *unit[];
};
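
/*
 * Illustrative sketch (not part of this header): a shrinker id is resolved
 * into a unit and a bit offset within that unit, so the per-memcg bitmap and
 * nr_deferred counters grow in SHRINKER_UNIT_BITS-sized chunks. Roughly:
 *
 *	struct shrinker_info_unit *unit;
 *
 *	unit = info->unit[shrinker_id / SHRINKER_UNIT_BITS];
 *	set_bit(shrinker_id % SHRINKER_UNIT_BITS, unit->map);
 *	atomic_long_add(nr, &unit->nr_deferred[shrinker_id % SHRINKER_UNIT_BITS]);
 *
 * The real lookup/charging logic lives in mm/shrinker.c; the snippet above
 * only shows how the two fields relate to a shrinker id.
 */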

/*
 * This struct is used to pass information from page reclaim to the shrinkers.
 * We consolidate the values for easier extension later.
 *
 * The 'gfp_mask' refers to the allocation we are currently trying to
 * fulfil.
 */
struct shrink_control {
	gfp_t gfp_mask;

	/* current node being shrunk (for NUMA aware shrinkers) */
	int nid;

	/*
	 * How many objects scan_objects should scan and try to reclaim.
	 * This is reset before every call, so it is safe for callees
	 * to modify.
	 */
	unsigned long nr_to_scan;

	/*
	 * How many objects did scan_objects process?
	 * This defaults to nr_to_scan before every call, but the callee
	 * should track its actual progress.
	 */
	unsigned long nr_scanned;

	/* current memcg being shrunk (for memcg aware shrinkers) */
	struct mem_cgroup *memcg;
};
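
/*
 * Illustrative sketch (simplified, not the exact reclaim code): the reclaim
 * path fills a shrink_control per node/memcg, asks count_objects() how much
 * is freeable, and then tops up nr_to_scan/nr_scanned before each
 * scan_objects() call:
 *
 *	struct shrink_control sc = {
 *		.gfp_mask = gfp_mask,
 *		.nid      = nid,
 *		.memcg    = memcg,
 *	};
 *	unsigned long freeable = shrinker->count_objects(shrinker, &sc);
 *
 *	while (total_scan >= batch_size) {
 *		unsigned long ret;
 *
 *		sc.nr_to_scan = batch_size;
 *		sc.nr_scanned = batch_size;
 *		ret = shrinker->scan_objects(shrinker, &sc);
 *		if (ret == SHRINK_STOP)
 *			break;
 *		freed += ret;
 *		total_scan -= sc.nr_scanned;
 *	}
 *
 * "batch_size", "total_scan" and the SHRINK_EMPTY handling are omitted or
 * simplified here; the real logic lives in do_shrink_slab().
 */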

#define SHRINK_STOP (~0UL)
#define SHRINK_EMPTY (~0UL - 1)
/*
 * A callback you can register to apply pressure to ageable caches.
 *
 * @count_objects should return the number of freeable items in the cache. If
 * there are no objects to free, it should return SHRINK_EMPTY; it should
 * return 0 when the number of freeable items cannot be determined, or when
 * the shrinker should skip this cache for now (e.g., because their number is
 * below the shrinkable limit). No deadlock checks should be done during the
 * count callback - the shrinker relies on aggregating the scan counts that
 * could not be executed due to potential deadlocks, so that they are run at a
 * later call when the deadlock condition is no longer pending.
 *
 * @scan_objects will only be called if @count_objects returned a non-zero
 * value for the number of freeable objects. The callback should scan the
 * cache and attempt to free items from it. It should then return the number
 * of objects freed during the scan, or SHRINK_STOP if progress cannot be made
 * due to potential deadlocks. If SHRINK_STOP is returned, no further attempts
 * to call @scan_objects will be made from the current reclaim context.
 *
 * @flags determines the shrinker abilities, such as NUMA awareness.
 */
struct shrinker {
	unsigned long (*count_objects)(struct shrinker *,
				       struct shrink_control *sc);
	unsigned long (*scan_objects)(struct shrinker *,
				      struct shrink_control *sc);

	long batch;	/* reclaim batch size, 0 = default */
	int seeks;	/* seeks to recreate an obj */
	unsigned flags;
	/*
	 * The reference count of this shrinker. A registered shrinker has an
	 * initial refcount of 1; lookup operations are then allowed to take
	 * references via shrinker_try_get(). During unregistration the
	 * initial refcount is dropped, and the shrinker is freed
	 * asynchronously via RCU once its refcount reaches 0.
	 */
	refcount_t refcount;
	struct completion done;	/* used to wait for refcount to reach 0 */
	struct rcu_head rcu;

	void *private_data;

	/* These are for internal use */
	struct list_head list;
#ifdef CONFIG_MEMCG
	/* ID in shrinker_idr */
	int id;
#endif
#ifdef CONFIG_SHRINKER_DEBUG
	int debugfs_id;
	const char *name;
	struct dentry *debugfs_entry;
#endif
	/* objs pending delete, per node */
	atomic_long_t *nr_deferred;
};
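
/*
 * Illustrative sketch of the two callbacks (all "my_*" names are made up for
 * this example; list_lru_shrink_count() is a real helper that consumes
 * sc->nid and sc->memcg for NUMA/memcg aware shrinkers):
 *
 *	static unsigned long my_cache_count(struct shrinker *shrink,
 *					    struct shrink_control *sc)
 *	{
 *		unsigned long count = list_lru_shrink_count(&my_cache_lru, sc);
 *
 *		return count ? count : SHRINK_EMPTY;
 *	}
 *
 *	static unsigned long my_cache_scan(struct shrinker *shrink,
 *					   struct shrink_control *sc)
 *	{
 *		unsigned long freed = 0;
 *
 *		if (!mutex_trylock(&my_cache_lock))
 *			return SHRINK_STOP;
 *		... free up to sc->nr_to_scan objects, counting them in freed ...
 *		mutex_unlock(&my_cache_lock);
 *		return freed;
 *	}
 */
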
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */

/* Internal flags */
#define SHRINKER_REGISTERED	BIT(0)
#define SHRINKER_ALLOCATED	BIT(1)

/* Flags for users to use */
#define SHRINKER_NUMA_AWARE	BIT(2)
#define SHRINKER_MEMCG_AWARE	BIT(3)
/*
 * For now this only makes sense when the shrinker is also MEMCG_AWARE;
 * a non-MEMCG_AWARE shrinker should not have this flag set.
 */
#define SHRINKER_NONSLAB	BIT(4)

__printf(2, 3)
struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
void shrinker_register(struct shrinker *shrinker);
void shrinker_free(struct shrinker *shrinker);
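
/*
 * Illustrative usage sketch (the "my_*" names are hypothetical): a typical
 * user allocates the shrinker, fills in the callbacks, then registers it;
 * shrinker_free() both unregisters and frees it on teardown:
 *
 *	struct shrinker *s;
 *
 *	s = shrinker_alloc(SHRINKER_NUMA_AWARE | SHRINKER_MEMCG_AWARE,
 *			   "my-cache-shrinker");
 *	if (!s)
 *		return -ENOMEM;
 *	s->count_objects = my_cache_count;
 *	s->scan_objects  = my_cache_scan;
 *	s->seeks         = DEFAULT_SEEKS;
 *	s->private_data  = my_cache;
 *	shrinker_register(s);
 *
 *	... later, on teardown:
 *	shrinker_free(s);
 */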

static inline bool shrinker_try_get(struct shrinker *shrinker)
{
	return refcount_inc_not_zero(&shrinker->refcount);
}

static inline void shrinker_put(struct shrinker *shrinker)
{
	if (refcount_dec_and_test(&shrinker->refcount))
		complete(&shrinker->done);
}
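
/*
 * Illustrative sketch of the lookup pattern these helpers support (loosely
 * modelled on the reclaim-side iteration, simplified): a reference is taken
 * under RCU so the shrinker cannot be freed while its callbacks run, and
 * shrinker_put() lets the unregistering side complete once the last
 * reference is dropped:
 *
 *	rcu_read_lock();
 *	list_for_each_entry_rcu(shrinker, &shrinker_list, list) {
 *		if (!shrinker_try_get(shrinker))
 *			continue;
 *		rcu_read_unlock();
 *
 *		... invoke the shrinker's callbacks ...
 *
 *		rcu_read_lock();
 *		shrinker_put(shrinker);
 *	}
 *	rcu_read_unlock();
 */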

#ifdef CONFIG_SHRINKER_DEBUG
extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
						  const char *fmt, ...);
#else /* CONFIG_SHRINKER_DEBUG */
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
	return 0;
}
#endif /* CONFIG_SHRINKER_DEBUG */
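
/*
 * Illustrative sketch (the names are hypothetical): the debugfs entry can be
 * renamed with printf-style arguments, e.g. when the object the shrinker
 * belongs to changes its identity:
 *
 *	shrinker_debugfs_rename(my_shrinker, "my-cache-%s", new_name);
 */
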
#endif /* _LINUX_SHRINKER_H */

source code of linux/include/linux/shrinker.h