/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Variant of atomic_t specialized for reference counts.
 *
 * The interface matches the atomic_t interface (to aid in porting) but only
 * provides the few functions one should use for reference counting.
 *
 * Saturation semantics
 * ====================
 *
 * refcount_t differs from atomic_t in that the counter saturates at
 * REFCOUNT_SATURATED and will not move once there. This avoids wrapping the
 * counter and causing 'spurious' use-after-free issues. In order to avoid the
 * cost associated with introducing cmpxchg() loops into all of the saturating
 * operations, we temporarily allow the counter to take on an unchecked value
 * and then explicitly set it to REFCOUNT_SATURATED on detecting that underflow
 * or overflow has occurred. Although this is racy when multiple threads
 * access the refcount concurrently, by placing REFCOUNT_SATURATED roughly
 * equidistant from 0 and INT_MAX we minimise the scope for error:
 *
 *                            INT_MAX     REFCOUNT_SATURATED   UINT_MAX
 *   0                    (0x7fff_ffff)    (0xc000_0000)    (0xffff_ffff)
 *   +--------------------------------+----------------+----------------+
 *                                     <---------- bad value! ---------->
 *
 * (in a signed view of the world, the "bad value" range corresponds to
 * a negative counter value).
 *
 * As an example, consider a refcount_inc() operation that causes the counter
 * to overflow:
 *
 *	int old = atomic_fetch_add_relaxed(1, &r->refs);
 *	// old is INT_MAX, refcount now INT_MIN (0x8000_0000)
 *	if (old < 0)
 *		atomic_set(&r->refs, REFCOUNT_SATURATED);
 *
 * If another thread also performs a refcount_inc() operation between the two
 * atomic operations, then the count will continue to edge closer to 0. If it
 * reaches a value of 1 before /any/ of the threads reset it to the saturated
 * value, then a concurrent refcount_dec_and_test() may erroneously free the
 * underlying object.
 * Linux limits the maximum number of tasks to PID_MAX_LIMIT, which is currently
 * 0x400000 (and can't easily be raised in the future beyond FUTEX_TID_MASK).
 * With the current PID limit, if no batched refcounting operations are used and
 * the attacker can't repeatedly trigger kernel oopses in the middle of refcount
 * operations, this makes it impossible for a saturated refcount to leave the
 * saturation range, even if it is possible for multiple uses of the same
 * refcount to nest in the context of a single task:
 *
 *	(UINT_MAX+1-REFCOUNT_SATURATED) / PID_MAX_LIMIT =
 *	0x40000000 / 0x400000 = 0x100 = 256
 *
 * If hundreds of references are added/removed with a single refcounting
 * operation, it may potentially be possible to leave the saturation range; but
 * given the precise timing details involved with the round-robin scheduling of
 * each thread manipulating the refcount and the need to hit the race multiple
 * times in succession, there doesn't appear to be a practical avenue of attack
 * even if using refcount_add() operations with larger increments.
 *
 * Memory ordering
 * ===============
 *
 * Memory ordering rules are slightly relaxed wrt regular atomic_t functions
 * and provide only what is strictly required for refcounts.
 *
 * The increments are fully relaxed; these will not provide ordering. The
 * rationale is that whatever is used to obtain the object we're increasing the
 * reference count on will provide the ordering. For locked data structures,
 * it's the lock acquire; for RCU/lockless data structures, it's the dependent
 * load.
 *
 * Do note that inc_not_zero() provides a control dependency which will order
 * future stores against the inc; this ensures we'll never modify the object
 * if we did not in fact acquire a reference.
 *
 * The decrements will provide release order, such that all the prior loads and
 * stores will be issued before the decrement; they also provide a control
 * dependency, which will order us against the subsequent free().
 *
 * The control dependency is against the load of the cmpxchg (ll/sc) that
 * succeeded. This means the stores aren't fully ordered, but this is fine
 * because the 1->0 transition indicates no concurrency.
 *
 * Note that the allocator is responsible for ordering things between free()
 * and alloc().
 *
 * The decrements dec_and_test() and sub_and_test() also provide acquire
 * ordering on success.
 *
 */
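
/*
 * Illustrative sketch (not part of this header): when an object is found
 * under a lock, the lock acquire already orders the relaxed increment against
 * later accesses to the object, which is why refcount_inc() itself needs no
 * ordering. 'struct foo', 'foo_lock' and 'foo_list' are hypothetical names:
 *
 *	struct foo {
 *		struct list_head node;
 *		refcount_t ref;
 *	};
 *
 *	struct foo *foo_first_get(struct list_head *foo_list, spinlock_t *foo_lock)
 *	{
 *		struct foo *f;
 *
 *		spin_lock(foo_lock);
 *		f = list_first_entry_or_null(foo_list, struct foo, node);
 *		if (f)
 *			refcount_inc(&f->ref);	// ordering provided by the lock
 *		spin_unlock(foo_lock);
 *		return f;
 *	}
 */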

#ifndef _LINUX_REFCOUNT_H
#define _LINUX_REFCOUNT_H

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/compiler.h>
#include <linux/limits.h>
#include <linux/refcount_types.h>
#include <linux/spinlock_types.h>

struct mutex;

#define REFCOUNT_INIT(n)	{ .refs = ATOMIC_INIT(n), }
#define REFCOUNT_MAX		INT_MAX
#define REFCOUNT_SATURATED	(INT_MIN / 2)
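
/*
 * Illustrative sketch: a refcount normally starts at 1, accounting for the
 * creator's reference; statically that is done with REFCOUNT_INIT(). The
 * 'foo_table' object below is hypothetical:
 *
 *	static struct {
 *		refcount_t ref;
 *	} foo_table = {
 *		.ref = REFCOUNT_INIT(1),
 *	};
 */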

enum refcount_saturation_type {
	REFCOUNT_ADD_NOT_ZERO_OVF,
	REFCOUNT_ADD_OVF,
	REFCOUNT_ADD_UAF,
	REFCOUNT_SUB_UAF,
	REFCOUNT_DEC_LEAK,
};

void refcount_warn_saturate(refcount_t *r, enum refcount_saturation_type t);

/**
 * refcount_set - set a refcount's value
 * @r: the refcount
 * @n: value to which the refcount will be set
 */
static inline void refcount_set(refcount_t *r, int n)
{
	atomic_set(&r->refs, n);
}
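
/*
 * Illustrative sketch: refcount_set() is typically called once, right after
 * allocation, to account for the reference handed back to the caller.
 * 'struct foo' and foo_alloc() are hypothetical:
 *
 *	struct foo *foo_alloc(gfp_t gfp)
 *	{
 *		struct foo *f = kzalloc(sizeof(*f), gfp);
 *
 *		if (f)
 *			refcount_set(&f->ref, 1);	// creator's reference
 *		return f;
 *	}
 */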

/**
 * refcount_read - get a refcount's value
 * @r: the refcount
 *
 * Return: the refcount's value
 */
static inline unsigned int refcount_read(const refcount_t *r)
{
	return atomic_read(&r->refs);
}
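
/*
 * Illustrative sketch: refcount_read() is mainly useful for diagnostics; the
 * value can change concurrently, so it must not be used to decide whether an
 * object may be freed:
 *
 *	pr_debug("foo %p: %u references\n", f, refcount_read(&f->ref));
 */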

static inline __must_check __signed_wrap
bool __refcount_add_not_zero(int i, refcount_t *r, int *oldp)
{
	int old = refcount_read(r);

	/* Don't resurrect a zero count; otherwise try to add 'i' atomically. */
	do {
		if (!old)
			break;
	} while (!atomic_try_cmpxchg_relaxed(&r->refs, &old, old + i));

	if (oldp)
		*oldp = old;

	if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_NOT_ZERO_OVF);

	return old;
}

/**
 * refcount_add_not_zero - add a value to a refcount unless it is 0
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 *
 * Return: false if the passed refcount is 0, true otherwise
 */
static inline __must_check bool refcount_add_not_zero(int i, refcount_t *r)
{
	return __refcount_add_not_zero(i, r, NULL);
}
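
/*
 * Illustrative sketch: taking several references in one go, but only if the
 * object is still live. foo_get_many() is a hypothetical helper:
 *
 *	static bool foo_get_many(struct foo *f, int n)
 *	{
 *		return refcount_add_not_zero(n, &f->ref);
 *	}
 */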

static inline __signed_wrap
void __refcount_add(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_add_relaxed(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (unlikely(!old))
		refcount_warn_saturate(r, REFCOUNT_ADD_UAF);
	else if (unlikely(old < 0 || old + i < 0))
		refcount_warn_saturate(r, REFCOUNT_ADD_OVF);
}

/**
 * refcount_add - add a value to a refcount
 * @i: the value to add to the refcount
 * @r: the refcount
 *
 * Similar to atomic_add(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_inc(), or one of its variants, should instead be used to
 * increment a reference count.
 */
static inline void refcount_add(int i, refcount_t *r)
{
	__refcount_add(i, r, NULL);
}
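
/*
 * Illustrative sketch: a batched acquire for a caller that already holds a
 * reference, e.g. before handing the object to 'nr' new users:
 *
 *	refcount_add(nr, &f->ref);
 */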

static inline __must_check bool __refcount_inc_not_zero(refcount_t *r, int *oldp)
{
	return __refcount_add_not_zero(1, r, oldp);
}

/**
 * refcount_inc_not_zero - increment a refcount unless it is 0
 * @r: the refcount to increment
 *
 * Similar to atomic_inc_not_zero(), but will saturate at REFCOUNT_SATURATED
 * and WARN.
 *
 * Provides no memory ordering; it is assumed the caller has guaranteed the
 * object memory to be stable (RCU, etc.). It does provide a control dependency
 * and thereby orders future stores. See the comment on top.
 *
 * Return: true if the increment was successful, false otherwise
 */
static inline __must_check bool refcount_inc_not_zero(refcount_t *r)
{
	return __refcount_inc_not_zero(r, NULL);
}
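
/*
 * Illustrative sketch: the typical RCU/lockless lookup, where the object may
 * be concurrently released and a reference may only be taken while the count
 * is still non-zero. foo_hash_find() and 'key' are hypothetical:
 *
 *	rcu_read_lock();
 *	f = foo_hash_find(key);
 *	if (f && !refcount_inc_not_zero(&f->ref))
 *		f = NULL;	// already on its way out; treat as a miss
 *	rcu_read_unlock();
 */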

static inline void __refcount_inc(refcount_t *r, int *oldp)
{
	__refcount_add(1, r, oldp);
}

/**
 * refcount_inc - increment a refcount
 * @r: the refcount to increment
 *
 * Similar to atomic_inc(), but will saturate at REFCOUNT_SATURATED and WARN.
 *
 * Provides no memory ordering; it is assumed the caller already has a
 * reference on the object.
 *
 * Will WARN if the refcount is 0, as this represents a possible use-after-free
 * condition.
 */
static inline void refcount_inc(refcount_t *r)
{
	__refcount_inc(r, NULL);
}
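
/*
 * Illustrative sketch: the usual 'get' helper for a caller that already owns
 * a reference and wants to hand out another one. foo_get() is hypothetical:
 *
 *	static struct foo *foo_get(struct foo *f)
 *	{
 *		refcount_inc(&f->ref);
 *		return f;
 *	}
 */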

static inline __must_check __signed_wrap
bool __refcount_sub_and_test(int i, refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(i, &r->refs);

	if (oldp)
		*oldp = old;

	if (old == i) {
		/*
		 * We dropped the last reference: pair the release above with
		 * an acquire so that the subsequent free() cannot be hoisted
		 * before this point.
		 */
		smp_acquire__after_ctrl_dep();
		return true;
	}

	if (unlikely(old < 0 || old - i < 0))
		refcount_warn_saturate(r, REFCOUNT_SUB_UAF);

	return false;
}

/**
 * refcount_sub_and_test - subtract from a refcount and test if it is 0
 * @i: amount to subtract from the refcount
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), but it will WARN, return false and
 * ultimately leak on underflow and will fail to decrement when saturated
 * at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Use of this function is not recommended for the normal reference counting
 * use case in which references are taken and released one at a time. In these
 * cases, refcount_dec(), or one of its variants, should instead be used to
 * decrement a reference count.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_sub_and_test(int i, refcount_t *r)
{
	return __refcount_sub_and_test(i, r, NULL);
}
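
/*
 * Illustrative sketch: dropping several references at once and freeing the
 * object if that removed the last one. foo_put_many() is hypothetical:
 *
 *	static void foo_put_many(struct foo *f, int n)
 *	{
 *		if (refcount_sub_and_test(n, &f->ref))
 *			kfree(f);
 *	}
 */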

static inline __must_check bool __refcount_dec_and_test(refcount_t *r, int *oldp)
{
	return __refcount_sub_and_test(1, r, oldp);
}

/**
 * refcount_dec_and_test - decrement a refcount and test if it is 0
 * @r: the refcount
 *
 * Similar to atomic_dec_and_test(), it will WARN on underflow and fail to
 * decrement when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before, and provides an acquire ordering on success such that free()
 * must come after.
 *
 * Return: true if the resulting refcount is 0, false otherwise
 */
static inline __must_check bool refcount_dec_and_test(refcount_t *r)
{
	return __refcount_dec_and_test(r, NULL);
}
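
/*
 * Illustrative sketch: the canonical 'put' helper; the acquire ordering on
 * the final decrement ensures the free happens after all prior uses of the
 * object. foo_put() is hypothetical:
 *
 *	static void foo_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_test(&f->ref))
 *			kfree(f);
 *	}
 */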

static inline void __refcount_dec(refcount_t *r, int *oldp)
{
	int old = atomic_fetch_sub_release(1, &r->refs);

	if (oldp)
		*oldp = old;

	/*
	 * A refcount_dec() that drops the count to zero leaks the object:
	 * freeing is only done via the *_and_test() variants.
	 */
	if (unlikely(old <= 1))
		refcount_warn_saturate(r, REFCOUNT_DEC_LEAK);
}

/**
 * refcount_dec - decrement a refcount
 * @r: the refcount
 *
 * Similar to atomic_dec(), it will WARN on underflow and fail to decrement
 * when saturated at REFCOUNT_SATURATED.
 *
 * Provides release memory ordering, such that prior loads and stores are done
 * before.
 */
static inline void refcount_dec(refcount_t *r)
{
	__refcount_dec(r, NULL);
}
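
/*
 * Illustrative sketch: refcount_dec() is only appropriate when the count is
 * known to stay above zero, e.g. when dropping a temporary reference while a
 * longer-lived one (such as a cache's) is guaranteed to remain:
 *
 *	refcount_dec(&f->ref);	// the cache still holds its own reference
 */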

extern __must_check bool refcount_dec_if_one(refcount_t *r);
extern __must_check bool refcount_dec_not_one(refcount_t *r);
extern __must_check bool refcount_dec_and_mutex_lock(refcount_t *r, struct mutex *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock(refcount_t *r, spinlock_t *lock) __cond_acquires(lock);
extern __must_check bool refcount_dec_and_lock_irqsave(refcount_t *r,
						       spinlock_t *lock,
						       unsigned long *flags) __cond_acquires(lock);
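
/*
 * Illustrative sketch: refcount_dec_and_mutex_lock() is useful when the final
 * put must also unlink the object from a structure protected by a mutex; the
 * lock is taken (and returned held) only for the 1->0 transition.
 * 'foo_mutex' and foo_unhash_put() are hypothetical:
 *
 *	static void foo_unhash_put(struct foo *f)
 *	{
 *		if (refcount_dec_and_mutex_lock(&f->ref, &foo_mutex)) {
 *			list_del(&f->node);
 *			mutex_unlock(&foo_mutex);
 *			kfree(f);
 *		}
 *	}
 */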
#endif /* _LINUX_REFCOUNT_H */