// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/atomic.h>

/*
 * This is an implementation of the notion of "decrement a
 * reference count, and return locked if it decremented to zero".
 *
 * NOTE NOTE NOTE! This is _not_ equivalent to
 *
 *	if (atomic_dec_and_test(&atomic)) {
 *		spin_lock(&lock);
 *		return 1;
 *	}
 *	return 0;
 *
 * because the spin-lock and the decrement must be
 * "atomic".
 */
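/*
 * Concretely: with the non-atomic version above, the counter can hit
 * zero before the lock is taken, so a concurrent path that looks the
 * object up under the lock may still grab a new reference while the
 * final put is about to tear the object down.  By only letting the
 * count reach zero with the lock already held, that window is closed.
 */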
int _atomic_dec_and_lock(atomic_t *atomic, spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock(lock);
	return 0;
}

EXPORT_SYMBOL(_atomic_dec_and_lock);

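/*
 * Usage sketch (a hypothetical caller, not from this file): the final
 * put unlinks the object while still holding the lock that lookup
 * paths take before grabbing a new reference.  Callers normally reach
 * this function through the atomic_dec_and_lock() wrapper in
 * <linux/spinlock.h>.
 *
 *	void obj_put(struct obj *obj)
 *	{
 *		if (atomic_dec_and_lock(&obj->count, &obj_list_lock)) {
 *			list_del(&obj->node);
 *			spin_unlock(&obj_list_lock);
 *			kfree(obj);
 *		}
 *	}
 */
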
int _atomic_dec_and_lock_irqsave(atomic_t *atomic, spinlock_t *lock,
				 unsigned long *flags)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;
	spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_lock_irqsave);

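/*
 * Usage sketch for the _irqsave variant (hypothetical caller, not
 * from this file): the caller supplies storage for the saved flags
 * and, on a successful (locked) return, must pass them back when
 * unlocking.
 *
 *	unsigned long flags;
 *
 *	if (_atomic_dec_and_lock_irqsave(&obj->count, &obj->lock, &flags)) {
 *		list_del(&obj->node);
 *		spin_unlock_irqrestore(&obj->lock, flags);
 *		kfree(obj);
 *	}
 */
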
int _atomic_dec_and_raw_lock(atomic_t *atomic, raw_spinlock_t *lock)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	raw_spin_lock(lock);
	if (atomic_dec_and_test(atomic))
		return 1;
	raw_spin_unlock(lock);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock);

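/*
 * Note: the _raw_ variants operate on a raw_spinlock_t, which remains
 * a spinning lock even on PREEMPT_RT (where a plain spinlock_t turns
 * into a sleeping lock), so they can be used from contexts that must
 * never sleep.
 */
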
int _atomic_dec_and_raw_lock_irqsave(atomic_t *atomic, raw_spinlock_t *lock,
				     unsigned long *flags)
{
	/* Subtract 1 from counter unless that drops it to 0 (ie. it was 1) */
	if (atomic_add_unless(atomic, -1, 1))
		return 0;

	/* Otherwise do it the slow way */
	raw_spin_lock_irqsave(lock, *flags);
	if (atomic_dec_and_test(atomic))
		return 1;
	raw_spin_unlock_irqrestore(lock, *flags);
	return 0;
}
EXPORT_SYMBOL(_atomic_dec_and_raw_lock_irqsave);