// SPDX-License-Identifier: GPL-2.0
#include <linux/export.h>
#include <linux/lockref.h>

#if USE_CMPXCHG_LOCKREF

/*
 * Allow weakly-ordered processors to spin on the lock word itself.
 *
 * Note that the "cmpxchg()" reloads the "old" value for the
 * failure case, so each retry operates on a fresh snapshot.
 */
#define CMPXCHG_LOOP(CODE, SUCCESS) do {					\
	int retry = 100;							\
	struct lockref old;							\
	BUILD_BUG_ON(sizeof(old) != 8);						\
	old.lock_count = READ_ONCE(lockref->lock_count);			\
	while (likely(arch_spin_value_unlocked(old.lock.rlock.raw_lock))) {	\
		struct lockref new = old;					\
		CODE								\
		if (likely(try_cmpxchg64_relaxed(&lockref->lock_count,		\
						 &old.lock_count,		\
						 new.lock_count))) {		\
			SUCCESS;						\
		}								\
		if (!--retry)							\
			break;							\
	}									\
} while (0)

#else

/* No 64-bit cmpxchg available: callers fall through to the spinlock path. */
#define CMPXCHG_LOOP(CODE, SUCCESS) do { } while (0)

#endif
34 | |
35 | /** |
36 | * lockref_get - Increments reference count unconditionally |
37 | * @lockref: pointer to lockref structure |
38 | * |
39 | * This operation is only valid if you already hold a reference |
40 | * to the object, so you know the count cannot be zero. |
41 | */ |
42 | void lockref_get(struct lockref *lockref) |
43 | { |
44 | CMPXCHG_LOOP( |
45 | new.count++; |
46 | , |
47 | return; |
48 | ); |
49 | |
50 | spin_lock(lock: &lockref->lock); |
51 | lockref->count++; |
52 | spin_unlock(lock: &lockref->lock); |
53 | } |
54 | EXPORT_SYMBOL(lockref_get); |
55 | |
56 | /** |
57 | * lockref_get_not_zero - Increments count unless the count is 0 or dead |
58 | * @lockref: pointer to lockref structure |
59 | * Return: 1 if count updated successfully or 0 if count was zero |
60 | */ |
61 | int lockref_get_not_zero(struct lockref *lockref) |
62 | { |
63 | int retval; |
64 | |
65 | CMPXCHG_LOOP( |
66 | new.count++; |
67 | if (old.count <= 0) |
68 | return 0; |
69 | , |
70 | return 1; |
71 | ); |
72 | |
73 | spin_lock(lock: &lockref->lock); |
74 | retval = 0; |
75 | if (lockref->count > 0) { |
76 | lockref->count++; |
77 | retval = 1; |
78 | } |
79 | spin_unlock(lock: &lockref->lock); |
80 | return retval; |
81 | } |
82 | EXPORT_SYMBOL(lockref_get_not_zero); |
83 | |
84 | /** |
85 | * lockref_put_not_zero - Decrements count unless count <= 1 before decrement |
86 | * @lockref: pointer to lockref structure |
87 | * Return: 1 if count updated successfully or 0 if count would become zero |
88 | */ |
89 | int lockref_put_not_zero(struct lockref *lockref) |
90 | { |
91 | int retval; |
92 | |
93 | CMPXCHG_LOOP( |
94 | new.count--; |
95 | if (old.count <= 1) |
96 | return 0; |
97 | , |
98 | return 1; |
99 | ); |
100 | |
101 | spin_lock(lock: &lockref->lock); |
102 | retval = 0; |
103 | if (lockref->count > 1) { |
104 | lockref->count--; |
105 | retval = 1; |
106 | } |
107 | spin_unlock(lock: &lockref->lock); |
108 | return retval; |
109 | } |
110 | EXPORT_SYMBOL(lockref_put_not_zero); |
111 | |
112 | /** |
113 | * lockref_put_return - Decrement reference count if possible |
114 | * @lockref: pointer to lockref structure |
115 | * |
116 | * Decrement the reference count and return the new value. |
117 | * If the lockref was dead or locked, return an error. |
118 | */ |
119 | int lockref_put_return(struct lockref *lockref) |
120 | { |
121 | CMPXCHG_LOOP( |
122 | new.count--; |
123 | if (old.count <= 0) |
124 | return -1; |
125 | , |
126 | return new.count; |
127 | ); |
128 | return -1; |
129 | } |
130 | EXPORT_SYMBOL(lockref_put_return); |
131 | |
132 | /** |
133 | * lockref_put_or_lock - decrements count unless count <= 1 before decrement |
134 | * @lockref: pointer to lockref structure |
135 | * Return: 1 if count updated successfully or 0 if count <= 1 and lock taken |
136 | */ |
137 | int lockref_put_or_lock(struct lockref *lockref) |
138 | { |
139 | CMPXCHG_LOOP( |
140 | new.count--; |
141 | if (old.count <= 1) |
142 | break; |
143 | , |
144 | return 1; |
145 | ); |
146 | |
147 | spin_lock(lock: &lockref->lock); |
148 | if (lockref->count <= 1) |
149 | return 0; |
150 | lockref->count--; |
151 | spin_unlock(lock: &lockref->lock); |
152 | return 1; |
153 | } |
154 | EXPORT_SYMBOL(lockref_put_or_lock); |
155 | |
156 | /** |
157 | * lockref_mark_dead - mark lockref dead |
158 | * @lockref: pointer to lockref structure |
159 | */ |
160 | void lockref_mark_dead(struct lockref *lockref) |
161 | { |
162 | assert_spin_locked(&lockref->lock); |
163 | lockref->count = -128; |
164 | } |
165 | EXPORT_SYMBOL(lockref_mark_dead); |
166 | |
167 | /** |
168 | * lockref_get_not_dead - Increments count unless the ref is dead |
169 | * @lockref: pointer to lockref structure |
170 | * Return: 1 if count updated successfully or 0 if lockref was dead |
171 | */ |
172 | int lockref_get_not_dead(struct lockref *lockref) |
173 | { |
174 | int retval; |
175 | |
176 | CMPXCHG_LOOP( |
177 | new.count++; |
178 | if (old.count < 0) |
179 | return 0; |
180 | , |
181 | return 1; |
182 | ); |
183 | |
184 | spin_lock(lock: &lockref->lock); |
185 | retval = 0; |
186 | if (lockref->count >= 0) { |
187 | lockref->count++; |
188 | retval = 1; |
189 | } |
190 | spin_unlock(lock: &lockref->lock); |
191 | return retval; |
192 | } |
193 | EXPORT_SYMBOL(lockref_get_not_dead); |
194 | |