/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_BITOPS_H
#define _LINUX_BITOPS_H

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

/* Set bits in the first 'n' bytes when loaded from memory */
#ifdef __LITTLE_ENDIAN
#  define aligned_byte_mask(n) ((1UL << 8*(n))-1)
#else
#  define aligned_byte_mask(n) (~0xffUL << (BITS_PER_LONG - 8 - 8*(n)))
#endif
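/*
 * Illustrative values: on a 64-bit little-endian machine,
 * aligned_byte_mask(2) == 0x000000000000ffff; on a 64-bit big-endian
 * machine the same call yields 0xffff000000000000. Either way the mask
 * covers the two lowest-addressed bytes of the word once it is loaded
 * from memory.
 */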

#define BITS_PER_TYPE(type)	(sizeof(type) * BITS_PER_BYTE)
#define BITS_TO_LONGS(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(long))
#define BITS_TO_U64(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u64))
#define BITS_TO_U32(nr)		__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(u32))
#define BITS_TO_BYTES(nr)	__KERNEL_DIV_ROUND_UP(nr, BITS_PER_TYPE(char))
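/*
 * Example: these macros round up, so on a 64-bit kernel
 * BITS_TO_LONGS(65) == 2 and BITS_TO_BYTES(9) == 2, while
 * BITS_TO_LONGS(64) == 1 exactly.
 */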

extern unsigned int __sw_hweight8(unsigned int w);
extern unsigned int __sw_hweight16(unsigned int w);
extern unsigned int __sw_hweight32(unsigned int w);
extern unsigned long __sw_hweight64(__u64 w);

/*
 * Defined here because those may be needed by architecture-specific static
 * inlines.
 */

#include <asm-generic/bitops/generic-non-atomic.h>

/*
 * Many architecture-specific non-atomic bitops contain inline asm code and due
 * to that the compiler can't optimize them to compile-time expressions or
 * constants. In contrast, generic_*() helpers are defined in pure C and
 * compilers optimize them just as well.
 * Therefore, to make `unsigned long foo = 0; __set_bit(BAR, &foo)` effectively
 * equal to `unsigned long foo = BIT(BAR)`, pick the generic C alternative when
 * the arguments can be resolved at compile time. That expression itself is a
 * constant and doesn't bring any functional changes in the remaining cases.
 * The casts to `uintptr_t` are needed to mitigate `-Waddress` warnings when
 * passing a bitmap from .bss or .data (-> `!!addr` is always true).
 */
#define bitop(op, nr, addr)						\
	((__builtin_constant_p(nr) &&					\
	  __builtin_constant_p((uintptr_t)(addr) != (uintptr_t)NULL) &&	\
	  (uintptr_t)(addr) != (uintptr_t)NULL &&			\
	  __builtin_constant_p(*(const unsigned long *)(addr))) ?	\
	 const##op(nr, addr) : op(nr, addr))

#define __set_bit(nr, addr)		bitop(___set_bit, nr, addr)
#define __clear_bit(nr, addr)		bitop(___clear_bit, nr, addr)
#define __change_bit(nr, addr)		bitop(___change_bit, nr, addr)
#define __test_and_set_bit(nr, addr)	bitop(___test_and_set_bit, nr, addr)
#define __test_and_clear_bit(nr, addr)	bitop(___test_and_clear_bit, nr, addr)
#define __test_and_change_bit(nr, addr)	bitop(___test_and_change_bit, nr, addr)
#define test_bit(nr, addr)		bitop(_test_bit, nr, addr)
#define test_bit_acquire(nr, addr)	bitop(_test_bit_acquire, nr, addr)
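/*
 * Example of the constant folding described above: with a local bitmap
 * whose contents the compiler can see, e.g.
 *
 *	unsigned long foo = 0;
 *	__set_bit(5, &foo);
 *
 * the generic const___set_bit() path is chosen and foo is known at
 * compile time to equal BIT(5).
 */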

/*
 * Include this here because some architectures need generic_ffs/fls in
 * scope
 */
#include <asm/bitops.h>

/* Check that the bitops prototypes are sane */
#define __check_bitop_pr(name)						\
	static_assert(__same_type(arch_##name, generic_##name) &&	\
		      __same_type(const_##name, generic_##name) &&	\
		      __same_type(_##name, generic_##name))

__check_bitop_pr(__set_bit);
__check_bitop_pr(__clear_bit);
__check_bitop_pr(__change_bit);
__check_bitop_pr(__test_and_set_bit);
__check_bitop_pr(__test_and_clear_bit);
__check_bitop_pr(__test_and_change_bit);
__check_bitop_pr(test_bit);

#undef __check_bitop_pr

static inline int get_bitmask_order(unsigned int count)
{
	int order;

	order = fls(count);
	return order;	/* We could be slightly more clever with -1 here... */
}

static __always_inline unsigned long hweight_long(unsigned long w)
{
	return sizeof(w) == 4 ? hweight32(w) : hweight64((__u64)w);
}
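/*
 * Example: hweight ("Hamming weight") counts the set bits in a value,
 * so hweight_long(0xf0UL) == 4 and hweight_long(0UL) == 0, regardless
 * of word size.
 */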

/**
 * rol64 - rotate a 64-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 rol64(__u64 word, unsigned int shift)
{
	return (word << (shift & 63)) | (word >> ((-shift) & 63));
}
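/*
 * Note the masking idiom used by all the rotate helpers below as well:
 * for shift == 0, both shift counts reduce to zero ((-0) & 63 == 0), so
 * the expression never shifts by the full word width, which would be
 * undefined behaviour in C. For example, rol64(x, 0) == x and
 * rol64(1, 63) == 1ULL << 63.
 */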

/**
 * ror64 - rotate a 64-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u64 ror64(__u64 word, unsigned int shift)
{
	return (word >> (shift & 63)) | (word << ((-shift) & 63));
}

/**
 * rol32 - rotate a 32-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 rol32(__u32 word, unsigned int shift)
{
	return (word << (shift & 31)) | (word >> ((-shift) & 31));
}

/**
 * ror32 - rotate a 32-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u32 ror32(__u32 word, unsigned int shift)
{
	return (word >> (shift & 31)) | (word << ((-shift) & 31));
}

/**
 * rol16 - rotate a 16-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 rol16(__u16 word, unsigned int shift)
{
	return (word << (shift & 15)) | (word >> ((-shift) & 15));
}

/**
 * ror16 - rotate a 16-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u16 ror16(__u16 word, unsigned int shift)
{
	return (word >> (shift & 15)) | (word << ((-shift) & 15));
}

/**
 * rol8 - rotate an 8-bit value left
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 rol8(__u8 word, unsigned int shift)
{
	return (word << (shift & 7)) | (word >> ((-shift) & 7));
}

/**
 * ror8 - rotate an 8-bit value right
 * @word: value to rotate
 * @shift: bits to roll
 */
static inline __u8 ror8(__u8 word, unsigned int shift)
{
	return (word >> (shift & 7)) | (word << ((-shift) & 7));
}

/**
 * sign_extend32 - sign extend a 32-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<32) to sign bit
 *
 * This is safe to use for 16- and 8-bit types as well.
 */
static __always_inline __s32 sign_extend32(__u32 value, int index)
{
	__u8 shift = 31 - index;
	return (__s32)(value << shift) >> shift;
}
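/*
 * Example: treating bit 7 as the sign bit, sign_extend32(0x80, 7)
 * returns -128 (0xffffff80) and sign_extend32(0x7f, 7) returns 127.
 */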

/**
 * sign_extend64 - sign extend a 64-bit value using specified bit as sign-bit
 * @value: value to sign extend
 * @index: 0 based bit index (0<=index<64) to sign bit
 */
static __always_inline __s64 sign_extend64(__u64 value, int index)
{
	__u8 shift = 63 - index;
	return (__s64)(value << shift) >> shift;
}

static inline unsigned int fls_long(unsigned long l)
{
	if (sizeof(l) == 4)
		return fls(l);
	return fls64(l);
}

static inline int get_count_order(unsigned int count)
{
	if (count == 0)
		return -1;

	return fls(--count);
}
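/*
 * Example: get_count_order() returns the order after rounding up to a
 * power of two, so get_count_order(4) == 2, get_count_order(5) == 3
 * (5 rounds up to 8 == 2^3), and get_count_order(1) == 0.
 */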

/**
 * get_count_order_long - get order after rounding @l up to power of 2
 * @l: parameter
 *
 * It is the same as get_count_order() but with a long type parameter.
 */
static inline int get_count_order_long(unsigned long l)
{
	if (l == 0UL)
		return -1;
	return (int)fls_long(--l);
}

/**
 * __ffs64 - find first set bit in a 64 bit word
 * @word: The 64 bit word
 *
 * On 64-bit arches this is a synonym for __ffs.
 * The result is not defined if no bits are set, so check that @word
 * is non-zero before calling this.
 */
static inline unsigned long __ffs64(u64 word)
{
#if BITS_PER_LONG == 32
	if (((u32)word) == 0UL)
		return __ffs((u32)(word >> 32)) + 32;
#elif BITS_PER_LONG != 64
#error BITS_PER_LONG not 32 or 64
#endif
	return __ffs((unsigned long)word);
}
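/*
 * Example: __ffs64() returns the zero-based index of the least
 * significant set bit, so __ffs64(0x8) == 3 and
 * __ffs64(0x100000000ULL) == 32 on both 32- and 64-bit kernels.
 */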

/**
 * fns - find N'th set bit in a word
 * @word: The word to search
 * @n: Zero-based index of the set bit to find
 */
static inline unsigned long fns(unsigned long word, unsigned int n)
{
	unsigned int bit;

	while (word) {
		bit = __ffs(word);
		if (n-- == 0)
			return bit;
		__clear_bit(bit, &word);
	}

	return BITS_PER_LONG;
}
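/*
 * Example: fns(0b1010, 0) == 1 and fns(0b1010, 1) == 3, counting set
 * bits from the least significant end. If fewer than @n + 1 bits are
 * set, BITS_PER_LONG is returned: fns(0b1010, 2) == BITS_PER_LONG.
 */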

/**
 * assign_bit - Assign value to a bit in memory
 * @nr: the bit to set
 * @addr: the address to start counting from
 * @value: the value to assign
 */
static __always_inline void assign_bit(long nr, volatile unsigned long *addr,
				       bool value)
{
	if (value)
		set_bit(nr, addr);
	else
		clear_bit(nr, addr);
}

static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
					 bool value)
{
	if (value)
		__set_bit(nr, addr);
	else
		__clear_bit(nr, addr);
}

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)					\
({								\
	typecheck_pointer(*(addr));				\
	__set_bit(nr, (unsigned long *)(addr));			\
})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)				\
({								\
	typecheck_pointer(*(addr));				\
	__clear_bit(nr, (unsigned long *)(addr));		\
})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)				\
({								\
	typecheck_pointer(*(addr));				\
	test_bit(nr, (unsigned long *)(addr));			\
})

#ifdef __KERNEL__

#ifndef set_mask_bits
#define set_mask_bits(ptr, mask, bits)					\
({									\
	const typeof(*(ptr)) mask__ = (mask), bits__ = (bits);		\
	typeof(*(ptr)) old__, new__;					\
									\
	old__ = READ_ONCE(*(ptr));					\
	do {								\
		new__ = (old__ & ~mask__) | bits__;			\
	} while (!try_cmpxchg(ptr, &old__, new__));			\
									\
	old__;								\
})
#endif
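/*
 * Example: set_mask_bits() atomically replaces the bits selected by
 * @mask with @bits and returns the previous value, so
 *
 *	unsigned long flags = 0x1234;
 *	set_mask_bits(&flags, 0xff, 0x2a);
 *
 * leaves flags == 0x122a and evaluates to the old value 0x1234.
 */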

#ifndef bit_clear_unless
#define bit_clear_unless(ptr, clear, test)				\
({									\
	const typeof(*(ptr)) clear__ = (clear), test__ = (test);	\
	typeof(*(ptr)) old__, new__;					\
									\
	old__ = READ_ONCE(*(ptr));					\
	do {								\
		if (old__ & test__)					\
			break;						\
		new__ = old__ & ~clear__;				\
	} while (!try_cmpxchg(ptr, &old__, new__));			\
									\
	!(old__ & test__);						\
})
#endif
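/*
 * Example: bit_clear_unless() atomically clears the @clear bits only
 * when none of the @test bits are set, and returns true in that case.
 * With *ptr == 0x5, bit_clear_unless(ptr, 0x1, 0x2) clears bit 0 and
 * evaluates to true; with *ptr == 0x7 it leaves the value untouched
 * and evaluates to false.
 */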

#endif /* __KERNEL__ */
#endif