/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H

#ifndef __ASSEMBLY__

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/cleanup.h>
#include <linux/errno.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/bitmap-str.h>

struct device;

/*
 * bitmaps provide bit arrays that consume one or more unsigned
 * longs.  The bitmap interface and available operations are listed
 * here, in bitmap.h.
 *
 * Function implementations generic to all architectures are in
 * lib/bitmap.c.  Function implementations that are architecture
 * specific are in various include/asm-<arch>/bitops.h headers
 * and other arch/<arch> specific files.
 *
 * See lib/bitmap.c for more details.
 */

/**
 * DOC: bitmap overview
 *
 * The available bitmap operations and their rough meaning in the
 * case that the bitmap is a single unsigned long are thus:
 *
 * The generated code is more efficient when nbits is known at
 * compile-time and at most BITS_PER_LONG.
 *
 * ::
 *
 *  bitmap_zero(dst, nbits)                     *dst = 0UL
 *  bitmap_fill(dst, nbits)                     *dst = ~0UL
 *  bitmap_copy(dst, src, nbits)                *dst = *src
 *  bitmap_and(dst, src1, src2, nbits)          *dst = *src1 & *src2
 *  bitmap_or(dst, src1, src2, nbits)           *dst = *src1 | *src2
 *  bitmap_xor(dst, src1, src2, nbits)          *dst = *src1 ^ *src2
 *  bitmap_andnot(dst, src1, src2, nbits)       *dst = *src1 & ~(*src2)
 *  bitmap_complement(dst, src, nbits)          *dst = ~(*src)
 *  bitmap_equal(src1, src2, nbits)             Are *src1 and *src2 equal?
 *  bitmap_intersects(src1, src2, nbits)        Do *src1 and *src2 overlap?
 *  bitmap_subset(src1, src2, nbits)            Is *src1 a subset of *src2?
 *  bitmap_empty(src, nbits)                    Are all bits zero in *src?
 *  bitmap_full(src, nbits)                     Are all bits set in *src?
 *  bitmap_weight(src, nbits)                   Hamming Weight: number of set bits
 *  bitmap_weight_and(src1, src2, nbits)        Hamming Weight of and'ed bitmap
 *  bitmap_weight_andnot(src1, src2, nbits)     Hamming Weight of andnot'ed bitmap
 *  bitmap_set(dst, pos, nbits)                 Set specified bit area
 *  bitmap_clear(dst, pos, nbits)               Clear specified bit area
 *  bitmap_find_next_zero_area(buf, len, pos, n, mask)  Find bit free area
 *  bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off)  as above
 *  bitmap_shift_right(dst, src, n, nbits)      *dst = *src >> n
 *  bitmap_shift_left(dst, src, n, nbits)       *dst = *src << n
 *  bitmap_cut(dst, src, first, n, nbits)       Cut n bits from first, copy rest
 *  bitmap_replace(dst, old, new, mask, nbits)  *dst = (*old & ~(*mask)) | (*new & *mask)
 *  bitmap_scatter(dst, src, mask, nbits)       *dst = map(dense, sparse)(src)
 *  bitmap_gather(dst, src, mask, nbits)        *dst = map(sparse, dense)(src)
 *  bitmap_remap(dst, src, old, new, nbits)     *dst = map(old, new)(src)
 *  bitmap_bitremap(oldbit, old, new, nbits)    newbit = map(old, new)(oldbit)
 *  bitmap_onto(dst, orig, relmap, nbits)       *dst = orig relative to relmap
 *  bitmap_fold(dst, orig, sz, nbits)           dst bits = orig bits mod sz
 *  bitmap_parse(buf, buflen, dst, nbits)       Parse bitmap dst from kernel buf
 *  bitmap_parse_user(ubuf, ulen, dst, nbits)   Parse bitmap dst from user buf
 *  bitmap_parselist(buf, dst, nbits)           Parse bitmap dst from kernel buf
 *  bitmap_parselist_user(buf, dst, nbits)      Parse bitmap dst from user buf
 *  bitmap_find_free_region(bitmap, bits, order)  Find and allocate bit region
 *  bitmap_release_region(bitmap, pos, order)   Free specified bit region
 *  bitmap_allocate_region(bitmap, pos, order)  Allocate specified bit region
 *  bitmap_from_arr32(dst, buf, nbits)          Copy nbits from u32[] buf to dst
 *  bitmap_from_arr64(dst, buf, nbits)          Copy nbits from u64[] buf to dst
 *  bitmap_to_arr32(buf, src, nbits)            Copy nbits from src to u32[] buf
 *  bitmap_to_arr64(buf, src, nbits)            Copy nbits from src to u64[] buf
 *  bitmap_get_value8(map, start)               Get an 8-bit value from map at start
 *  bitmap_set_value8(map, value, start)        Set an 8-bit value in map at start
 *
 * Note: bitmap_zero() and bitmap_fill() operate over whole unsigned longs,
 * that is, bits beyond @nbits up to the unsigned long boundary will be
 * zeroed or filled as well.  Consider using bitmap_clear() or bitmap_set()
 * for explicit zeroing or filling, respectively.
 */
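
/*
 * Example: a minimal usage sketch of the operations above (not part of the
 * API; the names src1, src2 and dst are hypothetical).  Because nbits is a
 * compile-time constant of at most BITS_PER_LONG, the small_const_nbits()
 * fast paths below reduce each call to a single word operation:
 *
 *	DECLARE_BITMAP(src1, 8);
 *	DECLARE_BITMAP(src2, 8);
 *	DECLARE_BITMAP(dst, 8);
 *
 *	bitmap_zero(src1, 8);
 *	bitmap_fill(src2, 8);
 *	bitmap_set(src1, 2, 3);			// src1 = 0x1c (bits 2..4)
 *	bitmap_and(dst, src1, src2, 8);		// dst  = 0x1c
 *	WARN_ON(bitmap_weight(dst, 8) != 3);
 */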

/**
 * DOC: bitmap bitops
 *
 * Also the following operations in asm/bitops.h apply to bitmaps::
 *
 *  set_bit(bit, addr)                  *addr |= bit
 *  clear_bit(bit, addr)                *addr &= ~bit
 *  change_bit(bit, addr)               *addr ^= bit
 *  test_bit(bit, addr)                 Is bit set in *addr?
 *  test_and_set_bit(bit, addr)         Set bit and return old value
 *  test_and_clear_bit(bit, addr)       Clear bit and return old value
 *  test_and_change_bit(bit, addr)      Change bit and return old value
 *  find_first_zero_bit(addr, nbits)    Position first zero bit in *addr
 *  find_first_bit(addr, nbits)         Position first set bit in *addr
 *  find_next_zero_bit(addr, nbits, bit)
 *                                      Position next zero bit in *addr >= bit
 *  find_next_bit(addr, nbits, bit)     Position next set bit in *addr >= bit
 *  find_next_and_bit(addr1, addr2, nbits, bit)
 *                                      Same as find_next_bit, but in
 *                                      (*addr1 & *addr2)
 *
 */

/**
 * DOC: declare bitmap
 * The DECLARE_BITMAP(name, bits) macro, in linux/types.h, can be used
 * to declare an array named 'name' of just enough unsigned longs to
 * contain all bit positions from 0 to 'bits' - 1.
 */
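
/*
 * For instance, a sketch with a hypothetical name: the following declares
 * two unsigned longs on a 64-bit kernel (one word per 64 bits), covering
 * bit positions 0..127:
 *
 *	DECLARE_BITMAP(irq_pending, 128);
 */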

/*
 * Allocation and deallocation of bitmap.
 * Provided in lib/bitmap.c to avoid circular dependency.
 */
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
void bitmap_free(const unsigned long *bitmap);

DEFINE_FREE(bitmap, unsigned long *, if (_T) bitmap_free(_T))
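
/*
 * Example: a sketch of scope-based cleanup with the DEFINE_FREE() class
 * above.  The __free(bitmap) annotation from linux/cleanup.h invokes
 * bitmap_free() automatically when 'map' goes out of scope, on error and
 * success paths alike ('demo_nbits' and do_something() are hypothetical):
 *
 *	int demo(unsigned int demo_nbits)
 *	{
 *		unsigned long *map __free(bitmap) =
 *				bitmap_zalloc(demo_nbits, GFP_KERNEL);
 *
 *		if (!map)
 *			return -ENOMEM;
 *
 *		return do_something(map);	// map freed on return
 *	}
 */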

/* Managed variants of the above. */
unsigned long *devm_bitmap_alloc(struct device *dev,
				 unsigned int nbits, gfp_t flags);
unsigned long *devm_bitmap_zalloc(struct device *dev,
				  unsigned int nbits, gfp_t flags);

/*
 * lib/bitmap.c provides these functions:
 */

bool __bitmap_equal(const unsigned long *bitmap1,
		    const unsigned long *bitmap2, unsigned int nbits);
bool __pure __bitmap_or_equal(const unsigned long *src1,
			      const unsigned long *src2,
			      const unsigned long *src3,
			      unsigned int nbits);
void __bitmap_complement(unsigned long *dst, const unsigned long *src,
			 unsigned int nbits);
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
			  unsigned int shift, unsigned int nbits);
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
			 unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
		unsigned int first, unsigned int cut, unsigned int nbits);
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
		  const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
		 const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
		  const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
		     const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
		      const unsigned long *old, const unsigned long *new,
		      const unsigned long *mask, unsigned int nbits);
bool __bitmap_intersects(const unsigned long *bitmap1,
			 const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_subset(const unsigned long *bitmap1,
		     const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
				 const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weight_andnot(const unsigned long *bitmap1,
				    const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);

unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
					     unsigned long size,
					     unsigned long start,
					     unsigned int nr,
					     unsigned long align_mask,
					     unsigned long align_offset);

/**
 * bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @align_mask: Alignment mask for zero area
 *
 * The @align_mask should be one less than a power of 2; the effect is that
 * the bit offset of every zero area this function finds is a multiple of
 * that power of 2.  An @align_mask of 0 means no alignment is required.
 */
static inline unsigned long
bitmap_find_next_zero_area(unsigned long *map,
			   unsigned long size,
			   unsigned long start,
			   unsigned int nr,
			   unsigned long align_mask)
{
	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, 0);
}
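
/*
 * Example: a sketch of claiming 4 clear bits aligned to a 4-bit boundary
 * (align_mask = 4 - 1) from a hypothetical 128-bit 'map'.  A return value
 * greater than or equal to the bitmap size means no suitable area exists:
 *
 *	unsigned long pos;
 *
 *	pos = bitmap_find_next_zero_area(map, 128, 0, 4, 3);
 *	if (pos >= 128)
 *		return -ENOSPC;
 *	bitmap_set(map, pos, 4);
 */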

void bitmap_remap(unsigned long *dst, const unsigned long *src,
		const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
		const unsigned long *old, const unsigned long *new, int bits);
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
		const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
		unsigned int sz, unsigned int nbits);

#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
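
/*
 * Worked example for the two masks above, assuming BITS_PER_LONG == 64:
 * for a region starting at bit 68, BITMAP_FIRST_WORD_MASK(68) == ~0UL << 4,
 * because the region begins 4 bits into its word; for a 68-bit bitmap,
 * BITMAP_LAST_WORD_MASK(68) == ~0UL >> 60, keeping only the 4 bits the
 * bitmap occupies in its last word.
 */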

static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	if (small_const_nbits(nbits))
		*dst = 0;
	else
		memset(dst, 0, len);
}

static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	if (small_const_nbits(nbits))
		*dst = ~0UL;
	else
		memset(dst, 0xff, len);
}

static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
			       unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	if (small_const_nbits(nbits))
		*dst = *src;
	else
		memcpy(dst, src, len);
}

/*
 * Copy bitmap and clear tail bits in last word.
 */
static inline void bitmap_copy_clear_tail(unsigned long *dst,
		const unsigned long *src, unsigned int nbits)
{
	bitmap_copy(dst, src, nbits);
	if (nbits % BITS_PER_LONG)
		dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}

/*
 * On 32-bit systems bitmaps are represented as u32 arrays internally.  On LE64
 * machines the order of the hi and lo parts of numbers matches the bitmap
 * structure.  In both cases no conversion is needed when copying data from/to
 * arrays of u32.  But in the LE64 case, the typecast in bitmap_copy_clear_tail()
 * may lead to out-of-bounds access.  To avoid that, neither the LE nor the BE
 * variant of the 64-bit architectures uses bitmap_copy_clear_tail().
 */
#if BITS_PER_LONG == 64
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
		       unsigned int nbits);
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
		     unsigned int nbits);
#else
#define bitmap_from_arr32(bitmap, buf, nbits)			\
	bitmap_copy_clear_tail((unsigned long *) (bitmap),	\
			       (const unsigned long *) (buf), (nbits))
#define bitmap_to_arr32(buf, bitmap, nbits)			\
	bitmap_copy_clear_tail((unsigned long *) (buf),		\
			       (const unsigned long *) (bitmap), (nbits))
#endif

/*
 * On 64-bit systems bitmaps are represented as u64 arrays internally.  So,
 * the conversion is not needed when copying data from/to arrays of u64.
 */
#if BITS_PER_LONG == 32
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
#else
#define bitmap_from_arr64(bitmap, buf, nbits)			\
	bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
#define bitmap_to_arr64(buf, bitmap, nbits)			\
	bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif
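
/*
 * Example: a sketch of round-tripping a bitmap through a u64 array, e.g.
 * for an ABI with a fixed 64-bit layout.  This works identically on 32-bit
 * and 64-bit kernels; the names are hypothetical:
 *
 *	DECLARE_BITMAP(map, 100);
 *	u64 buf[2];
 *
 *	bitmap_to_arr64(buf, map, 100);		// bits 100..127 of buf cleared
 *	bitmap_from_arr64(map, buf, 100);	// import it back
 */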

static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
	return __bitmap_and(dst, src1, src2, nbits);
}

static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = *src1 | *src2;
	else
		__bitmap_or(dst, src1, src2, nbits);
}

static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = *src1 ^ *src2;
	else
		__bitmap_xor(dst, src1, src2, nbits);
}

static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
			const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
	return __bitmap_andnot(dst, src1, src2, nbits);
}

static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
			unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = ~(*src);
	else
		__bitmap_complement(dst, src, nbits);
}

#ifdef __LITTLE_ENDIAN
#define BITMAP_MEM_ALIGNMENT 8
#else
#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)

static inline bool bitmap_equal(const unsigned long *src1,
				const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
	if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
	    IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
		return !memcmp(src1, src2, nbits / 8);
	return __bitmap_equal(src1, src2, nbits);
}

/**
 * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
 * @src1: Pointer to bitmap 1
 * @src2: Pointer to bitmap 2; will be or'ed with bitmap 1
 * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
 * @nbits: number of bits in each of these bitmaps
 *
 * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
 */
static inline bool bitmap_or_equal(const unsigned long *src1,
				   const unsigned long *src2,
				   const unsigned long *src3,
				   unsigned int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_or_equal(src1, src2, src3, nbits);

	return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}

static inline bool bitmap_intersects(const unsigned long *src1,
				     const unsigned long *src2,
				     unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
	else
		return __bitmap_intersects(src1, src2, nbits);
}

static inline bool bitmap_subset(const unsigned long *src1,
				 const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
	else
		return __bitmap_subset(src1, src2, nbits);
}

static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !(*src & BITMAP_LAST_WORD_MASK(nbits));

	return find_first_bit(src, nbits) == nbits;
}

static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !(~(*src) & BITMAP_LAST_WORD_MASK(nbits));

	return find_first_zero_bit(src, nbits) == nbits;
}

static __always_inline
unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
	return __bitmap_weight(src, nbits);
}

static __always_inline
unsigned long bitmap_weight_and(const unsigned long *src1,
				const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
	return __bitmap_weight_and(src1, src2, nbits);
}

static __always_inline
unsigned long bitmap_weight_andnot(const unsigned long *src1,
				   const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return hweight_long(*src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits));
	return __bitmap_weight_andnot(src1, src2, nbits);
}

static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
		unsigned int nbits)
{
	if (__builtin_constant_p(nbits) && nbits == 1)
		__set_bit(start, map);
	else if (small_const_nbits(start + nbits))
		*map |= GENMASK(start + nbits - 1, start);
	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
		memset((char *)map + start / 8, 0xff, nbits / 8);
	else
		__bitmap_set(map, start, nbits);
}

static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
		unsigned int nbits)
{
	if (__builtin_constant_p(nbits) && nbits == 1)
		__clear_bit(start, map);
	else if (small_const_nbits(start + nbits))
		*map &= ~GENMASK(start + nbits - 1, start);
	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
		memset((char *)map + start / 8, 0, nbits / 8);
	else
		__bitmap_clear(map, start, nbits);
}

static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
				unsigned int shift, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
	else
		__bitmap_shift_right(dst, src, shift, nbits);
}

static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
				unsigned int shift, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
	else
		__bitmap_shift_left(dst, src, shift, nbits);
}

static inline void bitmap_replace(unsigned long *dst,
				  const unsigned long *old,
				  const unsigned long *new,
				  const unsigned long *mask,
				  unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = (*old & ~(*mask)) | (*new & *mask);
	else
		__bitmap_replace(dst, old, new, mask, nbits);
}

/**
 * bitmap_scatter - Scatter a bitmap according to the given mask
 * @dst: scattered bitmap
 * @src: gathered bitmap
 * @mask: mask representing bits to assign to in the scattered bitmap
 * @nbits: number of bits in each of these bitmaps
 *
 * Scatters bitmap with sequential bits according to the given @mask.
 *
 * Example:
 * If @src bitmap = 0x005a, with @mask = 0x1313, @dst will be 0x0302.
 *
 * Or in binary form
 * @src			@mask			@dst
 * 0000000001011010	0001001100010011	0000001100000010
 *
 * (Bits 0, 1, 2, 3, 4, 5 are copied to the bits 0, 1, 4, 8, 9, 12)
 *
 * A more 'visual' description of the operation::
 *
 *	src:  0000000001011010
 *	                ||||||
 *	         +------+|||||
 *	         |  +----+||||
 *	         |  |+----+|||
 *	         |  ||   +-+||
 *	         |  ||   |  ||
 *	mask: ...v..vv...v..vv
 *	      ...0..11...0..10
 *	dst:  0000001100000010
 *
 * A relationship exists between bitmap_scatter() and bitmap_gather().
 * bitmap_gather() can be seen as the 'reverse' bitmap_scatter() operation.
 * See bitmap_gather() for details related to this relationship.
 */
static inline void bitmap_scatter(unsigned long *dst, const unsigned long *src,
				  const unsigned long *mask, unsigned int nbits)
{
	unsigned int n = 0;
	unsigned int bit;

	bitmap_zero(dst, nbits);

	for_each_set_bit(bit, mask, nbits)
		__assign_bit(bit, dst, test_bit(n++, src));
}

/**
 * bitmap_gather - Gather a bitmap according to given mask
 * @dst: gathered bitmap
 * @src: scattered bitmap
 * @mask: mask representing bits to extract from in the scattered bitmap
 * @nbits: number of bits in each of these bitmaps
 *
 * Gathers bitmap with sparse bits according to the given @mask.
 *
 * Example:
 * If @src bitmap = 0x0302, with @mask = 0x1313, @dst will be 0x001a.
 *
 * Or in binary form
 * @src			@mask			@dst
 * 0000001100000010	0001001100010011	0000000000011010
 *
 * (Bits 0, 1, 4, 8, 9, 12 are copied to the bits 0, 1, 2, 3, 4, 5)
 *
 * A more 'visual' description of the operation::
 *
 *	mask: ...v..vv...v..vv
 *	src:  0000001100000010
 *	         ^  ^^   ^   0
 *	         |  ||   |  10
 *	         |  ||   > 010
 *	         |  |+--> 1010
 *	         |  +--> 11010
 *	         +----> 011010
 *	dst:  0000000000011010
 *
 * A relationship exists between bitmap_gather() and bitmap_scatter().  See
 * bitmap_scatter() for the detailed scatter operation.  Suppose 'scattered'
 * is computed using bitmap_scatter(scattered, src, mask, n).  The operation
 * bitmap_gather(result, scattered, mask, n) leads to a 'result' equal to,
 * or equivalent to, 'src'.
 *
 * The result can be 'equivalent' because bitmap_scatter() and bitmap_gather()
 * are not bijective.
 * The result and src values are equivalent in the sense that a call to
 * bitmap_scatter(res, src, mask, n) and a call to
 * bitmap_scatter(res, result, mask, n) will lead to the same res value.
 */
static inline void bitmap_gather(unsigned long *dst, const unsigned long *src,
				 const unsigned long *mask, unsigned int nbits)
{
	unsigned int n = 0;
	unsigned int bit;

	bitmap_zero(dst, nbits);

	for_each_set_bit(bit, mask, nbits)
		__assign_bit(n++, dst, test_bit(bit, src));
}
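
/*
 * Example: a sketch of the scatter/gather round trip using the values from
 * the kernel-doc examples above.  Bit 6 of 'orig' is not covered by the six
 * set bits of 'mask', so it is lost on the way:
 *
 *	DECLARE_BITMAP(orig, 16)   = { 0x005a };
 *	DECLARE_BITMAP(mask, 16)   = { 0x1313 };
 *	DECLARE_BITMAP(sparse, 16);
 *	DECLARE_BITMAP(back, 16);
 *
 *	bitmap_scatter(sparse, orig, mask, 16);	// sparse = 0x0302
 *	bitmap_gather(back, sparse, mask, 16);	// back   = 0x001a, not 0x005a
 */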

static inline void bitmap_next_set_region(unsigned long *bitmap,
					  unsigned int *rs, unsigned int *re,
					  unsigned int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}

/**
 * bitmap_release_region - release allocated bitmap region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @pos: beginning of bit region to release
 * @order: region size (log base 2 of number of bits) to release
 *
 * This is the complement to bitmap_find_free_region() and releases
 * the found region (by clearing it in the bitmap).
 */
static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
{
	bitmap_clear(bitmap, pos, BIT(order));
}

/**
 * bitmap_allocate_region - allocate bitmap region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @pos: beginning of bit region to allocate
 * @order: region size (log base 2 of number of bits) to allocate
 *
 * Allocate (set bits in) a specified region of a bitmap.
 *
 * Returns: 0 on success, or %-EBUSY if specified region wasn't
 * free (not all bits were zero).
 */
static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
{
	unsigned int len = BIT(order);

	if (find_next_bit(bitmap, pos + len, pos) < pos + len)
		return -EBUSY;
	bitmap_set(bitmap, pos, len);
	return 0;
}

/**
 * bitmap_find_free_region - find a contiguous aligned mem region
 * @bitmap: array of unsigned longs corresponding to the bitmap
 * @bits: number of bits in the bitmap
 * @order: region size (log base 2 of number of bits) to find
 *
 * Find a region of free (zero) bits in a @bitmap of @bits bits and
 * allocate them (set them to one).  Only consider regions of length
 * a power (@order) of two, aligned to that power of two, which
 * makes the search algorithm much faster.
 *
 * Returns: the bit offset in bitmap of the allocated region,
 * or -errno on failure.
 */
static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
{
	unsigned int pos, end;		/* scans bitmap by regions of size order */

	for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
		if (!bitmap_allocate_region(bitmap, pos, order))
			return pos;
	}
	return -ENOMEM;
}
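
/*
 * Example: a sketch of managing power-of-2-sized slots with the region
 * helpers above.  'table' is a hypothetical 64-entry allocation map from
 * which an order-3 region (8 naturally aligned bits) is taken and later
 * returned:
 *
 *	DECLARE_BITMAP(table, 64);
 *	int pos;
 *
 *	pos = bitmap_find_free_region(table, 64, 3);
 *	if (pos < 0)
 *		return pos;			// -ENOMEM: no free region
 *	...
 *	bitmap_release_region(table, pos, 3);
 */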

/**
 * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
 * @n: u64 value
 *
 * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
 * integers in a 32-bit environment, and 64-bit integers in a 64-bit one.
 *
 * There are four combinations of endianness and length of the word in linux
 * ABIs: LE64, BE64, LE32 and BE32.
 *
 * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
 * bitmaps and therefore don't require any special handling.
 *
 * On 32-bit kernels the 32-bit LE ABI orders the lo word of a 64-bit number
 * in memory prior to the hi word, and 32-bit BE orders hi prior to lo.  The
 * bitmap on the other hand is represented as an array of 32-bit words and
 * the position of bit N may therefore be calculated as: word #(N/32) and
 * bit #(N%32) in that word.  For example, bit #42 is located at the 10th
 * position of the 2nd word.  This matches the 32-bit LE ABI, and we can
 * simply let the compiler store 64-bit values in memory as it usually does.
 * But for BE we need to swap the hi and lo words manually.
 *
 * With all that, the macro BITMAP_FROM_U64() does explicit reordering of the
 * hi and lo parts of a u64.  For LE32 it does nothing, and for a BE
 * environment it swaps the hi and lo words, as is expected by the bitmap.
 */
#if __BITS_PER_LONG == 64
#define BITMAP_FROM_U64(n) (n)
#else
#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
				((unsigned long) ((u64)(n) >> 32))
#endif
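
/*
 * Example: BITMAP_FROM_U64() is intended for bitmap initializers that must
 * stay correct on both 32-bit and 64-bit kernels; on 32-bit it expands to
 * two comma-separated words, lo first.  A sketch with a hypothetical name:
 *
 *	static const unsigned long valid_ids[] = {
 *		BITMAP_FROM_U64(0x0000ffff0000ffffULL),
 *	};
 */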

/**
 * bitmap_from_u64 - Check and swap words within u64.
 * @mask: source bitmap
 * @dst: destination bitmap
 *
 * In a 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
 * to read a u64 mask, we will get the wrong word.
 * That is, ``(u32 *)(&val)[0]`` gets the upper 32 bits,
 * but we expect the lower 32 bits of the u64.
 */
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
	bitmap_from_arr64(dst, &mask, 64);
}

/**
 * bitmap_get_value8 - get an 8-bit value within a memory region
 * @map: address to the bitmap memory region
 * @start: bit offset of the 8-bit value; must be a multiple of 8
 *
 * Returns the 8-bit value located at the @start bit offset within the @map
 * memory region.
 */
static inline unsigned long bitmap_get_value8(const unsigned long *map,
					      unsigned long start)
{
	const size_t index = BIT_WORD(start);
	const unsigned long offset = start % BITS_PER_LONG;

	return (map[index] >> offset) & 0xFF;
}

/**
 * bitmap_set_value8 - set an 8-bit value within a memory region
 * @map: address to the bitmap memory region
 * @value: the 8-bit value; values wider than 8 bits may clobber bitmap
 * @start: bit offset of the 8-bit value; must be a multiple of 8
 */
static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
				     unsigned long start)
{
	const size_t index = BIT_WORD(start);
	const unsigned long offset = start % BITS_PER_LONG;

	map[index] &= ~(0xFFUL << offset);
	map[index] |= value << offset;
}
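
/*
 * Example: a sketch of byte-granular access, e.g. buffering an 8-bit port.
 * The @start offsets must be multiples of 8; the names are hypothetical:
 *
 *	DECLARE_BITMAP(state, 32);
 *
 *	bitmap_set_value8(state, 0xa5, 8);	// write the second byte
 *	WARN_ON(bitmap_get_value8(state, 8) != 0xa5);
 */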

#endif /* __ASSEMBLY__ */

#endif /* __LINUX_BITMAP_H */