/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __LINUX_BITMAP_H
#define __LINUX_BITMAP_H

#ifndef __ASSEMBLY__

#include <linux/align.h>
#include <linux/bitops.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>

struct device;

/*
 * bitmaps provide bit arrays that consume one or more unsigned
 * longs.  The bitmap interface and available operations are listed
 * here, in bitmap.h.
 *
 * Function implementations generic to all architectures are in
 * lib/bitmap.c.  Function implementations that are architecture
 * specific are in various include/asm-<arch>/bitops.h headers
 * and other arch/<arch> specific files.
 *
 * See lib/bitmap.c for more details.
 */

/**
 * DOC: bitmap overview
 *
 * The available bitmap operations and their rough meaning in the
 * case that the bitmap is a single unsigned long are thus:
 *
 * The generated code is more efficient when nbits is known at
 * compile-time and at most BITS_PER_LONG.
 *
 * ::
 *
 *  bitmap_zero(dst, nbits)                     *dst = 0UL
 *  bitmap_fill(dst, nbits)                     *dst = ~0UL
 *  bitmap_copy(dst, src, nbits)                *dst = *src
 *  bitmap_and(dst, src1, src2, nbits)          *dst = *src1 & *src2
 *  bitmap_or(dst, src1, src2, nbits)           *dst = *src1 | *src2
 *  bitmap_xor(dst, src1, src2, nbits)          *dst = *src1 ^ *src2
 *  bitmap_andnot(dst, src1, src2, nbits)       *dst = *src1 & ~(*src2)
 *  bitmap_complement(dst, src, nbits)          *dst = ~(*src)
 *  bitmap_equal(src1, src2, nbits)             Are *src1 and *src2 equal?
 *  bitmap_intersects(src1, src2, nbits)        Do *src1 and *src2 overlap?
 *  bitmap_subset(src1, src2, nbits)            Is *src1 a subset of *src2?
 *  bitmap_empty(src, nbits)                    Are all bits zero in *src?
 *  bitmap_full(src, nbits)                     Are all bits set in *src?
 *  bitmap_weight(src, nbits)                   Hamming Weight: number of set bits
 *  bitmap_weight_and(src1, src2, nbits)        Hamming Weight of and'ed bitmap
 *  bitmap_set(dst, pos, nbits)                 Set specified bit area
 *  bitmap_clear(dst, pos, nbits)               Clear specified bit area
 *  bitmap_find_next_zero_area(buf, len, pos, n, mask)  Find bit free area
 *  bitmap_find_next_zero_area_off(buf, len, pos, n, mask, mask_off)  as above
 *  bitmap_shift_right(dst, src, n, nbits)      *dst = *src >> n
 *  bitmap_shift_left(dst, src, n, nbits)       *dst = *src << n
 *  bitmap_cut(dst, src, first, n, nbits)       Cut n bits from first, copy rest
 *  bitmap_replace(dst, old, new, mask, nbits)  *dst = (*old & ~(*mask)) | (*new & *mask)
 *  bitmap_remap(dst, src, old, new, nbits)     *dst = map(old, new)(src)
 *  bitmap_bitremap(oldbit, old, new, nbits)    newbit = map(old, new)(oldbit)
 *  bitmap_onto(dst, orig, relmap, nbits)       *dst = orig relative to relmap
 *  bitmap_fold(dst, orig, sz, nbits)           dst bits = orig bits mod sz
 *  bitmap_parse(buf, buflen, dst, nbits)       Parse bitmap dst from kernel buf
 *  bitmap_parse_user(ubuf, ulen, dst, nbits)   Parse bitmap dst from user buf
 *  bitmap_parselist(buf, dst, nbits)           Parse bitmap dst from kernel buf
 *  bitmap_parselist_user(buf, dst, nbits)      Parse bitmap dst from user buf
 *  bitmap_find_free_region(bitmap, bits, order)  Find and allocate bit region
 *  bitmap_release_region(bitmap, pos, order)   Free specified bit region
 *  bitmap_allocate_region(bitmap, pos, order)  Allocate specified bit region
 *  bitmap_from_arr32(dst, buf, nbits)          Copy nbits from u32[] buf to dst
 *  bitmap_from_arr64(dst, buf, nbits)          Copy nbits from u64[] buf to dst
 *  bitmap_to_arr32(buf, src, nbits)            Copy nbits from src to u32[] buf
 *  bitmap_to_arr64(buf, src, nbits)            Copy nbits from src to u64[] buf
 *  bitmap_get_value8(map, start)               Get 8bit value from map at start
 *  bitmap_set_value8(map, value, start)        Set 8bit value to map at start
 *
 * Note that bitmap_zero() and bitmap_fill() operate over whole regions of
 * unsigned longs; that is, bits past nbits, up to the next unsigned long
 * boundary, are zeroed or filled as well.  Consider using bitmap_clear()
 * or bitmap_set() to make the zeroing or filling explicit.
 */
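
/*
 * A minimal usage sketch (illustrative only, not part of this header):
 * zero a fixed-size bitmap, set a range of bits, and count them.  The
 * local names are assumptions for the example.
 *
 *	DECLARE_BITMAP(mask, 64);
 *	unsigned int count;
 *
 *	bitmap_zero(mask, 64);
 *	bitmap_set(mask, 4, 8);
 *	if (!bitmap_empty(mask, 64))
 *		count = bitmap_weight(mask, 64);
 *
 * After this, count == 8: bits 4..11 are the only ones set.
 */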

/**
 * DOC: bitmap bitops
 *
 * Also the following operations in asm/bitops.h apply to bitmaps.::
 *
 *  set_bit(bit, addr)                  *addr |= bit
 *  clear_bit(bit, addr)                *addr &= ~bit
 *  change_bit(bit, addr)               *addr ^= bit
 *  test_bit(bit, addr)                 Is bit set in *addr?
 *  test_and_set_bit(bit, addr)         Set bit and return old value
 *  test_and_clear_bit(bit, addr)       Clear bit and return old value
 *  test_and_change_bit(bit, addr)      Change bit and return old value
 *  find_first_zero_bit(addr, nbits)    Position first zero bit in *addr
 *  find_first_bit(addr, nbits)         Position first set bit in *addr
 *  find_next_zero_bit(addr, nbits, bit)
 *                                      Position next zero bit in *addr >= bit
 *  find_next_bit(addr, nbits, bit)     Position next set bit in *addr >= bit
 *  find_next_and_bit(addr1, addr2, nbits, bit)
 *                                      Same as find_next_bit, but in
 *                                      (*addr1 & *addr2)
 *
 */
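
/*
 * Sketch of the bitops flavor (illustrative only): these take a bit
 * *number* rather than a mask, and the non-__ variants are atomic.  The
 * local names are assumptions for the example.
 *
 *	DECLARE_BITMAP(flags, 32);
 *	unsigned long n;
 *
 *	bitmap_zero(flags, 32);
 *	set_bit(5, flags);
 *	if (test_bit(5, flags))
 *		n = find_first_bit(flags, 32);
 *
 * Here n == 5.
 */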

/**
 * DOC: declare bitmap
 * The DECLARE_BITMAP(name, bits) macro, in linux/types.h, can be used
 * to declare an array named 'name' of just enough unsigned longs to
 * contain all bit positions from 0 to 'bits' - 1.
 */
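
/*
 * For example, DECLARE_BITMAP(pages, 1024) declares roughly
 * "unsigned long pages[BITS_TO_LONGS(1024)]", i.e. 16 longs on a 64-bit
 * kernel and 32 longs on a 32-bit one.
 */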

/*
 * Allocation and deallocation of bitmap.
 * Provided in lib/bitmap.c to avoid circular dependency.
 */
unsigned long *bitmap_alloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_zalloc(unsigned int nbits, gfp_t flags);
unsigned long *bitmap_alloc_node(unsigned int nbits, gfp_t flags, int node);
unsigned long *bitmap_zalloc_node(unsigned int nbits, gfp_t flags, int node);
void bitmap_free(const unsigned long *bitmap);

/* Managed variants of the above. */
unsigned long *devm_bitmap_alloc(struct device *dev,
				 unsigned int nbits, gfp_t flags);
unsigned long *devm_bitmap_zalloc(struct device *dev,
				  unsigned int nbits, gfp_t flags);
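
/*
 * Typical allocation pattern (sketch; "map" is an assumed local):
 *
 *	unsigned long *map = bitmap_zalloc(nbits, GFP_KERNEL);
 *
 *	if (!map)
 *		return -ENOMEM;
 *
 * and, when done with it:
 *
 *	bitmap_free(map);
 *
 * The devm_ variants tie the allocation's lifetime to @dev, so no
 * explicit bitmap_free() is needed.
 */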

/*
 * lib/bitmap.c provides these functions:
 */

bool __bitmap_equal(const unsigned long *bitmap1,
		    const unsigned long *bitmap2, unsigned int nbits);
bool __pure __bitmap_or_equal(const unsigned long *src1,
			      const unsigned long *src2,
			      const unsigned long *src3,
			      unsigned int nbits);
void __bitmap_complement(unsigned long *dst, const unsigned long *src,
			 unsigned int nbits);
void __bitmap_shift_right(unsigned long *dst, const unsigned long *src,
			  unsigned int shift, unsigned int nbits);
void __bitmap_shift_left(unsigned long *dst, const unsigned long *src,
			 unsigned int shift, unsigned int nbits);
void bitmap_cut(unsigned long *dst, const unsigned long *src,
		unsigned int first, unsigned int cut, unsigned int nbits);
bool __bitmap_and(unsigned long *dst, const unsigned long *bitmap1,
		  const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_or(unsigned long *dst, const unsigned long *bitmap1,
		 const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_xor(unsigned long *dst, const unsigned long *bitmap1,
		  const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_andnot(unsigned long *dst, const unsigned long *bitmap1,
		     const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_replace(unsigned long *dst,
		      const unsigned long *old, const unsigned long *new,
		      const unsigned long *mask, unsigned int nbits);
bool __bitmap_intersects(const unsigned long *bitmap1,
			 const unsigned long *bitmap2, unsigned int nbits);
bool __bitmap_subset(const unsigned long *bitmap1,
		     const unsigned long *bitmap2, unsigned int nbits);
unsigned int __bitmap_weight(const unsigned long *bitmap, unsigned int nbits);
unsigned int __bitmap_weight_and(const unsigned long *bitmap1,
				 const unsigned long *bitmap2, unsigned int nbits);
void __bitmap_set(unsigned long *map, unsigned int start, int len);
void __bitmap_clear(unsigned long *map, unsigned int start, int len);

unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
					     unsigned long size,
					     unsigned long start,
					     unsigned int nr,
					     unsigned long align_mask,
					     unsigned long align_offset);

/**
 * bitmap_find_next_zero_area - find a contiguous aligned zero area
 * @map: The address to base the search on
 * @size: The bitmap size in bits
 * @start: The bitnumber to start searching at
 * @nr: The number of zeroed bits we're looking for
 * @align_mask: Alignment mask for zero area
 *
 * The @align_mask should be one less than a power of 2; the effect is that
 * the bit offsets of all zero areas this function finds are multiples of
 * that power of 2.  An @align_mask of 0 means no alignment is required.
 */
static inline unsigned long
bitmap_find_next_zero_area(unsigned long *map,
			   unsigned long size,
			   unsigned long start,
			   unsigned int nr,
			   unsigned long align_mask)
{
	return bitmap_find_next_zero_area_off(map, size, start, nr,
					      align_mask, 0);
}
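
/*
 * Example (sketch): reserve 4 free bits aligned to a 4-bit boundary
 * (align_mask == 3).  The function only searches; callers must still mark
 * the area busy, typically under their own lock:
 *
 *	pos = bitmap_find_next_zero_area(map, size, 0, 4, 3);
 *	if (pos >= size)
 *		return -ENOMEM;
 *	bitmap_set(map, pos, 4);
 */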

int bitmap_parse(const char *buf, unsigned int buflen,
		 unsigned long *dst, int nbits);
int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
		      unsigned long *dst, int nbits);
int bitmap_parselist(const char *buf, unsigned long *maskp,
		     int nmaskbits);
int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
			  unsigned long *dst, int nbits);
void bitmap_remap(unsigned long *dst, const unsigned long *src,
		  const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
		    const unsigned long *old, const unsigned long *new, int bits);
void bitmap_onto(unsigned long *dst, const unsigned long *orig,
		 const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
		 unsigned int sz, unsigned int nbits);
int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);

#ifdef __BIG_ENDIAN
void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
#else
#define bitmap_copy_le bitmap_copy
#endif
int bitmap_print_to_pagebuf(bool list, char *buf,
			    const unsigned long *maskp, int nmaskbits);

extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
				       int nmaskbits, loff_t off, size_t count);

extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
				    int nmaskbits, loff_t off, size_t count);

#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
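
/*
 * For instance, with BITS_PER_LONG == 64, BITMAP_FIRST_WORD_MASK(70) is
 * ~0UL << 6 (70 % 64 == 6, so bits 6..63 are set), and
 * BITMAP_LAST_WORD_MASK(70) is ~0UL >> 58 (bits 0..5 set), covering the
 * 6 valid bits in the last word of a 70-bit bitmap.
 */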

static inline void bitmap_zero(unsigned long *dst, unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	if (small_const_nbits(nbits))
		*dst = 0;
	else
		memset(dst, 0, len);
}

static inline void bitmap_fill(unsigned long *dst, unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	if (small_const_nbits(nbits))
		*dst = ~0UL;
	else
		memset(dst, 0xff, len);
}

static inline void bitmap_copy(unsigned long *dst, const unsigned long *src,
			       unsigned int nbits)
{
	unsigned int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long);

	if (small_const_nbits(nbits))
		*dst = *src;
	else
		memcpy(dst, src, len);
}

/*
 * Copy bitmap and clear tail bits in last word.
 */
static inline void bitmap_copy_clear_tail(unsigned long *dst,
		const unsigned long *src, unsigned int nbits)
{
	bitmap_copy(dst, src, nbits);
	if (nbits % BITS_PER_LONG)
		dst[nbits / BITS_PER_LONG] &= BITMAP_LAST_WORD_MASK(nbits);
}

/*
 * On 32-bit systems bitmaps are represented as u32 arrays internally.  On
 * LE64 machines the order of hi and lo parts of numbers matches the bitmap
 * structure.  In both cases conversion is not needed when copying data
 * from/to arrays of u32.  But in the LE64 case, the typecast in
 * bitmap_copy_clear_tail() may lead to out-of-bounds access.  To avoid
 * that, both LE and BE variants of 64-bit architectures do not use
 * bitmap_copy_clear_tail().
 */
#if BITS_PER_LONG == 64
void bitmap_from_arr32(unsigned long *bitmap, const u32 *buf,
		       unsigned int nbits);
void bitmap_to_arr32(u32 *buf, const unsigned long *bitmap,
		     unsigned int nbits);
#else
#define bitmap_from_arr32(bitmap, buf, nbits)			\
	bitmap_copy_clear_tail((unsigned long *) (bitmap),	\
			       (const unsigned long *) (buf), (nbits))
#define bitmap_to_arr32(buf, bitmap, nbits)			\
	bitmap_copy_clear_tail((unsigned long *) (buf),		\
			       (const unsigned long *) (bitmap), (nbits))
#endif
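
/*
 * Example (sketch): exchanging a bitmap with a fixed-width u32 array,
 * e.g. for an interface that must not depend on BITS_PER_LONG:
 *
 *	DECLARE_BITMAP(map, 96);
 *	u32 wire[3];
 *
 *	bitmap_to_arr32(wire, map, 96);
 *	bitmap_from_arr32(map, wire, 96);
 *
 * On 64-bit kernels this goes through the out-of-line helpers; on 32-bit
 * kernels it is a plain copy with the tail bits cleared.
 */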

/*
 * On 64-bit systems bitmaps are represented as u64 arrays internally.  On
 * LE32 machines the order of hi and lo parts of numbers matches the bitmap
 * structure.  In both cases conversion is not needed when copying data
 * from/to arrays of u64.
 */
#if (BITS_PER_LONG == 32) && defined(__BIG_ENDIAN)
void bitmap_from_arr64(unsigned long *bitmap, const u64 *buf, unsigned int nbits);
void bitmap_to_arr64(u64 *buf, const unsigned long *bitmap, unsigned int nbits);
#else
#define bitmap_from_arr64(bitmap, buf, nbits)			\
	bitmap_copy_clear_tail((unsigned long *)(bitmap), (const unsigned long *)(buf), (nbits))
#define bitmap_to_arr64(buf, bitmap, nbits)			\
	bitmap_copy_clear_tail((unsigned long *)(buf), (const unsigned long *)(bitmap), (nbits))
#endif

static inline bool bitmap_and(unsigned long *dst, const unsigned long *src1,
			      const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return (*dst = *src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits)) != 0;
	return __bitmap_and(dst, src1, src2, nbits);
}

static inline void bitmap_or(unsigned long *dst, const unsigned long *src1,
			     const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = *src1 | *src2;
	else
		__bitmap_or(dst, src1, src2, nbits);
}

static inline void bitmap_xor(unsigned long *dst, const unsigned long *src1,
			      const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = *src1 ^ *src2;
	else
		__bitmap_xor(dst, src1, src2, nbits);
}

static inline bool bitmap_andnot(unsigned long *dst, const unsigned long *src1,
				 const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return (*dst = *src1 & ~(*src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
	return __bitmap_andnot(dst, src1, src2, nbits);
}

static inline void bitmap_complement(unsigned long *dst, const unsigned long *src,
				     unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = ~(*src);
	else
		__bitmap_complement(dst, src, nbits);
}

#ifdef __LITTLE_ENDIAN
#define BITMAP_MEM_ALIGNMENT 8
#else
#define BITMAP_MEM_ALIGNMENT (8 * sizeof(unsigned long))
#endif
#define BITMAP_MEM_MASK (BITMAP_MEM_ALIGNMENT - 1)

static inline bool bitmap_equal(const unsigned long *src1,
				const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
	if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
	    IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
		return !memcmp(src1, src2, nbits / 8);
	return __bitmap_equal(src1, src2, nbits);
}

/**
 * bitmap_or_equal - Check whether the or of two bitmaps is equal to a third
 * @src1: Pointer to bitmap 1
 * @src2: Pointer to bitmap 2 will be or'ed with bitmap 1
 * @src3: Pointer to bitmap 3. Compare to the result of *@src1 | *@src2
 * @nbits: number of bits in each of these bitmaps
 *
 * Returns: True if (*@src1 | *@src2) == *@src3, false otherwise
 */
static inline bool bitmap_or_equal(const unsigned long *src1,
				   const unsigned long *src2,
				   const unsigned long *src3,
				   unsigned int nbits)
{
	if (!small_const_nbits(nbits))
		return __bitmap_or_equal(src1, src2, src3, nbits);

	return !(((*src1 | *src2) ^ *src3) & BITMAP_LAST_WORD_MASK(nbits));
}

static inline bool bitmap_intersects(const unsigned long *src1,
				     const unsigned long *src2,
				     unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return ((*src1 & *src2) & BITMAP_LAST_WORD_MASK(nbits)) != 0;
	else
		return __bitmap_intersects(src1, src2, nbits);
}

static inline bool bitmap_subset(const unsigned long *src1,
				 const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !((*src1 & ~(*src2)) & BITMAP_LAST_WORD_MASK(nbits));
	else
		return __bitmap_subset(src1, src2, nbits);
}

static inline bool bitmap_empty(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !(*src & BITMAP_LAST_WORD_MASK(nbits));

	return find_first_bit(src, nbits) == nbits;
}

static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return !(~(*src) & BITMAP_LAST_WORD_MASK(nbits));

	return find_first_zero_bit(src, nbits) == nbits;
}

static __always_inline
unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
	return __bitmap_weight(src, nbits);
}

static __always_inline
unsigned int bitmap_weight_and(const unsigned long *src1,
			       const unsigned long *src2, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		return hweight_long(*src1 & *src2 & BITMAP_LAST_WORD_MASK(nbits));
	return __bitmap_weight_and(src1, src2, nbits);
}
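
/*
 * For example (sketch), counting the bits common to two masks:
 *
 *	common = bitmap_weight_and(src1, src2, nbits);
 *
 * is equivalent to bitmap_and() into a scratch bitmap followed by
 * bitmap_weight(), but needs no temporary storage.
 */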

static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
				       unsigned int nbits)
{
	if (__builtin_constant_p(nbits) && nbits == 1)
		__set_bit(start, map);
	else if (small_const_nbits(start + nbits))
		*map |= GENMASK(start + nbits - 1, start);
	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
		memset((char *)map + start / 8, 0xff, nbits / 8);
	else
		__bitmap_set(map, start, nbits);
}

static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
					 unsigned int nbits)
{
	if (__builtin_constant_p(nbits) && nbits == 1)
		__clear_bit(start, map);
	else if (small_const_nbits(start + nbits))
		*map &= ~GENMASK(start + nbits - 1, start);
	else if (__builtin_constant_p(start & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
		memset((char *)map + start / 8, 0, nbits / 8);
	else
		__bitmap_clear(map, start, nbits);
}
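
/*
 * Sketch of how the fast paths above get selected on a 64-bit LE kernel
 * (all arguments assumed compile-time constants):
 *
 *	bitmap_set(map, 5, 1);		single bit: __set_bit()
 *	bitmap_set(map, 0, 128);	byte-aligned region: memset()
 *	bitmap_clear(map, 3, 100);	unaligned start: __bitmap_clear()
 */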

static inline void bitmap_shift_right(unsigned long *dst, const unsigned long *src,
				      unsigned int shift, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = (*src & BITMAP_LAST_WORD_MASK(nbits)) >> shift;
	else
		__bitmap_shift_right(dst, src, shift, nbits);
}

static inline void bitmap_shift_left(unsigned long *dst, const unsigned long *src,
				     unsigned int shift, unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = (*src << shift) & BITMAP_LAST_WORD_MASK(nbits);
	else
		__bitmap_shift_left(dst, src, shift, nbits);
}

static inline void bitmap_replace(unsigned long *dst,
				  const unsigned long *old,
				  const unsigned long *new,
				  const unsigned long *mask,
				  unsigned int nbits)
{
	if (small_const_nbits(nbits))
		*dst = (*old & ~(*mask)) | (*new & *mask);
	else
		__bitmap_replace(dst, old, new, mask, nbits);
}
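
/*
 * Example (sketch; the names are illustrative): a masked read-modify-write
 * that takes the bits of *req falling inside *writable and keeps the rest
 * of *cur intact.  Passing the same bitmap as @dst and @old works, since
 * the update proceeds word by word:
 *
 *	bitmap_replace(cur, cur, req, writable, nbits);
 */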

static inline void bitmap_next_set_region(unsigned long *bitmap,
					  unsigned int *rs, unsigned int *re,
					  unsigned int end)
{
	*rs = find_next_bit(bitmap, end, *rs);
	*re = find_next_zero_bit(bitmap, end, *rs + 1);
}

/**
 * BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
 * @n: u64 value
 *
 * Linux bitmaps are internally arrays of unsigned longs, i.e. 32-bit
 * integers in a 32-bit environment, and 64-bit integers in a 64-bit one.
 *
 * There are four combinations of endianness and length of the word in Linux
 * ABIs: LE64, BE64, LE32 and BE32.
 *
 * On 64-bit kernels 64-bit LE and BE numbers are naturally ordered in
 * bitmaps and therefore don't require any special handling.
 *
 * On 32-bit kernels the 32-bit LE ABI orders the lo word of a 64-bit number
 * in memory prior to the hi word, and 32-bit BE orders hi prior to lo.  The
 * bitmap, on the other hand, is represented as an array of 32-bit words and
 * the position of bit N may therefore be calculated as: word #(N/32) and
 * bit #(N%32) in that word.  For example, bit #42 is located at the 10th
 * position of the 2nd word.  This matches the 32-bit LE ABI, and we can
 * simply let the compiler store 64-bit values in memory as it usually does.
 * But for BE we need to swap the hi and lo words manually.
 *
 * With all that, the macro BITMAP_FROM_U64() does explicit reordering of the
 * hi and lo parts of a u64.  For LE32 it does nothing, and for a BE
 * environment it swaps hi and lo words, as is expected by the bitmap.
 */
#if __BITS_PER_LONG == 64
#define BITMAP_FROM_U64(n) (n)
#else
#define BITMAP_FROM_U64(n) ((unsigned long) ((u64)(n) & ULONG_MAX)), \
				((unsigned long) ((u64)(n) >> 32))
#endif
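
/*
 * Example (sketch): initializing a bitmap from 64-bit literals in a
 * word-size-independent way:
 *
 *	static const unsigned long pattern[] = {
 *		BITMAP_FROM_U64(0x00000000ffffffffULL),
 *		BITMAP_FROM_U64(0xdeadbeefdeadbeefULL),
 *	};
 *
 * On 64-bit kernels each entry expands to one array element; on 32-bit
 * kernels it expands to two unsigned longs, lo word first.
 */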

/**
 * bitmap_from_u64 - Check and swap words within u64.
 * @mask: source bitmap
 * @dst: destination bitmap
 *
 * In a 32-bit Big Endian kernel, when using ``(u32 *)(&val)[*]``
 * to read a u64 mask, we will get the wrong word.
 * That is, ``(u32 *)(&val)[0]`` gets the upper 32 bits,
 * but we expect the lower 32 bits of the u64.
 */
static inline void bitmap_from_u64(unsigned long *dst, u64 mask)
{
	bitmap_from_arr64(dst, &mask, 64);
}

/**
 * bitmap_get_value8 - get an 8-bit value within a memory region
 * @map: address to the bitmap memory region
 * @start: bit offset of the 8-bit value; must be a multiple of 8
 *
 * Returns the 8-bit value located at the @start bit offset within the @map
 * memory region.
 */
static inline unsigned long bitmap_get_value8(const unsigned long *map,
					      unsigned long start)
{
	const size_t index = BIT_WORD(start);
	const unsigned long offset = start % BITS_PER_LONG;

	return (map[index] >> offset) & 0xFF;
}

/**
 * bitmap_set_value8 - set an 8-bit value within a memory region
 * @map: address to the bitmap memory region
 * @value: the 8-bit value; values wider than 8 bits may clobber the bitmap
 * @start: bit offset of the 8-bit value; must be a multiple of 8
 */
static inline void bitmap_set_value8(unsigned long *map, unsigned long value,
				     unsigned long start)
{
	const size_t index = BIT_WORD(start);
	const unsigned long offset = start % BITS_PER_LONG;

	map[index] &= ~(0xFFUL << offset);
	map[index] |= value << offset;
}
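
/*
 * Example (sketch): round-tripping a byte at a byte-aligned bit offset,
 * as a driver backing an 8-bit port might ("map" and "v" are assumed
 * locals):
 *
 *	bitmap_set_value8(map, 0xa5, 16);
 *	v = bitmap_get_value8(map, 16);
 *
 * Here v == 0xa5, and bits 16..23 of the bitmap hold the value.
 */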

#endif /* __ASSEMBLY__ */

#endif /* __LINUX_BITMAP_H */