1// SPDX-License-Identifier: MIT
2/*
3 * Copyright © 2021 Intel Corporation
4 */
5
6#include <linux/kmemleak.h>
7#include <linux/module.h>
8#include <linux/sizes.h>
9
10#include <drm/drm_buddy.h>
11
12static struct kmem_cache *slab_blocks;
13
14static struct drm_buddy_block *drm_block_alloc(struct drm_buddy *mm,
15 struct drm_buddy_block *parent,
16 unsigned int order,
17 u64 offset)
18{
19 struct drm_buddy_block *block;
20
21 BUG_ON(order > DRM_BUDDY_MAX_ORDER);
22
23 block = kmem_cache_zalloc(k: slab_blocks, GFP_KERNEL);
24 if (!block)
25 return NULL;
26
27 block->header = offset;
28 block->header |= order;
29 block->parent = parent;
30
31 BUG_ON(block->header & DRM_BUDDY_HEADER_UNUSED);
32 return block;
33}
34
/* Return a block descriptor to the dedicated slab cache. */
static void drm_block_free(struct drm_buddy *mm,
			   struct drm_buddy_block *block)
{
	kmem_cache_free(slab_blocks, block);
}
40
41static void list_insert_sorted(struct drm_buddy *mm,
42 struct drm_buddy_block *block)
43{
44 struct drm_buddy_block *node;
45 struct list_head *head;
46
47 head = &mm->free_list[drm_buddy_block_order(block)];
48 if (list_empty(head)) {
49 list_add(new: &block->link, head);
50 return;
51 }
52
53 list_for_each_entry(node, head, link)
54 if (drm_buddy_block_offset(block) < drm_buddy_block_offset(block: node))
55 break;
56
57 __list_add(new: &block->link, prev: node->link.prev, next: &node->link);
58}
59
/* Transition @block to the ALLOCATED state and unlink it from its free list. */
static void mark_allocated(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_ALLOCATED;

	list_del(&block->link);
}
67
/*
 * Transition @block to the FREE state and insert it into the free list for
 * its order, keeping that list sorted by offset.
 */
static void mark_free(struct drm_buddy *mm,
		      struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_FREE;

	list_insert_sorted(mm, block);
}
76
/*
 * Transition @block to the SPLIT state and remove it from its free list;
 * its two children now account for the space.
 */
static void mark_split(struct drm_buddy_block *block)
{
	block->header &= ~DRM_BUDDY_HEADER_STATE;
	block->header |= DRM_BUDDY_SPLIT;

	list_del(&block->link);
}
84
/**
 * drm_buddy_init - init memory manager
 *
 * @mm: DRM buddy manager to initialize
 * @size: size in bytes to manage
 * @chunk_size: minimum page size in bytes for our allocations
 *
 * Initializes the memory manager and its resources.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_init(struct drm_buddy *mm, u64 size, u64 chunk_size)
{
	unsigned int i;
	u64 offset;

	if (size < chunk_size)
		return -EINVAL;

	if (chunk_size < PAGE_SIZE)
		return -EINVAL;

	if (!is_power_of_2(chunk_size))
		return -EINVAL;

	/* Discard any tail smaller than the minimum allocation unit. */
	size = round_down(size, chunk_size);

	mm->size = size;
	mm->avail = size;
	mm->chunk_size = chunk_size;
	mm->max_order = ilog2(size) - ilog2(chunk_size);

	BUG_ON(mm->max_order > DRM_BUDDY_MAX_ORDER);

	/* One free list per order, 0..max_order inclusive. */
	mm->free_list = kmalloc_array(mm->max_order + 1,
				      sizeof(struct list_head),
				      GFP_KERNEL);
	if (!mm->free_list)
		return -ENOMEM;

	for (i = 0; i <= mm->max_order; ++i)
		INIT_LIST_HEAD(&mm->free_list[i]);

	/* One root per set bit of @size: each power-of-two slice gets a tree. */
	mm->n_roots = hweight64(size);

	mm->roots = kmalloc_array(mm->n_roots,
				  sizeof(struct drm_buddy_block *),
				  GFP_KERNEL);
	if (!mm->roots)
		goto out_free_list;

	offset = 0;
	i = 0;

	/*
	 * Split into power-of-two blocks, in case we are given a size that is
	 * not itself a power-of-two.
	 */
	do {
		struct drm_buddy_block *root;
		unsigned int order;
		u64 root_size;

		/* Largest power-of-two slice that still fits in what remains. */
		order = ilog2(size) - ilog2(chunk_size);
		root_size = chunk_size << order;

		root = drm_block_alloc(mm, NULL, order, offset);
		if (!root)
			goto out_free_roots;

		mark_free(mm, root);

		BUG_ON(i > mm->max_order);
		BUG_ON(drm_buddy_block_size(mm, root) < chunk_size);

		mm->roots[i] = root;

		offset += root_size;
		size -= root_size;
		i++;
	} while (size);

	return 0;

out_free_roots:
	/* Unwind only the roots allocated so far. */
	while (i--)
		drm_block_free(mm, mm->roots[i]);
	kfree(mm->roots);
out_free_list:
	kfree(mm->free_list);
	return -ENOMEM;
}
EXPORT_SYMBOL(drm_buddy_init);
179
/**
 * drm_buddy_fini - tear down the memory manager
 *
 * @mm: DRM buddy manager to free
 *
 * Cleanup memory manager resources and the freelist
 */
void drm_buddy_fini(struct drm_buddy *mm)
{
	int i;

	for (i = 0; i < mm->n_roots; ++i) {
		/* Every allocation must have been returned by now. */
		WARN_ON(!drm_buddy_block_is_free(mm->roots[i]));
		drm_block_free(mm, mm->roots[i]);
	}

	/* If this fires, some space was still accounted as allocated. */
	WARN_ON(mm->avail != mm->size);

	kfree(mm->roots);
	kfree(mm->free_list);
}
EXPORT_SYMBOL(drm_buddy_fini);
202
/*
 * Split a free block into two half-sized buddies, marking both halves free
 * and the parent split. Returns 0 on success, -ENOMEM if a child descriptor
 * could not be allocated (in which case @block is left untouched).
 */
static int split_block(struct drm_buddy *mm,
		       struct drm_buddy_block *block)
{
	unsigned int block_order = drm_buddy_block_order(block) - 1;
	u64 offset = drm_buddy_block_offset(block);

	BUG_ON(!drm_buddy_block_is_free(block));
	/* Order-0 blocks are the minimum unit and cannot be split. */
	BUG_ON(!drm_buddy_block_order(block));

	/* Left child covers the first half of the parent's range... */
	block->left = drm_block_alloc(mm, block, block_order, offset);
	if (!block->left)
		return -ENOMEM;

	/* ...right child the second half. */
	block->right = drm_block_alloc(mm, block, block_order,
				       offset + (mm->chunk_size << block_order));
	if (!block->right) {
		drm_block_free(mm, block->left);
		return -ENOMEM;
	}

	mark_free(mm, block->left);
	mark_free(mm, block->right);

	mark_split(block);

	return 0;
}
230
231static struct drm_buddy_block *
232__get_buddy(struct drm_buddy_block *block)
233{
234 struct drm_buddy_block *parent;
235
236 parent = block->parent;
237 if (!parent)
238 return NULL;
239
240 if (parent->left == block)
241 return parent->right;
242
243 return parent->left;
244}
245
/**
 * drm_get_buddy - get buddy address
 *
 * @block: DRM buddy block
 *
 * Returns the corresponding buddy block for @block, or NULL
 * if this is a root block and can't be merged further.
 * Requires some kind of locking to protect against
 * any concurrent allocate and free operations.
 */
struct drm_buddy_block *
drm_get_buddy(struct drm_buddy_block *block)
{
	/* Thin exported wrapper around the internal helper. */
	return __get_buddy(block);
}
EXPORT_SYMBOL(drm_get_buddy);
262
/*
 * Release @block back into the tree, greedily coalescing it with its buddy
 * at each level for as long as the buddy is also free, then mark the
 * (possibly merged) block free.
 */
static void __drm_buddy_free(struct drm_buddy *mm,
			     struct drm_buddy_block *block)
{
	struct drm_buddy_block *parent;

	while ((parent = block->parent)) {
		struct drm_buddy_block *buddy;

		buddy = __get_buddy(block);

		/* Buddy still allocated or split - can't merge further. */
		if (!drm_buddy_block_is_free(buddy))
			break;

		list_del(&buddy->link);

		/* Both halves collapse back into the parent. */
		drm_block_free(mm, block);
		drm_block_free(mm, buddy);

		block = parent;
	}

	mark_free(mm, block);
}
286
/**
 * drm_buddy_free_block - free a block
 *
 * @mm: DRM buddy manager
 * @block: block to be freed
 */
void drm_buddy_free_block(struct drm_buddy *mm,
			  struct drm_buddy_block *block)
{
	BUG_ON(!drm_buddy_block_is_allocated(block));
	/* Account the space as available again before merging it back. */
	mm->avail += drm_buddy_block_size(mm, block);
	__drm_buddy_free(mm, block);
}
EXPORT_SYMBOL(drm_buddy_free_block);
301
/**
 * drm_buddy_free_list - free blocks
 *
 * @mm: DRM buddy manager
 * @objects: input list head to free blocks
 */
void drm_buddy_free_list(struct drm_buddy *mm, struct list_head *objects)
{
	struct drm_buddy_block *block, *on;

	/* Safe iteration: freeing unlinks each block from @objects. */
	list_for_each_entry_safe(block, on, objects, link) {
		drm_buddy_free_block(mm, block);
		/* Freeing long lists can take a while; be scheduler friendly. */
		cond_resched();
	}
	INIT_LIST_HEAD(objects);
}
EXPORT_SYMBOL(drm_buddy_free_list);
319
320static inline bool overlaps(u64 s1, u64 e1, u64 s2, u64 e2)
321{
322 return s1 <= e2 && e1 >= s2;
323}
324
325static inline bool contains(u64 s1, u64 e1, u64 s2, u64 e2)
326{
327 return s1 <= s2 && e1 >= e2;
328}
329
/*
 * Depth-first search for a free block of exactly @order lying entirely
 * within [start, end), splitting larger candidates on the way down.
 * Returns the block, ERR_PTR(-ENOSPC) if none fits, or ERR_PTR(-ENOMEM)
 * if a split failed.
 */
static struct drm_buddy_block *
alloc_range_bias(struct drm_buddy *mm,
		 u64 start, u64 end,
		 unsigned int order)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	LIST_HEAD(dfs);
	int err;
	int i;

	/* Work with an inclusive end offset from here on. */
	end = end - 1;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(&dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		/* Too small - none of its children can satisfy @order either. */
		if (drm_buddy_block_order(block) < order)
			continue;

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		if (drm_buddy_block_is_allocated(block))
			continue;

		if (contains(start, end, block_start, block_end) &&
		    order == drm_buddy_block_order(block)) {
			/*
			 * Find the free block within the range.
			 */
			if (drm_buddy_block_is_free(block))
				return block;

			continue;
		}

		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		/* Push right then left so the left child is visited first. */
		list_add(&block->right->tmp_link, &dfs);
		list_add(&block->left->tmp_link, &dfs);
	} while (1);

	return ERR_PTR(-ENOSPC);

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
406
407static struct drm_buddy_block *
408get_maxblock(struct drm_buddy *mm, unsigned int order)
409{
410 struct drm_buddy_block *max_block = NULL, *node;
411 unsigned int i;
412
413 for (i = order; i <= mm->max_order; ++i) {
414 if (!list_empty(head: &mm->free_list[i])) {
415 node = list_last_entry(&mm->free_list[i],
416 struct drm_buddy_block,
417 link);
418 if (!max_block) {
419 max_block = node;
420 continue;
421 }
422
423 if (drm_buddy_block_offset(block: node) >
424 drm_buddy_block_offset(block: max_block)) {
425 max_block = node;
426 }
427 }
428 }
429
430 return max_block;
431}
432
/*
 * Pick a free block of at least @order from the freelists - the highest-
 * offset block for top-down allocation, otherwise the first block found
 * scanning orders upwards - then split it down until it is exactly @order.
 */
static struct drm_buddy_block *
alloc_from_freelist(struct drm_buddy *mm,
		    unsigned int order,
		    unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	unsigned int tmp;
	int err;

	if (flags & DRM_BUDDY_TOPDOWN_ALLOCATION) {
		block = get_maxblock(mm, order);
		if (block)
			/* Store the obtained block order */
			tmp = drm_buddy_block_order(block);
	} else {
		/* Smallest order that has a free block available. */
		for (tmp = order; tmp <= mm->max_order; ++tmp) {
			if (!list_empty(&mm->free_list[tmp])) {
				block = list_last_entry(&mm->free_list[tmp],
							struct drm_buddy_block,
							link);
				if (block)
					break;
			}
		}
	}

	if (!block)
		return ERR_PTR(-ENOSPC);

	BUG_ON(!drm_buddy_block_is_free(block));

	/* Split down to the requested order, descending rightwards. */
	while (tmp != order) {
		err = split_block(mm, block);
		if (unlikely(err))
			goto err_undo;

		block = block->right;
		tmp--;
	}
	return block;

err_undo:
	/* Only merge back if we actually split something. */
	if (tmp != order)
		__drm_buddy_free(mm, block);
	return ERR_PTR(err);
}
479
/*
 * Allocate every chunk intersecting [start, start + size), walking the
 * trees queued on @dfs and splitting partially-overlapping blocks as
 * needed. Fully-contained free blocks are marked allocated and collected
 * onto @blocks. On -ENOSPC, if @total_allocated_on_err is non-NULL the
 * partial allocation is kept and its size reported through it; otherwise
 * everything is rolled back.
 */
static int __alloc_range(struct drm_buddy *mm,
			 struct list_head *dfs,
			 u64 start, u64 size,
			 struct list_head *blocks,
			 u64 *total_allocated_on_err)
{
	struct drm_buddy_block *block;
	struct drm_buddy_block *buddy;
	u64 total_allocated = 0;
	LIST_HEAD(allocated);
	u64 end;
	int err;

	/* Inclusive end offset. */
	end = start + size - 1;

	do {
		u64 block_start;
		u64 block_end;

		block = list_first_entry_or_null(dfs,
						 struct drm_buddy_block,
						 tmp_link);
		if (!block)
			break;

		list_del(&block->tmp_link);

		block_start = drm_buddy_block_offset(block);
		block_end = block_start + drm_buddy_block_size(mm, block) - 1;

		if (!overlaps(start, end, block_start, block_end))
			continue;

		/* Something in the range is already taken - range alloc fails. */
		if (drm_buddy_block_is_allocated(block)) {
			err = -ENOSPC;
			goto err_free;
		}

		if (contains(start, end, block_start, block_end)) {
			/* Whole block lies inside the range - take it as-is. */
			if (!drm_buddy_block_is_free(block)) {
				err = -ENOSPC;
				goto err_free;
			}

			mark_allocated(block);
			total_allocated += drm_buddy_block_size(mm, block);
			mm->avail -= drm_buddy_block_size(mm, block);
			list_add_tail(&block->link, &allocated);
			continue;
		}

		/* Partial overlap - descend into the children. */
		if (!drm_buddy_block_is_split(block)) {
			err = split_block(mm, block);
			if (unlikely(err))
				goto err_undo;
		}

		list_add(&block->right->tmp_link, dfs);
		list_add(&block->left->tmp_link, dfs);
	} while (1);

	list_splice_tail(&allocated, blocks);
	return 0;

err_undo:
	/*
	 * We really don't want to leave around a bunch of split blocks, since
	 * bigger is better, so make sure we merge everything back before we
	 * free the allocated blocks.
	 */
	buddy = __get_buddy(block);
	if (buddy &&
	    (drm_buddy_block_is_free(block) &&
	     drm_buddy_block_is_free(buddy)))
		__drm_buddy_free(mm, block);

err_free:
	if (err == -ENOSPC && total_allocated_on_err) {
		/* Caller asked to keep whatever was allocated before -ENOSPC. */
		list_splice_tail(&allocated, blocks);
		*total_allocated_on_err = total_allocated;
	} else {
		drm_buddy_free_list(mm, &allocated);
	}

	return err;
}
566
/* Seed a DFS with all root blocks and range-allocate [start, start + size). */
static int __drm_buddy_alloc_range(struct drm_buddy *mm,
				   u64 start,
				   u64 size,
				   u64 *total_allocated_on_err,
				   struct list_head *blocks)
{
	LIST_HEAD(dfs);
	int i;

	for (i = 0; i < mm->n_roots; ++i)
		list_add_tail(&mm->roots[i]->tmp_link, &dfs);

	return __alloc_range(mm, &dfs, start, size,
			     blocks, total_allocated_on_err);
}
582
/*
 * Contiguous-allocation fallback: anchor on a free block of the largest
 * power-of-two not exceeding @size, fill rightwards from its offset, then
 * complete the remainder leftwards of it.
 * NOTE(review): the two-sided fill is presumed to yield one contiguous
 * span around the anchor block's offset - confirm against the sorted
 * free-list invariant maintained by list_insert_sorted().
 */
static int __alloc_contig_try_harder(struct drm_buddy *mm,
				     u64 size,
				     u64 min_block_size,
				     struct list_head *blocks)
{
	u64 rhs_offset, lhs_offset, lhs_size, filled;
	struct drm_buddy_block *block;
	struct list_head *list;
	LIST_HEAD(blocks_lhs);
	unsigned long pages;
	unsigned int order;
	u64 modify_size;
	int err;

	/* Anchor order: largest power-of-two chunk not exceeding @size. */
	modify_size = rounddown_pow_of_two(size);
	pages = modify_size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	if (order == 0)
		return -ENOSPC;

	list = &mm->free_list[order];
	if (list_empty(list))
		return -ENOSPC;

	list_for_each_entry_reverse(block, list, link) {
		/* Allocate blocks traversing RHS */
		rhs_offset = drm_buddy_block_offset(block);
		err = __drm_buddy_alloc_range(mm, rhs_offset, size,
					      &filled, blocks);
		/* Fully satisfied, or failed with something other than -ENOSPC. */
		if (!err || err != -ENOSPC)
			return err;

		lhs_size = max((size - filled), min_block_size);
		if (!IS_ALIGNED(lhs_size, min_block_size))
			lhs_size = round_up(lhs_size, min_block_size);

		/* Allocate blocks traversing LHS */
		lhs_offset = drm_buddy_block_offset(block) - lhs_size;
		err = __drm_buddy_alloc_range(mm, lhs_offset, lhs_size,
					      NULL, &blocks_lhs);
		if (!err) {
			list_splice(&blocks_lhs, blocks);
			return 0;
		} else if (err != -ENOSPC) {
			drm_buddy_free_list(mm, blocks);
			return err;
		}
		/* Free blocks for the next iteration */
		drm_buddy_free_list(mm, blocks);
	}

	return -ENOSPC;
}
636
/**
 * drm_buddy_block_trim - free unused pages
 *
 * @mm: DRM buddy manager
 * @new_size: original size requested
 * @blocks: Input and output list of allocated blocks.
 * MUST contain single block as input to be trimmed.
 * On success will contain the newly allocated blocks
 * making up the @new_size. Blocks always appear in
 * ascending order
 *
 * For contiguous allocation, we round up the size to the nearest
 * power of two value, drivers consume *actual* size, so remaining
 * portions are unused and can be optionally freed with this function
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_block_trim(struct drm_buddy *mm,
			 u64 new_size,
			 struct list_head *blocks)
{
	struct drm_buddy_block *parent;
	struct drm_buddy_block *block;
	LIST_HEAD(dfs);
	u64 new_start;
	int err;

	if (!list_is_singular(blocks))
		return -EINVAL;

	block = list_first_entry(blocks,
				 struct drm_buddy_block,
				 link);

	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
		return -EINVAL;

	if (new_size > drm_buddy_block_size(mm, block))
		return -EINVAL;

	if (!new_size || !IS_ALIGNED(new_size, mm->chunk_size))
		return -EINVAL;

	/* Nothing to trim. */
	if (new_size == drm_buddy_block_size(mm, block))
		return 0;

	/* Temporarily return the block, then re-allocate just the prefix. */
	list_del(&block->link);
	mark_free(mm, block);
	mm->avail += drm_buddy_block_size(mm, block);

	/* Prevent recursively freeing this node */
	parent = block->parent;
	block->parent = NULL;

	new_start = drm_buddy_block_offset(block);
	list_add(&block->tmp_link, &dfs);
	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
	if (err) {
		/* Re-allocation failed - restore the original allocation. */
		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		list_add(&block->link, blocks);
	}

	block->parent = parent;
	return err;
}
EXPORT_SYMBOL(drm_buddy_block_trim);
705
/**
 * drm_buddy_alloc_blocks - allocate power-of-two blocks
 *
 * @mm: DRM buddy manager to allocate from
 * @start: start of the allowed range for this block
 * @end: end of the allowed range for this block
 * @size: size of the allocation
 * @min_block_size: alignment of the allocation
 * @blocks: output list head to add allocated blocks
 * @flags: DRM_BUDDY_*_ALLOCATION flags
 *
 * alloc_range_bias() called on range limitations, which traverses
 * the tree and returns the desired block.
 *
 * alloc_from_freelist() called when *no* range restrictions
 * are enforced, which picks the block from the freelist.
 *
 * Returns:
 * 0 on success, error code on failure.
 */
int drm_buddy_alloc_blocks(struct drm_buddy *mm,
			   u64 start, u64 end, u64 size,
			   u64 min_block_size,
			   struct list_head *blocks,
			   unsigned long flags)
{
	struct drm_buddy_block *block = NULL;
	u64 original_size, original_min_size;
	unsigned int min_order, order;
	LIST_HEAD(allocated);
	unsigned long pages;
	int err;

	if (size < mm->chunk_size)
		return -EINVAL;

	if (min_block_size < mm->chunk_size)
		return -EINVAL;

	if (!is_power_of_2(min_block_size))
		return -EINVAL;

	if (!IS_ALIGNED(start | end | size, mm->chunk_size))
		return -EINVAL;

	if (end > mm->size)
		return -EINVAL;

	if (range_overflows(start, size, mm->size))
		return -EINVAL;

	/* Actual range allocation */
	if (start + size == end)
		return __drm_buddy_alloc_range(mm, start, size, NULL, blocks);

	/* Remember the caller's values; @size may be rounded up below. */
	original_size = size;
	original_min_size = min_block_size;

	/* Roundup the size to power of 2 */
	if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION) {
		size = roundup_pow_of_two(size);
		min_block_size = size;
	/* Align size value to min_block_size */
	} else if (!IS_ALIGNED(size, min_block_size)) {
		size = round_up(size, min_block_size);
	}

	pages = size >> ilog2(mm->chunk_size);
	order = fls(pages) - 1;
	min_order = ilog2(min_block_size) - ilog2(mm->chunk_size);

	do {
		/* Never request more than the pages still outstanding. */
		order = min(order, (unsigned int)fls(pages) - 1);
		BUG_ON(order > mm->max_order);
		BUG_ON(order < min_order);

		do {
			if (flags & DRM_BUDDY_RANGE_ALLOCATION)
				/* Allocate traversing within the range */
				block = alloc_range_bias(mm, start, end, order);
			else
				/* Allocate from freelist */
				block = alloc_from_freelist(mm, order, flags);

			if (!IS_ERR(block))
				break;

			/* Retry with a smaller order, down to min_order. */
			if (order-- == min_order) {
				if (flags & DRM_BUDDY_CONTIGUOUS_ALLOCATION &&
				    !(flags & DRM_BUDDY_RANGE_ALLOCATION))
					/*
					 * Try contiguous block allocation through
					 * try harder method
					 */
					return __alloc_contig_try_harder(mm,
									 original_size,
									 original_min_size,
									 blocks);
				err = -ENOSPC;
				goto err_free;
			}
		} while (1);

		mark_allocated(block);
		mm->avail -= drm_buddy_block_size(mm, block);
		kmemleak_update_trace(block);
		list_add_tail(&block->link, &allocated);

		pages -= BIT(order);

		if (!pages)
			break;
	} while (1);

	/* Trim the allocated block to the required size */
	if (original_size != size) {
		struct list_head *trim_list;
		LIST_HEAD(temp);
		u64 trim_size;

		trim_list = &allocated;
		trim_size = original_size;

		/*
		 * With multiple blocks only the tail block carries the
		 * rounded-up excess, so trim just that one.
		 */
		if (!list_is_singular(&allocated)) {
			block = list_last_entry(&allocated, typeof(*block), link);
			list_move(&block->link, &temp);
			trim_list = &temp;
			trim_size = drm_buddy_block_size(mm, block) -
				(size - original_size);
		}

		drm_buddy_block_trim(mm,
				     trim_size,
				     trim_list);

		if (!list_empty(&temp))
			list_splice_tail(trim_list, &allocated);
	}

	list_splice_tail(&allocated, blocks);
	return 0;

err_free:
	drm_buddy_free_list(mm, &allocated);
	return err;
}
EXPORT_SYMBOL(drm_buddy_alloc_blocks);
853
/**
 * drm_buddy_block_print - print block information
 *
 * @mm: DRM buddy manager
 * @block: DRM buddy block
 * @p: DRM printer to use
 */
void drm_buddy_block_print(struct drm_buddy *mm,
			   struct drm_buddy_block *block,
			   struct drm_printer *p)
{
	u64 start = drm_buddy_block_offset(block);
	u64 size = drm_buddy_block_size(mm, block);

	/* Printed as start-end (end exclusive), then the byte size. */
	drm_printf(p, "%#018llx-%#018llx: %llu\n", start, start + size, size);
}
EXPORT_SYMBOL(drm_buddy_block_print);
871
/**
 * drm_buddy_print - print allocator state
 *
 * @mm: DRM buddy manager
 * @p: DRM printer to use
 */
void drm_buddy_print(struct drm_buddy *mm, struct drm_printer *p)
{
	int order;

	drm_printf(p, "chunk_size: %lluKiB, total: %lluMiB, free: %lluMiB\n",
		   mm->chunk_size >> 10, mm->size >> 20, mm->avail >> 20);

	/* One summary line per order, largest first. */
	for (order = mm->max_order; order >= 0; order--) {
		struct drm_buddy_block *block;
		u64 count = 0, free;

		list_for_each_entry(block, &mm->free_list[order], link) {
			BUG_ON(!drm_buddy_block_is_free(block));
			count++;
		}

		drm_printf(p, "order-%2d ", order);

		/* Pick KiB or MiB units depending on magnitude. */
		free = count * (mm->chunk_size << order);
		if (free < SZ_1M)
			drm_printf(p, "free: %8llu KiB", free >> 10);
		else
			drm_printf(p, "free: %8llu MiB", free >> 20);

		drm_printf(p, ", blocks: %llu\n", count);
	}
}
EXPORT_SYMBOL(drm_buddy_print);
906
/* Module teardown: destroy the slab cache backing block descriptors. */
static void drm_buddy_module_exit(void)
{
	kmem_cache_destroy(slab_blocks);
}
911
/* Module init: create the slab cache used for all drm_buddy_block objects. */
static int __init drm_buddy_module_init(void)
{
	slab_blocks = KMEM_CACHE(drm_buddy_block, 0);
	if (!slab_blocks)
		return -ENOMEM;

	return 0;
}
920
921module_init(drm_buddy_module_init);
922module_exit(drm_buddy_module_exit);
923
924MODULE_DESCRIPTION("DRM Buddy Allocator");
925MODULE_LICENSE("Dual MIT/GPL");
926

/* Source: linux/drivers/gpu/drm/drm_buddy.c */