/**************************************************************************
 *
 * Copyright 2006 Tungsten Graphics, Inc., Bismarck, ND., USA.
 * Copyright 2016 Intel Corporation
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the
 * "Software"), to deal in the Software without restriction, including
 * without limitation the rights to use, copy, modify, merge, publish,
 * distribute, sub license, and/or sell copies of the Software, and to
 * permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice (including the
 * next paragraph) shall be included in all copies or substantial portions
 * of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDERS, AUTHORS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM,
 * DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR
 * OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE
 * USE OR OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 **************************************************************************/

/*
 * Generic simple memory manager implementation. Intended to be used as a base
 * class implementation for more advanced memory managers.
 *
 * Note that the algorithm used is quite simple and there might be substantial
 * performance gains if a smarter free list is implemented. Currently it is
 * just an unordered stack of free regions. This could easily be improved by
 * using an RB-tree instead, at least if we expect heavy fragmentation.
 *
 * Aligned allocations can also see substantial improvement.
 *
 * Authors:
 * Thomas Hellström <thomas-at-tungstengraphics-dot-com>
 */

#include <linux/export.h>
#include <linux/interval_tree_generic.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>

#include <drm/drm_mm.h>

/**
 * DOC: Overview
 *
 * drm_mm provides a simple range allocator. The drivers are free to use the
 * resource allocator from the Linux core if it suits them, the upside of
 * drm_mm is that it's in the DRM core, which means that it's easier to extend
 * for some of the crazier special-purpose needs of GPUs.
 *
 * The main data struct is &drm_mm, allocations are tracked in &drm_mm_node.
 * Drivers are free to embed either of them into their own suitable
 * data structures. drm_mm itself will not do any memory allocations of its
 * own, so if drivers choose not to embed nodes they need to still allocate
 * them themselves.
 *
 * The range allocator also supports reservation of preallocated blocks. This
 * is useful for taking over initial mode setting configurations from the
 * firmware, where an object needs to be created which exactly matches the
 * firmware's scanout target. As long as the range is still free it can be
 * inserted anytime after the allocator is initialized, which helps with
 * avoiding circular dependencies in the driver load sequence.
 *
 * drm_mm maintains a stack of most recently freed holes, which of all
 * simplistic datastructures seems to be a fairly decent approach to clustering
 * allocations and avoiding too much fragmentation. This means free space
 * searches are O(num_holes). Given all the fancy features drm_mm supports,
 * something better would be fairly complex, and since gfx thrashing is a
 * fairly steep cliff anyway, this is not a real concern. Removing a node
 * again is O(1).
 *
 * drm_mm supports a few features: Alignment and range restrictions can be
 * supplied. Furthermore every &drm_mm_node has a color value (which is just an
 * opaque unsigned long) which in conjunction with a driver callback can be
 * used to implement sophisticated placement restrictions. The i915 DRM driver
 * uses this to implement guard pages between incompatible caching domains in
 * the graphics TT.
 *
 * Two behaviors are supported for searching and allocating: bottom-up and
 * top-down. The default is bottom-up. Top-down allocation can be used if the
 * memory area has different restrictions, or just to reduce fragmentation.
 *
 * Finally iteration helpers to walk all nodes and all holes are provided, as
 * are some basic allocator dumpers for debugging.
 *
 * Note that this range allocator is not thread-safe, drivers need to protect
 * modifications with their own locking. The idea behind this is that for a
 * full memory manager additional data needs to be protected anyway, hence
 * internal locking would be fully redundant.
 */
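
/*
 * A minimal usage sketch. The "mgr" name and the 4 MiB / 16 KiB / 4 KiB
 * numbers are illustrative assumptions, not taken from any real driver:
 *
 *	struct drm_mm mgr;
 *	struct drm_mm_node node = {};
 *	int err;
 *
 *	drm_mm_init(&mgr, 0, 1024 * 4096);	// manage a 4 MiB range
 *
 *	// search for a 16 KiB hole aligned to 4 KiB, anywhere in the range
 *	err = drm_mm_insert_node_in_range(&mgr, &node, 4 * 4096, 4096, 0,
 *					  0, U64_MAX, DRM_MM_INSERT_BEST);
 *	if (!err) {
 *		// node.start now holds the allocated offset
 *		drm_mm_remove_node(&node);
 *	}
 *
 *	drm_mm_takedown(&mgr);	// the allocator must be empty at this point
 */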

#ifdef CONFIG_DRM_DEBUG_MM
#include <linux/stackdepot.h>

#define STACKDEPTH 32
#define BUFSZ 4096

static noinline void save_stack(struct drm_mm_node *node)
{
	unsigned long entries[STACKDEPTH];
	unsigned int n;

	n = stack_trace_save(entries, ARRAY_SIZE(entries), 1);

	/* May be called under spinlock, so avoid sleeping */
	node->stack = stack_depot_save(entries, n, GFP_NOWAIT);
}

static void show_leaks(struct drm_mm *mm)
{
	struct drm_mm_node *node;
	char *buf;

	buf = kmalloc(BUFSZ, GFP_KERNEL);
	if (!buf)
		return;

	list_for_each_entry(node, drm_mm_nodes(mm), node_list) {
		if (!node->stack) {
			DRM_ERROR("node [%08llx + %08llx]: unknown owner\n",
				  node->start, node->size);
			continue;
		}

		stack_depot_snprint(node->stack, buf, BUFSZ, 0);
		DRM_ERROR("node [%08llx + %08llx]: inserted at\n%s",
			  node->start, node->size, buf);
	}

	kfree(buf);
}

#undef STACKDEPTH
#undef BUFSZ
#else
static void save_stack(struct drm_mm_node *node) { }
static void show_leaks(struct drm_mm *mm) { }
#endif


#define START(node) ((node)->start)
#define LAST(node) ((node)->start + (node)->size - 1)

INTERVAL_TREE_DEFINE(struct drm_mm_node, rb,
		     u64, __subtree_last,
		     START, LAST, static inline, drm_mm_interval_tree)

struct drm_mm_node *
__drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last)
{
	return drm_mm_interval_tree_iter_first((struct rb_root_cached *)&mm->interval_tree,
					       start, last) ?: (struct drm_mm_node *)&mm->head_node;
}
EXPORT_SYMBOL(__drm_mm_interval_first);

static void drm_mm_interval_tree_add_node(struct drm_mm_node *hole_node,
					  struct drm_mm_node *node)
{
	struct drm_mm *mm = hole_node->mm;
	struct rb_node **link, *rb;
	struct drm_mm_node *parent;
	bool leftmost;

	node->__subtree_last = LAST(node);

	if (drm_mm_node_allocated(hole_node)) {
		rb = &hole_node->rb;
		while (rb) {
			parent = rb_entry(rb, struct drm_mm_node, rb);
			if (parent->__subtree_last >= node->__subtree_last)
				break;

			parent->__subtree_last = node->__subtree_last;
			rb = rb_parent(rb);
		}

		rb = &hole_node->rb;
		link = &hole_node->rb.rb_right;
		leftmost = false;
	} else {
		rb = NULL;
		link = &mm->interval_tree.rb_root.rb_node;
		leftmost = true;
	}

	while (*link) {
		rb = *link;
		parent = rb_entry(rb, struct drm_mm_node, rb);
		if (parent->__subtree_last < node->__subtree_last)
			parent->__subtree_last = node->__subtree_last;
		if (node->start < parent->start) {
			link = &parent->rb.rb_left;
		} else {
			link = &parent->rb.rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&node->rb, rb, link);
	rb_insert_augmented_cached(&node->rb, &mm->interval_tree, leftmost,
				   &drm_mm_interval_tree_augment);
}

#define HOLE_SIZE(NODE) ((NODE)->hole_size)
#define HOLE_ADDR(NODE) (__drm_mm_hole_node_start(NODE))

static u64 rb_to_hole_size(struct rb_node *rb)
{
	return rb_entry(rb, struct drm_mm_node, rb_hole_size)->hole_size;
}

static void insert_hole_size(struct rb_root_cached *root,
			     struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_root.rb_node, *rb = NULL;
	u64 x = node->hole_size;
	bool first = true;

	while (*link) {
		rb = *link;
		if (x > rb_to_hole_size(rb)) {
			link = &rb->rb_left;
		} else {
			link = &rb->rb_right;
			first = false;
		}
	}

	rb_link_node(&node->rb_hole_size, rb, link);
	rb_insert_color_cached(&node->rb_hole_size, root, first);
}

RB_DECLARE_CALLBACKS_MAX(static, augment_callbacks,
			 struct drm_mm_node, rb_hole_addr,
			 u64, subtree_max_hole, HOLE_SIZE)

static void insert_hole_addr(struct rb_root *root, struct drm_mm_node *node)
{
	struct rb_node **link = &root->rb_node, *rb_parent = NULL;
	u64 start = HOLE_ADDR(node), subtree_max_hole = node->subtree_max_hole;
	struct drm_mm_node *parent;

	while (*link) {
		rb_parent = *link;
		parent = rb_entry(rb_parent, struct drm_mm_node, rb_hole_addr);
		if (parent->subtree_max_hole < subtree_max_hole)
			parent->subtree_max_hole = subtree_max_hole;
		if (start < HOLE_ADDR(parent))
			link = &parent->rb_hole_addr.rb_left;
		else
			link = &parent->rb_hole_addr.rb_right;
	}

	rb_link_node(&node->rb_hole_addr, rb_parent, link);
	rb_insert_augmented(&node->rb_hole_addr, root, &augment_callbacks);
}

static void add_hole(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;

	node->hole_size =
		__drm_mm_hole_node_end(node) - __drm_mm_hole_node_start(node);
	node->subtree_max_hole = node->hole_size;
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	insert_hole_size(&mm->holes_size, node);
	insert_hole_addr(&mm->holes_addr, node);

	list_add(&node->hole_stack, &mm->hole_stack);
}

static void rm_hole(struct drm_mm_node *node)
{
	DRM_MM_BUG_ON(!drm_mm_hole_follows(node));

	list_del(&node->hole_stack);
	rb_erase_cached(&node->rb_hole_size, &node->mm->holes_size);
	rb_erase_augmented(&node->rb_hole_addr, &node->mm->holes_addr,
			   &augment_callbacks);
	node->hole_size = 0;
	node->subtree_max_hole = 0;

	DRM_MM_BUG_ON(drm_mm_hole_follows(node));
}

static inline struct drm_mm_node *rb_hole_size_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_size);
}

static inline struct drm_mm_node *rb_hole_addr_to_node(struct rb_node *rb)
{
	return rb_entry_safe(rb, struct drm_mm_node, rb_hole_addr);
}

static struct drm_mm_node *best_hole(struct drm_mm *mm, u64 size)
{
	struct rb_node *rb = mm->holes_size.rb_root.rb_node;
	struct drm_mm_node *best = NULL;

	do {
		struct drm_mm_node *node =
			rb_entry(rb, struct drm_mm_node, rb_hole_size);

		if (size <= node->hole_size) {
			best = node;
			rb = rb->rb_right;
		} else {
			rb = rb->rb_left;
		}
	} while (rb);

	return best;
}

static bool usable_hole_addr(struct rb_node *rb, u64 size)
{
	return rb && rb_hole_addr_to_node(rb)->subtree_max_hole >= size;
}

static struct drm_mm_node *find_hole_addr(struct drm_mm *mm, u64 addr, u64 size)
{
	struct rb_node *rb = mm->holes_addr.rb_node;
	struct drm_mm_node *node = NULL;

	while (rb) {
		u64 hole_start;

		if (!usable_hole_addr(rb, size))
			break;

		node = rb_hole_addr_to_node(rb);
		hole_start = __drm_mm_hole_node_start(node);

		if (addr < hole_start)
			rb = node->rb_hole_addr.rb_left;
		else if (addr > hole_start + node->hole_size)
			rb = node->rb_hole_addr.rb_right;
		else
			break;
	}

	return node;
}

static struct drm_mm_node *
first_hole(struct drm_mm *mm,
	   u64 start, u64 end, u64 size,
	   enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return best_hole(mm, size);

	case DRM_MM_INSERT_LOW:
		return find_hole_addr(mm, start, size);

	case DRM_MM_INSERT_HIGH:
		return find_hole_addr(mm, end, size);

	case DRM_MM_INSERT_EVICT:
		return list_first_entry_or_null(&mm->hole_stack,
						struct drm_mm_node,
						hole_stack);
	}
}

/**
 * DECLARE_NEXT_HOLE_ADDR - macro to declare next hole functions
 * @name: name of function to declare
 * @first: first rb member to traverse (either rb_left or rb_right).
 * @last: last rb member to traverse (either rb_right or rb_left).
 *
 * This macro declares a function to return the next hole of the addr rb tree.
 * While traversing the tree we take the searched size into account and only
 * visit branches with potentially big enough holes.
 */

#define DECLARE_NEXT_HOLE_ADDR(name, first, last)			\
static struct drm_mm_node *name(struct drm_mm_node *entry, u64 size)	\
{									\
	struct rb_node *parent, *node = &entry->rb_hole_addr;		\
									\
	if (!entry || RB_EMPTY_NODE(node))				\
		return NULL;						\
									\
	if (usable_hole_addr(node->first, size)) {			\
		node = node->first;					\
		while (usable_hole_addr(node->last, size))		\
			node = node->last;				\
		return rb_hole_addr_to_node(node);			\
	}								\
									\
	while ((parent = rb_parent(node)) && node == parent->first)	\
		node = parent;						\
									\
	return rb_hole_addr_to_node(parent);				\
}

DECLARE_NEXT_HOLE_ADDR(next_hole_high_addr, rb_left, rb_right)
DECLARE_NEXT_HOLE_ADDR(next_hole_low_addr, rb_right, rb_left)

static struct drm_mm_node *
next_hole(struct drm_mm *mm,
	  struct drm_mm_node *node,
	  u64 size,
	  enum drm_mm_insert_mode mode)
{
	switch (mode) {
	default:
	case DRM_MM_INSERT_BEST:
		return rb_hole_size_to_node(rb_prev(&node->rb_hole_size));

	case DRM_MM_INSERT_LOW:
		return next_hole_low_addr(node, size);

	case DRM_MM_INSERT_HIGH:
		return next_hole_high_addr(node, size);

	case DRM_MM_INSERT_EVICT:
		node = list_next_entry(node, hole_stack);
		return &node->hole_stack == &mm->hole_stack ? NULL : node;
	}
}

/**
 * drm_mm_reserve_node - insert a pre-initialized node
 * @mm: drm_mm allocator to insert @node into
 * @node: drm_mm_node to insert
 *
 * This function inserts an already set-up &drm_mm_node into the allocator,
 * meaning that start, size and color must be set by the caller. All other
 * fields must be cleared to 0. This is useful to initialize the allocator with
 * preallocated objects which must be set up before the range allocator can be
 * set up, e.g. when taking over a firmware framebuffer.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no hole where @node is.
 */
int drm_mm_reserve_node(struct drm_mm *mm, struct drm_mm_node *node)
{
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 adj_start, adj_end;
	u64 end;

	end = node->start + node->size;
	if (unlikely(end <= node->start))
		return -ENOSPC;

	/* Find the relevant hole to add our node to */
	hole = find_hole_addr(mm, node->start, 0);
	if (!hole)
		return -ENOSPC;

	adj_start = hole_start = __drm_mm_hole_node_start(hole);
	adj_end = hole_end = hole_start + hole->hole_size;

	if (mm->color_adjust)
		mm->color_adjust(hole, node->color, &adj_start, &adj_end);

	if (adj_start > node->start || adj_end < end)
		return -ENOSPC;

	node->mm = mm;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
	list_add(&node->node_list, &hole->node_list);
	drm_mm_interval_tree_add_node(hole, node);
	node->hole_size = 0;

	rm_hole(hole);
	if (node->start > hole_start)
		add_hole(hole);
	if (end < hole_end)
		add_hole(node);

	save_stack(node);
	return 0;
}
EXPORT_SYMBOL(drm_mm_reserve_node);
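
/*
 * Example: taking over a firmware framebuffer. A hypothetical sketch only;
 * "fb_base" and "fb_size" are made-up names, not from any real driver:
 *
 *	struct drm_mm_node fw_fb = {};	// all other fields must be zero
 *
 *	fw_fb.start = fb_base;	// offset the firmware is scanning out from
 *	fw_fb.size = fb_size;
 *	if (drm_mm_reserve_node(&mgr, &fw_fb))
 *		;	// range already in use, handle the conflict
 */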

static u64 rb_to_hole_size_or_zero(struct rb_node *rb)
{
	return rb ? rb_to_hole_size(rb) : 0;
}

/**
 * drm_mm_insert_node_in_range - ranged search for space and insert @node
 * @mm: drm_mm to allocate from
 * @node: preallocated node to insert
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for this node
 * @range_start: start of the allowed range for this node
 * @range_end: end of the allowed range for this node
 * @mode: fine-tune the allocation search and placement
 *
 * The preallocated @node must be cleared to 0.
 *
 * Returns:
 * 0 on success, -ENOSPC if there's no suitable hole.
 */
int drm_mm_insert_node_in_range(struct drm_mm * const mm,
				struct drm_mm_node * const node,
				u64 size, u64 alignment,
				unsigned long color,
				u64 range_start, u64 range_end,
				enum drm_mm_insert_mode mode)
{
	struct drm_mm_node *hole;
	u64 remainder_mask;
	bool once;

	DRM_MM_BUG_ON(range_start > range_end);

	if (unlikely(size == 0 || range_end - range_start < size))
		return -ENOSPC;

	if (rb_to_hole_size_or_zero(rb_first_cached(&mm->holes_size)) < size)
		return -ENOSPC;

	if (alignment <= 1)
		alignment = 0;

	once = mode & DRM_MM_INSERT_ONCE;
	mode &= ~DRM_MM_INSERT_ONCE;

	remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	for (hole = first_hole(mm, range_start, range_end, size, mode);
	     hole;
	     hole = once ? NULL : next_hole(mm, hole, size, mode)) {
		u64 hole_start = __drm_mm_hole_node_start(hole);
		u64 hole_end = hole_start + hole->hole_size;
		u64 adj_start, adj_end;
		u64 col_start, col_end;

		if (mode == DRM_MM_INSERT_LOW && hole_start >= range_end)
			break;

		if (mode == DRM_MM_INSERT_HIGH && hole_end <= range_start)
			break;

		col_start = hole_start;
		col_end = hole_end;
		if (mm->color_adjust)
			mm->color_adjust(hole, color, &col_start, &col_end);

		adj_start = max(col_start, range_start);
		adj_end = min(col_end, range_end);

		if (adj_end <= adj_start || adj_end - adj_start < size)
			continue;

		if (mode == DRM_MM_INSERT_HIGH)
			adj_start = adj_end - size;

		if (alignment) {
			u64 rem;

			if (likely(remainder_mask))
				rem = adj_start & remainder_mask;
			else
				div64_u64_rem(adj_start, alignment, &rem);
			if (rem) {
				adj_start -= rem;
				if (mode != DRM_MM_INSERT_HIGH)
					adj_start += alignment;

				if (adj_start < max(col_start, range_start) ||
				    min(col_end, range_end) - adj_start < size)
					continue;

				if (adj_end <= adj_start ||
				    adj_end - adj_start < size)
					continue;
			}
		}

		node->mm = mm;
		node->size = size;
		node->start = adj_start;
		node->color = color;
		node->hole_size = 0;

		__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
		list_add(&node->node_list, &hole->node_list);
		drm_mm_interval_tree_add_node(hole, node);

		rm_hole(hole);
		if (adj_start > hole_start)
			add_hole(hole);
		if (adj_start + size < hole_end)
			add_hole(node);

		save_stack(node);
		return 0;
	}

	return -ENOSPC;
}
EXPORT_SYMBOL(drm_mm_insert_node_in_range);
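
/*
 * Example: a minimal sketch of a range-restricted, aligned, top-down
 * allocation. The 1 MiB window and 64 KiB alignment are illustrative
 * assumptions:
 *
 *	err = drm_mm_insert_node_in_range(&mgr, &node, size, SZ_64K, 0,
 *					  0, SZ_1M, DRM_MM_INSERT_HIGH);
 */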

static inline bool drm_mm_node_scanned_block(const struct drm_mm_node *node)
{
	return test_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
}

/**
 * drm_mm_remove_node - Remove a memory node from the allocator.
 * @node: drm_mm_node to remove
 *
 * This just removes a node from its drm_mm allocator. The node does not need
 * to be cleared again before it can be re-inserted into this or any other
 * drm_mm allocator. It is a bug to call this function on an unallocated node.
 */
void drm_mm_remove_node(struct drm_mm_node *node)
{
	struct drm_mm *mm = node->mm;
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));

	prev_node = list_prev_entry(node, node_list);

	if (drm_mm_hole_follows(node))
		rm_hole(node);

	drm_mm_interval_tree_remove(node, &mm->interval_tree);
	list_del(&node->node_list);

	if (drm_mm_hole_follows(prev_node))
		rm_hole(prev_node);
	add_hole(prev_node);

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &node->flags);
}
EXPORT_SYMBOL(drm_mm_remove_node);

/**
 * drm_mm_replace_node - move an allocation from @old to @new
 * @old: drm_mm_node to remove from the allocator
 * @new: drm_mm_node which should inherit @old's allocation
 *
 * This is useful when drivers embed the drm_mm_node structure and hence can't
 * move allocations by reassigning pointers. It's a combination of remove
 * and insert with the guarantee that the allocation start will match.
 */
void drm_mm_replace_node(struct drm_mm_node *old, struct drm_mm_node *new)
{
	struct drm_mm *mm = old->mm;

	DRM_MM_BUG_ON(!drm_mm_node_allocated(old));

	*new = *old;

	__set_bit(DRM_MM_NODE_ALLOCATED_BIT, &new->flags);
	list_replace(&old->node_list, &new->node_list);
	rb_replace_node_cached(&old->rb, &new->rb, &mm->interval_tree);

	if (drm_mm_hole_follows(old)) {
		list_replace(&old->hole_stack, &new->hole_stack);
		rb_replace_node_cached(&old->rb_hole_size,
				       &new->rb_hole_size,
				       &mm->holes_size);
		rb_replace_node(&old->rb_hole_addr,
				&new->rb_hole_addr,
				&mm->holes_addr);
	}

	clear_bit_unlock(DRM_MM_NODE_ALLOCATED_BIT, &old->flags);
}
EXPORT_SYMBOL(drm_mm_replace_node);
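
/*
 * Example: a hypothetical sketch where "new_obj" takes over "old_obj"'s
 * placement; both objects embed a drm_mm_node as "vma_node" (made-up names):
 *
 *	drm_mm_replace_node(&old_obj->vma_node, &new_obj->vma_node);
 *	// old_obj->vma_node is now unallocated; new_obj->vma_node owns the
 *	// same range and can later be released with drm_mm_remove_node()
 */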

/**
 * DOC: lru scan roster
 *
 * Very often GPUs need to have contiguous allocations for a given object. When
 * evicting objects to make space for a new one it is therefore not most
 * efficient to simply select all objects from the tail of an LRU until there's
 * a suitable hole: Especially for big objects or nodes that otherwise have
 * special allocation constraints there's a good chance we evict lots of
 * (smaller) objects unnecessarily.
 *
 * The DRM range allocator supports this use-case through the scanning
 * interfaces. First a scan operation needs to be initialized with
 * drm_mm_scan_init() or drm_mm_scan_init_with_range(). The driver adds
 * objects to the roster, probably by walking an LRU list, but this can be
 * freely implemented. Eviction candidates are added using
 * drm_mm_scan_add_block() until a suitable hole is found or there are no
 * further evictable objects. Eviction roster metadata is tracked in &struct
 * drm_mm_scan.
 *
 * The driver must walk through all objects again in exactly the reverse
 * order to restore the allocator state. Note that while the allocator is used
 * in the scan mode no other operation is allowed.
 *
 * Finally the driver evicts all objects selected (drm_mm_scan_remove_block()
 * reported true) in the scan, and any overlapping nodes after color adjustment
 * (drm_mm_scan_color_evict()). Adding and removing an object is O(1), and
 * since freeing a node is also O(1) the overall complexity is
 * O(scanned_objects). So like the free stack which needs to be walked before a
 * scan operation even begins this is linear in the number of objects. It
 * doesn't seem to hurt too badly.
 */
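
/*
 * A rough sketch of the scanning workflow. The LRU list, the "scan_list"
 * bookkeeping and the evict() helper are illustrative assumptions, not part
 * of drm_mm:
 *
 *	struct drm_mm_scan scan;
 *	bool found = false;
 *
 *	drm_mm_scan_init(&scan, &mgr, size, align, 0, DRM_MM_INSERT_BEST);
 *	list_for_each_entry(obj, &lru, lru_link) {
 *		list_add(&obj->scan_link, &scan_list);
 *		if (drm_mm_scan_add_block(&scan, &obj->node)) {
 *			found = true;
 *			break;
 *		}
 *	}
 *
 *	// list_add() prepends, so walking scan_list head-first visits the
 *	// blocks in exactly the reverse order they were added, as required
 *	list_for_each_entry_safe(obj, next, &scan_list, scan_link) {
 *		list_del(&obj->scan_link);
 *		if (drm_mm_scan_remove_block(&scan, &obj->node))
 *			evict(obj);	// eventually calls drm_mm_remove_node()
 *	}
 */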

/**
 * drm_mm_scan_init_with_range - initialize range-restricted lru scanning
 * @scan: scan state
 * @mm: drm_mm to scan
 * @size: size of the allocation
 * @alignment: alignment of the allocation
 * @color: opaque tag value to use for the allocation
 * @start: start of the allowed range for the allocation
 * @end: end of the allowed range for the allocation
 * @mode: fine-tune the allocation search and placement
 *
 * This simply sets up the scanning routines with the parameters for the
 * desired hole.
 *
 * Warning:
 * As long as the scan list is non-empty, no other operations than
 * adding/removing nodes to/from the scan list are allowed.
 */
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
				 struct drm_mm *mm,
				 u64 size,
				 u64 alignment,
				 unsigned long color,
				 u64 start,
				 u64 end,
				 enum drm_mm_insert_mode mode)
{
	DRM_MM_BUG_ON(start >= end);
	DRM_MM_BUG_ON(!size || size > end - start);
	DRM_MM_BUG_ON(mm->scan_active);

	scan->mm = mm;

	if (alignment <= 1)
		alignment = 0;

	scan->color = color;
	scan->alignment = alignment;
	scan->remainder_mask = is_power_of_2(alignment) ? alignment - 1 : 0;
	scan->size = size;
	scan->mode = mode;

	DRM_MM_BUG_ON(end <= start);
	scan->range_start = start;
	scan->range_end = end;

	scan->hit_start = U64_MAX;
	scan->hit_end = 0;
}
EXPORT_SYMBOL(drm_mm_scan_init_with_range);

/**
 * drm_mm_scan_add_block - add a node to the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to add
 *
 * Add a node to the scan list that might be freed to make space for the
 * desired hole.
 *
 * Returns:
 * True if a hole has been found, false otherwise.
 */
bool drm_mm_scan_add_block(struct drm_mm_scan *scan,
			   struct drm_mm_node *node)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;
	u64 col_start, col_end;
	u64 adj_start, adj_end;

	DRM_MM_BUG_ON(node->mm != mm);
	DRM_MM_BUG_ON(!drm_mm_node_allocated(node));
	DRM_MM_BUG_ON(drm_mm_node_scanned_block(node));
	__set_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);
	mm->scan_active++;

	/* Remove this block from the node_list so that we enlarge the hole
	 * (distance between the end of our previous node and the start of
	 * our next), without poisoning the link so that we can restore it
	 * later in drm_mm_scan_remove_block().
	 */
	hole = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(hole, node_list) != node);
	__list_del_entry(&node->node_list);

	hole_start = __drm_mm_hole_node_start(hole);
	hole_end = __drm_mm_hole_node_end(hole);

	col_start = hole_start;
	col_end = hole_end;
	if (mm->color_adjust)
		mm->color_adjust(hole, scan->color, &col_start, &col_end);

	adj_start = max(col_start, scan->range_start);
	adj_end = min(col_end, scan->range_end);
	if (adj_end <= adj_start || adj_end - adj_start < scan->size)
		return false;

	if (scan->mode == DRM_MM_INSERT_HIGH)
		adj_start = adj_end - scan->size;

	if (scan->alignment) {
		u64 rem;

		if (likely(scan->remainder_mask))
			rem = adj_start & scan->remainder_mask;
		else
			div64_u64_rem(adj_start, scan->alignment, &rem);
		if (rem) {
			adj_start -= rem;
			if (scan->mode != DRM_MM_INSERT_HIGH)
				adj_start += scan->alignment;
			if (adj_start < max(col_start, scan->range_start) ||
			    min(col_end, scan->range_end) - adj_start < scan->size)
				return false;

			if (adj_end <= adj_start ||
			    adj_end - adj_start < scan->size)
				return false;
		}
	}

	scan->hit_start = adj_start;
	scan->hit_end = adj_start + scan->size;

	DRM_MM_BUG_ON(scan->hit_start >= scan->hit_end);
	DRM_MM_BUG_ON(scan->hit_start < hole_start);
	DRM_MM_BUG_ON(scan->hit_end > hole_end);

	return true;
}
EXPORT_SYMBOL(drm_mm_scan_add_block);

/**
 * drm_mm_scan_remove_block - remove a node from the scan list
 * @scan: the active drm_mm scanner
 * @node: drm_mm_node to remove
 *
 * Nodes **must** be removed in exactly the reverse order from the scan list as
 * they have been added (e.g. using list_add() as they are added and then
 * list_for_each() over that eviction list to remove), otherwise the internal
 * state of the memory manager will be corrupted.
 *
 * When the scan list is empty, the selected memory nodes can be freed. An
 * immediately following drm_mm_insert_node_in_range() with
 * DRM_MM_INSERT_EVICT will then return the just freed block (because it's at
 * the top of the hole_stack list).
 *
 * Returns:
 * True if this block should be evicted, false otherwise. Will always
 * return false when no hole has been found.
 */
bool drm_mm_scan_remove_block(struct drm_mm_scan *scan,
			      struct drm_mm_node *node)
{
	struct drm_mm_node *prev_node;

	DRM_MM_BUG_ON(node->mm != scan->mm);
	DRM_MM_BUG_ON(!drm_mm_node_scanned_block(node));
	__clear_bit(DRM_MM_NODE_SCANNED_BIT, &node->flags);

	DRM_MM_BUG_ON(!node->mm->scan_active);
	node->mm->scan_active--;

	/* During drm_mm_scan_add_block() we decoupled this node leaving
	 * its pointers intact. Now that the caller is walking back along
	 * the eviction list we can restore this block into its rightful
	 * place on the full node_list. To confirm that the caller is walking
	 * backwards correctly we check that prev_node->next == node->next,
	 * i.e. both believe the same node should be on the other side of the
	 * hole.
	 */
	prev_node = list_prev_entry(node, node_list);
	DRM_MM_BUG_ON(list_next_entry(prev_node, node_list) !=
		      list_next_entry(node, node_list));
	list_add(&node->node_list, &prev_node->node_list);

	return (node->start + node->size > scan->hit_start &&
		node->start < scan->hit_end);
}
EXPORT_SYMBOL(drm_mm_scan_remove_block);

/**
 * drm_mm_scan_color_evict - evict overlapping nodes on either side of hole
 * @scan: drm_mm scan with target hole
 *
 * After completing an eviction scan and removing the selected nodes, we may
 * need to remove a few more nodes from either side of the target hole if
 * mm.color_adjust is being used.
 *
 * Returns:
 * A node to evict, or NULL if there are no overlapping nodes.
 */
struct drm_mm_node *drm_mm_scan_color_evict(struct drm_mm_scan *scan)
{
	struct drm_mm *mm = scan->mm;
	struct drm_mm_node *hole;
	u64 hole_start, hole_end;

	DRM_MM_BUG_ON(list_empty(&mm->hole_stack));

	if (!mm->color_adjust)
		return NULL;

	/*
	 * The hole found during scanning should ideally be the first element
	 * in the hole_stack list, but due to side-effects in the driver it
	 * may not be.
	 */
	list_for_each_entry(hole, &mm->hole_stack, hole_stack) {
		hole_start = __drm_mm_hole_node_start(hole);
		hole_end = hole_start + hole->hole_size;

		if (hole_start <= scan->hit_start &&
		    hole_end >= scan->hit_end)
			break;
	}

	/* We should only be called after we found the hole previously */
	DRM_MM_BUG_ON(&hole->hole_stack == &mm->hole_stack);
	if (unlikely(&hole->hole_stack == &mm->hole_stack))
		return NULL;

	DRM_MM_BUG_ON(hole_start > scan->hit_start);
	DRM_MM_BUG_ON(hole_end < scan->hit_end);

	mm->color_adjust(hole, scan->color, &hole_start, &hole_end);
	if (hole_start > scan->hit_start)
		return hole;
	if (hole_end < scan->hit_end)
		return list_next_entry(hole, node_list);

	return NULL;
}
EXPORT_SYMBOL(drm_mm_scan_color_evict);

/**
 * drm_mm_init - initialize a drm-mm allocator
 * @mm: the drm_mm structure to initialize
 * @start: start of the range managed by @mm
 * @size: size of the range managed by @mm
 *
 * Note that @mm must be cleared to 0 before calling this function.
 */
void drm_mm_init(struct drm_mm *mm, u64 start, u64 size)
{
	DRM_MM_BUG_ON(start + size <= start);

	mm->color_adjust = NULL;

	INIT_LIST_HEAD(&mm->hole_stack);
	mm->interval_tree = RB_ROOT_CACHED;
	mm->holes_size = RB_ROOT_CACHED;
	mm->holes_addr = RB_ROOT;

	/* Clever trick to avoid a special case in the free hole tracking. */
	INIT_LIST_HEAD(&mm->head_node.node_list);
	mm->head_node.flags = 0;
	mm->head_node.mm = mm;
	mm->head_node.start = start + size;
	mm->head_node.size = -size;
	add_hole(&mm->head_node);

	mm->scan_active = 0;

#ifdef CONFIG_DRM_DEBUG_MM
	stack_depot_init();
#endif
}
EXPORT_SYMBOL(drm_mm_init);

/**
 * drm_mm_takedown - clean up a drm_mm allocator
 * @mm: drm_mm allocator to clean up
 *
 * Note that it is a bug to call this function on an allocator which is not
 * clean.
 */
void drm_mm_takedown(struct drm_mm *mm)
{
	if (WARN(!drm_mm_clean(mm),
		 "Memory manager not clean during takedown.\n"))
		show_leaks(mm);
}
EXPORT_SYMBOL(drm_mm_takedown);

static u64 drm_mm_dump_hole(struct drm_printer *p, const struct drm_mm_node *entry)
{
	u64 start, size;

	size = entry->hole_size;
	if (size) {
		start = drm_mm_hole_node_start(entry);
		drm_printf(p, "%#018llx-%#018llx: %llu: free\n",
			   start, start + size, size);
	}

	return size;
}

/**
 * drm_mm_print - print allocator state
 * @mm: drm_mm allocator to print
 * @p: DRM printer to use
 */
void drm_mm_print(const struct drm_mm *mm, struct drm_printer *p)
{
	const struct drm_mm_node *entry;
	u64 total_used = 0, total_free = 0, total = 0;

	total_free += drm_mm_dump_hole(p, &mm->head_node);

	drm_mm_for_each_node(entry, mm) {
		drm_printf(p, "%#018llx-%#018llx: %llu: used\n", entry->start,
			   entry->start + entry->size, entry->size);
		total_used += entry->size;
		total_free += drm_mm_dump_hole(p, entry);
	}
	total = total_free + total_used;

	drm_printf(p, "total: %llu, used %llu free %llu\n", total,
		   total_used, total_free);
}
EXPORT_SYMBOL(drm_mm_print);
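
/*
 * Example: a sketch for a debugfs show callback, where "m" is the seq_file
 * handed to the callback and "mm" is the driver's allocator:
 *
 *	struct drm_printer p = drm_seq_file_printer(m);
 *
 *	drm_mm_print(&mm, &p);
 */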
1042 | |