1 | // SPDX-License-Identifier: GPL-2.0 OR MIT |
2 | /* |
3 | * Copyright (c) 2022 Red Hat. |
4 | * |
5 | * Permission is hereby granted, free of charge, to any person obtaining a |
6 | * copy of this software and associated documentation files (the "Software"), |
7 | * to deal in the Software without restriction, including without limitation |
8 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
9 | * and/or sell copies of the Software, and to permit persons to whom the |
10 | * Software is furnished to do so, subject to the following conditions: |
11 | * |
12 | * The above copyright notice and this permission notice shall be included in |
13 | * all copies or substantial portions of the Software. |
14 | * |
15 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
16 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
17 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
18 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
19 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
20 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
21 | * OTHER DEALINGS IN THE SOFTWARE. |
22 | * |
23 | * Authors: |
24 | * Danilo Krummrich <dakr@redhat.com> |
25 | * |
26 | */ |
27 | |
28 | #include <drm/drm_gpuvm.h> |
29 | |
30 | #include <linux/interval_tree_generic.h> |
31 | #include <linux/mm.h> |
32 | |
33 | /** |
34 | * DOC: Overview |
35 | * |
 * The DRM GPU VA Manager, represented by struct drm_gpuvm, keeps track of a
37 | * GPU's virtual address (VA) space and manages the corresponding virtual |
38 | * mappings represented by &drm_gpuva objects. It also keeps track of the |
39 | * mapping's backing &drm_gem_object buffers. |
40 | * |
41 | * &drm_gem_object buffers maintain a list of &drm_gpuva objects representing |
42 | * all existent GPU VA mappings using this &drm_gem_object as backing buffer. |
43 | * |
44 | * GPU VAs can be flagged as sparse, such that drivers may use GPU VAs to also |
45 | * keep track of sparse PTEs in order to support Vulkan 'Sparse Resources'. |
46 | * |
47 | * The GPU VA manager internally uses a rb-tree to manage the |
48 | * &drm_gpuva mappings within a GPU's virtual address space. |
49 | * |
50 | * The &drm_gpuvm structure contains a special &drm_gpuva representing the |
51 | * portion of VA space reserved by the kernel. This node is initialized together |
52 | * with the GPU VA manager instance and removed when the GPU VA manager is |
53 | * destroyed. |
54 | * |
 * In a typical application, drivers would embed struct drm_gpuvm and
 * struct drm_gpuva within their own driver specific structures; this way
 * neither the &drm_gpuvm itself nor the &drm_gpuva entries require any
 * memory allocations of their own.
59 | * |
60 | * The data structures needed to store &drm_gpuvas within the &drm_gpuvm are |
61 | * contained within struct drm_gpuva already. Hence, for inserting &drm_gpuva |
62 | * entries from within dma-fence signalling critical sections it is enough to |
63 | * pre-allocate the &drm_gpuva structures. |
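 *
 * For instance, a driver might embed these structures as sketched below; the
 * driver_vm and driver_vma type names are made-up placeholders::
 *
 *	struct driver_vm {
 *		struct drm_gpuvm gpuvm;
 *		// driver specific VM state
 *	};
 *
 *	struct driver_vma {
 *		struct drm_gpuva gpuva;
 *		// driver specific per-mapping state
 *	};
 *
 *	// Pre-allocated outside the fence signalling critical path and
 *	// inserted later with drm_gpuva_insert() or drm_gpuva_map().
 *	struct driver_vma *vma = kzalloc(sizeof(*vma), GFP_KERNEL);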
64 | */ |
65 | |
66 | /** |
67 | * DOC: Split and Merge |
68 | * |
69 | * Besides its capability to manage and represent a GPU VA space, the |
70 | * GPU VA manager also provides functions to let the &drm_gpuvm calculate a |
71 | * sequence of operations to satisfy a given map or unmap request. |
72 | * |
73 | * Therefore the DRM GPU VA manager provides an algorithm implementing splitting |
74 | * and merging of existent GPU VA mappings with the ones that are requested to |
75 | * be mapped or unmapped. This feature is required by the Vulkan API to |
76 | * implement Vulkan 'Sparse Memory Bindings' - drivers UAPIs often refer to this |
77 | * as VM BIND. |
78 | * |
79 | * Drivers can call drm_gpuvm_sm_map() to receive a sequence of callbacks |
80 | * containing map, unmap and remap operations for a given newly requested |
81 | * mapping. The sequence of callbacks represents the set of operations to |
82 | * execute in order to integrate the new mapping cleanly into the current state |
83 | * of the GPU VA space. |
84 | * |
85 | * Depending on how the new GPU VA mapping intersects with the existent mappings |
86 | * of the GPU VA space the &drm_gpuvm_ops callbacks contain an arbitrary amount |
87 | * of unmap operations, a maximum of two remap operations and a single map |
88 | * operation. The caller might receive no callback at all if no operation is |
89 | * required, e.g. if the requested mapping already exists in the exact same way. |
90 | * |
91 | * The single map operation represents the original map operation requested by |
92 | * the caller. |
93 | * |
94 | * &drm_gpuva_op_unmap contains a 'keep' field, which indicates whether the |
95 | * &drm_gpuva to unmap is physically contiguous with the original mapping |
 * request. Optionally, if 'keep' is set, drivers may keep the actual page table
 * entries for this &drm_gpuva, adding the missing page table entries only and
 * updating the &drm_gpuvm's view of things accordingly.
99 | * |
100 | * Drivers may do the same optimization, namely delta page table updates, also |
101 | * for remap operations. This is possible since &drm_gpuva_op_remap consists of |
102 | * one unmap operation and one or two map operations, such that drivers can |
103 | * derive the page table update delta accordingly. |
104 | * |
105 | * Note that there can't be more than two existent mappings to split up, one at |
106 | * the beginning and one at the end of the new mapping, hence there is a |
107 | * maximum of two remap operations. |
108 | * |
109 | * Analogous to drm_gpuvm_sm_map() drm_gpuvm_sm_unmap() uses &drm_gpuvm_ops to |
110 | * call back into the driver in order to unmap a range of GPU VA space. The |
 * logic behind this function is way simpler though: For all existent mappings
 * enclosed by the given range unmap operations are created. For mappings which
 * are only partially located within the given range, remap operations are
 * created such that those mappings are split up and re-mapped partially.
115 | * |
116 | * As an alternative to drm_gpuvm_sm_map() and drm_gpuvm_sm_unmap(), |
117 | * drm_gpuvm_sm_map_ops_create() and drm_gpuvm_sm_unmap_ops_create() can be used |
118 | * to directly obtain an instance of struct drm_gpuva_ops containing a list of |
119 | * &drm_gpuva_op, which can be iterated with drm_gpuva_for_each_op(). This list |
120 | * contains the &drm_gpuva_ops analogous to the callbacks one would receive when |
121 | * calling drm_gpuvm_sm_map() or drm_gpuvm_sm_unmap(). While this way requires |
122 | * more memory (to allocate the &drm_gpuva_ops), it provides drivers a way to |
123 | * iterate the &drm_gpuva_op multiple times, e.g. once in a context where memory |
124 | * allocations are possible (e.g. to allocate GPU page tables) and once in the |
125 | * dma-fence signalling critical path. |
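 *
 * A sketch of this two-stage pattern; driver_prepare_pt() and
 * driver_commit_pt() are made-up placeholders for the driver's page table
 * handling::
 *
 *	ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, obj, offset);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	// First iteration: memory allocations are still allowed, e.g. to
 *	// pre-allocate GPU page table memory.
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_prepare_pt(op);
 *
 *	// Second iteration: from the dma-fence signalling critical path,
 *	// without any further allocations.
 *	drm_gpuva_for_each_op(op, ops)
 *		driver_commit_pt(op);
 *
 *	drm_gpuva_ops_free(gpuvm, ops);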
126 | * |
127 | * To update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and |
128 | * drm_gpuva_remove() may be used. These functions can safely be used from |
129 | * &drm_gpuvm_ops callbacks originating from drm_gpuvm_sm_map() or |
130 | * drm_gpuvm_sm_unmap(). However, it might be more convenient to use the |
131 | * provided helper functions drm_gpuva_map(), drm_gpuva_remap() and |
132 | * drm_gpuva_unmap() instead. |
133 | * |
134 | * The following diagram depicts the basic relationships of existent GPU VA |
135 | * mappings, a newly requested mapping and the resulting mappings as implemented |
136 | * by drm_gpuvm_sm_map() - it doesn't cover any arbitrary combinations of these. |
137 | * |
138 | * 1) Requested mapping is identical. Replace it, but indicate the backing PTEs |
139 | * could be kept. |
140 | * |
141 | * :: |
142 | * |
143 | * 0 a 1 |
144 | * old: |-----------| (bo_offset=n) |
145 | * |
146 | * 0 a 1 |
147 | * req: |-----------| (bo_offset=n) |
148 | * |
149 | * 0 a 1 |
150 | * new: |-----------| (bo_offset=n) |
151 | * |
152 | * |
153 | * 2) Requested mapping is identical, except for the BO offset, hence replace |
154 | * the mapping. |
155 | * |
156 | * :: |
157 | * |
158 | * 0 a 1 |
159 | * old: |-----------| (bo_offset=n) |
160 | * |
161 | * 0 a 1 |
162 | * req: |-----------| (bo_offset=m) |
163 | * |
164 | * 0 a 1 |
165 | * new: |-----------| (bo_offset=m) |
166 | * |
167 | * |
168 | * 3) Requested mapping is identical, except for the backing BO, hence replace |
169 | * the mapping. |
170 | * |
171 | * :: |
172 | * |
173 | * 0 a 1 |
174 | * old: |-----------| (bo_offset=n) |
175 | * |
176 | * 0 b 1 |
177 | * req: |-----------| (bo_offset=n) |
178 | * |
179 | * 0 b 1 |
180 | * new: |-----------| (bo_offset=n) |
181 | * |
182 | * |
183 | * 4) Existent mapping is a left aligned subset of the requested one, hence |
184 | * replace the existent one. |
185 | * |
186 | * :: |
187 | * |
188 | * 0 a 1 |
189 | * old: |-----| (bo_offset=n) |
190 | * |
191 | * 0 a 2 |
192 | * req: |-----------| (bo_offset=n) |
193 | * |
194 | * 0 a 2 |
195 | * new: |-----------| (bo_offset=n) |
196 | * |
197 | * .. note:: |
198 | * We expect to see the same result for a request with a different BO |
199 | * and/or non-contiguous BO offset. |
200 | * |
201 | * |
202 | * 5) Requested mapping's range is a left aligned subset of the existent one, |
203 | * but backed by a different BO. Hence, map the requested mapping and split |
204 | * the existent one adjusting its BO offset. |
205 | * |
206 | * :: |
207 | * |
208 | * 0 a 2 |
209 | * old: |-----------| (bo_offset=n) |
210 | * |
211 | * 0 b 1 |
212 | * req: |-----| (bo_offset=n) |
213 | * |
214 | * 0 b 1 a' 2 |
 *	new: |-----|-----| (b.bo_offset=n, a'.bo_offset=n+1)
216 | * |
217 | * .. note:: |
218 | * We expect to see the same result for a request with a different BO |
219 | * and/or non-contiguous BO offset. |
220 | * |
221 | * |
222 | * 6) Existent mapping is a superset of the requested mapping. Split it up, but |
223 | * indicate that the backing PTEs could be kept. |
224 | * |
225 | * :: |
226 | * |
227 | * 0 a 2 |
228 | * old: |-----------| (bo_offset=n) |
229 | * |
230 | * 0 a 1 |
231 | * req: |-----| (bo_offset=n) |
232 | * |
233 | * 0 a 1 a' 2 |
234 | * new: |-----|-----| (a.bo_offset=n, a'.bo_offset=n+1) |
235 | * |
236 | * |
237 | * 7) Requested mapping's range is a right aligned subset of the existent one, |
238 | * but backed by a different BO. Hence, map the requested mapping and split |
239 | * the existent one, without adjusting the BO offset. |
240 | * |
241 | * :: |
242 | * |
243 | * 0 a 2 |
244 | * old: |-----------| (bo_offset=n) |
245 | * |
246 | * 1 b 2 |
247 | * req: |-----| (bo_offset=m) |
248 | * |
249 | * 0 a 1 b 2 |
250 | * new: |-----|-----| (a.bo_offset=n,b.bo_offset=m) |
251 | * |
252 | * |
253 | * 8) Existent mapping is a superset of the requested mapping. Split it up, but |
254 | * indicate that the backing PTEs could be kept. |
255 | * |
256 | * :: |
257 | * |
258 | * 0 a 2 |
259 | * old: |-----------| (bo_offset=n) |
260 | * |
261 | * 1 a 2 |
262 | * req: |-----| (bo_offset=n+1) |
263 | * |
264 | * 0 a' 1 a 2 |
265 | * new: |-----|-----| (a'.bo_offset=n, a.bo_offset=n+1) |
266 | * |
267 | * |
268 | * 9) Existent mapping is overlapped at the end by the requested mapping backed |
269 | * by a different BO. Hence, map the requested mapping and split up the |
270 | * existent one, without adjusting the BO offset. |
271 | * |
272 | * :: |
273 | * |
274 | * 0 a 2 |
275 | * old: |-----------| (bo_offset=n) |
276 | * |
277 | * 1 b 3 |
278 | * req: |-----------| (bo_offset=m) |
279 | * |
280 | * 0 a 1 b 3 |
281 | * new: |-----|-----------| (a.bo_offset=n,b.bo_offset=m) |
282 | * |
283 | * |
284 | * 10) Existent mapping is overlapped by the requested mapping, both having the |
285 | * same backing BO with a contiguous offset. Indicate the backing PTEs of |
286 | * the old mapping could be kept. |
287 | * |
288 | * :: |
289 | * |
290 | * 0 a 2 |
291 | * old: |-----------| (bo_offset=n) |
292 | * |
293 | * 1 a 3 |
294 | * req: |-----------| (bo_offset=n+1) |
295 | * |
296 | * 0 a' 1 a 3 |
297 | * new: |-----|-----------| (a'.bo_offset=n, a.bo_offset=n+1) |
298 | * |
299 | * |
300 | * 11) Requested mapping's range is a centered subset of the existent one |
301 | * having a different backing BO. Hence, map the requested mapping and split |
302 | * up the existent one in two mappings, adjusting the BO offset of the right |
303 | * one accordingly. |
304 | * |
305 | * :: |
306 | * |
307 | * 0 a 3 |
308 | * old: |-----------------| (bo_offset=n) |
309 | * |
310 | * 1 b 2 |
311 | * req: |-----| (bo_offset=m) |
312 | * |
313 | * 0 a 1 b 2 a' 3 |
314 | * new: |-----|-----|-----| (a.bo_offset=n,b.bo_offset=m,a'.bo_offset=n+2) |
315 | * |
316 | * |
317 | * 12) Requested mapping is a contiguous subset of the existent one. Split it |
318 | * up, but indicate that the backing PTEs could be kept. |
319 | * |
320 | * :: |
321 | * |
322 | * 0 a 3 |
323 | * old: |-----------------| (bo_offset=n) |
324 | * |
325 | * 1 a 2 |
326 | * req: |-----| (bo_offset=n+1) |
327 | * |
328 | * 0 a' 1 a 2 a'' 3 |
 *	new: |-----|-----|-----| (a'.bo_offset=n, a.bo_offset=n+1, a''.bo_offset=n+2)
330 | * |
331 | * |
332 | * 13) Existent mapping is a right aligned subset of the requested one, hence |
333 | * replace the existent one. |
334 | * |
335 | * :: |
336 | * |
337 | * 1 a 2 |
338 | * old: |-----| (bo_offset=n+1) |
339 | * |
340 | * 0 a 2 |
341 | * req: |-----------| (bo_offset=n) |
342 | * |
343 | * 0 a 2 |
344 | * new: |-----------| (bo_offset=n) |
345 | * |
346 | * .. note:: |
347 | * We expect to see the same result for a request with a different bo |
348 | * and/or non-contiguous bo_offset. |
349 | * |
350 | * |
351 | * 14) Existent mapping is a centered subset of the requested one, hence |
352 | * replace the existent one. |
353 | * |
354 | * :: |
355 | * |
356 | * 1 a 2 |
357 | * old: |-----| (bo_offset=n+1) |
358 | * |
359 | * 0 a 3 |
360 | * req: |----------------| (bo_offset=n) |
361 | * |
362 | * 0 a 3 |
363 | * new: |----------------| (bo_offset=n) |
364 | * |
365 | * .. note:: |
366 | * We expect to see the same result for a request with a different bo |
367 | * and/or non-contiguous bo_offset. |
368 | * |
369 | * |
 * 15) Existent mapping is overlapped at the beginning by the requested mapping
371 | * backed by a different BO. Hence, map the requested mapping and split up |
372 | * the existent one, adjusting its BO offset accordingly. |
373 | * |
374 | * :: |
375 | * |
376 | * 1 a 3 |
377 | * old: |-----------| (bo_offset=n) |
378 | * |
379 | * 0 b 2 |
380 | * req: |-----------| (bo_offset=m) |
381 | * |
382 | * 0 b 2 a' 3 |
 *	new: |-----------|-----| (b.bo_offset=m,a'.bo_offset=n+2)
384 | */ |
385 | |
386 | /** |
387 | * DOC: Locking |
388 | * |
 * Generally, the GPU VA manager does not take care of locking itself; it is
 * the driver's responsibility to take care of locking. Drivers might want to
 * protect the following operations: inserting, removing and iterating
 * &drm_gpuva objects as well as generating all kinds of operations, such as
 * split / merge or prefetch.
 *
 * The GPU VA manager also does not take care of the locking of the backing
 * &drm_gem_object buffers' GPU VA lists by itself; drivers are responsible for
 * enforcing mutual exclusion using either the GEM's dma_resv lock or,
 * alternatively, a driver specific external lock. For the latter see also
 * drm_gem_gpuva_set_lock().
 *
 * However, the GPU VA manager contains lockdep checks to ensure callers of its
 * API hold the corresponding lock whenever the &drm_gem_object's GPU VA list is
 * accessed by functions such as drm_gpuva_link() or drm_gpuva_unlink().
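 *
 * A minimal sketch of the external-lock variant; the driver_vm type and its
 * gpuva_lock mutex are assumptions made for illustration::
 *
 *	// At GEM object creation time, tell the GPU VA manager which lock
 *	// protects this GEM's GPUVA list.
 *	drm_gem_gpuva_set_lock(obj, &drv_vm->gpuva_lock);
 *
 *	// Later, hold that lock whenever linking or unlinking a &drm_gpuva.
 *	mutex_lock(&drv_vm->gpuva_lock);
 *	drm_gpuva_link(va);
 *	mutex_unlock(&drv_vm->gpuva_lock);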
404 | */ |
405 | |
406 | /** |
407 | * DOC: Examples |
408 | * |
 * This section gives two examples of how to let the DRM GPUVA Manager generate
410 | * &drm_gpuva_op in order to satisfy a given map or unmap request and how to |
411 | * make use of them. |
412 | * |
 * The below code is strictly limited to illustrating the generic usage pattern.
 * To maintain simplicity, it doesn't make use of any abstractions for common
 * code, different (asynchronous) stages with fence signalling critical paths,
 * any other helpers or error handling in terms of freeing memory and dropping
 * previously taken locks.
418 | * |
419 | * 1) Obtain a list of &drm_gpuva_op to create a new mapping:: |
420 | * |
421 | * // Allocates a new &drm_gpuva. |
422 | * struct drm_gpuva * driver_gpuva_alloc(void); |
423 | * |
 *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
425 | * // structure in individual driver structures and lock the dma-resv with |
426 | * // drm_exec or similar helpers. |
427 | * int driver_mapping_create(struct drm_gpuvm *gpuvm, |
428 | * u64 addr, u64 range, |
429 | * struct drm_gem_object *obj, u64 offset) |
430 | * { |
431 | * struct drm_gpuva_ops *ops; |
 *		struct drm_gpuva_op *op;
433 | * |
434 | * driver_lock_va_space(); |
435 | * ops = drm_gpuvm_sm_map_ops_create(gpuvm, addr, range, |
436 | * obj, offset); |
437 | * if (IS_ERR(ops)) |
438 | * return PTR_ERR(ops); |
439 | * |
440 | * drm_gpuva_for_each_op(op, ops) { |
441 | * struct drm_gpuva *va; |
442 | * |
443 | * switch (op->op) { |
444 | * case DRM_GPUVA_OP_MAP: |
445 | * va = driver_gpuva_alloc(); |
446 | * if (!va) |
447 | * ; // unwind previous VA space updates, |
448 | * // free memory and unlock |
449 | * |
450 | * driver_vm_map(); |
451 | * drm_gpuva_map(gpuvm, va, &op->map); |
452 | * drm_gpuva_link(va); |
453 | * |
454 | * break; |
455 | * case DRM_GPUVA_OP_REMAP: { |
456 | * struct drm_gpuva *prev = NULL, *next = NULL; |
457 | * |
458 | * va = op->remap.unmap->va; |
459 | * |
460 | * if (op->remap.prev) { |
461 | * prev = driver_gpuva_alloc(); |
462 | * if (!prev) |
463 | * ; // unwind previous VA space |
464 | * // updates, free memory and |
465 | * // unlock |
466 | * } |
467 | * |
468 | * if (op->remap.next) { |
469 | * next = driver_gpuva_alloc(); |
470 | * if (!next) |
471 | * ; // unwind previous VA space |
472 | * // updates, free memory and |
473 | * // unlock |
474 | * } |
475 | * |
476 | * driver_vm_remap(); |
477 | * drm_gpuva_remap(prev, next, &op->remap); |
478 | * |
479 | * drm_gpuva_unlink(va); |
480 | * if (prev) |
481 | * drm_gpuva_link(prev); |
482 | * if (next) |
483 | * drm_gpuva_link(next); |
484 | * |
485 | * break; |
486 | * } |
487 | * case DRM_GPUVA_OP_UNMAP: |
 *			va = op->unmap.va;
489 | * |
490 | * driver_vm_unmap(); |
491 | * drm_gpuva_unlink(va); |
492 | * drm_gpuva_unmap(&op->unmap); |
493 | * |
494 | * break; |
495 | * default: |
496 | * break; |
497 | * } |
498 | * } |
499 | * driver_unlock_va_space(); |
500 | * |
501 | * return 0; |
502 | * } |
503 | * |
504 | * 2) Receive a callback for each &drm_gpuva_op to create a new mapping:: |
505 | * |
506 | * struct driver_context { |
507 | * struct drm_gpuvm *gpuvm; |
508 | * struct drm_gpuva *new_va; |
509 | * struct drm_gpuva *prev_va; |
510 | * struct drm_gpuva *next_va; |
511 | * }; |
512 | * |
513 | * // ops to pass to drm_gpuvm_init() |
514 | * static const struct drm_gpuvm_ops driver_gpuvm_ops = { |
515 | * .sm_step_map = driver_gpuva_map, |
516 | * .sm_step_remap = driver_gpuva_remap, |
517 | * .sm_step_unmap = driver_gpuva_unmap, |
518 | * }; |
519 | * |
 *	// Typically drivers would embed the &drm_gpuvm and &drm_gpuva
521 | * // structure in individual driver structures and lock the dma-resv with |
522 | * // drm_exec or similar helpers. |
523 | * int driver_mapping_create(struct drm_gpuvm *gpuvm, |
524 | * u64 addr, u64 range, |
525 | * struct drm_gem_object *obj, u64 offset) |
526 | * { |
527 | * struct driver_context ctx; |
528 | * struct drm_gpuva_ops *ops; |
529 | * struct drm_gpuva_op *op; |
530 | * int ret = 0; |
531 | * |
532 | * ctx.gpuvm = gpuvm; |
533 | * |
534 | * ctx.new_va = kzalloc(sizeof(*ctx.new_va), GFP_KERNEL); |
535 | * ctx.prev_va = kzalloc(sizeof(*ctx.prev_va), GFP_KERNEL); |
536 | * ctx.next_va = kzalloc(sizeof(*ctx.next_va), GFP_KERNEL); |
537 | * if (!ctx.new_va || !ctx.prev_va || !ctx.next_va) { |
538 | * ret = -ENOMEM; |
539 | * goto out; |
540 | * } |
541 | * |
542 | * driver_lock_va_space(); |
543 | * ret = drm_gpuvm_sm_map(gpuvm, &ctx, addr, range, obj, offset); |
544 | * driver_unlock_va_space(); |
545 | * |
546 | * out: |
547 | * kfree(ctx.new_va); |
548 | * kfree(ctx.prev_va); |
549 | * kfree(ctx.next_va); |
550 | * return ret; |
551 | * } |
552 | * |
553 | * int driver_gpuva_map(struct drm_gpuva_op *op, void *__ctx) |
554 | * { |
555 | * struct driver_context *ctx = __ctx; |
556 | * |
 *		drm_gpuva_map(ctx->gpuvm, ctx->new_va, &op->map);
558 | * |
559 | * drm_gpuva_link(ctx->new_va); |
560 | * |
561 | * // prevent the new GPUVA from being freed in |
562 | * // driver_mapping_create() |
563 | * ctx->new_va = NULL; |
564 | * |
565 | * return 0; |
566 | * } |
567 | * |
568 | * int driver_gpuva_remap(struct drm_gpuva_op *op, void *__ctx) |
569 | * { |
570 | * struct driver_context *ctx = __ctx; |
571 | * |
572 | * drm_gpuva_remap(ctx->prev_va, ctx->next_va, &op->remap); |
573 | * |
574 | * drm_gpuva_unlink(op->remap.unmap->va); |
575 | * kfree(op->remap.unmap->va); |
576 | * |
577 | * if (op->remap.prev) { |
578 | * drm_gpuva_link(ctx->prev_va); |
579 | * ctx->prev_va = NULL; |
580 | * } |
581 | * |
582 | * if (op->remap.next) { |
583 | * drm_gpuva_link(ctx->next_va); |
584 | * ctx->next_va = NULL; |
585 | * } |
586 | * |
587 | * return 0; |
588 | * } |
589 | * |
590 | * int driver_gpuva_unmap(struct drm_gpuva_op *op, void *__ctx) |
591 | * { |
592 | * drm_gpuva_unlink(op->unmap.va); |
593 | * drm_gpuva_unmap(&op->unmap); |
594 | * kfree(op->unmap.va); |
595 | * |
596 | * return 0; |
597 | * } |
598 | */ |
599 | |
600 | #define to_drm_gpuva(__node) container_of((__node), struct drm_gpuva, rb.node) |
601 | |
602 | #define GPUVA_START(node) ((node)->va.addr) |
603 | #define GPUVA_LAST(node) ((node)->va.addr + (node)->va.range - 1) |
604 | |
605 | /* We do not actually use drm_gpuva_it_next(), tell the compiler to not complain |
606 | * about this. |
607 | */ |
608 | INTERVAL_TREE_DEFINE(struct drm_gpuva, rb.node, u64, rb.__subtree_last, |
609 | GPUVA_START, GPUVA_LAST, static __maybe_unused, |
610 | drm_gpuva_it) |
611 | |
612 | static int __drm_gpuva_insert(struct drm_gpuvm *gpuvm, |
613 | struct drm_gpuva *va); |
614 | static void __drm_gpuva_remove(struct drm_gpuva *va); |
615 | |
616 | static bool |
617 | drm_gpuvm_check_overflow(u64 addr, u64 range) |
618 | { |
619 | u64 end; |
620 | |
621 | return WARN(check_add_overflow(addr, range, &end), |
		    "GPUVA address limited to %zu bytes.\n", sizeof(end));
623 | } |
624 | |
625 | static bool |
626 | drm_gpuvm_in_mm_range(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
627 | { |
628 | u64 end = addr + range; |
629 | u64 mm_start = gpuvm->mm_start; |
630 | u64 mm_end = mm_start + gpuvm->mm_range; |
631 | |
632 | return addr >= mm_start && end <= mm_end; |
633 | } |
634 | |
635 | static bool |
636 | drm_gpuvm_in_kernel_node(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
637 | { |
638 | u64 end = addr + range; |
639 | u64 kstart = gpuvm->kernel_alloc_node.va.addr; |
640 | u64 krange = gpuvm->kernel_alloc_node.va.range; |
641 | u64 kend = kstart + krange; |
642 | |
643 | return krange && addr < kend && kstart < end; |
644 | } |
645 | |
646 | static bool |
647 | drm_gpuvm_range_valid(struct drm_gpuvm *gpuvm, |
648 | u64 addr, u64 range) |
649 | { |
650 | return !drm_gpuvm_check_overflow(addr, range) && |
651 | drm_gpuvm_in_mm_range(gpuvm, addr, range) && |
652 | !drm_gpuvm_in_kernel_node(gpuvm, addr, range); |
653 | } |
654 | |
655 | /** |
656 | * drm_gpuvm_init() - initialize a &drm_gpuvm |
657 | * @gpuvm: pointer to the &drm_gpuvm to initialize |
658 | * @name: the name of the GPU VA space |
659 | * @start_offset: the start offset of the GPU VA space |
660 | * @range: the size of the GPU VA space |
661 | * @reserve_offset: the start of the kernel reserved GPU VA area |
662 | * @reserve_range: the size of the kernel reserved GPU VA area |
663 | * @ops: &drm_gpuvm_ops called on &drm_gpuvm_sm_map / &drm_gpuvm_sm_unmap |
664 | * |
665 | * The &drm_gpuvm must be initialized with this function before use. |
666 | * |
667 | * Note that @gpuvm must be cleared to 0 before calling this function. The given |
668 | * &name is expected to be managed by the surrounding driver structures. |
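 *
 * A sketch of a typical call; the concrete VA space size, the reserved range
 * and the driver_gpuvm_ops instance are made-up values::
 *
 *	drm_gpuvm_init(&drv_vm->gpuvm, "example-vm",
 *		       0, 1ull << 47,		// managed VA space
 *		       0, SZ_4K,		// kernel reserved node
 *		       &driver_gpuvm_ops);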
669 | */ |
670 | void |
671 | drm_gpuvm_init(struct drm_gpuvm *gpuvm, |
672 | const char *name, |
673 | u64 start_offset, u64 range, |
674 | u64 reserve_offset, u64 reserve_range, |
675 | const struct drm_gpuvm_ops *ops) |
676 | { |
677 | gpuvm->rb.tree = RB_ROOT_CACHED; |
	INIT_LIST_HEAD(&gpuvm->rb.list);

	drm_gpuvm_check_overflow(start_offset, range);
	gpuvm->mm_start = start_offset;
	gpuvm->mm_range = range;

	gpuvm->name = name ? name : "unknown";
685 | gpuvm->ops = ops; |
686 | |
687 | memset(&gpuvm->kernel_alloc_node, 0, sizeof(struct drm_gpuva)); |
688 | |
689 | if (reserve_range) { |
690 | gpuvm->kernel_alloc_node.va.addr = reserve_offset; |
691 | gpuvm->kernel_alloc_node.va.range = reserve_range; |
692 | |
693 | if (likely(!drm_gpuvm_check_overflow(reserve_offset, |
694 | reserve_range))) |
			__drm_gpuva_insert(gpuvm, &gpuvm->kernel_alloc_node);
696 | } |
697 | } |
698 | EXPORT_SYMBOL_GPL(drm_gpuvm_init); |
699 | |
700 | /** |
701 | * drm_gpuvm_destroy() - cleanup a &drm_gpuvm |
702 | * @gpuvm: pointer to the &drm_gpuvm to clean up |
703 | * |
704 | * Note that it is a bug to call this function on a manager that still |
705 | * holds GPU VA mappings. |
706 | */ |
707 | void |
708 | drm_gpuvm_destroy(struct drm_gpuvm *gpuvm) |
709 | { |
710 | gpuvm->name = NULL; |
711 | |
712 | if (gpuvm->kernel_alloc_node.va.range) |
		__drm_gpuva_remove(&gpuvm->kernel_alloc_node);

	WARN(!RB_EMPTY_ROOT(&gpuvm->rb.tree.rb_root),
	     "GPUVA tree is not empty, potentially leaking memory.");
717 | } |
718 | EXPORT_SYMBOL_GPL(drm_gpuvm_destroy); |
719 | |
720 | static int |
721 | __drm_gpuva_insert(struct drm_gpuvm *gpuvm, |
722 | struct drm_gpuva *va) |
723 | { |
724 | struct rb_node *node; |
725 | struct list_head *head; |
726 | |
	if (drm_gpuva_it_iter_first(&gpuvm->rb.tree,
728 | GPUVA_START(va), |
729 | GPUVA_LAST(va))) |
730 | return -EEXIST; |
731 | |
732 | va->vm = gpuvm; |
733 | |
	drm_gpuva_it_insert(va, &gpuvm->rb.tree);
735 | |
736 | node = rb_prev(&va->rb.node); |
737 | if (node) |
738 | head = &(to_drm_gpuva(node))->rb.entry; |
739 | else |
740 | head = &gpuvm->rb.list; |
741 | |
	list_add(&va->rb.entry, head);
743 | |
744 | return 0; |
745 | } |
746 | |
747 | /** |
748 | * drm_gpuva_insert() - insert a &drm_gpuva |
749 | * @gpuvm: the &drm_gpuvm to insert the &drm_gpuva in |
750 | * @va: the &drm_gpuva to insert |
751 | * |
752 | * Insert a &drm_gpuva with a given address and range into a |
753 | * &drm_gpuvm. |
754 | * |
755 | * It is safe to use this function using the safe versions of iterating the GPU |
756 | * VA space, such as drm_gpuvm_for_each_va_safe() and |
757 | * drm_gpuvm_for_each_va_range_safe(). |
758 | * |
759 | * Returns: 0 on success, negative error code on failure. |
760 | */ |
761 | int |
762 | drm_gpuva_insert(struct drm_gpuvm *gpuvm, |
763 | struct drm_gpuva *va) |
764 | { |
765 | u64 addr = va->va.addr; |
766 | u64 range = va->va.range; |
767 | |
768 | if (unlikely(!drm_gpuvm_range_valid(gpuvm, addr, range))) |
769 | return -EINVAL; |
770 | |
771 | return __drm_gpuva_insert(gpuvm, va); |
772 | } |
773 | EXPORT_SYMBOL_GPL(drm_gpuva_insert); |
774 | |
775 | static void |
776 | __drm_gpuva_remove(struct drm_gpuva *va) |
777 | { |
	drm_gpuva_it_remove(va, &va->vm->rb.tree);
	list_del_init(&va->rb.entry);
780 | } |
781 | |
782 | /** |
783 | * drm_gpuva_remove() - remove a &drm_gpuva |
784 | * @va: the &drm_gpuva to remove |
785 | * |
 * This removes the given &va from the underlying tree.
787 | * |
788 | * It is safe to use this function using the safe versions of iterating the GPU |
789 | * VA space, such as drm_gpuvm_for_each_va_safe() and |
790 | * drm_gpuvm_for_each_va_range_safe(). |
791 | */ |
792 | void |
793 | drm_gpuva_remove(struct drm_gpuva *va) |
794 | { |
795 | struct drm_gpuvm *gpuvm = va->vm; |
796 | |
797 | if (unlikely(va == &gpuvm->kernel_alloc_node)) { |
		WARN(1, "Can't destroy kernel reserved node.\n");
799 | return; |
800 | } |
801 | |
802 | __drm_gpuva_remove(va); |
803 | } |
804 | EXPORT_SYMBOL_GPL(drm_gpuva_remove); |
805 | |
806 | /** |
807 | * drm_gpuva_link() - link a &drm_gpuva |
808 | * @va: the &drm_gpuva to link |
809 | * |
810 | * This adds the given &va to the GPU VA list of the &drm_gem_object it is |
811 | * associated with. |
812 | * |
813 | * This function expects the caller to protect the GEM's GPUVA list against |
 * concurrent access using the GEM's dma_resv lock.
815 | */ |
816 | void |
817 | drm_gpuva_link(struct drm_gpuva *va) |
818 | { |
819 | struct drm_gem_object *obj = va->gem.obj; |
820 | |
821 | if (unlikely(!obj)) |
822 | return; |
823 | |
824 | drm_gem_gpuva_assert_lock_held(obj); |
825 | |
	list_add_tail(&va->gem.entry, &obj->gpuva.list);
827 | } |
828 | EXPORT_SYMBOL_GPL(drm_gpuva_link); |
829 | |
830 | /** |
831 | * drm_gpuva_unlink() - unlink a &drm_gpuva |
832 | * @va: the &drm_gpuva to unlink |
833 | * |
834 | * This removes the given &va from the GPU VA list of the &drm_gem_object it is |
835 | * associated with. |
836 | * |
837 | * This function expects the caller to protect the GEM's GPUVA list against |
 * concurrent access using the GEM's dma_resv lock.
839 | */ |
840 | void |
841 | drm_gpuva_unlink(struct drm_gpuva *va) |
842 | { |
843 | struct drm_gem_object *obj = va->gem.obj; |
844 | |
845 | if (unlikely(!obj)) |
846 | return; |
847 | |
848 | drm_gem_gpuva_assert_lock_held(obj); |
849 | |
	list_del_init(&va->gem.entry);
851 | } |
852 | EXPORT_SYMBOL_GPL(drm_gpuva_unlink); |
853 | |
854 | /** |
855 | * drm_gpuva_find_first() - find the first &drm_gpuva in the given range |
856 | * @gpuvm: the &drm_gpuvm to search in |
857 | * @addr: the &drm_gpuvas address |
858 | * @range: the &drm_gpuvas range |
859 | * |
860 | * Returns: the first &drm_gpuva within the given range |
861 | */ |
862 | struct drm_gpuva * |
863 | drm_gpuva_find_first(struct drm_gpuvm *gpuvm, |
864 | u64 addr, u64 range) |
865 | { |
866 | u64 last = addr + range - 1; |
867 | |
	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, addr, last);
869 | } |
870 | EXPORT_SYMBOL_GPL(drm_gpuva_find_first); |
871 | |
872 | /** |
873 | * drm_gpuva_find() - find a &drm_gpuva |
874 | * @gpuvm: the &drm_gpuvm to search in |
875 | * @addr: the &drm_gpuvas address |
876 | * @range: the &drm_gpuvas range |
877 | * |
878 | * Returns: the &drm_gpuva at a given &addr and with a given &range |
879 | */ |
880 | struct drm_gpuva * |
881 | drm_gpuva_find(struct drm_gpuvm *gpuvm, |
882 | u64 addr, u64 range) |
883 | { |
884 | struct drm_gpuva *va; |
885 | |
886 | va = drm_gpuva_find_first(gpuvm, addr, range); |
887 | if (!va) |
888 | goto out; |
889 | |
890 | if (va->va.addr != addr || |
891 | va->va.range != range) |
892 | goto out; |
893 | |
894 | return va; |
895 | |
896 | out: |
897 | return NULL; |
898 | } |
899 | EXPORT_SYMBOL_GPL(drm_gpuva_find); |
900 | |
901 | /** |
902 | * drm_gpuva_find_prev() - find the &drm_gpuva before the given address |
903 | * @gpuvm: the &drm_gpuvm to search in |
904 | * @start: the given GPU VA's start address |
905 | * |
906 | * Find the adjacent &drm_gpuva before the GPU VA with given &start address. |
907 | * |
908 | * Note that if there is any free space between the GPU VA mappings no mapping |
909 | * is returned. |
910 | * |
911 | * Returns: a pointer to the found &drm_gpuva or NULL if none was found |
912 | */ |
913 | struct drm_gpuva * |
914 | drm_gpuva_find_prev(struct drm_gpuvm *gpuvm, u64 start) |
915 | { |
	if (!drm_gpuvm_range_valid(gpuvm, start - 1, 1))
917 | return NULL; |
918 | |
	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, start - 1, start);
920 | } |
921 | EXPORT_SYMBOL_GPL(drm_gpuva_find_prev); |
922 | |
923 | /** |
924 | * drm_gpuva_find_next() - find the &drm_gpuva after the given address |
925 | * @gpuvm: the &drm_gpuvm to search in |
926 | * @end: the given GPU VA's end address |
927 | * |
928 | * Find the adjacent &drm_gpuva after the GPU VA with given &end address. |
929 | * |
930 | * Note that if there is any free space between the GPU VA mappings no mapping |
931 | * is returned. |
932 | * |
933 | * Returns: a pointer to the found &drm_gpuva or NULL if none was found |
934 | */ |
935 | struct drm_gpuva * |
936 | drm_gpuva_find_next(struct drm_gpuvm *gpuvm, u64 end) |
937 | { |
	if (!drm_gpuvm_range_valid(gpuvm, end, 1))
939 | return NULL; |
940 | |
	return drm_gpuva_it_iter_first(&gpuvm->rb.tree, end, end + 1);
942 | } |
943 | EXPORT_SYMBOL_GPL(drm_gpuva_find_next); |
944 | |
945 | /** |
946 | * drm_gpuvm_interval_empty() - indicate whether a given interval of the VA space |
947 | * is empty |
948 | * @gpuvm: the &drm_gpuvm to check the range for |
949 | * @addr: the start address of the range |
950 | * @range: the range of the interval |
951 | * |
952 | * Returns: true if the interval is empty, false otherwise |
953 | */ |
954 | bool |
955 | drm_gpuvm_interval_empty(struct drm_gpuvm *gpuvm, u64 addr, u64 range) |
956 | { |
957 | return !drm_gpuva_find_first(gpuvm, addr, range); |
958 | } |
959 | EXPORT_SYMBOL_GPL(drm_gpuvm_interval_empty); |
960 | |
961 | /** |
962 | * drm_gpuva_map() - helper to insert a &drm_gpuva according to a |
963 | * &drm_gpuva_op_map |
964 | * @gpuvm: the &drm_gpuvm |
965 | * @va: the &drm_gpuva to insert |
966 | * @op: the &drm_gpuva_op_map to initialize @va with |
967 | * |
968 | * Initializes the @va from the @op and inserts it into the given @gpuvm. |
969 | */ |
970 | void |
971 | drm_gpuva_map(struct drm_gpuvm *gpuvm, |
972 | struct drm_gpuva *va, |
973 | struct drm_gpuva_op_map *op) |
974 | { |
975 | drm_gpuva_init_from_op(va, op); |
976 | drm_gpuva_insert(gpuvm, va); |
977 | } |
978 | EXPORT_SYMBOL_GPL(drm_gpuva_map); |
979 | |
980 | /** |
981 | * drm_gpuva_remap() - helper to remap a &drm_gpuva according to a |
982 | * &drm_gpuva_op_remap |
983 | * @prev: the &drm_gpuva to remap when keeping the start of a mapping |
984 | * @next: the &drm_gpuva to remap when keeping the end of a mapping |
985 | * @op: the &drm_gpuva_op_remap to initialize @prev and @next with |
986 | * |
987 | * Removes the currently mapped &drm_gpuva and remaps it using @prev and/or |
988 | * @next. |
989 | */ |
990 | void |
991 | drm_gpuva_remap(struct drm_gpuva *prev, |
992 | struct drm_gpuva *next, |
993 | struct drm_gpuva_op_remap *op) |
994 | { |
995 | struct drm_gpuva *curr = op->unmap->va; |
996 | struct drm_gpuvm *gpuvm = curr->vm; |
997 | |
998 | drm_gpuva_remove(curr); |
999 | |
1000 | if (op->prev) { |
		drm_gpuva_init_from_op(prev, op->prev);
1002 | drm_gpuva_insert(gpuvm, prev); |
1003 | } |
1004 | |
1005 | if (op->next) { |
		drm_gpuva_init_from_op(next, op->next);
1007 | drm_gpuva_insert(gpuvm, next); |
1008 | } |
1009 | } |
1010 | EXPORT_SYMBOL_GPL(drm_gpuva_remap); |
1011 | |
1012 | /** |
1013 | * drm_gpuva_unmap() - helper to remove a &drm_gpuva according to a |
1014 | * &drm_gpuva_op_unmap |
1015 | * @op: the &drm_gpuva_op_unmap specifying the &drm_gpuva to remove |
1016 | * |
1017 | * Removes the &drm_gpuva associated with the &drm_gpuva_op_unmap. |
1018 | */ |
1019 | void |
1020 | drm_gpuva_unmap(struct drm_gpuva_op_unmap *op) |
1021 | { |
1022 | drm_gpuva_remove(op->va); |
1023 | } |
1024 | EXPORT_SYMBOL_GPL(drm_gpuva_unmap); |
1025 | |
1026 | static int |
1027 | op_map_cb(const struct drm_gpuvm_ops *fn, void *priv, |
1028 | u64 addr, u64 range, |
1029 | struct drm_gem_object *obj, u64 offset) |
1030 | { |
1031 | struct drm_gpuva_op op = {}; |
1032 | |
1033 | op.op = DRM_GPUVA_OP_MAP; |
1034 | op.map.va.addr = addr; |
1035 | op.map.va.range = range; |
1036 | op.map.gem.obj = obj; |
1037 | op.map.gem.offset = offset; |
1038 | |
1039 | return fn->sm_step_map(&op, priv); |
1040 | } |
1041 | |
1042 | static int |
1043 | op_remap_cb(const struct drm_gpuvm_ops *fn, void *priv, |
1044 | struct drm_gpuva_op_map *prev, |
1045 | struct drm_gpuva_op_map *next, |
1046 | struct drm_gpuva_op_unmap *unmap) |
1047 | { |
1048 | struct drm_gpuva_op op = {}; |
1049 | struct drm_gpuva_op_remap *r; |
1050 | |
1051 | op.op = DRM_GPUVA_OP_REMAP; |
1052 | r = &op.remap; |
1053 | r->prev = prev; |
1054 | r->next = next; |
1055 | r->unmap = unmap; |
1056 | |
1057 | return fn->sm_step_remap(&op, priv); |
1058 | } |
1059 | |
1060 | static int |
1061 | op_unmap_cb(const struct drm_gpuvm_ops *fn, void *priv, |
1062 | struct drm_gpuva *va, bool merge) |
1063 | { |
1064 | struct drm_gpuva_op op = {}; |
1065 | |
1066 | op.op = DRM_GPUVA_OP_UNMAP; |
1067 | op.unmap.va = va; |
1068 | op.unmap.keep = merge; |
1069 | |
1070 | return fn->sm_step_unmap(&op, priv); |
1071 | } |
1072 | |
1073 | static int |
1074 | __drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, |
1075 | const struct drm_gpuvm_ops *ops, void *priv, |
1076 | u64 req_addr, u64 req_range, |
1077 | struct drm_gem_object *req_obj, u64 req_offset) |
1078 | { |
1079 | struct drm_gpuva *va, *next; |
1080 | u64 req_end = req_addr + req_range; |
1081 | int ret; |
1082 | |
1083 | if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range))) |
1084 | return -EINVAL; |
1085 | |
1086 | drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { |
1087 | struct drm_gem_object *obj = va->gem.obj; |
1088 | u64 offset = va->gem.offset; |
1089 | u64 addr = va->va.addr; |
1090 | u64 range = va->va.range; |
1091 | u64 end = addr + range; |
1092 | bool merge = !!va->gem.obj; |
1093 | |
1094 | if (addr == req_addr) { |
1095 | merge &= obj == req_obj && |
1096 | offset == req_offset; |
1097 | |
1098 | if (end == req_end) { |
				ret = op_unmap_cb(ops, priv, va, merge);
1100 | if (ret) |
1101 | return ret; |
1102 | break; |
1103 | } |
1104 | |
1105 | if (end < req_end) { |
				ret = op_unmap_cb(ops, priv, va, merge);
1107 | if (ret) |
1108 | return ret; |
1109 | continue; |
1110 | } |
1111 | |
1112 | if (end > req_end) { |
1113 | struct drm_gpuva_op_map n = { |
1114 | .va.addr = req_end, |
1115 | .va.range = range - req_range, |
1116 | .gem.obj = obj, |
1117 | .gem.offset = offset + req_range, |
1118 | }; |
1119 | struct drm_gpuva_op_unmap u = { |
1120 | .va = va, |
1121 | .keep = merge, |
1122 | }; |
1123 | |
				ret = op_remap_cb(ops, priv, NULL, &n, &u);
1125 | if (ret) |
1126 | return ret; |
1127 | break; |
1128 | } |
1129 | } else if (addr < req_addr) { |
1130 | u64 ls_range = req_addr - addr; |
1131 | struct drm_gpuva_op_map p = { |
1132 | .va.addr = addr, |
1133 | .va.range = ls_range, |
1134 | .gem.obj = obj, |
1135 | .gem.offset = offset, |
1136 | }; |
1137 | struct drm_gpuva_op_unmap u = { .va = va }; |
1138 | |
1139 | merge &= obj == req_obj && |
1140 | offset + ls_range == req_offset; |
1141 | u.keep = merge; |
1142 | |
1143 | if (end == req_end) { |
				ret = op_remap_cb(ops, priv, &p, NULL, &u);
1145 | if (ret) |
1146 | return ret; |
1147 | break; |
1148 | } |
1149 | |
1150 | if (end < req_end) { |
				ret = op_remap_cb(ops, priv, &p, NULL, &u);
1152 | if (ret) |
1153 | return ret; |
1154 | continue; |
1155 | } |
1156 | |
1157 | if (end > req_end) { |
1158 | struct drm_gpuva_op_map n = { |
1159 | .va.addr = req_end, |
1160 | .va.range = end - req_end, |
1161 | .gem.obj = obj, |
1162 | .gem.offset = offset + ls_range + |
1163 | req_range, |
1164 | }; |
1165 | |
				ret = op_remap_cb(ops, priv, &p, &n, &u);
1167 | if (ret) |
1168 | return ret; |
1169 | break; |
1170 | } |
1171 | } else if (addr > req_addr) { |
1172 | merge &= obj == req_obj && |
1173 | offset == req_offset + |
1174 | (addr - req_addr); |
1175 | |
1176 | if (end == req_end) { |
				ret = op_unmap_cb(ops, priv, va, merge);
1178 | if (ret) |
1179 | return ret; |
1180 | break; |
1181 | } |
1182 | |
1183 | if (end < req_end) { |
				ret = op_unmap_cb(ops, priv, va, merge);
1185 | if (ret) |
1186 | return ret; |
1187 | continue; |
1188 | } |
1189 | |
1190 | if (end > req_end) { |
1191 | struct drm_gpuva_op_map n = { |
1192 | .va.addr = req_end, |
1193 | .va.range = end - req_end, |
1194 | .gem.obj = obj, |
1195 | .gem.offset = offset + req_end - addr, |
1196 | }; |
1197 | struct drm_gpuva_op_unmap u = { |
1198 | .va = va, |
1199 | .keep = merge, |
1200 | }; |
1201 | |
				ret = op_remap_cb(ops, priv, NULL, &n, &u);
1203 | if (ret) |
1204 | return ret; |
1205 | break; |
1206 | } |
1207 | } |
1208 | } |
1209 | |
	return op_map_cb(ops, priv,
			 req_addr, req_range,
			 req_obj, req_offset);
1213 | } |
1214 | |
1215 | static int |
1216 | __drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, |
1217 | const struct drm_gpuvm_ops *ops, void *priv, |
1218 | u64 req_addr, u64 req_range) |
1219 | { |
1220 | struct drm_gpuva *va, *next; |
1221 | u64 req_end = req_addr + req_range; |
1222 | int ret; |
1223 | |
1224 | if (unlikely(!drm_gpuvm_range_valid(gpuvm, req_addr, req_range))) |
1225 | return -EINVAL; |
1226 | |
1227 | drm_gpuvm_for_each_va_range_safe(va, next, gpuvm, req_addr, req_end) { |
1228 | struct drm_gpuva_op_map prev = {}, next = {}; |
1229 | bool prev_split = false, next_split = false; |
1230 | struct drm_gem_object *obj = va->gem.obj; |
1231 | u64 offset = va->gem.offset; |
1232 | u64 addr = va->va.addr; |
1233 | u64 range = va->va.range; |
1234 | u64 end = addr + range; |
1235 | |
1236 | if (addr < req_addr) { |
1237 | prev.va.addr = addr; |
1238 | prev.va.range = req_addr - addr; |
1239 | prev.gem.obj = obj; |
1240 | prev.gem.offset = offset; |
1241 | |
1242 | prev_split = true; |
1243 | } |
1244 | |
1245 | if (end > req_end) { |
1246 | next.va.addr = req_end; |
1247 | next.va.range = end - req_end; |
1248 | next.gem.obj = obj; |
1249 | next.gem.offset = offset + (req_end - addr); |
1250 | |
1251 | next_split = true; |
1252 | } |
1253 | |
1254 | if (prev_split || next_split) { |
1255 | struct drm_gpuva_op_unmap unmap = { .va = va }; |
1256 | |
			ret = op_remap_cb(ops, priv,
					  prev_split ? &prev : NULL,
					  next_split ? &next : NULL,
					  &unmap);
1261 | if (ret) |
1262 | return ret; |
1263 | } else { |
			ret = op_unmap_cb(ops, priv, va, false);
1265 | if (ret) |
1266 | return ret; |
1267 | } |
1268 | } |
1269 | |
1270 | return 0; |
1271 | } |
1272 | |
1273 | /** |
1274 | * drm_gpuvm_sm_map() - creates the &drm_gpuva_op split/merge steps |
1275 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
1276 | * @req_addr: the start address of the new mapping |
1277 | * @req_range: the range of the new mapping |
1278 | * @req_obj: the &drm_gem_object to map |
1279 | * @req_offset: the offset within the &drm_gem_object |
1280 | * @priv: pointer to a driver private data structure |
1281 | * |
1282 | * This function iterates the given range of the GPU VA space. It utilizes the |
1283 | * &drm_gpuvm_ops to call back into the driver providing the split and merge |
1284 | * steps. |
1285 | * |
1286 | * Drivers may use these callbacks to update the GPU VA space right away within |
1287 | * the callback. In case the driver decides to copy and store the operations for |
1288 | * later processing neither this function nor &drm_gpuvm_sm_unmap is allowed to |
1289 | * be called before the &drm_gpuvm's view of the GPU VA space was |
1290 | * updated with the previous set of operations. To update the |
 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
 * drm_gpuva_remove() should be used.
1294 | * |
1295 | * A sequence of callbacks can contain map, unmap and remap operations, but |
1296 | * the sequence of callbacks might also be empty if no operation is required, |
1297 | * e.g. if the requested mapping already exists in the exact same way. |
1298 | * |
1299 | * There can be an arbitrary amount of unmap operations, a maximum of two remap |
1300 | * operations and a single map operation. The latter one represents the original |
1301 | * map operation requested by the caller. |
1302 | * |
1303 | * Returns: 0 on success or a negative error code |
1304 | */ |
1305 | int |
1306 | drm_gpuvm_sm_map(struct drm_gpuvm *gpuvm, void *priv, |
1307 | u64 req_addr, u64 req_range, |
1308 | struct drm_gem_object *req_obj, u64 req_offset) |
1309 | { |
1310 | const struct drm_gpuvm_ops *ops = gpuvm->ops; |
1311 | |
1312 | if (unlikely(!(ops && ops->sm_step_map && |
1313 | ops->sm_step_remap && |
1314 | ops->sm_step_unmap))) |
1315 | return -EINVAL; |
1316 | |
1317 | return __drm_gpuvm_sm_map(gpuvm, ops, priv, |
1318 | req_addr, req_range, |
1319 | req_obj, req_offset); |
1320 | } |
1321 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map); |
1322 | |
1323 | /** |
1324 | * drm_gpuvm_sm_unmap() - creates the &drm_gpuva_ops to split on unmap |
1325 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
1326 | * @priv: pointer to a driver private data structure |
1327 | * @req_addr: the start address of the range to unmap |
1328 | * @req_range: the range of the mappings to unmap |
1329 | * |
1330 | * This function iterates the given range of the GPU VA space. It utilizes the |
1331 | * &drm_gpuvm_ops to call back into the driver providing the operations to |
1332 | * unmap and, if required, split existent mappings. |
1333 | * |
1334 | * Drivers may use these callbacks to update the GPU VA space right away within |
1335 | * the callback. In case the driver decides to copy and store the operations for |
1336 | * later processing neither this function nor &drm_gpuvm_sm_map is allowed to be |
1337 | * called before the &drm_gpuvm's view of the GPU VA space was updated |
1338 | * with the previous set of operations. To update the &drm_gpuvm's view |
 * of the GPU VA space drm_gpuva_insert() and drm_gpuva_remove() should be
 * used.
1341 | * |
1342 | * A sequence of callbacks can contain unmap and remap operations, depending on |
1343 | * whether there are actual overlapping mappings to split. |
1344 | * |
1345 | * There can be an arbitrary amount of unmap operations and a maximum of two |
1346 | * remap operations. |
1347 | * |
1348 | * Returns: 0 on success or a negative error code |
1349 | */ |
1350 | int |
1351 | drm_gpuvm_sm_unmap(struct drm_gpuvm *gpuvm, void *priv, |
1352 | u64 req_addr, u64 req_range) |
1353 | { |
1354 | const struct drm_gpuvm_ops *ops = gpuvm->ops; |
1355 | |
1356 | if (unlikely(!(ops && ops->sm_step_remap && |
1357 | ops->sm_step_unmap))) |
1358 | return -EINVAL; |
1359 | |
1360 | return __drm_gpuvm_sm_unmap(gpuvm, ops, priv, |
1361 | req_addr, req_range); |
1362 | } |
1363 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap); |
1364 | |
1365 | static struct drm_gpuva_op * |
1366 | gpuva_op_alloc(struct drm_gpuvm *gpuvm) |
1367 | { |
1368 | const struct drm_gpuvm_ops *fn = gpuvm->ops; |
1369 | struct drm_gpuva_op *op; |
1370 | |
1371 | if (fn && fn->op_alloc) |
1372 | op = fn->op_alloc(); |
1373 | else |
		op = kzalloc(sizeof(*op), GFP_KERNEL);
1375 | |
1376 | if (unlikely(!op)) |
1377 | return NULL; |
1378 | |
1379 | return op; |
1380 | } |
1381 | |
1382 | static void |
1383 | gpuva_op_free(struct drm_gpuvm *gpuvm, |
1384 | struct drm_gpuva_op *op) |
1385 | { |
1386 | const struct drm_gpuvm_ops *fn = gpuvm->ops; |
1387 | |
1388 | if (fn && fn->op_free) |
1389 | fn->op_free(op); |
1390 | else |
		kfree(op);
1392 | } |
1393 | |
1394 | static int |
1395 | drm_gpuva_sm_step(struct drm_gpuva_op *__op, |
1396 | void *priv) |
1397 | { |
1398 | struct { |
1399 | struct drm_gpuvm *vm; |
1400 | struct drm_gpuva_ops *ops; |
1401 | } *args = priv; |
1402 | struct drm_gpuvm *gpuvm = args->vm; |
1403 | struct drm_gpuva_ops *ops = args->ops; |
1404 | struct drm_gpuva_op *op; |
1405 | |
1406 | op = gpuva_op_alloc(gpuvm); |
1407 | if (unlikely(!op)) |
1408 | goto err; |
1409 | |
1410 | memcpy(op, __op, sizeof(*op)); |
1411 | |
1412 | if (op->op == DRM_GPUVA_OP_REMAP) { |
1413 | struct drm_gpuva_op_remap *__r = &__op->remap; |
1414 | struct drm_gpuva_op_remap *r = &op->remap; |
1415 | |
		r->unmap = kmemdup(__r->unmap, sizeof(*r->unmap),
				   GFP_KERNEL);
1418 | if (unlikely(!r->unmap)) |
1419 | goto err_free_op; |
1420 | |
1421 | if (__r->prev) { |
			r->prev = kmemdup(__r->prev, sizeof(*r->prev),
					  GFP_KERNEL);
1424 | if (unlikely(!r->prev)) |
1425 | goto err_free_unmap; |
1426 | } |
1427 | |
1428 | if (__r->next) { |
			r->next = kmemdup(__r->next, sizeof(*r->next),
					  GFP_KERNEL);
1431 | if (unlikely(!r->next)) |
1432 | goto err_free_prev; |
1433 | } |
1434 | } |
1435 | |
	list_add_tail(&op->entry, &ops->list);
1437 | |
1438 | return 0; |
1439 | |
err_free_prev:
	kfree(op->remap.prev);
err_free_unmap:
	kfree(op->remap.unmap);
1444 | err_free_op: |
1445 | gpuva_op_free(gpuvm, op); |
1446 | err: |
1447 | return -ENOMEM; |
1448 | } |
1449 | |
1450 | static const struct drm_gpuvm_ops gpuvm_list_ops = { |
1451 | .sm_step_map = drm_gpuva_sm_step, |
1452 | .sm_step_remap = drm_gpuva_sm_step, |
1453 | .sm_step_unmap = drm_gpuva_sm_step, |
1454 | }; |
1455 | |
1456 | /** |
1457 | * drm_gpuvm_sm_map_ops_create() - creates the &drm_gpuva_ops to split and merge |
1458 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
1459 | * @req_addr: the start address of the new mapping |
1460 | * @req_range: the range of the new mapping |
1461 | * @req_obj: the &drm_gem_object to map |
1462 | * @req_offset: the offset within the &drm_gem_object |
1463 | * |
1464 | * This function creates a list of operations to perform splitting and merging |
1465 | * of existent mapping(s) with the newly requested one. |
1466 | * |
1467 | * The list can be iterated with &drm_gpuva_for_each_op and must be processed |
1468 | * in the given order. It can contain map, unmap and remap operations, but it |
1469 | * also can be empty if no operation is required, e.g. if the requested mapping |
 * already exists in the exact same way.
1471 | * |
1472 | * There can be an arbitrary amount of unmap operations, a maximum of two remap |
1473 | * operations and a single map operation. The latter one represents the original |
1474 | * map operation requested by the caller. |
1475 | * |
1476 | * Note that before calling this function again with another mapping request it |
1477 | * is necessary to update the &drm_gpuvm's view of the GPU VA space. The |
1478 | * previously obtained operations must be either processed or abandoned. To |
 * update the &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
 * drm_gpuva_remove() should be used.
1482 | * |
1483 | * After the caller finished processing the returned &drm_gpuva_ops, they must |
1484 | * be freed with &drm_gpuva_ops_free. |
1485 | * |
1486 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure |
1487 | */ |
1488 | struct drm_gpuva_ops * |
1489 | drm_gpuvm_sm_map_ops_create(struct drm_gpuvm *gpuvm, |
1490 | u64 req_addr, u64 req_range, |
1491 | struct drm_gem_object *req_obj, u64 req_offset) |
1492 | { |
1493 | struct drm_gpuva_ops *ops; |
1494 | struct { |
1495 | struct drm_gpuvm *vm; |
1496 | struct drm_gpuva_ops *ops; |
1497 | } args; |
1498 | int ret; |
1499 | |
	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);
1505 | |
1506 | args.vm = gpuvm; |
1507 | args.ops = ops; |
1508 | |
	ret = __drm_gpuvm_sm_map(gpuvm, &gpuvm_list_ops, &args,
				 req_addr, req_range,
				 req_obj, req_offset);
1512 | if (ret) |
1513 | goto err_free_ops; |
1514 | |
1515 | return ops; |
1516 | |
1517 | err_free_ops: |
1518 | drm_gpuva_ops_free(gpuvm, ops); |
	return ERR_PTR(ret);
1520 | } |
1521 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_map_ops_create); |
1522 | |
1523 | /** |
1524 | * drm_gpuvm_sm_unmap_ops_create() - creates the &drm_gpuva_ops to split on |
1525 | * unmap |
1526 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
1527 | * @req_addr: the start address of the range to unmap |
1528 | * @req_range: the range of the mappings to unmap |
1529 | * |
1530 | * This function creates a list of operations to perform unmapping and, if |
1531 | * required, splitting of the mappings overlapping the unmap range. |
1532 | * |
1533 | * The list can be iterated with &drm_gpuva_for_each_op and must be processed |
1534 | * in the given order. It can contain unmap and remap operations, depending on |
1535 | * whether there are actual overlapping mappings to split. |
1536 | * |
1537 | * There can be an arbitrary amount of unmap operations and a maximum of two |
1538 | * remap operations. |
1539 | * |
1540 | * Note that before calling this function again with another range to unmap it |
1541 | * is necessary to update the &drm_gpuvm's view of the GPU VA space. The |
1542 | * previously obtained operations must be processed or abandoned. To update the |
 * &drm_gpuvm's view of the GPU VA space drm_gpuva_insert() and
 * drm_gpuva_remove() should be used.
1546 | * |
1547 | * After the caller finished processing the returned &drm_gpuva_ops, they must |
1548 | * be freed with &drm_gpuva_ops_free. |
1549 | * |
1550 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure |
1551 | */ |
1552 | struct drm_gpuva_ops * |
1553 | drm_gpuvm_sm_unmap_ops_create(struct drm_gpuvm *gpuvm, |
1554 | u64 req_addr, u64 req_range) |
1555 | { |
1556 | struct drm_gpuva_ops *ops; |
1557 | struct { |
1558 | struct drm_gpuvm *vm; |
1559 | struct drm_gpuva_ops *ops; |
1560 | } args; |
1561 | int ret; |
1562 | |
	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (unlikely(!ops))
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);
1568 | |
1569 | args.vm = gpuvm; |
1570 | args.ops = ops; |
1571 | |
	ret = __drm_gpuvm_sm_unmap(gpuvm, &gpuvm_list_ops, &args,
				   req_addr, req_range);
1574 | if (ret) |
1575 | goto err_free_ops; |
1576 | |
1577 | return ops; |
1578 | |
1579 | err_free_ops: |
1580 | drm_gpuva_ops_free(gpuvm, ops); |
	return ERR_PTR(ret);
1582 | } |
1583 | EXPORT_SYMBOL_GPL(drm_gpuvm_sm_unmap_ops_create); |
1584 | |
1585 | /** |
1586 | * drm_gpuvm_prefetch_ops_create() - creates the &drm_gpuva_ops to prefetch |
1587 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
1588 | * @addr: the start address of the range to prefetch |
1589 | * @range: the range of the mappings to prefetch |
1590 | * |
1591 | * This function creates a list of operations to perform prefetching. |
1592 | * |
1593 | * The list can be iterated with &drm_gpuva_for_each_op and must be processed |
1594 | * in the given order. It can contain prefetch operations. |
1595 | * |
1596 | * There can be an arbitrary amount of prefetch operations. |
1597 | * |
1598 | * After the caller finished processing the returned &drm_gpuva_ops, they must |
1599 | * be freed with &drm_gpuva_ops_free. |
1600 | * |
1601 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure |
1602 | */ |
1603 | struct drm_gpuva_ops * |
1604 | drm_gpuvm_prefetch_ops_create(struct drm_gpuvm *gpuvm, |
1605 | u64 addr, u64 range) |
1606 | { |
1607 | struct drm_gpuva_ops *ops; |
1608 | struct drm_gpuva_op *op; |
1609 | struct drm_gpuva *va; |
1610 | u64 end = addr + range; |
1611 | int ret; |
1612 | |
	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);
1618 | |
1619 | drm_gpuvm_for_each_va_range(va, gpuvm, addr, end) { |
1620 | op = gpuva_op_alloc(gpuvm); |
1621 | if (!op) { |
1622 | ret = -ENOMEM; |
1623 | goto err_free_ops; |
1624 | } |
1625 | |
1626 | op->op = DRM_GPUVA_OP_PREFETCH; |
1627 | op->prefetch.va = va; |
		list_add_tail(&op->entry, &ops->list);
1629 | } |
1630 | |
1631 | return ops; |
1632 | |
1633 | err_free_ops: |
1634 | drm_gpuva_ops_free(gpuvm, ops); |
	return ERR_PTR(ret);
1636 | } |
1637 | EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); |
1638 | |
1639 | /** |
1640 | * drm_gpuvm_gem_unmap_ops_create() - creates the &drm_gpuva_ops to unmap a GEM |
1641 | * @gpuvm: the &drm_gpuvm representing the GPU VA space |
1642 | * @obj: the &drm_gem_object to unmap |
1643 | * |
1644 | * This function creates a list of operations to perform unmapping for every |
1645 | * GPUVA attached to a GEM. |
1646 | * |
 * The list can be iterated with &drm_gpuva_for_each_op and consists of an
1648 | * arbitrary amount of unmap operations. |
1649 | * |
1650 | * After the caller finished processing the returned &drm_gpuva_ops, they must |
1651 | * be freed with &drm_gpuva_ops_free. |
1652 | * |
 * It is the caller's responsibility to protect the GEM's GPUVA list against
 * concurrent access using the GEM's dma_resv lock.
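 *
 * A short usage sketch; as in the examples above, driver_vm_unmap() stands in
 * for the driver's actual page table teardown::
 *
 *	ops = drm_gpuvm_gem_unmap_ops_create(gpuvm, obj);
 *	if (IS_ERR(ops))
 *		return PTR_ERR(ops);
 *
 *	drm_gpuva_for_each_op(op, ops) {
 *		driver_vm_unmap();
 *		drm_gpuva_unlink(op->unmap.va);
 *		drm_gpuva_unmap(&op->unmap);
 *	}
 *
 *	drm_gpuva_ops_free(gpuvm, ops);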
1655 | * |
1656 | * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure |
1657 | */ |
1658 | struct drm_gpuva_ops * |
1659 | drm_gpuvm_gem_unmap_ops_create(struct drm_gpuvm *gpuvm, |
1660 | struct drm_gem_object *obj) |
1661 | { |
1662 | struct drm_gpuva_ops *ops; |
1663 | struct drm_gpuva_op *op; |
1664 | struct drm_gpuva *va; |
1665 | int ret; |
1666 | |
1667 | drm_gem_gpuva_assert_lock_held(obj); |
1668 | |
	ops = kzalloc(sizeof(*ops), GFP_KERNEL);
	if (!ops)
		return ERR_PTR(-ENOMEM);

	INIT_LIST_HEAD(&ops->list);
1674 | |
1675 | drm_gem_for_each_gpuva(va, obj) { |
1676 | op = gpuva_op_alloc(gpuvm); |
1677 | if (!op) { |
1678 | ret = -ENOMEM; |
1679 | goto err_free_ops; |
1680 | } |
1681 | |
1682 | op->op = DRM_GPUVA_OP_UNMAP; |
1683 | op->unmap.va = va; |
		list_add_tail(&op->entry, &ops->list);
1685 | } |
1686 | |
1687 | return ops; |
1688 | |
1689 | err_free_ops: |
1690 | drm_gpuva_ops_free(gpuvm, ops); |
	return ERR_PTR(ret);
1692 | } |
1693 | EXPORT_SYMBOL_GPL(drm_gpuvm_gem_unmap_ops_create); |
1694 | |
1695 | /** |
1696 | * drm_gpuva_ops_free() - free the given &drm_gpuva_ops |
1697 | * @gpuvm: the &drm_gpuvm the ops were created for |
1698 | * @ops: the &drm_gpuva_ops to free |
1699 | * |
1700 | * Frees the given &drm_gpuva_ops structure including all the ops associated |
1701 | * with it. |
1702 | */ |
1703 | void |
1704 | drm_gpuva_ops_free(struct drm_gpuvm *gpuvm, |
1705 | struct drm_gpuva_ops *ops) |
1706 | { |
1707 | struct drm_gpuva_op *op, *next; |
1708 | |
1709 | drm_gpuva_for_each_op_safe(op, next, ops) { |
		list_del(&op->entry);
1711 | |
1712 | if (op->op == DRM_GPUVA_OP_REMAP) { |
			kfree(op->remap.prev);
			kfree(op->remap.next);
			kfree(op->remap.unmap);
1716 | } |
1717 | |
1718 | gpuva_op_free(gpuvm, op); |
1719 | } |
1720 | |
	kfree(ops);
1722 | } |
1723 | EXPORT_SYMBOL_GPL(drm_gpuva_ops_free); |
1724 | |
MODULE_DESCRIPTION("DRM GPUVM");
MODULE_LICENSE("GPL");
1727 | |