1 | // SPDX-License-Identifier: MIT |
2 | |
3 | /* |
4 | * Locking: |
5 | * |
6 | * The uvmm mutex protects any operations on the GPU VA space provided by the |
7 | * DRM GPU VA manager. |
8 | * |
* The GEM's dma_resv lock protects the GEM's GPUVA list, hence link/unlink of a
* mapping to its backing GEM must be performed under this lock.
*
* Actual map/unmap operations within the fence signalling critical path are
* protected by installing DMA fences to the corresponding GEMs' DMA
* reservations, such that concurrent BO moves, which themselves walk the GEM's
* GPUVA list in order to map/unmap its entries, can't occur concurrently.
16 | * |
17 | * Accessing the DRM_GPUVA_INVALIDATED flag doesn't need any separate |
18 | * protection, since there are no accesses other than from BO move callbacks |
19 | * and from the fence signalling critical path, which are already protected by |
* the corresponding GEM's DMA reservation fence.
21 | */ |
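
/*
 * A rough sketch of the resulting lock ordering, derived from
 * nouveau_uvmm_bind_job_submit() below (simplified, error handling omitted):
 *
 *	nouveau_uvmm_lock(uvmm);
 *	drm_exec_until_all_locked(exec)
 *		drm_exec_prepare_obj(exec, obj, 1);
 *	bind_link_gpuvas(op->ops, &op->new);
 *	nouveau_uvmm_unlock(uvmm);
 *
 * where drm_exec_prepare_obj() takes the dma_resv locks of the GEMs involved
 * and bind_link_gpuvas() performs the link/unlink while both the uvmm mutex
 * and the dma_resv locks are still held.
 */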
22 | |
23 | #include "nouveau_drv.h" |
24 | #include "nouveau_gem.h" |
25 | #include "nouveau_mem.h" |
26 | #include "nouveau_uvmm.h" |
27 | |
28 | #include <nvif/vmm.h> |
29 | #include <nvif/mem.h> |
30 | |
31 | #include <nvif/class.h> |
32 | #include <nvif/if000c.h> |
33 | #include <nvif/if900d.h> |
34 | |
35 | #define NOUVEAU_VA_SPACE_BITS 47 /* FIXME */ |
36 | #define NOUVEAU_VA_SPACE_START 0x0 |
37 | #define NOUVEAU_VA_SPACE_END (1ULL << NOUVEAU_VA_SPACE_BITS) |
38 | |
39 | #define list_last_op(_ops) list_last_entry(_ops, struct bind_job_op, entry) |
40 | #define list_prev_op(_op) list_prev_entry(_op, entry) |
41 | #define list_for_each_op(_op, _ops) list_for_each_entry(_op, _ops, entry) |
42 | #define list_for_each_op_from_reverse(_op, _ops) \ |
43 | list_for_each_entry_from_reverse(_op, _ops, entry) |
44 | #define list_for_each_op_safe(_op, _n, _ops) list_for_each_entry_safe(_op, _n, _ops, entry) |
45 | |
46 | enum vm_bind_op { |
47 | OP_MAP = DRM_NOUVEAU_VM_BIND_OP_MAP, |
48 | OP_UNMAP = DRM_NOUVEAU_VM_BIND_OP_UNMAP, |
49 | OP_MAP_SPARSE, |
50 | OP_UNMAP_SPARSE, |
51 | }; |
52 | |
53 | struct nouveau_uvma_prealloc { |
54 | struct nouveau_uvma *map; |
55 | struct nouveau_uvma *prev; |
56 | struct nouveau_uvma *next; |
57 | }; |
58 | |
59 | struct bind_job_op { |
60 | struct list_head entry; |
61 | |
62 | enum vm_bind_op op; |
63 | u32 flags; |
64 | |
65 | struct { |
66 | u64 addr; |
67 | u64 range; |
68 | } va; |
69 | |
70 | struct { |
71 | u32 handle; |
72 | u64 offset; |
73 | struct drm_gem_object *obj; |
74 | } gem; |
75 | |
76 | struct nouveau_uvma_region *reg; |
77 | struct nouveau_uvma_prealloc new; |
78 | struct drm_gpuva_ops *ops; |
79 | }; |
80 | |
81 | struct uvmm_map_args { |
82 | struct nouveau_uvma_region *region; |
83 | u64 addr; |
84 | u64 range; |
85 | u8 kind; |
86 | }; |
87 | |
88 | static int |
89 | nouveau_uvmm_vmm_sparse_ref(struct nouveau_uvmm *uvmm, |
90 | u64 addr, u64 range) |
91 | { |
92 | struct nvif_vmm *vmm = &uvmm->vmm.vmm; |
93 | |
94 | return nvif_vmm_raw_sparse(vmm, addr, range, true); |
95 | } |
96 | |
97 | static int |
98 | nouveau_uvmm_vmm_sparse_unref(struct nouveau_uvmm *uvmm, |
99 | u64 addr, u64 range) |
100 | { |
101 | struct nvif_vmm *vmm = &uvmm->vmm.vmm; |
102 | |
103 | return nvif_vmm_raw_sparse(vmm, addr, range, false); |
104 | } |
105 | |
106 | static int |
107 | nouveau_uvmm_vmm_get(struct nouveau_uvmm *uvmm, |
108 | u64 addr, u64 range) |
109 | { |
110 | struct nvif_vmm *vmm = &uvmm->vmm.vmm; |
111 | |
112 | return nvif_vmm_raw_get(vmm, addr, range, PAGE_SHIFT); |
113 | } |
114 | |
115 | static int |
116 | nouveau_uvmm_vmm_put(struct nouveau_uvmm *uvmm, |
117 | u64 addr, u64 range) |
118 | { |
119 | struct nvif_vmm *vmm = &uvmm->vmm.vmm; |
120 | |
121 | return nvif_vmm_raw_put(vmm, addr, range, PAGE_SHIFT); |
122 | } |
123 | |
124 | static int |
125 | nouveau_uvmm_vmm_unmap(struct nouveau_uvmm *uvmm, |
126 | u64 addr, u64 range, bool sparse) |
127 | { |
128 | struct nvif_vmm *vmm = &uvmm->vmm.vmm; |
129 | |
130 | return nvif_vmm_raw_unmap(vmm, addr, range, PAGE_SHIFT, sparse); |
131 | } |
132 | |
133 | static int |
134 | nouveau_uvmm_vmm_map(struct nouveau_uvmm *uvmm, |
135 | u64 addr, u64 range, |
136 | u64 bo_offset, u8 kind, |
137 | struct nouveau_mem *mem) |
138 | { |
139 | struct nvif_vmm *vmm = &uvmm->vmm.vmm; |
140 | union { |
141 | struct gf100_vmm_map_v0 gf100; |
142 | } args; |
143 | u32 argc = 0; |
144 | |
145 | switch (vmm->object.oclass) { |
146 | case NVIF_CLASS_VMM_GF100: |
147 | case NVIF_CLASS_VMM_GM200: |
148 | case NVIF_CLASS_VMM_GP100: |
149 | args.gf100.version = 0; |
150 | if (mem->mem.type & NVIF_MEM_VRAM) |
151 | args.gf100.vol = 0; |
152 | else |
153 | args.gf100.vol = 1; |
154 | args.gf100.ro = 0; |
155 | args.gf100.priv = 0; |
156 | args.gf100.kind = kind; |
157 | argc = sizeof(args.gf100); |
158 | break; |
159 | default: |
160 | WARN_ON(1); |
161 | return -ENOSYS; |
162 | } |
163 | |
164 | return nvif_vmm_raw_map(vmm, addr, range, PAGE_SHIFT, |
165 | &args, argc, |
166 | &mem->mem, bo_offset); |
167 | } |
168 | |
169 | static int |
170 | nouveau_uvma_region_sparse_unref(struct nouveau_uvma_region *reg) |
171 | { |
172 | u64 addr = reg->va.addr; |
173 | u64 range = reg->va.range; |
174 | |
return nouveau_uvmm_vmm_sparse_unref(reg->uvmm, addr, range);
176 | } |
177 | |
178 | static int |
179 | nouveau_uvma_vmm_put(struct nouveau_uvma *uvma) |
180 | { |
181 | u64 addr = uvma->va.va.addr; |
182 | u64 range = uvma->va.va.range; |
183 | |
184 | return nouveau_uvmm_vmm_put(to_uvmm(uvma), addr, range); |
185 | } |
186 | |
187 | static int |
188 | nouveau_uvma_map(struct nouveau_uvma *uvma, |
189 | struct nouveau_mem *mem) |
190 | { |
191 | u64 addr = uvma->va.va.addr; |
192 | u64 offset = uvma->va.gem.offset; |
193 | u64 range = uvma->va.va.range; |
194 | |
return nouveau_uvmm_vmm_map(to_uvmm(uvma), addr, range,
offset, uvma->kind, mem);
197 | } |
198 | |
199 | static int |
200 | nouveau_uvma_unmap(struct nouveau_uvma *uvma) |
201 | { |
202 | u64 addr = uvma->va.va.addr; |
203 | u64 range = uvma->va.va.range; |
204 | bool sparse = !!uvma->region; |
205 | |
if (drm_gpuva_invalidated(&uvma->va))
207 | return 0; |
208 | |
209 | return nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse); |
210 | } |
211 | |
212 | static int |
213 | nouveau_uvma_alloc(struct nouveau_uvma **puvma) |
214 | { |
*puvma = kzalloc(sizeof(**puvma), GFP_KERNEL);
216 | if (!*puvma) |
217 | return -ENOMEM; |
218 | |
219 | return 0; |
220 | } |
221 | |
222 | static void |
223 | nouveau_uvma_free(struct nouveau_uvma *uvma) |
224 | { |
kfree(uvma);
226 | } |
227 | |
228 | static void |
229 | nouveau_uvma_gem_get(struct nouveau_uvma *uvma) |
230 | { |
drm_gem_object_get(uvma->va.gem.obj);
232 | } |
233 | |
234 | static void |
235 | nouveau_uvma_gem_put(struct nouveau_uvma *uvma) |
236 | { |
drm_gem_object_put(uvma->va.gem.obj);
238 | } |
239 | |
240 | static int |
241 | nouveau_uvma_region_alloc(struct nouveau_uvma_region **preg) |
242 | { |
*preg = kzalloc(sizeof(**preg), GFP_KERNEL);
if (!*preg)
return -ENOMEM;

kref_init(&(*preg)->kref);
248 | |
249 | return 0; |
250 | } |
251 | |
252 | static void |
253 | nouveau_uvma_region_free(struct kref *kref) |
254 | { |
255 | struct nouveau_uvma_region *reg = |
256 | container_of(kref, struct nouveau_uvma_region, kref); |
257 | |
kfree(reg);
259 | } |
260 | |
261 | static void |
262 | nouveau_uvma_region_get(struct nouveau_uvma_region *reg) |
263 | { |
kref_get(&reg->kref);
265 | } |
266 | |
267 | static void |
268 | nouveau_uvma_region_put(struct nouveau_uvma_region *reg) |
269 | { |
kref_put(&reg->kref, nouveau_uvma_region_free);
271 | } |
272 | |
273 | static int |
274 | __nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm, |
275 | struct nouveau_uvma_region *reg) |
276 | { |
277 | u64 addr = reg->va.addr; |
278 | u64 range = reg->va.range; |
279 | u64 last = addr + range - 1; |
280 | MA_STATE(mas, &uvmm->region_mt, addr, addr); |
281 | |
282 | if (unlikely(mas_walk(&mas))) |
283 | return -EEXIST; |
284 | |
285 | if (unlikely(mas.last < last)) |
286 | return -EEXIST; |
287 | |
288 | mas.index = addr; |
289 | mas.last = last; |
290 | |
mas_store_gfp(&mas, reg, GFP_KERNEL);
292 | |
293 | reg->uvmm = uvmm; |
294 | |
295 | return 0; |
296 | } |
297 | |
298 | static int |
299 | nouveau_uvma_region_insert(struct nouveau_uvmm *uvmm, |
300 | struct nouveau_uvma_region *reg, |
301 | u64 addr, u64 range) |
302 | { |
303 | int ret; |
304 | |
305 | reg->uvmm = uvmm; |
306 | reg->va.addr = addr; |
307 | reg->va.range = range; |
308 | |
309 | ret = __nouveau_uvma_region_insert(uvmm, reg); |
310 | if (ret) |
311 | return ret; |
312 | |
313 | return 0; |
314 | } |
315 | |
316 | static void |
317 | nouveau_uvma_region_remove(struct nouveau_uvma_region *reg) |
318 | { |
319 | struct nouveau_uvmm *uvmm = reg->uvmm; |
320 | MA_STATE(mas, &uvmm->region_mt, reg->va.addr, 0); |
321 | |
mas_erase(&mas);
323 | } |
324 | |
325 | static int |
326 | nouveau_uvma_region_create(struct nouveau_uvmm *uvmm, |
327 | u64 addr, u64 range) |
328 | { |
329 | struct nouveau_uvma_region *reg; |
330 | int ret; |
331 | |
if (!drm_gpuvm_interval_empty(&uvmm->base, addr, range))
333 | return -ENOSPC; |
334 | |
ret = nouveau_uvma_region_alloc(&reg);
336 | if (ret) |
337 | return ret; |
338 | |
339 | ret = nouveau_uvma_region_insert(uvmm, reg, addr, range); |
340 | if (ret) |
341 | goto err_free_region; |
342 | |
343 | ret = nouveau_uvmm_vmm_sparse_ref(uvmm, addr, range); |
344 | if (ret) |
345 | goto err_region_remove; |
346 | |
347 | return 0; |
348 | |
349 | err_region_remove: |
350 | nouveau_uvma_region_remove(reg); |
351 | err_free_region: |
352 | nouveau_uvma_region_put(reg); |
353 | return ret; |
354 | } |
355 | |
356 | static struct nouveau_uvma_region * |
357 | nouveau_uvma_region_find_first(struct nouveau_uvmm *uvmm, |
358 | u64 addr, u64 range) |
359 | { |
360 | MA_STATE(mas, &uvmm->region_mt, addr, 0); |
361 | |
return mas_find(&mas, addr + range - 1);
363 | } |
364 | |
365 | static struct nouveau_uvma_region * |
366 | nouveau_uvma_region_find(struct nouveau_uvmm *uvmm, |
367 | u64 addr, u64 range) |
368 | { |
369 | struct nouveau_uvma_region *reg; |
370 | |
371 | reg = nouveau_uvma_region_find_first(uvmm, addr, range); |
372 | if (!reg) |
373 | return NULL; |
374 | |
375 | if (reg->va.addr != addr || |
376 | reg->va.range != range) |
377 | return NULL; |
378 | |
379 | return reg; |
380 | } |
381 | |
382 | static bool |
383 | nouveau_uvma_region_empty(struct nouveau_uvma_region *reg) |
384 | { |
385 | struct nouveau_uvmm *uvmm = reg->uvmm; |
386 | |
return drm_gpuvm_interval_empty(&uvmm->base,
reg->va.addr,
reg->va.range);
390 | } |
391 | |
392 | static int |
393 | __nouveau_uvma_region_destroy(struct nouveau_uvma_region *reg) |
394 | { |
395 | struct nouveau_uvmm *uvmm = reg->uvmm; |
396 | u64 addr = reg->va.addr; |
397 | u64 range = reg->va.range; |
398 | |
399 | if (!nouveau_uvma_region_empty(reg)) |
400 | return -EBUSY; |
401 | |
402 | nouveau_uvma_region_remove(reg); |
403 | nouveau_uvmm_vmm_sparse_unref(uvmm, addr, range); |
404 | nouveau_uvma_region_put(reg); |
405 | |
406 | return 0; |
407 | } |
408 | |
409 | static int |
410 | nouveau_uvma_region_destroy(struct nouveau_uvmm *uvmm, |
411 | u64 addr, u64 range) |
412 | { |
413 | struct nouveau_uvma_region *reg; |
414 | |
415 | reg = nouveau_uvma_region_find(uvmm, addr, range); |
416 | if (!reg) |
417 | return -ENOENT; |
418 | |
419 | return __nouveau_uvma_region_destroy(reg); |
420 | } |
421 | |
422 | static void |
423 | nouveau_uvma_region_dirty(struct nouveau_uvma_region *reg) |
424 | { |
init_completion(&reg->complete);
427 | reg->dirty = true; |
428 | } |
429 | |
430 | static void |
431 | nouveau_uvma_region_complete(struct nouveau_uvma_region *reg) |
432 | { |
433 | complete_all(®->complete); |
434 | } |
435 | |
436 | static void |
437 | op_map_prepare_unwind(struct nouveau_uvma *uvma) |
438 | { |
439 | nouveau_uvma_gem_put(uvma); |
drm_gpuva_remove(&uvma->va);
441 | nouveau_uvma_free(uvma); |
442 | } |
443 | |
444 | static void |
445 | op_unmap_prepare_unwind(struct drm_gpuva *va) |
446 | { |
drm_gpuva_insert(va->vm, va);
448 | } |
449 | |
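/*
 * Undo the changes made by a partially completed nouveau_uvmm_sm_prepare():
 * walk the ops backwards from @last to restore the GPU VA space, then (for
 * map preparations, i.e. when @args is non-NULL) walk forwards again to
 * release the page table memory acquired through nouveau_uvmm_vmm_get().
 */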
450 | static void |
451 | nouveau_uvmm_sm_prepare_unwind(struct nouveau_uvmm *uvmm, |
452 | struct nouveau_uvma_prealloc *new, |
453 | struct drm_gpuva_ops *ops, |
454 | struct drm_gpuva_op *last, |
455 | struct uvmm_map_args *args) |
456 | { |
457 | struct drm_gpuva_op *op = last; |
458 | u64 vmm_get_start = args ? args->addr : 0; |
459 | u64 vmm_get_end = args ? args->addr + args->range : 0; |
460 | |
461 | /* Unwind GPUVA space. */ |
462 | drm_gpuva_for_each_op_from_reverse(op, ops) { |
463 | switch (op->op) { |
464 | case DRM_GPUVA_OP_MAP: |
op_map_prepare_unwind(new->map);
466 | break; |
467 | case DRM_GPUVA_OP_REMAP: { |
468 | struct drm_gpuva_op_remap *r = &op->remap; |
469 | |
470 | if (r->next) |
op_map_prepare_unwind(new->next);
472 | |
473 | if (r->prev) |
op_map_prepare_unwind(new->prev);
475 | |
op_unmap_prepare_unwind(r->unmap->va);
477 | break; |
478 | } |
479 | case DRM_GPUVA_OP_UNMAP: |
op_unmap_prepare_unwind(op->unmap.va);
481 | break; |
482 | default: |
483 | break; |
484 | } |
485 | } |
486 | |
/* Unmap operations don't allocate page tables, hence skip the following
488 | * page table unwind. |
489 | */ |
490 | if (!args) |
491 | return; |
492 | |
493 | drm_gpuva_for_each_op(op, ops) { |
494 | switch (op->op) { |
495 | case DRM_GPUVA_OP_MAP: { |
496 | u64 vmm_get_range = vmm_get_end - vmm_get_start; |
497 | |
498 | if (vmm_get_range) |
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
vmm_get_range);
501 | break; |
502 | } |
503 | case DRM_GPUVA_OP_REMAP: { |
504 | struct drm_gpuva_op_remap *r = &op->remap; |
505 | struct drm_gpuva *va = r->unmap->va; |
506 | u64 ustart = va->va.addr; |
507 | u64 urange = va->va.range; |
508 | u64 uend = ustart + urange; |
509 | |
510 | if (r->prev) |
511 | vmm_get_start = uend; |
512 | |
513 | if (r->next) |
514 | vmm_get_end = ustart; |
515 | |
516 | if (r->prev && r->next) |
517 | vmm_get_start = vmm_get_end = 0; |
518 | |
519 | break; |
520 | } |
521 | case DRM_GPUVA_OP_UNMAP: { |
522 | struct drm_gpuva_op_unmap *u = &op->unmap; |
523 | struct drm_gpuva *va = u->va; |
524 | u64 ustart = va->va.addr; |
525 | u64 urange = va->va.range; |
526 | u64 uend = ustart + urange; |
527 | |
528 | /* Nothing to do for mappings we merge with. */ |
529 | if (uend == vmm_get_start || |
530 | ustart == vmm_get_end) |
531 | break; |
532 | |
533 | if (ustart > vmm_get_start) { |
534 | u64 vmm_get_range = ustart - vmm_get_start; |
535 | |
nouveau_uvmm_vmm_put(uvmm, vmm_get_start,
vmm_get_range);
538 | } |
539 | vmm_get_start = uend; |
540 | break; |
541 | } |
542 | default: |
543 | break; |
544 | } |
545 | |
546 | if (op == last) |
547 | break; |
548 | } |
549 | } |
550 | |
551 | static void |
552 | nouveau_uvmm_sm_map_prepare_unwind(struct nouveau_uvmm *uvmm, |
553 | struct nouveau_uvma_prealloc *new, |
554 | struct drm_gpuva_ops *ops, |
555 | u64 addr, u64 range) |
556 | { |
557 | struct drm_gpuva_op *last = drm_gpuva_last_op(ops); |
558 | struct uvmm_map_args args = { |
559 | .addr = addr, |
560 | .range = range, |
561 | }; |
562 | |
nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, &args);
564 | } |
565 | |
566 | static void |
567 | nouveau_uvmm_sm_unmap_prepare_unwind(struct nouveau_uvmm *uvmm, |
568 | struct nouveau_uvma_prealloc *new, |
569 | struct drm_gpuva_ops *ops) |
570 | { |
571 | struct drm_gpuva_op *last = drm_gpuva_last_op(ops); |
572 | |
573 | nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, last, NULL); |
574 | } |
575 | |
576 | static int |
577 | op_map_prepare(struct nouveau_uvmm *uvmm, |
578 | struct nouveau_uvma **puvma, |
579 | struct drm_gpuva_op_map *op, |
580 | struct uvmm_map_args *args) |
581 | { |
582 | struct nouveau_uvma *uvma; |
583 | int ret; |
584 | |
ret = nouveau_uvma_alloc(&uvma);
586 | if (ret) |
587 | return ret; |
588 | |
589 | uvma->region = args->region; |
590 | uvma->kind = args->kind; |
591 | |
drm_gpuva_map(&uvmm->base, &uvma->va, op);
593 | |
594 | /* Keep a reference until this uvma is destroyed. */ |
595 | nouveau_uvma_gem_get(uvma); |
596 | |
597 | *puvma = uvma; |
598 | return 0; |
599 | } |
600 | |
601 | static void |
602 | op_unmap_prepare(struct drm_gpuva_op_unmap *u) |
603 | { |
drm_gpuva_unmap(u);
605 | } |
606 | |
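/*
 * Prepare the split/merge state machine: for each drm_gpuva_op allocate the
 * new uvmas and adjust the GPU VA space accordingly; for map preparations
 * (@args != NULL) additionally acquire page table memory through
 * nouveau_uvmm_vmm_get() for the ranges that aren't already covered by
 * mappings being merged with or remapped. On failure everything done so far
 * is unwound again.
 */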
607 | static int |
608 | nouveau_uvmm_sm_prepare(struct nouveau_uvmm *uvmm, |
609 | struct nouveau_uvma_prealloc *new, |
610 | struct drm_gpuva_ops *ops, |
611 | struct uvmm_map_args *args) |
612 | { |
613 | struct drm_gpuva_op *op; |
614 | u64 vmm_get_start = args ? args->addr : 0; |
615 | u64 vmm_get_end = args ? args->addr + args->range : 0; |
616 | int ret; |
617 | |
618 | drm_gpuva_for_each_op(op, ops) { |
619 | switch (op->op) { |
620 | case DRM_GPUVA_OP_MAP: { |
621 | u64 vmm_get_range = vmm_get_end - vmm_get_start; |
622 | |
ret = op_map_prepare(uvmm, &new->map, &op->map, args);
624 | if (ret) |
625 | goto unwind; |
626 | |
627 | if (args && vmm_get_range) { |
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
630 | if (ret) { |
op_map_prepare_unwind(new->map);
632 | goto unwind; |
633 | } |
634 | } |
635 | break; |
636 | } |
637 | case DRM_GPUVA_OP_REMAP: { |
638 | struct drm_gpuva_op_remap *r = &op->remap; |
639 | struct drm_gpuva *va = r->unmap->va; |
640 | struct uvmm_map_args remap_args = { |
641 | .kind = uvma_from_va(va)->kind, |
642 | .region = uvma_from_va(va)->region, |
643 | }; |
644 | u64 ustart = va->va.addr; |
645 | u64 urange = va->va.range; |
646 | u64 uend = ustart + urange; |
647 | |
op_unmap_prepare(r->unmap);
649 | |
650 | if (r->prev) { |
ret = op_map_prepare(uvmm, &new->prev, r->prev,
&remap_args);
653 | if (ret) |
654 | goto unwind; |
655 | |
656 | if (args) |
657 | vmm_get_start = uend; |
658 | } |
659 | |
660 | if (r->next) { |
ret = op_map_prepare(uvmm, &new->next, r->next,
&remap_args);
663 | if (ret) { |
664 | if (r->prev) |
op_map_prepare_unwind(new->prev);
666 | goto unwind; |
667 | } |
668 | |
669 | if (args) |
670 | vmm_get_end = ustart; |
671 | } |
672 | |
673 | if (args && (r->prev && r->next)) |
674 | vmm_get_start = vmm_get_end = 0; |
675 | |
676 | break; |
677 | } |
678 | case DRM_GPUVA_OP_UNMAP: { |
679 | struct drm_gpuva_op_unmap *u = &op->unmap; |
680 | struct drm_gpuva *va = u->va; |
681 | u64 ustart = va->va.addr; |
682 | u64 urange = va->va.range; |
683 | u64 uend = ustart + urange; |
684 | |
685 | op_unmap_prepare(u); |
686 | |
687 | if (!args) |
688 | break; |
689 | |
690 | /* Nothing to do for mappings we merge with. */ |
691 | if (uend == vmm_get_start || |
692 | ustart == vmm_get_end) |
693 | break; |
694 | |
695 | if (ustart > vmm_get_start) { |
696 | u64 vmm_get_range = ustart - vmm_get_start; |
697 | |
ret = nouveau_uvmm_vmm_get(uvmm, vmm_get_start,
vmm_get_range);
700 | if (ret) { |
701 | op_unmap_prepare_unwind(va); |
702 | goto unwind; |
703 | } |
704 | } |
705 | vmm_get_start = uend; |
706 | |
707 | break; |
708 | } |
709 | default: |
710 | ret = -EINVAL; |
711 | goto unwind; |
712 | } |
713 | } |
714 | |
715 | return 0; |
716 | |
717 | unwind: |
718 | if (op != drm_gpuva_first_op(ops)) |
719 | nouveau_uvmm_sm_prepare_unwind(uvmm, new, ops, |
720 | drm_gpuva_prev_op(op), |
721 | args); |
722 | return ret; |
723 | } |
724 | |
725 | static int |
726 | nouveau_uvmm_sm_map_prepare(struct nouveau_uvmm *uvmm, |
727 | struct nouveau_uvma_prealloc *new, |
728 | struct nouveau_uvma_region *region, |
729 | struct drm_gpuva_ops *ops, |
730 | u64 addr, u64 range, u8 kind) |
731 | { |
732 | struct uvmm_map_args args = { |
733 | .region = region, |
734 | .addr = addr, |
735 | .range = range, |
736 | .kind = kind, |
737 | }; |
738 | |
return nouveau_uvmm_sm_prepare(uvmm, new, ops, &args);
740 | } |
741 | |
742 | static int |
743 | nouveau_uvmm_sm_unmap_prepare(struct nouveau_uvmm *uvmm, |
744 | struct nouveau_uvma_prealloc *new, |
745 | struct drm_gpuva_ops *ops) |
746 | { |
747 | return nouveau_uvmm_sm_prepare(uvmm, new, ops, NULL); |
748 | } |
749 | |
750 | static struct drm_gem_object * |
751 | op_gem_obj(struct drm_gpuva_op *op) |
752 | { |
753 | switch (op->op) { |
754 | case DRM_GPUVA_OP_MAP: |
755 | return op->map.gem.obj; |
756 | case DRM_GPUVA_OP_REMAP: |
757 | /* Actually, we're looking for the GEMs backing remap.prev and |
758 | * remap.next, but since this is a remap they're identical to |
759 | * the GEM backing the unmapped GPUVA. |
760 | */ |
761 | return op->remap.unmap->va->gem.obj; |
762 | case DRM_GPUVA_OP_UNMAP: |
763 | return op->unmap.va->gem.obj; |
764 | default: |
WARN(1, "Unknown operation.\n");
766 | return NULL; |
767 | } |
768 | } |
769 | |
770 | static void |
771 | op_map(struct nouveau_uvma *uvma) |
772 | { |
struct nouveau_bo *nvbo = nouveau_gem_object(uvma->va.gem.obj);
774 | |
nouveau_uvma_map(uvma, nouveau_mem(nvbo->bo.resource));
776 | } |
777 | |
778 | static void |
779 | op_unmap(struct drm_gpuva_op_unmap *u) |
780 | { |
781 | struct drm_gpuva *va = u->va; |
782 | struct nouveau_uvma *uvma = uvma_from_va(va); |
783 | |
784 | /* nouveau_uvma_unmap() does not unmap if backing BO is evicted. */ |
785 | if (!u->keep) |
786 | nouveau_uvma_unmap(uvma); |
787 | } |
788 | |
789 | static void |
790 | op_unmap_range(struct drm_gpuva_op_unmap *u, |
791 | u64 addr, u64 range) |
792 | { |
793 | struct nouveau_uvma *uvma = uvma_from_va(u->va); |
794 | bool sparse = !!uvma->region; |
795 | |
if (!drm_gpuva_invalidated(u->va))
797 | nouveau_uvmm_vmm_unmap(to_uvmm(uvma), addr, range, sparse); |
798 | } |
799 | |
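/*
 * For a remap, only the hole between the preserved prev and next ranges is
 * actually unmapped on the GPU; the PTEs backing r->prev and r->next stay
 * intact and are simply re-tracked by the uvmas allocated during prepare.
 */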
800 | static void |
801 | op_remap(struct drm_gpuva_op_remap *r, |
802 | struct nouveau_uvma_prealloc *new) |
803 | { |
804 | struct drm_gpuva_op_unmap *u = r->unmap; |
805 | struct nouveau_uvma *uvma = uvma_from_va(u->va); |
806 | u64 addr = uvma->va.va.addr; |
807 | u64 range = uvma->va.va.range; |
808 | |
809 | if (r->prev) |
810 | addr = r->prev->va.addr + r->prev->va.range; |
811 | |
812 | if (r->next) |
813 | range = r->next->va.addr - addr; |
814 | |
815 | op_unmap_range(u, addr, range); |
816 | } |
817 | |
818 | static int |
819 | nouveau_uvmm_sm(struct nouveau_uvmm *uvmm, |
820 | struct nouveau_uvma_prealloc *new, |
821 | struct drm_gpuva_ops *ops) |
822 | { |
823 | struct drm_gpuva_op *op; |
824 | |
825 | drm_gpuva_for_each_op(op, ops) { |
826 | switch (op->op) { |
827 | case DRM_GPUVA_OP_MAP: |
op_map(new->map);
829 | break; |
830 | case DRM_GPUVA_OP_REMAP: |
op_remap(&op->remap, new);
832 | break; |
833 | case DRM_GPUVA_OP_UNMAP: |
op_unmap(&op->unmap);
835 | break; |
836 | default: |
837 | break; |
838 | } |
839 | } |
840 | |
841 | return 0; |
842 | } |
843 | |
844 | static int |
845 | nouveau_uvmm_sm_map(struct nouveau_uvmm *uvmm, |
846 | struct nouveau_uvma_prealloc *new, |
847 | struct drm_gpuva_ops *ops) |
848 | { |
849 | return nouveau_uvmm_sm(uvmm, new, ops); |
850 | } |
851 | |
852 | static int |
853 | nouveau_uvmm_sm_unmap(struct nouveau_uvmm *uvmm, |
854 | struct nouveau_uvma_prealloc *new, |
855 | struct drm_gpuva_ops *ops) |
856 | { |
857 | return nouveau_uvmm_sm(uvmm, new, ops); |
858 | } |
859 | |
860 | static void |
861 | nouveau_uvmm_sm_cleanup(struct nouveau_uvmm *uvmm, |
862 | struct nouveau_uvma_prealloc *new, |
863 | struct drm_gpuva_ops *ops, bool unmap) |
864 | { |
865 | struct drm_gpuva_op *op; |
866 | |
867 | drm_gpuva_for_each_op(op, ops) { |
868 | switch (op->op) { |
869 | case DRM_GPUVA_OP_MAP: |
870 | break; |
871 | case DRM_GPUVA_OP_REMAP: { |
872 | struct drm_gpuva_op_remap *r = &op->remap; |
873 | struct drm_gpuva_op_map *p = r->prev; |
874 | struct drm_gpuva_op_map *n = r->next; |
875 | struct drm_gpuva *va = r->unmap->va; |
876 | struct nouveau_uvma *uvma = uvma_from_va(va); |
877 | |
878 | if (unmap) { |
879 | u64 addr = va->va.addr; |
880 | u64 end = addr + va->va.range; |
881 | |
882 | if (p) |
883 | addr = p->va.addr + p->va.range; |
884 | |
885 | if (n) |
886 | end = n->va.addr; |
887 | |
nouveau_uvmm_vmm_put(uvmm, addr, end - addr);
889 | } |
890 | |
891 | nouveau_uvma_gem_put(uvma); |
892 | nouveau_uvma_free(uvma); |
893 | break; |
894 | } |
895 | case DRM_GPUVA_OP_UNMAP: { |
896 | struct drm_gpuva_op_unmap *u = &op->unmap; |
897 | struct drm_gpuva *va = u->va; |
898 | struct nouveau_uvma *uvma = uvma_from_va(va); |
899 | |
900 | if (unmap) |
901 | nouveau_uvma_vmm_put(uvma); |
902 | |
903 | nouveau_uvma_gem_put(uvma); |
904 | nouveau_uvma_free(uvma); |
905 | break; |
906 | } |
907 | default: |
908 | break; |
909 | } |
910 | } |
911 | } |
912 | |
913 | static void |
914 | nouveau_uvmm_sm_map_cleanup(struct nouveau_uvmm *uvmm, |
915 | struct nouveau_uvma_prealloc *new, |
916 | struct drm_gpuva_ops *ops) |
917 | { |
nouveau_uvmm_sm_cleanup(uvmm, new, ops, false);
919 | } |
920 | |
921 | static void |
922 | nouveau_uvmm_sm_unmap_cleanup(struct nouveau_uvmm *uvmm, |
923 | struct nouveau_uvma_prealloc *new, |
924 | struct drm_gpuva_ops *ops) |
925 | { |
nouveau_uvmm_sm_cleanup(uvmm, new, ops, true);
927 | } |
928 | |
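/*
 * A VA range is only acceptable if it is page aligned, non-empty, entirely
 * within the supported VA space and doesn't intersect the kernel managed
 * area reserved at VM init time.
 */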
929 | static int |
930 | nouveau_uvmm_validate_range(struct nouveau_uvmm *uvmm, u64 addr, u64 range) |
931 | { |
932 | u64 end = addr + range; |
933 | u64 kernel_managed_end = uvmm->kernel_managed_addr + |
934 | uvmm->kernel_managed_size; |
935 | |
936 | if (addr & ~PAGE_MASK) |
937 | return -EINVAL; |
938 | |
939 | if (range & ~PAGE_MASK) |
940 | return -EINVAL; |
941 | |
942 | if (end <= addr) |
943 | return -EINVAL; |
944 | |
945 | if (addr < NOUVEAU_VA_SPACE_START || |
946 | end > NOUVEAU_VA_SPACE_END) |
947 | return -EINVAL; |
948 | |
949 | if (addr < kernel_managed_end && |
950 | end > uvmm->kernel_managed_addr) |
951 | return -EINVAL; |
952 | |
953 | return 0; |
954 | } |
955 | |
956 | static int |
957 | nouveau_uvmm_bind_job_alloc(struct nouveau_uvmm_bind_job **pjob) |
958 | { |
*pjob = kzalloc(sizeof(**pjob), GFP_KERNEL);
if (!*pjob)
return -ENOMEM;

kref_init(&(*pjob)->kref);
964 | |
965 | return 0; |
966 | } |
967 | |
968 | static void |
969 | nouveau_uvmm_bind_job_free(struct kref *kref) |
970 | { |
971 | struct nouveau_uvmm_bind_job *job = |
972 | container_of(kref, struct nouveau_uvmm_bind_job, kref); |
973 | |
nouveau_job_free(&job->base);
kfree(job);
976 | } |
977 | |
978 | static void |
979 | nouveau_uvmm_bind_job_get(struct nouveau_uvmm_bind_job *job) |
980 | { |
kref_get(&job->kref);
982 | } |
983 | |
984 | static void |
985 | nouveau_uvmm_bind_job_put(struct nouveau_uvmm_bind_job *job) |
986 | { |
kref_put(&job->kref, nouveau_uvmm_bind_job_free);
988 | } |
989 | |
990 | static int |
991 | bind_validate_op(struct nouveau_job *job, |
992 | struct bind_job_op *op) |
993 | { |
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
995 | struct drm_gem_object *obj = op->gem.obj; |
996 | |
997 | if (op->op == OP_MAP) { |
998 | if (op->gem.offset & ~PAGE_MASK) |
999 | return -EINVAL; |
1000 | |
1001 | if (obj->size <= op->gem.offset) |
1002 | return -EINVAL; |
1003 | |
1004 | if (op->va.range > (obj->size - op->gem.offset)) |
1005 | return -EINVAL; |
1006 | } |
1007 | |
return nouveau_uvmm_validate_range(uvmm, op->va.addr, op->va.range);
1009 | } |
1010 | |
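/*
 * Block until all queued bind jobs containing an unmap that overlaps the
 * requested sparse range have finished, restarting the scan after each wait
 * since the job list may have changed in the meantime.
 */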
1011 | static void |
1012 | bind_validate_map_sparse(struct nouveau_job *job, u64 addr, u64 range) |
1013 | { |
1014 | struct nouveau_uvmm_bind_job *bind_job; |
1015 | struct nouveau_sched_entity *entity = job->entity; |
1016 | struct bind_job_op *op; |
1017 | u64 end = addr + range; |
1018 | |
1019 | again: |
spin_lock(&entity->job.list.lock);
1021 | list_for_each_entry(bind_job, &entity->job.list.head, entry) { |
1022 | list_for_each_op(op, &bind_job->ops) { |
1023 | if (op->op == OP_UNMAP) { |
1024 | u64 op_addr = op->va.addr; |
1025 | u64 op_end = op_addr + op->va.range; |
1026 | |
1027 | if (!(end <= op_addr || addr >= op_end)) { |
nouveau_uvmm_bind_job_get(bind_job);
spin_unlock(&entity->job.list.lock);
wait_for_completion(&bind_job->complete);
nouveau_uvmm_bind_job_put(bind_job);
1032 | goto again; |
1033 | } |
1034 | } |
1035 | } |
1036 | } |
spin_unlock(&entity->job.list.lock);
1038 | } |
1039 | |
1040 | static int |
1041 | bind_validate_map_common(struct nouveau_job *job, u64 addr, u64 range, |
1042 | bool sparse) |
1043 | { |
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1045 | struct nouveau_uvma_region *reg; |
1046 | u64 reg_addr, reg_end; |
1047 | u64 end = addr + range; |
1048 | |
1049 | again: |
1050 | nouveau_uvmm_lock(uvmm); |
1051 | reg = nouveau_uvma_region_find_first(uvmm, addr, range); |
1052 | if (!reg) { |
1053 | nouveau_uvmm_unlock(uvmm); |
1054 | return 0; |
1055 | } |
1056 | |
1057 | /* Generally, job submits are serialized, hence only |
1058 | * dirty regions can be modified concurrently. |
1059 | */ |
1060 | if (reg->dirty) { |
1061 | nouveau_uvma_region_get(reg); |
1062 | nouveau_uvmm_unlock(uvmm); |
1063 | wait_for_completion(®->complete); |
1064 | nouveau_uvma_region_put(reg); |
1065 | goto again; |
1066 | } |
1067 | nouveau_uvmm_unlock(uvmm); |
1068 | |
1069 | if (sparse) |
1070 | return -ENOSPC; |
1071 | |
1072 | reg_addr = reg->va.addr; |
1073 | reg_end = reg_addr + reg->va.range; |
1074 | |
1075 | /* Make sure the mapping is either outside of a |
1076 | * region or fully enclosed by a region. |
1077 | */ |
1078 | if (reg_addr > addr || reg_end < end) |
1079 | return -ENOSPC; |
1080 | |
1081 | return 0; |
1082 | } |
1083 | |
1084 | static int |
1085 | bind_validate_region(struct nouveau_job *job) |
1086 | { |
1087 | struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); |
1088 | struct bind_job_op *op; |
1089 | int ret; |
1090 | |
1091 | list_for_each_op(op, &bind_job->ops) { |
1092 | u64 op_addr = op->va.addr; |
1093 | u64 op_range = op->va.range; |
1094 | bool sparse = false; |
1095 | |
1096 | switch (op->op) { |
1097 | case OP_MAP_SPARSE: |
1098 | sparse = true; |
bind_validate_map_sparse(job, op_addr, op_range);
1100 | fallthrough; |
1101 | case OP_MAP: |
ret = bind_validate_map_common(job, op_addr, op_range,
sparse);
1104 | if (ret) |
1105 | return ret; |
1106 | break; |
1107 | default: |
1108 | break; |
1109 | } |
1110 | } |
1111 | |
1112 | return 0; |
1113 | } |
1114 | |
1115 | static void |
1116 | bind_link_gpuvas(struct drm_gpuva_ops *ops, struct nouveau_uvma_prealloc *new) |
1117 | { |
1118 | struct drm_gpuva_op *op; |
1119 | |
1120 | drm_gpuva_for_each_op(op, ops) { |
1121 | switch (op->op) { |
1122 | case DRM_GPUVA_OP_MAP: |
drm_gpuva_link(&new->map->va);
1124 | break; |
1125 | case DRM_GPUVA_OP_REMAP: |
1126 | if (op->remap.prev) |
drm_gpuva_link(&new->prev->va);
1128 | if (op->remap.next) |
drm_gpuva_link(&new->next->va);
drm_gpuva_unlink(op->remap.unmap->va);
1131 | break; |
1132 | case DRM_GPUVA_OP_UNMAP: |
drm_gpuva_unlink(op->unmap.va);
1134 | break; |
1135 | default: |
1136 | break; |
1137 | } |
1138 | } |
1139 | } |
1140 | |
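/*
 * Submit flow in a nutshell: look up and validate the GEMs and VA ranges of
 * all ops and wait for conflicting dirty regions; then, with the uvmm lock
 * held, translate every op into drm_gpuva_ops and prepare them (allocate
 * uvmas, acquire page table memory), lock and validate all involved GEMs via
 * drm_exec and finally link/unlink the GPUVAs. Any failure unwinds all
 * changes made so far.
 */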
1141 | static int |
1142 | nouveau_uvmm_bind_job_submit(struct nouveau_job *job) |
1143 | { |
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1145 | struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); |
1146 | struct nouveau_sched_entity *entity = job->entity; |
1147 | struct drm_exec *exec = &job->exec; |
1148 | struct bind_job_op *op; |
1149 | int ret; |
1150 | |
1151 | list_for_each_op(op, &bind_job->ops) { |
1152 | if (op->op == OP_MAP) { |
1153 | op->gem.obj = drm_gem_object_lookup(filp: job->file_priv, |
1154 | handle: op->gem.handle); |
1155 | if (!op->gem.obj) |
1156 | return -ENOENT; |
1157 | } |
1158 | |
1159 | ret = bind_validate_op(job, op); |
1160 | if (ret) |
1161 | return ret; |
1162 | } |
1163 | |
1164 | /* If a sparse region or mapping overlaps a dirty region, we need to |
1165 | * wait for the region to complete the unbind process. This is due to |
1166 | * how page table management is currently implemented. A future |
1167 | * implementation might change this. |
1168 | */ |
1169 | ret = bind_validate_region(job); |
1170 | if (ret) |
1171 | return ret; |
1172 | |
1173 | /* Once we start modifying the GPU VA space we need to keep holding the |
1174 | * uvmm lock until we can't fail anymore. This is due to the set of GPU |
1175 | * VA space changes must appear atomically and we need to be able to |
1176 | * unwind all GPU VA space changes on failure. |
1177 | */ |
1178 | nouveau_uvmm_lock(uvmm); |
1179 | list_for_each_op(op, &bind_job->ops) { |
1180 | switch (op->op) { |
1181 | case OP_MAP_SPARSE: |
1182 | ret = nouveau_uvma_region_create(uvmm, |
1183 | addr: op->va.addr, |
1184 | range: op->va.range); |
1185 | if (ret) |
1186 | goto unwind_continue; |
1187 | |
1188 | break; |
1189 | case OP_UNMAP_SPARSE: |
1190 | op->reg = nouveau_uvma_region_find(uvmm, addr: op->va.addr, |
1191 | range: op->va.range); |
1192 | if (!op->reg || op->reg->dirty) { |
1193 | ret = -ENOENT; |
1194 | goto unwind_continue; |
1195 | } |
1196 | |
1197 | op->ops = drm_gpuvm_sm_unmap_ops_create(gpuvm: &uvmm->base, |
1198 | addr: op->va.addr, |
1199 | range: op->va.range); |
1200 | if (IS_ERR(ptr: op->ops)) { |
1201 | ret = PTR_ERR(ptr: op->ops); |
1202 | goto unwind_continue; |
1203 | } |
1204 | |
1205 | ret = nouveau_uvmm_sm_unmap_prepare(uvmm, new: &op->new, |
1206 | ops: op->ops); |
1207 | if (ret) { |
1208 | drm_gpuva_ops_free(gpuvm: &uvmm->base, ops: op->ops); |
1209 | op->ops = NULL; |
1210 | op->reg = NULL; |
1211 | goto unwind_continue; |
1212 | } |
1213 | |
1214 | nouveau_uvma_region_dirty(reg: op->reg); |
1215 | |
1216 | break; |
1217 | case OP_MAP: { |
1218 | struct nouveau_uvma_region *reg; |
1219 | |
1220 | reg = nouveau_uvma_region_find_first(uvmm, |
1221 | addr: op->va.addr, |
1222 | range: op->va.range); |
1223 | if (reg) { |
1224 | u64 reg_addr = reg->va.addr; |
1225 | u64 reg_end = reg_addr + reg->va.range; |
1226 | u64 op_addr = op->va.addr; |
1227 | u64 op_end = op_addr + op->va.range; |
1228 | |
1229 | if (unlikely(reg->dirty)) { |
1230 | ret = -EINVAL; |
1231 | goto unwind_continue; |
1232 | } |
1233 | |
1234 | /* Make sure the mapping is either outside of a |
1235 | * region or fully enclosed by a region. |
1236 | */ |
1237 | if (reg_addr > op_addr || reg_end < op_end) { |
1238 | ret = -ENOSPC; |
1239 | goto unwind_continue; |
1240 | } |
1241 | } |
1242 | |
1243 | op->ops = drm_gpuvm_sm_map_ops_create(gpuvm: &uvmm->base, |
1244 | addr: op->va.addr, |
1245 | range: op->va.range, |
1246 | obj: op->gem.obj, |
1247 | offset: op->gem.offset); |
1248 | if (IS_ERR(ptr: op->ops)) { |
1249 | ret = PTR_ERR(ptr: op->ops); |
1250 | goto unwind_continue; |
1251 | } |
1252 | |
1253 | ret = nouveau_uvmm_sm_map_prepare(uvmm, new: &op->new, |
1254 | region: reg, ops: op->ops, |
1255 | addr: op->va.addr, |
1256 | range: op->va.range, |
1257 | kind: op->flags & 0xff); |
1258 | if (ret) { |
1259 | drm_gpuva_ops_free(gpuvm: &uvmm->base, ops: op->ops); |
1260 | op->ops = NULL; |
1261 | goto unwind_continue; |
1262 | } |
1263 | |
1264 | break; |
1265 | } |
1266 | case OP_UNMAP: |
1267 | op->ops = drm_gpuvm_sm_unmap_ops_create(gpuvm: &uvmm->base, |
1268 | addr: op->va.addr, |
1269 | range: op->va.range); |
1270 | if (IS_ERR(ptr: op->ops)) { |
1271 | ret = PTR_ERR(ptr: op->ops); |
1272 | goto unwind_continue; |
1273 | } |
1274 | |
1275 | ret = nouveau_uvmm_sm_unmap_prepare(uvmm, new: &op->new, |
1276 | ops: op->ops); |
1277 | if (ret) { |
1278 | drm_gpuva_ops_free(gpuvm: &uvmm->base, ops: op->ops); |
1279 | op->ops = NULL; |
1280 | goto unwind_continue; |
1281 | } |
1282 | |
1283 | break; |
1284 | default: |
1285 | ret = -EINVAL; |
1286 | goto unwind_continue; |
1287 | } |
1288 | } |
1289 | |
1290 | drm_exec_init(exec, DRM_EXEC_INTERRUPTIBLE_WAIT | |
1291 | DRM_EXEC_IGNORE_DUPLICATES); |
1292 | drm_exec_until_all_locked(exec) { |
1293 | list_for_each_op(op, &bind_job->ops) { |
1294 | struct drm_gpuva_op *va_op; |
1295 | |
1296 | if (IS_ERR_OR_NULL(ptr: op->ops)) |
1297 | continue; |
1298 | |
1299 | drm_gpuva_for_each_op(va_op, op->ops) { |
1300 | struct drm_gem_object *obj = op_gem_obj(op: va_op); |
1301 | |
1302 | if (unlikely(!obj)) |
1303 | continue; |
1304 | |
1305 | ret = drm_exec_prepare_obj(exec, obj, num_fences: 1); |
1306 | drm_exec_retry_on_contention(exec); |
1307 | if (ret) { |
1308 | op = list_last_op(&bind_job->ops); |
1309 | goto unwind; |
1310 | } |
1311 | } |
1312 | } |
1313 | } |
1314 | |
1315 | list_for_each_op(op, &bind_job->ops) { |
1316 | struct drm_gpuva_op *va_op; |
1317 | |
1318 | if (IS_ERR_OR_NULL(ptr: op->ops)) |
1319 | continue; |
1320 | |
1321 | drm_gpuva_for_each_op(va_op, op->ops) { |
1322 | struct drm_gem_object *obj = op_gem_obj(op: va_op); |
1323 | |
1324 | if (unlikely(!obj)) |
1325 | continue; |
1326 | |
1327 | /* Don't validate GEMs backing mappings we're about to |
1328 | * unmap, it's not worth the effort. |
1329 | */ |
1330 | if (unlikely(va_op->op == DRM_GPUVA_OP_UNMAP)) |
1331 | continue; |
1332 | |
1333 | ret = nouveau_bo_validate(nouveau_gem_object(gem: obj), |
1334 | interruptible: true, no_wait_gpu: false); |
1335 | if (ret) { |
1336 | op = list_last_op(&bind_job->ops); |
1337 | goto unwind; |
1338 | } |
1339 | } |
1340 | } |
1341 | |
1342 | /* Link and unlink GPUVAs while holding the dma_resv lock. |
1343 | * |
1344 | * As long as we validate() all GEMs and add fences to all GEMs DMA |
1345 | * reservations backing map and remap operations we can be sure there |
1346 | * won't be any concurrent (in)validations during job execution, hence |
1347 | * we're safe to check drm_gpuva_invalidated() within the fence |
1348 | * signalling critical path without holding a separate lock. |
1349 | * |
1350 | * GPUVAs about to be unmapped are safe as well, since they're unlinked |
1351 | * already. |
1352 | * |
1353 | * GEMs from map and remap operations must be validated before linking |
1354 | * their corresponding mappings to prevent the actual PT update to |
1355 | * happen right away in validate() rather than asynchronously as |
1356 | * intended. |
1357 | * |
1358 | * Note that after linking and unlinking the GPUVAs in this loop this |
1359 | * function cannot fail anymore, hence there is no need for an unwind |
1360 | * path. |
1361 | */ |
1362 | list_for_each_op(op, &bind_job->ops) { |
1363 | switch (op->op) { |
1364 | case OP_UNMAP_SPARSE: |
1365 | case OP_MAP: |
1366 | case OP_UNMAP: |
1367 | bind_link_gpuvas(ops: op->ops, new: &op->new); |
1368 | break; |
1369 | default: |
1370 | break; |
1371 | } |
1372 | } |
1373 | nouveau_uvmm_unlock(uvmm); |
1374 | |
1375 | spin_lock(lock: &entity->job.list.lock); |
1376 | list_add(new: &bind_job->entry, head: &entity->job.list.head); |
1377 | spin_unlock(lock: &entity->job.list.lock); |
1378 | |
1379 | return 0; |
1380 | |
1381 | unwind_continue: |
1382 | op = list_prev_op(op); |
1383 | unwind: |
1384 | list_for_each_op_from_reverse(op, &bind_job->ops) { |
1385 | switch (op->op) { |
1386 | case OP_MAP_SPARSE: |
1387 | nouveau_uvma_region_destroy(uvmm, addr: op->va.addr, |
1388 | range: op->va.range); |
1389 | break; |
1390 | case OP_UNMAP_SPARSE: |
1391 | __nouveau_uvma_region_insert(uvmm, reg: op->reg); |
1392 | nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, new: &op->new, |
1393 | ops: op->ops); |
1394 | break; |
1395 | case OP_MAP: |
1396 | nouveau_uvmm_sm_map_prepare_unwind(uvmm, new: &op->new, |
1397 | ops: op->ops, |
1398 | addr: op->va.addr, |
1399 | range: op->va.range); |
1400 | break; |
1401 | case OP_UNMAP: |
1402 | nouveau_uvmm_sm_unmap_prepare_unwind(uvmm, new: &op->new, |
1403 | ops: op->ops); |
1404 | break; |
1405 | } |
1406 | |
1407 | drm_gpuva_ops_free(gpuvm: &uvmm->base, ops: op->ops); |
1408 | op->ops = NULL; |
1409 | op->reg = NULL; |
1410 | } |
1411 | |
1412 | nouveau_uvmm_unlock(uvmm); |
1413 | drm_exec_fini(exec); |
1414 | return ret; |
1415 | } |
1416 | |
1417 | static void |
1418 | nouveau_uvmm_bind_job_armed_submit(struct nouveau_job *job) |
1419 | { |
1420 | struct drm_exec *exec = &job->exec; |
1421 | struct drm_gem_object *obj; |
1422 | unsigned long index; |
1423 | |
1424 | drm_exec_for_each_locked_object(exec, index, obj) |
dma_resv_add_fence(obj->resv, job->done_fence, job->resv_usage);
1426 | |
1427 | drm_exec_fini(exec); |
1428 | } |
1429 | |
1430 | static struct dma_fence * |
1431 | nouveau_uvmm_bind_job_run(struct nouveau_job *job) |
1432 | { |
1433 | struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); |
struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(job->cli);
1435 | struct bind_job_op *op; |
1436 | int ret = 0; |
1437 | |
1438 | list_for_each_op(op, &bind_job->ops) { |
1439 | switch (op->op) { |
1440 | case OP_MAP_SPARSE: |
1441 | /* noop */ |
1442 | break; |
1443 | case OP_MAP: |
ret = nouveau_uvmm_sm_map(uvmm, &op->new, op->ops);
1445 | if (ret) |
1446 | goto out; |
1447 | break; |
1448 | case OP_UNMAP_SPARSE: |
1449 | fallthrough; |
1450 | case OP_UNMAP: |
ret = nouveau_uvmm_sm_unmap(uvmm, &op->new, op->ops);
1452 | if (ret) |
1453 | goto out; |
1454 | break; |
1455 | } |
1456 | } |
1457 | |
1458 | out: |
1459 | if (ret) |
NV_PRINTK(err, job->cli, "bind job failed: %d\n", ret);
return ERR_PTR(ret);
1462 | } |
1463 | |
1464 | static void |
1465 | nouveau_uvmm_bind_job_free_work_fn(struct work_struct *work) |
1466 | { |
1467 | struct nouveau_uvmm_bind_job *bind_job = |
1468 | container_of(work, struct nouveau_uvmm_bind_job, work); |
1469 | struct nouveau_job *job = &bind_job->base; |
1470 | struct nouveau_uvmm *uvmm = nouveau_cli_uvmm(cli: job->cli); |
1471 | struct nouveau_sched_entity *entity = job->entity; |
1472 | struct bind_job_op *op, *next; |
1473 | |
1474 | list_for_each_op(op, &bind_job->ops) { |
1475 | struct drm_gem_object *obj = op->gem.obj; |
1476 | |
1477 | /* When nouveau_uvmm_bind_job_submit() fails op->ops and op->reg |
1478 | * will be NULL, hence skip the cleanup. |
1479 | */ |
1480 | switch (op->op) { |
1481 | case OP_MAP_SPARSE: |
1482 | /* noop */ |
1483 | break; |
1484 | case OP_UNMAP_SPARSE: |
1485 | if (!IS_ERR_OR_NULL(ptr: op->ops)) |
1486 | nouveau_uvmm_sm_unmap_cleanup(uvmm, new: &op->new, |
1487 | ops: op->ops); |
1488 | |
1489 | if (op->reg) { |
1490 | nouveau_uvma_region_sparse_unref(reg: op->reg); |
1491 | nouveau_uvmm_lock(uvmm); |
1492 | nouveau_uvma_region_remove(reg: op->reg); |
1493 | nouveau_uvmm_unlock(uvmm); |
1494 | nouveau_uvma_region_complete(reg: op->reg); |
1495 | nouveau_uvma_region_put(reg: op->reg); |
1496 | } |
1497 | |
1498 | break; |
1499 | case OP_MAP: |
1500 | if (!IS_ERR_OR_NULL(ptr: op->ops)) |
1501 | nouveau_uvmm_sm_map_cleanup(uvmm, new: &op->new, |
1502 | ops: op->ops); |
1503 | break; |
1504 | case OP_UNMAP: |
1505 | if (!IS_ERR_OR_NULL(ptr: op->ops)) |
1506 | nouveau_uvmm_sm_unmap_cleanup(uvmm, new: &op->new, |
1507 | ops: op->ops); |
1508 | break; |
1509 | } |
1510 | |
1511 | if (!IS_ERR_OR_NULL(ptr: op->ops)) |
1512 | drm_gpuva_ops_free(gpuvm: &uvmm->base, ops: op->ops); |
1513 | |
1514 | if (obj) |
1515 | drm_gem_object_put(obj); |
1516 | } |
1517 | |
1518 | spin_lock(lock: &entity->job.list.lock); |
1519 | list_del(entry: &bind_job->entry); |
1520 | spin_unlock(lock: &entity->job.list.lock); |
1521 | |
1522 | complete_all(&bind_job->complete); |
1523 | wake_up(&entity->job.wq); |
1524 | |
1525 | /* Remove and free ops after removing the bind job from the job list to |
1526 | * avoid races against bind_validate_map_sparse(). |
1527 | */ |
1528 | list_for_each_op_safe(op, next, &bind_job->ops) { |
1529 | list_del(entry: &op->entry); |
1530 | kfree(objp: op); |
1531 | } |
1532 | |
1533 | nouveau_uvmm_bind_job_put(job: bind_job); |
1534 | } |
1535 | |
1536 | static void |
1537 | nouveau_uvmm_bind_job_free_qwork(struct nouveau_job *job) |
1538 | { |
1539 | struct nouveau_uvmm_bind_job *bind_job = to_uvmm_bind_job(job); |
1540 | struct nouveau_sched_entity *entity = job->entity; |
1541 | |
1542 | nouveau_sched_entity_qwork(entity, work: &bind_job->work); |
1543 | } |
1544 | |
1545 | static struct nouveau_job_ops nouveau_bind_job_ops = { |
1546 | .submit = nouveau_uvmm_bind_job_submit, |
1547 | .armed_submit = nouveau_uvmm_bind_job_armed_submit, |
1548 | .run = nouveau_uvmm_bind_job_run, |
1549 | .free = nouveau_uvmm_bind_job_free_qwork, |
1550 | }; |
1551 | |
1552 | static int |
1553 | bind_job_op_from_uop(struct bind_job_op **pop, |
1554 | struct drm_nouveau_vm_bind_op *uop) |
1555 | { |
1556 | struct bind_job_op *op; |
1557 | |
1558 | op = *pop = kzalloc(size: sizeof(*op), GFP_KERNEL); |
1559 | if (!op) |
1560 | return -ENOMEM; |
1561 | |
1562 | switch (uop->op) { |
1563 | case OP_MAP: |
1564 | op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? |
1565 | OP_MAP_SPARSE : OP_MAP; |
1566 | break; |
1567 | case OP_UNMAP: |
1568 | op->op = uop->flags & DRM_NOUVEAU_VM_BIND_SPARSE ? |
1569 | OP_UNMAP_SPARSE : OP_UNMAP; |
1570 | break; |
1571 | default: |
1572 | op->op = uop->op; |
1573 | break; |
1574 | } |
1575 | |
1576 | op->flags = uop->flags; |
1577 | op->va.addr = uop->addr; |
1578 | op->va.range = uop->range; |
1579 | op->gem.handle = uop->handle; |
1580 | op->gem.offset = uop->bo_offset; |
1581 | |
1582 | return 0; |
1583 | } |
1584 | |
1585 | static void |
1586 | bind_job_ops_free(struct list_head *ops) |
1587 | { |
1588 | struct bind_job_op *op, *next; |
1589 | |
1590 | list_for_each_op_safe(op, next, ops) { |
1591 | list_del(entry: &op->entry); |
1592 | kfree(objp: op); |
1593 | } |
1594 | } |
1595 | |
1596 | static int |
1597 | nouveau_uvmm_bind_job_init(struct nouveau_uvmm_bind_job **pjob, |
1598 | struct nouveau_uvmm_bind_job_args *__args) |
1599 | { |
1600 | struct nouveau_uvmm_bind_job *job; |
1601 | struct nouveau_job_args args = {}; |
1602 | struct bind_job_op *op; |
1603 | int i, ret; |
1604 | |
1605 | ret = nouveau_uvmm_bind_job_alloc(pjob: &job); |
1606 | if (ret) |
1607 | return ret; |
1608 | |
1609 | INIT_LIST_HEAD(list: &job->ops); |
1610 | INIT_LIST_HEAD(list: &job->entry); |
1611 | |
1612 | for (i = 0; i < __args->op.count; i++) { |
1613 | ret = bind_job_op_from_uop(pop: &op, uop: &__args->op.s[i]); |
1614 | if (ret) |
1615 | goto err_free; |
1616 | |
1617 | list_add_tail(new: &op->entry, head: &job->ops); |
1618 | } |
1619 | |
1620 | init_completion(x: &job->complete); |
1621 | INIT_WORK(&job->work, nouveau_uvmm_bind_job_free_work_fn); |
1622 | |
1623 | args.sched_entity = __args->sched_entity; |
1624 | args.file_priv = __args->file_priv; |
1625 | |
1626 | args.in_sync.count = __args->in_sync.count; |
1627 | args.in_sync.s = __args->in_sync.s; |
1628 | |
1629 | args.out_sync.count = __args->out_sync.count; |
1630 | args.out_sync.s = __args->out_sync.s; |
1631 | |
1632 | args.sync = !(__args->flags & DRM_NOUVEAU_VM_BIND_RUN_ASYNC); |
1633 | args.ops = &nouveau_bind_job_ops; |
1634 | args.resv_usage = DMA_RESV_USAGE_BOOKKEEP; |
1635 | |
1636 | ret = nouveau_job_init(job: &job->base, args: &args); |
1637 | if (ret) |
1638 | goto err_free; |
1639 | |
1640 | *pjob = job; |
1641 | return 0; |
1642 | |
1643 | err_free: |
1644 | bind_job_ops_free(ops: &job->ops); |
1645 | kfree(objp: job); |
1646 | *pjob = NULL; |
1647 | |
1648 | return ret; |
1649 | } |
1650 | |
1651 | int |
1652 | nouveau_uvmm_ioctl_vm_init(struct drm_device *dev, |
1653 | void *data, |
1654 | struct drm_file *file_priv) |
1655 | { |
1656 | struct nouveau_cli *cli = nouveau_cli(fpriv: file_priv); |
1657 | struct drm_nouveau_vm_init *init = data; |
1658 | |
1659 | return nouveau_uvmm_init(uvmm: &cli->uvmm, cli, kernel_managed_addr: init->kernel_managed_addr, |
1660 | kernel_managed_size: init->kernel_managed_size); |
1661 | } |
1662 | |
1663 | static int |
1664 | nouveau_uvmm_vm_bind(struct nouveau_uvmm_bind_job_args *args) |
1665 | { |
1666 | struct nouveau_uvmm_bind_job *job; |
1667 | int ret; |
1668 | |
1669 | ret = nouveau_uvmm_bind_job_init(pjob: &job, args: args); |
1670 | if (ret) |
1671 | return ret; |
1672 | |
1673 | ret = nouveau_job_submit(job: &job->base); |
1674 | if (ret) |
1675 | goto err_job_fini; |
1676 | |
1677 | return 0; |
1678 | |
1679 | err_job_fini: |
1680 | nouveau_job_fini(job: &job->base); |
1681 | return ret; |
1682 | } |
1683 | |
1684 | static int |
1685 | nouveau_uvmm_vm_bind_ucopy(struct nouveau_uvmm_bind_job_args *args, |
1686 | struct drm_nouveau_vm_bind *req) |
1687 | { |
1688 | struct drm_nouveau_sync **s; |
1689 | u32 inc = req->wait_count; |
1690 | u64 ins = req->wait_ptr; |
1691 | u32 outc = req->sig_count; |
1692 | u64 outs = req->sig_ptr; |
1693 | u32 opc = req->op_count; |
1694 | u64 ops = req->op_ptr; |
1695 | int ret; |
1696 | |
1697 | args->flags = req->flags; |
1698 | |
1699 | if (opc) { |
1700 | args->op.count = opc; |
1701 | args->op.s = u_memcpya(user: ops, nmemb: opc, |
1702 | size: sizeof(*args->op.s)); |
1703 | if (IS_ERR(ptr: args->op.s)) |
1704 | return PTR_ERR(ptr: args->op.s); |
1705 | } |
1706 | |
1707 | if (inc) { |
1708 | s = &args->in_sync.s; |
1709 | |
1710 | args->in_sync.count = inc; |
1711 | *s = u_memcpya(user: ins, nmemb: inc, size: sizeof(**s)); |
1712 | if (IS_ERR(ptr: *s)) { |
1713 | ret = PTR_ERR(ptr: *s); |
1714 | goto err_free_ops; |
1715 | } |
1716 | } |
1717 | |
1718 | if (outc) { |
1719 | s = &args->out_sync.s; |
1720 | |
1721 | args->out_sync.count = outc; |
1722 | *s = u_memcpya(user: outs, nmemb: outc, size: sizeof(**s)); |
1723 | if (IS_ERR(ptr: *s)) { |
1724 | ret = PTR_ERR(ptr: *s); |
1725 | goto err_free_ins; |
1726 | } |
1727 | } |
1728 | |
1729 | return 0; |
1730 | |
1731 | err_free_ops: |
1732 | u_free(addr: args->op.s); |
1733 | err_free_ins: |
1734 | u_free(addr: args->in_sync.s); |
1735 | return ret; |
1736 | } |
1737 | |
1738 | static void |
1739 | nouveau_uvmm_vm_bind_ufree(struct nouveau_uvmm_bind_job_args *args) |
1740 | { |
1741 | u_free(addr: args->op.s); |
1742 | u_free(addr: args->in_sync.s); |
1743 | u_free(addr: args->out_sync.s); |
1744 | } |
1745 | |
1746 | int |
1747 | nouveau_uvmm_ioctl_vm_bind(struct drm_device *dev, |
1748 | void *data, |
1749 | struct drm_file *file_priv) |
1750 | { |
1751 | struct nouveau_cli *cli = nouveau_cli(fpriv: file_priv); |
1752 | struct nouveau_uvmm_bind_job_args args = {}; |
1753 | struct drm_nouveau_vm_bind *req = data; |
1754 | int ret = 0; |
1755 | |
1756 | if (unlikely(!nouveau_cli_uvmm_locked(cli))) |
1757 | return -ENOSYS; |
1758 | |
1759 | ret = nouveau_uvmm_vm_bind_ucopy(args: &args, req); |
1760 | if (ret) |
1761 | return ret; |
1762 | |
1763 | args.sched_entity = &cli->sched_entity; |
1764 | args.file_priv = file_priv; |
1765 | |
1766 | ret = nouveau_uvmm_vm_bind(args: &args); |
1767 | if (ret) |
1768 | goto out_free_args; |
1769 | |
1770 | out_free_args: |
1771 | nouveau_uvmm_vm_bind_ufree(args: &args); |
1772 | return ret; |
1773 | } |
1774 | |
1775 | void |
1776 | nouveau_uvmm_bo_map_all(struct nouveau_bo *nvbo, struct nouveau_mem *mem) |
1777 | { |
1778 | struct drm_gem_object *obj = &nvbo->bo.base; |
1779 | struct drm_gpuva *va; |
1780 | |
1781 | dma_resv_assert_held(obj->resv); |
1782 | |
1783 | drm_gem_for_each_gpuva(va, obj) { |
1784 | struct nouveau_uvma *uvma = uvma_from_va(va); |
1785 | |
1786 | nouveau_uvma_map(uvma, mem); |
drm_gpuva_invalidate(va, false);
1788 | } |
1789 | } |
1790 | |
1791 | void |
1792 | nouveau_uvmm_bo_unmap_all(struct nouveau_bo *nvbo) |
1793 | { |
1794 | struct drm_gem_object *obj = &nvbo->bo.base; |
1795 | struct drm_gpuva *va; |
1796 | |
1797 | dma_resv_assert_held(obj->resv); |
1798 | |
1799 | drm_gem_for_each_gpuva(va, obj) { |
1800 | struct nouveau_uvma *uvma = uvma_from_va(va); |
1801 | |
1802 | nouveau_uvma_unmap(uvma); |
drm_gpuva_invalidate(va, true);
1804 | } |
1805 | } |
1806 | |
1807 | int |
1808 | nouveau_uvmm_init(struct nouveau_uvmm *uvmm, struct nouveau_cli *cli, |
1809 | u64 kernel_managed_addr, u64 kernel_managed_size) |
1810 | { |
1811 | int ret; |
1812 | u64 kernel_managed_end = kernel_managed_addr + kernel_managed_size; |
1813 | |
1814 | mutex_init(&uvmm->mutex); |
1815 | dma_resv_init(obj: &uvmm->resv); |
1816 | mt_init_flags(mt: &uvmm->region_mt, MT_FLAGS_LOCK_EXTERN); |
1817 | mt_set_external_lock(&uvmm->region_mt, &uvmm->mutex); |
1818 | |
1819 | mutex_lock(&cli->mutex); |
1820 | |
1821 | if (unlikely(cli->uvmm.disabled)) { |
1822 | ret = -ENOSYS; |
1823 | goto out_unlock; |
1824 | } |
1825 | |
1826 | if (kernel_managed_end <= kernel_managed_addr) { |
1827 | ret = -EINVAL; |
1828 | goto out_unlock; |
1829 | } |
1830 | |
1831 | if (kernel_managed_end > NOUVEAU_VA_SPACE_END) { |
1832 | ret = -EINVAL; |
1833 | goto out_unlock; |
1834 | } |
1835 | |
1836 | uvmm->kernel_managed_addr = kernel_managed_addr; |
1837 | uvmm->kernel_managed_size = kernel_managed_size; |
1838 | |
1839 | drm_gpuvm_init(gpuvm: &uvmm->base, name: cli->name, |
1840 | NOUVEAU_VA_SPACE_START, |
1841 | NOUVEAU_VA_SPACE_END, |
1842 | reserve_offset: kernel_managed_addr, reserve_range: kernel_managed_size, |
1843 | NULL); |
1844 | |
ret = nvif_vmm_ctor(&cli->mmu, "uvmm",
1846 | cli->vmm.vmm.object.oclass, RAW, |
1847 | kernel_managed_addr, kernel_managed_size, |
1848 | NULL, 0, &cli->uvmm.vmm.vmm); |
1849 | if (ret) |
1850 | goto out_free_gpuva_mgr; |
1851 | |
1852 | cli->uvmm.vmm.cli = cli; |
1853 | mutex_unlock(lock: &cli->mutex); |
1854 | |
1855 | return 0; |
1856 | |
1857 | out_free_gpuva_mgr: |
1858 | drm_gpuvm_destroy(gpuvm: &uvmm->base); |
1859 | out_unlock: |
1860 | mutex_unlock(lock: &cli->mutex); |
1861 | return ret; |
1862 | } |
1863 | |
1864 | void |
1865 | nouveau_uvmm_fini(struct nouveau_uvmm *uvmm) |
1866 | { |
1867 | MA_STATE(mas, &uvmm->region_mt, 0, 0); |
1868 | struct nouveau_uvma_region *reg; |
1869 | struct nouveau_cli *cli = uvmm->vmm.cli; |
1870 | struct nouveau_sched_entity *entity = &cli->sched_entity; |
1871 | struct drm_gpuva *va, *next; |
1872 | |
1873 | if (!cli) |
1874 | return; |
1875 | |
1876 | rmb(); /* for list_empty to work without lock */ |
1877 | wait_event(entity->job.wq, list_empty(&entity->job.list.head)); |
1878 | |
1879 | nouveau_uvmm_lock(uvmm); |
1880 | drm_gpuvm_for_each_va_safe(va, next, &uvmm->base) { |
1881 | struct nouveau_uvma *uvma = uvma_from_va(va); |
1882 | struct drm_gem_object *obj = va->gem.obj; |
1883 | |
1884 | if (unlikely(va == &uvmm->base.kernel_alloc_node)) |
1885 | continue; |
1886 | |
1887 | drm_gpuva_remove(va); |
1888 | |
1889 | dma_resv_lock(obj: obj->resv, NULL); |
1890 | drm_gpuva_unlink(va); |
1891 | dma_resv_unlock(obj: obj->resv); |
1892 | |
1893 | nouveau_uvma_unmap(uvma); |
1894 | nouveau_uvma_vmm_put(uvma); |
1895 | |
1896 | nouveau_uvma_gem_put(uvma); |
1897 | nouveau_uvma_free(uvma); |
1898 | } |
1899 | |
1900 | mas_for_each(&mas, reg, ULONG_MAX) { |
1901 | mas_erase(mas: &mas); |
1902 | nouveau_uvma_region_sparse_unref(reg); |
1903 | nouveau_uvma_region_put(reg); |
1904 | } |
1905 | |
1906 | WARN(!mtree_empty(&uvmm->region_mt), |
1907 | "nouveau_uvma_region tree not empty, potentially leaking memory." ); |
1908 | __mt_destroy(mt: &uvmm->region_mt); |
1909 | nouveau_uvmm_unlock(uvmm); |
1910 | |
1911 | mutex_lock(&cli->mutex); |
1912 | nouveau_vmm_fini(&uvmm->vmm); |
1913 | drm_gpuvm_destroy(gpuvm: &uvmm->base); |
1914 | mutex_unlock(lock: &cli->mutex); |
1915 | |
1916 | dma_resv_fini(obj: &uvmm->resv); |
1917 | } |
1918 | |