/*
 * Copyright 2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include "amdgpu_ids.h"

#include <linux/idr.h>
#include <linux/dma-fence-array.h>

#include "amdgpu.h"
#include "amdgpu_trace.h"

/*
 * PASID manager
 *
 * PASIDs are global address space identifiers that can be shared
 * between the GPU, an IOMMU and the driver. VMs on different devices
 * may use the same PASID if they share the same address
 * space. Therefore PASIDs are allocated using a global IDA. VMs are
 * looked up from the PASID per amdgpu_device.
 */
static DEFINE_IDA(amdgpu_pasid_ida);

/* Helper to free pasid from a fence callback */
struct amdgpu_pasid_cb {
	struct dma_fence_cb cb;
	u32 pasid;
};

/**
 * amdgpu_pasid_alloc - Allocate a PASID
 * @bits: Maximum width of the PASID in bits, must be at least 1
 *
 * Allocates a PASID of the given width while keeping smaller PASIDs
 * available if possible.
 *
 * Returns a positive integer on success. Returns %-EINVAL if bits==0.
 * Returns %-ENOSPC if no PASID was available. Returns %-ENOMEM on
 * memory allocation failure.
 */
int amdgpu_pasid_alloc(unsigned int bits)
{
	int pasid = -EINVAL;

	for (bits = min(bits, 31U); bits > 0; bits--) {
		pasid = ida_alloc_range(&amdgpu_pasid_ida, 1U << (bits - 1),
					(1U << bits) - 1, GFP_KERNEL);
		if (pasid != -ENOSPC)
			break;
	}

	if (pasid >= 0)
		trace_amdgpu_pasid_allocated(pasid);

	return pasid;
}
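
/*
 * Illustrative only (hypothetical caller, not part of this file): a
 * consumer asking for a 16-bit PASID. The loop above first tries the
 * range [1 << 15, (1 << 16) - 1] and only falls back to narrower
 * ranges, so smaller IDs stay available for callers with fewer bits:
 *
 *	int pasid = amdgpu_pasid_alloc(16);
 *
 *	if (pasid < 0)
 *		return pasid;
 *	...
 *	amdgpu_pasid_free(pasid);
 */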

/**
 * amdgpu_pasid_free - Free a PASID
 * @pasid: PASID to free
 */
void amdgpu_pasid_free(u32 pasid)
{
	trace_amdgpu_pasid_freed(pasid);
	ida_free(&amdgpu_pasid_ida, pasid);
}

static void amdgpu_pasid_free_cb(struct dma_fence *fence,
				 struct dma_fence_cb *_cb)
{
	struct amdgpu_pasid_cb *cb =
		container_of(_cb, struct amdgpu_pasid_cb, cb);

	amdgpu_pasid_free(cb->pasid);
	dma_fence_put(fence);
	kfree(cb);
}
97 | |
98 | /** |
99 | * amdgpu_pasid_free_delayed - free pasid when fences signal |
100 | * |
101 | * @resv: reservation object with the fences to wait for |
102 | * @pasid: pasid to free |
103 | * |
104 | * Free the pasid only after all the fences in resv are signaled. |
105 | */ |
106 | void amdgpu_pasid_free_delayed(struct dma_resv *resv, |
107 | u32 pasid) |
108 | { |
109 | struct amdgpu_pasid_cb *cb; |
110 | struct dma_fence *fence; |
111 | int r; |
112 | |
113 | r = dma_resv_get_singleton(obj: resv, usage: DMA_RESV_USAGE_BOOKKEEP, fence: &fence); |
114 | if (r) |
115 | goto fallback; |
116 | |
117 | if (!fence) { |
118 | amdgpu_pasid_free(pasid); |
119 | return; |
120 | } |
121 | |
122 | cb = kmalloc(size: sizeof(*cb), GFP_KERNEL); |
123 | if (!cb) { |
124 | /* Last resort when we are OOM */ |
125 | dma_fence_wait(fence, intr: false); |
126 | dma_fence_put(fence); |
127 | amdgpu_pasid_free(pasid); |
128 | } else { |
129 | cb->pasid = pasid; |
130 | if (dma_fence_add_callback(fence, cb: &cb->cb, |
131 | func: amdgpu_pasid_free_cb)) |
132 | amdgpu_pasid_free_cb(fence, cb: &cb->cb); |
133 | } |
134 | |
135 | return; |
136 | |
137 | fallback: |
138 | /* Not enough memory for the delayed delete, as last resort |
139 | * block for all the fences to complete. |
140 | */ |
141 | dma_resv_wait_timeout(obj: resv, usage: DMA_RESV_USAGE_BOOKKEEP, |
142 | intr: false, MAX_SCHEDULE_TIMEOUT); |
143 | amdgpu_pasid_free(pasid); |
144 | } |
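
/*
 * Illustrative only: a hypothetical caller tearing down a VM can hand
 * the PASID release off to the fences on a reservation object (resv
 * and pasid assumed to be at hand, e.g. from the root page directory):
 *
 *	amdgpu_pasid_free_delayed(resv, pasid);
 *
 * The call never fails: if there is no memory for the callback it
 * falls back to blocking until the fences have signaled.
 */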

/*
 * VMID manager
 *
 * VMIDs are a per-VMHUB identifier for page table handling.
 */
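
/*
 * Each VMHUB has its own set of VMIDs; the manager for the hub a ring
 * submits to is looked up as in the functions below:
 *
 *	struct amdgpu_vmid_mgr *id_mgr =
 *		&adev->vm_manager.id_mgr[ring->vm_hub];
 */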

/**
 * amdgpu_vmid_had_gpu_reset - check if reset occurred since last use
 *
 * @adev: amdgpu_device pointer
 * @id: VMID structure
 *
 * Check if GPU reset occurred since last use of the VMID.
 */
bool amdgpu_vmid_had_gpu_reset(struct amdgpu_device *adev,
			       struct amdgpu_vmid *id)
{
	return id->current_gpu_reset_count !=
		atomic_read(&adev->gpu_reset_counter);
}

/* Check if we need to switch to another set of resources */
static bool amdgpu_vmid_gds_switch_needed(struct amdgpu_vmid *id,
					  struct amdgpu_job *job)
{
	return id->gds_base != job->gds_base ||
		id->gds_size != job->gds_size ||
		id->gws_base != job->gws_base ||
		id->gws_size != job->gws_size ||
		id->oa_base != job->oa_base ||
		id->oa_size != job->oa_size;
}

/* Check if the id is compatible with the job */
static bool amdgpu_vmid_compatible(struct amdgpu_vmid *id,
				   struct amdgpu_job *job)
{
	return id->pd_gpu_addr == job->vm_pd_addr &&
		!amdgpu_vmid_gds_switch_needed(id, job);
}

/**
 * amdgpu_vmid_grab_idle - grab idle VMID
 *
 * @ring: ring we want to submit job to
 * @idle: resulting idle VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to find an idle VMID; if none is idle, return a fence to wait
 * for in @fence. Returns -ENOMEM when we are out of memory.
 */
static int amdgpu_vmid_grab_idle(struct amdgpu_ring *ring,
				 struct amdgpu_vmid **idle,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct dma_fence **fences;
	unsigned i;

	if (!dma_fence_is_signaled(ring->vmid_wait)) {
		*fence = dma_fence_get(ring->vmid_wait);
		return 0;
	}

	fences = kmalloc_array(id_mgr->num_ids, sizeof(void *), GFP_KERNEL);
	if (!fences)
		return -ENOMEM;

	/* Check if we have an idle VMID */
	i = 0;
	list_for_each_entry((*idle), &id_mgr->ids_lru, list) {
		/* Don't use per engine and per process VMID at the same time */
		struct amdgpu_ring *r = adev->vm_manager.concurrent_flush ?
			NULL : ring;

		fences[i] = amdgpu_sync_peek_fence(&(*idle)->active, r);
		if (!fences[i])
			break;
		++i;
	}

	/* If we can't find an idle VMID to use, wait till one becomes available */
	if (&(*idle)->list == &id_mgr->ids_lru) {
		u64 fence_context = adev->vm_manager.fence_context + ring->idx;
		unsigned seqno = ++adev->vm_manager.seqno[ring->idx];
		struct dma_fence_array *array;
		unsigned j;

		*idle = NULL;
		for (j = 0; j < i; ++j)
			dma_fence_get(fences[j]);

		array = dma_fence_array_create(i, fences, fence_context,
					       seqno, true);
		if (!array) {
			for (j = 0; j < i; ++j)
				dma_fence_put(fences[j]);
			kfree(fences);
			return -ENOMEM;
		}

		*fence = dma_fence_get(&array->base);
		dma_fence_put(ring->vmid_wait);
		ring->vmid_wait = &array->base;
		return 0;
	}
	kfree(fences);

	return 0;
}
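
/*
 * Illustrative only (hypothetical caller; in this file the only caller
 * is amdgpu_vmid_grab() below): the three outcomes of
 * amdgpu_vmid_grab_idle() are distinguished like this:
 *
 *	struct amdgpu_vmid *idle = NULL;
 *	struct dma_fence *wait = NULL;
 *	int r = amdgpu_vmid_grab_idle(ring, &idle, &wait);
 *
 *	if (r)
 *		return r;	// hard error (-ENOMEM)
 *	if (!idle)
 *		...		// nothing idle: wait on 'wait', then retry
 *	else
 *		...		// 'idle' points at a usable VMID
 */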

/**
 * amdgpu_vmid_grab_reserved - try to assign reserved VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to assign a reserved VMID.
 */
static int amdgpu_vmid_grab_reserved(struct amdgpu_vm *vm,
				     struct amdgpu_ring *ring,
				     struct amdgpu_job *job,
				     struct amdgpu_vmid **id,
				     struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	bool needs_flush = vm->use_cpu_for_update;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	*id = id_mgr->reserved;
	if ((*id)->owner != vm->immediate.fence_context ||
	    !amdgpu_vmid_compatible(*id, job) ||
	    (*id)->flushed_updates < updates ||
	    !(*id)->last_flush ||
	    ((*id)->last_flush->context != fence_context &&
	     !dma_fence_is_signaled((*id)->last_flush))) {
		struct dma_fence *tmp;

		/* Don't use per engine and per process VMID at the same time */
		if (adev->vm_manager.concurrent_flush)
			ring = NULL;

		/* to prevent one context being starved by another context */
		(*id)->pd_gpu_addr = 0;
		tmp = amdgpu_sync_peek_fence(&(*id)->active, ring);
		if (tmp) {
			*id = NULL;
			*fence = dma_fence_get(tmp);
			return 0;
		}
		needs_flush = true;
	}

	/* Good, we can use this VMID. Remember this submission as
	 * user of the VMID.
	 */
	r = amdgpu_sync_fence(&(*id)->active, &job->base.s_fence->finished);
	if (r)
		return r;

	job->vm_needs_flush = needs_flush;
	job->spm_update_needed = true;
	return 0;
}

/**
 * amdgpu_vmid_grab_used - try to reuse a VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @id: resulting VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Try to reuse a VMID for this submission.
 */
static int amdgpu_vmid_grab_used(struct amdgpu_vm *vm,
				 struct amdgpu_ring *ring,
				 struct amdgpu_job *job,
				 struct amdgpu_vmid **id,
				 struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	uint64_t fence_context = adev->fence_context + ring->idx;
	uint64_t updates = amdgpu_vm_tlb_seq(vm);
	int r;

	job->vm_needs_flush = vm->use_cpu_for_update;

	/* Check if we can use a VMID already assigned to this VM */
	list_for_each_entry_reverse((*id), &id_mgr->ids_lru, list) {
		bool needs_flush = vm->use_cpu_for_update;

		/* Check all the prerequisites to using this VMID */
		if ((*id)->owner != vm->immediate.fence_context)
			continue;

		if (!amdgpu_vmid_compatible(*id, job))
			continue;

		if (!(*id)->last_flush ||
		    ((*id)->last_flush->context != fence_context &&
		     !dma_fence_is_signaled((*id)->last_flush)))
			needs_flush = true;

		if ((*id)->flushed_updates < updates)
			needs_flush = true;

		if (needs_flush && !adev->vm_manager.concurrent_flush)
			continue;

		/* Good, we can use this VMID. Remember this submission as
		 * user of the VMID.
		 */
		r = amdgpu_sync_fence(&(*id)->active,
				      &job->base.s_fence->finished);
		if (r)
			return r;

		job->vm_needs_flush |= needs_flush;
		return 0;
	}

	*id = NULL;
	return 0;
}

/**
 * amdgpu_vmid_grab - allocate the next free VMID
 *
 * @vm: vm to allocate id for
 * @ring: ring we want to submit job to
 * @job: job that wants to use the VMID
 * @fence: fence to wait for if no id could be grabbed
 *
 * Allocate an id for the vm, adding fences to the sync obj as necessary.
 */
int amdgpu_vmid_grab(struct amdgpu_vm *vm, struct amdgpu_ring *ring,
		     struct amdgpu_job *job, struct dma_fence **fence)
{
	struct amdgpu_device *adev = ring->adev;
	unsigned vmhub = ring->vm_hub;
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *idle = NULL;
	struct amdgpu_vmid *id = NULL;
	int r = 0;

	mutex_lock(&id_mgr->lock);
	r = amdgpu_vmid_grab_idle(ring, &idle, fence);
	if (r || !idle)
		goto error;

	if (vm->reserved_vmid[vmhub] ||
	    (enforce_isolation && (vmhub == AMDGPU_GFXHUB(0)))) {
		r = amdgpu_vmid_grab_reserved(vm, ring, job, &id, fence);
		if (r || !id)
			goto error;
	} else {
		r = amdgpu_vmid_grab_used(vm, ring, job, &id, fence);
		if (r)
			goto error;

		if (!id) {
			/* Still no ID to use? Then use the idle one found earlier */
			id = idle;

			/* Remember this submission as user of the VMID */
			r = amdgpu_sync_fence(&id->active,
					      &job->base.s_fence->finished);
			if (r)
				goto error;

			job->vm_needs_flush = true;
		}

		list_move_tail(&id->list, &id_mgr->ids_lru);
	}

	job->gds_switch_needed = amdgpu_vmid_gds_switch_needed(id, job);
	if (job->vm_needs_flush) {
		id->flushed_updates = amdgpu_vm_tlb_seq(vm);
		dma_fence_put(id->last_flush);
		id->last_flush = NULL;
	}
	job->vmid = id - id_mgr->ids;
	job->pasid = vm->pasid;

	id->gds_base = job->gds_base;
	id->gds_size = job->gds_size;
	id->gws_base = job->gws_base;
	id->gws_size = job->gws_size;
	id->oa_base = job->oa_base;
	id->oa_size = job->oa_size;
	id->pd_gpu_addr = job->vm_pd_addr;
	id->owner = vm->immediate.fence_context;

	trace_amdgpu_vm_grab_id(vm, ring, job);

error:
	mutex_unlock(&id_mgr->lock);
	return r;
}
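
/*
 * Illustrative only: in the driver the scheduler retries the grab with
 * the returned fence as a job dependency; a simplified blocking sketch
 * of the same loop would be:
 *
 *	struct dma_fence *wait = NULL;
 *	int r;
 *
 *	for (;;) {
 *		r = amdgpu_vmid_grab(vm, ring, job, &wait);
 *		if (r || !wait)
 *			break;		// error, or job->vmid is assigned
 *		dma_fence_wait(wait, false);
 *		dma_fence_put(wait);
 *		wait = NULL;
 *	}
 */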

int amdgpu_vmid_alloc_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);

	++id_mgr->reserved_use_count;
	if (!id_mgr->reserved) {
		struct amdgpu_vmid *id;

		id = list_first_entry(&id_mgr->ids_lru, struct amdgpu_vmid,
				      list);
		/* Remove from normal round robin handling */
		list_del_init(&id->list);
		id_mgr->reserved = id;
	}

	mutex_unlock(&id_mgr->lock);
	return 0;
}

void amdgpu_vmid_free_reserved(struct amdgpu_device *adev,
			       unsigned vmhub)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];

	mutex_lock(&id_mgr->lock);
	if (!--id_mgr->reserved_use_count) {
		/* give the reserved ID back to normal round robin */
		list_add(&id_mgr->reserved->list, &id_mgr->ids_lru);
		id_mgr->reserved = NULL;
	}

	mutex_unlock(&id_mgr->lock);
}
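
/*
 * Reserved VMIDs are refcounted per VMHUB, so a hypothetical user
 * simply pairs the two calls:
 *
 *	amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
 *	...	// submissions on this hub now use id_mgr->reserved
 *	amdgpu_vmid_free_reserved(adev, AMDGPU_GFXHUB(0));
 */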

/**
 * amdgpu_vmid_reset - reset VMID to zero
 *
 * @adev: amdgpu device structure
 * @vmhub: vmhub type
 * @vmid: vmid number to use
 *
 * Reset saved GDS, GWS and OA to force switch on next flush.
 */
void amdgpu_vmid_reset(struct amdgpu_device *adev, unsigned vmhub,
		       unsigned vmid)
{
	struct amdgpu_vmid_mgr *id_mgr = &adev->vm_manager.id_mgr[vmhub];
	struct amdgpu_vmid *id = &id_mgr->ids[vmid];

	mutex_lock(&id_mgr->lock);
	id->owner = 0;
	id->gds_base = 0;
	id->gds_size = 0;
	id->gws_base = 0;
	id->gws_size = 0;
	id->oa_base = 0;
	id->oa_size = 0;
	mutex_unlock(&id_mgr->lock);
}

/**
 * amdgpu_vmid_reset_all - reset all VMIDs to zero
 *
 * @adev: amdgpu device structure
 *
 * Reset all VMIDs to force a flush on next use.
 */
void amdgpu_vmid_reset_all(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		for (j = 1; j < id_mgr->num_ids; ++j)
			amdgpu_vmid_reset(adev, i, j);
	}
}

/**
 * amdgpu_vmid_mgr_init - init the VMID manager
 *
 * @adev: amdgpu_device pointer
 *
 * Initialize the VM manager structures
 */
void amdgpu_vmid_mgr_init(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_init(&id_mgr->lock);
		INIT_LIST_HEAD(&id_mgr->ids_lru);
		id_mgr->reserved_use_count = 0;

		/* manage only VMIDs not used by KFD */
		id_mgr->num_ids = adev->vm_manager.first_kfd_vmid;

		/* skip over VMID 0, since it is the system VM */
		for (j = 1; j < id_mgr->num_ids; ++j) {
			amdgpu_vmid_reset(adev, i, j);
			amdgpu_sync_create(&id_mgr->ids[j].active);
			list_add_tail(&id_mgr->ids[j].list, &id_mgr->ids_lru);
		}
	}
	/* alloc a default reserved vmid to enforce isolation */
	if (enforce_isolation)
		amdgpu_vmid_alloc_reserved(adev, AMDGPU_GFXHUB(0));
}
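
/*
 * Illustrative only: device bring-up and teardown pair the manager
 * calls, roughly:
 *
 *	amdgpu_vmid_mgr_init(adev);	// e.g. from VM manager init
 *	...
 *	amdgpu_vmid_mgr_fini(adev);	// e.g. from VM manager teardown
 */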

/**
 * amdgpu_vmid_mgr_fini - cleanup VM manager
 *
 * @adev: amdgpu_device pointer
 *
 * Cleanup the VM manager and free resources.
 */
void amdgpu_vmid_mgr_fini(struct amdgpu_device *adev)
{
	unsigned i, j;

	for (i = 0; i < AMDGPU_MAX_VMHUBS; ++i) {
		struct amdgpu_vmid_mgr *id_mgr =
			&adev->vm_manager.id_mgr[i];

		mutex_destroy(&id_mgr->lock);
		for (j = 0; j < AMDGPU_NUM_VMID; ++j) {
			struct amdgpu_vmid *id = &id_mgr->ids[j];

			amdgpu_sync_free(&id->active);
			dma_fence_put(id->last_flush);
			dma_fence_put(id->pasid_mapping);
		}
	}
}