/*
 * Copyright 2008 Jerome Glisse.
 * All Rights Reserved.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice (including the next
 * paragraph) shall be included in all copies or substantial portions of the
 * Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
 * DEALINGS IN THE SOFTWARE.
 *
 * Authors:
 *    Jerome Glisse <glisse@freedesktop.org>
 */

#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sync_file.h>
#include <linux/dma-buf.h>

#include <drm/amdgpu_drm.h>
#include <drm/drm_syncobj.h>
#include <drm/ttm/ttm_tt.h>

#include "amdgpu_cs.h"
#include "amdgpu.h"
#include "amdgpu_trace.h"
#include "amdgpu_gmc.h"
#include "amdgpu_gem.h"
#include "amdgpu_ras.h"

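/*
 * Command submission (CS) IOCTL handling.
 *
 * A submission is described by a list of chunks (IBs, an optional user
 * fence, BO handles, dependencies and syncobjs). amdgpu_cs_ioctl() runs the
 * stages below in order: amdgpu_cs_pass1() copies and validates the chunks,
 * amdgpu_cs_pass2() turns them into IBs and sync objects,
 * amdgpu_cs_parser_bos() reserves and validates all buffers,
 * amdgpu_cs_patch_jobs() and amdgpu_cs_vm_handling() prepare the IBs and
 * page tables, amdgpu_cs_sync_rings() collects the remaining dependencies
 * and amdgpu_cs_submit() finally pushes the jobs to the scheduler.
 */
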
static int amdgpu_cs_parser_init(struct amdgpu_cs_parser *p,
				 struct amdgpu_device *adev,
				 struct drm_file *filp,
				 union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = filp->driver_priv;

	if (cs->in.num_chunks == 0)
		return -EINVAL;

	memset(p, 0, sizeof(*p));
	p->adev = adev;
	p->filp = filp;

	p->ctx = amdgpu_ctx_get(fpriv, cs->in.ctx_id);
	if (!p->ctx)
		return -EINVAL;

	if (atomic_read(&p->ctx->guilty)) {
		amdgpu_ctx_put(p->ctx);
		return -ECANCELED;
	}

	amdgpu_sync_create(&p->sync);
	return 0;
}

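/*
 * Map an IB chunk to a job within the gang: reuse the job that already uses
 * the same scheduler entity, or grow the gang (up to AMDGPU_CS_GANG_SIZE)
 * and return the index of the job to use.
 */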
static int amdgpu_cs_job_idx(struct amdgpu_cs_parser *p,
			     struct drm_amdgpu_cs_chunk_ib *chunk_ib)
{
	struct drm_sched_entity *entity;
	unsigned int i;
	int r;

	r = amdgpu_ctx_get_entity(p->ctx, chunk_ib->ip_type,
				  chunk_ib->ip_instance,
				  chunk_ib->ring, &entity);
	if (r)
		return r;

	/*
	 * Abort if there is no run queue associated with this entity.
	 * Possibly because of disabled HW IP.
	 */
	if (entity->rq == NULL)
		return -EINVAL;

	/* Check if we can add this IB to some existing job */
	for (i = 0; i < p->gang_size; ++i)
		if (p->entities[i] == entity)
			return i;

	/* If not, increase the gang size if possible */
	if (i == AMDGPU_CS_GANG_SIZE)
		return -EINVAL;

	p->entities[i] = entity;
	p->gang_size = i + 1;
	return i;
}

static int amdgpu_cs_p1_ib(struct amdgpu_cs_parser *p,
			   struct drm_amdgpu_cs_chunk_ib *chunk_ib,
			   unsigned int *num_ibs)
{
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	++(num_ibs[r]);
	p->gang_leader_idx = r;
	return 0;
}

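/*
 * Set up the optional user fence: the BO must be exactly one page, must not
 * be a userptr BO and needs room for the 8 byte fence value at the given
 * offset.
 */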
static int amdgpu_cs_p1_user_fence(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_cs_chunk_fence *data,
				   uint32_t *offset)
{
	struct drm_gem_object *gobj;
	struct amdgpu_bo *bo;
	unsigned long size;
	int r;

	gobj = drm_gem_object_lookup(p->filp, data->handle);
	if (gobj == NULL)
		return -EINVAL;

	bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
	p->uf_entry.priority = 0;
	p->uf_entry.tv.bo = &bo->tbo;
	/* One for TTM and two for the CS job */
	p->uf_entry.tv.num_shared = 3;

	drm_gem_object_put(gobj);

	size = amdgpu_bo_size(bo);
	if (size != PAGE_SIZE || (data->offset + 8) > size) {
		r = -EINVAL;
		goto error_unref;
	}

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm)) {
		r = -EINVAL;
		goto error_unref;
	}

	*offset = data->offset;

	return 0;

error_unref:
	amdgpu_bo_unref(&bo);
	return r;
}

static int amdgpu_cs_p1_bo_handles(struct amdgpu_cs_parser *p,
				   struct drm_amdgpu_bo_list_in *data)
{
	struct drm_amdgpu_bo_list_entry *info;
	int r;

	r = amdgpu_bo_create_list_entry_array(data, &info);
	if (r)
		return r;

	r = amdgpu_bo_list_create(p->adev, p->filp, info, data->bo_number,
				  &p->bo_list);
	if (r)
		goto error_free;

	kvfree(info);
	return 0;

error_free:
	kvfree(info);

	return r;
}

/* Copy the data from userspace and go over it the first time */
static int amdgpu_cs_pass1(struct amdgpu_cs_parser *p,
			   union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned int num_ibs[AMDGPU_CS_GANG_SIZE] = { };
	struct amdgpu_vm *vm = &fpriv->vm;
	uint64_t *chunk_array_user;
	uint64_t *chunk_array;
	uint32_t uf_offset = 0;
	unsigned int size;
	int ret;
	int i;

	chunk_array = kvmalloc_array(cs->in.num_chunks, sizeof(uint64_t),
				     GFP_KERNEL);
	if (!chunk_array)
		return -ENOMEM;

	/* get chunks */
	chunk_array_user = u64_to_user_ptr(cs->in.chunks);
	if (copy_from_user(chunk_array, chunk_array_user,
			   sizeof(uint64_t)*cs->in.num_chunks)) {
		ret = -EFAULT;
		goto free_chunk;
	}

	p->nchunks = cs->in.num_chunks;
	p->chunks = kvmalloc_array(p->nchunks, sizeof(struct amdgpu_cs_chunk),
				   GFP_KERNEL);
	if (!p->chunks) {
		ret = -ENOMEM;
		goto free_chunk;
	}

	for (i = 0; i < p->nchunks; i++) {
		struct drm_amdgpu_cs_chunk __user **chunk_ptr = NULL;
		struct drm_amdgpu_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = u64_to_user_ptr(chunk_array[i]);
		if (copy_from_user(&user_chunk, chunk_ptr,
				   sizeof(struct drm_amdgpu_cs_chunk))) {
			ret = -EFAULT;
			i--;
			goto free_partial_kdata;
		}
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		p->chunks[i].length_dw = user_chunk.length_dw;

		size = p->chunks[i].length_dw;
		cdata = u64_to_user_ptr(user_chunk.chunk_data);

		p->chunks[i].kdata = kvmalloc_array(size, sizeof(uint32_t),
						    GFP_KERNEL);
		if (p->chunks[i].kdata == NULL) {
			ret = -ENOMEM;
			i--;
			goto free_partial_kdata;
		}
		size *= sizeof(uint32_t);
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			ret = -EFAULT;
			goto free_partial_kdata;
		}

		/* Assume the worst on the following checks */
		ret = -EINVAL;
		switch (p->chunks[i].chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_ib))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_ib(p, p->chunks[i].kdata, num_ibs);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_FENCE:
			if (size < sizeof(struct drm_amdgpu_cs_chunk_fence))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_user_fence(p, p->chunks[i].kdata,
						      &uf_offset);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_BO_HANDLES:
			if (size < sizeof(struct drm_amdgpu_bo_list_in))
				goto free_partial_kdata;

			ret = amdgpu_cs_p1_bo_handles(p, p->chunks[i].kdata);
			if (ret)
				goto free_partial_kdata;
			break;

		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			break;

		default:
			goto free_partial_kdata;
		}
	}

	if (!p->gang_size) {
		ret = -EINVAL;
		goto free_partial_kdata;
	}

	for (i = 0; i < p->gang_size; ++i) {
		ret = amdgpu_job_alloc(p->adev, vm, p->entities[i], vm,
				       num_ibs[i], &p->jobs[i]);
		if (ret)
			goto free_all_kdata;
	}
	p->gang_leader = p->jobs[p->gang_leader_idx];

	if (p->ctx->vram_lost_counter != p->gang_leader->vram_lost_counter) {
		ret = -ECANCELED;
		goto free_all_kdata;
	}

	if (p->uf_entry.tv.bo)
		p->gang_leader->uf_addr = uf_offset;
	kvfree(chunk_array);

	/* Use this opportunity to fill in task info for the vm */
	amdgpu_vm_set_task_info(vm);

	return 0;

free_all_kdata:
	i = p->nchunks - 1;
free_partial_kdata:
	for (; i >= 0; i--)
		kvfree(p->chunks[i].kdata);
	kvfree(p->chunks);
	p->chunks = NULL;
	p->nchunks = 0;
free_chunk:
	kvfree(chunk_array);

	return ret;
}

static int amdgpu_cs_p2_ib(struct amdgpu_cs_parser *p,
			   struct amdgpu_cs_chunk *chunk,
			   unsigned int *ce_preempt,
			   unsigned int *de_preempt)
{
	struct drm_amdgpu_cs_chunk_ib *chunk_ib = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_ring *ring;
	struct amdgpu_job *job;
	struct amdgpu_ib *ib;
	int r;

	r = amdgpu_cs_job_idx(p, chunk_ib);
	if (r < 0)
		return r;

	job = p->jobs[r];
	ring = amdgpu_job_ring(job);
	ib = &job->ibs[job->num_ibs++];

	/* MM engine doesn't support user fences */
	if (p->uf_entry.tv.bo && ring->funcs->no_user_fence)
		return -EINVAL;

	if (chunk_ib->ip_type == AMDGPU_HW_IP_GFX &&
	    chunk_ib->flags & AMDGPU_IB_FLAG_PREEMPT) {
		if (chunk_ib->flags & AMDGPU_IB_FLAG_CE)
			(*ce_preempt)++;
		else
			(*de_preempt)++;

		/* Each GFX command submit allows only 1 IB max
		 * preemptible for CE & DE */
		if (*ce_preempt > 1 || *de_preempt > 1)
			return -EINVAL;
	}

	if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
		job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;

	r = amdgpu_ib_get(p->adev, vm, ring->funcs->parse_cs ?
			  chunk_ib->ib_bytes : 0,
			  AMDGPU_IB_POOL_DELAYED, ib);
	if (r) {
		DRM_ERROR("Failed to get ib !\n");
		return r;
	}

	ib->gpu_addr = chunk_ib->va_start;
	ib->length_dw = chunk_ib->ib_bytes / 4;
	ib->flags = chunk_ib->flags;
	return 0;
}

static int amdgpu_cs_p2_dependencies(struct amdgpu_cs_parser *p,
				     struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_dep *deps = chunk->kdata;
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	unsigned num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_dep);

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_ctx *ctx;
		struct drm_sched_entity *entity;
		struct dma_fence *fence;

		ctx = amdgpu_ctx_get(fpriv, deps[i].ctx_id);
		if (ctx == NULL)
			return -EINVAL;

		r = amdgpu_ctx_get_entity(ctx, deps[i].ip_type,
					  deps[i].ip_instance,
					  deps[i].ring, &entity);
		if (r) {
			amdgpu_ctx_put(ctx);
			return r;
		}

		fence = amdgpu_ctx_get_fence(ctx, entity, deps[i].handle);
		amdgpu_ctx_put(ctx);

		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		if (chunk->chunk_id == AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES) {
			struct drm_sched_fence *s_fence;
			struct dma_fence *old = fence;

			s_fence = to_drm_sched_fence(fence);
			fence = dma_fence_get(&s_fence->scheduled);
			dma_fence_put(old);
		}

		r = amdgpu_sync_fence(&p->sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static int amdgpu_syncobj_lookup_and_add(struct amdgpu_cs_parser *p,
					 uint32_t handle, u64 point,
					 u64 flags)
{
	struct dma_fence *fence;
	int r;

	r = drm_syncobj_find_fence(p->filp, handle, point, flags, &fence);
	if (r) {
		DRM_ERROR("syncobj %u failed to find fence @ %llu (%d)!\n",
			  handle, point, r);
		return r;
	}

	r = amdgpu_sync_fence(&p->sync, fence);
	dma_fence_put(fence);
	return r;
}

static int amdgpu_cs_p2_syncobj_in(struct amdgpu_cs_parser *p,
				   struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, deps[i].handle, 0, 0);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_wait(struct amdgpu_cs_parser *p,
					      struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned num_deps;
	int i, r;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);
	for (i = 0; i < num_deps; ++i) {
		r = amdgpu_syncobj_lookup_and_add(p, syncobj_deps[i].handle,
						  syncobj_deps[i].point,
						  syncobj_deps[i].flags);
		if (r)
			return r;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_out(struct amdgpu_cs_parser *p,
				    struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_sem *deps = chunk->kdata;
	unsigned num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_sem);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		p->post_deps[i].syncobj =
			drm_syncobj_find(p->filp, deps[i].handle);
		if (!p->post_deps[i].syncobj)
			return -EINVAL;
		p->post_deps[i].chain = NULL;
		p->post_deps[i].point = 0;
		p->num_post_deps++;
	}

	return 0;
}

static int amdgpu_cs_p2_syncobj_timeline_signal(struct amdgpu_cs_parser *p,
						struct amdgpu_cs_chunk *chunk)
{
	struct drm_amdgpu_cs_chunk_syncobj *syncobj_deps = chunk->kdata;
	unsigned num_deps;
	int i;

	num_deps = chunk->length_dw * 4 /
		sizeof(struct drm_amdgpu_cs_chunk_syncobj);

	if (p->post_deps)
		return -EINVAL;

	p->post_deps = kmalloc_array(num_deps, sizeof(*p->post_deps),
				     GFP_KERNEL);
	p->num_post_deps = 0;

	if (!p->post_deps)
		return -ENOMEM;

	for (i = 0; i < num_deps; ++i) {
		struct amdgpu_cs_post_dep *dep = &p->post_deps[i];

		dep->chain = NULL;
		if (syncobj_deps[i].point) {
			dep->chain = dma_fence_chain_alloc();
			if (!dep->chain)
				return -ENOMEM;
		}

		dep->syncobj = drm_syncobj_find(p->filp,
						syncobj_deps[i].handle);
		if (!dep->syncobj) {
			dma_fence_chain_free(dep->chain);
			return -EINVAL;
		}
		dep->point = syncobj_deps[i].point;
		p->num_post_deps++;
	}

	return 0;
}

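/*
 * Second pass over the chunks: allocate the IBs and gather all dependencies
 * and syncobjs the submission has to wait for or signal.
 */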
static int amdgpu_cs_pass2(struct amdgpu_cs_parser *p)
{
	unsigned int ce_preempt = 0, de_preempt = 0;
	int i, r;

	for (i = 0; i < p->nchunks; ++i) {
		struct amdgpu_cs_chunk *chunk;

		chunk = &p->chunks[i];

		switch (chunk->chunk_id) {
		case AMDGPU_CHUNK_ID_IB:
			r = amdgpu_cs_p2_ib(p, chunk, &ce_preempt, &de_preempt);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_DEPENDENCIES:
		case AMDGPU_CHUNK_ID_SCHEDULED_DEPENDENCIES:
			r = amdgpu_cs_p2_dependencies(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_IN:
			r = amdgpu_cs_p2_syncobj_in(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_OUT:
			r = amdgpu_cs_p2_syncobj_out(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_WAIT:
			r = amdgpu_cs_p2_syncobj_timeline_wait(p, chunk);
			if (r)
				return r;
			break;
		case AMDGPU_CHUNK_ID_SYNCOBJ_TIMELINE_SIGNAL:
			r = amdgpu_cs_p2_syncobj_timeline_signal(p, chunk);
			if (r)
				return r;
			break;
		}
	}

	return 0;
}

/* Convert microseconds to bytes. */
static u64 us_to_bytes(struct amdgpu_device *adev, s64 us)
{
	if (us <= 0 || !adev->mm_stats.log2_max_MBps)
		return 0;

	/* Since accum_us is incremented by a million per second, just
	 * multiply it by the number of MB/s to get the number of bytes.
	 */
	return us << adev->mm_stats.log2_max_MBps;
}

static s64 bytes_to_us(struct amdgpu_device *adev, u64 bytes)
{
	if (!adev->mm_stats.log2_max_MBps)
		return 0;

	return bytes >> adev->mm_stats.log2_max_MBps;
}

/* Returns how many bytes TTM can move right now. If no bytes can be moved,
 * it returns 0. If it returns non-zero, it's OK to move at least one buffer,
 * which means it can go over the threshold once. If that happens, the driver
 * will be in debt and no other buffer migrations can be done until that debt
 * is repaid.
 *
 * This approach allows moving a buffer of any size (it's important to allow
 * that).
 *
 * The currency is simply time in microseconds and it increases as the clock
 * ticks. The accumulated microseconds (us) are converted to bytes and
 * returned.
 */
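/*
 * For example, with log2_max_MBps == 8 (roughly 256 MB/s) the 200 ms cap
 * below corresponds to 200000 << 8, i.e. about 51 MB of buffer moves that a
 * single submission may trigger.
 */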
static void amdgpu_cs_get_threshold_for_moves(struct amdgpu_device *adev,
					      u64 *max_bytes,
					      u64 *max_vis_bytes)
{
	s64 time_us, increment_us;
	u64 free_vram, total_vram, used_vram;
	/* Allow a maximum of 200 accumulated ms. This is basically per-IB
	 * throttling.
	 *
	 * It means that in order to get full max MBps, at least 5 IBs per
	 * second must be submitted and not more than 200ms apart from each
	 * other.
	 */
	const s64 us_upper_bound = 200000;

	if (!adev->mm_stats.log2_max_MBps) {
		*max_bytes = 0;
		*max_vis_bytes = 0;
		return;
	}

	total_vram = adev->gmc.real_vram_size - atomic64_read(&adev->vram_pin_size);
	used_vram = ttm_resource_manager_usage(&adev->mman.vram_mgr.manager);
	free_vram = used_vram >= total_vram ? 0 : total_vram - used_vram;

	spin_lock(&adev->mm_stats.lock);

	/* Increase the amount of accumulated us. */
	time_us = ktime_to_us(ktime_get());
	increment_us = time_us - adev->mm_stats.last_update_us;
	adev->mm_stats.last_update_us = time_us;
	adev->mm_stats.accum_us = min(adev->mm_stats.accum_us + increment_us,
				      us_upper_bound);

	/* This prevents the short period of low performance when the VRAM
	 * usage is low and the driver is in debt or doesn't have enough
	 * accumulated us to fill VRAM quickly.
	 *
	 * The situation can occur in these cases:
	 * - a lot of VRAM is freed by userspace
	 * - the presence of a big buffer causes a lot of evictions
	 *   (solution: split buffers into smaller ones)
	 *
	 * If 128 MB or 1/8th of VRAM is free, start filling it now by setting
	 * accum_us to a positive number.
	 */
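	/*
	 * E.g. a dGPU with 8 GB of free VRAM gets at least bytes_to_us(2 GB)
	 * of accumulated budget here.
	 */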
	if (free_vram >= 128 * 1024 * 1024 || free_vram >= total_vram / 8) {
		s64 min_us;

		/* Be more aggressive on dGPUs. Try to fill a portion of free
		 * VRAM now.
		 */
		if (!(adev->flags & AMD_IS_APU))
			min_us = bytes_to_us(adev, free_vram / 4);
		else
			min_us = 0; /* Reset accum_us on APUs. */

		adev->mm_stats.accum_us = max(min_us, adev->mm_stats.accum_us);
	}

	/* This is set to 0 if the driver is in debt to disallow (optional)
	 * buffer moves.
	 */
	*max_bytes = us_to_bytes(adev, adev->mm_stats.accum_us);

	/* Do the same for visible VRAM if half of it is free */
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc)) {
		u64 total_vis_vram = adev->gmc.visible_vram_size;
		u64 used_vis_vram =
			amdgpu_vram_mgr_vis_usage(&adev->mman.vram_mgr);

		if (used_vis_vram < total_vis_vram) {
			u64 free_vis_vram = total_vis_vram - used_vis_vram;
			adev->mm_stats.accum_us_vis = min(adev->mm_stats.accum_us_vis +
							  increment_us, us_upper_bound);

			if (free_vis_vram >= total_vis_vram / 2)
				adev->mm_stats.accum_us_vis =
					max(bytes_to_us(adev, free_vis_vram / 2),
					    adev->mm_stats.accum_us_vis);
		}

		*max_vis_bytes = us_to_bytes(adev, adev->mm_stats.accum_us_vis);
	} else {
		*max_vis_bytes = 0;
	}

	spin_unlock(&adev->mm_stats.lock);
}

/* Report how many bytes have really been moved for the last command
 * submission. This can result in a debt that can stop buffer migrations
 * temporarily.
 */
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
				  u64 num_vis_bytes)
{
	spin_lock(&adev->mm_stats.lock);
	adev->mm_stats.accum_us -= bytes_to_us(adev, num_bytes);
	adev->mm_stats.accum_us_vis -= bytes_to_us(adev, num_vis_bytes);
	spin_unlock(&adev->mm_stats.lock);
}

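/*
 * Per-BO validation callback: pick the preferred placement while the
 * per-submission move budget lasts, otherwise fall back to the allowed
 * domains, and account every byte TTM actually moved.
 */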
static int amdgpu_cs_bo_validate(void *param, struct amdgpu_bo *bo)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	struct amdgpu_cs_parser *p = param;
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = false,
		.resv = bo->tbo.base.resv
	};
	uint32_t domain;
	int r;

	if (bo->tbo.pin_count)
		return 0;

	/* Don't move this buffer if we have depleted our allowance
	 * to move it. Don't move anything if the threshold is zero.
	 */
	if (p->bytes_moved < p->bytes_moved_threshold &&
	    (!bo->tbo.base.dma_buf ||
	     list_empty(&bo->tbo.base.dma_buf->attachments))) {
		if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
		    (bo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED)) {
			/* And don't move a CPU_ACCESS_REQUIRED BO to limited
			 * visible VRAM if we've depleted our allowance to do
			 * that.
			 */
			if (p->bytes_moved_vis < p->bytes_moved_vis_threshold)
				domain = bo->preferred_domains;
			else
				domain = bo->allowed_domains;
		} else {
			domain = bo->preferred_domains;
		}
	} else {
		domain = bo->allowed_domains;
	}

retry:
	amdgpu_bo_placement_from_domain(bo, domain);
	r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);

	p->bytes_moved += ctx.bytes_moved;
	if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
	    amdgpu_bo_in_cpu_visible_vram(bo))
		p->bytes_moved_vis += ctx.bytes_moved;

	if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
		domain = bo->allowed_domains;
		goto retry;
	}

	return r;
}

static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
				   struct list_head *validated)
{
	struct ttm_operation_ctx ctx = { true, false };
	struct amdgpu_bo_list_entry *lobj;
	int r;

	list_for_each_entry(lobj, validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(lobj->tv.bo);
		struct mm_struct *usermm;

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm && usermm != current->mm)
			return -EPERM;

		if (amdgpu_ttm_tt_is_userptr(bo->tbo.ttm) &&
		    lobj->user_invalidated && lobj->user_pages) {
			amdgpu_bo_placement_from_domain(bo,
							AMDGPU_GEM_DOMAIN_CPU);
			r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
			if (r)
				return r;

			amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
						     lobj->user_pages);
		}

		r = amdgpu_cs_bo_validate(p, bo);
		if (r)
			return r;

		kvfree(lobj->user_pages);
		lobj->user_pages = NULL;
	}
	return 0;
}

static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
				union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct list_head duplicates;
	unsigned int i;
	int r;

	INIT_LIST_HEAD(&p->validated);

	/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
	if (cs->in.bo_list_handle) {
		if (p->bo_list)
			return -EINVAL;

		r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
				       &p->bo_list);
		if (r)
			return r;
	} else if (!p->bo_list) {
		/* Create an empty bo_list when no handle is provided */
		r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
					  &p->bo_list);
		if (r)
			return r;
	}

	mutex_lock(&p->bo_list->bo_list_mutex);

	/* One for TTM and one for the CS job */
	amdgpu_bo_list_for_each_entry(e, p->bo_list)
		e->tv.num_shared = 2;

	amdgpu_bo_list_get_list(p->bo_list, &p->validated);

	INIT_LIST_HEAD(&duplicates);
	amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);

	if (p->uf_entry.tv.bo && !ttm_to_amdgpu_bo(p->uf_entry.tv.bo)->parent)
		list_add(&p->uf_entry.tv.head, &p->validated);

	/* Get userptr backing pages. If pages are updated after registered
	 * in amdgpu_gem_userptr_ioctl(), amdgpu_cs_list_validate() will do
	 * amdgpu_ttm_backend_bind() to flush and invalidate new pages
	 */
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		bool userpage_invalidated = false;
		int i;

		e->user_pages = kvmalloc_array(bo->tbo.ttm->num_pages,
					       sizeof(struct page *),
					       GFP_KERNEL | __GFP_ZERO);
		if (!e->user_pages) {
			DRM_ERROR("kvmalloc_array failure\n");
			r = -ENOMEM;
			goto out_free_user_pages;
		}

		r = amdgpu_ttm_tt_get_user_pages(bo, e->user_pages, &e->range);
		if (r) {
			kvfree(e->user_pages);
			e->user_pages = NULL;
			goto out_free_user_pages;
		}

		for (i = 0; i < bo->tbo.ttm->num_pages; i++) {
			if (bo->tbo.ttm->pages[i] != e->user_pages[i]) {
				userpage_invalidated = true;
				break;
			}
		}
		e->user_invalidated = userpage_invalidated;
	}

	r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
				   &duplicates);
	if (unlikely(r != 0)) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("ttm_eu_reserve_buffers failed.\n");
		goto out_free_user_pages;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		e->bo_va = amdgpu_vm_bo_find(vm, bo);
	}

	amdgpu_cs_get_threshold_for_moves(p->adev, &p->bytes_moved_threshold,
					  &p->bytes_moved_vis_threshold);
	p->bytes_moved = 0;
	p->bytes_moved_vis = 0;

	r = amdgpu_vm_validate_pt_bos(p->adev, &fpriv->vm,
				      amdgpu_cs_bo_validate, p);
	if (r) {
		DRM_ERROR("amdgpu_vm_validate_pt_bos() failed.\n");
		goto error_validate;
	}

	r = amdgpu_cs_list_validate(p, &duplicates);
	if (r)
		goto error_validate;

	r = amdgpu_cs_list_validate(p, &p->validated);
	if (r)
		goto error_validate;

	if (p->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(p->uf_entry.tv.bo);

		r = amdgpu_ttm_alloc_gart(&uf->tbo);
		if (r)
			goto error_validate;

		p->gang_leader->uf_addr += amdgpu_bo_gpu_offset(uf);
	}

	amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
				     p->bytes_moved_vis);

	for (i = 0; i < p->gang_size; ++i)
		amdgpu_job_set_resources(p->jobs[i], p->bo_list->gds_obj,
					 p->bo_list->gws_obj,
					 p->bo_list->oa_obj);
	return 0;

error_validate:
	ttm_eu_backoff_reservation(&p->ticket, &p->validated);

out_free_user_pages:
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		if (!e->user_pages)
			continue;
		amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		kvfree(e->user_pages);
		e->user_pages = NULL;
		e->range = NULL;
	}
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return r;
}

static void trace_amdgpu_cs_ibs(struct amdgpu_cs_parser *p)
{
	int i, j;

	if (!trace_amdgpu_cs_enabled())
		return;

	for (i = 0; i < p->gang_size; ++i) {
		struct amdgpu_job *job = p->jobs[i];

		for (j = 0; j < job->num_ibs; ++j)
			trace_amdgpu_cs(p, job, &job->ibs[j]);
	}
}

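/*
 * For rings that need CS parsing or in-place patching (UVD/VCE VM
 * emulation), map the IB BO and let the ring parse or patch the commands.
 */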
static int amdgpu_cs_patch_ibs(struct amdgpu_cs_parser *p,
			       struct amdgpu_job *job)
{
	struct amdgpu_ring *ring = amdgpu_job_ring(job);
	unsigned int i;
	int r;

	/* Only for UVD/VCE VM emulation */
	if (!ring->funcs->parse_cs && !ring->funcs->patch_cs_in_place)
		return 0;

	for (i = 0; i < job->num_ibs; ++i) {
		struct amdgpu_ib *ib = &job->ibs[i];
		struct amdgpu_bo_va_mapping *m;
		struct amdgpu_bo *aobj;
		uint64_t va_start;
		uint8_t *kptr;

		va_start = ib->gpu_addr & AMDGPU_GMC_HOLE_MASK;
		r = amdgpu_cs_find_mapping(p, va_start, &aobj, &m);
		if (r) {
			DRM_ERROR("IB va_start is invalid\n");
			return r;
		}

		if ((va_start + ib->length_dw * 4) >
		    (m->last + 1) * AMDGPU_GPU_PAGE_SIZE) {
			DRM_ERROR("IB va_start+ib_bytes is invalid\n");
			return -EINVAL;
		}

		/* the IB should be reserved at this point */
		r = amdgpu_bo_kmap(aobj, (void **)&kptr);
		if (r)
			return r;

		kptr += va_start - (m->start * AMDGPU_GPU_PAGE_SIZE);

		if (ring->funcs->parse_cs) {
			memcpy(ib->ptr, kptr, ib->length_dw * 4);
			amdgpu_bo_kunmap(aobj);

			r = amdgpu_ring_parse_cs(ring, p, job, ib);
			if (r)
				return r;
		} else {
			ib->ptr = (uint32_t *)kptr;
			r = amdgpu_ring_patch_cs_in_place(ring, p, job, ib);
			amdgpu_bo_kunmap(aobj);
			if (r)
				return r;
		}
	}

	return 0;
}

static int amdgpu_cs_patch_jobs(struct amdgpu_cs_parser *p)
{
	unsigned int i;
	int r;

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_cs_patch_ibs(p, p->jobs[i]);
		if (r)
			return r;
	}
	return 0;
}

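/*
 * Update the page tables for all BOs used by the submission and make the
 * jobs wait for the last page table updates before they run.
 */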
static int amdgpu_cs_vm_handling(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *job = p->gang_leader;
	struct amdgpu_device *adev = p->adev;
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_list_entry *e;
	struct amdgpu_bo_va *bo_va;
	struct amdgpu_bo *bo;
	unsigned int i;
	int r;

	r = amdgpu_vm_clear_freed(adev, vm, NULL);
	if (r)
		return r;

	r = amdgpu_vm_bo_update(adev, fpriv->prt_va, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, fpriv->prt_va->last_pt_update);
	if (r)
		return r;

	if (fpriv->csa_va) {
		bo_va = fpriv->csa_va;
		BUG_ON(!bo_va);
		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	amdgpu_bo_list_for_each_entry(e, p->bo_list) {
		/* ignore duplicates */
		bo = ttm_to_amdgpu_bo(e->tv.bo);
		if (!bo)
			continue;

		bo_va = e->bo_va;
		if (bo_va == NULL)
			continue;

		r = amdgpu_vm_bo_update(adev, bo_va, false);
		if (r)
			return r;

		r = amdgpu_sync_fence(&p->sync, bo_va->last_pt_update);
		if (r)
			return r;
	}

	r = amdgpu_vm_handle_moved(adev, vm);
	if (r)
		return r;

	r = amdgpu_vm_update_pdes(adev, vm, false);
	if (r)
		return r;

	r = amdgpu_sync_fence(&p->sync, vm->last_update);
	if (r)
		return r;

	for (i = 0; i < p->gang_size; ++i) {
		job = p->jobs[i];

		if (!job->vm)
			continue;

		job->vm_pd_addr = amdgpu_gmc_pd_addr(vm->root.bo);
	}

	if (amdgpu_vm_debug) {
		/* Invalidate all BOs to test for userspace bugs */
		amdgpu_bo_list_for_each_entry(e, p->bo_list) {
			struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

			/* ignore duplicates */
			if (!bo)
				continue;

			amdgpu_vm_bo_invalidate(adev, bo, false);
		}
	}

	return 0;
}

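/*
 * Collect the implicit and explicit fences of all reserved BOs and push them
 * into the jobs; fences from the gang leader's own scheduler ring are kept
 * as explicit dependencies so a pipeline sync can be inserted.
 */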
static int amdgpu_cs_sync_rings(struct amdgpu_cs_parser *p)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct drm_gpu_scheduler *sched;
	struct amdgpu_bo_list_entry *e;
	struct dma_fence *fence;
	unsigned int i;
	int r;

	r = amdgpu_ctx_wait_prev_fence(p->ctx, p->entities[p->gang_leader_idx]);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("amdgpu_ctx_wait_prev_fence failed.\n");
		return r;
	}

	list_for_each_entry(e, &p->validated, tv.head) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);
		struct dma_resv *resv = bo->tbo.base.resv;
		enum amdgpu_sync_mode sync_mode;

		sync_mode = amdgpu_bo_explicit_sync(bo) ?
			AMDGPU_SYNC_EXPLICIT : AMDGPU_SYNC_NE_OWNER;
		r = amdgpu_sync_resv(p->adev, &p->sync, resv, sync_mode,
				     &fpriv->vm);
		if (r)
			return r;
	}

	for (i = 0; i < p->gang_size; ++i) {
		r = amdgpu_sync_push_to_job(&p->sync, p->jobs[i]);
		if (r)
			return r;
	}

	sched = p->gang_leader->base.entity->rq->sched;
	while ((fence = amdgpu_sync_get_fence(&p->sync))) {
		struct drm_sched_fence *s_fence = to_drm_sched_fence(fence);

		/*
		 * When we have a dependency it might be necessary to insert a
		 * pipeline sync to make sure that all caches etc are flushed and the
		 * next job actually sees the results from the previous one
		 * before we start executing on the same scheduler ring.
		 */
		if (!s_fence || s_fence->sched != sched) {
			dma_fence_put(fence);
			continue;
		}

		r = amdgpu_sync_fence(&p->gang_leader->explicit_sync, fence);
		dma_fence_put(fence);
		if (r)
			return r;
	}
	return 0;
}

static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
{
	int i;

	for (i = 0; i < p->num_post_deps; ++i) {
		if (p->post_deps[i].chain && p->post_deps[i].point) {
			drm_syncobj_add_point(p->post_deps[i].syncobj,
					      p->post_deps[i].chain,
					      p->fence, p->post_deps[i].point);
			p->post_deps[i].chain = NULL;
		} else {
			drm_syncobj_replace_fence(p->post_deps[i].syncobj,
						  p->fence);
		}
	}
}

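/*
 * Arm the jobs, wire up the gang dependencies, do the final userptr check
 * under the notifier lock, add the fences to all BOs and push the jobs to
 * their schedulers.
 */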
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
			    union drm_amdgpu_cs *cs)
{
	struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
	struct amdgpu_job *leader = p->gang_leader;
	struct amdgpu_bo_list_entry *e;
	unsigned int i;
	uint64_t seq;
	int r;

	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_arm(&p->jobs[i]->base);

	for (i = 0; i < p->gang_size; ++i) {
		struct dma_fence *fence;

		if (p->jobs[i] == leader)
			continue;

		fence = &p->jobs[i]->base.s_fence->scheduled;
		dma_fence_get(fence);
		r = drm_sched_job_add_dependency(&leader->base, fence);
		if (r) {
			dma_fence_put(fence);
			goto error_cleanup;
		}
	}

	if (p->gang_size > 1) {
		for (i = 0; i < p->gang_size; ++i)
			amdgpu_job_set_gang_leader(p->jobs[i], leader);
	}

	/* No memory allocation is allowed while holding the notifier lock.
	 * The lock is held until amdgpu_cs_submit is finished and fence is
	 * added to BOs.
	 */
	mutex_lock(&p->adev->notifier_lock);

	/* If userptr are invalidated after amdgpu_cs_parser_bos(), return
	 * -EAGAIN, drmIoctl in libdrm will restart the amdgpu_cs_ioctl.
	 */
	r = 0;
	amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
		struct amdgpu_bo *bo = ttm_to_amdgpu_bo(e->tv.bo);

		r |= !amdgpu_ttm_tt_get_user_pages_done(bo->tbo.ttm, e->range);
		e->range = NULL;
	}
	if (r) {
		r = -EAGAIN;
		goto error_unlock;
	}

	p->fence = dma_fence_get(&leader->base.s_fence->finished);
	list_for_each_entry(e, &p->validated, tv.head) {

		/* Everybody except for the gang leader uses READ */
		for (i = 0; i < p->gang_size; ++i) {
			if (p->jobs[i] == leader)
				continue;

			dma_resv_add_fence(e->tv.bo->base.resv,
					   &p->jobs[i]->base.s_fence->finished,
					   DMA_RESV_USAGE_READ);
		}

		/* The gang leader is remembered as writer */
		e->tv.num_shared = 0;
	}

	seq = amdgpu_ctx_add_fence(p->ctx, p->entities[p->gang_leader_idx],
				   p->fence);
	amdgpu_cs_post_dependencies(p);

	if ((leader->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
	    !p->ctx->preamble_presented) {
		leader->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
		p->ctx->preamble_presented = true;
	}

	cs->out.handle = seq;
	leader->uf_sequence = seq;

	amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
	for (i = 0; i < p->gang_size; ++i) {
		amdgpu_job_free_resources(p->jobs[i]);
		trace_amdgpu_cs_ioctl(p->jobs[i]);
		drm_sched_entity_push_job(&p->jobs[i]->base);
		p->jobs[i] = NULL;
	}

	amdgpu_vm_move_to_lru_tail(p->adev, &fpriv->vm);
	ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);

	mutex_unlock(&p->adev->notifier_lock);
	mutex_unlock(&p->bo_list->bo_list_mutex);
	return 0;

error_unlock:
	mutex_unlock(&p->adev->notifier_lock);

error_cleanup:
	for (i = 0; i < p->gang_size; ++i)
		drm_sched_job_cleanup(&p->jobs[i]->base);
	return r;
}

/* Cleanup the parser structure */
static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser)
{
	unsigned i;

	amdgpu_sync_free(&parser->sync);
	for (i = 0; i < parser->num_post_deps; i++) {
		drm_syncobj_put(parser->post_deps[i].syncobj);
		kfree(parser->post_deps[i].chain);
	}
	kfree(parser->post_deps);

	dma_fence_put(parser->fence);

	if (parser->ctx)
		amdgpu_ctx_put(parser->ctx);
	if (parser->bo_list)
		amdgpu_bo_list_put(parser->bo_list);

	for (i = 0; i < parser->nchunks; i++)
		kvfree(parser->chunks[i].kdata);
	kvfree(parser->chunks);
	for (i = 0; i < parser->gang_size; ++i) {
		if (parser->jobs[i])
			amdgpu_job_free(parser->jobs[i]);
	}
	if (parser->uf_entry.tv.bo) {
		struct amdgpu_bo *uf = ttm_to_amdgpu_bo(parser->uf_entry.tv.bo);

		amdgpu_bo_unref(&uf);
	}
}

int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	struct amdgpu_cs_parser parser;
	int r;

	if (amdgpu_ras_intr_triggered())
		return -EHWPOISON;

	if (!adev->accel_working)
		return -EBUSY;

	r = amdgpu_cs_parser_init(&parser, adev, filp, data);
	if (r) {
		if (printk_ratelimit())
			DRM_ERROR("Failed to initialize parser %d!\n", r);
		return r;
	}

	r = amdgpu_cs_pass1(&parser, data);
	if (r)
		goto error_fini;

	r = amdgpu_cs_pass2(&parser);
	if (r)
		goto error_fini;

	r = amdgpu_cs_parser_bos(&parser, data);
	if (r) {
		if (r == -ENOMEM)
			DRM_ERROR("Not enough memory for command submission!\n");
		else if (r != -ERESTARTSYS && r != -EAGAIN)
			DRM_ERROR("Failed to process the buffer list %d!\n", r);
		goto error_fini;
	}

	r = amdgpu_cs_patch_jobs(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_vm_handling(&parser);
	if (r)
		goto error_backoff;

	r = amdgpu_cs_sync_rings(&parser);
	if (r)
		goto error_backoff;

	trace_amdgpu_cs_ibs(&parser);

	r = amdgpu_cs_submit(&parser, data);
	if (r)
		goto error_backoff;

	amdgpu_cs_parser_fini(&parser);
	return 0;

error_backoff:
	ttm_eu_backoff_reservation(&parser.ticket, &parser.validated);
	mutex_unlock(&parser.bo_list->bo_list_mutex);

error_fini:
	amdgpu_cs_parser_fini(&parser);
	return r;
}

/**
 * amdgpu_cs_wait_ioctl - wait for a command submission to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 *
 * Wait for the command submission identified by handle to finish.
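 *
 * Return: 0 on success or a negative error code on failure. wait->out.status
 * is set to a non-zero value if the wait timed out before the submission
 * completed.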
 */
int amdgpu_cs_wait_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *filp)
{
	union drm_amdgpu_wait_cs *wait = data;
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout);
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	long r;

	ctx = amdgpu_ctx_get(filp->driver_priv, wait->in.ctx_id);
	if (ctx == NULL)
		return -EINVAL;

	r = amdgpu_ctx_get_entity(ctx, wait->in.ip_type, wait->in.ip_instance,
				  wait->in.ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return r;
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, wait->in.handle);
	if (IS_ERR(fence))
		r = PTR_ERR(fence);
	else if (fence) {
		r = dma_fence_wait_timeout(fence, true, timeout);
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
	} else
		r = 1;

	amdgpu_ctx_put(ctx);
	if (r < 0)
		return r;

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r == 0);

	return 0;
}

/**
 * amdgpu_cs_get_fence - helper to get fence from drm_amdgpu_fence
 *
 * @adev: amdgpu device
 * @filp: file private
 * @user: drm_amdgpu_fence copied from user space
 */
static struct dma_fence *amdgpu_cs_get_fence(struct amdgpu_device *adev,
					     struct drm_file *filp,
					     struct drm_amdgpu_fence *user)
{
	struct drm_sched_entity *entity;
	struct amdgpu_ctx *ctx;
	struct dma_fence *fence;
	int r;

	ctx = amdgpu_ctx_get(filp->driver_priv, user->ctx_id);
	if (ctx == NULL)
		return ERR_PTR(-EINVAL);

	r = amdgpu_ctx_get_entity(ctx, user->ip_type, user->ip_instance,
				  user->ring, &entity);
	if (r) {
		amdgpu_ctx_put(ctx);
		return ERR_PTR(r);
	}

	fence = amdgpu_ctx_get_fence(ctx, entity, user->seq_no);
	amdgpu_ctx_put(ctx);

	return fence;
}

int amdgpu_cs_fence_to_handle_ioctl(struct drm_device *dev, void *data,
				    struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_fence_to_handle *info = data;
	struct dma_fence *fence;
	struct drm_syncobj *syncobj;
	struct sync_file *sync_file;
	int fd, r;

	fence = amdgpu_cs_get_fence(adev, filp, &info->in.fence);
	if (IS_ERR(fence))
		return PTR_ERR(fence);

	if (!fence)
		fence = dma_fence_get_stub();

	switch (info->in.what) {
	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_handle(filp, syncobj, &info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNCOBJ_FD:
		r = drm_syncobj_create(&syncobj, 0, fence);
		dma_fence_put(fence);
		if (r)
			return r;
		r = drm_syncobj_get_fd(syncobj, (int *)&info->out.handle);
		drm_syncobj_put(syncobj);
		return r;

	case AMDGPU_FENCE_TO_HANDLE_GET_SYNC_FILE_FD:
		fd = get_unused_fd_flags(O_CLOEXEC);
		if (fd < 0) {
			dma_fence_put(fence);
			return fd;
		}

		sync_file = sync_file_create(fence);
		dma_fence_put(fence);
		if (!sync_file) {
			put_unused_fd(fd);
			return -ENOMEM;
		}

		fd_install(fd, sync_file->file);
		info->out.handle = fd;
		return 0;

	default:
		dma_fence_put(fence);
		return -EINVAL;
	}
}

/**
 * amdgpu_cs_wait_all_fences - wait on all fences to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_all_fences(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     union drm_amdgpu_wait_fences *wait,
				     struct drm_amdgpu_fence *fences)
{
	uint32_t fence_count = wait->in.fence_count;
	unsigned int i;
	long r = 1;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;
		unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence))
			return PTR_ERR(fence);
		else if (!fence)
			continue;

		r = dma_fence_wait_timeout(fence, true, timeout);
		/* Check the fence error before dropping our reference */
		if (r > 0 && fence->error)
			r = fence->error;
		dma_fence_put(fence);
		if (r < 0)
			return r;

		if (r == 0)
			break;
	}

	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);

	return 0;
}

/**
 * amdgpu_cs_wait_any_fence - wait on any fence to signal
 *
 * @adev: amdgpu device
 * @filp: file private
 * @wait: wait parameters
 * @fences: array of drm_amdgpu_fence
 */
static int amdgpu_cs_wait_any_fence(struct amdgpu_device *adev,
				    struct drm_file *filp,
				    union drm_amdgpu_wait_fences *wait,
				    struct drm_amdgpu_fence *fences)
{
	unsigned long timeout = amdgpu_gem_timeout(wait->in.timeout_ns);
	uint32_t fence_count = wait->in.fence_count;
	uint32_t first = ~0;
	struct dma_fence **array;
	unsigned int i;
	long r;

	/* Prepare the fence array */
	array = kcalloc(fence_count, sizeof(struct dma_fence *), GFP_KERNEL);

	if (array == NULL)
		return -ENOMEM;

	for (i = 0; i < fence_count; i++) {
		struct dma_fence *fence;

		fence = amdgpu_cs_get_fence(adev, filp, &fences[i]);
		if (IS_ERR(fence)) {
			r = PTR_ERR(fence);
			goto err_free_fence_array;
		} else if (fence) {
			array[i] = fence;
		} else { /* NULL, the fence has been already signaled */
			r = 1;
			first = i;
			goto out;
		}
	}

	r = dma_fence_wait_any_timeout(array, fence_count, true, timeout,
				       &first);
	if (r < 0)
		goto err_free_fence_array;

out:
	memset(wait, 0, sizeof(*wait));
	wait->out.status = (r > 0);
	wait->out.first_signaled = first;

	if (first < fence_count && array[first])
		r = array[first]->error;
	else
		r = 0;

err_free_fence_array:
	for (i = 0; i < fence_count; i++)
		dma_fence_put(array[i]);
	kfree(array);

	return r;
}

/**
 * amdgpu_cs_wait_fences_ioctl - wait for multiple command submissions to finish
 *
 * @dev: drm device
 * @data: data from userspace
 * @filp: file private
 */
int amdgpu_cs_wait_fences_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	struct amdgpu_device *adev = drm_to_adev(dev);
	union drm_amdgpu_wait_fences *wait = data;
	uint32_t fence_count = wait->in.fence_count;
	struct drm_amdgpu_fence *fences_user;
	struct drm_amdgpu_fence *fences;
	int r;

	/* Get the fences from userspace */
	fences = kmalloc_array(fence_count, sizeof(struct drm_amdgpu_fence),
			       GFP_KERNEL);
	if (fences == NULL)
		return -ENOMEM;

	fences_user = u64_to_user_ptr(wait->in.fences);
	if (copy_from_user(fences, fences_user,
			   sizeof(struct drm_amdgpu_fence) * fence_count)) {
		r = -EFAULT;
		goto err_free_fences;
	}

	if (wait->in.wait_all)
		r = amdgpu_cs_wait_all_fences(adev, filp, wait, fences);
	else
		r = amdgpu_cs_wait_any_fence(adev, filp, wait, fences);

err_free_fences:
	kfree(fences);

	return r;
}

/**
 * amdgpu_cs_find_mapping - find bo_va for VM address
 *
 * @parser: command submission parser context
 * @addr: VM address
 * @bo: resulting BO of the mapping found
 * @map: Placeholder to return found BO mapping
 *
 * Search the buffer objects of the command submission context for a certain
 * virtual memory address and make sure the backing BO is usable by the
 * hardware. Returns 0 and the found mapping through @bo and @map on success,
 * a negative error code otherwise.
 */
int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
			   uint64_t addr, struct amdgpu_bo **bo,
			   struct amdgpu_bo_va_mapping **map)
{
	struct amdgpu_fpriv *fpriv = parser->filp->driver_priv;
	struct ttm_operation_ctx ctx = { false, false };
	struct amdgpu_vm *vm = &fpriv->vm;
	struct amdgpu_bo_va_mapping *mapping;
	int r;

	addr /= AMDGPU_GPU_PAGE_SIZE;

	mapping = amdgpu_vm_bo_lookup_mapping(vm, addr);
	if (!mapping || !mapping->bo_va || !mapping->bo_va->base.bo)
		return -EINVAL;

	*bo = mapping->bo_va->base.bo;
	*map = mapping;

	/* Double check that the BO is reserved by this CS */
	if (dma_resv_locking_ctx((*bo)->tbo.base.resv) != &parser->ticket)
		return -EINVAL;

	if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
		(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
		amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
		r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
		if (r)
			return r;
	}

	return amdgpu_ttm_alloc_gart(&(*bo)->tbo);
}