// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2023 Intel Corporation
 */

#include <drm/drm_file.h>

#include <linux/bitfield.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/module.h>
#include <uapi/drm/ivpu_accel.h>

#include "ivpu_drv.h"
#include "ivpu_hw.h"
#include "ivpu_ipc.h"
#include "ivpu_job.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define CMD_BUF_IDX		0
#define JOB_ID_JOB_MASK		GENMASK(7, 0)
#define JOB_ID_CONTEXT_MASK	GENMASK(31, 8)
#define JOB_MAX_BUFFER_COUNT	65535

static void ivpu_cmdq_ring_db(struct ivpu_device *vdev, struct ivpu_cmdq *cmdq)
{
	ivpu_hw_reg_db_set(vdev, cmdq->db_id);
}

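/*
 * Allocate a command queue: reserve a doorbell id, create a 4K
 * write-combined buffer shared with the firmware and initialize the job
 * queue header with zeroed head/tail pointers.
 */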
static struct ivpu_cmdq *ivpu_cmdq_alloc(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct xa_limit db_xa_limit = {.max = IVPU_MAX_DB, .min = IVPU_MIN_DB};
	struct ivpu_device *vdev = file_priv->vdev;
	struct vpu_job_queue_header *jobq_header;
	struct ivpu_cmdq *cmdq;
	int ret;

	cmdq = kzalloc(sizeof(*cmdq), GFP_KERNEL);
	if (!cmdq)
		return NULL;

	ret = xa_alloc(&vdev->db_xa, &cmdq->db_id, NULL, db_xa_limit, GFP_KERNEL);
	if (ret) {
		ivpu_err(vdev, "Failed to allocate doorbell id: %d\n", ret);
		goto err_free_cmdq;
	}

	cmdq->mem = ivpu_bo_create_global(vdev, SZ_4K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!cmdq->mem)
		goto err_erase_xa;

	cmdq->entry_count = (u32)((ivpu_bo_size(cmdq->mem) - sizeof(struct vpu_job_queue_header)) /
				  sizeof(struct vpu_job_queue_entry));

	cmdq->jobq = (struct vpu_job_queue *)ivpu_bo_vaddr(cmdq->mem);
	jobq_header = &cmdq->jobq->header;
	jobq_header->engine_idx = engine;
	jobq_header->head = 0;
	jobq_header->tail = 0;
	wmb(); /* Flush WC buffer for jobq->header */

	return cmdq;

err_erase_xa:
	xa_erase(&vdev->db_xa, cmdq->db_id);
err_free_cmdq:
	kfree(cmdq);
	return NULL;
}

static void ivpu_cmdq_free(struct ivpu_file_priv *file_priv, struct ivpu_cmdq *cmdq)
{
	if (!cmdq)
		return;

	ivpu_bo_free(cmdq->mem);
	xa_erase(&file_priv->vdev->db_xa, cmdq->db_id);
	kfree(cmdq);
}

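/*
 * Return the context's command queue for @engine, allocating it on first
 * use and registering its doorbell with the firmware if that has not been
 * done yet. Must be called with file_priv->lock held.
 */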
static struct ivpu_cmdq *ivpu_cmdq_acquire(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];
	int ret;

	lockdep_assert_held(&file_priv->lock);

	if (!cmdq) {
		cmdq = ivpu_cmdq_alloc(file_priv, engine);
		if (!cmdq)
			return NULL;
		file_priv->cmdq[engine] = cmdq;
	}

	if (cmdq->db_registered)
		return cmdq;

	ret = ivpu_jsm_register_db(vdev, file_priv->ctx.id, cmdq->db_id,
				   cmdq->mem->vpu_addr, ivpu_bo_size(cmdq->mem));
	if (ret)
		return NULL;

	cmdq->db_registered = true;

	return cmdq;
}

static void ivpu_cmdq_release_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];

	lockdep_assert_held(&file_priv->lock);

	if (cmdq) {
		file_priv->cmdq[engine] = NULL;
		if (cmdq->db_registered)
			ivpu_jsm_unregister_db(file_priv->vdev, cmdq->db_id);

		ivpu_cmdq_free(file_priv, cmdq);
	}
}

void ivpu_cmdq_release_all_locked(struct ivpu_file_priv *file_priv)
{
	int i;

	lockdep_assert_held(&file_priv->lock);

	for (i = 0; i < IVPU_NUM_ENGINES; i++)
		ivpu_cmdq_release_locked(file_priv, i);
}

/*
 * Mark the doorbell as unregistered and reset the job queue pointers.
 * This function needs to be called when the VPU hardware is restarted
 * and the firmware loses the job queue state. The next time the job
 * queue is used, it will be registered again.
 */
static void ivpu_cmdq_reset_locked(struct ivpu_file_priv *file_priv, u16 engine)
{
	struct ivpu_cmdq *cmdq = file_priv->cmdq[engine];

	lockdep_assert_held(&file_priv->lock);

	if (cmdq) {
		cmdq->db_registered = false;
		cmdq->jobq->header.head = 0;
		cmdq->jobq->header.tail = 0;
		wmb(); /* Flush WC buffer for jobq header */
	}
}

static void ivpu_cmdq_reset_all(struct ivpu_file_priv *file_priv)
{
	int i;

	mutex_lock(&file_priv->lock);

	for (i = 0; i < IVPU_NUM_ENGINES; i++)
		ivpu_cmdq_reset_locked(file_priv, i);

	mutex_unlock(&file_priv->lock);
}

void ivpu_cmdq_reset_all_contexts(struct ivpu_device *vdev)
{
	struct ivpu_file_priv *file_priv;
	unsigned long ctx_id;

	mutex_lock(&vdev->context_list_lock);

	xa_for_each(&vdev->context_xa, ctx_id, file_priv)
		ivpu_cmdq_reset_all(file_priv);

	mutex_unlock(&vdev->context_list_lock);
}

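/*
 * Append a job to the ring buffer shared with the firmware. The queue is
 * full when advancing the tail would make it equal to the head. The entry
 * is written before the tail is updated, so the firmware never observes a
 * tail that points past a partially filled entry.
 */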
static int ivpu_cmdq_push_job(struct ivpu_cmdq *cmdq, struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	struct vpu_job_queue_header *header = &cmdq->jobq->header;
	struct vpu_job_queue_entry *entry;
	u32 tail = READ_ONCE(header->tail);
	u32 next_entry = (tail + 1) % cmdq->entry_count;

	/* Check if there is space left in job queue */
	if (next_entry == header->head) {
		ivpu_dbg(vdev, JOB, "Job queue full: ctx %d engine %d db %d head %d tail %d\n",
			 job->file_priv->ctx.id, job->engine_idx, cmdq->db_id, header->head, tail);
		return -EBUSY;
	}

	entry = &cmdq->jobq->job[tail];
	entry->batch_buf_addr = job->cmd_buf_vpu_addr;
	entry->job_id = job->job_id;
	entry->flags = 0;
	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_SUBMISSION))
		entry->flags = VPU_JOB_FLAGS_NULL_SUBMISSION_MASK;
	wmb(); /* Ensure that tail is updated after filling entry */
	header->tail = next_entry;
	wmb(); /* Flush WC buffer for jobq header */

	return 0;
}

struct ivpu_fence {
	struct dma_fence base;
	spinlock_t lock; /* protects base */
	struct ivpu_device *vdev;
};

static inline struct ivpu_fence *to_vpu_fence(struct dma_fence *fence)
{
	return container_of(fence, struct ivpu_fence, base);
}

static const char *ivpu_fence_get_driver_name(struct dma_fence *fence)
{
	return DRIVER_NAME;
}

static const char *ivpu_fence_get_timeline_name(struct dma_fence *fence)
{
	struct ivpu_fence *ivpu_fence = to_vpu_fence(fence);

	return dev_name(ivpu_fence->vdev->drm.dev);
}

static const struct dma_fence_ops ivpu_fence_ops = {
	.get_driver_name = ivpu_fence_get_driver_name,
	.get_timeline_name = ivpu_fence_get_timeline_name,
};

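/* Allocate a fence on its own timeline; it is signaled when the job completes. */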
static struct dma_fence *ivpu_fence_create(struct ivpu_device *vdev)
{
	struct ivpu_fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return NULL;

	fence->vdev = vdev;
	spin_lock_init(&fence->lock);
	dma_fence_init(&fence->base, &ivpu_fence_ops, &fence->lock, dma_fence_context_alloc(1), 1);

	return &fence->base;
}

static void ivpu_job_destroy(struct ivpu_job *job)
{
	struct ivpu_device *vdev = job->vdev;
	u32 i;

	ivpu_dbg(vdev, JOB, "Job destroyed: id %3u ctx %2d engine %d",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx);

	for (i = 0; i < job->bo_count; i++)
		if (job->bos[i])
			drm_gem_object_put(&job->bos[i]->base.base);

	dma_fence_put(job->done_fence);
	ivpu_file_priv_put(&job->file_priv);
	kfree(job);
}

static struct ivpu_job *
ivpu_job_create(struct ivpu_file_priv *file_priv, u32 engine_idx, u32 bo_count)
{
	struct ivpu_device *vdev = file_priv->vdev;
	struct ivpu_job *job;

	job = kzalloc(struct_size(job, bos, bo_count), GFP_KERNEL);
	if (!job)
		return NULL;

	job->vdev = vdev;
	job->engine_idx = engine_idx;
	job->bo_count = bo_count;
	job->done_fence = ivpu_fence_create(vdev);
	if (!job->done_fence) {
		ivpu_warn_ratelimited(vdev, "Failed to create a fence\n");
		goto err_free_job;
	}

	job->file_priv = ivpu_file_priv_get(file_priv);

	ivpu_dbg(vdev, JOB, "Job created: ctx %2d engine %d", file_priv->ctx.id, job->engine_idx);
	return job;

err_free_job:
	kfree(job);
	return NULL;
}

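/*
 * Remove the job from the submitted jobs XArray, signal its done fence and
 * drop the runtime PM reference taken at submission. Jobs from contexts
 * with pending MMU faults are reported as aborted.
 */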
static int ivpu_job_signal_and_destroy(struct ivpu_device *vdev, u32 job_id, u32 job_status)
{
	struct ivpu_job *job;

	job = xa_erase(&vdev->submitted_jobs_xa, job_id);
	if (!job)
		return -ENOENT;

	if (job->file_priv->has_mmu_faults)
		job_status = DRM_IVPU_JOB_STATUS_ABORTED;

	job->bos[CMD_BUF_IDX]->job_status = job_status;
	dma_fence_signal(job->done_fence);

	ivpu_dbg(vdev, JOB, "Job complete: id %3u ctx %2d engine %d status 0x%x\n",
		 job->job_id, job->file_priv->ctx.id, job->engine_idx, job_status);

	ivpu_job_destroy(job);
	ivpu_stop_job_timeout_detection(vdev);

	ivpu_rpm_put(vdev);
	return 0;
}

void ivpu_jobs_abort_all(struct ivpu_device *vdev)
{
	struct ivpu_job *job;
	unsigned long id;

	xa_for_each(&vdev->submitted_jobs_xa, id, job)
		ivpu_job_signal_and_destroy(vdev, id, DRM_IVPU_JOB_STATUS_ABORTED);
}

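/*
 * Queue the job on its engine's command queue and ring the doorbell. Job
 * ids carry the context id in their upper bits (JOB_ID_CONTEXT_MASK), so
 * the id ranges of different contexts never overlap.
 */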
static int ivpu_job_submit(struct ivpu_job *job)
{
	struct ivpu_file_priv *file_priv = job->file_priv;
	struct ivpu_device *vdev = job->vdev;
	struct xa_limit job_id_range;
	struct ivpu_cmdq *cmdq;
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	mutex_lock(&file_priv->lock);

	cmdq = ivpu_cmdq_acquire(job->file_priv, job->engine_idx);
	if (!cmdq) {
		ivpu_warn_ratelimited(vdev, "Failed to get job queue, ctx %d engine %d\n",
				      file_priv->ctx.id, job->engine_idx);
		ret = -EINVAL;
		goto err_unlock_file_priv;
	}

	job_id_range.min = FIELD_PREP(JOB_ID_CONTEXT_MASK, (file_priv->ctx.id - 1));
	job_id_range.max = job_id_range.min | JOB_ID_JOB_MASK;

	xa_lock(&vdev->submitted_jobs_xa);
	ret = __xa_alloc(&vdev->submitted_jobs_xa, &job->job_id, job, job_id_range, GFP_KERNEL);
	if (ret) {
		ivpu_dbg(vdev, JOB, "Too many active jobs in ctx %d\n",
			 file_priv->ctx.id);
		ret = -EBUSY;
		goto err_unlock_submitted_jobs_xa;
	}

	ret = ivpu_cmdq_push_job(cmdq, job);
	if (ret)
		goto err_erase_xa;

	ivpu_start_job_timeout_detection(vdev);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW)) {
		cmdq->jobq->header.head = cmdq->jobq->header.tail;
		wmb(); /* Flush WC buffer for jobq header */
	} else {
		ivpu_cmdq_ring_db(vdev, cmdq);
	}

	ivpu_dbg(vdev, JOB, "Job submitted: id %3u ctx %2d engine %d addr 0x%llx next %d\n",
		 job->job_id, file_priv->ctx.id, job->engine_idx,
		 job->cmd_buf_vpu_addr, cmdq->jobq->header.tail);

	xa_unlock(&vdev->submitted_jobs_xa);

	mutex_unlock(&file_priv->lock);

	if (unlikely(ivpu_test_mode & IVPU_TEST_MODE_NULL_HW))
		ivpu_job_signal_and_destroy(vdev, job->job_id, VPU_JSM_STATUS_SUCCESS);

	return 0;

err_erase_xa:
	__xa_erase(&vdev->submitted_jobs_xa, job->job_id);
err_unlock_submitted_jobs_xa:
	xa_unlock(&vdev->submitted_jobs_xa);
err_unlock_file_priv:
	mutex_unlock(&file_priv->lock);
	ivpu_rpm_put(vdev);
	return ret;
}

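/*
 * Look up and pin every buffer handle, validate the command buffer offset,
 * then attach the job's done fence to each BO reservation: as a write
 * fence on the command buffer and as a bookkeeping fence on the rest.
 */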
static int
ivpu_job_prepare_bos_for_submit(struct drm_file *file, struct ivpu_job *job, u32 *buf_handles,
				u32 buf_count, u32 commands_offset)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct ww_acquire_ctx acquire_ctx;
	enum dma_resv_usage usage;
	struct ivpu_bo *bo;
	int ret;
	u32 i;

	for (i = 0; i < buf_count; i++) {
		struct drm_gem_object *obj = drm_gem_object_lookup(file, buf_handles[i]);

		if (!obj)
			return -ENOENT;

		job->bos[i] = to_ivpu_bo(obj);

		ret = ivpu_bo_pin(job->bos[i]);
		if (ret)
			return ret;
	}

	bo = job->bos[CMD_BUF_IDX];
	if (!dma_resv_test_signaled(bo->base.base.resv, DMA_RESV_USAGE_READ)) {
		ivpu_warn(vdev, "Buffer is already in use\n");
		return -EBUSY;
	}

	if (commands_offset >= ivpu_bo_size(bo)) {
		ivpu_warn(vdev, "Invalid command buffer offset %u\n", commands_offset);
		return -EINVAL;
	}

	job->cmd_buf_vpu_addr = bo->vpu_addr + commands_offset;

	ret = drm_gem_lock_reservations((struct drm_gem_object **)job->bos, buf_count,
					&acquire_ctx);
	if (ret) {
		ivpu_warn(vdev, "Failed to lock reservations: %d\n", ret);
		return ret;
	}

	for (i = 0; i < buf_count; i++) {
		ret = dma_resv_reserve_fences(job->bos[i]->base.base.resv, 1);
		if (ret) {
			ivpu_warn(vdev, "Failed to reserve fences: %d\n", ret);
			goto unlock_reservations;
		}
	}

	for (i = 0; i < buf_count; i++) {
		usage = (i == CMD_BUF_IDX) ? DMA_RESV_USAGE_WRITE : DMA_RESV_USAGE_BOOKKEEP;
		dma_resv_add_fence(job->bos[i]->base.base.resv, job->done_fence, usage);
	}

unlock_reservations:
	drm_gem_unlock_reservations((struct drm_gem_object **)job->bos, buf_count, &acquire_ctx);

	wmb(); /* Flush write combining buffers */

	return ret;
}

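/*
 * Handler for the submit ioctl: validates the ioctl parameters, copies the
 * buffer handles from user space and submits the job while holding the
 * reset lock, so submission cannot race with a device reset.
 */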
int ivpu_submit_ioctl(struct drm_device *dev, void *data, struct drm_file *file)
{
	struct ivpu_file_priv *file_priv = file->driver_priv;
	struct ivpu_device *vdev = file_priv->vdev;
	struct drm_ivpu_submit *params = data;
	struct ivpu_job *job;
	u32 *buf_handles;
	int idx, ret;

	if (params->engine > DRM_IVPU_ENGINE_COPY)
		return -EINVAL;

	if (params->priority > DRM_IVPU_JOB_PRIORITY_REALTIME)
		return -EINVAL;

	if (params->buffer_count == 0 || params->buffer_count > JOB_MAX_BUFFER_COUNT)
		return -EINVAL;

	if (!IS_ALIGNED(params->commands_offset, 8))
		return -EINVAL;

	if (!file_priv->ctx.id)
		return -EINVAL;

	if (file_priv->has_mmu_faults)
		return -EBADFD;

	buf_handles = kcalloc(params->buffer_count, sizeof(u32), GFP_KERNEL);
	if (!buf_handles)
		return -ENOMEM;

	ret = copy_from_user(buf_handles,
			     (void __user *)params->buffers_ptr,
			     params->buffer_count * sizeof(u32));
	if (ret) {
		ret = -EFAULT;
		goto err_free_handles;
	}

	if (!drm_dev_enter(&vdev->drm, &idx)) {
		ret = -ENODEV;
		goto err_free_handles;
	}

	ivpu_dbg(vdev, JOB, "Submit ioctl: ctx %u buf_count %u\n",
		 file_priv->ctx.id, params->buffer_count);

	job = ivpu_job_create(file_priv, params->engine, params->buffer_count);
	if (!job) {
		ivpu_err(vdev, "Failed to create job\n");
		ret = -ENOMEM;
		goto err_exit_dev;
	}

	ret = ivpu_job_prepare_bos_for_submit(file, job, buf_handles, params->buffer_count,
					      params->commands_offset);
	if (ret) {
		ivpu_err(vdev, "Failed to prepare job: %d\n", ret);
		goto err_destroy_job;
	}

	down_read(&vdev->pm->reset_lock);
	ret = ivpu_job_submit(job);
	up_read(&vdev->pm->reset_lock);
	if (ret)
		goto err_signal_fence;

	drm_dev_exit(idx);
	kfree(buf_handles);
	return ret;

err_signal_fence:
	dma_fence_signal(job->done_fence);
err_destroy_job:
	ivpu_job_destroy(job);
err_exit_dev:
	drm_dev_exit(idx);
err_free_handles:
	kfree(buf_handles);
	return ret;
}

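/*
 * IPC callback invoked for messages on the VPU_IPC_CHAN_JOB_RET channel.
 * Completes the reported job and restarts timeout detection while other
 * jobs are still in flight.
 */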
static void
ivpu_job_done_callback(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		       struct vpu_jsm_msg *jsm_msg)
{
	struct vpu_ipc_msg_payload_job_done *payload;
	int ret;

	if (!jsm_msg) {
		ivpu_err(vdev, "IPC message has no JSM payload\n");
		return;
	}

	if (jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
		ivpu_err(vdev, "Invalid JSM message result: %d\n", jsm_msg->result);
		return;
	}

	payload = (struct vpu_ipc_msg_payload_job_done *)&jsm_msg->payload;
	ret = ivpu_job_signal_and_destroy(vdev, payload->job_id, payload->job_status);
	if (!ret && !xa_empty(&vdev->submitted_jobs_xa))
		ivpu_start_job_timeout_detection(vdev);
}

void ivpu_job_done_consumer_init(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_add(vdev, &vdev->job_done_consumer,
			      VPU_IPC_CHAN_JOB_RET, ivpu_job_done_callback);
}

void ivpu_job_done_consumer_fini(struct ivpu_device *vdev)
{
	ivpu_ipc_consumer_del(vdev, &vdev->job_done_consumer);
}