// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-2024 Intel Corporation
 */

#include <linux/genalloc.h>
#include <linux/highmem.h>
#include <linux/pm_runtime.h>
#include <linux/wait.h>

#include "ivpu_drv.h"
#include "ivpu_gem.h"
#include "ivpu_hw.h"
#include "ivpu_hw_reg_io.h"
#include "ivpu_ipc.h"
#include "ivpu_jsm_msg.h"
#include "ivpu_pm.h"

#define IPC_MAX_RX_MSG	128

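/*
 * A single TX slot carved out of ipc->mem_tx: the IPC header is sent to the
 * firmware together with the JSM message that its data_addr points to.
 */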
struct ivpu_ipc_tx_buf {
	struct ivpu_ipc_hdr ipc;
	struct vpu_jsm_msg jsm;
};

static void ivpu_ipc_msg_dump(struct ivpu_device *vdev, char *c,
			      struct ivpu_ipc_hdr *ipc_hdr, u32 vpu_addr)
{
	ivpu_dbg(vdev, IPC,
		 "%s: vpu:0x%x (data_addr:0x%08x, data_size:0x%x, channel:0x%x, src_node:0x%x, dst_node:0x%x, status:0x%x)\n",
		 c, vpu_addr, ipc_hdr->data_addr, ipc_hdr->data_size, ipc_hdr->channel,
		 ipc_hdr->src_node, ipc_hdr->dst_node, ipc_hdr->status);
}

static void ivpu_jsm_msg_dump(struct ivpu_device *vdev, char *c,
			      struct vpu_jsm_msg *jsm_msg, u32 vpu_addr)
{
	u32 *payload = (u32 *)&jsm_msg->payload;

	ivpu_dbg(vdev, JSM,
		 "%s: vpu:0x%08x (type:%s, status:0x%x, id: 0x%x, result: 0x%x, payload:0x%x 0x%x 0x%x 0x%x 0x%x)\n",
		 c, vpu_addr, ivpu_jsm_msg_type_to_str(jsm_msg->type),
		 jsm_msg->status, jsm_msg->request_id, jsm_msg->result,
		 payload[0], payload[1], payload[2], payload[3], payload[4]);
}

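/*
 * Return RX buffers to the firmware by marking both statuses as FREE.
 * The write barrier makes sure the status updates leave the WC buffers
 * before the firmware can reuse the slots.
 */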
static void
ivpu_ipc_rx_mark_free(struct ivpu_device *vdev, struct ivpu_ipc_hdr *ipc_hdr,
		      struct vpu_jsm_msg *jsm_msg)
{
	ipc_hdr->status = IVPU_IPC_HDR_FREE;
	if (jsm_msg)
		jsm_msg->status = VPU_JSM_MSG_FREE;
	wmb(); /* Flush WC buffers for message statuses */
}

static void ivpu_ipc_mem_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	ivpu_bo_free(ipc->mem_rx);
	ivpu_bo_free(ipc->mem_tx);
}

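/*
 * Reserve a TX slot from the gen_pool, fill in the IPC header and JSM
 * message and assign a new request_id. The slot is expected to be released
 * by the firmware (statuses set back to FREE), otherwise a warning is
 * printed before the slot is reused.
 */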
static int
ivpu_ipc_tx_prepare(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_tx_buf *tx_buf;
	u32 tx_buf_vpu_addr;
	u32 jsm_vpu_addr;

	tx_buf_vpu_addr = gen_pool_alloc(ipc->mm_tx, sizeof(*tx_buf));
	if (!tx_buf_vpu_addr) {
		ivpu_err_ratelimited(vdev, "Failed to reserve IPC buffer, size %ld\n",
				     sizeof(*tx_buf));
		return -ENOMEM;
	}

	tx_buf = ivpu_to_cpu_addr(ipc->mem_tx, tx_buf_vpu_addr);
	if (drm_WARN_ON(&vdev->drm, !tx_buf)) {
		gen_pool_free(ipc->mm_tx, tx_buf_vpu_addr, sizeof(*tx_buf));
		return -EIO;
	}

	jsm_vpu_addr = tx_buf_vpu_addr + offsetof(struct ivpu_ipc_tx_buf, jsm);

	if (tx_buf->ipc.status != IVPU_IPC_HDR_FREE)
		ivpu_warn_ratelimited(vdev, "IPC message vpu:0x%x not released by firmware\n",
				      tx_buf_vpu_addr);

	if (tx_buf->jsm.status != VPU_JSM_MSG_FREE)
		ivpu_warn_ratelimited(vdev, "JSM message vpu:0x%x not released by firmware\n",
				      jsm_vpu_addr);

	memset(tx_buf, 0, sizeof(*tx_buf));
	tx_buf->ipc.data_addr = jsm_vpu_addr;
	/* TODO: Set data_size to actual JSM message size, not union of all messages */
	tx_buf->ipc.data_size = sizeof(*req);
	tx_buf->ipc.channel = cons->channel;
	tx_buf->ipc.src_node = 0;
	tx_buf->ipc.dst_node = 1;
	tx_buf->ipc.status = IVPU_IPC_HDR_ALLOCATED;
	tx_buf->jsm.type = req->type;
	tx_buf->jsm.status = VPU_JSM_MSG_ALLOCATED;
	tx_buf->jsm.payload = req->payload;

	req->request_id = atomic_inc_return(&ipc->request_id);
	tx_buf->jsm.request_id = req->request_id;
	cons->request_id = req->request_id;
	wmb(); /* Flush WC buffers for IPC, JSM msgs */

	cons->tx_vpu_addr = tx_buf_vpu_addr;

	ivpu_jsm_msg_dump(vdev, "TX", &tx_buf->jsm, jsm_vpu_addr);
	ivpu_ipc_msg_dump(vdev, "TX", &tx_buf->ipc, tx_buf_vpu_addr);

	return 0;
}

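/* Return a TX slot to the gen_pool; a zero vpu_addr means there is nothing to free. */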
static void ivpu_ipc_tx_release(struct ivpu_device *vdev, u32 vpu_addr)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	if (vpu_addr)
		gen_pool_free(ipc->mm_tx, vpu_addr, sizeof(struct ivpu_ipc_tx_buf));
}

static void ivpu_ipc_tx(struct ivpu_device *vdev, u32 vpu_addr)
{
	ivpu_hw_reg_ipc_tx_set(vdev, vpu_addr);
}

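/*
 * Queue an incoming message for a consumer: callback consumers are served
 * from the IRQ thread via ipc->cb_msg_list, synchronous consumers get the
 * message on their own rx_msg_list and are woken up. Called from IRQ
 * context with ipc->cons_lock held, hence GFP_ATOMIC.
 */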
static void
ivpu_ipc_rx_msg_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		    struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg;

	lockdep_assert_held(&ipc->cons_lock);
	lockdep_assert_irqs_disabled();

	rx_msg = kzalloc(sizeof(*rx_msg), GFP_ATOMIC);
	if (!rx_msg) {
		ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		return;
	}

	atomic_inc(&ipc->rx_msg_count);

	rx_msg->ipc_hdr = ipc_hdr;
	rx_msg->jsm_msg = jsm_msg;
	rx_msg->callback = cons->rx_callback;

	if (rx_msg->callback) {
		list_add_tail(&rx_msg->link, &ipc->cb_msg_list);
	} else {
		spin_lock(&cons->rx_lock);
		list_add_tail(&rx_msg->link, &cons->rx_msg_list);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
}

static void
ivpu_ipc_rx_msg_del(struct ivpu_device *vdev, struct ivpu_ipc_rx_msg *rx_msg)
{
	list_del(&rx_msg->link);
	ivpu_ipc_rx_mark_free(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
	atomic_dec(&vdev->ipc->rx_msg_count);
	kfree(rx_msg);
}

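/* Register a consumer for a channel; rx_callback may be NULL for synchronous receive. */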
void ivpu_ipc_consumer_add(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			   u32 channel, ivpu_ipc_rx_callback_t rx_callback)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	INIT_LIST_HEAD(&cons->link);
	cons->channel = channel;
	cons->tx_vpu_addr = 0;
	cons->request_id = 0;
	cons->aborted = false;
	cons->rx_callback = rx_callback;
	spin_lock_init(&cons->rx_lock);
	INIT_LIST_HEAD(&cons->rx_msg_list);
	init_waitqueue_head(&cons->rx_msg_wq);

	spin_lock_irq(&ipc->cons_lock);
	list_add_tail(&cons->link, &ipc->cons_list);
	spin_unlock_irq(&ipc->cons_lock);
}

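/* Unregister a consumer, drop any messages still queued for it and free its TX slot. */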
void ivpu_ipc_consumer_del(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	spin_lock_irq(&ipc->cons_lock);
	list_del(&cons->link);
	spin_unlock_irq(&ipc->cons_lock);

	spin_lock_irq(&cons->rx_lock);
	list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);

	ivpu_ipc_tx_release(vdev, cons->tx_vpu_addr);
}

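/* Send a JSM message to the firmware; fails with -EAGAIN when IPC is disabled. */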
static int
ivpu_ipc_send(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons, struct vpu_jsm_msg *req)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	mutex_lock(&ipc->lock);

	if (!ipc->on) {
		ret = -EAGAIN;
		goto unlock;
	}

	ret = ivpu_ipc_tx_prepare(vdev, cons, req);
	if (ret)
		goto unlock;

	ivpu_ipc_tx(vdev, cons->tx_vpu_addr);

unlock:
	mutex_unlock(&ipc->lock);
	return ret;
}

static bool ivpu_ipc_rx_need_wakeup(struct ivpu_ipc_consumer *cons)
{
	bool ret;

	spin_lock_irq(&cons->rx_lock);
	ret = !list_empty(&cons->rx_msg_list) || cons->aborted;
	spin_unlock_irq(&cons->rx_lock);

	return ret;
}

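/*
 * Wait up to timeout_ms for a response on a synchronous consumer. Returns
 * -ETIMEDOUT when nothing arrives, -ECANCELED when the consumer was aborted
 * by ivpu_ipc_disable() and -EBADMSG when the firmware reports an error in
 * the JSM result field (the response is still copied out in that case).
 */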
int ivpu_ipc_receive(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
		     struct ivpu_ipc_hdr *ipc_buf,
		     struct vpu_jsm_msg *jsm_msg, unsigned long timeout_ms)
{
	struct ivpu_ipc_rx_msg *rx_msg;
	int wait_ret, ret = 0;

	if (drm_WARN_ONCE(&vdev->drm, cons->rx_callback, "Consumer works only in async mode\n"))
		return -EINVAL;

	wait_ret = wait_event_timeout(cons->rx_msg_wq,
				      ivpu_ipc_rx_need_wakeup(cons),
				      msecs_to_jiffies(timeout_ms));

	if (wait_ret == 0)
		return -ETIMEDOUT;

	spin_lock_irq(&cons->rx_lock);
	if (cons->aborted) {
		spin_unlock_irq(&cons->rx_lock);
		return -ECANCELED;
	}
	rx_msg = list_first_entry_or_null(&cons->rx_msg_list, struct ivpu_ipc_rx_msg, link);
	if (!rx_msg) {
		spin_unlock_irq(&cons->rx_lock);
		return -EAGAIN;
	}

	if (ipc_buf)
		memcpy(ipc_buf, rx_msg->ipc_hdr, sizeof(*ipc_buf));
	if (rx_msg->jsm_msg) {
		u32 size = min_t(int, rx_msg->ipc_hdr->data_size, sizeof(*jsm_msg));

		if (rx_msg->jsm_msg->result != VPU_JSM_STATUS_SUCCESS) {
			ivpu_dbg(vdev, IPC, "IPC resp result error: %d\n", rx_msg->jsm_msg->result);
			ret = -EBADMSG;
		}

		if (jsm_msg)
			memcpy(jsm_msg, rx_msg->jsm_msg, size);
	}

	ivpu_ipc_rx_msg_del(vdev, rx_msg);
	spin_unlock_irq(&cons->rx_lock);
	return ret;
}

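/* Single request/response transaction on a dedicated, temporary synchronous consumer. */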
static int
ivpu_ipc_send_receive_internal(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			       enum vpu_ipc_msg_type expected_resp_type,
			       struct vpu_jsm_msg *resp, u32 channel,
			       unsigned long timeout_ms)
{
	struct ivpu_ipc_consumer cons;
	int ret;

	ivpu_ipc_consumer_add(vdev, &cons, channel, NULL);

	ret = ivpu_ipc_send(vdev, &cons, req);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC send failed: %d\n", ret);
		goto consumer_del;
	}

	ret = ivpu_ipc_receive(vdev, &cons, NULL, resp, timeout_ms);
	if (ret) {
		ivpu_warn_ratelimited(vdev, "IPC receive failed: type %s, ret %d\n",
				      ivpu_jsm_msg_type_to_str(req->type), ret);
		goto consumer_del;
	}

	if (resp->type != expected_resp_type) {
		ivpu_warn_ratelimited(vdev, "Invalid JSM response type: 0x%x\n", resp->type);
		ret = -EBADE;
	}

consumer_del:
	ivpu_ipc_consumer_del(vdev, &cons);
	return ret;
}

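/*
 * Variant of send/receive that assumes the device is already powered up.
 * On timeout a heartbeat query is sent to check whether the firmware is
 * still alive; if even the heartbeat times out, recovery is triggered.
 * The original error is returned either way.
 */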
int ivpu_ipc_send_receive_active(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
				 enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
				 u32 channel, unsigned long timeout_ms)
{
	struct vpu_jsm_msg hb_req = { .type = VPU_JSM_MSG_QUERY_ENGINE_HB };
	struct vpu_jsm_msg hb_resp;
	int ret, hb_ret;

	drm_WARN_ON(&vdev->drm, pm_runtime_status_suspended(vdev->drm.dev));

	ret = ivpu_ipc_send_receive_internal(vdev, req, expected_resp, resp, channel, timeout_ms);
	if (ret != -ETIMEDOUT)
		return ret;

	hb_ret = ivpu_ipc_send_receive_internal(vdev, &hb_req, VPU_JSM_MSG_QUERY_ENGINE_HB_DONE,
						&hb_resp, VPU_IPC_CHAN_ASYNC_CMD,
						vdev->timeout.jsm);
	if (hb_ret == -ETIMEDOUT)
		ivpu_pm_trigger_recovery(vdev, "IPC timeout");

	return ret;
}

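/* Same as ivpu_ipc_send_receive_active() but wrapped in a runtime PM reference. */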
int ivpu_ipc_send_receive(struct ivpu_device *vdev, struct vpu_jsm_msg *req,
			  enum vpu_ipc_msg_type expected_resp, struct vpu_jsm_msg *resp,
			  u32 channel, unsigned long timeout_ms)
{
	int ret;

	ret = ivpu_rpm_get(vdev);
	if (ret < 0)
		return ret;

	ret = ivpu_ipc_send_receive_active(vdev, req, expected_resp, resp, channel, timeout_ms);

	ivpu_rpm_put(vdev);
	return ret;
}

static bool
ivpu_ipc_match_consumer(struct ivpu_device *vdev, struct ivpu_ipc_consumer *cons,
			struct ivpu_ipc_hdr *ipc_hdr, struct vpu_jsm_msg *jsm_msg)
{
	if (cons->channel != ipc_hdr->channel)
		return false;

	if (!jsm_msg || jsm_msg->request_id == cons->request_id)
		return true;

	return false;
}

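/*
 * Hard IRQ part of IPC handling: drain the RX FIFO and dispatch each message
 * to the first consumer whose channel and request_id match. Messages that
 * cannot be dispatched or queued are immediately returned to the firmware.
 */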
void ivpu_ipc_irq_handler(struct ivpu_device *vdev, bool *wake_thread)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons;
	struct ivpu_ipc_hdr *ipc_hdr;
	struct vpu_jsm_msg *jsm_msg;
	unsigned long flags;
	bool dispatched;
	u32 vpu_addr;

	/*
	 * The driver must drain all messages from the IPC FIFO to clear the
	 * IPC interrupt. Unless the FIFO is drained to zero, no further IPC
	 * interrupts are generated.
	 */
	while (ivpu_hw_reg_ipc_rx_count_get(vdev)) {
		vpu_addr = ivpu_hw_reg_ipc_rx_addr_get(vdev);
		if (vpu_addr == REG_IO_ERROR) {
			ivpu_err_ratelimited(vdev, "Failed to read IPC rx addr register\n");
			return;
		}

		ipc_hdr = ivpu_to_cpu_addr(ipc->mem_rx, vpu_addr);
		if (!ipc_hdr) {
			ivpu_warn_ratelimited(vdev, "IPC msg 0x%x out of range\n", vpu_addr);
			continue;
		}
		ivpu_ipc_msg_dump(vdev, "RX", ipc_hdr, vpu_addr);

		jsm_msg = NULL;
		if (ipc_hdr->channel != IVPU_IPC_CHAN_BOOT_MSG) {
			jsm_msg = ivpu_to_cpu_addr(ipc->mem_rx, ipc_hdr->data_addr);
			if (!jsm_msg) {
				ivpu_warn_ratelimited(vdev, "JSM msg 0x%x out of range\n",
						      ipc_hdr->data_addr);
				ivpu_ipc_rx_mark_free(vdev, ipc_hdr, NULL);
				continue;
			}
			ivpu_jsm_msg_dump(vdev, "RX", jsm_msg, ipc_hdr->data_addr);
		}

		if (atomic_read(&ipc->rx_msg_count) > IPC_MAX_RX_MSG) {
			ivpu_warn_ratelimited(vdev, "IPC RX msg dropped, msg count %d\n",
					      IPC_MAX_RX_MSG);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
			continue;
		}

		dispatched = false;
		spin_lock_irqsave(&ipc->cons_lock, flags);
		list_for_each_entry(cons, &ipc->cons_list, link) {
			if (ivpu_ipc_match_consumer(vdev, cons, ipc_hdr, jsm_msg)) {
				ivpu_ipc_rx_msg_add(vdev, cons, ipc_hdr, jsm_msg);
				dispatched = true;
				break;
			}
		}
		spin_unlock_irqrestore(&ipc->cons_lock, flags);

		if (!dispatched) {
			ivpu_dbg(vdev, IPC, "IPC RX msg 0x%x dropped (no consumer)\n", vpu_addr);
			ivpu_ipc_rx_mark_free(vdev, ipc_hdr, jsm_msg);
		}
	}

	if (wake_thread)
		*wake_thread = !list_empty(&ipc->cb_msg_list);
}

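/*
 * Threaded IRQ part of IPC handling: invoke the rx_callback of every message
 * queued on ipc->cb_msg_list outside of hard IRQ context.
 */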
irqreturn_t ivpu_ipc_irq_thread_handler(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_rx_msg *rx_msg, *r;
	struct list_head cb_msg_list;

	INIT_LIST_HEAD(&cb_msg_list);

	spin_lock_irq(&ipc->cons_lock);
	list_splice_tail_init(&ipc->cb_msg_list, &cb_msg_list);
	spin_unlock_irq(&ipc->cons_lock);

	list_for_each_entry_safe(rx_msg, r, &cb_msg_list, link) {
		rx_msg->callback(vdev, rx_msg->ipc_hdr, rx_msg->jsm_msg);
		ivpu_ipc_rx_msg_del(vdev, rx_msg);
	}

	return IRQ_HANDLED;
}


int ivpu_ipc_init(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	int ret;

	ipc->mem_tx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_tx) {
		ivpu_err(vdev, "Failed to allocate mem_tx\n");
		return -ENOMEM;
	}

	ipc->mem_rx = ivpu_bo_create_global(vdev, SZ_16K, DRM_IVPU_BO_WC | DRM_IVPU_BO_MAPPABLE);
	if (!ipc->mem_rx) {
		ivpu_err(vdev, "Failed to allocate mem_rx\n");
		ret = -ENOMEM;
		goto err_free_tx;
	}

	ipc->mm_tx = devm_gen_pool_create(vdev->drm.dev, __ffs(IVPU_IPC_ALIGNMENT),
					  -1, "TX_IPC_JSM");
	if (IS_ERR(ipc->mm_tx)) {
		ret = PTR_ERR(ipc->mm_tx);
		ivpu_err(vdev, "Failed to create gen pool, %pe\n", ipc->mm_tx);
		goto err_free_rx;
	}

	ret = gen_pool_add(ipc->mm_tx, ipc->mem_tx->vpu_addr, ivpu_bo_size(ipc->mem_tx), -1);
	if (ret) {
		ivpu_err(vdev, "gen_pool_add failed, ret %d\n", ret);
		goto err_free_rx;
	}

	spin_lock_init(&ipc->cons_lock);
	INIT_LIST_HEAD(&ipc->cons_list);
	INIT_LIST_HEAD(&ipc->cb_msg_list);
	ret = drmm_mutex_init(&vdev->drm, &ipc->lock);
	if (ret) {
		ivpu_err(vdev, "Failed to initialize ipc->lock, ret %d\n", ret);
		goto err_free_rx;
	}
	ivpu_ipc_reset(vdev);
	return 0;

err_free_rx:
	ivpu_bo_free(ipc->mem_rx);
err_free_tx:
	ivpu_bo_free(ipc->mem_tx);
	return ret;
}

void ivpu_ipc_fini(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	drm_WARN_ON(&vdev->drm, ipc->on);
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cons_list));
	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));
	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);

	ivpu_ipc_mem_fini(vdev);
}

void ivpu_ipc_enable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	ipc->on = true;
	mutex_unlock(&ipc->lock);
}

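/*
 * Stop accepting new TX messages, abort all waiting synchronous consumers
 * and drop their queued RX messages. Callback messages are expected to have
 * been drained by the IRQ thread already.
 */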
void ivpu_ipc_disable(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;
	struct ivpu_ipc_consumer *cons, *c;
	struct ivpu_ipc_rx_msg *rx_msg, *r;

	drm_WARN_ON(&vdev->drm, !list_empty(&ipc->cb_msg_list));

	mutex_lock(&ipc->lock);
	ipc->on = false;
	mutex_unlock(&ipc->lock);

	spin_lock_irq(&ipc->cons_lock);
	list_for_each_entry_safe(cons, c, &ipc->cons_list, link) {
		spin_lock(&cons->rx_lock);
		if (!cons->rx_callback)
			cons->aborted = true;
		list_for_each_entry_safe(rx_msg, r, &cons->rx_msg_list, link)
			ivpu_ipc_rx_msg_del(vdev, rx_msg);
		spin_unlock(&cons->rx_lock);
		wake_up(&cons->rx_msg_wq);
	}
	spin_unlock_irq(&ipc->cons_lock);

	drm_WARN_ON(&vdev->drm, atomic_read(&ipc->rx_msg_count) > 0);
}

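/* Clear both IPC rings; only valid while IPC is disabled. */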
void ivpu_ipc_reset(struct ivpu_device *vdev)
{
	struct ivpu_ipc_info *ipc = vdev->ipc;

	mutex_lock(&ipc->lock);
	drm_WARN_ON(&vdev->drm, ipc->on);

	memset(ivpu_bo_vaddr(ipc->mem_tx), 0, ivpu_bo_size(ipc->mem_tx));
	memset(ivpu_bo_vaddr(ipc->mem_rx), 0, ivpu_bo_size(ipc->mem_rx));
	wmb(); /* Flush WC buffers for TX and RX rings */

	mutex_unlock(&ipc->lock);
}