1 | // SPDX-License-Identifier: GPL-2.0-only |
2 | |
3 | /* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */ |
4 | /* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */ |
5 | |
6 | #include <asm/byteorder.h> |
7 | #include <linux/completion.h> |
8 | #include <linux/crc32.h> |
9 | #include <linux/delay.h> |
10 | #include <linux/dma-mapping.h> |
11 | #include <linux/kref.h> |
12 | #include <linux/list.h> |
13 | #include <linux/mhi.h> |
14 | #include <linux/mm.h> |
15 | #include <linux/moduleparam.h> |
16 | #include <linux/mutex.h> |
17 | #include <linux/overflow.h> |
18 | #include <linux/pci.h> |
19 | #include <linux/scatterlist.h> |
20 | #include <linux/types.h> |
21 | #include <linux/uaccess.h> |
22 | #include <linux/workqueue.h> |
23 | #include <linux/wait.h> |
24 | #include <drm/drm_device.h> |
25 | #include <drm/drm_file.h> |
26 | #include <uapi/drm/qaic_accel.h> |
27 | |
28 | #include "qaic.h" |
29 | |
30 | #define MANAGE_MAGIC_NUMBER ((__force __le32)0x43494151) /* "QAIC" in little endian */ |
31 | #define QAIC_DBC_Q_GAP SZ_256 |
32 | #define QAIC_DBC_Q_BUF_ALIGN SZ_4K |
33 | #define QAIC_MANAGE_EXT_MSG_LENGTH SZ_64K /* Max DMA message length */ |
34 | #define QAIC_WRAPPER_MAX_SIZE SZ_4K |
35 | #define QAIC_MHI_RETRY_WAIT_MS 100 |
36 | #define QAIC_MHI_RETRY_MAX 20 |
37 | |
38 | static unsigned int control_resp_timeout_s = 60; /* 60 sec default */ |
39 | module_param(control_resp_timeout_s, uint, 0600); |
MODULE_PARM_DESC(control_resp_timeout_s, "Timeout for NNC responses from QSM");
41 | |
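/*
 * Kernel-side copy of the variable length payload of a qaic_manage_msg from
 * userspace. len and count mirror the userspace message header, and data
 * holds the transactions that are encoded to / decoded from the wire format.
 */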
42 | struct manage_msg { |
43 | u32 len; |
44 | u32 count; |
45 | u8 data[]; |
46 | }; |
47 | |
48 | /* |
49 | * wire encoding structures for the manage protocol. |
50 | * All fields are little endian on the wire |
51 | */ |
52 | struct wire_msg_hdr { |
53 | __le32 crc32; /* crc of everything following this field in the message */ |
54 | __le32 magic_number; |
55 | __le32 sequence_number; |
56 | __le32 len; /* length of this message */ |
57 | __le32 count; /* number of transactions in this message */ |
58 | __le32 handle; /* unique id to track the resources consumed */ |
59 | __le32 partition_id; /* partition id for the request (signed) */ |
60 | __le32 padding; /* must be 0 */ |
61 | } __packed; |
62 | |
63 | struct wire_msg { |
64 | struct wire_msg_hdr hdr; |
65 | u8 data[]; |
66 | } __packed; |
67 | |
68 | struct wire_trans_hdr { |
69 | __le32 type; |
70 | __le32 len; |
71 | } __packed; |
72 | |
/* Each message sent from the driver to the device is organized as a list of wrapper_msg structs */
74 | struct wrapper_msg { |
75 | struct list_head list; |
76 | struct kref ref_count; |
77 | u32 len; /* length of data to transfer */ |
78 | struct wrapper_list *head; |
79 | union { |
80 | struct wire_msg msg; |
81 | struct wire_trans_hdr trans; |
82 | }; |
83 | }; |
84 | |
85 | struct wrapper_list { |
86 | struct list_head list; |
87 | spinlock_t lock; /* Protects the list state during additions and removals */ |
88 | }; |
89 | |
90 | struct wire_trans_passthrough { |
91 | struct wire_trans_hdr hdr; |
92 | u8 data[]; |
93 | } __packed; |
94 | |
95 | struct wire_addr_size_pair { |
96 | __le64 addr; |
97 | __le64 size; |
98 | } __packed; |
99 | |
100 | struct wire_trans_dma_xfer { |
101 | struct wire_trans_hdr hdr; |
102 | __le32 tag; |
103 | __le32 count; |
104 | __le32 dma_chunk_id; |
105 | __le32 padding; |
106 | struct wire_addr_size_pair data[]; |
107 | } __packed; |
108 | |
109 | /* Initiated by device to continue the DMA xfer of a large piece of data */ |
110 | struct wire_trans_dma_xfer_cont { |
111 | struct wire_trans_hdr hdr; |
112 | __le32 dma_chunk_id; |
113 | __le32 padding; |
114 | __le64 xferred_size; |
115 | } __packed; |
116 | |
117 | struct wire_trans_activate_to_dev { |
118 | struct wire_trans_hdr hdr; |
119 | __le64 req_q_addr; |
120 | __le64 rsp_q_addr; |
121 | __le32 req_q_size; |
122 | __le32 rsp_q_size; |
123 | __le32 buf_len; |
124 | __le32 options; /* unused, but BIT(16) has meaning to the device */ |
125 | } __packed; |
126 | |
127 | struct wire_trans_activate_from_dev { |
128 | struct wire_trans_hdr hdr; |
129 | __le32 status; |
130 | __le32 dbc_id; |
131 | __le64 options; /* unused */ |
132 | } __packed; |
133 | |
134 | struct wire_trans_deactivate_from_dev { |
135 | struct wire_trans_hdr hdr; |
136 | __le32 status; |
137 | __le32 dbc_id; |
138 | } __packed; |
139 | |
140 | struct wire_trans_terminate_to_dev { |
141 | struct wire_trans_hdr hdr; |
142 | __le32 handle; |
143 | __le32 padding; |
144 | } __packed; |
145 | |
146 | struct wire_trans_terminate_from_dev { |
147 | struct wire_trans_hdr hdr; |
148 | __le32 status; |
149 | __le32 padding; |
150 | } __packed; |
151 | |
152 | struct wire_trans_status_to_dev { |
153 | struct wire_trans_hdr hdr; |
154 | } __packed; |
155 | |
156 | struct wire_trans_status_from_dev { |
157 | struct wire_trans_hdr hdr; |
158 | __le16 major; |
159 | __le16 minor; |
160 | __le32 status; |
161 | __le64 status_flags; |
162 | } __packed; |
163 | |
164 | struct wire_trans_validate_part_to_dev { |
165 | struct wire_trans_hdr hdr; |
166 | __le32 part_id; |
167 | __le32 padding; |
168 | } __packed; |
169 | |
170 | struct wire_trans_validate_part_from_dev { |
171 | struct wire_trans_hdr hdr; |
172 | __le32 status; |
173 | __le32 padding; |
174 | } __packed; |
175 | |
176 | struct xfer_queue_elem { |
177 | /* |
	 * Node in list of ongoing transfer requests on the control channel.
179 | * Maintained by root device struct. |
180 | */ |
181 | struct list_head list; |
182 | /* Sequence number of this transfer request */ |
183 | u32 seq_num; |
	/* Completion to wait on until the transfer request finishes */
185 | struct completion xfer_done; |
186 | /* Received data from device */ |
187 | void *buf; |
188 | }; |
189 | |
190 | struct dma_xfer { |
191 | /* Node in list of DMA transfers which is used for cleanup */ |
192 | struct list_head list; |
193 | /* SG table of memory used for DMA */ |
194 | struct sg_table *sgt; |
	/* Array of pages used for DMA */
196 | struct page **page_list; |
197 | /* Number of pages used for DMA */ |
198 | unsigned long nr_pages; |
199 | }; |
200 | |
201 | struct ioctl_resources { |
202 | /* List of all DMA transfers which is used later for cleanup */ |
203 | struct list_head dma_xfers; |
204 | /* Base address of request queue which belongs to a DBC */ |
205 | void *buf; |
206 | /* |
207 | * Base bus address of request queue which belongs to a DBC. Response |
208 | * queue base bus address can be calculated by adding size of request |
209 | * queue to base bus address of request queue. |
210 | */ |
211 | dma_addr_t dma_addr; |
	/* Total size of request queue and response queue in bytes */
	u32 total_size;
	/* Total number of elements that can be queued in each of the request and response queues */
215 | u32 nelem; |
216 | /* Base address of response queue which belongs to a DBC */ |
217 | void *rsp_q_base; |
218 | /* Status of the NNC message received */ |
219 | u32 status; |
220 | /* DBC id of the DBC received from device */ |
221 | u32 dbc_id; |
222 | /* |
223 | * DMA transfer request messages can be big in size and it may not be |
224 | * possible to send them in one shot. In such cases the messages are |
	 * broken into chunks; this field stores the ID of the current chunk.
226 | */ |
227 | u32 dma_chunk_id; |
228 | /* Total number of bytes transferred for a DMA xfer request */ |
229 | u64 xferred_dma_size; |
230 | /* Header of transaction message received from user. Used during DMA xfer request. */ |
231 | void *trans_hdr; |
232 | }; |
233 | |
234 | struct resp_work { |
235 | struct work_struct work; |
236 | struct qaic_device *qdev; |
237 | void *buf; |
238 | }; |
239 | |
240 | /* |
 * Since we're working with little endian messages, it's useful to be able to
 * increment without filling a whole line with conversions back and forth just
 * to add one to a message count.
244 | */ |
245 | static __le32 incr_le32(__le32 val) |
246 | { |
247 | return cpu_to_le32(le32_to_cpu(val) + 1); |
248 | } |
249 | |
250 | static u32 gen_crc(void *msg) |
251 | { |
252 | struct wrapper_list *wrappers = msg; |
253 | struct wrapper_msg *w; |
254 | u32 crc = ~0; |
255 | |
256 | list_for_each_entry(w, &wrappers->list, list) |
257 | crc = crc32(crc, &w->msg, w->len); |
258 | |
259 | return crc ^ ~0; |
260 | } |
261 | |
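/*
 * Stub used when the device reports that CRC is not enabled (see
 * get_cntl_version()); the CRC field of outgoing messages is then simply 0.
 */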
262 | static u32 gen_crc_stub(void *msg) |
263 | { |
264 | return 0; |
265 | } |
266 | |
267 | static bool valid_crc(void *msg) |
268 | { |
269 | struct wire_msg_hdr *hdr = msg; |
270 | bool ret; |
271 | u32 crc; |
272 | |
273 | /* |
274 | * The output of this algorithm is always converted to the native |
275 | * endianness. |
276 | */ |
277 | crc = le32_to_cpu(hdr->crc32); |
278 | hdr->crc32 = 0; |
279 | ret = (crc32(~0, msg, le32_to_cpu(hdr->len)) ^ ~0) == crc; |
280 | hdr->crc32 = cpu_to_le32(crc); |
281 | return ret; |
282 | } |
283 | |
284 | static bool valid_crc_stub(void *msg) |
285 | { |
286 | return true; |
287 | } |
288 | |
289 | static void free_wrapper(struct kref *ref) |
290 | { |
291 | struct wrapper_msg *wrapper = container_of(ref, struct wrapper_msg, ref_count); |
292 | |
	list_del(&wrapper->list);
	kfree(wrapper);
295 | } |
296 | |
297 | static void save_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources, |
298 | struct qaic_user *usr) |
299 | { |
300 | u32 dbc_id = resources->dbc_id; |
301 | |
302 | if (resources->buf) { |
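		/* Wait for any previous user of this DBC to release it */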
303 | wait_event_interruptible(qdev->dbc[dbc_id].dbc_release, !qdev->dbc[dbc_id].in_use); |
304 | qdev->dbc[dbc_id].req_q_base = resources->buf; |
305 | qdev->dbc[dbc_id].rsp_q_base = resources->rsp_q_base; |
306 | qdev->dbc[dbc_id].dma_addr = resources->dma_addr; |
307 | qdev->dbc[dbc_id].total_size = resources->total_size; |
308 | qdev->dbc[dbc_id].nelem = resources->nelem; |
309 | enable_dbc(qdev, dbc_id, usr); |
310 | qdev->dbc[dbc_id].in_use = true; |
311 | resources->buf = NULL; |
312 | } |
313 | } |
314 | |
315 | static void free_dbc_buf(struct qaic_device *qdev, struct ioctl_resources *resources) |
316 | { |
317 | if (resources->buf) |
		dma_free_coherent(&qdev->pdev->dev, resources->total_size, resources->buf,
				  resources->dma_addr);
320 | resources->buf = NULL; |
321 | } |
322 | |
323 | static void free_dma_xfers(struct qaic_device *qdev, struct ioctl_resources *resources) |
324 | { |
325 | struct dma_xfer *xfer; |
326 | struct dma_xfer *x; |
327 | int i; |
328 | |
329 | list_for_each_entry_safe(xfer, x, &resources->dma_xfers, list) { |
		dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
		sg_free_table(xfer->sgt);
		kfree(xfer->sgt);
		for (i = 0; i < xfer->nr_pages; ++i)
			put_page(xfer->page_list[i]);
		kfree(xfer->page_list);
		list_del(&xfer->list);
		kfree(xfer);
338 | } |
339 | } |
340 | |
341 | static struct wrapper_msg *add_wrapper(struct wrapper_list *wrappers, u32 size) |
342 | { |
343 | struct wrapper_msg *w = kzalloc(size, GFP_KERNEL); |
344 | |
345 | if (!w) |
346 | return NULL; |
	list_add_tail(&w->list, &wrappers->list);
	kref_init(&w->ref_count);
349 | w->head = wrappers; |
350 | return w; |
351 | } |
352 | |
353 | static int encode_passthrough(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, |
354 | u32 *user_len) |
355 | { |
356 | struct qaic_manage_trans_passthrough *in_trans = trans; |
357 | struct wire_trans_passthrough *out_trans; |
358 | struct wrapper_msg *trans_wrapper; |
359 | struct wrapper_msg *wrapper; |
360 | struct wire_msg *msg; |
361 | u32 msg_hdr_len; |
362 | |
363 | wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); |
364 | msg = &wrapper->msg; |
365 | msg_hdr_len = le32_to_cpu(msg->hdr.len); |
366 | |
367 | if (in_trans->hdr.len % 8 != 0) |
368 | return -EINVAL; |
369 | |
	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_EXT_MSG_LENGTH)
371 | return -ENOSPC; |
372 | |
373 | trans_wrapper = add_wrapper(wrappers, |
374 | offsetof(struct wrapper_msg, trans) + in_trans->hdr.len); |
375 | if (!trans_wrapper) |
376 | return -ENOMEM; |
377 | trans_wrapper->len = in_trans->hdr.len; |
378 | out_trans = (struct wire_trans_passthrough *)&trans_wrapper->trans; |
379 | |
380 | memcpy(out_trans->data, in_trans->data, in_trans->hdr.len - sizeof(in_trans->hdr)); |
381 | msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len); |
	msg->hdr.count = incr_le32(msg->hdr.count);
383 | *user_len += in_trans->hdr.len; |
384 | out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_PASSTHROUGH_TO_DEV); |
385 | out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len); |
386 | |
387 | return 0; |
388 | } |
389 | |
390 | /* returns error code for failure, 0 if enough pages alloc'd, 1 if dma_cont is needed */ |
391 | static int find_and_map_user_pages(struct qaic_device *qdev, |
392 | struct qaic_manage_trans_dma_xfer *in_trans, |
393 | struct ioctl_resources *resources, struct dma_xfer *xfer) |
394 | { |
395 | u64 xfer_start_addr, remaining, end, total; |
396 | unsigned long need_pages; |
397 | struct page **page_list; |
398 | unsigned long nr_pages; |
399 | struct sg_table *sgt; |
400 | int ret; |
401 | int i; |
402 | |
403 | if (check_add_overflow(in_trans->addr, resources->xferred_dma_size, &xfer_start_addr)) |
404 | return -EINVAL; |
405 | |
406 | if (in_trans->size < resources->xferred_dma_size) |
407 | return -EINVAL; |
408 | remaining = in_trans->size - resources->xferred_dma_size; |
409 | if (remaining == 0) |
410 | return 0; |
411 | |
412 | if (check_add_overflow(xfer_start_addr, remaining, &end)) |
413 | return -EINVAL; |
414 | |
415 | total = remaining + offset_in_page(xfer_start_addr); |
416 | if (total >= SIZE_MAX) |
417 | return -EINVAL; |
418 | |
419 | need_pages = DIV_ROUND_UP(total, PAGE_SIZE); |
420 | |
421 | nr_pages = need_pages; |
422 | |
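	/*
	 * Try progressively smaller page list allocations if memory is tight.
	 * If fewer pages than needed end up mapped, the caller is told (via a
	 * return value of 1) to send the rest through a DMA xfer continuation.
	 */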
423 | while (1) { |
		page_list = kmalloc_array(nr_pages, sizeof(*page_list), GFP_KERNEL | __GFP_NOWARN);
425 | if (!page_list) { |
426 | nr_pages = nr_pages / 2; |
427 | if (!nr_pages) |
428 | return -ENOMEM; |
429 | } else { |
430 | break; |
431 | } |
432 | } |
433 | |
	ret = get_user_pages_fast(xfer_start_addr, nr_pages, 0, page_list);
435 | if (ret < 0) |
436 | goto free_page_list; |
437 | if (ret != nr_pages) { |
438 | nr_pages = ret; |
439 | ret = -EFAULT; |
440 | goto put_pages; |
441 | } |
442 | |
	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
444 | if (!sgt) { |
445 | ret = -ENOMEM; |
446 | goto put_pages; |
447 | } |
448 | |
	ret = sg_alloc_table_from_pages(sgt, page_list, nr_pages,
					offset_in_page(xfer_start_addr),
					remaining, GFP_KERNEL);
452 | if (ret) { |
453 | ret = -ENOMEM; |
454 | goto free_sgt; |
455 | } |
456 | |
	ret = dma_map_sgtable(&qdev->pdev->dev, sgt, DMA_TO_DEVICE, 0);
458 | if (ret) |
459 | goto free_table; |
460 | |
461 | xfer->sgt = sgt; |
462 | xfer->page_list = page_list; |
463 | xfer->nr_pages = nr_pages; |
464 | |
465 | return need_pages > nr_pages ? 1 : 0; |
466 | |
467 | free_table: |
468 | sg_free_table(sgt); |
469 | free_sgt: |
	kfree(sgt);
put_pages:
	for (i = 0; i < nr_pages; ++i)
		put_page(page_list[i]);
free_page_list:
	kfree(page_list);
476 | return ret; |
477 | } |
478 | |
479 | /* returns error code for failure, 0 if everything was encoded, 1 if dma_cont is needed */ |
480 | static int encode_addr_size_pairs(struct dma_xfer *xfer, struct wrapper_list *wrappers, |
481 | struct ioctl_resources *resources, u32 msg_hdr_len, u32 *size, |
482 | struct wire_trans_dma_xfer **out_trans) |
483 | { |
484 | struct wrapper_msg *trans_wrapper; |
485 | struct sg_table *sgt = xfer->sgt; |
486 | struct wire_addr_size_pair *asp; |
487 | struct scatterlist *sg; |
488 | struct wrapper_msg *w; |
489 | unsigned int dma_len; |
490 | u64 dma_chunk_len; |
491 | void *boundary; |
492 | int nents_dma; |
493 | int nents; |
494 | int i; |
495 | |
496 | nents = sgt->nents; |
497 | nents_dma = nents; |
498 | *size = QAIC_MANAGE_EXT_MSG_LENGTH - msg_hdr_len - sizeof(**out_trans); |
499 | for_each_sgtable_sg(sgt, sg, i) { |
500 | *size -= sizeof(*asp); |
501 | /* Save 1K for possible follow-up transactions. */ |
502 | if (*size < SZ_1K) { |
503 | nents_dma = i; |
504 | break; |
505 | } |
506 | } |
507 | |
508 | trans_wrapper = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE); |
509 | if (!trans_wrapper) |
510 | return -ENOMEM; |
511 | *out_trans = (struct wire_trans_dma_xfer *)&trans_wrapper->trans; |
512 | |
513 | asp = (*out_trans)->data; |
514 | boundary = (void *)trans_wrapper + QAIC_WRAPPER_MAX_SIZE; |
515 | *size = 0; |
516 | |
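	/*
	 * The loop below writes each entry's DMA address as soon as its SG
	 * element is seen, but defers writing its size to the next iteration;
	 * the final entry is filled in after the loop. A new wrapper is
	 * allocated whenever the next entry would cross the wrapper boundary.
	 */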
517 | dma_len = 0; |
518 | w = trans_wrapper; |
519 | dma_chunk_len = 0; |
520 | for_each_sg(sgt->sgl, sg, nents_dma, i) { |
521 | asp->size = cpu_to_le64(dma_len); |
522 | dma_chunk_len += dma_len; |
523 | if (dma_len) { |
524 | asp++; |
525 | if ((void *)asp + sizeof(*asp) > boundary) { |
526 | w->len = (void *)asp - (void *)&w->msg; |
527 | *size += w->len; |
528 | w = add_wrapper(wrappers, QAIC_WRAPPER_MAX_SIZE); |
529 | if (!w) |
530 | return -ENOMEM; |
531 | boundary = (void *)w + QAIC_WRAPPER_MAX_SIZE; |
532 | asp = (struct wire_addr_size_pair *)&w->msg; |
533 | } |
534 | } |
535 | asp->addr = cpu_to_le64(sg_dma_address(sg)); |
536 | dma_len = sg_dma_len(sg); |
537 | } |
538 | /* finalize the last segment */ |
539 | asp->size = cpu_to_le64(dma_len); |
540 | w->len = (void *)asp + sizeof(*asp) - (void *)&w->msg; |
541 | *size += w->len; |
542 | dma_chunk_len += dma_len; |
543 | resources->xferred_dma_size += dma_chunk_len; |
544 | |
545 | return nents_dma < nents ? 1 : 0; |
546 | } |
547 | |
548 | static void cleanup_xfer(struct qaic_device *qdev, struct dma_xfer *xfer) |
549 | { |
550 | int i; |
551 | |
	dma_unmap_sgtable(&qdev->pdev->dev, xfer->sgt, DMA_TO_DEVICE, 0);
	sg_free_table(xfer->sgt);
	kfree(xfer->sgt);
	for (i = 0; i < xfer->nr_pages; ++i)
		put_page(xfer->page_list[i]);
	kfree(xfer->page_list);
558 | } |
559 | |
560 | static int encode_dma(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, |
561 | u32 *user_len, struct ioctl_resources *resources, struct qaic_user *usr) |
562 | { |
563 | struct qaic_manage_trans_dma_xfer *in_trans = trans; |
564 | struct wire_trans_dma_xfer *out_trans; |
565 | struct wrapper_msg *wrapper; |
566 | struct dma_xfer *xfer; |
567 | struct wire_msg *msg; |
568 | bool need_cont_dma; |
569 | u32 msg_hdr_len; |
570 | u32 size; |
571 | int ret; |
572 | |
573 | wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); |
574 | msg = &wrapper->msg; |
575 | msg_hdr_len = le32_to_cpu(msg->hdr.len); |
576 | |
577 | /* There should be enough space to hold at least one ASP entry. */ |
	if (size_add(msg_hdr_len, sizeof(*out_trans) + sizeof(struct wire_addr_size_pair)) >
579 | QAIC_MANAGE_EXT_MSG_LENGTH) |
580 | return -ENOMEM; |
581 | |
	xfer = kmalloc(sizeof(*xfer), GFP_KERNEL);
583 | if (!xfer) |
584 | return -ENOMEM; |
585 | |
586 | ret = find_and_map_user_pages(qdev, in_trans, resources, xfer); |
587 | if (ret < 0) |
588 | goto free_xfer; |
589 | |
590 | need_cont_dma = (bool)ret; |
591 | |
	ret = encode_addr_size_pairs(xfer, wrappers, resources, msg_hdr_len, &size, &out_trans);
593 | if (ret < 0) |
594 | goto cleanup_xfer; |
595 | |
596 | need_cont_dma = need_cont_dma || (bool)ret; |
597 | |
598 | msg->hdr.len = cpu_to_le32(msg_hdr_len + size); |
	msg->hdr.count = incr_le32(msg->hdr.count);
600 | |
601 | out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV); |
602 | out_trans->hdr.len = cpu_to_le32(size); |
603 | out_trans->tag = cpu_to_le32(in_trans->tag); |
604 | out_trans->count = cpu_to_le32((size - sizeof(*out_trans)) / |
605 | sizeof(struct wire_addr_size_pair)); |
606 | |
607 | *user_len += in_trans->hdr.len; |
608 | |
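	/*
	 * A dma_chunk_id of 0 means no continuation is in progress; reuse the
	 * id already assigned to this transfer, or allocate a non-zero one if
	 * a continuation is now needed.
	 */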
609 | if (resources->dma_chunk_id) { |
610 | out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id); |
611 | } else if (need_cont_dma) { |
612 | while (resources->dma_chunk_id == 0) |
			resources->dma_chunk_id = atomic_inc_return(&usr->chunk_id);
614 | |
615 | out_trans->dma_chunk_id = cpu_to_le32(resources->dma_chunk_id); |
616 | } |
617 | resources->trans_hdr = trans; |
618 | |
	list_add(&xfer->list, &resources->dma_xfers);
620 | return 0; |
621 | |
622 | cleanup_xfer: |
623 | cleanup_xfer(qdev, xfer); |
624 | free_xfer: |
	kfree(xfer);
626 | return ret; |
627 | } |
628 | |
629 | static int encode_activate(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, |
630 | u32 *user_len, struct ioctl_resources *resources) |
631 | { |
632 | struct qaic_manage_trans_activate_to_dev *in_trans = trans; |
633 | struct wire_trans_activate_to_dev *out_trans; |
634 | struct wrapper_msg *trans_wrapper; |
635 | struct wrapper_msg *wrapper; |
636 | struct wire_msg *msg; |
637 | dma_addr_t dma_addr; |
638 | u32 msg_hdr_len; |
639 | void *buf; |
640 | u32 nelem; |
641 | u32 size; |
642 | int ret; |
643 | |
644 | wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); |
645 | msg = &wrapper->msg; |
646 | msg_hdr_len = le32_to_cpu(msg->hdr.len); |
647 | |
	if (size_add(msg_hdr_len, sizeof(*out_trans)) > QAIC_MANAGE_MAX_MSG_LENGTH)
649 | return -ENOSPC; |
650 | |
651 | if (!in_trans->queue_size) |
652 | return -EINVAL; |
653 | |
654 | if (in_trans->pad) |
655 | return -EINVAL; |
656 | |
657 | nelem = in_trans->queue_size; |
658 | size = (get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) * nelem; |
659 | if (size / nelem != get_dbc_req_elem_size() + get_dbc_rsp_elem_size()) |
660 | return -EINVAL; |
661 | |
662 | if (size + QAIC_DBC_Q_GAP + QAIC_DBC_Q_BUF_ALIGN < size) |
663 | return -EINVAL; |
664 | |
665 | size = ALIGN((size + QAIC_DBC_Q_GAP), QAIC_DBC_Q_BUF_ALIGN); |
666 | |
	buf = dma_alloc_coherent(&qdev->pdev->dev, size, &dma_addr, GFP_KERNEL);
668 | if (!buf) |
669 | return -ENOMEM; |
670 | |
671 | trans_wrapper = add_wrapper(wrappers, |
672 | offsetof(struct wrapper_msg, trans) + sizeof(*out_trans)); |
673 | if (!trans_wrapper) { |
674 | ret = -ENOMEM; |
675 | goto free_dma; |
676 | } |
677 | trans_wrapper->len = sizeof(*out_trans); |
678 | out_trans = (struct wire_trans_activate_to_dev *)&trans_wrapper->trans; |
679 | |
680 | out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_ACTIVATE_TO_DEV); |
681 | out_trans->hdr.len = cpu_to_le32(sizeof(*out_trans)); |
682 | out_trans->buf_len = cpu_to_le32(size); |
683 | out_trans->req_q_addr = cpu_to_le64(dma_addr); |
684 | out_trans->req_q_size = cpu_to_le32(nelem); |
685 | out_trans->rsp_q_addr = cpu_to_le64(dma_addr + size - nelem * get_dbc_rsp_elem_size()); |
686 | out_trans->rsp_q_size = cpu_to_le32(nelem); |
687 | out_trans->options = cpu_to_le32(in_trans->options); |
688 | |
689 | *user_len += in_trans->hdr.len; |
690 | msg->hdr.len = cpu_to_le32(msg_hdr_len + sizeof(*out_trans)); |
	msg->hdr.count = incr_le32(msg->hdr.count);
692 | |
693 | resources->buf = buf; |
694 | resources->dma_addr = dma_addr; |
695 | resources->total_size = size; |
696 | resources->nelem = nelem; |
697 | resources->rsp_q_base = buf + size - nelem * get_dbc_rsp_elem_size(); |
698 | return 0; |
699 | |
700 | free_dma: |
	dma_free_coherent(&qdev->pdev->dev, size, buf, dma_addr);
702 | return ret; |
703 | } |
704 | |
705 | static int encode_deactivate(struct qaic_device *qdev, void *trans, |
706 | u32 *user_len, struct qaic_user *usr) |
707 | { |
708 | struct qaic_manage_trans_deactivate *in_trans = trans; |
709 | |
710 | if (in_trans->dbc_id >= qdev->num_dbc || in_trans->pad) |
711 | return -EINVAL; |
712 | |
713 | *user_len += in_trans->hdr.len; |
714 | |
	return disable_dbc(qdev, in_trans->dbc_id, usr);
716 | } |
717 | |
718 | static int encode_status(struct qaic_device *qdev, void *trans, struct wrapper_list *wrappers, |
719 | u32 *user_len) |
720 | { |
721 | struct qaic_manage_trans_status_to_dev *in_trans = trans; |
722 | struct wire_trans_status_to_dev *out_trans; |
723 | struct wrapper_msg *trans_wrapper; |
724 | struct wrapper_msg *wrapper; |
725 | struct wire_msg *msg; |
726 | u32 msg_hdr_len; |
727 | |
728 | wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); |
729 | msg = &wrapper->msg; |
730 | msg_hdr_len = le32_to_cpu(msg->hdr.len); |
731 | |
	if (size_add(msg_hdr_len, in_trans->hdr.len) > QAIC_MANAGE_MAX_MSG_LENGTH)
733 | return -ENOSPC; |
734 | |
	trans_wrapper = add_wrapper(wrappers, sizeof(*trans_wrapper));
736 | if (!trans_wrapper) |
737 | return -ENOMEM; |
738 | |
739 | trans_wrapper->len = sizeof(*out_trans); |
740 | out_trans = (struct wire_trans_status_to_dev *)&trans_wrapper->trans; |
741 | |
742 | out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_STATUS_TO_DEV); |
743 | out_trans->hdr.len = cpu_to_le32(in_trans->hdr.len); |
744 | msg->hdr.len = cpu_to_le32(msg_hdr_len + in_trans->hdr.len); |
	msg->hdr.count = incr_le32(msg->hdr.count);
746 | *user_len += in_trans->hdr.len; |
747 | |
748 | return 0; |
749 | } |
750 | |
751 | static int encode_message(struct qaic_device *qdev, struct manage_msg *user_msg, |
752 | struct wrapper_list *wrappers, struct ioctl_resources *resources, |
753 | struct qaic_user *usr) |
754 | { |
755 | struct qaic_manage_trans_hdr *trans_hdr; |
756 | struct wrapper_msg *wrapper; |
757 | struct wire_msg *msg; |
758 | u32 user_len = 0; |
759 | int ret; |
760 | int i; |
761 | |
762 | if (!user_msg->count || |
763 | user_msg->len < sizeof(*trans_hdr)) { |
764 | ret = -EINVAL; |
765 | goto out; |
766 | } |
767 | |
768 | wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); |
769 | msg = &wrapper->msg; |
770 | |
771 | msg->hdr.len = cpu_to_le32(sizeof(msg->hdr)); |
772 | |
773 | if (resources->dma_chunk_id) { |
		ret = encode_dma(qdev, resources->trans_hdr, wrappers, &user_len, resources, usr);
775 | msg->hdr.count = cpu_to_le32(1); |
776 | goto out; |
777 | } |
778 | |
779 | for (i = 0; i < user_msg->count; ++i) { |
780 | if (user_len > user_msg->len - sizeof(*trans_hdr)) { |
781 | ret = -EINVAL; |
782 | break; |
783 | } |
784 | trans_hdr = (struct qaic_manage_trans_hdr *)(user_msg->data + user_len); |
785 | if (trans_hdr->len < sizeof(trans_hdr) || |
		    size_add(user_len, trans_hdr->len) > user_msg->len) {
787 | ret = -EINVAL; |
788 | break; |
789 | } |
790 | |
791 | switch (trans_hdr->type) { |
792 | case QAIC_TRANS_PASSTHROUGH_FROM_USR: |
			ret = encode_passthrough(qdev, trans_hdr, wrappers, &user_len);
			break;
		case QAIC_TRANS_DMA_XFER_FROM_USR:
			ret = encode_dma(qdev, trans_hdr, wrappers, &user_len, resources, usr);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_USR:
			ret = encode_activate(qdev, trans_hdr, wrappers, &user_len, resources);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_USR:
			ret = encode_deactivate(qdev, trans_hdr, &user_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_USR:
			ret = encode_status(qdev, trans_hdr, wrappers, &user_len);
806 | break; |
807 | default: |
808 | ret = -EINVAL; |
809 | break; |
810 | } |
811 | |
812 | if (ret) |
813 | break; |
814 | } |
815 | |
816 | if (user_len != user_msg->len) |
817 | ret = -EINVAL; |
818 | out: |
819 | if (ret) { |
820 | free_dma_xfers(qdev, resources); |
821 | free_dbc_buf(qdev, resources); |
822 | return ret; |
823 | } |
824 | |
825 | return 0; |
826 | } |
827 | |
828 | static int decode_passthrough(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, |
829 | u32 *msg_len) |
830 | { |
831 | struct qaic_manage_trans_passthrough *out_trans; |
832 | struct wire_trans_passthrough *in_trans = trans; |
833 | u32 len; |
834 | |
835 | out_trans = (void *)user_msg->data + user_msg->len; |
836 | |
837 | len = le32_to_cpu(in_trans->hdr.len); |
838 | if (len % 8 != 0) |
839 | return -EINVAL; |
840 | |
841 | if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH) |
842 | return -ENOSPC; |
843 | |
844 | memcpy(out_trans->data, in_trans->data, len - sizeof(in_trans->hdr)); |
845 | user_msg->len += len; |
846 | *msg_len += len; |
847 | out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type); |
848 | out_trans->hdr.len = len; |
849 | |
850 | return 0; |
851 | } |
852 | |
853 | static int decode_activate(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, |
854 | u32 *msg_len, struct ioctl_resources *resources, struct qaic_user *usr) |
855 | { |
856 | struct qaic_manage_trans_activate_from_dev *out_trans; |
857 | struct wire_trans_activate_from_dev *in_trans = trans; |
858 | u32 len; |
859 | |
860 | out_trans = (void *)user_msg->data + user_msg->len; |
861 | |
862 | len = le32_to_cpu(in_trans->hdr.len); |
863 | if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH) |
864 | return -ENOSPC; |
865 | |
866 | user_msg->len += len; |
867 | *msg_len += len; |
868 | out_trans->hdr.type = le32_to_cpu(in_trans->hdr.type); |
869 | out_trans->hdr.len = len; |
870 | out_trans->status = le32_to_cpu(in_trans->status); |
871 | out_trans->dbc_id = le32_to_cpu(in_trans->dbc_id); |
872 | out_trans->options = le64_to_cpu(in_trans->options); |
873 | |
874 | if (!resources->buf) |
875 | /* how did we get an activate response without a request? */ |
876 | return -EINVAL; |
877 | |
878 | if (out_trans->dbc_id >= qdev->num_dbc) |
879 | /* |
880 | * The device assigned an invalid resource, which should never |
881 | * happen. Return an error so the user can try to recover. |
882 | */ |
883 | return -ENODEV; |
884 | |
885 | if (out_trans->status) |
886 | /* |
		 * Allocating resources failed on the device side. This is not
		 * expected behavior; the user is expected to handle this situation.
889 | */ |
890 | return -ECANCELED; |
891 | |
892 | resources->status = out_trans->status; |
893 | resources->dbc_id = out_trans->dbc_id; |
894 | save_dbc_buf(qdev, resources, usr); |
895 | |
896 | return 0; |
897 | } |
898 | |
899 | static int decode_deactivate(struct qaic_device *qdev, void *trans, u32 *msg_len, |
900 | struct qaic_user *usr) |
901 | { |
902 | struct wire_trans_deactivate_from_dev *in_trans = trans; |
903 | u32 dbc_id = le32_to_cpu(in_trans->dbc_id); |
904 | u32 status = le32_to_cpu(in_trans->status); |
905 | |
906 | if (dbc_id >= qdev->num_dbc) |
907 | /* |
908 | * The device assigned an invalid resource, which should never |
909 | * happen. Inject an error so the user can try to recover. |
910 | */ |
911 | return -ENODEV; |
912 | |
913 | if (status) { |
914 | /* |
915 | * Releasing resources failed on the device side, which puts |
916 | * us in a bind since they may still be in use, so enable the |
917 | * dbc. User is expected to retry deactivation. |
918 | */ |
919 | enable_dbc(qdev, dbc_id, usr); |
920 | return -ECANCELED; |
921 | } |
922 | |
923 | release_dbc(qdev, dbc_id); |
924 | *msg_len += sizeof(*in_trans); |
925 | |
926 | return 0; |
927 | } |
928 | |
929 | static int decode_status(struct qaic_device *qdev, void *trans, struct manage_msg *user_msg, |
930 | u32 *user_len, struct wire_msg *msg) |
931 | { |
932 | struct qaic_manage_trans_status_from_dev *out_trans; |
933 | struct wire_trans_status_from_dev *in_trans = trans; |
934 | u32 len; |
935 | |
936 | out_trans = (void *)user_msg->data + user_msg->len; |
937 | |
938 | len = le32_to_cpu(in_trans->hdr.len); |
939 | if (user_msg->len + len > QAIC_MANAGE_MAX_MSG_LENGTH) |
940 | return -ENOSPC; |
941 | |
942 | out_trans->hdr.type = QAIC_TRANS_STATUS_FROM_DEV; |
943 | out_trans->hdr.len = len; |
944 | out_trans->major = le16_to_cpu(in_trans->major); |
945 | out_trans->minor = le16_to_cpu(in_trans->minor); |
946 | out_trans->status_flags = le64_to_cpu(in_trans->status_flags); |
947 | out_trans->status = le32_to_cpu(in_trans->status); |
948 | *user_len += le32_to_cpu(in_trans->hdr.len); |
949 | user_msg->len += len; |
950 | |
951 | if (out_trans->status) |
952 | return -ECANCELED; |
953 | if (out_trans->status_flags & BIT(0) && !valid_crc(msg)) |
954 | return -EPIPE; |
955 | |
956 | return 0; |
957 | } |
958 | |
959 | static int decode_message(struct qaic_device *qdev, struct manage_msg *user_msg, |
960 | struct wire_msg *msg, struct ioctl_resources *resources, |
961 | struct qaic_user *usr) |
962 | { |
963 | u32 msg_hdr_len = le32_to_cpu(msg->hdr.len); |
964 | struct wire_trans_hdr *trans_hdr; |
965 | u32 msg_len = 0; |
966 | int ret; |
967 | int i; |
968 | |
969 | if (msg_hdr_len < sizeof(*trans_hdr) || |
970 | msg_hdr_len > QAIC_MANAGE_MAX_MSG_LENGTH) |
971 | return -EINVAL; |
972 | |
973 | user_msg->len = 0; |
974 | user_msg->count = le32_to_cpu(msg->hdr.count); |
975 | |
976 | for (i = 0; i < user_msg->count; ++i) { |
977 | u32 hdr_len; |
978 | |
979 | if (msg_len > msg_hdr_len - sizeof(*trans_hdr)) |
980 | return -EINVAL; |
981 | |
982 | trans_hdr = (struct wire_trans_hdr *)(msg->data + msg_len); |
983 | hdr_len = le32_to_cpu(trans_hdr->len); |
984 | if (hdr_len < sizeof(*trans_hdr) || |
		    size_add(msg_len, hdr_len) > msg_hdr_len)
986 | return -EINVAL; |
987 | |
988 | switch (le32_to_cpu(trans_hdr->type)) { |
989 | case QAIC_TRANS_PASSTHROUGH_FROM_DEV: |
			ret = decode_passthrough(qdev, trans_hdr, user_msg, &msg_len);
			break;
		case QAIC_TRANS_ACTIVATE_FROM_DEV:
			ret = decode_activate(qdev, trans_hdr, user_msg, &msg_len, resources, usr);
			break;
		case QAIC_TRANS_DEACTIVATE_FROM_DEV:
			ret = decode_deactivate(qdev, trans_hdr, &msg_len, usr);
			break;
		case QAIC_TRANS_STATUS_FROM_DEV:
			ret = decode_status(qdev, trans_hdr, user_msg, &msg_len, msg);
1000 | break; |
1001 | default: |
1002 | return -EINVAL; |
1003 | } |
1004 | |
1005 | if (ret) |
1006 | return ret; |
1007 | } |
1008 | |
1009 | if (msg_len != (msg_hdr_len - sizeof(msg->hdr))) |
1010 | return -EINVAL; |
1011 | |
1012 | return 0; |
1013 | } |
1014 | |
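/*
 * Send the wrappers to the device over the control channel and wait for the
 * response. Must be called with cntl_mutex held; the mutex is released on
 * every path before returning. Returns the response buffer, or an ERR_PTR()
 * on failure.
 */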
1015 | static void *msg_xfer(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 seq_num, |
1016 | bool ignore_signal) |
1017 | { |
1018 | struct xfer_queue_elem elem; |
1019 | struct wire_msg *out_buf; |
1020 | struct wrapper_msg *w; |
1021 | long ret = -EAGAIN; |
1022 | int xfer_count = 0; |
1023 | int retry_count; |
1024 | |
1025 | /* Allow QAIC_BOOT state since we need to check control protocol version */ |
1026 | if (qdev->dev_state == QAIC_OFFLINE) { |
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(-ENODEV);
1029 | } |
1030 | |
1031 | /* Attempt to avoid a partial commit of a message */ |
1032 | list_for_each_entry(w, &wrappers->list, list) |
1033 | xfer_count++; |
1034 | |
1035 | for (retry_count = 0; retry_count < QAIC_MHI_RETRY_MAX; retry_count++) { |
		if (xfer_count <= mhi_get_free_desc_count(qdev->cntl_ch, DMA_TO_DEVICE)) {
1037 | ret = 0; |
1038 | break; |
1039 | } |
1040 | msleep_interruptible(QAIC_MHI_RETRY_WAIT_MS); |
1041 | if (signal_pending(current)) |
1042 | break; |
1043 | } |
1044 | |
1045 | if (ret) { |
		mutex_unlock(&qdev->cntl_mutex);
		return ERR_PTR(ret);
1048 | } |
1049 | |
1050 | elem.seq_num = seq_num; |
1051 | elem.buf = NULL; |
	init_completion(&elem.xfer_done);
1053 | if (likely(!qdev->cntl_lost_buf)) { |
1054 | /* |
1055 | * The max size of request to device is QAIC_MANAGE_EXT_MSG_LENGTH. |
1056 | * The max size of response from device is QAIC_MANAGE_MAX_MSG_LENGTH. |
1057 | */ |
1058 | out_buf = kmalloc(QAIC_MANAGE_MAX_MSG_LENGTH, GFP_KERNEL); |
1059 | if (!out_buf) { |
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(-ENOMEM);
1062 | } |
1063 | |
		ret = mhi_queue_buf(qdev->cntl_ch, DMA_FROM_DEVICE, out_buf,
				    QAIC_MANAGE_MAX_MSG_LENGTH, MHI_EOT);
		if (ret) {
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
1069 | } |
1070 | } else { |
1071 | /* |
		 * We lost a buffer because we queued a recv buf, but then
		 * queuing the corresponding tx buf failed. To try to avoid
		 * a memory leak, let's reclaim it and use it for this
1075 | * transaction. |
1076 | */ |
1077 | qdev->cntl_lost_buf = false; |
1078 | } |
1079 | |
1080 | list_for_each_entry(w, &wrappers->list, list) { |
		kref_get(&w->ref_count);
1082 | retry_count = 0; |
		ret = mhi_queue_buf(qdev->cntl_ch, DMA_TO_DEVICE, &w->msg, w->len,
				    list_is_last(&w->list, &wrappers->list) ? MHI_EOT : MHI_CHAIN);
1085 | if (ret) { |
1086 | qdev->cntl_lost_buf = true; |
			kref_put(&w->ref_count, free_wrapper);
			mutex_unlock(&qdev->cntl_mutex);
			return ERR_PTR(ret);
1090 | } |
1091 | } |
1092 | |
	list_add_tail(&elem.list, &qdev->cntl_xfer_list);
	mutex_unlock(&qdev->cntl_mutex);
1095 | |
1096 | if (ignore_signal) |
		ret = wait_for_completion_timeout(&elem.xfer_done, control_resp_timeout_s * HZ);
	else
		ret = wait_for_completion_interruptible_timeout(&elem.xfer_done,
								control_resp_timeout_s * HZ);
1101 | /* |
	 * not using _interruptible because we have to clean up or we'll
	 * likely cause memory corruption
1104 | */ |
1105 | mutex_lock(&qdev->cntl_mutex); |
	if (!list_empty(&elem.list))
		list_del(&elem.list);
1108 | if (!ret && !elem.buf) |
1109 | ret = -ETIMEDOUT; |
1110 | else if (ret > 0 && !elem.buf) |
1111 | ret = -EIO; |
	mutex_unlock(&qdev->cntl_mutex);
1113 | |
1114 | if (ret < 0) { |
		kfree(elem.buf);
		return ERR_PTR(ret);
	} else if (!qdev->valid_crc(elem.buf)) {
		kfree(elem.buf);
		return ERR_PTR(-EPIPE);
1120 | } |
1121 | |
1122 | return elem.buf; |
1123 | } |
1124 | |
1125 | /* Add a transaction to abort the outstanding DMA continuation */ |
1126 | static int abort_dma_cont(struct qaic_device *qdev, struct wrapper_list *wrappers, u32 dma_chunk_id) |
1127 | { |
1128 | struct wire_trans_dma_xfer *out_trans; |
1129 | u32 size = sizeof(*out_trans); |
1130 | struct wrapper_msg *wrapper; |
1131 | struct wrapper_msg *w; |
1132 | struct wire_msg *msg; |
1133 | |
1134 | wrapper = list_first_entry(&wrappers->list, struct wrapper_msg, list); |
1135 | msg = &wrapper->msg; |
1136 | |
1137 | /* Remove all but the first wrapper which has the msg header */ |
1138 | list_for_each_entry_safe(wrapper, w, &wrappers->list, list) |
		if (!list_is_first(&wrapper->list, &wrappers->list))
			kref_put(&wrapper->ref_count, free_wrapper);
1141 | |
	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
1143 | |
1144 | if (!wrapper) |
1145 | return -ENOMEM; |
1146 | |
1147 | out_trans = (struct wire_trans_dma_xfer *)&wrapper->trans; |
1148 | out_trans->hdr.type = cpu_to_le32(QAIC_TRANS_DMA_XFER_TO_DEV); |
1149 | out_trans->hdr.len = cpu_to_le32(size); |
1150 | out_trans->tag = cpu_to_le32(0); |
1151 | out_trans->count = cpu_to_le32(0); |
1152 | out_trans->dma_chunk_id = cpu_to_le32(dma_chunk_id); |
1153 | |
1154 | msg->hdr.len = cpu_to_le32(size + sizeof(*msg)); |
1155 | msg->hdr.count = cpu_to_le32(1); |
1156 | wrapper->len = size; |
1157 | |
1158 | return 0; |
1159 | } |
1160 | |
1161 | static struct wrapper_list *alloc_wrapper_list(void) |
1162 | { |
1163 | struct wrapper_list *wrappers; |
1164 | |
	wrappers = kmalloc(sizeof(*wrappers), GFP_KERNEL);
	if (!wrappers)
		return NULL;
	INIT_LIST_HEAD(&wrappers->list);
1169 | spin_lock_init(&wrappers->lock); |
1170 | |
1171 | return wrappers; |
1172 | } |
1173 | |
1174 | static int qaic_manage_msg_xfer(struct qaic_device *qdev, struct qaic_user *usr, |
1175 | struct manage_msg *user_msg, struct ioctl_resources *resources, |
1176 | struct wire_msg **rsp) |
1177 | { |
1178 | struct wrapper_list *wrappers; |
1179 | struct wrapper_msg *wrapper; |
1180 | struct wrapper_msg *w; |
1181 | bool all_done = false; |
1182 | struct wire_msg *msg; |
1183 | int ret; |
1184 | |
1185 | wrappers = alloc_wrapper_list(); |
1186 | if (!wrappers) |
1187 | return -ENOMEM; |
1188 | |
	wrapper = add_wrapper(wrappers, sizeof(*wrapper));
	if (!wrapper) {
		kfree(wrappers);
1192 | return -ENOMEM; |
1193 | } |
1194 | |
1195 | msg = &wrapper->msg; |
1196 | wrapper->len = sizeof(*msg); |
1197 | |
1198 | ret = encode_message(qdev, user_msg, wrappers, resources, usr); |
1199 | if (ret && resources->dma_chunk_id) |
		ret = abort_dma_cont(qdev, wrappers, resources->dma_chunk_id);
1201 | if (ret) |
1202 | goto encode_failed; |
1203 | |
1204 | ret = mutex_lock_interruptible(&qdev->cntl_mutex); |
1205 | if (ret) |
1206 | goto lock_failed; |
1207 | |
1208 | msg->hdr.magic_number = MANAGE_MAGIC_NUMBER; |
1209 | msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++); |
1210 | |
1211 | if (usr) { |
1212 | msg->hdr.handle = cpu_to_le32(usr->handle); |
1213 | msg->hdr.partition_id = cpu_to_le32(usr->qddev->partition_id); |
1214 | } else { |
1215 | msg->hdr.handle = 0; |
1216 | msg->hdr.partition_id = cpu_to_le32(QAIC_NO_PARTITION); |
1217 | } |
1218 | |
1219 | msg->hdr.padding = cpu_to_le32(0); |
1220 | msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers)); |
1221 | |
1222 | /* msg_xfer releases the mutex */ |
	*rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, false);
	if (IS_ERR(*rsp))
		ret = PTR_ERR(*rsp);
1226 | |
1227 | lock_failed: |
1228 | free_dma_xfers(qdev, resources); |
1229 | encode_failed: |
	spin_lock(&wrappers->lock);
	list_for_each_entry_safe(wrapper, w, &wrappers->list, list)
		kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);
	if (all_done)
		kfree(wrappers);
1237 | |
1238 | return ret; |
1239 | } |
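/*
 * Encode and send a manage message, re-sending the DMA transfer transaction
 * for as long as the device answers with a DMA xfer continuation (i.e. the
 * transfer could not be described in a single message), then decode the
 * final response for the user.
 */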
1240 | |
1241 | static int qaic_manage(struct qaic_device *qdev, struct qaic_user *usr, struct manage_msg *user_msg) |
1242 | { |
1243 | struct wire_trans_dma_xfer_cont *dma_cont = NULL; |
1244 | struct ioctl_resources resources; |
1245 | struct wire_msg *rsp = NULL; |
1246 | int ret; |
1247 | |
1248 | memset(&resources, 0, sizeof(struct ioctl_resources)); |
1249 | |
	INIT_LIST_HEAD(&resources.dma_xfers);
1251 | |
1252 | if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH || |
1253 | user_msg->count > QAIC_MANAGE_MAX_MSG_LENGTH / sizeof(struct qaic_manage_trans_hdr)) |
1254 | return -EINVAL; |
1255 | |
1256 | dma_xfer_continue: |
	ret = qaic_manage_msg_xfer(qdev, usr, user_msg, &resources, &rsp);
1258 | if (ret) |
1259 | return ret; |
1260 | /* dma_cont should be the only transaction if present */ |
1261 | if (le32_to_cpu(rsp->hdr.count) == 1) { |
1262 | dma_cont = (struct wire_trans_dma_xfer_cont *)rsp->data; |
1263 | if (le32_to_cpu(dma_cont->hdr.type) != QAIC_TRANS_DMA_XFER_CONT) |
1264 | dma_cont = NULL; |
1265 | } |
1266 | if (dma_cont) { |
1267 | if (le32_to_cpu(dma_cont->dma_chunk_id) == resources.dma_chunk_id && |
1268 | le64_to_cpu(dma_cont->xferred_size) == resources.xferred_dma_size) { |
			kfree(rsp);
1270 | goto dma_xfer_continue; |
1271 | } |
1272 | |
1273 | ret = -EINVAL; |
1274 | goto dma_cont_failed; |
1275 | } |
1276 | |
	ret = decode_message(qdev, user_msg, rsp, &resources, usr);
1278 | |
1279 | dma_cont_failed: |
	free_dbc_buf(qdev, &resources);
	kfree(rsp);
1282 | return ret; |
1283 | } |
1284 | |
1285 | int qaic_manage_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) |
1286 | { |
1287 | struct qaic_manage_msg *user_msg = data; |
1288 | struct qaic_device *qdev; |
1289 | struct manage_msg *msg; |
1290 | struct qaic_user *usr; |
1291 | u8 __user *user_data; |
1292 | int qdev_rcu_id; |
1293 | int usr_rcu_id; |
1294 | int ret; |
1295 | |
1296 | if (user_msg->len > QAIC_MANAGE_MAX_MSG_LENGTH) |
1297 | return -EINVAL; |
1298 | |
1299 | usr = file_priv->driver_priv; |
1300 | |
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1304 | return -ENODEV; |
1305 | } |
1306 | |
1307 | qdev = usr->qddev->qdev; |
1308 | |
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
		srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1313 | return -ENODEV; |
1314 | } |
1315 | |
1316 | msg = kzalloc(QAIC_MANAGE_MAX_MSG_LENGTH + sizeof(*msg), GFP_KERNEL); |
1317 | if (!msg) { |
1318 | ret = -ENOMEM; |
1319 | goto out; |
1320 | } |
1321 | |
1322 | msg->len = user_msg->len; |
1323 | msg->count = user_msg->count; |
1324 | |
1325 | user_data = u64_to_user_ptr(user_msg->data); |
1326 | |
	if (copy_from_user(msg->data, user_data, user_msg->len)) {
1328 | ret = -EFAULT; |
1329 | goto free_msg; |
1330 | } |
1331 | |
	ret = qaic_manage(qdev, usr, msg);
1333 | |
1334 | /* |
	 * If qaic_manage() is successful then we copy the message back to
	 * userspace memory, and we also do so for -ECANCELED, since that
	 * means the device has NACKed the message with a status error code
	 * which userspace would like to know.
1339 | */ |
1340 | if (ret == -ECANCELED || !ret) { |
		if (copy_to_user(user_data, msg->data, msg->len)) {
1342 | ret = -EFAULT; |
1343 | } else { |
1344 | user_msg->len = msg->len; |
1345 | user_msg->count = msg->count; |
1346 | } |
1347 | } |
1348 | |
1349 | free_msg: |
	kfree(msg);
1351 | out: |
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1354 | return ret; |
1355 | } |
1356 | |
1357 | int get_cntl_version(struct qaic_device *qdev, struct qaic_user *usr, u16 *major, u16 *minor) |
1358 | { |
1359 | struct qaic_manage_trans_status_from_dev *status_result; |
1360 | struct qaic_manage_trans_status_to_dev *status_query; |
1361 | struct manage_msg *user_msg; |
1362 | int ret; |
1363 | |
	user_msg = kmalloc(sizeof(*user_msg) + sizeof(*status_result), GFP_KERNEL);
1365 | if (!user_msg) { |
1366 | ret = -ENOMEM; |
1367 | goto out; |
1368 | } |
1369 | user_msg->len = sizeof(*status_query); |
1370 | user_msg->count = 1; |
1371 | |
1372 | status_query = (struct qaic_manage_trans_status_to_dev *)user_msg->data; |
1373 | status_query->hdr.type = QAIC_TRANS_STATUS_FROM_USR; |
1374 | status_query->hdr.len = sizeof(status_query->hdr); |
1375 | |
1376 | ret = qaic_manage(qdev, usr, user_msg); |
1377 | if (ret) |
1378 | goto kfree_user_msg; |
1379 | status_result = (struct qaic_manage_trans_status_from_dev *)user_msg->data; |
1380 | *major = status_result->major; |
1381 | *minor = status_result->minor; |
1382 | |
1383 | if (status_result->status_flags & BIT(0)) { /* device is using CRC */ |
1384 | /* By default qdev->gen_crc is programmed to generate CRC */ |
1385 | qdev->valid_crc = valid_crc; |
1386 | } else { |
1387 | /* By default qdev->valid_crc is programmed to bypass CRC */ |
1388 | qdev->gen_crc = gen_crc_stub; |
1389 | } |
1390 | |
1391 | kfree_user_msg: |
	kfree(user_msg);
1393 | out: |
1394 | return ret; |
1395 | } |
1396 | |
1397 | static void resp_worker(struct work_struct *work) |
1398 | { |
1399 | struct resp_work *resp = container_of(work, struct resp_work, work); |
1400 | struct qaic_device *qdev = resp->qdev; |
1401 | struct wire_msg *msg = resp->buf; |
1402 | struct xfer_queue_elem *elem; |
1403 | struct xfer_queue_elem *i; |
1404 | bool found = false; |
1405 | |
1406 | mutex_lock(&qdev->cntl_mutex); |
1407 | list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) { |
1408 | if (elem->seq_num == le32_to_cpu(msg->hdr.sequence_number)) { |
1409 | found = true; |
			list_del_init(&elem->list);
1411 | elem->buf = msg; |
1412 | complete_all(&elem->xfer_done); |
1413 | break; |
1414 | } |
1415 | } |
	mutex_unlock(&qdev->cntl_mutex);
1417 | |
1418 | if (!found) |
1419 | /* request must have timed out, drop packet */ |
		kfree(msg);

	kfree(resp);
1423 | } |
1424 | |
1425 | static void free_wrapper_from_list(struct wrapper_list *wrappers, struct wrapper_msg *wrapper) |
1426 | { |
1427 | bool all_done = false; |
1428 | |
	spin_lock(&wrappers->lock);
	kref_put(&wrapper->ref_count, free_wrapper);
	all_done = list_empty(&wrappers->list);
	spin_unlock(&wrappers->lock);

	if (all_done)
		kfree(wrappers);
1436 | } |
1437 | |
1438 | void qaic_mhi_ul_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) |
1439 | { |
1440 | struct wire_msg *msg = mhi_result->buf_addr; |
1441 | struct wrapper_msg *wrapper = container_of(msg, struct wrapper_msg, msg); |
1442 | |
	free_wrapper_from_list(wrapper->head, wrapper);
1444 | } |
1445 | |
1446 | void qaic_mhi_dl_xfer_cb(struct mhi_device *mhi_dev, struct mhi_result *mhi_result) |
1447 | { |
	struct qaic_device *qdev = dev_get_drvdata(&mhi_dev->dev);
1449 | struct wire_msg *msg = mhi_result->buf_addr; |
1450 | struct resp_work *resp; |
1451 | |
1452 | if (mhi_result->transaction_status || msg->hdr.magic_number != MANAGE_MAGIC_NUMBER) { |
		kfree(msg);
1454 | return; |
1455 | } |
1456 | |
	resp = kmalloc(sizeof(*resp), GFP_ATOMIC);
	if (!resp) {
		kfree(msg);
1460 | return; |
1461 | } |
1462 | |
1463 | INIT_WORK(&resp->work, resp_worker); |
1464 | resp->qdev = qdev; |
1465 | resp->buf = msg; |
	queue_work(qdev->cntl_wq, &resp->work);
1467 | } |
1468 | |
1469 | int qaic_control_open(struct qaic_device *qdev) |
1470 | { |
1471 | if (!qdev->cntl_ch) |
1472 | return -ENODEV; |
1473 | |
1474 | qdev->cntl_lost_buf = false; |
1475 | /* |
	 * By default qaic should assume that the device has CRC enabled.
	 * Qaic learns whether the device has CRC enabled or disabled during
	 * the device status transaction, which is the first transaction
	 * performed on the control channel.
	 *
	 * So CRC validation of the first device status response is skipped
	 * (by calling valid_crc_stub) and is done later, during decoding, if
	 * the device has CRC enabled.
	 * Once qaic knows whether the device has CRC enabled or not, it acts
	 * accordingly.
1486 | */ |
1487 | qdev->gen_crc = gen_crc; |
1488 | qdev->valid_crc = valid_crc_stub; |
1489 | |
	return mhi_prepare_for_transfer(qdev->cntl_ch);
1491 | } |
1492 | |
1493 | void qaic_control_close(struct qaic_device *qdev) |
1494 | { |
	mhi_unprepare_from_transfer(qdev->cntl_ch);
1496 | } |
1497 | |
1498 | void qaic_release_usr(struct qaic_device *qdev, struct qaic_user *usr) |
1499 | { |
1500 | struct wire_trans_terminate_to_dev *trans; |
1501 | struct wrapper_list *wrappers; |
1502 | struct wrapper_msg *wrapper; |
1503 | struct wire_msg *msg; |
1504 | struct wire_msg *rsp; |
1505 | |
1506 | wrappers = alloc_wrapper_list(); |
1507 | if (!wrappers) |
1508 | return; |
1509 | |
	wrapper = add_wrapper(wrappers, sizeof(*wrapper) + sizeof(*msg) + sizeof(*trans));
1511 | if (!wrapper) |
1512 | return; |
1513 | |
1514 | msg = &wrapper->msg; |
1515 | |
1516 | trans = (struct wire_trans_terminate_to_dev *)msg->data; |
1517 | |
1518 | trans->hdr.type = cpu_to_le32(QAIC_TRANS_TERMINATE_TO_DEV); |
1519 | trans->hdr.len = cpu_to_le32(sizeof(*trans)); |
1520 | trans->handle = cpu_to_le32(usr->handle); |
1521 | |
1522 | mutex_lock(&qdev->cntl_mutex); |
1523 | wrapper->len = sizeof(msg->hdr) + sizeof(*trans); |
1524 | msg->hdr.magic_number = MANAGE_MAGIC_NUMBER; |
1525 | msg->hdr.sequence_number = cpu_to_le32(qdev->next_seq_num++); |
1526 | msg->hdr.len = cpu_to_le32(wrapper->len); |
1527 | msg->hdr.count = cpu_to_le32(1); |
1528 | msg->hdr.handle = cpu_to_le32(usr->handle); |
1529 | msg->hdr.padding = cpu_to_le32(0); |
1530 | msg->hdr.crc32 = cpu_to_le32(qdev->gen_crc(wrappers)); |
1531 | |
1532 | /* |
1533 | * msg_xfer releases the mutex |
1534 | * We don't care about the return of msg_xfer since we will not do |
1535 | * anything different based on what happens. |
1536 | * We ignore pending signals since one will be set if the user is |
	 * killed, and we need to give the device a chance to clean up,
	 * otherwise DMA may still be in progress when we return.
1539 | */ |
	rsp = msg_xfer(qdev, wrappers, qdev->next_seq_num - 1, true);
	if (!IS_ERR(rsp))
		kfree(rsp);
1543 | free_wrapper_from_list(wrappers, wrapper); |
1544 | } |
1545 | |
1546 | void wake_all_cntl(struct qaic_device *qdev) |
1547 | { |
1548 | struct xfer_queue_elem *elem; |
1549 | struct xfer_queue_elem *i; |
1550 | |
1551 | mutex_lock(&qdev->cntl_mutex); |
1552 | list_for_each_entry_safe(elem, i, &qdev->cntl_xfer_list, list) { |
		list_del_init(&elem->list);
		complete_all(&elem->xfer_done);
	}
	mutex_unlock(&qdev->cntl_mutex);
1557 | } |
1558 | |