// SPDX-License-Identifier: GPL-2.0-only

/* Copyright (c) 2019-2021, The Linux Foundation. All rights reserved. */
/* Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved. */

#include <linux/bitfield.h>
#include <linux/bits.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <linux/interrupt.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/math64.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>
#include <linux/scatterlist.h>
#include <linux/spinlock.h>
#include <linux/srcu.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <drm/drm_file.h>
#include <drm/drm_gem.h>
#include <drm/drm_prime.h>
#include <drm/drm_print.h>
#include <uapi/drm/qaic_accel.h>

#include "qaic.h"

#define SEM_VAL_MASK	GENMASK_ULL(11, 0)
#define SEM_INDEX_MASK	GENMASK_ULL(4, 0)
#define BULK_XFER	BIT(3)
#define GEN_COMPLETION	BIT(4)
#define INBOUND_XFER	1
#define OUTBOUND_XFER	2
#define REQHP_OFF	0x0 /* we read this */
#define REQTP_OFF	0x4 /* we write this */
#define RSPHP_OFF	0x8 /* we write this */
#define RSPTP_OFF	0xc /* we read this */

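/*
 * Encodes a semaphore command word for a DMA bridge request. As an
 * illustrative example (assuming the uapi values QAIC_SEM_WAIT_EQUAL == 4
 * and presync == 1 from qaic_accel.h): waiting for semaphore index 2 to
 * equal 1 sets bit 0 (value), bit 17 (index), bit 22 (sync), bit 26
 * (command) and bit 31 (enable, set whenever cmd is non-zero), i.e.
 * 0x84420001.
 */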
#define ENCODE_SEM(val, index, sync, cmd, flags)			\
({									\
	FIELD_PREP(GENMASK(11, 0), (val)) |				\
	FIELD_PREP(GENMASK(20, 16), (index)) |				\
	FIELD_PREP(BIT(22), (sync)) |					\
	FIELD_PREP(GENMASK(26, 24), (cmd)) |				\
	FIELD_PREP(GENMASK(30, 29), (flags)) |				\
	FIELD_PREP(BIT(31), (cmd) ? 1 : 0);				\
})
#define NUM_EVENTS	128
#define NUM_DELAYS	10
#define fifo_at(base, offset) ((base) + (offset) * get_dbc_req_elem_size())

static unsigned int wait_exec_default_timeout_ms = 5000; /* 5 sec default */
module_param(wait_exec_default_timeout_ms, uint, 0600);
MODULE_PARM_DESC(wait_exec_default_timeout_ms, "Default timeout for DRM_IOCTL_QAIC_WAIT_BO");

static unsigned int datapath_poll_interval_us = 100; /* 100 usec default */
module_param(datapath_poll_interval_us, uint, 0600);
MODULE_PARM_DESC(datapath_poll_interval_us,
		 "Amount of time to sleep between activity when datapath polling is enabled");
struct dbc_req {
	/*
	 * A request ID is assigned to each memory handle going into the DMA
	 * queue. As a single memory handle can enqueue multiple elements in
	 * the DMA queue, all of them will have the same request ID.
	 */
	__le16	req_id;
	/* Future use */
	__u8	seq_id;
	/*
	 * Special encoded variable
	 * 7	0 - Do not force to generate MSI after DMA is completed
	 *	1 - Force to generate MSI after DMA is completed
	 * 6:5	Reserved
	 * 4	1 - Generate completion element in the response queue
	 *	0 - No Completion Code
	 * 3	0 - DMA request is a Link list transfer
	 *	1 - DMA request is a Bulk transfer
	 * 2	Reserved
	 * 1:0	00 - No DMA transfer involved
	 *	01 - DMA transfer is part of inbound transfer
	 *	10 - DMA transfer has outbound transfer
	 *	11 - NA
	 */
	__u8	cmd;
	__le32	resv;
	/* Source address for the transfer */
	__le64	src_addr;
	/* Destination address for the transfer */
	__le64	dest_addr;
	/* Length of transfer request */
	__le32	len;
	__le32	resv2;
	/* Doorbell address */
	__le64	db_addr;
	/*
	 * Special encoded variable
	 * 7	1 - Doorbell(db) write
	 *	0 - No doorbell write
	 * 6:2	Reserved
	 * 1:0	00 - 32 bit access, db address must be aligned to 32bit-boundary
	 *	01 - 16 bit access, db address must be aligned to 16bit-boundary
	 *	10 - 8 bit access, db address must be aligned to 8bit-boundary
	 *	11 - Reserved
	 */
	__u8	db_len;
	__u8	resv3;
	__le16	resv4;
	/* 32 bit data written to doorbell address */
	__le32	db_data;
	/*
	 * Special encoded variable
	 * All the fields of sem_cmdX are passed from user and all are ORed
	 * together to form sem_cmd.
	 * 11:0		Semaphore value
	 * 15:12	Reserved
	 * 20:16	Semaphore index
	 * 21		Reserved
	 * 22		Semaphore Sync
	 * 23		Reserved
	 * 26:24	Semaphore command
	 * 28:27	Reserved
	 * 29		Semaphore DMA out bound sync fence
	 * 30		Semaphore DMA in bound sync fence
	 * 31		Enable semaphore command
	 */
	__le32	sem_cmd0;
	__le32	sem_cmd1;
	__le32	sem_cmd2;
	__le32	sem_cmd3;
} __packed;

struct dbc_rsp {
	/* Request ID of the memory handle whose DMA transaction is completed */
	__le16	req_id;
	/* Status of the DMA transaction. 0 : Success otherwise failure */
	__le16	status;
} __packed;

static inline bool bo_queued(struct qaic_bo *bo)
{
	return !list_empty(&bo->xfer_list);
}

inline int get_dbc_req_elem_size(void)
{
	return sizeof(struct dbc_req);
}

inline int get_dbc_rsp_elem_size(void)
{
	return sizeof(struct dbc_rsp);
}

static void free_slice(struct kref *kref)
{
	struct bo_slice *slice = container_of(kref, struct bo_slice, ref_count);

	slice->bo->total_slice_nents -= slice->nents;
	list_del(&slice->slice);
	drm_gem_object_put(&slice->bo->base);
	sg_free_table(slice->sgt);
	kfree(slice->sgt);
	kfree(slice->reqs);
	kfree(slice);
}

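/*
 * Build a new sg_table covering only the [offset, offset + size) byte range
 * of sgt_in. The first and last entries are trimmed so the clone starts and
 * ends exactly at the requested boundaries; a size of 0 is treated as
 * PAGE_SIZE for bookkeeping-only slices.
 */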
static int clone_range_of_sgt_for_slice(struct qaic_device *qdev, struct sg_table **sgt_out,
					struct sg_table *sgt_in, u64 size, u64 offset)
{
	int total_len, len, nents, offf = 0, offl = 0;
	struct scatterlist *sg, *sgn, *sgf, *sgl;
	struct sg_table *sgt;
	int ret, j;

	/* find out number of relevant nents needed for this mem */
	total_len = 0;
	sgf = NULL;
	sgl = NULL;
	nents = 0;

	size = size ? size : PAGE_SIZE;
	for (sg = sgt_in->sgl; sg; sg = sg_next(sg)) {
		len = sg_dma_len(sg);

		if (!len)
			continue;
		if (offset >= total_len && offset < total_len + len) {
			sgf = sg;
			offf = offset - total_len;
		}
		if (sgf)
			nents++;
		if (offset + size >= total_len &&
		    offset + size <= total_len + len) {
			sgl = sg;
			offl = offset + size - total_len;
			break;
		}
		total_len += len;
	}

	if (!sgf || !sgl) {
		ret = -EINVAL;
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto out;
	}

	ret = sg_alloc_table(sgt, nents, GFP_KERNEL);
	if (ret)
		goto free_sgt;

	/* copy relevant sg node and fix page and length */
	sgn = sgf;
	for_each_sgtable_sg(sgt, sg, j) {
		memcpy(sg, sgn, sizeof(*sg));
		if (sgn == sgf) {
			sg_dma_address(sg) += offf;
			sg_dma_len(sg) -= offf;
			sg_set_page(sg, sg_page(sgn), sg_dma_len(sg), offf);
		} else {
			offf = 0;
		}
		if (sgn == sgl) {
			sg_dma_len(sg) = offl - offf;
			sg_set_page(sg, sg_page(sgn), offl - offf, offf);
			sg_mark_end(sg);
			break;
		}
		sgn = sg_next(sgn);
	}

	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
out:
	*sgt_out = NULL;
	return ret;
}

static int encode_reqs(struct qaic_device *qdev, struct bo_slice *slice,
		       struct qaic_attach_slice_entry *req)
{
	__le64 db_addr = cpu_to_le64(req->db_addr);
	__le32 db_data = cpu_to_le32(req->db_data);
	struct scatterlist *sg;
	__u8 cmd = BULK_XFER;
	int presync_sem;
	u64 dev_addr;
	__u8 db_len;
	int i;

	if (!slice->no_xfer)
		cmd |= (slice->dir == DMA_TO_DEVICE ? INBOUND_XFER : OUTBOUND_XFER);

	if (req->db_len && !IS_ALIGNED(req->db_addr, req->db_len / 8))
		return -EINVAL;

	presync_sem = req->sem0.presync + req->sem1.presync + req->sem2.presync + req->sem3.presync;
	if (presync_sem > 1)
		return -EINVAL;

	presync_sem = req->sem0.presync << 0 | req->sem1.presync << 1 |
		      req->sem2.presync << 2 | req->sem3.presync << 3;

	switch (req->db_len) {
	case 32:
		db_len = BIT(7);
		break;
	case 16:
		db_len = BIT(7) | 1;
		break;
	case 8:
		db_len = BIT(7) | 2;
		break;
	case 0:
		db_len = 0; /* doorbell is not active for this command */
		break;
	default:
		return -EINVAL; /* should never hit this */
	}

	/*
	 * When we end up splitting up a single request (i.e. a buf slice)
	 * into multiple DMA requests, we have to manage the sync data
	 * carefully. There can only be one presync sem. That needs to be on
	 * every xfer so that the DMA engine doesn't transfer data before the
	 * receiver is ready. We only do the doorbell and postsync sems after
	 * the xfer. To guarantee previous xfers for the request are complete,
	 * we use a fence.
	 */
	dev_addr = req->dev_addr;
	for_each_sgtable_sg(slice->sgt, sg, i) {
		slice->reqs[i].cmd = cmd;
		slice->reqs[i].src_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						      sg_dma_address(sg) : dev_addr);
		slice->reqs[i].dest_addr = cpu_to_le64(slice->dir == DMA_TO_DEVICE ?
						       dev_addr : sg_dma_address(sg));
		/*
		 * sg_dma_len(sg) returns size of a DMA segment, maximum DMA
		 * segment size is set to UINT_MAX by qaic and hence return
		 * values of sg_dma_len(sg) can never exceed u32 range. So,
		 * by down sizing we are not corrupting the value.
		 */
		slice->reqs[i].len = cpu_to_le32((u32)sg_dma_len(sg));
		switch (presync_sem) {
		case BIT(0):
			slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val,
									 req->sem0.index,
									 req->sem0.presync,
									 req->sem0.cmd,
									 req->sem0.flags));
			break;
		case BIT(1):
			slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val,
									 req->sem1.index,
									 req->sem1.presync,
									 req->sem1.cmd,
									 req->sem1.flags));
			break;
		case BIT(2):
			slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val,
									 req->sem2.index,
									 req->sem2.presync,
									 req->sem2.cmd,
									 req->sem2.flags));
			break;
		case BIT(3):
			slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val,
									 req->sem3.index,
									 req->sem3.presync,
									 req->sem3.cmd,
									 req->sem3.flags));
			break;
		}
		dev_addr += sg_dma_len(sg);
	}
	/* add post transfer stuff to last segment */
	i--;
	slice->reqs[i].cmd |= GEN_COMPLETION;
	slice->reqs[i].db_addr = db_addr;
	slice->reqs[i].db_len = db_len;
	slice->reqs[i].db_data = db_data;
	/*
	 * Add a fence if we have more than one request going to the hardware
	 * representing the entirety of the user request, and the user request
	 * has no presync condition.
	 * Fences are expensive, so we try to avoid them. We rely on the
	 * hardware behavior to avoid needing one when there is a presync
	 * condition. When a presync exists, all requests for that same
	 * presync will be queued into a fifo. Thus, since we queue the
	 * post xfer activity only on the last request we queue, the hardware
	 * will ensure that the last queued request is processed last, thus
	 * making sure the post xfer activity happens at the right time without
	 * a fence.
	 */
	if (i && !presync_sem)
		req->sem0.flags |= (slice->dir == DMA_TO_DEVICE ?
				    QAIC_SEM_INSYNCFENCE : QAIC_SEM_OUTSYNCFENCE);
	slice->reqs[i].sem_cmd0 = cpu_to_le32(ENCODE_SEM(req->sem0.val, req->sem0.index,
							 req->sem0.presync, req->sem0.cmd,
							 req->sem0.flags));
	slice->reqs[i].sem_cmd1 = cpu_to_le32(ENCODE_SEM(req->sem1.val, req->sem1.index,
							 req->sem1.presync, req->sem1.cmd,
							 req->sem1.flags));
	slice->reqs[i].sem_cmd2 = cpu_to_le32(ENCODE_SEM(req->sem2.val, req->sem2.index,
							 req->sem2.presync, req->sem2.cmd,
							 req->sem2.flags));
	slice->reqs[i].sem_cmd3 = cpu_to_le32(ENCODE_SEM(req->sem3.val, req->sem3.index,
							 req->sem3.presync, req->sem3.cmd,
							 req->sem3.flags));

	return 0;
}

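/*
 * Create one bo_slice for the given BO: clone the relevant sg_table range,
 * pre-encode the DMA bridge requests for it, and link the slice into the
 * BO's slice list. The slice holds a reference on the BO until freed.
 */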
static int qaic_map_one_slice(struct qaic_device *qdev, struct qaic_bo *bo,
			      struct qaic_attach_slice_entry *slice_ent)
{
	struct sg_table *sgt = NULL;
	struct bo_slice *slice;
	int ret;

	ret = clone_range_of_sgt_for_slice(qdev, &sgt, bo->sgt, slice_ent->size, slice_ent->offset);
	if (ret)
		goto out;

	slice = kmalloc(sizeof(*slice), GFP_KERNEL);
	if (!slice) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	slice->reqs = kcalloc(sgt->nents, sizeof(*slice->reqs), GFP_KERNEL);
	if (!slice->reqs) {
		ret = -ENOMEM;
		goto free_slice;
	}

	slice->no_xfer = !slice_ent->size;
	slice->sgt = sgt;
	slice->nents = sgt->nents;
	slice->dir = bo->dir;
	slice->bo = bo;
	slice->size = slice_ent->size;
	slice->offset = slice_ent->offset;

	ret = encode_reqs(qdev, slice, slice_ent);
	if (ret)
		goto free_req;

	bo->total_slice_nents += sgt->nents;
	kref_init(&slice->ref_count);
	drm_gem_object_get(&bo->base);
	list_add_tail(&slice->slice, &bo->slices);

	return 0;

free_req:
	kfree(slice->reqs);
free_slice:
	kfree(slice);
free_sgt:
	sg_free_table(sgt);
	kfree(sgt);
out:
	return ret;
}

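/*
 * Back a BO of the given size with freshly allocated pages. Allocation is
 * attempted at the largest useful page order first and falls back to smaller
 * orders on failure, so the buffer may be built from several chunks. For
 * example, a 12 KiB request (3 pages) first tries an order-2 (16 KiB)
 * allocation; the surplus 4 KiB is recorded in buf_extra and trimmed from
 * the final scatterlist entry.
 */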
static int create_sgt(struct qaic_device *qdev, struct sg_table **sgt_out, u64 size)
{
	struct scatterlist *sg;
	struct sg_table *sgt;
	struct page **pages;
	int *pages_order;
	int buf_extra;
	int max_order;
	int nr_pages;
	int ret = 0;
	int i, j, k;
	int order;

	if (size) {
		nr_pages = DIV_ROUND_UP(size, PAGE_SIZE);
		/*
		 * calculate how much extra we are going to allocate, to remove
		 * later
		 */
		buf_extra = (PAGE_SIZE - size % PAGE_SIZE) % PAGE_SIZE;
		max_order = min(MAX_PAGE_ORDER, get_order(size));
	} else {
		/* allocate a single page for book keeping */
		nr_pages = 1;
		buf_extra = 0;
		max_order = 0;
	}

	pages = kvmalloc_array(nr_pages, sizeof(*pages) + sizeof(*pages_order), GFP_KERNEL);
	if (!pages) {
		ret = -ENOMEM;
		goto out;
	}
	pages_order = (void *)pages + sizeof(*pages) * nr_pages;

	/*
	 * Allocate requested memory using alloc_pages. It is possible to allocate
	 * the requested memory in multiple chunks by calling alloc_pages
	 * multiple times. Use SG table to handle multiple allocated pages.
	 */
	i = 0;
	while (nr_pages > 0) {
		order = min(get_order(nr_pages * PAGE_SIZE), max_order);
		while (1) {
			pages[i] = alloc_pages(GFP_KERNEL | GFP_HIGHUSER |
					       __GFP_NOWARN | __GFP_ZERO |
					       (order ? __GFP_NORETRY : __GFP_RETRY_MAYFAIL),
					       order);
			if (pages[i])
				break;
			if (!order--) {
				ret = -ENOMEM;
				goto free_partial_alloc;
			}
		}

		max_order = order;
		pages_order[i] = order;

		nr_pages -= 1 << order;
		if (nr_pages <= 0)
			/* account for over allocation */
			buf_extra += abs(nr_pages) * PAGE_SIZE;
		i++;
	}

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto free_partial_alloc;
	}

	if (sg_alloc_table(sgt, i, GFP_KERNEL)) {
		ret = -ENOMEM;
		goto free_sgt;
	}

	/* Populate the SG table with the allocated memory pages */
	sg = sgt->sgl;
	for (k = 0; k < i; k++, sg = sg_next(sg)) {
		/* Last entry requires special handling */
		if (k < i - 1) {
			sg_set_page(sg, pages[k], PAGE_SIZE << pages_order[k], 0);
		} else {
			sg_set_page(sg, pages[k], (PAGE_SIZE << pages_order[k]) - buf_extra, 0);
			sg_mark_end(sg);
		}
	}

	kvfree(pages);
	*sgt_out = sgt;
	return ret;

free_sgt:
	kfree(sgt);
free_partial_alloc:
	for (j = 0; j < i; j++)
		__free_pages(pages[j], pages_order[j]);
	kvfree(pages);
out:
	*sgt_out = NULL;
	return ret;
}

static bool invalid_sem(struct qaic_sem *sem)
{
	if (sem->val & ~SEM_VAL_MASK || sem->index & ~SEM_INDEX_MASK ||
	    !(sem->presync == 0 || sem->presync == 1) || sem->pad ||
	    sem->flags & ~(QAIC_SEM_INSYNCFENCE | QAIC_SEM_OUTSYNCFENCE) ||
	    sem->cmd > QAIC_SEM_WAIT_GT_0)
		return true;
	return false;
}

static int qaic_validate_req(struct qaic_device *qdev, struct qaic_attach_slice_entry *slice_ent,
			     u32 count, u64 total_size)
{
	int i;

	for (i = 0; i < count; i++) {
		if (!(slice_ent[i].db_len == 32 || slice_ent[i].db_len == 16 ||
		      slice_ent[i].db_len == 8 || slice_ent[i].db_len == 0) ||
		    invalid_sem(&slice_ent[i].sem0) || invalid_sem(&slice_ent[i].sem1) ||
		    invalid_sem(&slice_ent[i].sem2) || invalid_sem(&slice_ent[i].sem3))
			return -EINVAL;

		if (slice_ent[i].offset + slice_ent[i].size > total_size)
			return -EINVAL;
	}

	return 0;
}

static void qaic_free_sgt(struct sg_table *sgt)
{
	struct scatterlist *sg;

	if (!sgt)
		return;

	for (sg = sgt->sgl; sg; sg = sg_next(sg))
		if (sg_page(sg))
			__free_pages(sg_page(sg), get_order(sg->length));
	sg_free_table(sgt);
	kfree(sgt);
}

static void qaic_gem_print_info(struct drm_printer *p, unsigned int indent,
				const struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	drm_printf_indent(p, indent, "BO DMA direction %d\n", bo->dir);
}

static const struct vm_operations_struct drm_vm_ops = {
	.open = drm_gem_vm_open,
	.close = drm_gem_vm_close,
};

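/*
 * Map the BO's backing pages into userspace. Imported (PRIME) BOs are
 * rejected; only BOs allocated by create_sgt() above, whose pages are owned
 * by this driver, can be mapped, one scatterlist entry at a time.
 */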
static int qaic_gem_object_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
	struct qaic_bo *bo = to_qaic_bo(obj);
	unsigned long offset = 0;
	struct scatterlist *sg;
	int ret = 0;

	if (obj->import_attach)
		return -EINVAL;

	for (sg = bo->sgt->sgl; sg; sg = sg_next(sg)) {
		if (sg_page(sg)) {
			ret = remap_pfn_range(vma, vma->vm_start + offset, page_to_pfn(sg_page(sg)),
					      sg->length, vma->vm_page_prot);
			if (ret)
				goto out;
			offset += sg->length;
		}
	}

out:
	return ret;
}

static void qaic_free_object(struct drm_gem_object *obj)
{
	struct qaic_bo *bo = to_qaic_bo(obj);

	if (obj->import_attach) {
		/* DMABUF/PRIME Path */
		drm_prime_gem_destroy(obj, NULL);
	} else {
		/* Private buffer allocation path */
		qaic_free_sgt(bo->sgt);
	}

	mutex_destroy(&bo->lock);
	drm_gem_object_release(obj);
	kfree(bo);
}

static const struct drm_gem_object_funcs qaic_gem_funcs = {
	.free = qaic_free_object,
	.print_info = qaic_gem_print_info,
	.mmap = qaic_gem_object_mmap,
	.vm_ops = &drm_vm_ops,
};

static void qaic_init_bo(struct qaic_bo *bo, bool reinit)
{
	if (reinit) {
		bo->sliced = false;
		reinit_completion(&bo->xfer_done);
	} else {
		mutex_init(&bo->lock);
		init_completion(&bo->xfer_done);
	}
	complete_all(&bo->xfer_done);
	INIT_LIST_HEAD(&bo->slices);
	INIT_LIST_HEAD(&bo->xfer_list);
}

static struct qaic_bo *qaic_alloc_init_bo(void)
{
	struct qaic_bo *bo;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (!bo)
		return ERR_PTR(-ENOMEM);

	qaic_init_bo(bo, false);

	return bo;
}

int qaic_create_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_create_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	struct qaic_bo *bo;
	size_t size;
	int ret;

	if (args->pad)
		return -EINVAL;

	size = PAGE_ALIGN(args->size);
	if (size == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto unlock_dev_srcu;
	}
	obj = &bo->base;

	drm_gem_private_object_init(dev, obj, size);

	obj->funcs = &qaic_gem_funcs;
	ret = create_sgt(qdev, &bo->sgt, size);
	if (ret)
		goto free_bo;

	ret = drm_gem_create_mmap_offset(obj);
	if (ret)
		goto free_bo;

	ret = drm_gem_handle_create(file_priv, obj, &args->handle);
	if (ret)
		goto free_bo;

	bo->handle = args->handle;
	drm_gem_object_put(obj);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

free_bo:
	drm_gem_object_put(obj);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

int qaic_mmap_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_mmap_bo *args = data;
	int usr_rcu_id, qdev_rcu_id;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	int ret = 0;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	obj = drm_gem_object_lookup(file_priv, args->handle);
	if (!obj) {
		ret = -ENOENT;
		goto unlock_dev_srcu;
	}

	args->offset = drm_vma_node_offset_addr(&obj->vma_node);

	drm_gem_object_put(obj);

unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

struct drm_gem_object *qaic_gem_prime_import(struct drm_device *dev, struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int ret;

	bo = qaic_alloc_init_bo();
	if (IS_ERR(bo)) {
		ret = PTR_ERR(bo);
		goto out;
	}

	obj = &bo->base;
	get_dma_buf(dma_buf);

	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach)) {
		ret = PTR_ERR(attach);
		goto attach_fail;
	}

	if (!attach->dmabuf->size) {
		ret = -EINVAL;
		goto size_align_fail;
	}

	drm_gem_private_object_init(dev, obj, attach->dmabuf->size);
	/*
	 * skipping dma_buf_map_attachment() as we do not know the direction
	 * just yet. Once the direction is known in the subsequent IOCTL to
	 * attach slicing, we can do it then.
	 */

	obj->funcs = &qaic_gem_funcs;
	obj->import_attach = attach;
	obj->resv = dma_buf->resv;

	return obj;

size_align_fail:
	dma_buf_detach(dma_buf, attach);
attach_fail:
	dma_buf_put(dma_buf);
out:
	return ERR_PTR(ret);
}

static int qaic_prepare_import_bo(struct qaic_bo *bo, struct qaic_attach_slice_hdr *hdr)
{
	struct drm_gem_object *obj = &bo->base;
	struct sg_table *sgt;
	int ret;

	sgt = dma_buf_map_attachment(obj->import_attach, hdr->dir);
	if (IS_ERR(sgt)) {
		ret = PTR_ERR(sgt);
		return ret;
	}

	bo->sgt = sgt;

	return 0;
}

static int qaic_prepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	ret = dma_map_sgtable(&qdev->pdev->dev, bo->sgt, hdr->dir, 0);
	if (ret)
		return -EFAULT;

	return 0;
}

static int qaic_prepare_bo(struct qaic_device *qdev, struct qaic_bo *bo,
			   struct qaic_attach_slice_hdr *hdr)
{
	int ret;

	if (bo->base.import_attach)
		ret = qaic_prepare_import_bo(bo, hdr);
	else
		ret = qaic_prepare_export_bo(qdev, bo, hdr);
	bo->dir = hdr->dir;
	bo->dbc = &qdev->dbc[hdr->dbc_id];
	bo->nr_slice = hdr->count;

	return ret;
}

static void qaic_unprepare_import_bo(struct qaic_bo *bo)
{
	dma_buf_unmap_attachment(bo->base.import_attach, bo->sgt, bo->dir);
	bo->sgt = NULL;
}

static void qaic_unprepare_export_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	dma_unmap_sgtable(&qdev->pdev->dev, bo->sgt, bo->dir, 0);
}

static void qaic_unprepare_bo(struct qaic_device *qdev, struct qaic_bo *bo)
{
	if (bo->base.import_attach)
		qaic_unprepare_import_bo(bo);
	else
		qaic_unprepare_export_bo(qdev, bo);

	bo->dir = 0;
	bo->dbc = NULL;
	bo->nr_slice = 0;
}

static void qaic_free_slices_bo(struct qaic_bo *bo)
{
	struct bo_slice *slice, *temp;

	list_for_each_entry_safe(slice, temp, &bo->slices, slice)
		kref_put(&slice->ref_count, free_slice);
	if (WARN_ON_ONCE(bo->total_slice_nents != 0))
		bo->total_slice_nents = 0;
	bo->nr_slice = 0;
}

static int qaic_attach_slicing_bo(struct qaic_device *qdev, struct qaic_bo *bo,
				  struct qaic_attach_slice_hdr *hdr,
				  struct qaic_attach_slice_entry *slice_ent)
{
	int ret, i;

	for (i = 0; i < hdr->count; i++) {
		ret = qaic_map_one_slice(qdev, bo, &slice_ent[i]);
		if (ret) {
			qaic_free_slices_bo(bo);
			return ret;
		}
	}

	if (bo->total_slice_nents > bo->dbc->nelem) {
		qaic_free_slices_bo(bo);
		return -ENOSPC;
	}

	return 0;
}

int qaic_attach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	struct qaic_attach_slice_entry *slice_ent;
	struct qaic_attach_slice *args = data;
	int rcu_id, usr_rcu_id, qdev_rcu_id;
	struct dma_bridge_chan *dbc;
	struct drm_gem_object *obj;
	struct qaic_device *qdev;
	unsigned long arg_size;
	struct qaic_user *usr;
	u8 __user *user_data;
	struct qaic_bo *bo;
	int ret;

	if (args->hdr.count == 0)
		return -EINVAL;

	arg_size = args->hdr.count * sizeof(*slice_ent);
	if (arg_size / args->hdr.count != sizeof(*slice_ent))
		return -EINVAL;

	if (!(args->hdr.dir == DMA_TO_DEVICE || args->hdr.dir == DMA_FROM_DEVICE))
		return -EINVAL;

	if (args->data == 0)
		return -EINVAL;

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	user_data = u64_to_user_ptr(args->data);

	slice_ent = kzalloc(arg_size, GFP_KERNEL);
	if (!slice_ent) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(slice_ent, user_data, arg_size);
	if (ret) {
		ret = -EFAULT;
		goto free_slice_ent;
	}

	obj = drm_gem_object_lookup(file_priv, args->hdr.handle);
	if (!obj) {
		ret = -ENOENT;
		goto free_slice_ent;
	}

	ret = qaic_validate_req(qdev, slice_ent, args->hdr.count, obj->size);
	if (ret)
		goto put_bo;

	bo = to_qaic_bo(obj);
	ret = mutex_lock_interruptible(&bo->lock);
	if (ret)
		goto put_bo;

	if (bo->sliced) {
		ret = -EINVAL;
		goto unlock_bo;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];
	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (dbc->usr != usr) {
		ret = -EINVAL;
		goto unlock_ch_srcu;
	}

	ret = qaic_prepare_bo(qdev, bo, &args->hdr);
	if (ret)
		goto unlock_ch_srcu;

	ret = qaic_attach_slicing_bo(qdev, bo, &args->hdr, slice_ent);
	if (ret)
		goto unprepare_bo;

	if (args->hdr.dir == DMA_TO_DEVICE)
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, args->hdr.dir);

	bo->sliced = true;
	list_add_tail(&bo->bo_list, &bo->dbc->bo_lists);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	mutex_unlock(&bo->lock);
	kfree(slice_ent);
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);

	return 0;

unprepare_bo:
	qaic_unprepare_bo(qdev, bo);
unlock_ch_srcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_bo:
	mutex_unlock(&bo->lock);
put_bo:
	drm_gem_object_put(obj);
free_slice_ent:
	kfree(slice_ent);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
	return ret;
}

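/*
 * One slot is always left unused so that head == tail unambiguously means
 * "empty". Worked example: with q_size = 8, head = 2 and tail = 5,
 * avail = 2 - 5 - 1 = -4, and since head <= tail we add 8, giving 4 free
 * slots (indices 5, 6, 7 and 0; writing index 1 would make the queue look
 * empty).
 */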
static inline u32 fifo_space_avail(u32 head, u32 tail, u32 q_size)
{
	u32 avail = head - tail - 1;

	if (head <= tail)
		avail += q_size;

	return avail;
}

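/*
 * Copy all of a slice's pre-encoded requests into the request FIFO at
 * *ptail. A slice is copied either completely or not at all (-EAGAIN), and
 * the copy is split in two when the slice wraps past the end of the ring.
 */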
static inline int copy_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice, u32 dbc_id,
				 u32 head, u32 *ptail)
{
	struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id];
	struct dbc_req *reqs = slice->reqs;
	u32 tail = *ptail;
	u32 avail;

	avail = fifo_space_avail(head, tail, dbc->nelem);
	if (avail < slice->nents)
		return -EAGAIN;

	if (tail + slice->nents > dbc->nelem) {
		avail = dbc->nelem - tail;
		avail = min_t(u32, avail, slice->nents);
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
		reqs += avail;
		avail = slice->nents - avail;
		if (avail)
			memcpy(dbc->req_q_base, reqs, sizeof(*reqs) * avail);
	} else {
		memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * slice->nents);
	}

	*ptail = (tail + slice->nents) % dbc->nelem;

	return 0;
}

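/*
 * Variant of copy_exec_reqs() for partial execute: only the requests needed
 * to cover the first "resize" bytes of the slice are queued, and the final
 * queued request has its length (and, for resize == 0, its DMA command bits)
 * adjusted so the hardware transfers exactly the resized amount.
 */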
static inline int copy_partial_exec_reqs(struct qaic_device *qdev, struct bo_slice *slice,
					 u64 resize, struct dma_bridge_chan *dbc, u32 head,
					 u32 *ptail)
{
	struct dbc_req *reqs = slice->reqs;
	struct dbc_req *last_req;
	u32 tail = *ptail;
	u64 last_bytes;
	u32 first_n;
	u32 avail;

	avail = fifo_space_avail(head, tail, dbc->nelem);

	/*
	 * After this for loop is complete, first_n represents the index
	 * of the last DMA request of this slice that needs to be
	 * transferred after resizing and last_bytes represents DMA size
	 * of that request.
	 */
	last_bytes = resize;
	for (first_n = 0; first_n < slice->nents; first_n++)
		if (last_bytes > le32_to_cpu(reqs[first_n].len))
			last_bytes -= le32_to_cpu(reqs[first_n].len);
		else
			break;

	if (avail < (first_n + 1))
		return -EAGAIN;

	if (first_n) {
		if (tail + first_n > dbc->nelem) {
			avail = dbc->nelem - tail;
			avail = min_t(u32, avail, first_n);
			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * avail);
			last_req = reqs + avail;
			avail = first_n - avail;
			if (avail)
				memcpy(dbc->req_q_base, last_req, sizeof(*reqs) * avail);
		} else {
			memcpy(fifo_at(dbc->req_q_base, tail), reqs, sizeof(*reqs) * first_n);
		}
	}

	/*
	 * Copy over the last entry. Here we need to adjust len to the left over
	 * size, and set src and dst to the entry it is copied to.
	 */
	last_req = fifo_at(dbc->req_q_base, (tail + first_n) % dbc->nelem);
	memcpy(last_req, reqs + slice->nents - 1, sizeof(*reqs));

	/*
	 * last_bytes holds size of a DMA segment, maximum DMA segment size is
	 * set to UINT_MAX by qaic and hence last_bytes can never exceed u32
	 * range. So, by down sizing we are not corrupting the value.
	 */
	last_req->len = cpu_to_le32((u32)last_bytes);
	last_req->src_addr = reqs[first_n].src_addr;
	last_req->dest_addr = reqs[first_n].dest_addr;
	if (!last_bytes)
		/* Disable DMA transfer */
		last_req->cmd = GENMASK(7, 2) & reqs[first_n].cmd;

	*ptail = (tail + first_n + 1) % dbc->nelem;

	return 0;
}

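/*
 * Queue every BO in the execute list onto the request FIFO. On any failure
 * the BOs already queued are unwound from the transfer list and their GEM
 * references dropped, so the caller never rings the doorbell for a
 * partially submitted list.
 */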
static int send_bo_list_to_device(struct qaic_device *qdev, struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, struct dma_bridge_chan *dbc, u32 head,
				  u32 *tail)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct bo_slice *slice;
	unsigned long flags;
	struct qaic_bo *bo;
	int i, j;
	int ret;

	for (i = 0; i < count; i++) {
		/*
		 * ref count will be decremented when the transfer of this
		 * buffer is complete. It is inside dbc_irq_threaded_fn().
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj) {
			ret = -ENOENT;
			goto failed_to_send_bo;
		}

		bo = to_qaic_bo(obj);
		ret = mutex_lock_interruptible(&bo->lock);
		if (ret)
			goto failed_to_send_bo;

		if (!bo->sliced) {
			ret = -EINVAL;
			goto unlock_bo;
		}

		if (is_partial && pexec[i].resize > bo->base.size) {
			ret = -EINVAL;
			goto unlock_bo;
		}

		spin_lock_irqsave(&dbc->xfer_lock, flags);
		if (bo_queued(bo)) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			ret = -EINVAL;
			goto unlock_bo;
		}

		bo->req_id = dbc->next_req_id++;

		list_for_each_entry(slice, &bo->slices, slice) {
			for (j = 0; j < slice->nents; j++)
				slice->reqs[j].req_id = cpu_to_le16(bo->req_id);

			if (is_partial && (!pexec[i].resize || pexec[i].resize <= slice->offset))
				/* Configure the slice for no DMA transfer */
				ret = copy_partial_exec_reqs(qdev, slice, 0, dbc, head, tail);
			else if (is_partial && pexec[i].resize < slice->offset + slice->size)
				/* Configure the slice to be partially DMA transferred */
				ret = copy_partial_exec_reqs(qdev, slice,
							     pexec[i].resize - slice->offset, dbc,
							     head, tail);
			else
				ret = copy_exec_reqs(qdev, slice, dbc->id, head, tail);
			if (ret) {
				spin_unlock_irqrestore(&dbc->xfer_lock, flags);
				goto unlock_bo;
			}
		}
		reinit_completion(&bo->xfer_done);
		list_add_tail(&bo->xfer_list, &dbc->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_device(&qdev->pdev->dev, bo->sgt, bo->dir);
		mutex_unlock(&bo->lock);
	}

	return 0;

unlock_bo:
	mutex_unlock(&bo->lock);
failed_to_send_bo:
	if (likely(obj))
		drm_gem_object_put(obj);
	for (j = 0; j < i; j++) {
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		bo = list_last_entry(&dbc->xfer_list, struct qaic_bo, xfer_list);
		obj = &bo->base;
		list_del_init(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		drm_gem_object_put(obj);
	}
	return ret;
}

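/*
 * Record the timestamps and pre-submit queue depth that back the
 * DRM_IOCTL_QAIC_PERF_STATS_BO ioctl for every BO in the submitted list.
 */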
static void update_profiling_data(struct drm_file *file_priv,
				  struct qaic_execute_entry *exec, unsigned int count,
				  bool is_partial, u64 received_ts, u64 submit_ts, u32 queue_level)
{
	struct qaic_partial_execute_entry *pexec = (struct qaic_partial_execute_entry *)exec;
	struct drm_gem_object *obj;
	struct qaic_bo *bo;
	int i;

	for (i = 0; i < count; i++) {
		/*
		 * Since we already committed the BO to hardware, the only way
		 * this should fail is a pending signal. We can't cancel the
		 * submit to hardware, so we have to just skip the profiling
		 * data. In case the signal is not fatal to the process, we
		 * return success so that the user doesn't try to resubmit.
		 */
		obj = drm_gem_object_lookup(file_priv,
					    is_partial ? pexec[i].handle : exec[i].handle);
		if (!obj)
			break;
		bo = to_qaic_bo(obj);
		bo->perf_stats.req_received_ts = received_ts;
		bo->perf_stats.req_submit_ts = submit_ts;
		bo->perf_stats.queue_level_before = queue_level;
		queue_level += bo->total_slice_nents;
		drm_gem_object_put(obj);
	}
}

static int __qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv,
				   bool is_partial)
{
	struct qaic_execute *args = data;
	struct qaic_execute_entry *exec;
	struct dma_bridge_chan *dbc;
	int usr_rcu_id, qdev_rcu_id;
	struct qaic_device *qdev;
	struct qaic_user *usr;
	u8 __user *user_data;
	unsigned long n;
	u64 received_ts;
	u32 queue_level;
	u64 submit_ts;
	int rcu_id;
	u32 head;
	u32 tail;
	u64 size;
	int ret;

	received_ts = ktime_get_ns();

	size = is_partial ? sizeof(struct qaic_partial_execute_entry) : sizeof(*exec);
	n = (unsigned long)size * args->hdr.count;
	if (args->hdr.count == 0 || n / args->hdr.count != size)
		return -EINVAL;

	user_data = u64_to_user_ptr(args->data);

	exec = kcalloc(args->hdr.count, size, GFP_KERNEL);
	if (!exec)
		return -ENOMEM;

	if (copy_from_user(exec, user_data, n)) {
		ret = -EFAULT;
		goto free_exec;
	}

	usr = file_priv->driver_priv;
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
	if (!usr->qddev) {
		ret = -ENODEV;
		goto unlock_usr_srcu;
	}

	qdev = usr->qddev->qdev;
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
	if (qdev->dev_state != QAIC_ONLINE) {
		ret = -ENODEV;
		goto unlock_dev_srcu;
	}

	if (args->hdr.dbc_id >= qdev->num_dbc) {
		ret = -EINVAL;
		goto unlock_dev_srcu;
	}

	dbc = &qdev->dbc[args->hdr.dbc_id];

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	if (!dbc->usr || dbc->usr->handle != usr->handle) {
		ret = -EPERM;
		goto release_ch_rcu;
	}

	head = readl(dbc->dbc_base + REQHP_OFF);
	tail = readl(dbc->dbc_base + REQTP_OFF);

	if (head == U32_MAX || tail == U32_MAX) {
		/* PCI link error */
		ret = -ENODEV;
		goto release_ch_rcu;
	}

	queue_level = head <= tail ? tail - head : dbc->nelem - (head - tail);

	ret = send_bo_list_to_device(qdev, file_priv, exec, args->hdr.count, is_partial, dbc,
				     head, &tail);
	if (ret)
		goto release_ch_rcu;

	/* Finalize commit to hardware */
	submit_ts = ktime_get_ns();
	writel(tail, dbc->dbc_base + REQTP_OFF);

	update_profiling_data(file_priv, exec, args->hdr.count, is_partial, received_ts,
			      submit_ts, queue_level);

	if (datapath_polling)
		schedule_work(&dbc->poll_work);

release_ch_rcu:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
free_exec:
	kfree(exec);
	return ret;
}

int qaic_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, false);
}

int qaic_partial_execute_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv)
{
	return __qaic_execute_bo_ioctl(dev, data, file_priv, true);
}

/*
 * Our interrupt handling is a bit more complicated than a simple ideal, but
 * sadly necessary.
 *
 * Each dbc has a completion queue. Entries in the queue correspond to DMA
 * requests which the device has processed. The hardware already has built-in
 * irq mitigation. When the device puts an entry into the queue, it will
 * only trigger an interrupt if the queue was empty. Therefore, when adding
 * the Nth event to a non-empty queue, the hardware doesn't trigger an
 * interrupt. This means the host doesn't get additional interrupts signaling
 * the same thing - the queue has something to process.
 * This behavior can be overridden in the DMA request.
 * This means that when the host receives an interrupt, it is required to
 * drain the queue.
 *
 * This behavior is what NAPI attempts to accomplish, although we can't use
 * NAPI as we don't have a netdev. We use threaded irqs instead.
 *
 * However, there is a situation where the host drains the queue fast enough
 * that every event causes an interrupt. Typically this is not a problem as
 * the rate of events would be low. However, that is not the case with
 * lprnet for example. On an Intel Xeon D-2191 where we run 8 instances of
 * lprnet, the host receives roughly 80k interrupts per second from the device
 * (per /proc/interrupts). While NAPI documentation indicates the host should
 * just chug along, sadly that behavior causes instability in some hosts.
 *
 * Therefore, we implement an interrupt disable scheme similar to NAPI. The
 * key difference is that we will delay after draining the queue for a small
 * time to allow additional events to come in via polling. Using the above
 * lprnet workload, this reduces the number of interrupts processed from
 * ~80k/sec to about 64 in 5 minutes and appears to solve the system
 * instability.
 */
irqreturn_t dbc_irq_handler(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	if (datapath_polling) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		/*
		 * Normally datapath_polling will not have irqs enabled, but
		 * when running with only one MSI the interrupt is shared with
		 * MHI so it cannot be disabled. Return ASAP instead.
		 */
		return IRQ_HANDLED;
	}

	if (!dbc->usr) {
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_HANDLED;
	}

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) { /* PCI link error */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (head == tail) { /* queue empty */
		srcu_read_unlock(&dbc->ch_lock, rcu_id);
		return IRQ_NONE;
	}

	if (!dbc->qdev->single_msi)
		disable_irq_nosync(irq);
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_WAKE_THREAD;
}

void irq_polling_work(struct work_struct *work)
{
	struct dma_bridge_chan *dbc = container_of(work, struct dma_bridge_chan, poll_work);
	unsigned long flags;
	int rcu_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);

	while (1) {
		if (dbc->qdev->dev_state != QAIC_ONLINE) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		if (!dbc->usr) {
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		if (list_empty(&dbc->xfer_list)) {
			spin_unlock_irqrestore(&dbc->xfer_lock, flags);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);

		head = readl(dbc->dbc_base + RSPHP_OFF);
		if (head == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		tail = readl(dbc->dbc_base + RSPTP_OFF);
		if (tail == U32_MAX) { /* PCI link error */
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		if (head != tail) {
			irq_wake_thread(dbc->irq, dbc);
			srcu_read_unlock(&dbc->ch_lock, rcu_id);
			return;
		}

		cond_resched();
		usleep_range(datapath_poll_interval_us, 2 * datapath_poll_interval_us);
	}
}

irqreturn_t dbc_irq_threaded_fn(int irq, void *data)
{
	struct dma_bridge_chan *dbc = data;
	int event_count = NUM_EVENTS;
	int delay_count = NUM_DELAYS;
	struct qaic_device *qdev;
	struct qaic_bo *bo, *i;
	struct dbc_rsp *rsp;
	unsigned long flags;
	int rcu_id;
	u16 status;
	u16 req_id;
	u32 head;
	u32 tail;

	rcu_id = srcu_read_lock(&dbc->ch_lock);
	qdev = dbc->qdev;

	head = readl(dbc->dbc_base + RSPHP_OFF);
	if (head == U32_MAX) /* PCI link error */
		goto error_out;

read_fifo:

	if (!event_count) {
		event_count = NUM_EVENTS;
		cond_resched();
	}

	/*
	 * if this channel isn't assigned or gets unassigned during processing
	 * we have nothing further to do
	 */
	if (!dbc->usr)
		goto error_out;

	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail == U32_MAX) /* PCI link error */
		goto error_out;

	if (head == tail) { /* queue empty */
		if (delay_count) {
			--delay_count;
			usleep_range(100, 200);
			goto read_fifo; /* check for a new event */
		}
		goto normal_out;
	}

	delay_count = NUM_DELAYS;
	while (head != tail) {
		if (!event_count)
			break;
		--event_count;
		rsp = dbc->rsp_q_base + head * sizeof(*rsp);
		req_id = le16_to_cpu(rsp->req_id);
		status = le16_to_cpu(rsp->status);
		if (status)
			pci_dbg(qdev->pdev, "req_id %d failed with status %d\n", req_id, status);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
		/*
		 * A BO can receive multiple interrupts, since a BO can be
		 * divided into multiple slices and a buffer receives as many
		 * interrupts as slices. So until it receives interrupts for
		 * all the slices we cannot mark that buffer complete.
		 */
		list_for_each_entry_safe(bo, i, &dbc->xfer_list, xfer_list) {
			if (bo->req_id == req_id)
				bo->nr_slice_xfer_done++;
			else
				continue;

			if (bo->nr_slice_xfer_done < bo->nr_slice)
				break;

			/*
			 * At this point we have received all the interrupts for
			 * BO, which means BO execution is complete.
			 */
			dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
			bo->nr_slice_xfer_done = 0;
			list_del_init(&bo->xfer_list);
			bo->perf_stats.req_processed_ts = ktime_get_ns();
			complete_all(&bo->xfer_done);
			drm_gem_object_put(&bo->base);
			break;
		}
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
		head = (head + 1) % dbc->nelem;
	}

	/*
	 * Update the head pointer of response queue and let the device know
	 * that we have consumed elements from the queue.
	 */
	writel(head, dbc->dbc_base + RSPHP_OFF);

	/* elements might have been put in the queue while we were processing */
	goto read_fifo;

normal_out:
	if (!qdev->single_msi && likely(!datapath_polling))
		enable_irq(irq);
	else if (unlikely(datapath_polling))
		schedule_work(&dbc->poll_work);
	/* checking the fifo and enabling irqs is a race, missed event check */
	tail = readl(dbc->dbc_base + RSPTP_OFF);
	if (tail != U32_MAX && head != tail) {
		if (!qdev->single_msi && likely(!datapath_polling))
			disable_irq_nosync(irq);
		goto read_fifo;
	}
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	return IRQ_HANDLED;

error_out:
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
	if (!qdev->single_msi && likely(!datapath_polling))
		enable_irq(irq);
	else if (unlikely(datapath_polling))
		schedule_work(&dbc->poll_work);

	return IRQ_HANDLED;
}

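/*
 * Implements DRM_IOCTL_QAIC_WAIT_BO: block (interruptibly) until the BO's
 * last queued transfer completes. A timeout of 0 selects the
 * wait_exec_default_timeout_ms module parameter.
 */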
1657 | int qaic_wait_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) |
1658 | { |
1659 | struct qaic_wait *args = data; |
1660 | int usr_rcu_id, qdev_rcu_id; |
1661 | struct dma_bridge_chan *dbc; |
1662 | struct drm_gem_object *obj; |
1663 | struct qaic_device *qdev; |
1664 | unsigned long timeout; |
1665 | struct qaic_user *usr; |
1666 | struct qaic_bo *bo; |
1667 | int rcu_id; |
1668 | int ret; |
1669 | |
1670 | if (args->pad != 0) |
1671 | return -EINVAL; |
1672 | |
1673 | usr = file_priv->driver_priv; |
1674 | usr_rcu_id = srcu_read_lock(ssp: &usr->qddev_lock); |
1675 | if (!usr->qddev) { |
1676 | ret = -ENODEV; |
1677 | goto unlock_usr_srcu; |
1678 | } |
1679 | |
1680 | qdev = usr->qddev->qdev; |
1681 | qdev_rcu_id = srcu_read_lock(ssp: &qdev->dev_lock); |
1682 | if (qdev->dev_state != QAIC_ONLINE) { |
1683 | ret = -ENODEV; |
1684 | goto unlock_dev_srcu; |
1685 | } |
1686 | |
1687 | if (args->dbc_id >= qdev->num_dbc) { |
1688 | ret = -EINVAL; |
1689 | goto unlock_dev_srcu; |
1690 | } |
1691 | |
1692 | dbc = &qdev->dbc[args->dbc_id]; |
1693 | |
1694 | rcu_id = srcu_read_lock(ssp: &dbc->ch_lock); |
1695 | if (dbc->usr != usr) { |
1696 | ret = -EPERM; |
1697 | goto unlock_ch_srcu; |
1698 | } |
1699 | |
1700 | obj = drm_gem_object_lookup(filp: file_priv, handle: args->handle); |
1701 | if (!obj) { |
1702 | ret = -ENOENT; |
1703 | goto unlock_ch_srcu; |
1704 | } |
1705 | |
1706 | bo = to_qaic_bo(obj); |
1707 | timeout = args->timeout ? args->timeout : wait_exec_default_timeout_ms; |
1708 | timeout = msecs_to_jiffies(m: timeout); |
1709 | ret = wait_for_completion_interruptible_timeout(x: &bo->xfer_done, timeout); |
1710 | if (!ret) { |
1711 | ret = -ETIMEDOUT; |
1712 | goto put_obj; |
1713 | } |
1714 | if (ret > 0) |
1715 | ret = 0; |
1716 | |
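	/*
	 * The wait may have been woken by channel teardown rather than a
	 * normal completion; if the DBC no longer belongs to a user, report
	 * that the execution did not finish on the caller's behalf.
	 */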
1717 | if (!dbc->usr) |
1718 | ret = -EPERM; |
1719 | |
1720 | put_obj: |
1721 | drm_gem_object_put(obj); |
1722 | unlock_ch_srcu: |
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_dev_srcu:
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1728 | return ret; |
1729 | } |
1730 | |
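/**
 * qaic_perf_stats_bo_ioctl() - Handle DRM_IOCTL_QAIC_PERF_STATS_BO.
 * @dev: DRM device the ioctl was issued against
 * @data: Ioctl payload, a struct qaic_perf_stats
 * @file_priv: DRM file of the caller
 *
 * For each BO handle supplied by the caller, convert the timestamps recorded
 * at receive, submit, and completion time into microsecond latencies and copy
 * the results back to userspace.
 *
 * Return: 0 on success, negative errno on failure.
 */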
1731 | int qaic_perf_stats_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) |
1732 | { |
1733 | struct qaic_perf_stats_entry *ent = NULL; |
1734 | struct qaic_perf_stats *args = data; |
1735 | int usr_rcu_id, qdev_rcu_id; |
1736 | struct drm_gem_object *obj; |
1737 | struct qaic_device *qdev; |
1738 | struct qaic_user *usr; |
1739 | struct qaic_bo *bo; |
1740 | int ret, i; |
1741 | |
1742 | usr = file_priv->driver_priv; |
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1744 | if (!usr->qddev) { |
1745 | ret = -ENODEV; |
1746 | goto unlock_usr_srcu; |
1747 | } |
1748 | |
1749 | qdev = usr->qddev->qdev; |
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1751 | if (qdev->dev_state != QAIC_ONLINE) { |
1752 | ret = -ENODEV; |
1753 | goto unlock_dev_srcu; |
1754 | } |
1755 | |
1756 | if (args->hdr.dbc_id >= qdev->num_dbc) { |
1757 | ret = -EINVAL; |
1758 | goto unlock_dev_srcu; |
1759 | } |
1760 | |
	ent = kcalloc(args->hdr.count, sizeof(*ent), GFP_KERNEL);
	if (!ent) {
		ret = -ENOMEM;
		goto unlock_dev_srcu;
	}

	ret = copy_from_user(ent, u64_to_user_ptr(args->data), args->hdr.count * sizeof(*ent));
1768 | if (ret) { |
1769 | ret = -EFAULT; |
1770 | goto free_ent; |
1771 | } |
1772 | |
1773 | for (i = 0; i < args->hdr.count; i++) { |
		obj = drm_gem_object_lookup(file_priv, ent[i].handle);
1775 | if (!obj) { |
1776 | ret = -ENOENT; |
1777 | goto free_ent; |
1778 | } |
1779 | bo = to_qaic_bo(obj); |
1780 | /* |
1781 | * perf stats ioctl is called before wait ioctl is complete then |
1782 | * the latency information is invalid. |
1783 | */ |
1784 | if (bo->perf_stats.req_processed_ts < bo->perf_stats.req_submit_ts) { |
1785 | ent[i].device_latency_us = 0; |
1786 | } else { |
1787 | ent[i].device_latency_us = div_u64(dividend: (bo->perf_stats.req_processed_ts - |
1788 | bo->perf_stats.req_submit_ts), divisor: 1000); |
1789 | } |
1790 | ent[i].submit_latency_us = div_u64(dividend: (bo->perf_stats.req_submit_ts - |
1791 | bo->perf_stats.req_received_ts), divisor: 1000); |
1792 | ent[i].queue_level_before = bo->perf_stats.queue_level_before; |
1793 | ent[i].num_queue_element = bo->total_slice_nents; |
1794 | drm_gem_object_put(obj); |
1795 | } |
1796 | |
	if (copy_to_user(u64_to_user_ptr(args->data), ent, args->hdr.count * sizeof(*ent)))
1798 | ret = -EFAULT; |
1799 | |
1800 | free_ent: |
	kfree(ent);
1802 | unlock_dev_srcu: |
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1806 | return ret; |
1807 | } |
1808 | |
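/*
 * Undo the slicing of a BO and return it to the freshly-allocated state.
 * Caller must hold bo->lock.
 */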
1809 | static void detach_slice_bo(struct qaic_device *qdev, struct qaic_bo *bo) |
1810 | { |
1811 | qaic_free_slices_bo(bo); |
1812 | qaic_unprepare_bo(qdev, bo); |
	qaic_init_bo(bo, true);
	list_del(&bo->bo_list);
	drm_gem_object_put(&bo->base);
1816 | } |
1817 | |
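/**
 * qaic_detach_slice_bo_ioctl() - Handle DRM_IOCTL_QAIC_DETACH_SLICE_BO.
 * @dev: DRM device the ioctl was issued against
 * @data: Ioctl payload, a struct qaic_detach_slice
 * @file_priv: DRM file of the caller
 *
 * Detach a sliced BO from its DBC so it can be re-sliced or freed. A BO that
 * is still queued to the hardware cannot be detached and fails with -EBUSY.
 *
 * Return: 0 on success, negative errno on failure.
 */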
1818 | int qaic_detach_slice_bo_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) |
1819 | { |
1820 | struct qaic_detach_slice *args = data; |
1821 | int rcu_id, usr_rcu_id, qdev_rcu_id; |
1822 | struct dma_bridge_chan *dbc; |
1823 | struct drm_gem_object *obj; |
1824 | struct qaic_device *qdev; |
1825 | struct qaic_user *usr; |
1826 | unsigned long flags; |
1827 | struct qaic_bo *bo; |
1828 | int ret; |
1829 | |
1830 | if (args->pad != 0) |
1831 | return -EINVAL; |
1832 | |
1833 | usr = file_priv->driver_priv; |
	usr_rcu_id = srcu_read_lock(&usr->qddev_lock);
1835 | if (!usr->qddev) { |
1836 | ret = -ENODEV; |
1837 | goto unlock_usr_srcu; |
1838 | } |
1839 | |
1840 | qdev = usr->qddev->qdev; |
	qdev_rcu_id = srcu_read_lock(&qdev->dev_lock);
1842 | if (qdev->dev_state != QAIC_ONLINE) { |
1843 | ret = -ENODEV; |
1844 | goto unlock_dev_srcu; |
1845 | } |
1846 | |
	obj = drm_gem_object_lookup(file_priv, args->handle);
1848 | if (!obj) { |
1849 | ret = -ENOENT; |
1850 | goto unlock_dev_srcu; |
1851 | } |
1852 | |
1853 | bo = to_qaic_bo(obj); |
1854 | ret = mutex_lock_interruptible(&bo->lock); |
1855 | if (ret) |
1856 | goto put_bo; |
1857 | |
1858 | if (!bo->sliced) { |
1859 | ret = -EINVAL; |
1860 | goto unlock_bo; |
1861 | } |
1862 | |
1863 | dbc = bo->dbc; |
	rcu_id = srcu_read_lock(&dbc->ch_lock);
1865 | if (dbc->usr != usr) { |
1866 | ret = -EINVAL; |
1867 | goto unlock_ch_srcu; |
1868 | } |
1869 | |
1870 | /* Check if BO is committed to H/W for DMA */ |
1871 | spin_lock_irqsave(&dbc->xfer_lock, flags); |
1872 | if (bo_queued(bo)) { |
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1874 | ret = -EBUSY; |
1875 | goto unlock_ch_srcu; |
1876 | } |
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1878 | |
1879 | detach_slice_bo(qdev, bo); |
1880 | |
1881 | unlock_ch_srcu: |
	srcu_read_unlock(&dbc->ch_lock, rcu_id);
unlock_bo:
	mutex_unlock(&bo->lock);
1885 | put_bo: |
1886 | drm_gem_object_put(obj); |
1887 | unlock_dev_srcu: |
	srcu_read_unlock(&qdev->dev_lock, qdev_rcu_id);
unlock_usr_srcu:
	srcu_read_unlock(&usr->qddev_lock, usr_rcu_id);
1891 | return ret; |
1892 | } |
1893 | |
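/*
 * Drain every BO still on the DBC's transfer list, resetting its bookkeeping
 * and releasing any waiters. The xfer_lock is dropped around the per-BO
 * cleanup so the CPU sync and the final object put do not run under the
 * spinlock.
 */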
1894 | static void empty_xfer_list(struct qaic_device *qdev, struct dma_bridge_chan *dbc) |
1895 | { |
1896 | unsigned long flags; |
1897 | struct qaic_bo *bo; |
1898 | |
1899 | spin_lock_irqsave(&dbc->xfer_lock, flags); |
	while (!list_empty(&dbc->xfer_list)) {
		bo = list_first_entry(&dbc->xfer_list, typeof(*bo), xfer_list);
		list_del_init(&bo->xfer_list);
		spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1904 | bo->nr_slice_xfer_done = 0; |
1905 | bo->req_id = 0; |
1906 | bo->perf_stats.req_received_ts = 0; |
1907 | bo->perf_stats.req_submit_ts = 0; |
1908 | bo->perf_stats.req_processed_ts = 0; |
1909 | bo->perf_stats.queue_level_before = 0; |
		dma_sync_sgtable_for_cpu(&qdev->pdev->dev, bo->sgt, bo->dir);
		complete_all(&bo->xfer_done);
		drm_gem_object_put(&bo->base);
		spin_lock_irqsave(&dbc->xfer_lock, flags);
	}
	spin_unlock_irqrestore(&dbc->xfer_lock, flags);
1916 | } |
1917 | |
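/*
 * Detach the user context from the DBC, failing if the caller does not own
 * the channel. synchronize_srcu() guarantees that no thread is still using
 * the channel under the old user context when this returns.
 */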
1918 | int disable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) |
1919 | { |
1920 | if (!qdev->dbc[dbc_id].usr || qdev->dbc[dbc_id].usr->handle != usr->handle) |
1921 | return -EPERM; |
1922 | |
1923 | qdev->dbc[dbc_id].usr = NULL; |
	synchronize_srcu(&qdev->dbc[dbc_id].ch_lock);
1925 | return 0; |
1926 | } |
1927 | |
1928 | /** |
1929 | * enable_dbc - Enable the DBC. DBCs are disabled by removing the context of |
1930 | * user. Add user context back to DBC to enable it. This function trusts the |
1931 | * DBC ID passed and expects the DBC to be disabled. |
1932 | * @qdev: Qranium device handle |
1933 | * @dbc_id: ID of the DBC |
1934 | * @usr: User context |
1935 | */ |
1936 | void enable_dbc(struct qaic_device *qdev, u32 dbc_id, struct qaic_user *usr) |
1937 | { |
1938 | qdev->dbc[dbc_id].usr = usr; |
1939 | } |
1940 | |
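/*
 * Force the DBC idle: revoke the user context and complete every transfer
 * still pending on the channel so that waiters are released.
 */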
1941 | void wakeup_dbc(struct qaic_device *qdev, u32 dbc_id) |
1942 | { |
1943 | struct dma_bridge_chan *dbc = &qdev->dbc[dbc_id]; |
1944 | |
1945 | dbc->usr = NULL; |
1946 | empty_xfer_list(qdev, dbc); |
	synchronize_srcu(&dbc->ch_lock);
	/*
	 * Threads that were holding the channel lock may have added more
	 * elements to the xfer_list while we drained it. Flush those out too.
	 */
1952 | empty_xfer_list(qdev, dbc); |
1953 | } |
1954 | |
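/*
 * Tear the DBC down completely: idle the channel, free the request/response
 * queue memory, detach any BOs still sliced against it, and mark the channel
 * available for reuse.
 */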
1955 | void release_dbc(struct qaic_device *qdev, u32 dbc_id) |
1956 | { |
1957 | struct qaic_bo *bo, *bo_temp; |
1958 | struct dma_bridge_chan *dbc; |
1959 | |
1960 | dbc = &qdev->dbc[dbc_id]; |
1961 | if (!dbc->in_use) |
1962 | return; |
1963 | |
1964 | wakeup_dbc(qdev, dbc_id); |
1965 | |
	dma_free_coherent(&qdev->pdev->dev, dbc->total_size, dbc->req_q_base, dbc->dma_addr);
1967 | dbc->total_size = 0; |
1968 | dbc->req_q_base = NULL; |
1969 | dbc->dma_addr = 0; |
1970 | dbc->nelem = 0; |
1971 | dbc->usr = NULL; |
1972 | |
1973 | list_for_each_entry_safe(bo, bo_temp, &dbc->bo_lists, bo_list) { |
		drm_gem_object_get(&bo->base);
1975 | mutex_lock(&bo->lock); |
1976 | detach_slice_bo(qdev, bo); |
		mutex_unlock(&bo->lock);
		drm_gem_object_put(&bo->base);
1979 | } |
1980 | |
1981 | dbc->in_use = false; |
1982 | wake_up(&dbc->dbc_release); |
1983 | } |
1984 | |