/*
 * videobuf2-dma-contig.c - DMA contig memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/dma-buf.h>
#include <linux/module.h>
#include <linux/refcount.h>
#include <linux/scatterlist.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-dma-contig.h>
#include <media/videobuf2-memops.h>

struct vb2_dc_buf {
	struct device			*dev;
	void				*vaddr;
	unsigned long			size;
	void				*cookie;
	dma_addr_t			dma_addr;
	unsigned long			attrs;
	enum dma_data_direction		dma_dir;
	struct sg_table			*dma_sgt;
	struct frame_vector		*vec;

	/* MMAP related */
	struct vb2_vmarea_handler	handler;
	refcount_t			refcount;
	struct sg_table			*sgt_base;

	/* DMABUF related */
	struct dma_buf_attachment	*db_attach;

	struct vb2_buffer		*vb;
	bool				non_coherent_mem;
};

/*********************************************/
/*        scatterlist table functions        */
/*********************************************/

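/*
 * Return the size of the initial physically contiguous chunk of a
 * DMA-mapped scatterlist: walk the DMA segments and stop accumulating
 * at the first segment whose DMA address does not immediately follow
 * the end of the previous one.
 */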
static unsigned long vb2_dc_get_contiguous_size(struct sg_table *sgt)
{
	struct scatterlist *s;
	dma_addr_t expected = sg_dma_address(sgt->sgl);
	unsigned int i;
	unsigned long size = 0;

	for_each_sgtable_dma_sg(sgt, s, i) {
		if (sg_dma_address(s) != expected)
			break;
		expected += sg_dma_len(s);
		size += sg_dma_len(s);
	}
	return size;
}

/*********************************************/
/*         callbacks for all buffers         */
/*********************************************/

static void *vb2_dc_cookie(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return &buf->dma_addr;
}
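
/*
 * Note: drivers do not usually call the ->cookie() memop directly; they
 * normally go through the vb2_dma_contig_plane_dma_addr() helper from
 * media/videobuf2-dma-contig.h, which dereferences the returned pointer:
 *
 *	dma_addr_t addr = vb2_dma_contig_plane_dma_addr(vb, 0);
 */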

/*
 * This function may fail if:
 *
 * - dma_buf_vmap() fails
 *   E.g. due to lack of virtual mapping address space, or due to
 *   dmabuf->ops misconfiguration.
 *
 * - dma_vmap_noncontiguous() fails
 *   For instance, when requested buffer size is larger than totalram_pages().
 *   Relevant for buffers that use non-coherent memory.
 *
 * - Queue DMA attrs have DMA_ATTR_NO_KERNEL_MAPPING set
 *   Relevant for buffers that use coherent memory.
 */
static void *vb2_dc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (buf->vaddr)
		return buf->vaddr;

	if (buf->db_attach) {
		struct iosys_map map;

		if (!dma_buf_vmap_unlocked(buf->db_attach->dmabuf, &map))
			buf->vaddr = map.vaddr;

		return buf->vaddr;
	}

	if (buf->non_coherent_mem)
		buf->vaddr = dma_vmap_noncontiguous(buf->dev, buf->size,
						    buf->dma_sgt);
	return buf->vaddr;
}

static unsigned int vb2_dc_num_users(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

static void vb2_dc_prepare(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_prepare)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		flush_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_device(buf->dev, sgt, buf->dma_dir);
}

static void vb2_dc_finish(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;

	/* This takes care of DMABUF and user-enforced cache sync hint */
	if (buf->vb->skip_cache_sync_on_finish)
		return;

	if (!buf->non_coherent_mem)
		return;

	/* Non-coherent MMAP only */
	if (buf->vaddr)
		invalidate_kernel_vmap_range(buf->vaddr, buf->size);

	/* For both USERPTR and non-coherent MMAP */
	dma_sync_sgtable_for_cpu(buf->dev, sgt, buf->dma_dir);
}

/*********************************************/
/*        callbacks for MMAP buffers         */
/*********************************************/

static void vb2_dc_put(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;

	if (!refcount_dec_and_test(&buf->refcount))
		return;

	if (buf->non_coherent_mem) {
		if (buf->vaddr)
			dma_vunmap_noncontiguous(buf->dev, buf->vaddr);
		dma_free_noncontiguous(buf->dev, buf->size,
				       buf->dma_sgt, buf->dma_dir);
	} else {
		if (buf->sgt_base) {
			sg_free_table(buf->sgt_base);
			kfree(buf->sgt_base);
		}
		dma_free_attrs(buf->dev, buf->size, buf->cookie,
			       buf->dma_addr, buf->attrs);
	}
	put_device(buf->dev);
	kfree(buf);
}

static int vb2_dc_alloc_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->cookie = dma_alloc_attrs(buf->dev,
				      buf->size,
				      &buf->dma_addr,
				      GFP_KERNEL | q->gfp_flags,
				      buf->attrs);
	if (!buf->cookie)
		return -ENOMEM;

	if (q->dma_attrs & DMA_ATTR_NO_KERNEL_MAPPING)
		return 0;

	buf->vaddr = buf->cookie;
	return 0;
}

static int vb2_dc_alloc_non_coherent(struct vb2_dc_buf *buf)
{
	struct vb2_queue *q = buf->vb->vb2_queue;

	buf->dma_sgt = dma_alloc_noncontiguous(buf->dev,
					       buf->size,
					       buf->dma_dir,
					       GFP_KERNEL | q->gfp_flags,
					       buf->attrs);
	if (!buf->dma_sgt)
		return -ENOMEM;

	buf->dma_addr = sg_dma_address(buf->dma_sgt->sgl);

	/*
	 * For non-coherent buffers the kernel mapping is created on demand
	 * in vb2_dc_vaddr().
	 */
	return 0;
}

static void *vb2_dc_alloc(struct vb2_buffer *vb,
			  struct device *dev,
			  unsigned long size)
{
	struct vb2_dc_buf *buf;
	int ret;

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->attrs = vb->vb2_queue->dma_attrs;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;
	buf->non_coherent_mem = vb->vb2_queue->non_coherent_mem;

	buf->size = size;
	/* Prevent the device from being released while the buffer is used */
	buf->dev = get_device(dev);

	if (buf->non_coherent_mem)
		ret = vb2_dc_alloc_non_coherent(buf);
	else
		ret = vb2_dc_alloc_coherent(buf);

	if (ret) {
		dev_err(dev, "dma alloc of size %lu failed\n", size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_dc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);

	return buf;
}

static int vb2_dc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_dc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		printk(KERN_ERR "No buffer to map\n");
		return -EINVAL;
	}

	if (buf->non_coherent_mem)
		ret = dma_mmap_noncontiguous(buf->dev, vma, buf->size,
					     buf->dma_sgt);
	else
		ret = dma_mmap_attrs(buf->dev, vma, buf->cookie, buf->dma_addr,
				     buf->size, buf->attrs);
	if (ret) {
		pr_err("Remapping memory failed, error: %d\n", ret);
		return ret;
	}

	vm_flags_set(vma, VM_DONTEXPAND | VM_DONTDUMP);
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	pr_debug("%s: mapped dma addr 0x%08lx at 0x%08lx, size %lu\n",
		 __func__, (unsigned long)buf->dma_addr, vma->vm_start,
		 buf->size);

	return 0;
}

/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

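/*
 * Each attachment carries its own copy of the exporter's scatterlist
 * (built in vb2_dc_dmabuf_ops_attach() below), because a single sg_table
 * must not be DMA-mapped for several importing devices at the same time.
 * dma_dir records the direction of the current mapping, or DMA_NONE
 * while the attachment is unmapped.
 */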
struct vb2_dc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_dc_dmabuf_ops_attach(struct dma_buf *dbuf,
				    struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_dc_attachment *attach;
	unsigned int i;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt;
	struct vb2_dc_buf *buf = dbuf->priv;
	int ret;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	/* Copy the buf->sgt_base scatter list to the attachment, as we can't
	 * map the same scatter list to multiple attachments at the same time.
	 */
	ret = sg_alloc_table(sgt, buf->sgt_base->orig_nents, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return -ENOMEM;
	}

	rd = buf->sgt_base->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;

	return 0;
}

static void vb2_dc_dmabuf_ops_detach(struct dma_buf *dbuf,
				     struct dma_buf_attachment *db_attach)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		/*
		 * Cache sync can be skipped here, as the vb2_dc memory is
		 * allocated from device coherent memory, which means the
		 * memory locations do not require any explicit cache
		 * maintenance prior or after being used by the device.
		 */
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

static struct sg_table *vb2_dc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_dc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		attach->dma_dir = DMA_NONE;
	}

	/*
	 * Map to the client with the new direction. No cache sync is
	 * required; see the comment in vb2_dc_dmabuf_ops_detach().
	 */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_dc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
				    struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_dc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_dc_get_dmabuf */
	vb2_dc_put(dbuf->priv);
}

static int
vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf,
				   enum dma_data_direction direction)
{
	return 0;
}

static int
vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf,
				 enum dma_data_direction direction)
{
	return 0;
}

static int vb2_dc_dmabuf_ops_vmap(struct dma_buf *dbuf, struct iosys_map *map)
{
	struct vb2_dc_buf *buf;
	void *vaddr;

	buf = dbuf->priv;
	vaddr = vb2_dc_vaddr(buf->vb, buf);
	if (!vaddr)
		return -EINVAL;

	iosys_map_set_vaddr(map, vaddr);

	return 0;
}

static int vb2_dc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				  struct vm_area_struct *vma)
{
	return vb2_dc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_dc_dmabuf_ops = {
	.attach = vb2_dc_dmabuf_ops_attach,
	.detach = vb2_dc_dmabuf_ops_detach,
	.map_dma_buf = vb2_dc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_dc_dmabuf_ops_unmap,
	.begin_cpu_access = vb2_dc_dmabuf_ops_begin_cpu_access,
	.end_cpu_access = vb2_dc_dmabuf_ops_end_cpu_access,
	.vmap = vb2_dc_dmabuf_ops_vmap,
	.mmap = vb2_dc_dmabuf_ops_mmap,
	.release = vb2_dc_dmabuf_ops_release,
};

static struct sg_table *vb2_dc_get_base_sgt(struct vb2_dc_buf *buf)
{
	int ret;
	struct sg_table *sgt;

	if (buf->non_coherent_mem)
		return buf->dma_sgt;

	sgt = kmalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		dev_err(buf->dev, "failed to alloc sg table\n");
		return NULL;
	}

	ret = dma_get_sgtable_attrs(buf->dev, sgt, buf->cookie, buf->dma_addr,
				    buf->size, buf->attrs);
	if (ret < 0) {
		dev_err(buf->dev, "failed to get scatterlist from DMA API\n");
		kfree(sgt);
		return NULL;
	}

	return sgt;
}

static struct dma_buf *vb2_dc_get_dmabuf(struct vb2_buffer *vb,
					 void *buf_priv,
					 unsigned long flags)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_dc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (!buf->sgt_base)
		buf->sgt_base = vb2_dc_get_base_sgt(buf);

	if (WARN_ON(!buf->sgt_base))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}

/*********************************************/
/*       callbacks for USERPTR buffers       */
/*********************************************/

static void vb2_dc_put_userptr(void *buf_priv)
{
	struct vb2_dc_buf *buf = buf_priv;
	struct sg_table *sgt = buf->dma_sgt;
	int i;
	struct page **pages;

	if (sgt) {
		/*
		 * No need to sync to CPU, it's already synced to the CPU
		 * since the finish() memop will have been called before this.
		 */
		dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir,
				  DMA_ATTR_SKIP_CPU_SYNC);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL) {
			pages = frame_vector_pages(buf->vec);
			/* sgt should exist only if vector contains pages... */
			if (!WARN_ON_ONCE(IS_ERR(pages)))
				for (i = 0; i < frame_vector_count(buf->vec); i++)
					set_page_dirty_lock(pages[i]);
		}
		sg_free_table(sgt);
		kfree(sgt);
	} else {
		dma_unmap_resource(buf->dev, buf->dma_addr, buf->size,
				   buf->dma_dir, 0);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_dc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				unsigned long vaddr, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct frame_vector *vec;
	unsigned int offset;
	int n_pages, i;
	int ret = 0;
	struct sg_table *sgt;
	unsigned long contig_size;
	unsigned long dma_align = dma_get_cache_alignment();

	/* Only cache aligned DMA transfers are reliable */
	if (!IS_ALIGNED(vaddr | size, dma_align)) {
		pr_debug("user data must be aligned to %lu bytes\n", dma_align);
		return ERR_PTR(-EINVAL);
	}

	if (!size) {
		pr_debug("size is zero\n");
		return ERR_PTR(-EINVAL);
	}

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof *buf, GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->vb = vb;

	offset = lower_32_bits(offset_in_page(vaddr));
	vec = vb2_create_framevec(vaddr, size, buf->dma_dir == DMA_FROM_DEVICE ||
					       buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_buf;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	ret = frame_vector_to_pages(vec);
	if (ret < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * Failed to convert to pages... Check the memory is physically
		 * contiguous and use direct mapping
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i-1] + 1 != nums[i])
				goto fail_pfnvec;
		buf->dma_addr = dma_map_resource(buf->dev,
				__pfn_to_phys(nums[0]), size, buf->dma_dir, 0);
		if (dma_mapping_error(buf->dev, buf->dma_addr)) {
			ret = -ENOMEM;
			goto fail_pfnvec;
		}
		goto out;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		pr_err("failed to allocate sg table\n");
		ret = -ENOMEM;
		goto fail_pfnvec;
	}

	ret = sg_alloc_table_from_pages(sgt, frame_vector_pages(vec), n_pages,
					offset, size, GFP_KERNEL);
	if (ret) {
		pr_err("failed to initialize sg table\n");
		goto fail_sgt;
	}

	/*
	 * No need to sync to the device, this will happen later when the
	 * prepare() memop is called.
	 */
	if (dma_map_sgtable(buf->dev, sgt, buf->dma_dir,
			    DMA_ATTR_SKIP_CPU_SYNC)) {
		pr_err("failed to map scatterlist\n");
		ret = -EIO;
		goto fail_sgt_init;
	}

	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < size) {
		pr_err("contiguous mapping is too small %lu/%lu\n",
		       contig_size, size);
		ret = -EFAULT;
		goto fail_map_sg;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->non_coherent_mem = 1;

out:
	buf->size = size;

	return buf;

fail_map_sg:
	dma_unmap_sgtable(buf->dev, sgt, buf->dma_dir, DMA_ATTR_SKIP_CPU_SYNC);

fail_sgt_init:
	sg_free_table(sgt);

fail_sgt:
	kfree(sgt);

fail_pfnvec:
	vb2_destroy_framevec(vec);

fail_buf:
	kfree(buf);

	return ERR_PTR(ret);
}

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

static int vb2_dc_map_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt;
	unsigned long contig_size;

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to pin a non attached buffer\n");
		return -EINVAL;
	}

	if (WARN_ON(buf->dma_sgt)) {
		pr_err("dmabuf buffer is already pinned\n");
		return 0;
	}

	/* get the associated scatterlist for this buffer */
	sgt = dma_buf_map_attachment_unlocked(buf->db_attach, buf->dma_dir);
	if (IS_ERR(sgt)) {
		pr_err("Error getting dmabuf scatterlist\n");
		return -EINVAL;
	}

	/* checking if dmabuf is big enough to store contiguous chunk */
	contig_size = vb2_dc_get_contiguous_size(sgt);
	if (contig_size < buf->size) {
		pr_err("contiguous chunk is too small %lu/%lu\n",
		       contig_size, buf->size);
		dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt,
						  buf->dma_dir);
		return -EFAULT;
	}

	buf->dma_addr = sg_dma_address(sgt->sgl);
	buf->dma_sgt = sgt;
	buf->vaddr = NULL;

	return 0;
}

static void vb2_dc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;
	struct sg_table *sgt = buf->dma_sgt;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (WARN_ON(!buf->db_attach)) {
		pr_err("trying to unpin a not attached buffer\n");
		return;
	}

	if (WARN_ON(!sgt)) {
		pr_err("dmabuf buffer is already unpinned\n");
		return;
	}

	if (buf->vaddr) {
		dma_buf_vunmap_unlocked(buf->db_attach->dmabuf, &map);
		buf->vaddr = NULL;
	}
	dma_buf_unmap_attachment_unlocked(buf->db_attach, sgt, buf->dma_dir);

	buf->dma_addr = 0;
	buf->dma_sgt = NULL;
}

static void vb2_dc_detach_dmabuf(void *mem_priv)
{
	struct vb2_dc_buf *buf = mem_priv;

	/* if vb2 works correctly you should never detach mapped buffer */
	if (WARN_ON(buf->dma_addr))
		vb2_dc_unmap_dmabuf(buf);

	/* detach this attachment */
	dma_buf_detach(buf->db_attach->dmabuf, buf->db_attach);
	kfree(buf);
}

static void *vb2_dc_attach_dmabuf(struct vb2_buffer *vb, struct device *dev,
				  struct dma_buf *dbuf, unsigned long size)
{
	struct vb2_dc_buf *buf;
	struct dma_buf_attachment *dba;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	if (WARN_ON(!dev))
		return ERR_PTR(-EINVAL);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dev = dev;
	buf->vb = vb;

	/* create attachment for the dmabuf with the user device */
	dba = dma_buf_attach(dbuf, buf->dev);
	if (IS_ERR(dba)) {
		pr_err("failed to attach dmabuf\n");
		kfree(buf);
		return dba;
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;
	buf->db_attach = dba;

	return buf;
}

/*********************************************/
/*       DMA CONTIG exported functions       */
/*********************************************/

const struct vb2_mem_ops vb2_dma_contig_memops = {
	.alloc = vb2_dc_alloc,
	.put = vb2_dc_put,
	.get_dmabuf = vb2_dc_get_dmabuf,
	.cookie = vb2_dc_cookie,
	.vaddr = vb2_dc_vaddr,
	.mmap = vb2_dc_mmap,
	.get_userptr = vb2_dc_get_userptr,
	.put_userptr = vb2_dc_put_userptr,
	.prepare = vb2_dc_prepare,
	.finish = vb2_dc_finish,
	.map_dmabuf = vb2_dc_map_dmabuf,
	.unmap_dmabuf = vb2_dc_unmap_dmabuf,
	.attach_dmabuf = vb2_dc_attach_dmabuf,
	.detach_dmabuf = vb2_dc_detach_dmabuf,
	.num_users = vb2_dc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_dma_contig_memops);
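
/*
 * A minimal usage sketch (hypothetical driver code, not part of this
 * allocator): a driver selects this allocator by pointing its vb2 queue
 * at vb2_dma_contig_memops before calling vb2_queue_init():
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF;
 *	q->dev = &pdev->dev;
 *	q->mem_ops = &vb2_dma_contig_memops;
 *	ret = vb2_queue_init(q);
 *
 * q->dev should be the device that actually performs the DMA, since it
 * is the device handed to the alloc/map callbacks above.
 */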

/**
 * vb2_dma_contig_set_max_seg_size() - configure DMA max segment size
 * @dev: device for configuring DMA parameters
 * @size: size of DMA max segment size to set
 *
 * To allow mapping the scatter-list into a single chunk in the DMA
 * address space, the device is required to have the DMA max segment
 * size parameter set to a value larger than the buffer size. Otherwise,
 * the DMA-mapping subsystem will split the mapping into max segment
 * size chunks. This function sets the DMA max segment size
 * parameter to let DMA-mapping map a buffer as a single chunk in DMA
 * address space.
 * This code assumes that the DMA-mapping subsystem will merge all
 * scatterlist segments if this is really possible (for example when
 * an IOMMU is available and enabled).
 * Ideally, this parameter should be set by the generic bus code, but it
 * is left with the default 64KiB value due to historical limitations in
 * other subsystems (like limited USB host drivers) and there is no good
 * place to set it to the proper value.
 * This function should be called from the drivers, which are known to
 * operate on platforms with IOMMU and provide access to shared buffers
 * (either USERPTR or DMABUF). This should be done before initializing
 * the videobuf2 queue.
 */
int vb2_dma_contig_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (!dev->dma_parms) {
		dev_err(dev, "Failed to set max_seg_size: dma_parms is NULL\n");
		return -ENODEV;
	}
	if (dma_get_max_seg_size(dev) < size)
		return dma_set_max_seg_size(dev, size);

	return 0;
}
EXPORT_SYMBOL_GPL(vb2_dma_contig_set_max_seg_size);
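
/*
 * Example (hypothetical probe code): a driver on an IOMMU-backed platform
 * would typically raise the limit before setting up its vb2 queue, e.g.:
 *
 *	ret = vb2_dma_contig_set_max_seg_size(&pdev->dev, DMA_BIT_MASK(32));
 *	if (ret)
 *		return ret;
 */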

MODULE_DESCRIPTION("DMA-contig memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);