/*
 * videobuf2-vmalloc.c - vmalloc memory allocator for videobuf2
 *
 * Copyright (C) 2010 Samsung Electronics
 *
 * Author: Pawel Osciak <pawel@osciak.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/io.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/refcount.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <media/videobuf2-v4l2.h>
#include <media/videobuf2-vmalloc.h>
#include <media/videobuf2-memops.h>
struct vb2_vmalloc_buf {
	void				*vaddr;
	struct frame_vector		*vec;
	enum dma_data_direction		dma_dir;
	unsigned long			size;
	refcount_t			refcount;
	struct vb2_vmarea_handler	handler;
	struct dma_buf			*dbuf;
};

static void vb2_vmalloc_put(void *buf_priv);

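/*
 * MMAP path: back the buffer with vmalloc_user() memory, which is
 * zeroed and page-aligned so it can later be handed to userspace via
 * remap_vmalloc_range() in vb2_vmalloc_mmap().
 */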
static void *vb2_vmalloc_alloc(struct vb2_buffer *vb, struct device *dev,
			       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL | vb->vb2_queue->gfp_flags);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->size = size;
	buf->vaddr = vmalloc_user(buf->size);
	if (!buf->vaddr) {
		pr_debug("vmalloc of size %ld failed\n", buf->size);
		kfree(buf);
		return ERR_PTR(-ENOMEM);
	}

	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->handler.refcount = &buf->refcount;
	buf->handler.put = vb2_vmalloc_put;
	buf->handler.arg = buf;

	refcount_set(&buf->refcount, 1);
	return buf;
}

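/*
 * Drop one reference; the vmalloc area and the bookkeeping structure are
 * freed once the last user (the queue, an mmap()ed vma or an exported
 * dma-buf) is gone.
 */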
static void vb2_vmalloc_put(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (refcount_dec_and_test(&buf->refcount)) {
		vfree(buf->vaddr);
		kfree(buf);
	}
}

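/*
 * USERPTR path: pin the user pages with a frame vector and map them into
 * the kernel with vm_map_ram(). If the range has no struct pages behind
 * it (e.g. a VM_PFNMAP mapping), fall back to ioremap(), which is only
 * possible when the PFNs are physically contiguous.
 */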
static void *vb2_vmalloc_get_userptr(struct vb2_buffer *vb, struct device *dev,
				     unsigned long vaddr, unsigned long size)
{
	struct vb2_vmalloc_buf *buf;
	struct frame_vector *vec;
	int n_pages, offset, i;
	int ret = -ENOMEM;

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dma_dir = vb->vb2_queue->dma_dir;
	offset = vaddr & ~PAGE_MASK;
	buf->size = size;
	vec = vb2_create_framevec(vaddr, size,
				  buf->dma_dir == DMA_FROM_DEVICE ||
				  buf->dma_dir == DMA_BIDIRECTIONAL);
	if (IS_ERR(vec)) {
		ret = PTR_ERR(vec);
		goto fail_pfnvec_create;
	}
	buf->vec = vec;
	n_pages = frame_vector_count(vec);
	if (frame_vector_to_pages(vec) < 0) {
		unsigned long *nums = frame_vector_pfns(vec);

		/*
		 * We cannot get page pointers for these pfns. Check memory is
		 * physically contiguous and use direct mapping.
		 */
		for (i = 1; i < n_pages; i++)
			if (nums[i - 1] + 1 != nums[i])
				goto fail_map;
		buf->vaddr = (__force void *)
			ioremap(__pfn_to_phys(nums[0]), size + offset);
	} else {
		buf->vaddr = vm_map_ram(frame_vector_pages(vec), n_pages, -1);
	}

	if (!buf->vaddr)
		goto fail_map;
	buf->vaddr += offset;
	return buf;

fail_map:
	vb2_destroy_framevec(vec);
fail_pfnvec_create:
	kfree(buf);

	return ERR_PTR(ret);
}

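/*
 * Undo vb2_vmalloc_get_userptr(): tear down the kernel mapping, mark the
 * pages dirty if the device may have written to them, and unpin the
 * frame vector.
 */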
static void vb2_vmalloc_put_userptr(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	unsigned long vaddr = (unsigned long)buf->vaddr & PAGE_MASK;
	unsigned int i;
	struct page **pages;
	unsigned int n_pages;

	if (!buf->vec->is_pfns) {
		n_pages = frame_vector_count(buf->vec);
		if (vaddr)
			vm_unmap_ram((void *)vaddr, n_pages);
		if (buf->dma_dir == DMA_FROM_DEVICE ||
		    buf->dma_dir == DMA_BIDIRECTIONAL) {
			pages = frame_vector_pages(buf->vec);
			if (!WARN_ON_ONCE(IS_ERR(pages)))
				for (i = 0; i < n_pages; i++)
					set_page_dirty_lock(pages[i]);
		}
	} else {
		iounmap((__force void __iomem *)buf->vaddr);
	}
	vb2_destroy_framevec(buf->vec);
	kfree(buf);
}

static void *vb2_vmalloc_vaddr(struct vb2_buffer *vb, void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	if (!buf->vaddr) {
		pr_err("Address of an unallocated plane requested or cannot map user pointer\n");
		return NULL;
	}

	return buf->vaddr;
}

static unsigned int vb2_vmalloc_num_users(void *buf_priv)
{
	struct vb2_vmalloc_buf *buf = buf_priv;

	return refcount_read(&buf->refcount);
}

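/*
 * Map an MMAP buffer into a userspace vma. Reference counting is done
 * through the common vb2 vm_operations, so the buffer stays alive for as
 * long as any mapping of it exists.
 */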
static int vb2_vmalloc_mmap(void *buf_priv, struct vm_area_struct *vma)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	int ret;

	if (!buf) {
		pr_err("No memory to map\n");
		return -EINVAL;
	}

	ret = remap_vmalloc_range(vma, buf->vaddr, 0);
	if (ret) {
		pr_err("Remapping vmalloc memory, error: %d\n", ret);
		return ret;
	}

	/*
	 * Make sure that vm_areas for 2 buffers won't be merged together
	 */
	vm_flags_set(vma, VM_DONTEXPAND);

	/*
	 * Use common vm_area operations to track buffer refcount.
	 */
	vma->vm_private_data = &buf->handler;
	vma->vm_ops = &vb2_common_vm_ops;

	vma->vm_ops->open(vma);

	return 0;
}

#ifdef CONFIG_HAS_DMA
/*********************************************/
/*         DMABUF ops for exporters          */
/*********************************************/

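/*
 * vmalloc memory is only virtually contiguous, so exporting it as a
 * dma-buf means building a scatterlist with one entry per page. Each
 * attachment gets its own sg_table, which is mapped on demand in
 * vb2_vmalloc_dmabuf_ops_map().
 */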
struct vb2_vmalloc_attachment {
	struct sg_table sgt;
	enum dma_data_direction dma_dir;
};

static int vb2_vmalloc_dmabuf_ops_attach(struct dma_buf *dbuf,
					 struct dma_buf_attachment *dbuf_attach)
{
	struct vb2_vmalloc_attachment *attach;
	struct vb2_vmalloc_buf *buf = dbuf->priv;
	int num_pages = PAGE_ALIGN(buf->size) / PAGE_SIZE;
	struct sg_table *sgt;
	struct scatterlist *sg;
	void *vaddr = buf->vaddr;
	int ret;
	int i;

	attach = kzalloc(sizeof(*attach), GFP_KERNEL);
	if (!attach)
		return -ENOMEM;

	sgt = &attach->sgt;
	ret = sg_alloc_table(sgt, num_pages, GFP_KERNEL);
	if (ret) {
		kfree(attach);
		return ret;
	}
	for_each_sgtable_sg(sgt, sg, i) {
		struct page *page = vmalloc_to_page(vaddr);

		if (!page) {
			sg_free_table(sgt);
			kfree(attach);
			return -ENOMEM;
		}
		sg_set_page(sg, page, PAGE_SIZE, 0);
		vaddr += PAGE_SIZE;
	}

	attach->dma_dir = DMA_NONE;
	dbuf_attach->priv = attach;
	return 0;
}

static void vb2_vmalloc_dmabuf_ops_detach(struct dma_buf *dbuf,
					  struct dma_buf_attachment *db_attach)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	if (!attach)
		return;

	sgt = &attach->sgt;

	/* release the scatterlist cache */
	if (attach->dma_dir != DMA_NONE)
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
	sg_free_table(sgt);
	kfree(attach);
	db_attach->priv = NULL;
}

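/*
 * Map the attachment's scatterlist for the importing device. The mapping
 * is cached in the attachment: remapping with the same direction is a
 * no-op, while a direction change triggers an unmap followed by a fresh
 * dma_map_sgtable().
 */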
static struct sg_table *vb2_vmalloc_dmabuf_ops_map(
	struct dma_buf_attachment *db_attach, enum dma_data_direction dma_dir)
{
	struct vb2_vmalloc_attachment *attach = db_attach->priv;
	struct sg_table *sgt;

	sgt = &attach->sgt;
	/* return previously mapped sg table */
	if (attach->dma_dir == dma_dir)
		return sgt;

	/* release any previous cache */
	if (attach->dma_dir != DMA_NONE) {
		dma_unmap_sgtable(db_attach->dev, sgt, attach->dma_dir, 0);
		attach->dma_dir = DMA_NONE;
	}

	/* mapping to the client with new direction */
	if (dma_map_sgtable(db_attach->dev, sgt, dma_dir, 0)) {
		pr_err("failed to map scatterlist\n");
		return ERR_PTR(-EIO);
	}

	attach->dma_dir = dma_dir;

	return sgt;
}

static void vb2_vmalloc_dmabuf_ops_unmap(struct dma_buf_attachment *db_attach,
	struct sg_table *sgt, enum dma_data_direction dma_dir)
{
	/* nothing to be done here */
}

static void vb2_vmalloc_dmabuf_ops_release(struct dma_buf *dbuf)
{
	/* drop reference obtained in vb2_vmalloc_get_dmabuf */
	vb2_vmalloc_put(dbuf->priv);
}

static int vb2_vmalloc_dmabuf_ops_vmap(struct dma_buf *dbuf,
				       struct iosys_map *map)
{
	struct vb2_vmalloc_buf *buf = dbuf->priv;

	iosys_map_set_vaddr(map, buf->vaddr);

	return 0;
}

static int vb2_vmalloc_dmabuf_ops_mmap(struct dma_buf *dbuf,
				       struct vm_area_struct *vma)
{
	return vb2_vmalloc_mmap(dbuf->priv, vma);
}

static const struct dma_buf_ops vb2_vmalloc_dmabuf_ops = {
	.attach = vb2_vmalloc_dmabuf_ops_attach,
	.detach = vb2_vmalloc_dmabuf_ops_detach,
	.map_dma_buf = vb2_vmalloc_dmabuf_ops_map,
	.unmap_dma_buf = vb2_vmalloc_dmabuf_ops_unmap,
	.vmap = vb2_vmalloc_dmabuf_ops_vmap,
	.mmap = vb2_vmalloc_dmabuf_ops_mmap,
	.release = vb2_vmalloc_dmabuf_ops_release,
};

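/*
 * Export an MMAP buffer as a dma-buf. The exported dma-buf holds an
 * extra reference on the vb2 buffer, dropped again in
 * vb2_vmalloc_dmabuf_ops_release(), so the vmalloc area outlives the
 * queue for as long as an importer still uses it.
 */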
static struct dma_buf *vb2_vmalloc_get_dmabuf(struct vb2_buffer *vb,
					      void *buf_priv,
					      unsigned long flags)
{
	struct vb2_vmalloc_buf *buf = buf_priv;
	struct dma_buf *dbuf;
	DEFINE_DMA_BUF_EXPORT_INFO(exp_info);

	exp_info.ops = &vb2_vmalloc_dmabuf_ops;
	exp_info.size = buf->size;
	exp_info.flags = flags;
	exp_info.priv = buf;

	if (WARN_ON(!buf->vaddr))
		return NULL;

	dbuf = dma_buf_export(&exp_info);
	if (IS_ERR(dbuf))
		return NULL;

	/* dmabuf keeps reference to vb2 buffer */
	refcount_inc(&buf->refcount);

	return dbuf;
}
#endif /* CONFIG_HAS_DMA */

/*********************************************/
/*       callbacks for DMABUF buffers        */
/*********************************************/

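/*
 * Importer side: map_dmabuf/unmap_dmabuf obtain and release a kernel
 * virtual mapping of an attached dma-buf via dma_buf_vmap_unlocked(), so
 * vb2_vmalloc_vaddr() works for imported buffers as well.
 */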
static int vb2_vmalloc_map_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map;
	int ret;

	ret = dma_buf_vmap_unlocked(buf->dbuf, &map);
	if (ret)
		return -EFAULT;
	buf->vaddr = map.vaddr;

	return 0;
}

static void vb2_vmalloc_unmap_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	dma_buf_vunmap_unlocked(buf->dbuf, &map);
	buf->vaddr = NULL;
}

static void vb2_vmalloc_detach_dmabuf(void *mem_priv)
{
	struct vb2_vmalloc_buf *buf = mem_priv;
	struct iosys_map map = IOSYS_MAP_INIT_VADDR(buf->vaddr);

	if (buf->vaddr)
		dma_buf_vunmap_unlocked(buf->dbuf, &map);

	kfree(buf);
}

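/*
 * Importer attach: just record the dma-buf and validate its size; the
 * actual kernel mapping is deferred to vb2_vmalloc_map_dmabuf().
 */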
static void *vb2_vmalloc_attach_dmabuf(struct vb2_buffer *vb,
				       struct device *dev,
				       struct dma_buf *dbuf,
				       unsigned long size)
{
	struct vb2_vmalloc_buf *buf;

	if (dbuf->size < size)
		return ERR_PTR(-EFAULT);

	buf = kzalloc(sizeof(*buf), GFP_KERNEL);
	if (!buf)
		return ERR_PTR(-ENOMEM);

	buf->dbuf = dbuf;
	buf->dma_dir = vb->vb2_queue->dma_dir;
	buf->size = size;

	return buf;
}

const struct vb2_mem_ops vb2_vmalloc_memops = {
	.alloc = vb2_vmalloc_alloc,
	.put = vb2_vmalloc_put,
	.get_userptr = vb2_vmalloc_get_userptr,
	.put_userptr = vb2_vmalloc_put_userptr,
#ifdef CONFIG_HAS_DMA
	.get_dmabuf = vb2_vmalloc_get_dmabuf,
#endif
	.map_dmabuf = vb2_vmalloc_map_dmabuf,
	.unmap_dmabuf = vb2_vmalloc_unmap_dmabuf,
	.attach_dmabuf = vb2_vmalloc_attach_dmabuf,
	.detach_dmabuf = vb2_vmalloc_detach_dmabuf,
	.vaddr = vb2_vmalloc_vaddr,
	.mmap = vb2_vmalloc_mmap,
	.num_users = vb2_vmalloc_num_users,
};
EXPORT_SYMBOL_GPL(vb2_vmalloc_memops);
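
/*
 * Typical usage (illustrative sketch, not part of this file): a driver
 * without a DMA engine, which fills buffers from the CPU, selects these
 * ops when initializing its vb2 queue, e.g.:
 *
 *	q->type = V4L2_BUF_TYPE_VIDEO_CAPTURE;
 *	q->io_modes = VB2_MMAP | VB2_USERPTR | VB2_DMABUF | VB2_READ;
 *	q->mem_ops = &vb2_vmalloc_memops;
 *	ret = vb2_queue_init(q);
 */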

MODULE_DESCRIPTION("vmalloc memory handling routines for videobuf2");
MODULE_AUTHOR("Pawel Osciak <pawel@osciak.com>");
MODULE_LICENSE("GPL");
MODULE_IMPORT_NS(DMA_BUF);

